blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bfd2e3c6b7a9d9528f01eb34aede0cd7a9835eb8
|
cb62ed00ef0ebe7e779acebde313a173a43cb345
|
/scripts/mlr-settings/resampling.R
|
6bec9b701c55b867b0c0f21c28dd9565a7166d5b
|
[] |
no_license
|
wlandau/pathogen-modeling
|
a23fb68f099c977b04fff15fd3b70e974c7bbe11
|
0e9e720b83f8acb985b755319fb2152ee1953928
|
refs/heads/master
| 2020-04-11T11:05:16.771982
| 2018-12-13T17:27:35
| 2018-12-13T17:27:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 136
|
r
|
resampling.R
|
# Resampling descriptions for spatial cross-validation (makeResampleDesc is
# presumably from the mlr package, per the /scripts/mlr-settings/ path — confirm).
# Inner loop: 5-fold spatial CV, typically used for hyperparameter tuning.
spcv_inner_fiveF <- makeResampleDesc("SpCV", iters = 5)
# Outer loop: spatial repeated CV, 5 folds repeated 100 times.
spcv_outer_fiveF_hundredR <- makeResampleDesc("SpRepCV", folds = 5, reps = 100)
|
ea72779bb31382bb00509c7f62e539def314c6f5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pdmod/examples/calculateResponse.Rd.R
|
001957a30fff23c6b3f74f914c563bd42342875c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 183
|
r
|
calculateResponse.Rd.R
|
# Extracted example for pdmod::calculateResponse (auto-generated from the
# package's .Rd documentation).
library(pdmod)
### Name: calculateResponse
### Title: Calculate response from the estimate
### Aliases: calculateResponse
### ** Examples
# Run the example call with 20 uniform-random inputs; exact argument semantics
# are defined by the pdmod package — see ?calculateResponse.
calculateResponse(0.8, 10, runif(20))
|
d0074191c5e30fca8c9dbebd251e9f9d547bcc2f
|
981cbaf799599f6d23bf79cdeb4ef72a8f3eb8a5
|
/script/5_imbalance.R
|
4c021139a5752a24e2402d65d24cf73ff38c020c
|
[] |
no_license
|
noahhhhhh/Santander_Customer_Satisfaction
|
51249cdc53ef6fcf545cd0e069e3b5e3458857af
|
2cce8e82ab12659445818f42316cdd8e7ae9d8b6
|
refs/heads/master
| 2021-01-17T17:14:28.761063
| 2016-05-10T02:38:32
| 2016-05-10T02:38:32
| 54,017,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,306
|
r
|
5_imbalance.R
|
# 5_imbalance.R -- Santander Customer Satisfaction (Kaggle).
# Explores class-imbalance handling (SMOTE, under-sampling, over-sampling) for
# an xgboost model, then cross-validates the chosen scheme with myCV_xgb().
# NOTE(review): setwd() with an absolute path makes this script machine-specific;
# rm(list = ls()) wipes the calling session. Both are anti-patterns in shared code.
setwd("/Volumes/Data Science/Google Drive/data_science_competition/kaggle/Santander_Customer_Satisfaction/")
rm(list = ls()); gc();
require(data.table)
require(purrr)
require(caret)
require(xgboost)
require(Ckmeans.1d.dp)
require(Metrics)
require(ggplot2)
require(DMwR)
source("utilities/preprocess.R")
source("utilities/cv.R")
# Loads dt.cleansed (data.table); TARGET == -1 marks unlabeled test rows.
load("../data/Santander_Customer_Satisfaction/RData/dt_cleansed.RData")
#######################################################################################
## 1.0 train, valid, test #############################################################
#######################################################################################
cat("prepare train, valid, and test data set...\n")
set.seed(888)
# Stratified 80/20 split of the labeled rows (TARGET >= 0).
ind.train <- createDataPartition(dt.cleansed[TARGET >= 0]$TARGET, p = .8, list = F) # remember to change it to .66
dt.train <- dt.cleansed[TARGET >= 0][ind.train]
dt.valid <- dt.cleansed[TARGET >= 0][-ind.train]
dt.test <- dt.cleansed[TARGET == -1]
dim(dt.train); dim(dt.valid); dim(dt.test)
# [1] 60816 310
# [1] 15204 310
# [1] 75818 310
table(dt.train$TARGET)
# 0 1
# 58378 2438
table(dt.valid$TARGET)
# 0 1
# 14634 570
## SMOTE minor cases ----- not useful
# dt.train[, TARGET := as.factor(dt.train$TARGET)]
# df.minor <- SMOTE(TARGET ~ .
# , dt.train
# , k = 3
# , perc.over = 20
# , perc.under = 0)
# table(df.minor$TARGET)
# dt.train <- rbind(dt.train, df.minor)
# dt.train[, TARGET := as.numeric(dt.train$TARGET) - 1]
# dim(dt.train)
# table(dt.train$TARGET)
# UNDER SAMPLE ----- UNDER SAMPLE 0.7, kfold 10, dt.valid .2
# UNDER SAMPLE 0.7, kfold 10, dt.valid .2
# round eta mcw md ss csb mean.dval max.dval min.dval sd.dval mean.valid max.valid min.vaild
# 1 1 0.025 1 9 0.9 0.5 0.8380746 0.8540579 0.8314056 0.00663848 0.8445303 0.8463393 0.8431303
# sd.valid
# 1 0.001091325
# nrow(dt.train[TARGET == 0])
# set.seed(888)
# sp <- sample(nrow(dt.train[TARGET == 0]), nrow(dt.train[TARGET == 0]) * .7)
# length(sp)
# dt.train <- rbind(dt.train[TARGET == 0][sp], dt.train[TARGET == 1])
# table(dt.train$TARGET)
# 0 1
# 31624 5363
# OVER SAMPLE
# OVER SAMPLE .2 + org, dt.valid .1, kfold = 10
# round eta mcw md ss csb mean.dval max.dval min.dval sd.dval mean.valid max.valid min.vaild
# 1 1 0.025 1 9 0.9 0.5 0.8465469 0.869957 0.8333904 0.01111857 0.8443441 0.8455045 0.8424316
# sd.valid
# 1 0.0009773344
# nrow(dt.train[TARGET == 1])
# set.seed(888)
# sp <- sample(nrow(dt.train[TARGET == 1]), nrow(dt.train[TARGET == 1]) * .1, replace = T)
# length(sp)
# dt.train <- rbind(dt.train[TARGET == 1][sp], dt.train[TARGET == 1], dt.train[TARGET == 0])
# table(dt.train$TARGET)
# 0 1
# 25536 3060
# UNDER AND OVER SAMPLE
# UNDER AND OVER SAMPLE, UNDER .7, OVER .2, k = 10
# round eta mcw md ss csb mean.dval max.dval min.dval sd.dval mean.valid max.valid min.vaild
# 1 1 0.025 1 9 0.9 0.5 0.852985 0.8745056 0.8325113 0.01284746 0.8441758 0.8462107 0.8395824
# sd.valid
# 1 0.002092373
# Active scheme: under-sample the majority class (keep 50% of TARGET == 0) ...
nrow(dt.train[TARGET == 0])
set.seed(888)
sp <- sample(nrow(dt.train[TARGET == 0]), nrow(dt.train[TARGET == 0]) * .5)
length(sp)
dt.train <- rbind(dt.train[TARGET == 0][sp], dt.train[TARGET == 1])
table(dt.train$TARGET)
# ... then over-sample the minority class (add an extra 20% of TARGET == 1,
# drawn with replacement).
nrow(dt.train[TARGET == 1])
set.seed(888)
sp <- sample(nrow(dt.train[TARGET == 1]), nrow(dt.train[TARGET == 1]) * .2, replace = T)
length(sp)
dt.train <- rbind(dt.train[TARGET == 1][sp], dt.train[TARGET == 1], dt.train[TARGET == 0])
table(dt.train$TARGET)
#######################################################################################
## 2.0 cv #############################################################################
#######################################################################################
# xgboost parameters; md/ss/mcw/csb/eta are presumably expanded to
# max_depth/subsample/min_child_weight/colsample_bytree/eta inside myCV_xgb
# (defined in utilities/cv.R) — confirm there.
params <- list(booster = "gbtree"
, nthread = 8
, objective = "binary:logistic"
, eval_metric = "auc"
, md = 9
, ss = .9
, mcw = 1
, csb = .5
, eta = .025)
# 10-fold CV on the rebalanced training set, scored against dt.valid;
# ID and TARGET are excluded from the feature list.
df.summary <- myCV_xgb(dt.train
, setdiff(names(dt.train), c("ID", "TARGET"))
, dt.valid
, k = 10
, params)
df.summary
|
728159bc6f12e898782e547160841de56f7b0529
|
c19103d2e850ad2f0f4f5dfad67d6b83bdabcf37
|
/R/survcorr.R
|
12930bad4140590649cf3b1c34a8207e25ecde64
|
[] |
no_license
|
cran/SurvCorr
|
05941fb13797b181447ff3cb08697df1a50e7c00
|
f57dfa76184e4372a3a9785e7b37aff08c8d7106
|
refs/heads/master
| 2022-11-18T22:49:39.717957
| 2022-11-08T13:10:12
| 2022-11-08T13:10:12
| 31,345,431
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,681
|
r
|
survcorr.R
|
## Helper: bivariate Pearson correlation.
## Returns the off-diagonal correlation between the first two columns of
## <data> (a two-column numeric matrix or data frame).
pearson = function(data) {
  cor(data, method = "pearson")[1, 2]
}
# Estimate the correlation of two (possibly censored) survival times by
# Iterative Multiple Imputation (IMI): censored observations are repeatedly
# imputed on the normal-scores scale until the correlation estimate converges,
# and the M imputation-specific estimates are pooled on the Fisher-z scale.
# Returns an object of class "survcorr" with point estimate, CI and imputed data.
# Requires the survival package (Surv, survfit, coxph) and the pearson() helper.
survcorr = function
(
formula1, # Formula for defining time-to-event 1, e.g. Surv(TIME1, STATUS1) ~ 1.
formula2, # Formula for defining time-to-event 2, =Surv(TIME2, STATUS2) ~ 1.
data, # Data set
methods="imi", # Vector of method names. Allowed are "imi" and "copula", but copula not yet implemented.
alpha=0.05, # Confidence level; interval is [alpha, 1-alpha].
intra=FALSE, # TRUE if the survival times are symmetric and interchangeable.
M=10, # Imputation size.
MCMCSteps=10, # Number of MCMC steps for double-censored observations.
epsilon= 0.001, # Precision epsilon.
maxiter=100 # Maximum number of iterations.
)
### IMI (Iterative Multiple Imputation)
### MP, 2013-12
{
## Extract times ('t') and events ('delta') of data set.
## For <intra=T> mirror the data.
tmp1 = as.matrix(model.frame(formula1, data))
if (intra) {
tmp = as.matrix(model.frame(formula2, data))
tmp2 = rbind(tmp, tmp1)
tmp1 = rbind(tmp1, tmp)
}
else
tmp2 = as.matrix(model.frame(formula2, data))
colnames(tmp1) = colnames(tmp2) = c("time", "status")
tmp1 = as.data.frame(tmp1)
tmp2 = as.data.frame(tmp2)
t1 = as.vector(tmp1$time)
delta1 = as.vector(tmp1$status)
order1 = order(-t1)
t2 = as.vector(tmp2$time)
delta2 = as.vector(tmp2$status)
order2 = order(-t2)
n = length(t1)
# Clamp survival probabilities away from 0/1 so qnorm() stays finite.
minTime = 0.00000001
## Fill data set.
## Transform each time to a uniform via its estimated survival function
## (Aalen-type survfit), then to a normal score z = qnorm(S(t)).
data1 = data.frame(index=1:n, t1=t1, delta1=delta1, t1unif=rep(NA, n), z1=rep(NA, n))[order1, ]
obj = survfit(coxph(Surv(time, status) ~ 1, data=tmp1, model=TRUE), type="aalen", se.fit=FALSE, conf.type="none")
# Get one row per object.
data1$t1unif = rev(rep(obj$surv, obj$n.event + obj$n.censor))
data1$t1unif = pmin(pmax(data1$t1unif, minTime), 1 - minTime)
data1$z1 = qnorm(data1$t1unif)
data2 = data.frame(index=1:n, t2=t2, delta2=delta2, t2unif=rep(NA, n), z2=rep(NA, n))[order2, ]
obj = survfit(coxph(Surv(time, status) ~ 1, data=tmp2, model=TRUE), type="aalen", se.fit=FALSE, conf.type="none")
# Get one row per object.
data2$t2unif = rev(rep(obj$surv, obj$n.event + obj$n.censor))
data2$t2unif = pmin(pmax(data2$t2unif, minTime), 1 - minTime)
data2$z2 = qnorm(data2$t2unif)
##d = cbind(data1[order(data1$index), -1], data2[order(data2$index), -1])
##print(d)
z1orig = data1$z1[order(data1$index)]
z2orig = data2$z2[order(data2$index)]
## 1) Get initial correlation coefficient estimate.
## Based on the doubly-uncensored pairs only.
r0 = pearson(cbind(z1orig, z2orig)[delta1 & delta2, , drop=F])
rj.all = rep(NA, M)
## Calc. indicators for censoring.
ind1 = !delta1 & delta2
ind2 = delta1 & !delta2
indBoth = !delta1 & !delta2
nBoth = sum(indBoth)
z1M = z2M = matrix(NA, n, M)
# Outer loop: M independent imputations, each iterated to convergence.
for (iImp in 1:M) {
rj = r0
## Generate random data for 2).
## Drawn once per imputation so the inner iterations are deterministic in rj.
runif1 = runif(n)
runif2 = runif(n)
runif1mcmc = matrix(runif(nBoth*MCMCSteps), nBoth, MCMCSteps)
runif2mcmc = matrix(runif(nBoth*MCMCSteps), nBoth, MCMCSteps)
for (iter in seq(length=maxiter)) {
## Reset the z's
z1 = z1orig
z2 = z2orig
## a) Only z1i is censored.
## Draw z1 from the conditional normal given z2, truncated above the
## censoring value (p1 = upper-tail mass beyond the observed z1).
p1 = 1 - pnorm(z1, mean=rj * z2, sd=sqrt(1 - rj^2)) # n
unif1 = 1 - pmin(pmax(p1 + runif1 * (1 - p1), minTime), 1 - minTime) # n
z1[ind1] = qnorm(unif1, mean=rj * z2, sd=sqrt(1 - rj^2))[ind1]
## b) Only z2i is censored.
p2 = 1 - pnorm(z2, mean=rj * z1, sd=sqrt(1 - rj^2)) # n
unif2 = 1 - pmin(pmax(p2 + runif2 * (1 - p2), minTime), 1 - minTime) # n
z2[ind2] = qnorm(unif2, mean=rj * z1, sd=sqrt(1 - rj^2))[ind2]
## c) Both are censored.
## Gibbs-style alternation between the two conditionals for MCMCSteps steps.
z1new = z1[indBoth]
z2new = z2[indBoth]
for (MCMCStep in seq(length=MCMCSteps)) {
p1 = 1 - pnorm(z1[indBoth], mean=rj * z2new, sd=sqrt(1 - rj^2)) # n
unif1 = 1 - pmin(pmax(p1 + runif1mcmc[, MCMCStep] * (1 - p1), minTime), 1 - minTime) # n
z1new = qnorm(unif1, mean=rj * z2new, sd=sqrt(1 - rj^2))
p2 = 1 - pnorm(z2[indBoth], mean=rj * z1new, sd=sqrt(1 - rj^2)) # n
unif2 = 1 - pmin(pmax(p2 + runif2mcmc[, MCMCStep] * (1 - p2), minTime), 1 - minTime) # n
z2new = qnorm(unif2, mean=rj * z1new, sd=sqrt(1 - rj^2))
}
z1[indBoth] = z1new
z2[indBoth] = z2new
## Save old correlations.
rj.old = rj
## 3) Calculate the new correlation.
rj = pearson(cbind(z1, z2))
rj.all[iImp] = rj
## 5) Stop condition: all correlations converge.
## NOTE(review): z1M/z2M are only filled on convergence; if maxiter is
## exhausted the imputation's columns stay NA — confirm this is intended.
if (abs(rj - rj.old) < epsilon) {
z1M[, iImp] = z1
z2M[, iImp] = z2
break
}
}
}
## 6) Calculate point and interval estimates.
## Pool the M estimates on the Fisher-z (atanh) scale: within-imputation
## variance 1/df0 plus between-imputation variance, then back-transform.
n = nrow(data) # Above n was twice as large for intra=TRUE.
if (intra)
df0 = n - 3/2
else
df0 = n - 3
rj.trans = atanh(rj.all)
rj.t.mean = mean(rj.trans)
rj.t.var = 1 / df0
BetwVar = var(rj.trans)
TotalVar = rj.t.var + BetwVar * (M + 1) / M
r.W.hat = tanh(rj.t.mean)
df = (M - 1) * (1 + M / (BetwVar * (M + 1) * df0))^2
limits = tanh(rj.t.mean + c(-1, 1) * qt(1 - alpha/2, df) * sqrt(TotalVar))
## Create an object of class <survcorr>
simData = list(M=M, z1M=z1M, z2M=z2M, delta1=delta1, delta2=delta2, t1=t1, t2=t2)
obj = list(rho=r.W.hat, ci.lower=limits[1], ci.upper=limits[2], simData=simData, M=M, MCMCSteps=MCMCSteps, rj.trans=rj.trans, rj.t.mean=rj.t.mean,
var=c(within=rj.t.var, between=BetwVar, total=TotalVar), df=df, intra=intra, alpha=alpha, call=match.call())
class(obj) = "survcorr"
obj
}
## Print method for a <survcorr> object: the correlation estimate together
## with its lower and upper confidence limits, as a named numeric vector.
## MP, 2014-02
print.survcorr = function(x, ...) {
  estimates <- c(rho = x$rho, lower = x$ci.lower, upper = x$ci.upper)
  print(estimates)
}
# Summary method for a <survcorr> object: prints the event contingency table,
# the estimate with CI (via print.survcorr), and the imputation diagnostics
# (M, MCMC steps, pooled mean/variance on the atanh scale, degrees of freedom).
summary.survcorr = function
(
object, ...
)
### Show the summary of a <survcorr> object.
### MP, 2014-02
{
cat("Results of correlation analysis by Iterative Multiple Imputation\n\n")
cat("\nContingency table of events:")
# Cross-tabulates censoring indicators of the two time-to-event variables.
print(table(object$simData$delta1, object$simData$delta2))
cat("\n\nCorrelation coefficient rho and ", (1-object$alpha)*100,"% confidence interval:\n")
# Delegates to print.survcorr for the rho / lower / upper line.
print(object)
cat("\n\nImputations: ", object$M, "\n")
cat("MCMC steps: ", object$MCMCSteps, "\n")
cat("\nPosterior mean of transformed atanh(rho)\n ", object$rj.t.mean, "\n")
cat("\nPosterior variance of atanh(rho)\n")
print(object$var)
cat("\nDegrees of freedom\n", object$df,"\n")
}
# Plot method for a <survcorr> object. Two display modes:
#   what="uniform"/"copula": bivariate copula plot of 1 - pnorm(z) scores,
#     events in <colEvent>, imputed/censored points in <colImput>;
#   what="times": raw survival times with direction arrows marking censoring.
# NOTE(review): arrow.plot() is not defined here — presumably fields::arrow.plot;
# confirm the package is attached by the surrounding package code.
plot.survcorr = function
(
x,
what="uniform", # Plot 'uniform' or 'times'.
imputation=1, # Vector of imputation indices to show. E.g. 1:10, or 1:obj$simData$M. Only used if what=='uniform'.
xlab=switch(what, copula=expression(hat(F)(t[2])), uniform=expression(hat(F)(t[2])), times=expression(t[2])),
ylab=switch(what, copula=expression(hat(F)(t[1])), uniform=expression(hat(F)(t[1])), times=expression(t[1])),
xlim, # Optional axis limits.
ylim, # Optional axis limits.
main=switch(what, copula="Bivariate Copula", uniform="Bivariate Copula", times="Bivariate Survival Times"),
legend=TRUE, # Should appear a legend onb the upper left?
cex.legend=switch(what, copula=0.8, uniform=0.8, times=0.7), # Character size of plot legend.
pch="*", # pch value used in the plot.
colEvent="black", # Color of events.
colImput="gray", # Color of imputations.
...
)
### Plot of a <survcorr> object (e.g. made by IMI approach).
### MP, 2014-02
{
# "copula" is an alias for "uniform" (defaults above already account for it).
if(what=="copula") what<-"uniform"
## Extract z-values of selected imputations.
d = x$simData
z1M = d$z1M[, imputation, drop=FALSE]
z2M = d$z2M[, imputation, drop=FALSE]
nimp<-length(imputation)
## Event indicators.
ind1 = d$delta1 & !d$delta2
ind2 = !d$delta1 & d$delta2
indBoth = d$delta1 & d$delta2
indNone = !d$delta1 & !d$delta2
if (what == "uniform") {
# Map normal scores back to the uniform (copula) scale.
y = 1 - pnorm(z1M)
x = 1 - pnorm(z2M)
if (missing(xlim)) xlim = 0:1
if (missing(ylim)) ylim = 0:1
}
else if (what == "times") {
y = d$t1
x = d$t2
if (missing(xlim)) xlim = range(x)
if (missing(ylim)) ylim = range(y)
}
else
stop("Value for <what> not allowed.")
## Adjust distance from axis label to axis (needed cause hat(F) was out of area)
## http://www.programmingr.com/content/controlling-margins-and-axes-oma-and-mgp/
# on.exit restores the graphics parameters changed below.
on.exit({ par(omi = c(0, 0, 0, 0)); par(mgp = c(3, 1, 0)) })
par(oma = c(0, 1, 0, 0))
par(mgp = c(2.2, 1, 0))
## Draw points.
plot(xlim, ylim, type="n", xlab=xlab, ylab=ylab, main=main, ...)
## Try: pch=20, cex=0.8, ...) = large dot. ## ASCII 42.
if (what == "uniform") {
# Doubly-observed events from the first imputation only; imputed points
# from all selected imputations.
points(x[indBoth, 1], y[indBoth, 1], pch=pch, col=colEvent)
points(x[indNone, 1:nimp], y[indNone, 1:nimp], pch=pch, col=colImput)
points(x[ind1], y[ind1], pch=pch, col=colImput)
points(x[ind2], y[ind2], pch=pch, col=colImput)
if (legend)
legend("topleft",
legend=c("Event", "Censored"),
##pch=c(20, 158, 157, 42), # dot, line|, line-, star
pch=pch, # ASCII if > 32
col=c(colEvent, colImput),
cex=cex.legend,
bty="n")
}
else if (what == "times") {
points(x[indBoth], y[indBoth], pch=pch, col=colEvent)
## Draw small arrows. [2 alternative implementations tested]
##arrowLen = 0.025 * diff(ylim)
##arrows(x[ind1], y[ind1] - arrowLen/2, y1=y[ind1] + arrowLen/2, length=arrowLen, col=colImput)
##arrowLen = 0.025 * diff(xlim)
##arrows(x[ind2] - arrowLen/2, y[ind2], x1=x[ind2] + arrowLen/2, length=arrowLen, col=colImput)
arrowLen = 0.02
length = 0.05
# Arrow direction encodes which time(s) are censored.
arrow.plot(x[ind1], y[ind1], u=1, v=0, arrow.ex=arrowLen, length=length, col=colImput)
arrow.plot(x[ind2], y[ind2], u=0, v=1, arrow.ex=arrowLen, length=length, col=colImput)
arrow.plot(x[indNone], y[indNone], u=1, v=1, arrow.ex=arrowLen, length=length, col=colImput)
if (legend)
legend("topright",
legend=c("Uncensored times 1&2",
"Censored time 1",
"Censored time 2",
"Censored times 1&2"),
pch=c(pch, "|", "-", "/"),
col=c(colEvent, colImput, colImput, colImput),
cex=cex.legend,
bty="n")
}
}
|
f7581ee6a8caf1ec2ba78eba86e8399d7840563f
|
d68031a6d44fb8e7ee8f5ddbcbf57b08af875df0
|
/global.R
|
29d43bb70035ea4a30f9d43ebcac53fb5e014bc2
|
[] |
no_license
|
bradley-pearson6597/Appsilon-Marine-App
|
4e98d6c32728dcfdd8d948f7a22fe33eec4cfd3f
|
1c7f0a3ab87bd33142b06d009d64c1ba9e2ab790
|
refs/heads/main
| 2023-02-11T09:45:55.695181
| 2021-01-06T13:56:49
| 2021-01-06T13:56:49
| 310,593,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,177
|
r
|
global.R
|
#####################################################################################################
###################################### GLOBAL FILE ####################################################
#######################################################################################################
# global.R for a Shiny marine-traffic app: loads packages, sources modules,
# reads the ships data set, and precomputes the mean map coordinates.
# Load libraries needed for project
# Need to check if installed
library(shiny)
library(shiny.semantic)
library(semantic.dashboard)
library(dplyr)
library(leaflet)
library(htmltools)
library(DT)
library(shinycssloaders)
library(data.table)
library(bit64)
# library(ggplot2)
# library(lubridate)
# Load modules into R Session
source(paste0(getwd(), "/modules.R"))
# Read ships data into the app & ensure data is of correct type
# ships.data <- read.csv("ships (small).csv")
# ships.data <- read.csv("ships.csv")
ships.data <- as.data.frame(data.table::fread("ships.csv")) # Allows for quicker & more efficient csv reading
# # Change Datetime into correct format (currently disabled)
# ships.data <- ships.data %>%
# dplyr::mutate(DATETIME = strptime(DATETIME, format = "%Y-%m-%d %H:%M:%S")) %>%
# dplyr::arrange(DATETIME)
# Average coordinates, presumably used as the initial map center — confirm in modules.R.
avlon = mean(ships.data$LON)
avlat = mean(ships.data$LAT)
|
17cbd92d622ff08e611251e223d9b9af76fbf766
|
7a8ae73cfd590fd83273e3ec47d37011ab4d8089
|
/tsne/man/tsne-package.Rd
|
cb6410611c64dc1e5f2e5c1eaa64913fa1b7b9c4
|
[] |
no_license
|
jdonaldson/rtsne
|
3dc0bc8f8d82cae1508fb06c40f03dda97195817
|
a33cc0087dea7dfa7671d4d6f0049dbc7b2f77c9
|
refs/heads/master
| 2021-01-17T08:37:47.280295
| 2019-08-27T17:50:58
| 2019-08-27T17:50:58
| 4,193,788
| 56
| 25
| null | 2019-08-21T23:40:49
| 2012-05-01T16:02:53
|
R
|
UTF-8
|
R
| false
| false
| 1,133
|
rd
|
tsne-package.Rd
|
\name{tsne-package}
\Rdversion{1.1}
\alias{tsne-package}
\docType{package}
\title{The tsne-package for multidimensional scaling}
\description{
This package contains one function called \link[tsne]{tsne} which contains all the functionality.
}
\details{
\tabular{ll}{
Package: \tab tsne\cr
Type: \tab Package\cr
Version: \tab 0.1\cr
Date: \tab 2010-02-19\cr
License: \tab GPL\cr
LazyLoad: \tab yes\cr
}
}
\author{
Justin Donaldson
https://github.com/jdonaldson/rtsne
Maintainer: Justin Donaldson (jdonaldson@gmail.com)
}
\references{
L.J.P. van der Maaten and G.E. Hinton. Visualizing High-Dimensional Data Using t-SNE. \emph{Journal of Machine Learning Research} 9 (Nov) : 2579-2605, 2008.
L.J.P. van der Maaten. Learning a Parametric Embedding by Preserving Local Structure. In \emph{Proceedings of the Twelfth International Conference on Artificial Intelligence and Statistics} (AISTATS), JMLR W&CP 5:384-391, 2009.
}
\keyword{ package }
% \seealso{
% ~~ Optional links to other man pages, e.g. ~~
% ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
% }
% \examples{
% ~~ simple examples of the most important functions ~~
% }
|
af92b01d96c93ad1469c611a5c48dfec1d41199b
|
c024630d8b0ed0f6d1ab731bcea878f6835fbe0e
|
/2020-12-07/test_converter2_er.R
|
2bba7e7ee96c5aa41008ab3aab9d8344cf1ce6d5
|
[] |
no_license
|
kassandra-ru/model
|
ef8fb89acb8492a98608cb57fb2fecf294f7a640
|
e5f54d82885c0eb036349e94df55d08d9e28ca67
|
refs/heads/master
| 2021-12-07T21:56:36.663564
| 2021-08-28T03:14:08
| 2021-08-28T03:14:08
| 145,807,033
| 0
| 5
| null | 2021-01-25T06:34:58
| 2018-08-23T05:53:58
|
R
|
UTF-8
|
R
| false
| false
| 2,422
|
r
|
test_converter2_er.R
|
# Converts three Rosstat spreadsheets (industrial production, investment, CPI)
# into tidy tsibble CSVs, stamping each row with the access date.
access_date = Sys.Date()
# --- Industrial production: monthly series starting 2015-01.
indprod = rio::import("ind_baza_2018.xlsx", skip = 2, sheet = 1)
indprod_vector = t(indprod[2, 3:ncol(indprod)])
indprod_ts = stats::ts(indprod_vector, start = c(2015, 1), frequency = 12)
indprod_tsibble = tsibble::as_tsibble(indprod_ts)
indprod_tsibble = dplyr::rename(indprod_tsibble, date = index, ind_prod = value)
indprod_tsibble = dplyr::mutate(indprod_tsibble, access_date = access_date)
rio::export(indprod_tsibble, "ind_baza_2018.csv")
# New data for 2020 are taken from: inv20.xlsx
# Navigation path on the source (Rosstat) site:
# Main page
# Statistics
# Official statistics
# Entrepreneurship
# Investments
# Investments in non-financial assets
data = rio::import("1-06-0.xlsx")
names(data)[1] = "year_col"
# Below: disabled variant that extracts levels instead of growth rates.
# (Quoted strings are row labels matched verbatim in the spreadsheet — left as-is.)
# ind_lvl_start = which(
# data$year_col[c(1:length(data$year_col))] == "1.6. Инвестиции в основной капитал1), млрд рублей")
# ind_lvl_finish = which(
# data$year_col[c(1:length(data$year_col))] == "в % к соответствующему периоду предыдущего года")
# idx_start = ind_lvl_start + 2
# idx_finish = ind_lvl_finish - 1
# Locate the block of quarterly growth rates between two label rows.
idx_not_year_start = which(
data$year_col[c(1:length(data$year_col))] == "/ percent of corresponding period of previous year"
)
idx_not_year_finish = which(
data$year_col[c(1:length(data$year_col))] == "/ percent of previous period"
)
idx_start = idx_not_year_start + 1
idx_finish = idx_not_year_finish - 2
# Quarterly investment series starting 1999 Q1.
data_vector = data[idx_start:idx_finish, 3:6] %>% t() %>% as.vector()
colnames(data_vector) = NULL
data_vector = stats::na.omit(data_vector)
data_ts = stats::ts(data_vector, start = c(1999, 1), freq = 4)
data_tsibble = tsibble::as_tsibble(data_ts)
data_tsibble = dplyr::mutate(data_tsibble, access_date = access_date)
data_tsibble = dplyr::rename(data_tsibble, date = index, investment = value)
rio::export(data_tsibble, "invest.csv")
# --- CPI: monthly series starting 1991-01, wide (month x year) to long.
data = rio::import("~/Downloads/i_ipc.xlsx")
data = data[5:16, -1]
data = tidyr::gather(data, year, value)
data1 = dplyr::select(data, -year)
cpi_ts = stats::ts(data1$value, start = c(1991, 1), freq = 12)
cpi_infl = tsibble::as_tsibble(cpi_ts) %>% stats::na.omit() %>% dplyr::rename(date = index, cpi = value)
data_tsibble = dplyr::mutate(cpi_infl, access_date = access_date)
rio::export(data_tsibble, "i_ipc.csv")
|
0d33df9200dc50c5d76a04213e05b51dec79895a
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Applied_Statistics_And_Probability_For_Engineers_by_Douglas_C._Montgomery_And_George_C._Runger/CH2/EX2.30/EX2_30.R
|
6ff176c32483091d05c858bae86e020330c72c87
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 402
|
r
|
EX2_30.R
|
#install.packages("MASS")
library(MASS)
#Flaws and functions(Pg no. 49)
# Counts from the example: parts that are both defective and surface-flawed,
# total defective parts, and total surface-flawed parts.
defective_and_surface_flawed = 2
total_defective_parts = 20
total_surface_flawed = 40
# fractions() (from MASS) displays the probability as an exact rational number.
# NOTE(review): the numerator multiplies defective_and_surface_flawed by itself;
# verify against the textbook that this matches Example 2.30's formula.
P = fractions((defective_and_surface_flawed*defective_and_surface_flawed)/(total_defective_parts*total_surface_flawed))
print(P)
# Fixed typo in the output message: "defevtive" -> "defective".
cat("probability of surface flawed and defective is",P)
#The answer may slightly vary due to rounding off values
|
f426659f28ab3a07e6149740816ce77ee82e6ce2
|
623ae5f90fd8f7d10c22b61d1bf425a435a00a07
|
/foo.R
|
b9a9bca092e2979e778ca1ac9947c79b78a1f756
|
[] |
no_license
|
bkkkk/my_testing_repo
|
bd5dcc66c08f7713fdc8f5a7c6cf58395fd0d6db
|
e94c117b5f48ba1b4d66cadb5da144a8726c87bc
|
refs/heads/master
| 2020-08-10T11:14:54.889973
| 2019-11-14T05:39:50
| 2019-11-14T05:39:50
| 214,331,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158
|
r
|
foo.R
|
# Toy script: simple scalar arithmetic followed by a scatter plot.
# Fix: ggplot(), geom_point(), aes() and the mpg data set come from ggplot2,
# which was never loaded — without it the script stops with
# "could not find function \"ggplot\"".
library(ggplot2)
x <- 1
y <- 2
sum_of_xy <- y + x
diff_of_xy <- y - x
increase_x <- x + 2000
my_favourite_color <- "Blue"
# NOTE(review): ggplot2::mpg has no `mpg` column (that is mtcars) — the y
# aesthetic likely errors at render time; confirm the intended data/columns.
ggplot(mpg) +
geom_point(aes(x = cyl, y = mpg))
|
0f998106208c9358ebffb2101175d673026ed27c
|
47760e76ac09abff3f7133c679318aa69e21ce00
|
/3-follow-up/a-make-dataset/e-pheno-preprocess.R
|
cb9af6c70979e2f4003cf53dc81839abacc8f90e
|
[] |
no_license
|
MRCIEU/PHESANT-MR-pheWAS-smoking
|
a50c3ea7c8df3885952845738602f5d90b494de1
|
be941325fe3185772193f3cbd9b7e7453acb366a
|
refs/heads/master
| 2020-04-01T01:28:00.745725
| 2019-05-16T14:41:44
| 2019-05-16T14:41:44
| 152,739,725
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,603
|
r
|
e-pheno-preprocess.R
|
## Adapted from Robyn Wootton's script for the paper - Causal effects of lifetime smoking on risk for depression and schizophrenia: Evidence from a Mendelian randomisation study
## Builds the smoking phenotype file: reads UK Biobank fields, renames them,
## restricts to the analysis subsample, recodes missing codes to NA, and
## derives duration, time-since-cessation and cigarettes-per-day variables.
##########################################################################
#1. Read in data
##########################################################################
datadir=Sys.getenv('PROJECT_DATA')
phenodir=paste(datadir, '/phenotypes/derived/', sep='')
df2 = read.table(paste(phenodir, 'data.21753-phesant_header-smokingscore-followup.csv',sep=''), header=1, sep=',')
##########################################################################
# 2. Tidy up data - name variables appropriately
##########################################################################
print('Tidy up data - name variables appropriately')
print('- rename')
# Rename the variables (UK Biobank field IDs -> readable names).
library(reshape)
tidy<-rename(df2, c(x20116_0_0="smoke_status",x40001_0_0="cause", x3436_0_0="start_current", x2897_0_0="stop", x2867_0_0="start_former", x3456_0_0="cpd_current", x2887_0_0="cpd_former", x31_0_0="sex", x189_0_0="SES", x21000_0_0="ethnic", x6138_0_0="edu", x21022_0_0="Age"))
str(tidy)
print('restrict to required variables')
# #subset the time point 1 variables
df<-subset(tidy, select=c("eid", "smoke_status", "start_current", "start_former", "stop", "cpd_current", "cpd_former", "cause", "sex", "Age"))
str(df)
head(df)
##########################################################################
# 3. Restrict to our included subsample
##########################################################################
print('Restrict to our included subsample')
# load score data - this is the right subsample
smokingscoredatadir=paste(datadir, '/smokingscore-snps/', sep='')
smokingscore = read.table(paste(smokingscoredatadir, '/smokescore-withPhenIds-subset.csv',sep=''), header=1, sep=',')
# restrict to this subsample
data = merge(df, smokingscore, by='eid')
print(dim(data))
########################################################################
# 4. Create duration and cpd variables
########################################################################
print('Create duration and cpd variables')
#Make all missing values into NA
# (-3/-1/-10 are presumably UK Biobank codes for "prefer not to answer",
# "do not know" and "less than one" — confirm against the field showcase.)
print('- start_current')
table(data$start_current)
data$start_current[data$start_current == -3] <-NA
data$start_current[data$start_current == -1] <-NA
table(data$start_current)
print('- start_former')
table(data$start_former)
data$start_former[data$start_former == -3] <-NA
data$start_former[data$start_former == -1] <-NA
table(data$start_former)
print('- stop')
table(data$stop)
data$stop[data$stop == -3] <-NA
data$stop[data$stop == -1] <-NA
table(data$stop)
print('- cpd_current')
table(data$cpd_current)
data$cpd_current[data$cpd_current == -3] <-NA
data$cpd_current[data$cpd_current == -1] <-NA
data$cpd_current[data$cpd_current == -10] <-NA
table(data$cpd_current)
print('- cpd_former')
table(data$cpd_former)
data$cpd_former[data$cpd_former == -3] <-NA
data$cpd_former[data$cpd_former == -1] <-NA
data$cpd_former[data$cpd_former == -10] <-NA
table(data$cpd_former)
print('- smoke_status')
#levels(data$smoke_status)
table(data$smoke_status)
data$smoke_status[data$smoke_status == -3] <-NA
table(data$smoke_status)
print('- Derive num years a smoker')
# Robyn's code
#for (i in 1:nrow(data)){
# if (is.na(data$start_current[i])){
# data$durOFF[i]<-(data$stop[i]-data$start_former[i])}
# else {
# data$durOFF[i]<-(data$Age[i]-data$start_current[i])
# }
#}
# my code (faster)
# Vectorized: former smokers get stop - start_former, current smokers Age - start_current.
data$dur=NA
ixNACurrent = is.na(data$start_current)
data$dur[ixNACurrent] = (data$stop[ixNACurrent]-data$start_former[ixNACurrent])
data$dur[!ixNACurrent] = (data$Age[!ixNACurrent] -data$start_current[!ixNACurrent])
# checking my version gives the same as Robyn's code above
#str(data)
#ix=data$dur==data$durOFF
#which(ix==FALSE)
#summary(data[,c('dur', 'durOFF')])
print('- create tsc variable')
#Create tsc (time since cessation) variable
str(data)
data$tsc<-data$Age-data$stop
print('- create cpd variable')
#Create cpd (cigarettes per day) variable by merging cpd_current and cpd_former
data$cpd<-data$cpd_current
my.na <- is.na(data$cpd_current)
data$cpd[my.na] <- data$cpd_former[my.na]
head(data)
#Make NAs ==0
print('- set all non-smokers to 0')
# Set non smokers to 0 but those with missing smoking status are still NA
data$cpd[!is.na(data$smoke_status) & is.na(data$cpd)] <-0
data$dur[!is.na(data$smoke_status) & is.na(data$dur)] <-0
data$tsc[!is.na(data$smoke_status) & is.na(data$tsc)] <-0
#data$cpd[is.na(data$cpd)] <-0
#data$dur[is.na(data$dur)] <-0
#data$tsc[is.na(data$tsc)] <-0
levels(data$smoke_status)
ix = which(is.na(data$smoke_status))
print(data[ix[1:5],])
head(data)
########################################################################
# 5. Create csi with the best fitting values
########################################################################
#the best fitting values were 18 years for tau and 0 for delta
tau<-18
delta<-0
# csi (comprehensive smoking index) function
# Combines smoking duration (dur), time since cessation (tsc) and intensity
# (int, cigarettes per day) into a single exposure score with half-life tau
# and lag delta (all in years). Arguments may be scalars or equal-length
# vectors; returns a numeric vector the same length as the inputs.
csi <- function(dur, tau, delta, tsc, int){
  # pmax() computes the elementwise max against 0 directly, replacing the
  # original unlist(lapply(..., function(x) max(x, 0))) — identical values,
  # vectorized and faster.
  tsc_star <- pmax(tsc - delta, 0)
  dur_star <- pmax(dur + tsc - delta, 0) - tsc_star
  # Exponential-decay weighting of duration/cessation, log-scaled intensity.
  (1 - 0.5^(dur_star / tau)) * (0.5^(tsc_star / tau)) * (log(int + 1))
}
# Score every participant and write the cleaned phenotype file.
data$csi<-csi(data$dur, tau, delta, data$tsc, data$cpd)
str(data)
summary(data)
# Keep only the columns needed downstream.
data = data[,c('eid', 'csi', 'cpd_current', 'cpd_former', 'smoke_status')]
write.table(data, paste(phenodir, 'data.21753-phesant_header-smokingscore-followup-clean.csv',sep=''), row.names=FALSE, sep=',')
|
52f4b035a9c90878f11bcc866cfab4501c005da8
|
3262afc1872d983c36fe27720c41052ef87c10aa
|
/run_analysis.R
|
16e12d930fa3f44eb761960ae4fd6b4291f0d3b9
|
[] |
no_license
|
kbppdummy/gcdcourseproject
|
ce3a6d732b1d2c1b1a8e5ce5e50e48c9615f93fa
|
6f88851451e1131a031d91bc3e2f7dd1bf37ef7a
|
refs/heads/master
| 2020-04-14T10:39:16.245231
| 2019-01-02T04:36:28
| 2019-01-02T04:36:28
| 163,792,586
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,923
|
r
|
run_analysis.R
|
#
# run_analysis.R
# Code Description:
# This is the main file that must be called.
# Created for a Coursera Course Project.
# Merges the test/train data sets (via processData from merge.R), cleans the
# names, writes merged.txt, then writes a per-group summary to summaries.txt.
# Side effects only: two files written to the working directory; no return value.
run_analysis <- function(){
#loads the libraries needed by the script, just in case it is not yet loaded
#also loads the user-defined scripts: functions.R and merge.R
print("****************************************", quote = FALSE)
print("Loading necessary libraries and files...", quote = FALSE)
library(dplyr)
library(tidyr)
source("./functions.R")
source("./merge.R")
print("DONE.", quote = FALSE)
print("****************************************", quote = FALSE)
#starts the analysis process
print("Starting analysis...", quote = FALSE); print("", quote = FALSE)
merged <- processData() #this returns a data frame containing a merged test and train data set
names(merged) <- makeAppropriateNames(names(merged)) #puts more readable variables names to the data frame
merged <- changeActivityNames(merged) #converts the activity labels (i.e., 1-6) into readable form (e.g. 'walking')
#once done, the merged data set will be saved in a file
print("Saving the merged and cleaned data into merged.txt ...", quote = FALSE)
write.table(merged, "merged.txt", row.names = FALSE)
print("DONE.", quote = FALSE)
print("****************************************", quote = FALSE)
#this creates the second data set
print("Creating the second data set...", quote = FALSE)
newData <- createSecondDataSet(merged)
print("DONE.", quote = FALSE)
#and then also saves it to another .txt file
print("Saving second data set into summaries.txt...", quote = FALSE)
write.table(newData, "summaries.txt", row.names = FALSE)
print("DONE.", quote = FALSE)
print("****************************************", quote = FALSE)
}#end of run_analysis function
#end of file
|
f6e2776145452c4cc13f4be1dae45683f90af683
|
f43931a3d2fe0075098a13662c3497e6dbc49115
|
/R/dataLoaders.R
|
70fc3c68ab0afa9713fdf4d51edd26b080ffabd9
|
[
"MIT"
] |
permissive
|
JDMusc/READ-TV
|
43f25df5659d28a044cea5765855a4aab7bec773
|
8ddceec04563f5586bbc35eb9918eda8ed06cf6d
|
refs/heads/master
| 2021-07-04T13:57:40.323156
| 2021-01-19T00:01:36
| 2021-01-19T00:01:36
| 214,256,737
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,621
|
r
|
dataLoaders.R
|
# Build an (unevaluated) reader expression for a file based on its extension.
# Dispatches on the lowercased extension of `f_name`:
#   .rds -> read_rds, .csv -> read_csv, .tsv -> read_tsv.
# When extra arguments are supplied via `...` they are spliced into a full
# read_csv(...)/read_tsv(...) call; otherwise the bare function symbol is
# returned (magrittr pipes into either form equally well).
# NOTE(review): `...` is never applied to the .rds branch, and an unknown
# extension makes switch() return NULL -- confirm callers guard against that.
loadFileExpr = function(f_name, ...) {
  n_args = nargs()  # > 1 means the caller passed reader options to splice in
  f_name %>%
    file_ext %>%
    tolower %>%
    switch(
      'rds' = expr(read_rds),
      'csv' = if(n_args > 1)
        expr(read_csv(!!!(rlang::list2(...))))
      else
        expr(read_csv),
      'tsv' = if(n_args > 1)
        expr(read_tsv(!!!(rlang::list2(...))))
      else
        expr(read_tsv)
    )
}
# Build an expression that reads the first `n_max` rows of `f_name` and
# appends the derived columns described by `cols` (see appendColsRhs).
# Extra reader options in `...` are forwarded to the underlying reader.
# Returns unevaluated code; pair with quickLoad() to execute it.
quickLoadEventsCode = function(f_name, n_max = 100, cols = list(),
                               ...) {
  # Reader options: the row cap plus any caller-supplied arguments.
  args = rlang::list2(n_max = n_max) %>% append(rlang::list2(...))

  expr(f_name %>%
         (!!(loadFileExpr(f_name, !!!args))) %>%
         slice(1:!!n_max)) %>%
    appendColsRhs(cols)
}
# Evaluate the quick-load expression: read the first `n_max` rows of f_name.
# `f_name` is supplied via `data` so the quoted `f_name` symbol inside the
# generated code resolves to the actual path.
quickLoad = function(f_name, n_max = 100, ...)
  eval_tidy(quickLoadEventsCode(f_name, n_max = n_max, ...),
            data = list(f_name = f_name))
# Build a full assignment expression: <out_pronoun> <- <read f_name, add cols>.
# Returns unevaluated code (e.g. for display/replay); `out_pronoun` defaults
# to the symbol `raw_data`.
loadEventsCode = function(f_name, out_pronoun = sym('raw_data'), cols = list(), ...) {
  rhs = loadEventsCodeRhs(f_name, cols, ...)
  expr(!!out_pronoun <- !!rhs)
}
# Right-hand side of the load expression: pipe f_name into the extension-
# appropriate reader, then append the derived columns in `cols`.
# FIX: reader options passed via `...` were previously accepted but silently
# dropped; they are now forwarded to loadFileExpr (matching how
# quickLoadEventsCode forwards them). With no extra options the behavior is
# unchanged: loadFileExpr still returns the bare reader symbol.
loadEventsCodeRhs = function(f_name, cols = list(), ...) {
  rhs <- expr(f_name %>% !!(loadFileExpr(f_name, ...)))
  appendColsRhs(rhs, cols)
}
# Extend a (quoted) pipeline with one mutate() per entry of `cols`.
# `cols` is a named list mapping new column name -> quoted expression for its
# value; entries are appended in order, so later columns may reference
# earlier ones. Returns the extended, still-unevaluated expression.
appendColsRhs = function(code, cols) {
  for(i in seq_along(cols)) {
    nm = names(cols)[[i]]
    val = cols[[i]]
    # Splice the accumulated code, the column name, and its value expression.
    code = expr(!!code %>% mutate(!!sym(nm) := !!val))
  }
  code
}
# Like appendColsRhs, but wraps the result in an assignment to `output_sym`.
appendCols = function(code, cols, output_sym) {
  rhs = appendColsRhs(code, cols)
  expr(!!output_sym <- !!rhs)
}
# Build an assignment expression that augments an events table with:
#   * `Any Event`  = 1 (constant marker column),
#   * deltaTime    = gap since the previous event,
#   * RelativeTime = time since the first event.
# When has_case is TRUE the time columns are computed per Case group
# (events sorted by Time within each case); otherwise over the whole table.
appendEventsWithRelativeAndDeltaTimeCode = function(input_sym, output_sym, has_case = TRUE) {
  rhs = expr(!!input_sym %>% mutate(`Any Event` = 1))
  if(has_case) {
    # Per-case computation: arrange and derive times inside each Case.
    rhs = expr(!!rhs %>%
                 group_by(Case) %>%
                 group_modify(~ .x %>%
                                arrange(Time) %>%
                                mutate(deltaTime = Time - lag(Time),
                                       RelativeTime = Time - min(Time, na.rm = TRUE))
                 ) %>%
                 ungroup
    )
  }
  else {
    # Single-group computation over the full table.
    rhs = expr(!!rhs %>%
                 arrange(Time) %>%
                 mutate(deltaTime = Time - lag(Time),
                        RelativeTime = Time - min(Time, na.rm = TRUE)))
  }
  expr(!!output_sym <- !!rhs)
}
# Quoted pipeline computing per-case deltaTime (gap since previous event).
# FIX: the modify expression must be quoted with expr(). The original passed
# it bare, and the `!!` unquote inside caseGroupedModifyCodeRhs forces
# evaluation of the promise, which errors because `.x` is unbound at
# call time.
deltaTimesCodeRhs = function()
  caseGroupedModifyCodeRhs(expr(.x %>% mutate(deltaTime = Time - lag(Time))))
# Quoted pipeline computing per-case RelativeTime (time since first event).
# FIX: quote the modify expression with expr() -- passed bare, it would be
# force-evaluated by `!!` in caseGroupedModifyCodeRhs and error on the
# unbound `.x`.
relativeTimesCodeRhs = function()
  caseGroupedModifyCodeRhs(expr(.x %>% mutate(RelativeTime = Time - min(Time, na.rm = TRUE))))
# Wrap a per-group modify expression into the standard
# events %>% group_by(Case) %>% group_modify(~ <expr>) %>% ungroup pipeline.
# `modify_expr` must be an already-quoted expression (it is spliced via !!);
# `events` is left as a free symbol resolved when the code is evaluated.
caseGroupedModifyCodeRhs = function(modify_expr)
  expr(events %>%
         group_by(Case) %>%
         group_modify(~ !!modify_expr) %>%
         ungroup
  )
|
97bb8842b7a57b99d40c941c9c03b99a333a523e
|
7f478ef249f587dd3f68751f00c45def9f171629
|
/NLo_eda.R
|
68d8a88484c2606e4bf41f88b9afac6e1a6c9aa7
|
[] |
no_license
|
ngone8lo/Project-Loans-Pay-Off-Status
|
51964c980518d19738e9cc5845809eadf5e1082e
|
73a445cdb5f92b64ef72bc0075f0c71a903f1647
|
refs/heads/main
| 2023-04-16T20:14:33.663862
| 2021-04-30T08:35:34
| 2021-04-30T08:35:34
| 363,072,868
| 1
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 10,910
|
r
|
NLo_eda.R
|
## @knitr eda
##Ngoné Lo
## March 2020
#### Set up workspace ###
#Importing libraries
library(tidyverse)
library(janitor) # Helps with initial data cleaning and pretty tables
#Loading dataset
loan_data <- read_csv("outputs/datasets/loan_data_cleaned.csv")
#First we take a look at the prediction variable: loan status
#Overview of the variable term
status_prop <- loan_data %>%
tabyl(loan_status) %>%
adorn_pct_formatting()
#### Save term_proportion table####
write_csv(status_prop, "outputs/tables/loan_status.csv")
#The data is highly biased toward Fully Paid Loan Status
#Second, we take a look at credit score
#Summary statistics of the variable credit score
summary(loan_data$credit_score)
#Histogram of credit score
ggplot(data = loan_data, mapping = aes(x=credit_score)) +
geom_histogram(binwidth=10, color="black", fill="ivory2" )+ #plot histogram
#annotate summary statistics to plot
annotate("text", label = "min=585", x = 590, y = 11000, color = "gray35",
hjust = 0, size=4)+
annotate("text", label = "1st Quartile=706", x = 590, y = 9800, color = "gray35",
hjust = 0, size=4)+
annotate("text", label = "mean=724", x = 590, y = 8600, color = "gray35",
hjust = 0, size=4)+
annotate("text", label = "median=718", x = 590, y = 7400, color = "gray35",
hjust = 0, size=4)+
annotate("text", label = "3rd Quartile=739", x = 590, y = 6200, color = "gray35",
hjust = 0, size=4)+
annotate("text", label = "max=751", x = 590, y = 5000, color = "gray35",
hjust = 0, size=4)+
annotate("text", label = "sd=27", x = 590, y = 3800, color = "gray35",
hjust = 0, size=4)+
theme_minimal() + # Make the theme neater
#Define title, subtile, and axis size
theme(plot.title =element_text(size = 16),
plot.subtitle =element_text(size = 16),
axis.title = element_text(size=16))+
#Define title, subtitle, and axis labels
labs(title= "Figure 1: Distribution of Credit Score",
subtitle="With Summary Statistics",
x="Credit Score",
y="Count")
#### Save the graph ####
ggsave("outputs/figures/credit_score_distribution.png",
width = 15, height = 10, units = "cm")
#Third, we take a look at annual income
#Summary statistics of the variable annual_income
summary(loan_data$annual_income)
#Looks like we are dealing with millionaires here
#Histogram of annual income
ggplot(loan_data) +
#plot histogram
geom_histogram(aes(x=annual_income), binwidth=500000,
color="black", fill="ivory2")+
#annotate summary statistics to plot
annotate("text", label = "min=76,627", x = 4000000, y = 18000,
color = "gray35", hjust = 0, size=4)+
annotate("text", label = "1st Quartile=871,976", x = 4000000, y = 16000,
color = "gray35", hjust = 0, size=4)+
annotate("text", label = "median=1,213,264", x = 4000000, y = 14000,
color = "gray35", hjust = 0, size=4)+
annotate("text", label = "mean=1,401,084", x = 4000000, y = 12000,
color = "gray35", hjust = 0, size=4)+
annotate("text", label = "3rd Quartile=1,685,053", x = 4000000, y = 10000,
color = "gray35", hjust = 0, size=4)+
annotate("text", label = "max=165,557,393", x = 4000000, y = 8000,
color = "gray35", hjust = 0, size=4)+
annotate("text", label = "sd=1,145,882", x = 4000000, y = 6000,
color = "gray35", hjust = 0, size=4)+
theme_minimal() + # Make the theme neater
#Define title, subtitle, and axis size
theme(plot.title =element_text(size = 16),
plot.subtitle =element_text(size = 16),
axis.title = element_text(size=16))+
#Define title, subtitle, and axis labels
labs(title= "Figure 2: Distribution of Annual Income",
subtitle= "With Summary Statistics",
x="Annual Income",
y="Count") +
xlim(c(0, 7500000)) #set x axis limit
#### Save the graph ####
ggsave("outputs/figures/annual_income_distribution.png",
width = 15, height = 10, units = "cm")
#The other variables are categorical and are:
# 1 Term of Loan (Long term, Short Term) and
# 2. Years in Current Job (> 1 year, 1 Year, 2 years,..., 10+ years )
#Overview of the variable term
term_prop <- loan_data %>%
tabyl(term) %>%
adorn_pct_formatting()
#### Save term_proportion table####
write_csv(term_prop, "outputs/tables/term_proportion.csv")
#Re-leveling years in current job (ordered from least to most tenure).
#FIX: removed the trailing comma after "10+ years" -- a trailing comma in an
#R call is an empty argument, so c(...) raised "argument 12 is empty" and the
#re-leveling never ran.
loan_data$years_in_current_job <- factor(
  loan_data$years_in_current_job,
  levels = c("< 1 year", "1 year", "2 years", "3 years",
             "4 years", "5 years", "6 years", "7 years",
             "8 years", "9 years", "10+ years"))
#Overview of the variable years in current in job
current_job_years_prop <- loan_data %>%
tabyl(years_in_current_job) %>%
adorn_pct_formatting()
#### Save term_proportion table####
write_csv(current_job_years_prop, "outputs/tables/current_job_years_proportion.csv")
#Because of skewness and to align more with real-world practices
#where people are often assigned to an income bracket, we decided to categorize
#credit score and annual income.
#Categorizing credit score
loan_data <- loan_data %>%
mutate(credit_score_category = case_when(
credit_score < 620 ~ "Poor",
credit_score >= 620 & credit_score < 690 ~ "Average",
credit_score >= 690 & credit_score < 720 ~ "Good",
credit_score >= 720 ~ "Very Good"), credit_score = NULL)
#Overview of the grouped credit_score
table(loan_data$credit_score_category)
#Categorizing annual_income
loan_data <- loan_data %>%
mutate(income_bracket = case_when(
annual_income <500000 ~ "<500K",
annual_income >= 500000 & annual_income < 1000000 ~ "500K-1M",
annual_income >= 1000000 & annual_income < 1500000 ~ "1M-1.5M",
annual_income >= 1500000 & annual_income < 2000000 ~ "1.5M-2M",
annual_income >= 2000000 ~ "2M+"), annual_income = NULL)
#Overview of the grouped income
table(loan_data$income_bracket)
#For simplicity reasons, we re-grouped years in current job
#in 3 groups instead of 11 groups
#Categorizing job stability based on years_in_current_job
#Store possible categories in vectors
low <- c("< 1 year", "1 year", "2 years", "3 years")
medium <- c("4 years", "5 years", "6 years", "7 years")
high <- c("8 years", "9 years", "10+ years")
#Create job_stability column by grouping years_in_current_job:
loan_data <- loan_data %>%
mutate(job_stability = case_when(
years_in_current_job %in% low ~ "Low", #Assign Low job stability
years_in_current_job %in% medium ~ "Medium", #Assign Medium job stability
years_in_current_job %in% high ~ "High" #Assign High job stability
), years_in_current_job=NULL)
#Overview of the grouped job_stability
table(loan_data$job_stability)
#We suspect credit score and term of loan to be
#the best predictors of loan status
#Re-leveling credit score
loan_data$credit_score_category<-factor(
loan_data$credit_score_category,levels=
c("Poor", "Average", "Good", "Very Good"))
loan_summary1 <- loan_data %>%
group_by(credit_score_category, term, loan_status) %>%
summarise(n = n()) %>% # Count the number in each group and response
group_by(credit_score_category, term) %>%
mutate(prop = n/sum(n)) # Calculate proportions within each group
ggplot(loan_summary1) +
#Specify a barplot of loan status
geom_col(aes(x = loan_status, y = prop, fill=loan_status)) +
#Facet by credit category score and term
facet_grid(credit_score_category~term) +
theme_minimal()+ #Make the theme neater
#Define size of title, axis and legend
theme(plot.title =element_text(size = 16),
axis.title.y = element_text(size = 16),
legend.title = element_text(size = 16),
legend.text = element_text(size = 14),
strip.text = element_text(size = 9))+
#Define title, y_axis, and legend labels
labs(title= "Fgure 3: Loan Status by Credit Score Category
and Loan Term",
y="Proportion", fill="Loan Status")+
theme(axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank() #Delete x_labels
)
#### Save the graph ####
ggsave("outputs/figures/loan_status_by_credit_score_term.png",
width = 15, height = 10, units = "cm")
#Loan status vary with both credit score category and term loan
#Next we look at job stability and income bracket
#Re-leveling job stability
loan_data$job_stability<-factor(loan_data$job_stability,
levels=c("Low", "Medium", "High"))
#Re-leveling income bracket
loan_data$income_bracket<-factor(loan_data$income_bracket,
levels=c("<500K", "500K-1M", "1M-1.5M",
"1.5M-2M", "2M+"))
#Group by job stability and income bracket. Calculate proportions
loan_summary2 <- loan_data %>%
group_by(job_stability, income_bracket, loan_status) %>%
summarise(n = n()) %>% # Count the number in each group and response
group_by(job_stability, income_bracket) %>%
mutate(prop = n/sum(n)) # Calculate proportions within each group
ggplot(loan_summary2) +
#Specify a barplot of loan status
geom_col(aes(x = loan_status, y = prop, fill=loan_status)) +
#Facet by job stability and income bracket
facet_grid(job_stability~income_bracket) +
theme_minimal()+ #Make the theme neater
#Define size of title, axix and legend
theme(plot.title =element_text(size = 16),
axis.title.y = element_text(size = 16),
legend.title = element_text(size = 16),
legend.text = element_text(size = 14),
strip.text = element_text(size = 10))+
#Define title, y_axis, and legend labels
labs(title= "Figure 4: Loan Status by Job Stability
and Income Bracket",
y="Proportion", fill="Loan Status")+
theme(axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank() #Delete x_labels
)
#### Save the graph ####
ggsave("outputs/figures/loan_status_by_job_stability_income.png",
width = 15, height = 10, units = "cm")
#While income bracket seems to cause significant difference, job stability
#does not. Hence credit score category, term loan, and income bracket
#will be used as our predictors in our logistic model
#Select columns/variables of interest
loan_data <- loan_data %>%
select(loan_status, credit_score_category, term, income_bracket)
#### Save selected data for model####
write_csv(loan_data, "outputs/datasets/loan_data_selected.csv")
|
7bbc39690ddf596da93092936773e8b368a3cf25
|
a5ea9d5ec0d70bfa722cfd5e49ce08119e339dda
|
/man/grasp.mod.anova.Rd
|
adc719f0c013b75abd319ed946ca3bb921f44577
|
[] |
no_license
|
cran/grasp
|
c46f16a28babb6cbed65aadbe2ddecc1a7214fd2
|
d57d11504ee99616e55a1a9c49e337cf1caf139d
|
refs/heads/master
| 2021-01-23T16:35:38.670044
| 2008-10-10T00:00:00
| 2008-10-10T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 699
|
rd
|
grasp.mod.anova.Rd
|
\name{grasp.mod.anova}
\alias{grasp.mod.anova}
\title{ Internal GRASP function }
\description{
An ANOVA table is constructed by testing the significance of removing in turn each predictor from the selected model.
}
\usage{
grasp.mod.anova(gr.Yi)
}
\arguments{
\item{gr.Yi}{A vector containing the selected responses}
}
\details{
This function gives a different result from the anova function of S-Plus \code{anova(model, test="F")}, which tests the successive inclusion of variables into the model and is therefore dependent on the order of variable entry.
}
\author{ Anthony.Lehmann@unige.ch }
\seealso{ grasp \code{\link{grasp}}, grasp.in \code{\link{grasp.in}}}
\keyword{models}
|
cfa474fe79ae1a2c32a375c00c801a7fb00cb6ae
|
43e5b951fb89dff9d5f9c043706cff6489e2628f
|
/man/convert_date_seepolizei.Rd
|
fd678a1e70d5b9f51706c9a4881aec36c7a021f2
|
[
"MIT"
] |
permissive
|
Ostluft/rOstluft
|
dedf9f0c329b9fc2c06c3d175748ec96ac9d96be
|
a231c4d0219f488be9cb3e57471014dd80dc2008
|
refs/heads/master
| 2022-05-28T07:51:41.383789
| 2022-03-16T21:00:35
| 2022-03-16T21:00:35
| 153,456,317
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 448
|
rd
|
convert_date_seepolizei.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read-seepolizei.R
\name{convert_date_seepolizei}
\alias{convert_date_seepolizei}
\title{Helper function to ensure correct string format}
\usage{
convert_date_seepolizei(x)
}
\arguments{
\item{x}{date as string, POSIXct, POSIXlt or Date Object}
}
\value{
string in format "\%Y-\%m-\%d"
}
\description{
Helper function to ensure correct string format
}
\keyword{internal}
|
d09e0d161c367c14c66c0160de427b8db83773ec
|
87bd3070d4c2aefaf0e343a096da12d66b8c119e
|
/R/summary.mgm.R
|
2c33f0e689cdba94e0252b87d67c3adc4eb08c53
|
[] |
no_license
|
bottleling/mgm
|
d03f0371d651d08fbc1fbca0cfa7b1048474af6d
|
01484ec9d668e0fe4050e70716ca2802985b4f6e
|
refs/heads/master
| 2021-01-20T00:50:35.843531
| 2017-02-06T11:23:05
| 2017-02-06T11:23:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,406
|
r
|
summary.mgm.R
|
# S3 summary method for 'mgm' objects (stationary or time-varying mixed
# graphical models / VARs). For each time step it builds a data frame with
# one row per node: variable index, type, degree (in-/out-degree for VAR
# models), fitted lambda, threshold, EBIC, and -- when `data` is supplied --
# nodewise prediction errors from predict.mgm().
# Returns a single data frame for stationary models, otherwise a list of
# data frames, one per time step.
summary.mgm <- function(object, data = NULL, ...)
{

  # ---------- Loop over Time Steps ----------

  out_list <- list()

  # stationary or time varying? tv.* objects carry the number of time steps
  if('tv.mgm' %in% class(object) | 'tv.var' %in% class(object)) {
    tsteps <- object$call$tsteps
  } else {
    tsteps <- 1
  }

  # compute nodewise errors (only possible when the original data is given)
  if(!is.null(data)) {
    l_elist <- predict.mgm(object, data)
    if(tsteps==1) {
      l_errors <- list()
      l_errors[[1]] <- l_elist$error
    } else {
      l_errors <- lapply(l_elist, function(x) x$error)
    }
  }

  for(ts in 1:tsteps) {

    if(tsteps>1) { # for time varying: pick this step's node models
      call <- object$call
      node.models <- object$t.models[[ts]]$node.models
    } else { # for stationary
      call <- object$call
      node.models <- object$node.models
    }

    type <- call$type
    nNodes <- length(type)

    # ---------- compute measures to report ----------
    # per-node threshold, regularization parameter and EBIC
    l_tau <- list()
    for(v in 1:nNodes) l_tau[[v]] <- node.models[[v]]$threshold
    l_lambda <- list()
    for(v in 1:nNodes) l_lambda[[v]] <- node.models[[v]]$lambda
    l_EBIC <- list()
    for(v in 1:nNodes) l_EBIC[[v]] <- node.models[[v]]$EBIC

    # ---------- Make nice dataframe for save/print ----------
    df_out <- data.frame(matrix(NA, nNodes, 1))
    colnames(df_out) <- 'Variable'

    # variable label and type
    df_out$Variable <- 1:nNodes
    df_out$Type <- type

    # degree: binarize the (possibly time-step-specific) weighted adjacency
    if(tsteps>1) {wadj <- object$wadj[,,ts] } else {wadj <- object$wadj}
    adj <- wadj; adj[adj!=0] <- 1
    if('var' %in% class(object)) {
      # directed (VAR) model: self-loops ignored, report in- and out-degree
      diag(adj) <- 0
      df_out$degree.in <- colSums(adj)
      df_out$degree.out <- rowSums(adj)
    } else {
      df_out$degree <- colSums(adj)
    }

    # fit parameters
    df_out$Lambda <- round(unlist(l_lambda),3)
    df_out$Threshold <- round(unlist(l_tau),3)
    df_out$EBIC <- round(unlist(l_EBIC),3)

    # add nodewise errors to data frame (only when data was provided)
    if(!is.null(data)) {
      df_out$Error <- l_errors[[ts]]$Error
      df_out$ErrorType <- l_errors[[ts]]$ErrorType
    }

    out_list[[ts]] <- df_out

  } # end for: timesteps

  # one data frame for stationary fits, a list for time-varying ones
  if(tsteps==1) {
    return(out_list[[1]])
  } else {
    return(out_list)
  }

} # EoF
|
6d9f8551e3688639274829203c82c4df57497d5d
|
fbe57536cc2d84e69a5bf799c88fcb784e853558
|
/man/dispersion.ADM.Rd
|
5235a46e6630ec20735ef269044fb640b379a383
|
[
"MIT"
] |
permissive
|
burrm/lolcat
|
78edf19886fffc02e922b061ce346fdf0ee2c80f
|
abd3915791d7e63f3827ccb10b1b0895aafd1e38
|
refs/heads/master
| 2023-04-02T11:27:58.636616
| 2023-03-24T02:33:34
| 2023-03-24T02:33:34
| 49,685,593
| 5
| 2
| null | 2016-10-21T05:14:49
| 2016-01-15T00:56:55
|
R
|
UTF-8
|
R
| false
| true
| 380
|
rd
|
dispersion.ADM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dispersion.ADM.R
\name{dispersion.ADM}
\alias{dispersion.ADM}
\title{Absolute Deviation From Median}
\usage{
dispersion.ADM(x)
}
\arguments{
\item{x}{Vector to compute ADM values for.}
}
\value{
Vector with results of ADM calculation.
}
\description{
Calculates absolute deviation from median (ADM).
}
|
78d6be44033979b5b3b4895d5e7b706b7c0f37d5
|
9315926fd58d03a3373ff36fbe6a4ce9e30c42c6
|
/code/email-domain.R
|
18b06bd42e64bd16769d37247cbc1f83eaddcd28
|
[] |
no_license
|
hanhy/linux-history
|
b59af4b3f7248987c6c8033e1ca2bca35986325b
|
f4ee09e918ca5106c07e24c61b1b0ed18a8f6629
|
refs/heads/master
| 2021-01-16T22:09:18.942770
| 2016-03-02T16:36:22
| 2016-03-02T16:36:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,787
|
r
|
email-domain.R
|
# Exploratory analysis of gmail.com adoption among Linux kernel patch authors.
# Depends on objects/helpers defined elsewhere in the project: `delta`
# (per-change table with columns mod, y, m, ae, ce, aid, cid, ...),
# `numofdeltas`, `t2apply`, and `numOfUnique` -- TODO confirm their contracts.

# yearly: share of distinct gmail author emails per (year, subsystem)
mods <- c('drivers', 'arch', 'net', 'sound', 'fs', 'kernel', 'mm')
tsel <- delta$mod %in% mods #& delta$y >= 2010
res <- t2apply((1:numofdeltas)[tsel], delta$y[tsel], delta$mod[tsel], function(x) {
  # Earlier variant kept several per-cell counts; retained for reference:
  # t <- c(length(x), numOfUnique(delta$aid[x]), numOfUnique(delta$cid[x]),
  #        numOfUnique(delta$ae[x]), numOfUnique(delta$ce[x]),
  #        length(grep('gmail.com', delta$ae[x], ignore.case=T)),
  #        length(grep('gmail.com', delta$ce[x], ignore.case=T))
  # )
  # names(t) <- c('nchgs', 'na', 'nc', 'nae', 'nce', 'nga', 'ngc')
  # return(t)
  # Fraction of unique author emails that are gmail addresses.
  return(length(grep('gmail.com', unique(delta$ae[x]), ignore.case=T)) / numOfUnique(delta$ae[x]))
})
# Per-subsystem totals: unique author emails, and how many are gmail.
aes <- tapply(delta$ae[tsel], delta$mod[tsel], numOfUnique)
volnaes <- tapply(delta$ae[tsel], delta$mod[tsel], function(x) {
  x <- unique(x)
  return(length(grep('gmail.com', x, ignore.case=T)))
})

# 3-year rolling window, advanced one month at a time
mods <- c('drivers', 'arch', 'net', 'sound', 'fs', 'kernel', 'mm')
gart <- list() # gmail author ratio per window
acnt <- list() # unique author count per window
for ( i in 1:length(mods)) {
  x <- y <- c()
  st <- 2005
  ed <- st + 3
  while (ed < 2015.917) {
    m <- as.character(st)   # window label = start year (fractional month)
    tsel <- delta$m >= st & delta$m < ed & delta$mod == mods[i]
    x[m] <- length(grep('gmail.com', unique(delta$ae[tsel]), ignore.case = T))
    y[m] <- numOfUnique(delta$ae[tsel])
    st <- st + 1/12
    ed <- st + 3
  }
  gart[[mods[i]]] <- x / y
  acnt[[mods[i]]] <- y
}
# NOTE(review): the title below speaks of authors vs committers, but what is
# plotted is the gmail share among authors (gart) -- confirm intended label.
plot(1, type='n', xlim=c(2005, 2013), ylim=c(0, 0.4),
     main='Ratio of adjusted # authors to adjusted # committers (in 3-year period)',
     xlab='Natural month', ylab='Ratio')
col <- 1:length(mods)
for (i in 1:length(col)) {
  x <- gart[[mods[i]]]
  lines(as.numeric(names(x)), x, col=col[i], type='l')
}
legend(2007, 0.4, legend=mods,cex=1,lwd=1,
       col=col ,bg="white");
|
54ea6d920cc578cf248a078dffebdc2839576c69
|
86422f71fb0db244ea0c49909563a9420c584128
|
/R/loops.R
|
2efae148f5883820f35d3e2c2071a55f17c3e51e
|
[] |
no_license
|
Decision-Stats/s15_codes
|
0d69fb9e95faabf35d41fd327ed490968cf2dbac
|
673a8078163a2eddd02fe418744a71542326c2e0
|
refs/heads/master
| 2021-01-19T06:58:29.421813
| 2015-07-10T17:16:21
| 2015-07-10T17:16:21
| 38,714,623
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 257
|
r
|
loops.R
|
# Demonstrates basic for loops in R.

# Loop 1: print the loop index itself; the body goes inside the braces.
for (number in seq_len(5)) {
  print(number)
}

# Loop 2: print a value derived from the index (its square).
for (i in seq_len(5)) {
  print(i^2)
}

# Loop 3: print i random draws from N(10, 10) on each iteration.
for (i in seq_len(5)) {
  print(rnorm(i, 10, 10))
}

# Loop 4: same draws, but without print() nothing is shown --
# expression values inside a loop body are not auto-printed.
for (i in seq_len(5)) {
  rnorm(i, 10, 10)
}
|
eb130f4a3f2202468b8417eff31417ff1f90ad64
|
dfc525a68a2319d8906045dda6f4db48ae26b6a5
|
/Result_Maps.R
|
e3b5fb9aa4a33d1c281946b6f90d3171b4e3ba9f
|
[] |
no_license
|
geoffreylarnold/Election-2018
|
141b16492a70f8ad0773469ce355600dc246a538
|
91b36fef192408a8077234bf9250c0c93ae0f285
|
refs/heads/master
| 2020-04-06T15:32:47.188137
| 2018-11-14T17:44:17
| 2018-11-14T17:44:17
| 157,582,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,738
|
r
|
Result_Maps.R
|
require(rgdal)
require(readxl)
require(dplyr)
require(leaflet)
require(htmltools)
pittwards <- readOGR("https://services1.arcgis.com/vdNDkVykv9vEWFX4/arcgis/rest/services/VotingDistricts2017_May_v6/FeatureServer/0/query?where=Muni_War_1+LIKE+%27Pittsburgh%25%27&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&resultType=none&distance=0.0&units=esriSRUnit_Meter&returnGeodetic=false&outFields=*&returnGeometry=true&returnCentroid=false&multipatchOption=xyFootprint&maxAllowableOffset=&geometryPrecision=&outSR=&datumTransformation=&applyVCSProjection=false&returnIdsOnly=false&returnUniqueIdsOnly=false&returnCountOnly=false&returnExtentOnly=false&returnDistinctValues=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&having=&resultOffset=&resultRecordCount=&returnZ=false&returnM=false&returnExceededLimitFeatures=true&quantizationParameters=&sqlFormat=standard&f=pgeojson&token=") %>%
spTransform(CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"))
toc <- read_excel("detail.xlsx", sheet = "Table of Contents")
turnout <- read_excel("detail.xlsx", sheet = "Registered Voters") %>%
mutate(`Voter Turnout` = as.numeric(gsub("%| ", "", `Voter Turnout`)))
senResults <- read_excel("detail.xlsx", sheet = "2")
govResults <- read_excel("detail.xlsx", sheet = "3") %>%
rename(County = 1,
`Wolf Total` = 5,
`Wagner Total` = 8,
Total = 18) %>%
mutate(demGov = as.numeric(`Wolf Total`) / as.numeric(Total) * 100,
repGov = as.numeric(`Wagner Total`) / as.numeric(Total) * 100) %>%
select(c(County, demGov, repGov))
pittwards@data <- merge(pittwards@data, turnout, by.x = "Muni_War_1", by.y = "County", sort = FALSE, all.x = TRUE)
pittwards@data <- merge(pittwards@data, govResults, by.x = "Muni_War_1", by.y = "County", sort = FALSE, all.x = TRUE)
palTO <- colorNumeric("Greens", pittwards$`Voter Turnout`)
leaflet(data = pittwards) %>%
addTiles() %>%
addPolygons(color = ~palTO(`Voter Turnout`),
popup = ~paste0("<b>", Muni_War_1, "</b>: ",`Voter Turnout`, "%"),
fillColor = ~palTO(`Voter Turnout`),
fillOpacity = .8) %>%
addLegend(position = "bottomright", pal = palTO, values = ~`Voter Turnout`, title = "Voter Turnout (%)")
bins <- seq(from = 0, to = 100, length.out = 11)
palDem <- colorBin("RdBu", domain = 0:100, bins = bins, pretty = F)
leaflet(data = pittwards) %>%
addTiles() %>%
addPolygons(color = ~palDem(demGov),
popup = ~paste0("<b>", Muni_War_1, "</b>: <br>Wolf: ", round(demGov, 2), "%",
"<br>Wagner: ", round(repGov,2), "%"),
fillColor = ~palDem(demGov),
fillOpacity = 0.8)
allWards <- readOGR("https://services1.arcgis.com/vdNDkVykv9vEWFX4/arcgis/rest/services/VotingDistricts2017_May_v6/FeatureServer/0/query?where=1=1&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&resultType=none&distance=0.0&units=esriSRUnit_Meter&returnGeodetic=false&outFields=*&returnGeometry=true&returnCentroid=false&multipatchOption=xyFootprint&maxAllowableOffset=&geometryPrecision=&outSR=&datumTransformation=&applyVCSProjection=false&returnIdsOnly=false&returnUniqueIdsOnly=false&returnCountOnly=false&returnExtentOnly=false&returnDistinctValues=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&having=&resultOffset=&resultRecordCount=&returnZ=false&returnM=false&returnExceededLimitFeatures=true&quantizationParameters=&sqlFormat=standard&f=pgeojson&token=") %>%
spTransform(CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"))
allWards@data <- merge(allWards@data, turnout, by.x = "Muni_War_1", by.y = "County", sort = FALSE, all.x = TRUE)
allWards@data <- merge(allWards@data, govResults, by.x = "Muni_War_1", by.y = "County", sort = FALSE, all.x = TRUE)
palTO <- colorNumeric("Greens", allWards$`Voter Turnout`)
leaflet(data = allWards) %>%
addTiles() %>%
addPolygons(color = ~palTO(`Voter Turnout`),
popup = ~paste0("<b>", Muni_War_1, "</b>: ",`Voter Turnout`, "%"),
fillColor = ~palTO(`Voter Turnout`),
fillOpacity = .8) %>%
addLegend(position = "bottomright", pal = palTO, values = ~`Voter Turnout`, title = "Voter Turnout (%)")
leaflet(data = allWards) %>%
addTiles() %>%
addPolygons(color = ~palDem(demGov),
popup = ~paste0("<b>", Muni_War_1, "</b>: <br>Wolf: ", round(demGov, 2), "%",
"<br>Wagner: ", round(repGov,2), "%"),
fillColor = ~palDem(demGov),
fillOpacity = 0.8,
opacity = .95) %>%
addLegend(pal = palDem, values = bins, title = "Wolf Vote Share")
|
587d2a9ce57f264097b8ae661169ef14ff981c99
|
4755427593f4e0f5a162640d6de1041110e63763
|
/cursus/data/sigmoid.R
|
a93e137d5df72f2c9ce284220b0a0513dfc8d6bc
|
[] |
no_license
|
HoGentTIN/onderzoekstechnieken-cursus
|
5e642d984ab422f1d001984463f0e693f89e9637
|
bd7e61aa8d2a0a4525de82774568954c76dd33ae
|
refs/heads/master
| 2022-06-28T05:09:34.694920
| 2022-06-21T13:35:59
| 2022-06-21T13:35:59
| 80,239,413
| 21
| 59
| null | 2020-05-25T06:56:06
| 2017-01-27T19:35:24
|
HTML
|
UTF-8
|
R
| false
| false
| 97
|
r
|
sigmoid.R
|
# Logistic (sigmoid) function: P = 1 / (1 + exp(-(alfa + beta * x))).
#
# Rewritten in the numerically stable form 1 / (1 + exp(-z)): the original
# exp(z) / (1 + exp(z)) overflows to Inf/Inf = NaN once z exceeds ~709,
# whereas this form correctly saturates at 1 (and at 0 for very negative z).
#
# alfa: intercept of the linear predictor
# beta: slope of the linear predictor
# x:    numeric vector of inputs
# Returns a numeric vector of probabilities in [0, 1], same length as x.
sigmoide <- function (alfa, beta, x){
  z <- alfa + beta * x
  return(1 / (1 + exp(-z)))
}
|
06e2c8bd8faf64d037e2d1f13297209892b9197d
|
30ee5256f363954bcacf452ab94249ddf04b270e
|
/apply_functions.r
|
773937ac7fbec0618261bbff2bc719f1883d32f4
|
[] |
no_license
|
tiborh/r
|
70c45812347a65786c5bf95eccc7376f8caf7f72
|
8de2d56608b2e52faaf554f3cc955a456c58f57f
|
refs/heads/master
| 2022-11-01T05:54:55.451584
| 2022-10-27T15:15:37
| 2022-10-27T15:15:37
| 36,147,838
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,286
|
r
|
apply_functions.r
|
## lapply can be used on lists or vectors; it always returns a list.
l <- list(a=51648794597,b=c("alpha","beta","gamma","delta","epsilon"),c="something else")
lapply(l,class)
## -------------------- ##
## create a vector of random strings (make.string.v comes from common.r)
source("common.r")
v <- make.string.v(10)
## unlist() flattens the returned list into a plain vector
vls <- unlist(lapply(v,nchar))
v
vls
## here sapply can also be used: it simplifies the result automatically
class(lapply(v,nchar)) # list
vv <- sapply(v,nchar)
class(vv) # integer
## without names (sapply names results after the input by default)
sapply(v,nchar,USE.NAMES=F)
## -------------------- ##
nums <- sample(100,10)
## a simple multiplier
# Multiply a number (or numeric vector) by a scaling factor.
multip <- function(num, factor) {
  num * factor
}
nums
## extra function arguments can be passed as further arguments to lapply
unlist(lapply(nums,multip,factor=3))
## -------------------- ##
## with anonymous functions:
pioneers <- c("GAUSS:1777", "BAYES:1702", "PASCAL:1623", "PEARSON:1857")
split <- strsplit(pioneers, split = ":")
split_low <- lapply(split, tolower)
split_low
## extract element i of each "name:year" pair via an anonymous function
names <- lapply(split_low, function(x,i) { x[i] }, i=1)
years <- lapply(split_low, function(x,i) { x[i] }, i=2)
names
years
## -------------------- ##
## when the function returns a vector (us.cities comes from common.r)
cities.10 <- us.cities(10)
sort(cities.10)
sapply(cities.10,nchar)
# Return the alphabetically first and last characters of a name.
# Whitespace is stripped and the name lowercased before comparison.
# FIX: the second element is now named "last" to match the "first" element
# (it was unnamed); TRUE is spelled out instead of T; the local no longer
# shadows the builtin `letters` constant.
first.n.last <- function(name) {
  name <- gsub("\\s", "", name, perl = TRUE) # [[:space:]] is also good
  name <- tolower(name)
  chars <- strsplit(name, split = "")[[1]]
  c(first = min(chars), last = max(chars))
}
first.n.last("New York")
## sapply simplifies the per-element 2-vectors into a 2-row matrix
f.n.l <- sapply(cities.10,first.n.last)
class(f.n.l) # matrix
f.n.l
## -------------------- ##
class(sapply(v,unique.letters)) # list, same as lapply (unique.letters is from common.r)
luni <- lapply(v,unique.letters)
suni <- sapply(v,unique.letters)
## ragged results cannot be simplified, so sapply falls back to a list here
setdiff(luni,suni)
setdiff(suni,luni) # both empty, so they are equal
identical(luni,suni) # F, not because the names are different
## -------------------- ##
## vapply: the output format must be specified, making the return type safe
vapply(cities.10,nchar,numeric(1)) # same as sapply(cities.10,nchar)
vapply(cities.10,first.n.last,character(2)) # same as sapply(cities.10,first.n.last)
## unique letters cannot be performed, as num in character(num) cannot be specified correctly
e11fae76661de970c58734721a96a42892622539
|
97b6e598ef6970eed3efd64033ca0a34fd0a594c
|
/R/00_gen_messy_data.R
|
29ff1234d1a1eb014831c97d069295a374648668
|
[] |
no_license
|
eugejoh/messy_data_tutorial
|
15b49e58e2a62c8cbd7dc83222b5f49e02b72905
|
707cbd2b5c55c045ef01cfaa534e98d66be0e34e
|
refs/heads/master
| 2020-05-26T06:46:11.992206
| 2019-06-05T15:50:43
| 2019-06-05T15:50:43
| 188,139,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,024
|
r
|
00_gen_messy_data.R
|
# Make Messy Data ---------------------------------------------------------
# Eugene Joh
# 2019-05-21
# generate random data
# - age
# - name
# - sex
# - area
# - hypothetical immune status
# - hypothetical Ab serum concentration
# install.packages("randomNames")
library(randomNames)
#' Generate a deliberately "messy" synthetic patient data set.
#'
#' Rows mix inconsistent sex codings ("Male"/"m", "Female"/"F"), misspelled
#' area strings, serum values stored as text with a unit suffix and sentinel
#' codes (-1 / -99), and -- when na = TRUE -- roughly 5% missing values per
#' column. Intended as practice material for data-cleaning tutorials.
#'
#' @param nrow       Number of rows to generate; must be > 1.
#' @param mean_age   Mean of the normal distribution used for age.
#' @param sd_age     Standard deviation of the age distribution.
#' @param mean_serum Scale of the log-normal serum distribution
#'                   (meanlog = log(mean_serum)).
#' @param sd_serum   sdlog of the serum distribution.
#' @param na         If TRUE, randomly blank ~5\% of each column to NA.
#' @param seed       RNG seed, making each call reproducible.
#' @return A row-shuffled data.frame with columns full_name, sex, age, area,
#'   immune_status, serum_igm (character, e.g. "92.3 mg/dL").
make_messy_data <- function(
  nrow = 100,
  mean_age = 40,
  sd_age = 14,
  mean_serum = 90,
  sd_serum = 0.6,
  na = TRUE,
  seed = 1) {

  if (nrow < 2) stop("please enter number > 1")
  set.seed(seed)

  # Draw N noisy counts summing (approximately, after rounding) to M.
  rand_ <- function(N, M, sd = 1) {
    vec <- rnorm(N, M/N, sd)
    round(vec / sum(vec) * M)
  }

  # Split each sex's half of the rows between two inconsistent labels.
  rand_m <- abs(rand_(2, nrow/2, sd = 3))
  rand_f <- abs(rand_(2, nrow/2, sd = 2))

  # Area names with intentional misspellings/case errors (part of the mess).
  area_m <-
    c(
      "Downtown, Toronto",
      "downton, Toronto",
      "Nord York, Tornto",
      "North York, Tornto",
      "Erindale, Mississauga",
      "Etobioke, Toronto",
      "Etobicoke, Toronto",
      "Scarborough, Toronto",
      "Bronte, Oakville",
      "Yonge-Eglinton, oronto",
      "Yong-Eglington, Toronto",
      "Port Credit, mississauga",
      "Parkdale, Toronto"
    )

  df <- data.frame(
    # Names from the randomNames package; gender codes 0/1 per its
    # convention -- TODO confirm the 0/1 mapping. Truncated to nrow.
    full_name = c(randomNames::randomNames(n = round(nrow/2), gender = 0),
                  randomNames::randomNames(n = round(nrow/2), gender = 1))[1:nrow],
    # Inconsistently coded sex labels, truncated to nrow.
    sex = c(
      rep("Male", rand_m[1]),
      rep("m", rand_m[2]),
      rep("Female", rand_f[1]),
      rep("F", rand_f[2]))[1:nrow],
    age = abs(round(rnorm(n = nrow, mean = mean_age, sd = sd_age))),
    area = sample(x = area_m, size = nrow, replace = TRUE),
    immune_status = rpois(n = nrow, lambda = 1),
    # Log-normal serum; ~5% of entries replaced by sentinel codes -1 / -99,
    # then rounded and formatted as text with a unit suffix.
    serum_igm = paste(round(replace(rlnorm(n = nrow, meanlog = log(mean_serum), sdlog = sd_serum),
                                    sample(1:nrow,size = 0.05*nrow, replace = TRUE),
                                    sample(c(-1,-99), size = 0.05*nrow, replace = TRUE)),
                            1), "mg/dL"),
    stringsAsFactors = FALSE
  )

  if (na) {
    # Blank ~5% of each column: indexing by NA yields NA at those positions.
    df <- as.data.frame(
      do.call("cbind",
              lapply(df, function(x) {
                i <- sample(
                  c(TRUE, NA),
                  prob = c(0.95, 0.05),
                  size = length(x),
                  replace = TRUE)
                x[i]
              })
      ), stringsAsFactors = FALSE)
  }

  # Shuffle row order before returning.
  df[sample(nrow(df)),]
}
# Build the three practice datasets; each call sets its own seed inside
# make_messy_data(), so reruns reproduce identical files.
df1 <- make_messy_data(nrow = 600, mean_age = 39, sd_age = 10, mean_serum = 90, seed = 1)
df2 <- make_messy_data(nrow = 200, mean_age = 5, sd_age = 2, mean_serum = 70, sd_serum = 0.4, seed = 2)
df3 <- make_messy_data(nrow = 200, mean_age = 60, sd_age = 12, mean_serum = 100, sd_serum = 0.6, seed = 3)
# Export each dataset as a CSV under ./data/, creating the folder on first run.
out_dir <- file.path(getwd(), "data")
if (!dir.exists(out_dir)) dir.create(out_dir)
csv_exports <- list(messy_data_01 = df1, messy_data_02 = df2, messy_data_03 = df3)
for (stem in names(csv_exports)) {
  write.csv(x = csv_exports[[stem]],
            file = file.path(out_dir, paste0(stem, ".csv")),
            row.names = FALSE)
}
|
7f599082474f85c84b2af8bc2fb466d61c105f40
|
87bee99b5742c75d392186800f845d44c9e48fcf
|
/CH.9./SVM_NonLinear.R
|
5f8cca35e215010aa3777d606f2ec59567036cf0
|
[] |
no_license
|
ssh352/ISLR-5
|
087e84294f2e7ac6fd42472a8d2b6a9dcebf59fa
|
1fff21344a7f6e2f379b9618143925bcb4c6f6b7
|
refs/heads/master
| 2022-08-21T15:36:12.105031
| 2020-05-28T11:12:42
| 2020-05-28T11:12:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,335
|
r
|
SVM_NonLinear.R
|
# ISLR Ch. 9 lab: support vector machines with a radial (non-linear) kernel.
set.seed(1)
library(e1071)

# Simulate two-class data with a non-linear class boundary.
X <- matrix(rnorm(200*2), ncol = 2)
X[1:100,] <- X[1:100,] + 2
X[101:150,] <- X[101:150,] - 2
Y <- c(rep(1,150), rep(2,50))
plot(X, col = Y)
data1 <- data.frame(X = X, Y = as.factor(Y))
train <- sample(200, 100)

# Radial-kernel SVM, lower cost (wider margin, more support vectors) #
svmfit_r <- svm(Y~., data = data1[train,], kernel = 'radial', gamma = 1, cost = 1)
summary(svmfit_r)
plot(svmfit_r, data1[train,])

# Higher cost: fewer training errors but a more irregular boundary #
svmfit_r2 <- svm(Y~., data = data1[train,], kernel = 'radial', gamma = 1, cost = 1e5)
summary(svmfit_r2)
plot(svmfit_r2, data1[train,])

# Hyperparameter tuning over gamma and cost via cross-validation #
tune.out_n <- tune(svm, Y~., data = data1[train,], kernel = 'radial',
                   ranges = list(gamma = c(.5,1,2,3,4), cost = c(.1,1,10,100,1000)))
summary(tune.out_n)
# Confusion matrix of the best model on the held-out test observations.
# BUG FIX: predict.svm takes `newdata`, not `newx`; `newx` was silently
# swallowed by `...`, so predictions were actually made on the training set.
table(true = data1[-train,'Y'],
      pred = predict(tune.out_n$best.model, newdata = data1[-train,]))

# ROC plot
library(ROCR)
# Plot an ROC curve from decision values; label.ordering = c(2, 1) pins
# which class ROCR treats as the positive one.
rocplot <- function(pred, truth, ...){
  predob <- prediction(pred, truth, label.ordering = c(2, 1))
  perf <- performance(predob, 'tpr', 'fpr')
  plot(perf, ...) }

svmfit.opt_r <- svm(Y~., data = data1[train,], gamma = .5, cost = 1,
                    kernel = 'radial', decision.values = TRUE)
fitted <- attributes(predict(svmfit.opt_r, data1[train,], decision.values = TRUE))$decision.values
par(mfrow = c(1, 2))
rocplot(fitted, data1[train,'Y'], main = "training data")
### Much larger gamma: a far more flexible fit on the training data.
svmfit.opt_r1 <- svm(Y~., data = data1[train,], gamma = 50, cost = 1,
                     kernel = 'radial', decision.values = TRUE)
fitted1 <- attributes(predict(svmfit.opt_r1, data1[train,], decision.values = TRUE))$decision.values
rocplot(fitted1, data1[train,'Y'], main = "training data", add = TRUE, col = 'red')
### ROC plots on test data
fittedt1 <- attributes(predict(svmfit.opt_r, data1[-train,], decision.values = TRUE))$decision.values
fittedt2 <- attributes(predict(svmfit.opt_r1, data1[-train,], decision.values = TRUE))$decision.values
par(mfrow = c(1, 2))
rocplot(fittedt1, data1[-train,'Y'], main = "test data")
rocplot(fittedt2, data1[-train,'Y'], main = "test data", add = TRUE, col = 'red')

#### multiple classes ###
# adding an additional class #
X <- rbind(X, matrix(rnorm(50*2), ncol = 2))
Y <- c(Y, c(rep(0, 50)))
X[Y==0,] <- X[Y==0,] + 2
# BUG FIX: the response must be a factor for svm() to perform classification;
# with numeric Y, svm() silently fits a regression model instead.
data2 <- data.frame(X = X, Y = as.factor(Y))
par(mfrow = c(1, 1))
plot(X, col = c(3-Y), main = 'scatter plot of multiclass simulated data')
svm_fit_multc <- svm(Y~., data = data2, kernel = 'radial', gamma = 1, cost = 10)
summary(svm_fit_multc)
par(mfrow = c(1, 1))
# BUG FIX: was `plot(svm_fir_multc, data2)` -- a typo referencing an
# undefined object, so this line always errored.
plot(svm_fit_multc, data2)
######################### end ############################
|
4a14145ecf4d105bf0e9872645fdea4a16b1609b
|
7ab8eafb68413a4d1a22262dfbbf31e19245bfc5
|
/script/script/outlier_plt_parallel.R
|
8ce7579194a7d1e5e3f0217493a99fdd16b4a7f0
|
[] |
no_license
|
Ryosuke-Kawamori/medicare
|
cb604954f18a44cc30ecb54c66aff0887b36d99f
|
8ce914b21a24a57bb7a8eee883393d0ed1e3c7d5
|
refs/heads/master
| 2020-04-22T09:36:34.637741
| 2019-02-14T06:56:12
| 2019-02-14T06:56:12
| 170,278,409
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,371
|
r
|
outlier_plt_parallel.R
|
# Compare outlier-detection results (LOF vs one-class SVM) across several
# feature-aggregation schemes: summarise each by the area under its ROC
# curve, then overlay the ROC curves.
# NOTE(review): assumes the tidyverse is attached elsewhere (read_rds,
# dplyr, purrr, ggplot2) and that `auc()` comes from a package loaded
# upstream -- confirm before running standalone.

# Precomputed LOF (local outlier factor) results, one .rds per scheme.
lof_provider_stat_result <- read_rds("data/lof_provider_stat_result.rds")
lof_provider_hcpcs_result <- read_rds("data/lof_provider_hcpcs_result.rds")
lof_providermesh_stat_result <- read_rds("data/lof_providermesh_stat_result.rds")
lof_providermesh_hcpcs_result <- read_rds("data/lof_providermesh_hcpcs_result.rds")
lof_mesh_stat_result <- read_rds("data/lof_mesh_stat_result.rds")
lof_mesh_hcpcs_result <- read_rds("data/lof_mesh_hcpcs_result.rds")
original_lof_df <- read_rds("data/original_lof_df.rds")
# Precomputed one-class SVM results for the same schemes.
svm_provider_stat_result <- read_rds("data/svm_provider_stat_result.rds")
svm_provider_hcpcs_result <- read_rds("data/svm_provider_hcpcs_result.rds")
svm_providermesh_stat_result <- read_rds("data/svm_providermesh_stat_result.rds")
svm_providermesh_hcpcs_result <- read_rds("data/svm_providermesh_hcpcs_result.rds")
svm_mesh_stat_result <- read_rds("data/svm_mesh_stat_result.rds")
#svm_mesh_hcpcs_result <- read_rds("data/svm_mesh_hcpcs_result.rds")
# One list-column tibble per method: a labelled row per aggregation scheme.
result_lof_df <- tibble(Type = c("NONE STAT", "PROVIDER-STAT", "PROVIDER-HCPCS", "PROVIDER-GRID STAT", "PROVIDER-GRID HCPCS", "GRID STAT", "GRID HCPCS"),
                    results = list(original_lof_df, lof_provider_stat_result, lof_provider_hcpcs_result, lof_providermesh_stat_result, lof_providermesh_hcpcs_result, lof_mesh_stat_result, lof_mesh_hcpcs_result))
result_svm_df <- tibble(Type = c("PROVIDER STAT", "PROVIDER HCPCS", "PROVIDER-GRID STAT", "PROVIDER-GRID HCPCS", "GRID STAT"),
                        results = list(svm_provider_stat_result, svm_provider_hcpcs_result, svm_providermesh_stat_result, svm_providermesh_hcpcs_result, svm_mesh_stat_result))
# AUC summary per LOF scheme: fold auc(fp, tp) over each result set.
# NOTE(review): LOF results use columns fp/tp, SVM results fpr/tpr --
# presumably false/true positive rates; confirm against the upstream script.
result_lof_df %>%
  dplyr::mutate(s = purrr::map(results, . %>% mutate(s=auc(fp,tp)) %>% dplyr::summarize(sum=sum(s)))) %>%
  dplyr::select(Type, s) %>%
  unnest
# AUC summary per one-class SVM scheme.
result_svm_df %>%
  dplyr::mutate(s = purrr::map(results, . %>% mutate(s=auc(fpr,tpr)) %>% dplyr::summarize(sum=sum(s)))) %>%
  dplyr::select(Type, s) %>%
  unnest
# Overlaid ROC curves for the LOF schemes (diagonal = chance performance).
result_lof_df %>%
  unnest %>%
  ggplot(aes(x=fp, y=tp, color=Type))+
  #geom_point()+
  geom_line()+
  geom_abline(intercept = 0)+
  labs(x="FPR", y="TPR")+
  theme(text = element_text(size=25))
# Overlaid ROC curves for the one-class SVM schemes.
result_svm_df %>%
  unnest %>%
  ggplot(aes(x=fpr, y=tpr, color=Type))+
  geom_point()+
  geom_abline(intercept = 0)+
  geom_line()+
  labs(x="FPR", y="TPR")+
  theme(text = element_text(size=25))
|
b5318d2d32adc716390c5086dc59cd89435ad84f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lordif/examples/runolr.Rd.R
|
986a951e30890f04ac87d9c4bc035cee9d7bbefb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
runolr.Rd.R
|
# Extracted Rd example for lordif::runolr ("runs ordinal logistic regression
# models", per the Title below). The call is wrapped in "Not run" because
# rv, ev and gr are placeholder objects that are not defined here.
library(lordif)
### Name: runolr
### Title: runs ordinal logistic regression models
### Aliases: runolr
### Keywords: ~kwd1 ~kwd2
### ** Examples

## Not run: runolr(rv, ev, gr)
|
59cc9f466117ba3e55356a662b88051f0d30eca3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/FMAdist/examples/famfit.Rd.R
|
8b57d3ff379eed414f62f636d0452ea87e5ed1ca
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
famfit.Rd.R
|
# Extracted Rd example for FMAdist::fmafit.
library(FMAdist)
### Name: fmafit
### Title: Building frequentist model averaging input models
### Aliases: fmafit
### Keywords: ~kwd1 ~kwd2
### ** Examples

# Simulated input sample: 500 lognormal draws.
obs <- rlnorm(500, meanlog = 0, sdlog = 0.25)
# Candidate distribution families to average over.
families <- c("gamma", "weibull", "normal", "ED")
fit_type <- "P" # by default type<-'Q'
n_j <- 5        # by default J<-10
myfit <- fmafit(obs, families, n_j, fit_type)
|
05159d8fa8a5f80a42effc61d610f6fa50e10cce
|
8e9808c789fc646a66f9b4844e56d6de2a95c405
|
/R/tests.R
|
e91e3d88d416b697cf16c12862ee9a0ce4107389
|
[] |
no_license
|
ddarmon/MUsaic
|
5aa809e1a696effee03411542a2dba9412e273d6
|
039f326fd7b93e2bc9f3d7c8c5be6e720d97bf7f
|
refs/heads/master
| 2022-04-30T16:49:19.645681
| 2022-03-17T00:02:59
| 2022-03-17T00:02:59
| 200,889,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,661
|
r
|
tests.R
|
#' Two-sample t-test with summary statistics
#'
#' Two-sample t-test for testing claims about the difference between two
#' population means, given only per-sample summary statistics. By default the
#' population variances are not assumed equal (Welch's test); set
#' \code{var.equal = TRUE} for the pooled-variance (Student) test.
#'
#' @param xbar the sample mean of the first sample.
#' @param ybar the sample mean of the second sample.
#' @param sx the sample standard deviation of the first sample.
#' @param sy the sample standard deviation of the second sample.
#' @param nx the sample size of the first sample.
#' @param ny the sample size of the second sample.
#' @param null.diff the assumed difference mu_X - mu_Y under the null hypothesis.
#' @param alternative a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less". You can specify just the initial letter.
#' @param var.equal a logical variable indicating whether to treat the two variances as being equal. If TRUE then the pooled variance is used to estimate the variance otherwise the Welch (or Satterthwaite) approximation to the degrees of freedom is used.
#' @param conf.level confidence level for the interval estimator
#'
#' @return An object of class \code{"htest"}.
#' @export
two.sample.t.test = function(xbar, ybar, sx, sy, nx, ny, null.diff = 0,
                             alternative = c("two.sided", "less", "greater"),
                             var.equal = FALSE,
                             conf.level = 0.95){
  # A length-3 vector means the caller left the default in place.
  if (length(alternative) == 3){
    alternative = "two.sided"
  }
  alpha = 1 - conf.level
  # BUG FIX: the two variance branches were swapped in the original code --
  # var.equal = TRUE computed the Welch/Satterthwaite df, and FALSE computed
  # the pooled df, contradicting the documented contract and stats::t.test().
  if (var.equal){
    # Pooled-variance (Student) test: df = nx + ny - 2.
    nu = nx + ny - 2
    sp = sqrt(((nx - 1)*sx^2 + (ny - 1)*sy^2)/(nx + ny - 2))
    se.diff = sp*sqrt(1/nx + 1/ny)
  }else{
    # Welch's test: Satterthwaite approximation to the degrees of freedom.
    xbar.se = sx/sqrt(nx); ybar.se = sy/sqrt(ny)
    num = (xbar.se^2 + ybar.se^2)^2
    denom = (xbar.se^4)/(nx - 1) + (ybar.se^4)/(ny - 1)
    nu = num/denom
    se.diff = sqrt(sx^2/nx + sy^2/ny)
  }
  mean.diff = xbar - ybar
  tobs = (mean.diff - null.diff)/se.diff
  if (alternative == 'two.sided'){
    p.value = 2*pt(-abs(tobs), df = nu)
    conf.int = mean.diff + se.diff*qt(1-alpha/2, df = nu)*c(-1, 1)
    alt.text = sprintf("true difference in means is not equal to %g", null.diff)
  }else if (alternative == 'less'){
    p.value = pt(tobs, df = nu)
    conf.int = c(-Inf, mean.diff + se.diff*qt(conf.level, df = nu))
    alt.text = sprintf("true difference in means is less than %g", null.diff)
  }else if (alternative == 'greater'){
    p.value = 1 - pt(tobs, df = nu)
    conf.int = c(mean.diff - se.diff*qt(conf.level, df = nu), Inf)
    alt.text = sprintf("true difference in means is greater than %g", null.diff)
  }
  attr(conf.int, which = "conf.level") = conf.level
  names(tobs) <- "t"
  names(nu) <- "df"
  names(mean.diff) <- "mean(x) - mean(y)"
  # Report the method actually used, as stats::t.test() does.
  method.name <- if (var.equal) "Two Sample t-test" else "Welch Two Sample t-test"
  wtt <- list(method = method.name,
              data.name = sprintf('\nxbar = %g, sx = %g, nx = %g\nybar = %g, sy = %g, ny = %g\n', xbar, sx, nx, ybar, sy, ny),
              statistic = tobs,
              parameter = nu,
              p.value = p.value,
              conf.int = conf.int,
              estimate = mean.diff,
              alternative = alt.text)
  class(wtt) <- "htest"
  return(wtt)
}
#' Gosset's one-sample t-test with summary statistics
#'
#' One-sample t-test for a claim about a population mean, computed from the
#' sample mean, standard deviation and size alone (no raw data required).
#'
#' @param xbar the sample mean.
#' @param s the sample standard deviation.
#' @param n the sample size.
#' @param mu0 the assumed population mean under the null hypothesis.
#' @param alternative a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less". You can specify just the initial letter.
#' @param conf.level confidence level for the interval estimator
#'
#' @return An object of class \code{"htest"}.
#' @export
one.sample.t.test = function(xbar, s, n, mu0 = 0,
                             alternative = c("two.sided", "less", "greater"),
                             conf.level = 0.95){
  # A length-3 vector means the caller left the default in place.
  if (length(alternative) == 3){
    alternative <- "two.sided"
  }
  alpha <- 1 - conf.level
  df.t <- n - 1
  std.err <- s / sqrt(n)
  t.stat <- (xbar - mu0) / std.err

  if (alternative == "two.sided"){
    p.value <- 2 * pt(-abs(t.stat), df = df.t)
    half.width <- std.err * qt(1 - alpha/2, df = df.t)
    ci <- c(xbar - half.width, xbar + half.width)
    alt.text <- sprintf("true mean is not equal to %g", mu0)
  } else if (alternative == "less"){
    p.value <- pt(t.stat, df = df.t)
    ci <- c(-Inf, xbar + std.err * qt(conf.level, df = df.t))
    alt.text <- sprintf("true mean is less than %g", mu0)
  } else if (alternative == "greater"){
    p.value <- 1 - pt(t.stat, df = df.t)
    ci <- c(xbar - std.err * qt(conf.level, df = df.t), Inf)
    alt.text <- sprintf("true mean is greater than %g", mu0)
  }

  # Label the pieces the way print.htest expects.
  attr(ci, "conf.level") <- conf.level
  names(t.stat) <- "t"
  names(df.t) <- "df"
  names(xbar) <- "mean of x"

  out <- list(method = "One Sample t-test",
              data.name = sprintf("\nxbar = %g, s = %g, n = %g\n", xbar, s, n),
              statistic = t.stat,
              parameter = df.t,
              p.value = p.value,
              conf.int = ci,
              estimate = xbar,
              alternative = alt.text)
  class(out) <- "htest"
  out
}
#' Correlation test for bivariate Gaussian data
#'
#' Tests H0: rho = rho0 for the correlation of two equal-length numeric
#' vectors, and inverts the test to obtain a confidence interval for rho by
#' Newton-Raphson root finding, seeded from the approximate interval returned
#' by \code{stats::cor.test}.
#'
#' @param x a numeric vector.
#' @param y a numeric vector the same length as \code{x}.
#' @param rho0 the assumed population correlation under the null hypothesis.
#' @param alternative one of "two.sided" (default), "less" or "greater".
#' @param conf.level confidence level for the interval estimator.
#' @param exact if TRUE use \code{pcorr}, otherwise \code{pcorr.fisher}.
#'   NOTE(review): both are CDF-like helpers defined elsewhere (signature
#'   \code{pc(r, rho, n, lower.tail)}); "exact sampling distribution" vs
#'   "Fisher z approximation" is inferred from the method strings below --
#'   confirm against their definitions.
#' @return An object of class \code{"htest"}.
#' @export
cor.test.exact = function(x, y, rho0 = 0,
                          alternative = c("two.sided", "less", "greater"),
                          conf.level = 0.95, exact = TRUE){
  # Select the distribution function for the sample correlation r.
  if (exact == TRUE){
    pc <- pcorr
  }else{
    pc <- pcorr.fisher
  }
  # A length-3 vector means the caller left the default in place.
  if (length(alternative) == 3){
    alternative = "two.sided"
  }
  n <- length(x)
  stopifnot(length(x) == length(y))
  alpha = 1 - conf.level
  r <- cor(x, y)
  # Fudge factor here:
  # lower.eval <- -1+1e-3
  # upper.eval <- 1-1e-3
  # Approximate interval from stats::cor.test, used as Newton-Raphson
  # starting points below (replaces the old bracketed uniroot() search,
  # kept in the commented-out lines).
  approx.int <- cor.test(x, y, conf.level = conf.level, alternative = alternative)$conf.int
  if (alternative == 'two.sided'){
    # Two-sided p-value: double the smaller tail probability at rho0.
    p.value = 2*min(pc(r, rho0, n), pc(r, rho0, n, lower.tail = FALSE))
    # lb <- uniroot(function(rho) pc(r, rho, n, lower.tail = FALSE) - alpha/2, interval = c(lower.eval, upper.eval))$root
    # ub <- uniroot(function(rho) pc(r, rho, n) - alpha/2, interval = c(lower.eval, upper.eval))$root
    # Invert the test: solve tail probability = alpha/2 for rho on each side.
    # NOTE(review): newtonRaphson is presumably pracma::newtonRaphson --
    # confirm which package supplies it.
    lb <- newtonRaphson(function(rho) pc(r, rho, n, lower.tail = FALSE) - alpha/2, x0 = approx.int[1])$root
    ub <- newtonRaphson(function(rho) pc(r, rho, n) - alpha/2, x0 = approx.int[2])$root
    conf.int = c(lb, ub)
    alt.text = sprintf("true correlation is not equal to %g", rho0)
  }else if (alternative == 'less'){
    p.value = pc(r, rho0, n)
    # ub <- uniroot(function(rho) pc(r, rho, n) - alpha, interval = c(lower.eval, upper.eval))$root
    ub <- newtonRaphson(function(rho) pc(r, rho, n) - alpha, x0 = approx.int[2])$root
    conf.int = c(-1,ub)
    alt.text = sprintf("true correlation is less than %g", rho0)
  }else if (alternative == 'greater'){
    p.value = pc(r, rho0, n, lower.tail = FALSE)
    # lb <- uniroot(function(rho) pc(r, rho, n, lower.tail = FALSE) - alpha, interval = c(lower.eval, upper.eval))$root
    lb <- newtonRaphson(function(rho) pc(r, rho, n, lower.tail = FALSE) - alpha, x0 = approx.int[1])$root
    conf.int = c(lb,1)
    alt.text = sprintf("true correlation is greater than %g", rho0)
  }
  attr(conf.int, which = "conf.level") = conf.level
  if (exact == TRUE){
    method.name <- "Exact bivariate Gaussian correlation test"
  }else{
    method.name <- "Fisher's z-transform bivariate Gaussian correlation test"
  }
  names(r) <- "r"
  # Assemble a standard htest object so print/format work as usual.
  wtt <- list(method = method.name,
              data.name = paste(deparse(substitute(x)), "and", deparse(substitute(y))),
              statistic = r,
              p.value = p.value,
              conf.int = conf.int,
              estimate = r,
              alternative = alt.text)
  class(wtt) <- "htest"
  return(wtt)
}
|
16debcd8855f7230e3ded39e8bff2c49292a42cd
|
a6364e9cf520b508475803801355a9f95d15ec55
|
/line_prof_test.R
|
56c3dfab9af6dd4d16f97254ceffac19ed79895f
|
[] |
no_license
|
chiu/practice-R
|
d6da6c5b6972429d02142321e0be6313483664a8
|
bf50f616fd93d89b308f7bc40bfed637b20f19c2
|
refs/heads/master
| 2020-04-09T16:50:50.353874
| 2016-05-19T21:31:40
| 2016-05-19T21:31:40
| 51,965,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
line_prof_test.R
|
# Line-by-line profiling demo using the lineprof package
# (GitHub-only; install command kept below for reference).
# devtools::install_github("hadley/lineprof")
# devtools::install_github("hadley/lineprof")
library(lineprof)
source("profiling-example.R")  # NOTE(review): assumed to define f(); confirm
l <- lineprof(f())
# Auto-print the captured profile.
l
|
b75c5b29754ff1491648619315f6cbd7046a0b03
|
80badebbbe4bd0398cd19b7c36492f5ab0e5facf
|
/man/Polygons-class.Rd
|
4dfa4bd23063914fc585fd9636cfa2df4968f587
|
[] |
no_license
|
edzer/sp
|
12012caba5cc6cf5778dfabfc846f7bf85311f05
|
0e8312edc0a2164380592c61577fe6bc825d9cd9
|
refs/heads/main
| 2023-06-21T09:36:24.101762
| 2023-06-20T19:27:01
| 2023-06-20T19:27:01
| 48,277,606
| 139
| 44
| null | 2023-08-19T09:19:39
| 2015-12-19T10:23:36
|
R
|
UTF-8
|
R
| false
| false
| 1,865
|
rd
|
Polygons-class.Rd
|
\name{Polygons-class}
\docType{class}
\alias{Polygons-class}
\title{Class "Polygons"}
\description{ Collection of objects of class \code{"Polygon"} }
\section{Objects from the Class}{
Objects can be created by calls to the function \code{Polygons}
}
\section{Slots}{
\describe{
\item{\code{Polygons}:}{Object of class \code{"list"}; list with objects
of class \link{Polygon-class} }
\item{\code{plotOrder}:}{Object of class \code{"integer"}; order in which
the Polygon objects should be plotted, currently by order of decreasing size }
\item{\code{labpt}:}{Object of class \code{"numeric"}; pair of x, y coordinates giving a label point, the label point of the largest polygon component }
\item{\code{ID}:}{Object of class \code{"character"}; unique identifier string }
\item{\code{area}:}{Object of class \code{"numeric"}; the gross total planar area of the Polygon list but not double-counting holes (changed from 0.9-58 - islands are summed, holes are ignored rather than subtracted); these values are used to make sure that polygons of a smaller area are plotted after polygons of a larger area, does not respect projection as objects of this class have no projection defined }
}
}
\section{Methods}{
No methods defined with class "Polygons" in the signature.
}
\author{ Roger Bivand }
\note{ By default, single polygons (where Polygons is a list of length one) are not expected to be holes, but in multiple polygons, hole definitions for member polygons can be set. Polygon objects belonging to a Polygons object should either not overlap one another, or should be fully included (as lakes or islands in lakes). They should not be self-intersecting. Checking of hole FALSE/TRUE status for Polygons objects is included in the maptools package using functions in the rgeos package, function checkPolygonsHoles(). }
\keyword{classes}
|
8c7eb7ff4c44675eb1384e6f3c34078c7b472ee1
|
98005bc40a0a85089d167900f6ff1aaec6fd57f1
|
/Aplicacion_Contaminantes/server.R
|
9bc5c0b364b40ed1dd8518ce17bd8148b325373b
|
[] |
no_license
|
AdrianLandaverde/Contaminantes_en_la_CDMX
|
fb681483578715c9f9ab7bc3f6a6e16f2b094437
|
61a50cc48e1cb855f1a3ea8b7856ddc1dbe564e4
|
refs/heads/main
| 2023-06-02T08:01:18.605204
| 2021-06-23T19:08:46
| 2021-06-23T19:08:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55,253
|
r
|
server.R
|
library(shiny)
library(tidyverse)
library(dplyr, quietly = TRUE)
library(gapminder)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$regresionLinealGrafico <- renderPlot({
contaminante = "SO2"
periodo_Regresion_Lineal<- input$Periodo # dia, mes, annio
contam_csv_2021 <- "2021/promedios_2021_so2.csv"
contam_csv_2020 <- "2020/promedios_2020_so2.csv"
contam_csv_2019 <- "2019/promedios_2019_so2.csv"
contam_csv_2018 <- "2018/promedios_2018_so2.csv"
contam_csv_2017 <- "2017/promedios_2017_so2.csv"
contam_csv_2016 <- "2016/promedios_2016_so2.csv"
contam_csv_2015 <- "2015/promedios_2015_so2.csv"
contam_csv_2014 <- "2014/promedios_2014_so2.csv"
contam_csv_2013 <- "2013/promedios_2013_so2.csv"
contam_csv_2012 <- "2012/promedios_2012_so2.csv"
contam_csv_2011 <- "2011/promedios_2011_so2.csv"
contam_csv_2010 <- "2010/promedios_2010_so2.csv"
contam_csv_2009 <- "2009/promedios_2009_so2.csv"
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2021.
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2021 <- read_csv(contam_csv_2021,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2021 <- contaminantes_2021 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2020.
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2020 <- read_csv(contam_csv_2020,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2020 <- contaminantes_2020 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2019
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2019 <- read_csv(contam_csv_2019,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2019 <- contaminantes_2019 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2018
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2018 <- read_csv(contam_csv_2018,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2018 <- contaminantes_2018 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2017
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2017 <- read_csv(contam_csv_2017,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2017 <- contaminantes_2017 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2016
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2016 <- read_csv(contam_csv_2016,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2016 <- contaminantes_2016 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2015
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2015 <- read_csv(contam_csv_2015,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2015 <- contaminantes_2015 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2014
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2014 <- read_csv(contam_csv_2014,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2014 <- contaminantes_2014 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2013
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2013 <- read_csv(contam_csv_2013,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2013 <- contaminantes_2013 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2012
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2012 <- read_csv(contam_csv_2012,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2012 <- contaminantes_2012 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2011
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2011 <- read_csv(contam_csv_2011,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2011 <- contaminantes_2011 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2010
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2010 <- read_csv(contam_csv_2010,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2010 <- contaminantes_2010 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2009
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2009 <- read_csv(contam_csv_2009,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2009 <- contaminantes_2009 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# JUNTAR TODOS
#--------------------------------------------------------------------------
contaminantes_2020 %>%
# union_all(contaminantes_2020) %>%
union_all(contaminantes_2019) %>%
union_all(contaminantes_2018) %>%
union_all(contaminantes_2017) %>%
union_all(contaminantes_2016) %>%
union_all(contaminantes_2015) %>%
union_all(contaminantes_2014) %>%
union_all(contaminantes_2013) %>%
union_all(contaminantes_2012) %>%
union_all(contaminantes_2011) %>%
union_all(contaminantes_2010) %>%
union_all(contaminantes_2009) -> df_todos
#----------------------------------------------------------------------------
#*********************-ARREGLAR FECHA****************************************
#Quitar datos con fecha erronea
df_todos<- df_todos[substring(df_todos$date,3,3)=="/", ]
df_todos_RL<- df_todos
if(periodo_Regresion_Lineal== "annio"){
df_todos_RL$date<- paste("01/01/",substring(df_todos_RL$date,7,8),sep = "")
}else if(periodo_Regresion_Lineal=="mes"){
df_todos_RL$date<- paste("01/",substring(df_todos_RL$date,4,8),sep = "")
}else if(periodo_Regresion_Lineal=="dia"){
df_todos_RL$date<- substring(df_todos_RL$date,1,8)
}
df_todos_RL$date<- as.Date(df_todos_RL$date, format="%d/%m/%y")
df_todos %>%
group_by(id_station) %>%
tally()
id_station <- df_todos %>%
group_by(id_station) %>% tally()
estacion=input$Estacion
if(estacion==1){
df_AJM_RL <- df_todos_RL[df_todos_RL$id_station=="AJM", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA AJM
#--------------------------------------------------------------------------
df_AJM_RL<- select(df_AJM_RL, date, partes_por_billon)
df_AJM_RL %>%
group_by(date) %>%
tally()
df_AJM_RL%>%group_by(date)%>%
summarise_all(mean)-> df_AJM_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL AJM
#--------------------------------------------------------------------------
regresion=lm(df_AJM_RL$partes_por_billon~df_AJM_RL$date, data=df_AJM_RL)
summary(regresion)
df_AJM_RL<- cbind(df_AJM_RL,predict(regresion))
ggplot(df_AJM_RL, aes(x=date, y=partes_por_billon)) +
geom_smooth(method="lm", se=FALSE, color="lightgrey") +
geom_segment(aes(xend=date, yend=`predict(regresion)`), col='red', lty='dashed') +
geom_point() +
geom_point(aes(y=`predict(regresion)`), col='red') +
theme_light() +
ggtitle ("Estación AJM: Concentración del dióxido de azufre.") + # Título del gráfico.
theme(plot.title = element_text(hjust = 0.5)) + # Centrar título
labs(x = "Año",
y = "Concentración SO2, ppb")
}else if(estacion==2){
df_CUT_RL <- df_todos_RL[df_todos_RL$id_station=="CUT", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA CUT
#--------------------------------------------------------------------------
df_CUT_RL<- select(df_CUT_RL, date, partes_por_billon)
df_CUT_RL %>%
group_by(date) %>%
tally()
df_CUT_RL%>%group_by(date)%>%
summarise_all(mean)-> df_CUT_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL CUT
#--------------------------------------------------------------------------
regresion=lm(df_CUT_RL$partes_por_billon~df_CUT_RL$date, data=df_CUT_RL)
summary(regresion)
df_CUT_RL<- cbind(df_CUT_RL,predict(regresion))
ggplot(df_CUT_RL, aes(x=date, y=partes_por_billon)) +
geom_smooth(method="lm", se=FALSE, color="lightgrey") +
geom_segment(aes(xend=date, yend=`predict(regresion)`), col='red', lty='dashed') +
geom_point() +
geom_point(aes(y=`predict(regresion)`), col='red') +
theme_light() +
ggtitle ("Estación CUT: Concentración del dióxido de azufre.") + # Título del gráfico.
theme(plot.title = element_text(hjust = 0.5)) + # Centrar título
labs(x = "Año",
y = "Concentración SO2, ppb")
}else if(estacion==3){
df_HGM_RL <- df_todos_RL[df_todos_RL$id_station=="HGM", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA HGM
#--------------------------------------------------------------------------
df_HGM_RL<- select(df_HGM_RL, date, partes_por_billon)
df_HGM_RL %>%
group_by(date) %>%
tally()
df_HGM_RL%>%group_by(date)%>%
summarise_all(mean)-> df_HGM_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL HGM
#--------------------------------------------------------------------------
regresion=lm(df_HGM_RL$partes_por_billon~df_HGM_RL$date, data=df_HGM_RL)
summary(regresion)
df_HGM_RL<- cbind(df_HGM_RL,predict(regresion))
ggplot(df_HGM_RL, aes(x=date, y=partes_por_billon)) +
geom_smooth(method="lm", se=FALSE, color="lightgrey") +
geom_segment(aes(xend=date, yend=`predict(regresion)`), col='red', lty='dashed') +
geom_point() +
geom_point(aes(y=`predict(regresion)`), col='red') +
theme_light() +
ggtitle ("Estación HGM: Concentración del dióxido de azufre.") + # Título del gráfico.
theme(plot.title = element_text(hjust = 0.5)) + # Centrar título
labs(x = "Año",
y = "Concentración SO2, ppb")
}else if(estacion==4){
df_INN_RL <- df_todos_RL[df_todos_RL$id_station=="INN", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA INN
#--------------------------------------------------------------------------
df_INN_RL<- select(df_INN_RL, date, partes_por_billon)
df_INN_RL %>%
group_by(date) %>%
tally()
df_INN_RL%>%group_by(date)%>%
summarise_all(mean)-> df_INN_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL INN
#--------------------------------------------------------------------------
regresion=lm(df_INN_RL$partes_por_billon~df_INN_RL$date, data=df_INN_RL)
summary(regresion)
df_INN_RL<- cbind(df_INN_RL,predict(regresion))
ggplot(df_INN_RL, aes(x=date, y=partes_por_billon)) +
geom_smooth(method="lm", se=FALSE, color="lightgrey") +
geom_segment(aes(xend=date, yend=`predict(regresion)`), col='red', lty='dashed') +
geom_point() +
geom_point(aes(y=`predict(regresion)`), col='red') +
theme_light() +
ggtitle ("Estación INN: Concentración del dióxido de azufre.") + # Título del gráfico.
theme(plot.title = element_text(hjust = 0.5)) + # Centrar título
labs(x = "Año",
y = "Concentración SO2, ppb")
}else if(estacion==5){
df_MER_RL <- df_todos_RL[df_todos_RL$id_station=="MER", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA MER
#--------------------------------------------------------------------------
df_MER_RL<- select(df_MER_RL, date, partes_por_billon)
df_MER_RL %>%
group_by(date) %>%
tally()
df_MER_RL%>%group_by(date)%>%
summarise_all(mean)-> df_MER_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL MER
#--------------------------------------------------------------------------
regresion=lm(df_MER_RL$partes_por_billon~df_MER_RL$date, data=df_MER_RL)
summary(regresion)
df_MER_RL<- cbind(df_MER_RL,predict(regresion))
ggplot(df_MER_RL, aes(x=date, y=partes_por_billon)) +
geom_smooth(method="lm", se=FALSE, color="lightgrey") +
geom_segment(aes(xend=date, yend=`predict(regresion)`), col='red', lty='dashed') +
geom_point() +
geom_point(aes(y=`predict(regresion)`), col='red') +
theme_light() +
ggtitle ("Estación MER: Concentración del dióxido de azufre.") + # Título del gráfico.
theme(plot.title = element_text(hjust = 0.5)) + # Centrar título
labs(x = "Año",
y = "Concentración SO2, ppb")
}else if(estacion==6){
df_MON_RL <- df_todos_RL[df_todos_RL$id_station=="MON", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA MON
#--------------------------------------------------------------------------
df_MON_RL<- select(df_MON_RL, date, partes_por_billon)
df_MON_RL %>%
group_by(date) %>%
tally()
df_MON_RL%>%group_by(date)%>%
summarise_all(mean)-> df_MON_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL MON
#--------------------------------------------------------------------------
regresion=lm(df_MON_RL$partes_por_billon~df_MON_RL$date, data=df_MON_RL)
summary(regresion)
df_MON_RL<- cbind(df_MON_RL,predict(regresion))
ggplot(df_MON_RL, aes(x=date, y=partes_por_billon)) +
geom_smooth(method="lm", se=FALSE, color="lightgrey") +
geom_segment(aes(xend=date, yend=`predict(regresion)`), col='red', lty='dashed') +
geom_point() +
geom_point(aes(y=`predict(regresion)`), col='red') +
theme_light() +
ggtitle ("Estación MON: Concentración del dióxido de azufre.") + # Título del gráfico.
theme(plot.title = element_text(hjust = 0.5)) + # Centrar título
labs(x = "Año",
y = "Concentración SO2, ppb")
}else if(estacion==7){
df_MPA_RL <- df_todos_RL[df_todos_RL$id_station=="MPA", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA MPA
#--------------------------------------------------------------------------
df_MPA_RL<- select(df_MPA_RL, date, partes_por_billon)
df_MPA_RL %>%
group_by(date) %>%
tally()
df_MPA_RL%>%group_by(date)%>%
summarise_all(mean)-> df_MPA_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL MPA
#--------------------------------------------------------------------------
regresion=lm(df_MPA_RL$partes_por_billon~df_MPA_RL$date, data=df_MPA_RL)
summary(regresion)
df_MPA_RL<- cbind(df_MPA_RL,predict(regresion))
ggplot(df_MPA_RL, aes(x=date, y=partes_por_billon)) +
geom_smooth(method="lm", se=FALSE, color="lightgrey") +
geom_segment(aes(xend=date, yend=`predict(regresion)`), col='red', lty='dashed') +
geom_point() +
geom_point(aes(y=`predict(regresion)`), col='red') +
theme_light() +
ggtitle ("Estación MPA: Concentración del dióxido de azufre.") + # Título del gráfico.
theme(plot.title = element_text(hjust = 0.5)) + # Centrar título
labs(x = "Año",
y = "Concentración SO2, ppb")
}else if(estacion==8){
df_TLA_RL <- df_todos_RL[df_todos_RL$id_station=="TLA", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA TLA
#--------------------------------------------------------------------------
df_TLA_RL<- select(df_TLA_RL, date, partes_por_billon)
df_TLA_RL %>%
group_by(date) %>%
tally()
df_TLA_RL%>%group_by(date)%>%
summarise_all(mean)-> df_TLA_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL TLA
#--------------------------------------------------------------------------
regresion=lm(df_TLA_RL$partes_por_billon~df_TLA_RL$date, data=df_TLA_RL)
summary(regresion)
df_TLA_RL<- cbind(df_TLA_RL,predict(regresion))
ggplot(df_TLA_RL, aes(x=date, y=partes_por_billon)) +
geom_smooth(method="lm", se=FALSE, color="lightgrey") +
geom_segment(aes(xend=date, yend=`predict(regresion)`), col='red', lty='dashed') +
geom_point() +
geom_point(aes(y=`predict(regresion)`), col='red') +
theme_light() +
ggtitle ("Estación TLA: Concentración del dióxido de azufre.") + # Título del gráfico.
theme(plot.title = element_text(hjust = 0.5)) + # Centrar título
labs(x = "Año",
y = "Concentración SO2, ppb")
}else if(estacion==9){
df_XAL_RL <- df_todos_RL[df_todos_RL$id_station=="XAL", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA XAL
#--------------------------------------------------------------------------
df_XAL_RL<- select(df_XAL_RL, date, partes_por_billon)
df_XAL_RL %>%
group_by(date) %>%
tally()
df_XAL_RL%>%group_by(date)%>%
summarise_all(mean)-> df_XAL_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL XAL
#--------------------------------------------------------------------------
regresion=lm(df_XAL_RL$partes_por_billon~df_XAL_RL$date, data=df_XAL_RL)
summary(regresion)
df_XAL_RL<- cbind(df_XAL_RL,predict(regresion))
ggplot(df_XAL_RL, aes(x=date, y=partes_por_billon)) +
geom_smooth(method="lm", se=FALSE, color="lightgrey") +
geom_segment(aes(xend=date, yend=`predict(regresion)`), col='red', lty='dashed') +
geom_point() +
geom_point(aes(y=`predict(regresion)`), col='red') +
theme_light() +
ggtitle ("Estación MPA: Concentración del dióxido de azufre.") + # Título del gráfico.
theme(plot.title = element_text(hjust = 0.5)) + # Centrar título
labs(x = "Año",
y = "Concentración SO2, ppb")
}
})
output$resumen <- renderPrint({
contaminante = "SO2"
periodo_Regresion_Lineal<- input$Periodo # dia, mes, annio
contam_csv_2021 <- "2021/promedios_2021_so2.csv"
contam_csv_2020 <- "2020/promedios_2020_so2.csv"
contam_csv_2019 <- "2019/promedios_2019_so2.csv"
contam_csv_2018 <- "2018/promedios_2018_so2.csv"
contam_csv_2017 <- "2017/promedios_2017_so2.csv"
contam_csv_2016 <- "2016/promedios_2016_so2.csv"
contam_csv_2015 <- "2015/promedios_2015_so2.csv"
contam_csv_2014 <- "2014/promedios_2014_so2.csv"
contam_csv_2013 <- "2013/promedios_2013_so2.csv"
contam_csv_2012 <- "2012/promedios_2012_so2.csv"
contam_csv_2011 <- "2011/promedios_2011_so2.csv"
contam_csv_2010 <- "2010/promedios_2010_so2.csv"
contam_csv_2009 <- "2009/promedios_2009_so2.csv"
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2021.
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2021 <- read_csv(contam_csv_2021,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2021 <- contaminantes_2021 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2020.
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2020 <- read_csv(contam_csv_2020,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2020 <- contaminantes_2020 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2019
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2019 <- read_csv(contam_csv_2019,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2019 <- contaminantes_2019 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2018
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2018 <- read_csv(contam_csv_2018,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2018 <- contaminantes_2018 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2017
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2017 <- read_csv(contam_csv_2017,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2017 <- contaminantes_2017 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2016
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2016 <- read_csv(contam_csv_2016,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2016 <- contaminantes_2016 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2015
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2015 <- read_csv(contam_csv_2015,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2015 <- contaminantes_2015 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2014
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2014 <- read_csv(contam_csv_2014,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2014 <- contaminantes_2014 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2013
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2013 <- read_csv(contam_csv_2013,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2013 <- contaminantes_2013 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2012
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2012 <- read_csv(contam_csv_2012,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2012 <- contaminantes_2012 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2011
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2011 <- read_csv(contam_csv_2011,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2011 <- contaminantes_2011 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2010
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2010 <- read_csv(contam_csv_2010,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2010 <- contaminantes_2010 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# LECTURA DE LOS DATOS DEL 2009
#--------------------------------------------------------------------------
# Contaminantes.
contaminantes_2009 <- read_csv(contam_csv_2009,
col_types = cols(
date = col_character(),
id_station = col_character(),
id_parameter = col_character(),
value = col_double(),
unit = col_double())
)
contaminantes_2009 <- contaminantes_2009 %>%
filter(id_parameter==contaminante) %>%
dplyr::select(date,id_station, id_parameter, value, unit) %>%
rename(partes_por_billon=value) %>%
drop_na() %>%
select(-c(unit))
#--------------------------------------------------------------------------
# JUNTAR TODOS
#--------------------------------------------------------------------------
contaminantes_2020 %>%
# union_all(contaminantes_2020) %>%
union_all(contaminantes_2019) %>%
union_all(contaminantes_2018) %>%
union_all(contaminantes_2017) %>%
union_all(contaminantes_2016) %>%
union_all(contaminantes_2015) %>%
union_all(contaminantes_2014) %>%
union_all(contaminantes_2013) %>%
union_all(contaminantes_2012) %>%
union_all(contaminantes_2011) %>%
union_all(contaminantes_2010) %>%
union_all(contaminantes_2009) -> df_todos
#----------------------------------------------------------------------------
#*********************-ARREGLAR FECHA****************************************
#Quitar datos con fecha erronea
df_todos<- df_todos[substring(df_todos$date,3,3)=="/", ]
df_todos_RL<- df_todos
if(periodo_Regresion_Lineal== "annio"){
df_todos_RL$date<- paste("01/01/",substring(df_todos_RL$date,7,8),sep = "")
}else if(periodo_Regresion_Lineal=="mes"){
df_todos_RL$date<- paste("01/",substring(df_todos_RL$date,4,8),sep = "")
}else if(periodo_Regresion_Lineal=="dia"){
df_todos_RL$date<- substring(df_todos_RL$date,1,8)
}
df_todos_RL$date<- as.Date(df_todos_RL$date, format="%d/%m/%y")
df_todos %>%
group_by(id_station) %>%
tally()
id_station <- df_todos %>%
group_by(id_station) %>% tally()
estacion=input$Estacion
if(estacion==1){
df_AJM_RL <- df_todos_RL[df_todos_RL$id_station=="AJM", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA AJM
#--------------------------------------------------------------------------
df_AJM_RL<- select(df_AJM_RL, date, partes_por_billon)
df_AJM_RL %>%
group_by(date) %>%
tally()
df_AJM_RL%>%group_by(date)%>%
summarise_all(mean)-> df_AJM_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL AJM
#--------------------------------------------------------------------------
regresion=lm(df_AJM_RL$partes_por_billon~df_AJM_RL$date, data=df_AJM_RL)
summary(regresion)
}else if(estacion==2){
df_CUT_RL <- df_todos_RL[df_todos_RL$id_station=="CUT", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA CUT
#--------------------------------------------------------------------------
df_CUT_RL<- select(df_CUT_RL, date, partes_por_billon)
df_CUT_RL %>%
group_by(date) %>%
tally()
df_CUT_RL%>%group_by(date)%>%
summarise_all(mean)-> df_CUT_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL CUT
#--------------------------------------------------------------------------
regresion=lm(df_CUT_RL$partes_por_billon~df_CUT_RL$date, data=df_CUT_RL)
summary(regresion)
}else if(estacion==3){
df_HGM_RL <- df_todos_RL[df_todos_RL$id_station=="HGM", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA HGM
#--------------------------------------------------------------------------
df_HGM_RL<- select(df_HGM_RL, date, partes_por_billon)
df_HGM_RL %>%
group_by(date) %>%
tally()
df_HGM_RL%>%group_by(date)%>%
summarise_all(mean)-> df_HGM_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL HGM
#--------------------------------------------------------------------------
regresion=lm(df_HGM_RL$partes_por_billon~df_HGM_RL$date, data=df_HGM_RL)
summary(regresion)
}else if(estacion==4){
df_INN_RL <- df_todos_RL[df_todos_RL$id_station=="INN", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA INN
#--------------------------------------------------------------------------
df_INN_RL<- select(df_INN_RL, date, partes_por_billon)
df_INN_RL %>%
group_by(date) %>%
tally()
df_INN_RL%>%group_by(date)%>%
summarise_all(mean)-> df_INN_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL INN
#--------------------------------------------------------------------------
regresion=lm(df_INN_RL$partes_por_billon~df_INN_RL$date, data=df_INN_RL)
summary(regresion)
}else if(estacion==5){
df_MER_RL <- df_todos_RL[df_todos_RL$id_station=="MER", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA MER
#--------------------------------------------------------------------------
df_MER_RL<- select(df_MER_RL, date, partes_por_billon)
df_MER_RL %>%
group_by(date) %>%
tally()
df_MER_RL%>%group_by(date)%>%
summarise_all(mean)-> df_MER_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL MER
#--------------------------------------------------------------------------
regresion=lm(df_MER_RL$partes_por_billon~df_MER_RL$date, data=df_MER_RL)
summary(regresion)
}else if(estacion==6){
df_MON_RL <- df_todos_RL[df_todos_RL$id_station=="MON", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA MON
#--------------------------------------------------------------------------
df_MON_RL<- select(df_MON_RL, date, partes_por_billon)
df_MON_RL %>%
group_by(date) %>%
tally()
df_MON_RL%>%group_by(date)%>%
summarise_all(mean)-> df_MON_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL MON
#--------------------------------------------------------------------------
regresion=lm(df_MON_RL$partes_por_billon~df_MON_RL$date, data=df_MON_RL)
summary(regresion)
}else if(estacion==7){
df_MPA_RL <- df_todos_RL[df_todos_RL$id_station=="MPA", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA MPA
#--------------------------------------------------------------------------
df_MPA_RL<- select(df_MPA_RL, date, partes_por_billon)
df_MPA_RL %>%
group_by(date) %>%
tally()
df_MPA_RL%>%group_by(date)%>%
summarise_all(mean)-> df_MPA_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL MPA
#--------------------------------------------------------------------------
regresion=lm(df_MPA_RL$partes_por_billon~df_MPA_RL$date, data=df_MPA_RL)
summary(regresion)
}else if(estacion==8){
df_TLA_RL <- df_todos_RL[df_todos_RL$id_station=="TLA", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA TLA
#--------------------------------------------------------------------------
df_TLA_RL<- select(df_TLA_RL, date, partes_por_billon)
df_TLA_RL %>%
group_by(date) %>%
tally()
df_TLA_RL%>%group_by(date)%>%
summarise_all(mean)-> df_TLA_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL TLA
#--------------------------------------------------------------------------
regresion=lm(df_TLA_RL$partes_por_billon~df_TLA_RL$date, data=df_TLA_RL)
summary(regresion)
}else if(estacion==9){
df_XAL_RL <- df_todos_RL[df_todos_RL$id_station=="XAL", ]
#--------------------------------------------------------------------------
# PROMEDIAR POR FECHA XAL
#--------------------------------------------------------------------------
df_XAL_RL<- select(df_XAL_RL, date, partes_por_billon)
df_XAL_RL %>%
group_by(date) %>%
tally()
df_XAL_RL%>%group_by(date)%>%
summarise_all(mean)-> df_XAL_RL
#--------------------------------------------------------------------------
# REGRESIÓN LINEAL XAL
#--------------------------------------------------------------------------
regresion=lm(df_XAL_RL$partes_por_billon~df_XAL_RL$date, data=df_XAL_RL)
summary(regresion)
}
})
})
|
e00cdbd98817a422cef49024933c5e492eb89d41
|
5fb4e9f81f7bb146ec228000245025717e54b776
|
/Scripts/Area_information_species.R
|
4e3067888cc3a79448042a57399528640a757233
|
[] |
no_license
|
alipal89/Chapter_1_analysis
|
3b23c8905ebb4cf8d3fe62d7e769a5703751cf68
|
98eedcf87e20fc62a706f510eae1cdee30a9a317
|
refs/heads/master
| 2021-01-13T02:50:05.616648
| 2016-12-22T15:08:47
| 2016-12-22T15:08:47
| 77,153,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,723
|
r
|
Area_information_species.R
|
############### Calculate the area occupied by each species:
############### annual area plus hypothetical breeding / non-breeding areas.
rm(list = ls())  # NOTE(review): wipes the workspace -- only safe as a standalone script
setwd("C:/Alison/Chapter 1/Analysis")  # NOTE(review): machine-specific path

# Species occurrences for all species
# (one row per species x grid cell x range type).
species_occ <- read.table("AllGridded_FDB_CPH_filtered.txt", sep = "\t", header = TRUE)
splist <- unique(species_occ$IOC3_1_Binomial)

# Area summary for a single species.
# As in the original: "breeding" occurrences are counted as Range_type !=
# "non-breeding" (and vice versa), so e.g. "resident" rows count towards both
# seasons, and Hypo_br_area filters Range_type != "breeding" while Hypo_wi_area
# filters != "non-breeding". The original author flagged this logic with
# "NEED TO CHECK"; it is reproduced unchanged here -- TODO confirm.
area_one_species <- function(sp) {
  occ <- species_occ[species_occ$IOC3_1_Binomial == sp, ]
  br_occ <- length(which(occ$Range_type != "non-breeding"))
  wi_occ <- length(which(occ$Range_type != "breeding"))
  # Hypothetical seasonal areas: unique occupied grid cells * 2.
  hypo_br <- length(unique(occ$WM_ID[which(occ$Range_type != "breeding")])) * 2
  hypo_wi <- length(unique(occ$WM_ID[which(occ$Range_type != "non-breeding")])) * 2
  data.frame(Species = sp,
             Area = br_occ + wi_occ,
             Hypo_br_area = hypo_br,
             Hypo_wi_area = hypo_wi)
}

# Build the whole table in one pass instead of growing it with rbind() in a
# loop (the original pattern copies the accumulated frame on every iteration).
Annual_area_all <- do.call(rbind, lapply(splist, area_one_species))

# Attach the migration information and save.
Months_clade <- read.table("Months_clade_migration.txt", sep = "\t", header = TRUE)
Months_clade_migration_area <- merge(Months_clade, Annual_area_all)
head(Months_clade_migration_area)
write.table(Months_clade_migration_area, "Months_clade_migration_area.txt",
            sep = "\t", col.names = TRUE, row.names = FALSE)
|
731834459a2cf7922cd1491b719ffab5b89cb1b9
|
9822a9ade61745cdee439169c3533ce74f46b873
|
/R/Old/Explore/Analysis1_summary_stats.R
|
14f5f0fa78ad294312266f075c016e068f3da9eb
|
[] |
no_license
|
MatthewKatzen/NEM_Battery
|
97c6bea3aad4897ff3aa0b4894f4f70171549c93
|
18fe985f872933970cafb2a539780c93e346749d
|
refs/heads/master
| 2023-02-07T11:00:13.521649
| 2021-01-04T08:01:09
| 2021-01-04T08:01:09
| 267,240,061
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,257
|
r
|
Analysis1_summary_stats.R
|
# Analysis1: "buy low sell high" summary statistics and per-unit alpha/beta
# regressions of local marginal price (lmp) against the regional price (rrp).

# Load packages ----
library(tidyverse)
library(tidyr)
library(lubridate)
library(data.table)
library(janitor)
library(readxl)
library(writexl)
library(xtable)
library(nlme)
Sys.setenv(TZ = "UTC")

# Load data ----
# NOTE(review): the original selected only settlementdate/duid/lmp/rrp here,
# yet grouped by fuel_type and averaged rrp30 below -- those columns would be
# missing at that point. fuel_type and rrp30 are therefore retained; confirm
# both exist in full_lmp.csv.
full_data <- fread("D:/Battery/Data/full_lmp.csv") %>%
  mutate(settlementdate = ymd_hms(settlementdate)) %>%
  select(settlementdate, duid, fuel_type, lmp, rrp, rrp30)

generator_details <- fread("D:/NEM_LMP/Data/Raw/generator_details_cleaned.csv") %>%
  select(-c(loss_factor, emission_factor, participant)) %>%
  filter(schedule_type != "Non-scheduled")

# Summary stats ----
summary_stats <- full_data %>%
  group_by(duid, fuel_type) %>%
  summarise(mean_lmp = mean(lmp),
            mean_rrp = mean(rrp),
            mean_rrp_30 = mean(rrp30)) %>%
  ungroup() %>%
  mutate(mean_lmp_zscore = (mean_lmp - mean(mean_lmp)) / sd(mean_lmp),
         mean_lmp_percentrank = percent_rank(mean_lmp))
fwrite(summary_stats, "D:/Battery/Data/summary_stats.csv")

# Station coordinates from AREMI ----
latlon <- read.csv("https://services.aremi.data61.io/aemo/v6/csv/all",
                   stringsAsFactors = FALSE) %>%
  clean_names() %>%
  select(station_name, region, duid, lat, lon, dispatch_type, classification) %>%
  #filter(classification %in% c("Scheduled", "Semi-Scheduled")) %>%
  mutate(duid = ifelse(duid == "ARFW1", "ARWF1", duid))  # duid mislabel fix

summary_stats_latlon <- summary_stats %>% left_join(latlon, by = "duid")
fwrite(summary_stats_latlon, "D:/Battery/Output/summary_stats_latlon.csv")

# Regressions: lmp ~ rrp fitted separately for each duid ----
reg <- full_data %>%
  group_by(duid) %>%
  nest() %>%
  mutate(model = map(data, ~ lm(lmp ~ rrp, data = .x)))

# Extract intercept (alpha) and slope (beta) from each fitted model.
# map_dbl() replaces the original model[[n()]] %>% summary() trick, which only
# worked because each group held exactly one model.
reg_coeffs <- reg %>%
  mutate(alpha = map_dbl(model, ~ coef(.x)[[1]]),
         beta  = map_dbl(model, ~ coef(.x)[[2]])) %>%
  select(duid, alpha, beta)

# Histograms of the fitted coefficients ----
# ggsave() is called on the plot object explicitly; the original added
# `+ ggsave(...)` to the plot, which is not a valid ggplot layer.
p_beta <- reg_coeffs %>%
  pivot_longer(cols = c(alpha, beta)) %>%
  filter(name == "beta") %>%
  ggplot(aes(x = value)) +
  geom_histogram() +
  facet_wrap(~name)
ggsave("Output/beta.png", plot = p_beta)

p_alpha <- reg_coeffs %>%
  pivot_longer(cols = c(alpha, beta)) %>%
  filter(name == "alpha") %>%
  ggplot(aes(x = value)) +
  geom_histogram() +
  facet_wrap(~name)
ggsave("Output/alpha.png", plot = p_alpha)

reg_coeffs %>%
  left_join(generator_details, by = "duid") %>%
  fwrite("Output/coeffs.csv")
|
7ccf611e1bd923fdc4ea22d23195c386cd8be016
|
7c3f2e88c0273324f42c5175795931c668631520
|
/R/cls_ecog_repo.R
|
a852f3f2a2fe6df3f62bee673561b9c2f7992e94
|
[] |
no_license
|
dipterix/rave
|
ddf843b81d784a825acf8fe407c6169c52af5e3e
|
fe9e298fc9f740a70f96359d87987f8807fbd6f3
|
refs/heads/master
| 2020-06-26T19:52:26.927042
| 2019-07-30T22:47:36
| 2019-07-30T22:47:36
| 110,286,235
| 0
| 0
| null | 2018-06-20T03:18:47
| 2017-11-10T19:45:40
|
R
|
UTF-8
|
R
| false
| false
| 17,316
|
r
|
cls_ecog_repo.R
|
#' Baseline signals against a pre-stimulus time window
#'
#' Normalizes a Tensor's values against the signal collapsed within the time
#' window \code{[from, to]} along the "Time" dimension.
#'
#' @param el Tensor or ECoGTensor object; must have a "Time" dimension
#' @param from baseline window start time
#' @param to baseline window end time
#' @param method collapse method for the baseline window: "mean" or "median"
#' @param unit "\%" percent signal change, "dB" decibel change, or "C" to
#'   customize via \code{preop}/\code{op}
#' @param data_only if TRUE return the raw array, otherwise wrap in ECoGTensor
#' @param hybrid when returning a tensor, keep data swapped on disk
#'   (useful for large datasets)
#' @param swap_file cache file for the hybrid tensor; defaults to a tempfile
#' @param mem_optimize passed through to \code{el$operate}; optimize for large data
#' @param preop optional function applied to the baseline window before collapsing
#'   (only used when \code{unit == 'C'})
#' @param op baselining function of two arguments (full data, collapsed baseline);
#'   required when \code{unit == 'C'}, overwritten internally otherwise
#' @export
baseline <- function(el, from, to, method = 'mean', unit = '%',
data_only = F, hybrid = T, swap_file = tempfile(), mem_optimize = T, preop = NULL, op){
# Legacy call style: when `el` is missing, fall back to the global data repository.
if(missing(el)){
logger('baseline(el...) is changed now. Please update.', level = 'WARNING')
module_tools = get('module_tools', envir = getDefaultDataRepository())
el = module_tools$get_power()
}
assert_that(is(el, 'Tensor'), msg = 'el must be an Tensor object.')
assert_that('Time' %in% el$varnames, msg = 'Need one dimname to be "Time".')
assert_that(unit %in% c('dB', '%', 'C'), msg = 'unit must be %-percent signal change or dB-dB difference, or C to customize')
# All dimensions except "Time" are kept when collapsing the baseline window.
time_ind = which(el$varnames == 'Time')
rest_dim = seq_along(el$dim)[-time_ind]
if(unit == 'dB'){
# NOTE(review): the braced `{Time = ...}` form differs from the named-argument
# form used in the other branches; presumably both are accepted by the
# Tensor$subset lazy-evaluation interface -- confirm.
bs = el$subset({Time = Time %within% c(from, to)})
# log 10 of data, collapse by mean
bs$set_data(log10(bs$get_data()))
# Use the keep/method form of collapse to avoid crashing on large arrays
bs = bs$collapse(keep = rest_dim, method = method)
# for each time point: 10 * (log10(signal) - collapsed log10 baseline) = dB change
op = function(e1, e2){ 10 * (log10(e1) - e2) }
bs = el$operate(by = bs, match_dim = rest_dim, mem_optimize = mem_optimize, fun = op)
}else if(unit == '%'){
bs = el$subset(Time = Time %within% c(from, to))
# percent signal change relative to the collapsed baseline window
op = function(e1, e2){ e1 / e2 * 100 - 100 }
bs = bs$collapse(keep = rest_dim, method = method)
bs = el$operate(by = bs, match_dim = rest_dim, mem_optimize = mem_optimize, fun = op)
}else{
# customizing: caller supplies `op` (and optionally `preop`)
bs = el$subset(Time = Time %within% c(from, to))
if(is.function(preop)){
bs$set_data(preop(bs$get_data()))
}
bs = bs$collapse(keep = rest_dim, method = method)
bs = el$operate(by = bs, match_dim = rest_dim, mem_optimize = mem_optimize, fun = op)
}
if(data_only){
return(bs)
}else{
# Wrap the baselined array back into an ECoGTensor with the original dimensions.
ECoGTensor$new(data = bs, dim = dim(el), dimnames = dimnames(el), varnames = el$varnames, hybrid = hybrid, swap_file = swap_file)
}
}
#' R6 class for ECoG data Repository
#'
#' Holds a Subject together with its per-electrode data (Map containers for
#' raw electrodes and reference signals) plus the epoched tensors
#' (power/phase/voltage, referenced and raw). Non-portable and non-cloneable:
#' methods access fields directly and instances act as per-subject singletons.
#' @export
ECoGRepository <- R6::R6Class(
classname = 'ECoGRepository',
portable = FALSE,
cloneable = FALSE,
public = list(
# Subject instance this repository belongs to
subject = NULL,
raw = NULL, # Map to store electrodes
reference = NULL, # Map to store references
epochs = NULL, # map to store epoch infos
# Epoched tensors: raw_* hold unreferenced data, the rest referenced data
raw_volt = NULL,
raw_power = NULL,
raw_phase = NULL,
volt = NULL,
power = NULL,
phase = NULL,
# Summarize repository state (subject id, epoch settings, reference table,
# which tensors are loaded). Optionally prints; returns the pieces invisibly.
info = function(print = TRUE){
id = self$subject$subject_id
epoch_info = self$epochs$get('epoch_name')
if(length(epoch_info)){
epoch_param = self$epochs$get('epoch_params');
epoch_info = paste0(
'Epoch: ' , epoch_info , '\n' ,
' - Electrodes: ' , deparse_selections(self$epochs$get('electrodes')) , '\n' ,
sprintf(' - From %.2f to %.2f (sec)\n', -epoch_param[1], epoch_param[2])
)
}else{
epoch_info = '(Not epoched yet.)\n'
}
ref_name = self$reference$get('.reference_name')
# %?<-% assigns only when the left-hand side is NULL (fallback label)
ref_name %?<-% '(No reference table)'
ref_name = paste0('Reference table: ' , ref_name)
refed = self$reference$get('.is_referenced')
# NOTE(review): `self$coef` is not declared among the public fields above --
# presumably assigned elsewhere; confirm it exists before this branch matters.
if(!is.null(self$coef)){
wave_info = sprintf('Wavelet coefficients: Loaded (%s)', ifelse(refed$spectrum, 'Referenced', 'Raw'))
}else{
wave_info = 'Wavelet coefficients: Not loaded'
}
if(!is.null(self$volt)){
volt_info = sprintf('Voltage signal: Loaded (%s)', ifelse(refed$voltage, 'Referenced', 'Raw'))
}else{
volt_info = 'Voltage signal: Not loaded'
}
if(print){
cat(sprintf('<ECoG Repository> [%s]\n\n%s\n%s\n%s\n%s\n', id, epoch_info, ref_name, wave_info, volt_info))
}
invisible(list(
id = id, epoch_info = epoch_info, ref_name = ref_name, wave_info = wave_info, volt_info = volt_info
))
},
# Minimal print method: only shows the environment address, kept this way
# to stay compatible with the globals package.
print = function(...){
cat(env_address(self))
invisible()
},
# Construct from a Subject instance or a "project/subject" id string, and
# optionally autoload all valid electrodes with the given reference scheme.
initialize = function(subject, reference = 'default', autoload = T){
logger('Initializing a Data Repository')
self$raw = Map$new()
self$reference = Map$new()
self$epochs = Map$new()
if(R6::is.R6(subject) && 'Subject' %in% class(subject)){
self$subject = subject
}else{
assert_that('character' %in% class(subject),
msg = 'Param <subject> needs to be either subject ID or a Subject instance')
# split "project/subject" (slash or backslash separated) into its two parts
subject = str_split_fixed(subject, '\\\\|/', 2)
self$subject = Subject$new(project_name = subject[1], subject_code = subject[2], reference = reference, strict = FALSE)
}
# load electrodes
if(autoload){
self$load_electrodes(reference = reference)
}
invisible()
},
# Fetch one electrode's object(s) from the named container(s), default 'raw'.
# Returns a named list keyed by container name.
get_electrode = function(electrode, name = 'raw'){
e_str = as.character(electrode)
re = lapply(name, function(nm){
self[[nm]]$get(e_str)
})
names(re) = name
return(re)
},
# Load Electrode objects (with their reference signals) into `self$raw`,
# skipping electrodes that are already cached.
load_electrodes = function(electrodes, reference = 'default'){
if(missing(electrodes)){
electrodes = self$subject$valid_electrodes
}else{
electrodes = self$subject$filter_valid_electrodes(electrodes)
}
# Set up reference
self$load_reference(reference, electrodes = electrodes)
# only load electrodes not already present in self$raw
electrodes = electrodes[!paste(electrodes) %in% self$raw$keys()]
ref_table = self$reference$get('ref_table')
if(length(electrodes) > 0){
progress = progress(title = 'Loading reference...', max = length(electrodes))
for(e in electrodes){
e_str = paste(e)
progress$inc(sprintf('Electrode - %s', e_str))
# get reference
ref = ref_table$Reference[ref_table$Electrode == e]
e_obj = Electrode$new(subject = self$subject, electrode = e, reference_by = self$reference$get(ref), is_reference = F)
self$raw$set(key = e_str, value = e_obj)
}
logger('Loaded.')
progress$close()
}
invisible()
},
# Epoch the loaded electrodes around trial onsets: `pre`/`post` seconds,
# optional frequency range, data_type any of 'power'/'phase'/'volt'.
# Without `func`, disk-backed ECoGTensor objects are stored on self
# (power/phase/volt or raw_* depending on `referenced`); with `func`, it is
# applied per electrode and the per-electrode results are returned as a list.
epoch = function(epoch_name, pre, post, electrodes = NULL, frequency_range = NULL, data_type = 'power', referenced = T, func = NULL, quiet = FALSE){
if(is.null(electrodes)){
electrodes = self$subject$valid_electrodes
}else{
electrodes = self$subject$filter_valid_electrodes(electrodes)
}
# only epoch electrodes that have been loaded into self$raw
electrodes = electrodes[paste(electrodes) %in% self$raw$keys()]
assert_that(length(electrodes) > 0, msg = 'No electrode loaded.')
# remember epoch settings on the repository for later inspection (see info())
self$epochs$set('epoch_name', epoch_name)
self$epochs$set('epoch_params', c(pre, post))
self$epochs$set('epoch_data', load_meta(
meta_type = 'epoch',
subject_id = self$subject$subject_id,
meta_name = epoch_name
))
self$epochs$set('electrodes', electrodes)
self$epochs$set('frequency_range', frequency_range)
freqs = load_meta(subject_id = self$subject$subject_id, meta_type = 'frequencies')
# default to the full frequency range (%?<-% assigns only when NULL)
frequency_range %?<-% range(freqs$Frequency)
freq_subset = freqs$Frequency %within% frequency_range
if(!sum(freq_subset)){
logger('Frequency range is invalid, looking for the nearest frequency', level = 'WARNING')
freq_subset[which.min(abs(freqs$Frequency - frequency_range[1]))] = T
}
progress = progress(title = 'Loading data...', max = (length(electrodes) + 1) * length(data_type), quiet = quiet)
on.exit({progress$close()})
# NOTE(review): `epoch_data`, `re` and `n_elec` below appear unused in this
# method -- presumably leftovers; confirm before removing.
epoch_data = self$epochs$get('epoch_data')
re = list()
subject_id = self$subject$id
raws = self$raw
# progress$inc('Finalizing...')
# Get dimension names
# 1. Trial
epochs = load_meta(
meta_type = 'epoch',
meta_name = epoch_name,
project_name = self$subject$project_name,
subject_code = self$subject$subject_code
)
trial_order = order(epochs$Trial)
dn_trial = epochs$Trial[trial_order]
dn_freqs = freqs$Frequency[freq_subset]
n_dt = length(data_type)
n_elec = length(electrodes)
# dimension names for wavelet data: Trial x Frequency x Time x Electrode.
# `subject` resolves to the `subject` field because the class is non-portable.
dimnames_wave = list(
Trial = dn_trial,
Frequency = dn_freqs,
Time = seq(-pre, post, by = 1 / subject$sample_rate),
Electrode = electrodes
)
# voltage uses the preprocessing sample rate and has no Frequency dimension
dimnames_volt = list(
Trial = dn_trial,
Time = seq(-pre, post, by = 1 / subject$preprocess_info('srate')),
Electrode = electrodes
)
count = 1
# collapse results
if(!is.function(func)){
if('power' %in% data_type){
# epoch each electrode (parallel) and flatten its power block to a vector
lapply_async(electrodes, function(e){
electrode = raws$get(as.character(e))
electrode$epoch(
epoch_name = epoch_name,
pre = pre,
post = post,
types = 'power',
raw = !referenced
) ->
elc
power = elc$power$subset(Frequency = freq_subset, drop = T, data_only = T)
rm(elc)
power = as.vector(power)
return(power)
}, .call_back = function(i){
progress$inc(sprintf('Step %d (of %d) electrode %d (power)', count, n_dt, electrodes[i]))
}) ->
results
count = count + 1
gc()
# one data.frame column per electrode, written to an fst cache file below
names(results) = paste0('V', seq_along(electrodes))
results = do.call('data.frame', results)
# Generate tensor for power
power = ECoGTensor$new(0, dim = c(1,1,1,1), varnames = names(dimnames_wave), hybrid = F)
# erase data
power$set_data(NULL)
# reset dim and dimnames
power$dim = vapply(dimnames_wave, length, FUN.VALUE = 0, USE.NAMES = F)
power$dimnames = dimnames_wave
# generate local cache for power
file = tempfile()
write_fst(results, file, compress = 20)
rm(results)
gc()
# change tensor file path
power$swap_file = file
power$hybrid = T
power$use_index = TRUE
# set to be read-only
power$read_only = TRUE
nm = ifelse(referenced, 'power', 'raw_power')
self[[nm]] = power
}
if('phase' %in% data_type){
# same flow as power above, but extracting phase
lapply_async(electrodes, function(e){
electrode = raws$get(as.character(e))
electrode$epoch(
epoch_name = epoch_name,
pre = pre,
post = post,
types = 'phase',
raw = !referenced
) ->
elc
phase = elc$phase$subset(Frequency = freq_subset, drop = T, data_only = T)
rm(elc)
phase = as.vector(phase)
return(phase)
}, .call_back = function(i){
progress$inc(sprintf('Step %d (of %d) electrode %d (phase)', count, n_dt, electrodes[i]))
}) ->
results
count = count + 1
gc()
names(results) = paste0('V', seq_along(electrodes))
results = do.call('data.frame', results)
# Generate tensor for phase
phase = ECoGTensor$new(0, dim = c(1,1,1,1), varnames = names(dimnames_wave), hybrid = F)
# erase data
phase$set_data(NULL)
# reset dim and dimnames
phase$dim = vapply(dimnames_wave, length, FUN.VALUE = 0, USE.NAMES = F)
phase$dimnames = dimnames_wave
# generate local cache for phase
file = tempfile()
write_fst(results, file, compress = 20)
rm(results)
gc()
# change tensor file path
phase$swap_file = file
phase$hybrid = T
phase$use_index = TRUE
# set to be read-only
phase$read_only = TRUE
nm = ifelse(referenced, 'phase', 'raw_phase')
self[[nm]] = phase
}
if('volt' %in% data_type){
# same flow for voltage traces (3-D tensor: Trial x Time x Electrode)
lapply_async(electrodes, function(e){
electrode = raws$get(as.character(e))
electrode$epoch(
epoch_name = epoch_name,
pre = pre,
post = post,
types = 'volt',
raw = !referenced
) ->
elc
volt = elc$volt$get_data()
rm(elc)
volt = as.vector(volt)
return(volt)
}, .call_back = function(i){
progress$inc(sprintf('Step %d (of %d) electrode %d (voltage)', count, n_dt, electrodes[i]))
}) ->
results
count = count + 1
gc()
names(results) = paste0('V', seq_along(electrodes))
results = do.call('data.frame', results)
# Generate tensor for voltage
volt = ECoGTensor$new(0, dim = c(1,1,1), varnames = names(dimnames_volt), hybrid = F)
# erase data
volt$set_data(NULL)
# reset dim and dimnames
volt$dim = vapply(dimnames_volt, length, FUN.VALUE = 0, USE.NAMES = F)
volt$dimnames = dimnames_volt
# generate local cache for volt
file = tempfile()
write_fst(results, file, compress = 20)
rm(results)
gc()
# change tensor file path
volt$swap_file = file
volt$hybrid = T
volt$use_index = TRUE
# set to be read-only
volt$read_only = TRUE
nm = ifelse(referenced, 'volt', 'raw_volt')
self[[nm]] = volt
}
return(invisible())
}else{
# custom path: apply `func` to each epoched electrode, return the list
lapply_async(electrodes, function(e){
electrode = raws$get(as.character(e))
electrode$epoch(
epoch_name = epoch_name,
pre = pre, post = post, types = data_type, raw = !referenced
) -> elc;
if(is(elc$power, 'ECoGTensor')){
elc$power = elc$power$subset(Frequency = freq_subset, drop = F)
}
if(is(elc$phase, 'ECoGTensor')){
elc$phase = elc$phase$subset(Frequency = freq_subset, drop = F)
}
elc = func(elc)
return(elc)
}, .call_back = function(i){
progress$inc(sprintf('Preparing electrode - %d', electrodes[i]))
}) ->
results
gc()
return(results)
}
},
# Load the named reference table and cache one Electrode object per unique
# reference signal directly inside the reference Map's environment.
load_reference = function(ref_name, electrodes = NULL){
if(!length(electrodes)){
electrodes = self$subject$valid_electrodes
}else{
electrodes = self$subject$filter_valid_electrodes(electrodes)
}
# Set reference table
ref_table = load_meta(
meta_type = 'references',
project_name = self$subject$project_name,
subject_code = self$subject$subject_code,
meta_name = ref_name
)
self$reference$set('ref_table', ref_table)
# load partial references, also avoid invalid electrodes
ref_table = ref_table[ref_table$Electrode %in% electrodes, ]
# Trick: assign straight into the Map's private environment; this is a hack
# into R6 internals that avoids extra evaluations
ref_env = self$reference$private$env
unique_refs = unique(ref_table$Reference)
progress = progress(title = 'Loading reference...', max = length(unique_refs))
lapply(unique_refs, function(ref){
progress$inc(ref)
# delayedAssign(ref, {
# Electrode$new(subject = self$subject, electrode = ref, is_reference = T)
# }, assign.env = ref_env)
ref_env[[ref]] = Electrode$new(subject = self$subject, electrode = ref, is_reference = T)
})
progress$close()
},
# Percent-signal-change baseline of the epoched power tensor over [from, to].
# Returns a new ECoGTensor; does not modify self$power. Requires epoch() to
# have populated self$power first.
baseline = function(from, to, electrodes = NULL, print.time = FALSE){
assert_that(!is.null(self$power), msg = 'Please epoch power spectrum first.')
start = Sys.time()
# get baseline
electrodes = electrodes[electrodes %in% self$power$dimnames$Electrode]
# default to all epoched electrodes when none (or none valid) were given
electrodes %?<-% self$power$dimnames$Electrode
self$power$subset(
Time = Time %within% c(from, to),
Electrode = Electrode %in% electrodes,
data_only = T
) ->
bl
# mean over the Time axis (axis 3 of Trial x Frequency x Time x Electrode,
# matching the dimnames built in epoch())
ntimepts = dim(bl)[3]
bl = collapse(bl, keep = c(1,2,4)) / ntimepts
# percent signal change: divide by the baseline, subtract 1, scale by 100;
# the aperm pair moves Time last so the baseline broadcasts correctly
re = ECoGTensor$new(
data = aperm((aperm(
self$power$subset(
Electrode = Electrode %in% electrodes,
drop = F,
data_only = T
),
c(1, 2, 4, 3)
) / as.vector(bl) - 1) * 100, c(1, 2, 4, 3)),
dimnames = c(self$power$dimnames[1:3], list(Electrode = electrodes)),
dim = c(self$power$dim[1:3], length(electrodes)),
varnames = self$power$varnames
)
end = Sys.time()
if(print.time){
logger(sprintf('Baseline calculation - %.0f ms', as.numeric(end-start) * 1000))
}
return(re)
}
)
)
|
920dc858a16b47f8739fe118f130052ce054d888
|
80da7a81e82713dcdedd6f785589e93623fae885
|
/Explorarory.R
|
686447c0b75dde88f22201937d422ff47a37fe5f
|
[] |
no_license
|
MiG-Kharkov/HR-training
|
9a9599b18d6ab277fb19bbb035945bccef6917ca
|
c91f4ef7baab091a0f07b1a6d497eb971c8373ed
|
refs/heads/master
| 2021-01-12T04:30:38.035087
| 2017-01-20T14:35:38
| 2017-01-20T14:35:38
| 77,630,755
| 0
| 3
| null | 2017-01-20T14:35:39
| 2016-12-29T18:05:20
|
R
|
UTF-8
|
R
| false
| false
| 18,063
|
r
|
Explorarory.R
|
# Exploratory data analysis of the HR attrition dataset.
# Preliminary analysis; the real work is supposed to be in main.R.
# One-time package installation -- normally run interactively, not on every execution.
install.packages("corrplot")
# BUG FIX: the package name must be quoted. `install.packages(ROCR)` evaluates
# ROCR as a variable and fails with "object 'ROCR' not found".
install.packages("ROCR")
# Load libraries
library(ggplot2)
library(caret)
library(corrplot)
library(e1071)
library(ROCR)
# Clear the global environment. NOTE: discouraged in shared code (it wipes the
# caller's workspace); kept here because this is a standalone exploratory script.
rm(list = ls())
# Read the HR dataset and convert categorical columns to factors.
dataset <- read.csv("HR_comma_sep.csv")
dataset$left <- as.factor(dataset$left)
dataset$promotion_last_5years <- as.factor(dataset$promotion_last_5years)
dataset$Work_accident <- as.factor(dataset$Work_accident)
# Salary is ordinal: low < medium < high.
dataset$salary <- ordered(dataset$salary, c("low", "medium", "high"))
summary(dataset)
str(dataset)
# Correlation is only defined for numeric columns.
num.cols <- sapply(dataset, is.numeric)
cor.data <- cor(dataset[, num.cols])
# Visualise the correlation matrix with corrplot.
corrplot(cor.data, method = "color")
# Feature plot (*caret* package) -
# saved to the file "pic-01-scatter-plot-matrix.png".
png(filename = "pic-01-scatter-plot-matrix.png",
    width = 14, height = 14, units = 'in', res = 300)
featurePlot(x = dataset[, c("satisfaction_level", "last_evaluation", "number_project",
                            "average_montly_hours",
                            "time_spend_company",
                            "promotion_last_5years",
                            "sales",
                            "salary")],
            y = dataset$left,
            plot = "pairs")
dev.off()
# Smaller pairs plot of the numeric features only, shown on screen.
featurePlot(x = dataset[, c("satisfaction_level", "last_evaluation", "number_project",
                            "average_montly_hours",
                            "time_spend_company")],
            y = dataset$left,
            plot = "pairs",
            ## Add a key at the top
            auto.key = list(columns = 2))
# Analysis of dependencies that affect leaving (points coloured by `left`).
ggplot(dataset, aes(x = average_montly_hours, y = time_spend_company)) +
  geom_point(color = as.numeric(dataset$left)) +
  geom_density2d() +
  labs(title = "The probability destribution of leaving", x = "Avrenge hours per month", y = "Years in the company")
ggplot(dataset, aes(x = last_evaluation, y = satisfaction_level)) +
  geom_point(color = as.numeric(dataset$left)) +
  geom_density2d() +
  labs(x = "The level of the last evaluation", y = "The level of employee satisfaction",
       title = "The probability destribution of leaving")
# Satisfaction density split by attrition, with the overall mean marked.
ggplot(dataset, aes(x = satisfaction_level, colour = factor(left), fill = factor(left))) + geom_density() +
  geom_vline(xintercept = mean(dataset$satisfaction_level)) +
  ggtitle("Satisfaction Level \n density plot \n w. Left") + xlab("Satisfaction level")
# Satisfaction per salary band, split by attrition (jitter shows raw points).
ggplot(dataset, aes(x = salary, y = satisfaction_level, fill = factor(left), colour = factor(left))) +
  geom_boxplot(outlier.colour = NA) +
  geom_jitter(alpha = 0.1) +
  ggtitle("Geom boxplot \n salary vs sat level") + xlab("Salary") + ylab("Satisfacion level")
# Various ways to visualize the information: histograms of key numeric features.
ggplot(dataset, aes(x = satisfaction_level)) + geom_histogram()
ggplot(dataset, aes(x = last_evaluation)) + geom_histogram()
ggplot(dataset, aes(x = time_spend_company)) + geom_histogram()
#i want to group sales factors. when i creates model for logical regression
#i saw some of sales are not stitistic significant
#so I guess, we can creat dummy variables or group some of them
# saleshr 0.2901505 0.1502195 1.932 0.053420 .
# salesIT -0.1607812 0.1411670 -1.139 0.254727
# salesmanagement -0.5165982 0.1849407 -2.793 0.005217 **
# salesmarketing 0.0077278 0.1513287 0.051 0.959272
# salesproduct_mng -0.1045803 0.1492742 -0.701 0.483558
# salesRandD -0.5518519 0.1650076 -3.344 0.000825 ***
# salessales 0.0433311 0.1174506 0.369 0.712180
# salessupport 0.0369344 0.1258566 0.293 0.769168
# salestechnical 0.0350542 0.1229423 0.285 0.775547
# Department counts before grouping (printed when run interactively).
table(dataset$sales)
# accounting hr IT management marketing product_mng RandD sales support technical
# 767 739 1227 630 858 902 787 4140 2229 2720
#good question for discussion can we leave only management and RandD. They involve powerfuly but it's only small %
# Collapse department labels: keep "management" and "RandD" (the departments
# with significant coefficients in the logistic model) and map everything
# else to "other".
#
# @param job A single department label (character or factor).
# @return The label itself for "management"/"RandD", otherwise "other".
sales_mod <- function(job) {
  job <- as.character(job)
  # %in% never returns NA, unlike `job == "a" | job == "b"`, which makes
  # `if` error when `job` is NA; NA therefore falls through to "other".
  if (job %in% c("management", "RandD")) {
    return(job)
  }
  "other"
}
# Reform `sales` by collapsing departments via sales_mod(), then re-factor.
dataset$sales <- as.factor(sapply(dataset$sales, sales_mod))
# if an ordered factor is used here, model.matrix doesn't work properly:
# dataset$sales <- ordered(dataset$sales, c("other", "management", "RandD"))
summary(dataset)
str(dataset)
# Create dummy (indicator) variables for the grouped sales factor.
dummy_matrix <- model.matrix(~ sales, data = dataset)
table(dummy_matrix)
# Combine the dummy columns with the dataset, dropping the original sales factor.
# NOTE(review): assumes `sales` is column 9 of `dataset` -- confirm against str().
dataset <- cbind(dataset[, -9], sales_other = dummy_matrix[,2], sales_RandD= dummy_matrix[,3])
# Split the dataset into train and test sets (caret returns row indices).
# library(caTools) can provide a TRUE/FALSE vector for splitting instead.
set.seed(123)
split = createDataPartition(y=dataset$left, p=0.75, list=FALSE)
training <- dataset[split, ]
testing <- dataset[-split,]
names(training)
# Train models with different sets of independent variables;
# the fitted-model output is pasted into the comments after each model.
# Quick start with logistic regression on all predictors.
modelFit <- glm(formula = left ~ .,
family = binomial ,
training)
# glm(formula = left ~ ., family = binomial, data = training)
#
# Deviance Residuals:
# Min 1Q Median 3Q Max
# -2.2482 -0.6603 -0.4018 -0.1152 3.0534
#
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) -0.8996993 0.2062815 -4.362 1.29e-05 ***
# satisfaction_level -4.1619938 0.1134434 -36.688 < 2e-16 ***
# last_evaluation 0.7911745 0.1720377 4.599 4.25e-06 ***
# number_project -0.3223698 0.0248218 -12.987 < 2e-16 ***
# average_montly_hours 0.0045616 0.0005974 7.636 2.25e-14 ***
# time_spend_company 0.2656302 0.0178240 14.903 < 2e-16 ***
# Work_accident1 -1.5948253 0.1043029 -15.290 < 2e-16 ***
# promotion_last_5years1 -1.2249191 0.2933590 -4.175 2.97e-05 ***
# salary.L -1.3793755 0.1050965 -13.125 < 2e-16 ***
# salary.Q -0.3762478 0.0685437 -5.489 4.04e-08 ***
# sales_other 0.5390296 0.1533004 3.516 0.000438 ***
# sales_RandD -0.0363993 0.1960747 -0.186 0.852727
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# (Dispersion parameter for binomial family taken to be 1)
#
# Null deviance: 12351 on 11249 degrees of freedom
# Residual deviance: 9627 on 11238 degrees of freedom
# AIC: 9651
#
# Number of Fisher Scoring iterations: 5
# Refit without sales_RandD (not statistically significant in the previous fit).
modelFit <- glm(formula = left ~ . - sales_RandD,
family = binomial,
training)
# glm(formula = left ~ . - sales_RandD, family = binomial, data = training)
#
# Deviance Residuals:
# Min 1Q Median 3Q Max
# -2.2487 -0.6603 -0.4018 -0.1155 3.0534
#
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) -0.9210179 0.1716128 -5.367 8.01e-08 ***
# satisfaction_level -4.1620957 0.1134436 -36.689 < 2e-16 ***
# last_evaluation 0.7915599 0.1720224 4.601 4.19e-06 ***
# number_project -0.3224366 0.0248201 -12.991 < 2e-16 ***
# average_montly_hours 0.0045606 0.0005974 7.634 2.27e-14 ***
# time_spend_company 0.2658448 0.0177908 14.943 < 2e-16 ***
# Work_accident1 -1.5948993 0.1043024 -15.291 < 2e-16 ***
# promotion_last_5years1 -1.2226420 0.2930786 -4.172 3.02e-05 ***
# salary.L -1.3773495 0.1045071 -13.179 < 2e-16 ***
# salary.Q -0.3750686 0.0682412 -5.496 3.88e-08 ***
# sales_other 0.5606547 0.1000875 5.602 2.12e-08 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#
# (Dispersion parameter for binomial family taken to be 1)
#
# Null deviance: 12350.7 on 11249 degrees of freedom
# Residual deviance: 9627.1 on 11239 degrees of freedom
# AIC: 9649.1
#
# Number of Fisher Scoring iterations: 5
summary(modelFit)
# Build a confusion matrix from predicted probabilities.
# NOTE(review): `testing[-7]` drops the 7th column, assumed to be `left` -- confirm.
prediction <- predict(modelFit, type = "response", newdata = testing[-7])
# Classify with a 0.5 probability threshold.
y_hat <- ifelse(prediction > 0.5, 1, 0)
y_hat <- as.factor(y_hat)
# Get a confusion matrix without extra information.
table(y_hat, testing$left)
# y_hat 0 1
# 0 2632 575
# 1 225 317
# Plot the ROC curve. The local vector `prediction` shadows ROCR::prediction,
# but R still resolves the call below to the function.
ROCRpred = prediction(prediction, testing$left)
ROCRperf = performance(ROCRpred, "tpr", "fpr")
# Extract the AUC value from the performance object's "y.values" slot.
auc <- slot(performance(ROCRpred, "auc"), "y.values")[[1]]
plot(ROCRperf, colorize=TRUE)
abline(h=seq(0,1,0.05), v=seq(0,1,0.05), col = "lightgray", lty = "dotted")
# Diagonal reference line (random classifier).
lines(c(0,1),c(0,1), col = "gray", lwd =2)
text(0.6,0.2,paste("AUC=", round(auc,4), sep=""), cex=1.4)
title("ROC Curve")
#plot(ROCRperf, colorize=TRUE, print.cutoffs.at=seq(0,1,0.1), text.adj=c(-0.2,1.7))
# Get a confusion matrix with extra details via the caret package.
confusionMatrix(y_hat, testing$left)
# Confusion Matrix and Statistics
#
# Reference
# Prediction 0 1
# 0 2633 575
# 1 224 317
#
# Accuracy : 0.7869
# 95% CI : (0.7734, 0.7999)
# No Information Rate : 0.7621
# P-Value [Acc > NIR] : 0.000166
#
# Kappa : 0.3203
# Mcnemar's Test P-Value : < 2.2e-16
#
# Sensitivity : 0.9216
# Specificity : 0.3554
# Pos Pred Value : 0.8208
# Neg Pred Value : 0.5860
# Prevalence : 0.7621
# Detection Rate : 0.7023
# Detection Prevalence : 0.8557
# Balanced Accuracy : 0.6385
#
# 'Positive' Class : 0
# Export data for a CAP (Cumulative Accuracy Profile) plot: actual outcome
# (factor converted to 0/1) alongside the predicted probability.
cap_data <- cbind(left = as.numeric(testing$left)-1, prediction)
write.csv(cap_data, "cap_data.csv")
# pic-03-cap analysis my model from exel is a screenshot from cap_analysis.xlsx,
# where the CAP analysis for this model was calculated.
# pic-02-cap analysis information.png shows how to interpret this analysis.
# Information for an interactive threshold-based confusion matrix: the table
# was saved in cap_data.csv and is used to build a shiny web app.
# The next code deploys the application to the cloud.
library(shiny)
# As this is a public github repo, the secret credentials below were removed.
rsconnect::setAccountInfo(name='damsgroup', token='УДАЛИЛ', secret='УДАЛИЛ')
# Test the application locally first.
runApp()
library(rsconnect)
# Deploy the app; don't do it from your computer.
deployApp()
#=================================================
# Create a prediction based on Naive Bayes from library e1071.
# Important - it works only with factors.
# Column 7 holds our `left` outcome, so it is excluded from the predictors.
# modelFit <- naiveBayes(x = training[,-7], y = training$left) #different way for a function call
modelFit <- naiveBayes(left ~. , data = training)
summary(modelFit)
# Length Class Mode
# apriori 2 table numeric
# tables 10 -none- list
# levels 2 -none- character
# call 3 -none- call
# predict() on a naiveBayes fit returns class labels (factors), not probabilities.
prediction <- predict(modelFit, newdata = testing[-7])
confusionMatrix(prediction, testing$left)
# Confusion Matrix and Statistics
#
# Reference
# Prediction 0 1
# 0 2554 312
# 1 303 580
#
# Accuracy : 0.836
# 95% CI : (0.8237, 0.8477)
# No Information Rate : 0.7621
# P-Value [Acc > NIR] : <2e-16
#
# Kappa : 0.5461
# Mcnemar's Test P-Value : 0.747
#
# Sensitivity : 0.8939
# Specificity : 0.6502
# Pos Pred Value : 0.8911
# Neg Pred Value : 0.6569
# Prevalence : 0.7621
# Detection Rate : 0.6812
# Detection Prevalence : 0.7645
# Balanced Accuracy : 0.7721
#
# 'Positive' Class : 0
# Trying to improve the model by dropping sales_RandD gave an identical result.
modelFit <- naiveBayes(left ~. - sales_RandD , data = training)
summary(modelFit)
prediction <- predict(modelFit, newdata = testing[-7])
# The previous call returns class factors; request raw probabilities instead
# for drawing the CAP plot.
prediction_raw <- predict(modelFit, newdata = testing[-7], type = "raw")
# Unlike logistic regression this is a two-column matrix with the probability
# of each class, so the following expression derives y_hat and gives exactly
# the same result as the class predictions above.
y_hat <- ifelse(prediction_raw[,1] > prediction_raw[,2], 0, 1)
y_hat <- as.factor(y_hat)
confusionMatrix(y_hat, testing$left)
# Confusion Matrix and Statistics
#
# Reference
# Prediction 0 1
# 0 2554 312
# 1 303 580
#
# Accuracy : 0.836
# 95% CI : (0.8237, 0.8477)
# No Information Rate : 0.7645
# No Information Rate : 0.7621
# P-Value [Acc > NIR] : <2e-16
#
# Kappa : 0.5461
# Mcnemar's Test P-Value : 0.747
#
# Sensitivity : 0.8939
# Specificity : 0.6502
# Pos Pred Value : 0.8911
# Neg Pred Value : 0.6569
# Prevalence : 0.7621
# Detection Rate : 0.6812
# Detection Prevalence : 0.7645
# Balanced Accuracy : 0.7721
#
# 'Positive' Class : 0
# Join the two probability columns into a single score in [0, 1]:
# 0.5 + (P(left) - P(stay)) / 2, which equals P(left) for a two-class model.
prediction_bayes <- (prediction_raw[,2]- prediction_raw[,1]+1)/2
summary(prediction_bayes)
y_hat <- ifelse(prediction_bayes > 0.5, 1, 0)
y_hat <- as.factor(y_hat)
# NOTE(review): arguments are reversed relative to the other calls
# (confusionMatrix(prediction, reference)) -- confirm which order was intended.
confusionMatrix(testing$left, y_hat)
# ROC curve for the Naive Bayes probabilities.
ROCRpred = prediction(prediction_bayes, testing$left)
ROCRperf = performance(ROCRpred, "tpr", "fpr")
auc <- slot(performance(ROCRpred, "auc"), "y.values")[[1]]
plot(ROCRperf, colorize=TRUE)
abline(h=seq(0,1,0.05), v=seq(0,1,0.05), col = "lightgray", lty = "dotted")
# Diagonal reference line (random classifier).
lines(c(0,1),c(0,1), col = "gray", lwd =2)
text(0.6,0.2,paste("AUC=", round(auc,4), sep=""), cex=1.4)
title("ROC Curve")
#phi<-performance(ROCRpred, "phi") ??????????????
confusionMatrix(y_hat, testing$left )
# Cumulative Accuracy Profile (CAP)
# We have probabilities for the previous result, so build the CAP plot data.
cap_data_bayes <- cbind(left = as.numeric(testing$left)-1, predicted = round(prediction_bayes,5))
# If not converted to a data frame, the column name cannot be used below and we
# would have to write order(cap_data_bayes[,2]) instead.
cap_data_bayes <- as.data.frame(cap_data_bayes)
cap_data_bayes <- cap_data_bayes[order(cap_data_bayes$predicted, decreasing = TRUE),]
# NOTE(review): attach()/detach() is discouraged; order(left, -predicted) below
# relies on the attached columns being in the search path.
attach(cap_data_bayes)
cap_data_bayes <- cap_data_bayes[order(left, -predicted),]
detach(cap_data_bayes)
write.csv(cap_data_bayes, "cap_data_bayes.csv")
# cap_analysis_bayes.xlsx has information from this model.
# pic-04-cap analysis for cap_data_bayes.png is a screenshot with the CAP graphic.
ggplot(testing, aes(x = prediction_bayes, fill = factor(left), colour = factor(left))) +
geom_density() + ggtitle("Predicted dens test set")
# Deploy a new app for the bayes model.
deployApp()
# Density plots of predicted probabilities for the testing set.
ggplot(testing, aes(x = prediction_bayes, fill = factor(left), colour = factor(left))) +
geom_density() + ggtitle("Predicted denity for the test set")
# Building the ROC plot.
# NOTE(review): `pred` is used here before it is assigned two lines below; as
# written this line only works if `pred` lingers from an earlier interactive
# session -- the statement order looks wrong, confirm.
# slot() returns information about an individual slot in an S4 object.
auc <- slot(performance(pred, "auc"), "y.values")[[1]]
# prediction() transforms the input data into ROCR's standardized format.
pred <- prediction(prediction_bayes, testing$left)
# performance() computes predictor evaluations (here TPR vs FPR).
perf <- performance(pred,"tpr", "fpr")
plot(perf)
lines(c(0,1),c(0,1))
text(0.6,0.2,paste("AUC=", round(auc,4), sep=""), cex=1.4)
title("ROC Curve")
# Train a random forest model via caret.
rf.model <- train(left ~., data = training, method = "rf")
summary(rf.model)
# NOTE(review): bare `randomForest` just prints the function object (if the
# package is attached); likely a leftover -- confirm intent.
randomForest
rf.prediction <- predict(rf.model, newdata = testing[-7], type = "prob")
# Same two-column-to-score transformation as for Naive Bayes above.
prediction_rf <- (rf.prediction[,2]- rf.prediction[,1]+1)/2
summary(prediction_rf)
y_hat <- ifelse(prediction_rf > 0.01, 1, 0)
y_hat <- as.factor(y_hat)
cf.05<-confusionMatrix(y_hat, testing$left )
cf.41<-confusionMatrix(y_hat, testing$left )
confusionMatrix(y_hat, testing$left )
# Density plots probabilities for testing set
ggplot(testing, aes(x = prediction_rf, fill = factor(left), colour = factor(left))) +
geom_density() + ggtitle("Predicted denity for the test set")
# Confusion Matrix and Statistics
#
# Reference
# Prediction 0 1
# 0 2852 28
# 1 5 864
#
# Accuracy : 0.9912
# 95% CI : (0.9877, 0.9939)
# No Information Rate : 0.7621
# P-Value [Acc > NIR] : < 2.2e-16
#
# Kappa : 0.9755
# Mcnemar's Test P-Value : 0.0001283
#
# Sensitivity : 0.9982
# Specificity : 0.9686
# Pos Pred Value : 0.9903
# Neg Pred Value : 0.9942
# Prevalence : 0.7621
# Detection Rate : 0.7607
# Detection Prevalence : 0.7682
# Balanced Accuracy : 0.9834
#
# 'Positive' Class : 0
# Cumulative Accuracy Profile (CAP)
# I have probability for previous result so I am going to create CAP pot
cap_data_rf <- cbind(left = as.numeric(testing$left)-1, predicted = round(prediction_rf,5))
write.csv(cap_data_rf, "cap_data_rf.csv")
|
ce2efbc683f8dfb1a3f2df2f2fdfd804a5d60074
|
831309ea5419fd7a365403c27b9f98606deca04b
|
/R/hpaBinary_generics.R
|
37c56c5ac37786079a49fc5e1c3ac08f940ac398
|
[] |
no_license
|
cran/hpa
|
574f4a8ada8cc7dc911983344543a1bc9df4f566
|
2fe73d9bd0a8d97394989bcf6e017cd5598c6d03
|
refs/heads/master
| 2023-06-01T21:41:55.956317
| 2023-05-07T14:20:02
| 2023-05-07T14:20:02
| 236,612,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,217
|
r
|
hpaBinary_generics.R
|
#' Predict method for hpaBinary
#' @param object Object of class "hpaBinary"
#' @template newdata_Template
#' @param is_prob logical; if TRUE (default) then function returns
#' predicted probabilities. Otherwise latent variable
#' (single index) estimates will be returned.
#' @template elipsis_Template
#' @return This function returns predicted probabilities based on
#' \code{\link[hpa]{hpaBinary}} estimation results.
predict.hpaBinary <- function (object, ...,
newdata = NULL,
is_prob = TRUE)
{
if (length(list(...)) > 0)
{
warning("Additional arguments passed through ... are ignored.")
}
return(predict_hpaBinary(object, newdata, is_prob))
}
###
#' Summarizing hpaBinary Fits
#' @param object Object of class "hpaBinary"
#' @template elipsis_Template
#' @return This function returns the same list as \code{\link[hpa]{hpaBinary}}
#' function changing its class to "summary.hpaBinary".
summary.hpaBinary <- function (object, ...)
{
if (length(list(...)) > 0)
{
warning("Additional arguments passed through ... are ignored.")
}
return(summary_hpaBinary(object))
}
###
#' Summary for "hpaBinary" object
#' @param x Object of class "hpaBinary"
#' @template elipsis_Template
print.summary.hpaBinary <- function (x, ...)
{
if (length(list(...)) > 0)
{
warning("Additional arguments passed through ... are ignored.")
}
return(print_summary_hpaBinary(x))
}
###
#' Plot hpaBinary random errors approximated density
#' @param x Object of class "hpaBinary"
#' @param y this parameter currently ignored
#' @template elipsis_plot_Template
plot.hpaBinary <- function (x, y = NULL, ...)
{
if (!is.null(y))
{
warning("Note that y parameter currently ignored.")
}
# Load data from the model
pol_coefficients <- as.numeric(unlist(x["pol_coefficients"]))
pol_degrees <- as.numeric(unlist(x["pol_degrees"]))
mean <- as.numeric(x["mean"])
sd <- as.numeric(x["sd"])
# Adjust precision
errors_exp <- as.numeric(x["errors_exp"])
errors_var <- as.numeric(x["errors_var"])
plot_min <- errors_exp - 3.8 * sqrt(errors_var);
plot_max <- errors_exp + 3.8 * sqrt(errors_var);
n <- 10000;
precise <- (plot_max - plot_min) / n;
x_vec <- plot_min + cumsum(rep(precise, n))
den <- dhpa(x = x_vec,
pol_coefficients = pol_coefficients,
pol_degrees = pol_degrees,
mean = mean, sd = sd)
den_min = min(den)
den_max = max(den)
# Plot the result
# prepare the arguments
plot_args <- list(...)
plot_args_names <- names(plot_args)
if(!("xlim" %in% plot_args_names))
{
plot_args$xlim <- c(plot_min, plot_max)
}
if(!("type" %in% plot_args_names))
{
plot_args$type <- "l"
}
if(!("lwd" %in% plot_args_names))
{
plot_args$lwd <- 3
}
if(!("main" %in% plot_args_names))
{
plot_args$main <- "Random Errors Density Approximation Plot"
}
if(!("xlab" %in% plot_args_names))
{
plot_args$xlab <- "value"
}
if(!("ylab" %in% plot_args_names))
{
plot_args$ylab <- "density"
}
plot_args$x = x_vec
plot_args$y = den
# make the plot
do.call(plot, plot_args)
}
###
#' Calculates log-likelihood for "hpaBinary" object
#' @description This function calculates log-likelihood for "hpaBinary" object
#' @usage \method{logLik}{hpaBinary}(object, ...)
#' @param object Object of class "hpaBinary"
#' @template elipsis_Template
logLik.hpaBinary <- function (object, ...)
{
if (length(list(...)) > 0)
{
warning("Additional arguments passed through ... are ignored.")
}
lnL <- logLik_hpaBinary(object)
attr(lnL, "class") <- "logLik"
attr(lnL, "df") <- length(as.vector(object$x1))
return(lnL)
}
###
#' Print method for "hpaBinary" object
#' @param x Object of class "hpaBinary"
#' @template elipsis_Template
print.hpaBinary <- function (x, ...)
{
if (length(list(...)) > 0)
{
warning("Additional arguments passed through ... are ignored.")
}
cat(paste("It is the object of class",class(x),"\n"))
cat("It contains the following elements:\n")
cat(names(x), sep = ", ")
cat("\n")
cat("---\n")
cat("Estimation results:\n")
print(x$results)
cat("---\n")
cat(paste("Log-likelihood function value is:", round(x$'log-likelihood', 3), "\n"))
cat("---\n")
cat("Please, use summary() function to get additional information\n")
}
#' Extract coefficients from hpaBinary object
#' @param object Object of class "hpaBinary"
#' @template elipsis_Template
coef.hpaBinary <- function (object, ...)
{
if (length(list(...)) > 0)
{
warning("Additional arguments passed through ... are ignored.")
}
return(object$x1)
}
#' Extract covariance matrix from hpaBinary object
#' @param object Object of class "hpaBinary"
#' @template elipsis_Template
vcov.hpaBinary <- function (object, ...)
{
if (length(list(...)) > 0)
{
warning("Additional arguments passed through ... are ignored.")
}
return(object$cov_mat)
}
|
8cd83783f46e5c7140fb202a16f98d7bbd9fb99b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/grt/examples/gqc.Rd.R
|
b933cfe6a9c54dd2b62fff5a150b14fd26230529
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 258
|
r
|
gqc.Rd.R
|
library(grt)
### Name: gqc
### Title: General Quadratic Classifier
### Aliases: gqc print.gqc
### Keywords: multivariate
### ** Examples
data(subjdemo_2d)
fit.2dq <- gqc(response ~ x + y, data=subjdemo_2d,
category=subjdemo_2d$category, zlimit=7)
|
f576fcdd26da6ffd106fec27c95f3977a1f042f7
|
a94308678716ab60f03956e503f554a767e73733
|
/man/DIF.Logistic.MG.Rd
|
9e0b839bc1cadeda2bdacdc58d05fccce4ba8510
|
[] |
no_license
|
cswells1/MeasInv
|
9f9cb20da68b695cc1f65fc5c80f92ea31b030e7
|
b74acffcf8ec0d6886f7081882aa3965306eb4af
|
refs/heads/master
| 2023-07-14T21:35:32.915150
| 2021-09-12T22:50:49
| 2021-09-12T22:50:49
| 405,707,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 739
|
rd
|
DIF.Logistic.MG.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DIF.Logistic.MG.R
\name{DIF.Logistic.MG}
\alias{DIF.Logistic.MG}
\title{Logistic regression DIF method for more than two groups}
\usage{
DIF.Logistic.MG(data, group, sig.level, purify, output.filename)
}
\arguments{
\item{data}{numeric: either the data matrix only, or the data matrix plus the vector of group membership.}
\item{group}{factor: the vector of group membership}
\item{sig.level}{numeric: the significance level}
\item{purify}{logical: Default is FALSE}
\item{output.filename}{character: either the file path or file name to save the output}
}
\description{
This code performs logistic regression testing for DIF using more than
two groups.
}
|
4a52ca4d5e39ae3daf7b75b6576152cd6ae0672a
|
1aa41ed59a7ccc6b0cf001cd3bc8ed1f77b2162b
|
/R/user.R
|
67d2fe1c695125e1962f93d0082dda5872547d06
|
[] |
no_license
|
yutannihilation/wunderlistr
|
a30fcf401eec1d2174e0cc49988a514f16ebf798
|
d98b815d5687f1e1563d0aace6769686753aa193
|
refs/heads/master
| 2021-01-10T01:10:59.563351
| 2016-03-28T22:06:33
| 2016-03-28T22:06:33
| 54,811,791
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 484
|
r
|
user.R
|
#' User API
#'
#' All info related to the currently signed in user.
#'
#' @seealso \url{https://developer.wunderlist.com/documentation/endpoints/user}
#'
#' @param list_id List ID
#'
#' @export
wndr_get_user <- function(list_id = NULL) {
if(is.null(list_id)) {
wndr_api(verb = "GET",
path = "/api/v1/user")
} else {
wndr_api(verb = "GET",
path = "/api/v1/users",
body = list(
list_id = list_id
))
}
}
|
6702ebcc35b31ea29c3e7a0e66290b06e0e46003
|
4d2c2e6c274dc94b9b418fbf397cbb5bd06a23ed
|
/pbdr/tutorial3/tutorial3/u4-mcmc_glmm_mclapply.r
|
ee15e6ae1bdd7e53f82b2e586c27bff387f18f83
|
[] |
no_license
|
snoweye/snoweye.github.io
|
5991f8d533e0a0b1428f1c179768925a21bb47b9
|
e5d35e49aca7520f97d0719c829df4fc11ff63e1
|
refs/heads/master
| 2023-07-22T13:35:24.683834
| 2023-07-06T22:09:43
| 2023-07-06T22:09:43
| 64,176,642
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 951
|
r
|
u4-mcmc_glmm_mclapply.r
|
library(nlme)
library(MASS)
propose.glmm.random.mclapply <- function(param, da.mcmc, tau = 1){
f <- function(i.random){
### Random walk.
random.new <- param$random
random.new[i.random] <- rnorm(1, mean = param$random[i.random],
sd = tau * da.mcmc$sd.random)
logL.new <- logL.glmm(param$fixed, random.new, param$sd.random, da.mcmc)
### Acceptance and rejection step.
alpha <- min(0, logL.new - param$logL)
if(!is.nan(alpha) && log(runif(1)) < alpha){
ret <- random.new[i.random]
} else{
ret <- param$random[i.random]
}
ret
} # End of f().
ret <- mclapply(1:length(param$random), f, mc.cores = 4,
mc.preschedule = TRUE)
param$random <- do.call("c", ret)
### Update logL with the new set of random effects.
param$logL <- logL.glmm(param$fixed, param$random, param$sd.random, da.mcmc)
param
} # End of propose.glmm.random.mclapply().
|
3148b5c706ec4e0022c8e4a88179fb9f6db5fd15
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/OasisR/examples/xPy.Rd.R
|
8bfb033c297fa397b60995337ead5284c45a8e28
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
xPy.Rd.R
|
library(OasisR)
### Name: xPy
### Title: A function to compute interaction index (xPy)
### Aliases: xPy
### ** Examples
x <- segdata@data[ ,1:2]
xPy(x)
|
ff3b4ffe9176690bd7302d6d3bd67eafe332dd4a
|
763e605372290000e74bcda4db7f0e1cd24b6b46
|
/plot2.R
|
ede89768ec4ab9aa31baa92eef6de1b9ae8c6649
|
[] |
no_license
|
davidmanero/ExData_Plotting1
|
a03c131eab81064c6e32172d8504c07e791b715a
|
5a1d3ab1c404ecb03d199f0bcc2720d5ed0a68ae
|
refs/heads/master
| 2021-01-14T08:54:53.642379
| 2015-04-12T20:36:09
| 2015-04-12T20:36:09
| 33,830,763
| 0
| 0
| null | 2015-04-12T19:36:49
| 2015-04-12T19:36:48
| null |
UTF-8
|
R
| false
| false
| 752
|
r
|
plot2.R
|
## This code is used for creating the plot2 in the Course Project 1 for the Exploratoy
## Data Analysis curse from Coursera
## The downloaded file with the information has been manually cut with days 1st and 2nd
## Of Febrary 2007 in the local file "1st_2nd_Feb2007.txt"
datafile <- "./data/1st_2nd_Feb2007.txt"
data <- read.table(datafile, sep = ";", header = T, na.strings = "?")
## Converting the date and time columns and merging in one
data$Datetime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
## Constructing the plot in the screen
plot(data$Datetime, data$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
## Copying the plot in the PNG file
dev.copy(png, file = "plot2.png")
dev.off()
|
998223e413b7737dca6df3dc142ac98589c6ddd9
|
4052545c292db46b6363f299828dfc1d8d7f8b9a
|
/shiny2/leaflet/choropleths.R
|
91ea91e442cfc74bee824249f5e34a22dc2a6791
|
[] |
no_license
|
uvesco/studioEpi
|
972324a5af179912d1ab2c60ea21f0f04cddfa33
|
ed0849185460d628aa1fdc73eb13e67555a87a75
|
refs/heads/master
| 2020-12-12T05:03:04.953612
| 2020-04-04T23:26:09
| 2020-04-04T23:26:09
| 234,048,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 124
|
r
|
choropleths.R
|
library(geojson)
# https://rstudio.github.io/leaflet/choropleths.html
states <- geojson_read("us_states.json", what = "sp")
|
3d12a3a06670eecff6c182fc15b3b899d4618d7f
|
65553a50702fb30c40ec78f75ec305a8fab298f8
|
/man/Guitar-package.Rd
|
47c6afb599f7232a743d05df32782ca142ad5099
|
[] |
no_license
|
lzcyzm/Guitar
|
e22f1b9a34952b48b219a5bfcb8a91a9d1283408
|
cb1e1146c86ce8cf9caff505d1b22dca2bc37a3e
|
refs/heads/master
| 2021-01-10T07:42:46.819016
| 2015-10-22T02:55:25
| 2015-10-22T02:55:25
| 44,791,600
| 1
| 0
| null | 2015-10-23T04:54:56
| 2015-10-23T04:54:56
| null |
UTF-8
|
R
| false
| false
| 1,391
|
rd
|
Guitar-package.Rd
|
\name{Guitar-package}
\alias{Guitar-package}
\alias{Guitar}
\docType{package}
\title{
Guitar
}
\description{
RNA Landmarks Guided Transcriptomic View of RNA-related Genomic Features.
}
\details{
The package is designed for transcriptomic visualization of RNA-related genomic features represented with genome-based coordinates with respect to the landmarks of RNA transcripts, i.e., transcription starting site, start codon, stop codon and transcription ending site.
}
\author{
Jia Meng <jia.meng@hotmail.com>
}
\references{
~~ Literature or other references for background information ~~
}
\examples{
# read genomic features
narrowPeak <- system.file(
"extdata", "m6A_hg19_1000peaks_macs2.narrowPeak",
package="Guitar")
# bam imported as GAlignments
m6A_Bcell <- narrowPeaktoGRanges(narrowPeak)
# generate a list of genomic features
m6A_Bcell_1 <- m6A_Bcell[1:300]
m6A_Bcell_2 <- m6A_Bcell[301:600]
m6A_Bcell_3 <- m6A_Bcell[601:900]
feature_hg19 <- list(m6A_Bcell_1, m6A_Bcell_2, m6A_Bcell_3)
names(feature_hg19) <- c("m6A_1","m6A_2","m6A_3")
# Make Guitar coordiantes
txdb_file <- system.file("extdata", "hg19_toy.sqlite",
package="Guitar")
txdb <- loadDb(txdb_file)
gc_txdb <- makeGuitarCoordsFromTxDb(txdb,noBins =10)
# Plot
GuitarPlot(feature_hg19,
GuitarCoordsFromTxDb = gc_txdb)
}
|
957a22355f87e0d1aa95ac492bbf70bce42ef6a7
|
ece7ca8a7491a99c92d51a1afc3a053014fa1b3f
|
/R/idxstatsBam.R
|
e61854242f7f8dbf9e0ec045f3f2c127b6ad92b7
|
[
"MIT"
] |
permissive
|
Bioconductor/Rsamtools
|
a31c353bb24545fc5736fd8e99042bbe8068ac1c
|
cded55f6bd1d8363b16f60b79f74f707c8f90b77
|
refs/heads/devel
| 2023-05-12T18:01:23.101002
| 2023-04-25T13:50:37
| 2023-04-25T13:50:37
| 102,150,322
| 22
| 24
|
NOASSERTION
| 2023-09-02T09:43:11
| 2017-09-01T20:24:35
|
R
|
UTF-8
|
R
| false
| false
| 206
|
r
|
idxstatsBam.R
|
setMethod(idxstatsBam, "character",
function(file, index=file, ...)
{
index <- .normalizePath(index)
bam <- open(BamFile(file, index), "rb")
on.exit(close(bam))
idxstatsBam(bam, ...)
})
|
af93d88cea344a801a4022f669340b61acb4eb7c
|
3f985066ad0d90af692f53afdbc00a7f7ca91688
|
/day5Part1.R
|
82cb9da2ce9addae5970d04f423924ad49ddcbe4
|
[] |
no_license
|
leibensperger/Advent.of.Code
|
e36c2ffac730e947733e8a84cc7a9b6f2e1afc02
|
26301a4c44f9083d696f40c323fe15354f99923c
|
refs/heads/master
| 2020-04-12T07:58:17.093258
| 2019-01-02T18:58:18
| 2019-01-02T18:58:18
| 162,377,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,925
|
r
|
day5Part1.R
|
#--- Day 5: Alchemical Reduction ---
# You've managed to sneak in to the prototype suit
# manufacturing lab. The Elves are making decent progress,
# but are still struggling with the suit's size reduction
# capabilities.
#
# While the very latest in 1518 alchemical technology
# might have solved their problem eventually, you can do
# better. You scan the chemical composition of the suit's
# material and discover that it is formed by extremely long
# polymers (one of which is available as your puzzle input).
# The polymer is formed by smaller units which, when
# triggered, react with each other such that two adjacent
# units of the same type and opposite polarity are destroyed.
# Units' types are represented by letters; units' polarity
# is represented by capitalization. For instance, r and R
# are units with the same type but opposite polarity,
# whereas r and s are entirely different types and do not
# react.
# For example:
# In aA, a and A react, leaving nothing behind.
# In abBA, bB destroys itself, leaving aA. As above,
# this then destroys itself, leaving nothing.
# In abAB, no two adjacent units are of the same type,
# and so nothing happens.
# In aabAAB, even though aa and AA are of the same type,
# their polarities match, and so nothing happens.
# Now, consider a larger example, dabAcCaCBAcCcaDA:
# dabAcCaCBAcCcaDA The first 'cC' is removed.
# dabAaCBAcCcaDA This creates 'Aa', which is removed.
# dabCBAcCcaDA Either 'cC' or 'Cc' are removed (the result is the same).
# dabCBAcaDA No further actions can be taken.
# After all possible reactions, the resulting polymer
# contains 10 units.
# How many units remain after fully reacting the polymer
# you scanned? (Note: in this puzzle and others, the input
# is large; if you copy/paste your input, make sure you
# get the whole thing.)
# To begin, get your puzzle input.
input.file<-'/Users/eleib003/Google.Drive/Advent.of.Code.2018/input5.txt'
# Open connection
con<-file(input.file,open='r')
# Read
input<-readLines(con,n=1)
close(con)
inChar <- strsplit(input,'')[[1]]
outChar<-inChar
n<-1
nRec<-length(inChar)
# Loop through
while (n < nRec){
print(n)
#print(outChar[n:(n+5)])
# Check if same letter
if(toupper(outChar[n])==toupper(outChar[n+1])){
# Check if mixed case; -1 is returned for lower, +1
# for upper, so sum is 0
if(unlist(gregexpr("[A-Z]",outChar[n+1])) +
unlist(gregexpr("[A-Z]",outChar[n]))==0){
# Chop out this combo:
if(nRec != length(outChar)){stop()}
# Catch case if n is 1
if(n!=1 & n != (nRec-1)){
outChar<-c(outChar[1:(n-1)],outChar[(n+2):nRec])
} else{
if(n==1){outChar<-outChar[3:nRec]} else{
if(n == (nRec-1)){outChar<-outChar[1:(n-1)]}}
}
nRec<-length(outChar)
n<-n-1 # need to go back and check for previous match
} else {n<-n+1}
} else {n<-n+1}
if(n <= 0){n<-1}
}
|
050d4cc55f4f646f7e9b8297cc0f78ddfa4ee861
|
fd97957ee8a1434b5f43b2cadd38217644797e5b
|
/man/print.hint.test.Rd
|
5f28fb3bb7fba64aa9757fa64cf0ddb1d60ba7a7
|
[] |
no_license
|
cran/hint
|
a2fe4fdf50fc73d92a7a7746e28eae31761c7b24
|
b0b532db7fc788082182f793ce86f479573c95d6
|
refs/heads/master
| 2022-02-13T23:26:10.763801
| 2022-02-02T13:40:02
| 2022-02-02T13:40:02
| 17,696,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 410
|
rd
|
print.hint.test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hint_main.R
\name{print.hint.test}
\alias{print.hint.test}
\title{print.hint.test}
\usage{
\method{print}{hint.test}(x, ...)
}
\arguments{
\item{x}{An object of class `hint.test`.}
\item{...}{Additional arguments to be passed to `print`.}
}
\value{
Prints output to the console.
}
\description{
Prints the resuls of `hint.test`.
}
|
efe93f9319b154021ff628908bb380e1b8d72a24
|
1ca51889971d9fc4c759c6cf2a7b100f8b621592
|
/R/class_cost.R
|
01b82c5c5f768956d606385050c193ae1efe1ff3
|
[
"MIT"
] |
permissive
|
mgaldame/baguette
|
2734f150247f1103394b25c7af555bfd21f0dc7a
|
7bf898b766da8d65c6f848e05341d0f5554b9ec6
|
refs/heads/master
| 2023-05-05T06:21:51.381009
| 2021-05-27T15:12:33
| 2021-05-27T15:12:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,131
|
r
|
class_cost.R
|
#' Cost parameter for minority class
#'
#' Used in `bag_treer()`.
#'
#' @param range A two-element vector holding the _defaults_ for the smallest and
#' largest possible values, respectively.
#'
#' @param trans A `trans` object from the `scales` package, such as
#' `scales::log10_trans()` or `scales::reciprocal_trans()`. If not provided,
#' the default is used which matches the units used in `range`. If no
#' transformation, `NULL`.
#'
#' @details
#' This parameter reflects the cost of a mis-classified sample relative to a
#' baseline cost of 1.0. For example, if the first level of an outcome factor
#' occurred rarely, it might help if this parameter were set to values greater
#' than 1.0. If the second level of the outcome factor is in the minority,
#' values less than 1.0 would cause the model to emphasize the minority class
#' more than the majority class.
#' @examples
#' class_cost()
#' @export
class_cost <- function(range = c(0, 5), trans = NULL) {
dials::new_quant_param(
type = "double",
range = range,
inclusive = c(TRUE, TRUE),
trans = trans,
label = c(class_cost = "Class Cost")
)
}
|
e64384b1d221384f906b067e57da86e646bbc3c4
|
bb8a54bb3ae1a527ad66786d2aed62bb60a51277
|
/man/year_oldest.Rd
|
5955598ab7e6173629f293b7efed08dd0f15e752
|
[] |
no_license
|
JialingMa/NBAsstat1
|
bfa9ba2a6be979193f91de0de93d4a19fc153c4c
|
6a521c92a9fd9194187c119efa3f3292685941b1
|
refs/heads/master
| 2021-02-16T16:27:23.048808
| 2020-03-04T23:18:13
| 2020-03-04T23:18:13
| 245,024,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 418
|
rd
|
year_oldest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/year_oldest_function.R
\name{year_oldest}
\alias{year_oldest}
\title{A Year Age Function}
\usage{
year_oldest(year)
}
\arguments{
\item{year}{an individual year}
}
\value{
character
}
\description{
This function allows you to find the name of the player who was the odest player in NBA data for that year.
}
\examples{
year_oldest(2013)
}
|
3d4764b181235ba68bef346432f224423f4848d5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RcmdrMisc/examples/discretePlot.Rd.R
|
14b8e2f030d75d39b26eff83c3fdeab4ac63557a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 361
|
r
|
discretePlot.Rd.R
|
library(RcmdrMisc)
### Name: discretePlot
### Title: Plot Distribution of Discrete Numeric Variable
### Aliases: discretePlot
### Keywords: hplot
### ** Examples
if (require(datasets)){
data(mtcars)
mtcars$cyl <- factor(mtcars$cyl)
with(mtcars, {
discretePlot(carb)
discretePlot(carb, scale="percent")
discretePlot(carb, by=cyl)
})
}
|
97c7df7c92e59248ffc3c3371d2ed67b57f208e9
|
21960cbad6a8d83b8e394513cfed63a96093f7fb
|
/man/convert_Q.Rd
|
de96325dbff982c6b3f44b43b38ba79de42e3816
|
[] |
no_license
|
mattreusswig/convertUnits
|
fbbff61c7fba579862b5b733fa2bfb8ea2401b57
|
e8764a0e7fe94bade7941294dd69bc8fa91a1f90
|
refs/heads/master
| 2021-03-06T05:34:10.347027
| 2020-03-18T01:46:01
| 2020-03-18T01:46:01
| 246,182,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 749
|
rd
|
convert_Q.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_Q.R
\name{convert_Q}
\alias{convert_Q}
\title{Convert between flow units typically seen in wastewater and streamflow calculations--cfs, acre-ft/d, mgd, lpm, cms}
\usage{
convert_Q(x, from, to)
}
\arguments{
\item{x}{A vector of numbers to be converted.}
\item{from}{A character vector of the units x is in. Must be length 1 or same length as x.}
\item{to}{A character vector of the units into which x will be converted. Must be length 1 or same length as x.}
}
\value{
A vector of numbers converted FROM old units TO new units.
}
\description{
Convert between flow units typically seen in wastewater and streamflow calculations--cfs, acre-ft/d, mgd, lpm, cms
}
|
fbe583da994d5d092d527bd5d4d76d8f7dca4484
|
1abf8398ec048750d230f77b5467c0d3cf508349
|
/man/LoadRequiredPackage.Rd
|
016e0bd964a523e7bace75c135f7951850829f35
|
[] |
no_license
|
bioinformatics-gao/ChipSeq
|
97e8453cb74663bd2b4f35e44846311ca962850d
|
dde9e5a4f82142657f22d281cb10509715c0ef78
|
refs/heads/master
| 2021-01-12T00:03:09.798091
| 2017-01-11T17:32:19
| 2017-01-11T17:32:19
| 78,662,917
| 1
| 0
| null | 2017-01-11T17:35:08
| 2017-01-11T17:35:08
| null |
UTF-8
|
R
| false
| true
| 260
|
rd
|
LoadRequiredPackage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LoadRequiredPackage.R
\name{LoadRequiredPackage}
\alias{LoadRequiredPackage}
\title{Title}
\usage{
LoadRequiredPackage()
}
\description{
Title
}
\examples{
LoadRequiredPackage()
}
|
2aed9fa85e1037306b89832350a987f9a6998ec6
|
87472097e88f2e3aef1e9f003add2aa149c50233
|
/man/addDemographicFields.Rd
|
09d6a18efd1c0c1a9021a0c6485bfb63c8df440a
|
[] |
no_license
|
RGLab/ImmuneSignatures2
|
f1feca1e5f05f99419a8aca00b0d68928e1b8e82
|
15fc078c4475ae421142aa4b6271c9143db04eda
|
refs/heads/main
| 2023-04-18T07:08:56.765734
| 2022-12-05T22:52:42
| 2022-12-05T22:52:42
| 252,603,828
| 1
| 1
| null | 2022-07-28T23:05:21
| 2020-04-03T01:27:26
|
R
|
UTF-8
|
R
| false
| true
| 419
|
rd
|
addDemographicFields.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sharedMetaData.R
\name{addDemographicFields}
\alias{addDemographicFields}
\title{Add fields from demographic data to meta-data}
\usage{
addDemographicFields(dt, demographicData)
}
\arguments{
\item{dt}{meta-data data.table}
\item{demographicData}{demographic data data.table}
}
\description{
Add fields from demographic data to meta-data
}
|
fb48c710fc36cb346de4d98b555d51bc72f9635a
|
be7347efe4e197e441039642bb61cad76aadd54c
|
/R/random.R
|
b89dbcd07544778c428fdfeab911af3febc39fed
|
[
"MIT"
] |
permissive
|
SimonGoring/giphyR
|
1e95aa960b38d7845c0bf10e4f332562ab82d24f
|
979d204c498fd661495dba16a6ecf5c00a589008
|
refs/heads/master
| 2021-01-22T18:37:47.875982
| 2019-03-14T19:02:46
| 2019-03-14T19:02:46
| 85,097,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,524
|
r
|
random.R
|
#' @title Return a random GIF
#' @description Returns a random GIF from Giphy, which may be limited using tags.
#' @param tag a single, or multiple text strings.
#' @param rating - A rating for age appropriate gifs. Options include \code{"y"}, \code{"g"}, \code{"pg"}, \code{"pg-13"} and \code{"r"}.
#' @param sticker Should a GIF be returned or an animated sticker (with transparent background)? Default to \code{FALSE}.
#' @param api_key - Giphy provides a default \code{api_key} value for public beta testing: \code{"dc6zaTOxFJmzC"}. This is the default value.
#'
#' @importFrom jsonlite fromJSON
#' @importFrom httr content GET
#' @import dplyr
#'
#' @author Simon J. Goring \email{simon.j.goring@@gmail.com}
#' @details
#' Pulls a random GIF from the \code{giphy} API. This may be limited through the use of tags.
#' Be aware that failure to use the \code{rating} tag may result in gifs that are not safe for work use.
#' The default API key used here is for testing purposes only. More information on the use of the giphy API is available at \url{https://github.com/Giphy/GiphyAPI}
#'
#' @examples
#' gif <- random('landlord', rating = 'g')
#' plot(gif)
#' @export
random <- function(tag = NULL, rating = NULL, sticker = FALSE, api_key = 'dc6zaTOxFJmzC') {
params <- as.list(environment())
params$fmt <- 'json'
if (params$sticker == TRUE) {
base_uri <- 'http://api.giphy.com/v1/stickers/random'
params$sticker <- NULL
} else if (params$sticker == FALSE) {
base_uri <- 'http://api.giphy.com/v1/gifs/random'
params$sticker <- NULL
}
giphy_list <- httr::GET(base_uri, query = params) %>%
httr::content(as = "text", encoding = 'UTF-8') %>%
jsonlite::fromJSON()
if(giphy_list$meta$status %in% c(400, 401, 403)) {
stop(paste0('giphy returned an error: ', giphy_list$meta$msg))
}
empty <- data.frame(url = NA,
width = NA,
height = NA,
size = NA, mp4 = NA,
mp4_size = NA, webp = NA,
webp_size = NA)
empty_still <- data.frame(url = NA,
width = NA,
height = NA,
size = NA)
empty_mp4 <- data.frame(mp4 = NA, mp4_size = NA, width = NA, height = NA)
giphy_out <- data.frame(type = giphy_list$data$type,
id = giphy_list$data$id,
slug = '',
url = giphy_list$data$url,
bitly_gif_url = '',
bitly_url = '',
embed_url = '',
username = giphy_list$data$username,
source = '',
rating = '',
content_url = '',
source_tld = '',
source_post_url = '',
is_indexable = NA,
import_datetime = NA,
trending_datetime = NA,
stringsAsFactors = FALSE)
giphy_out$user <- data.frame(avatar_url = '',
banner_url = '',
profile_url = '',
username = giphy_list$data$username,
display_name = '',
twitter = '')
giphy_out$images <- data.frame(a = 1)
giphy_out$images$fixed_height <- empty
giphy_out$images$fixed_height_still <- empty_still
giphy_out$images$fixed_height_downsampled <- data.frame(url = giphy_list$data$fixed_height_downsampled_url,
width = giphy_list$data$fixed_height_downsampled_width,
height = giphy_list$data$fixed_height_downsampled_height,
webp = NA, webp_size = NA)
giphy_out$images$fixed_width <- empty
giphy_out$images$fixed_width_still <- empty_still
giphy_out$images$fixed_width_downsampled <- data.frame(url = giphy_list$data$fixed_width_downsampled_url,
width = giphy_list$data$fixed_width_downsampled_width,
height = giphy_list$data$fixed_width_downsampled_height,
webp = NA, webp_size = NA)
giphy_out$images$fixed_height_small <- data.frame(url = giphy_list$data$fixed_height_small_url,
width = giphy_list$data$fixed_height_small_width,
height = giphy_list$data$fixed_height_small_height,
mp4 = NA, mp4_size = NA,
webp = NA, webp_size = NA)
giphy_out$images$fixed_height_small_still <- data.frame(url = giphy_list$data$fixed_height_small_still_url,
width = NA, height = NA, size = NA)
giphy_out$images$fixed_width_small <- data.frame(url = giphy_list$data$fixed_height_small_url,
width = giphy_list$data$fixed_height_small_width,
height = giphy_list$data$fixed_height_small_height,
mp4 = NA, mp4_size = NA,
webp = NA, webp_size = NA)
giphy_out$images$fixed_width_small_still <- data.frame(url = giphy_list$data$fixed_width_small_still_url,
width = NA, height = NA, size = NA)
giphy_out$images$downsized <- empty
giphy_out$images$downsized_still <- empty_still
giphy_out$images$downsized_large <- empty_still
giphy_out$images$downsized_medium <- empty_still
giphy_out$images$original <- data.frame(url = giphy_list$data$image_original_url,
width = giphy_list$data$image_width,
height = giphy_list$data$image_height,
size = NA,
frames = giphy_list$data$image_frames,
mp4 = giphy_list$data$image_mp4_url,
mp4_size = NA,
webp = NA,
webp_size = NA,
hash = NA, stringsAsFactors = FALSE)
giphy_out$images$original_still <- empty_still
giphy_out$images$looping <- data.frame(mp4 = NA, mp4_size = NA)
giphy_out$images$preview <- empty_mp4
giphy_out$images$downsized_small <- data.frame(mp4 = NA, mp4_size = NA)
giphy_out$images$preview_gif <- empty_still
giphy_out$images$original_mp4 <- empty_mp4
giphy_out$images$hd <- empty_mp4
giphy_out$images$`480w_still` <- empty_still
giphy_out$images <- giphy_out$images[,!colnames(giphy_out$images) %in% 'a']
attr(giphy_out, 'meta') <- giphy_list$meta
attr(giphy_out, 'pagination') <- giphy_list$pagination
class(giphy_out) <- c('giphy', 'data.frame')
return(giphy_out)
}
|
88514c86467ca332e4ccd9c119210717953ff78a
|
e92f5c95c8c17f4b2dc8d754fd212fd4fcc4c8b4
|
/data-raw/frutas.R
|
8c0939124fdf82cdeed1c10efd2d260e80179926
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cienciadedatos/datos
|
de1cab523ce46ed1b1d86dbb4f947ba9e58ff432
|
6008b75bfc68e2e8332fe92c37f5ec59361fa4f4
|
refs/heads/main
| 2023-07-20T12:01:18.032448
| 2023-07-17T12:50:37
| 2023-07-17T15:10:53
| 140,963,726
| 37
| 34
|
CC0-1.0
| 2023-07-17T12:52:14
| 2018-07-14T17:07:44
|
R
|
UTF-8
|
R
| false
| false
| 448
|
r
|
frutas.R
|
# From `dput(datos::frutas)` (245b4af)
frutas <- c(
"banana",
"papaya",
"uva",
"ar\u00e1ndano",
"frutilla",
"mora",
"pl\u00e1tano",
"anan\u00e1",
"pi\u00f1a",
"manzana",
"pera",
"sand\u00eda",
"melocot\u00f3n",
"mel\u00f3n",
"damasco",
"durazno",
"frambuesa",
"mango",
"guayaba",
"guan\u00e1bana",
"maracuy\u00e1",
"fresa",
"coco",
"naranja",
"kiwi"
)
usethis::use_data(frutas, overwrite = TRUE)
|
7ce6b47b858d8fdf8b77f67cc8a35a7121d48da1
|
37520057f8324bbddcebaa3276fdc5c7390bca14
|
/eda/osm_exp_2.R
|
a453327f1d785417e5f242a2031bfed07d7f8d39
|
[] |
no_license
|
JordanJamesSands/melbourne_housing
|
8aa9eac49f5f6ae25a2de4df5793d07a06d150b3
|
e00ed6b81c48b67f5b88bdba102902f5ead1e749
|
refs/heads/master
| 2020-04-29T10:29:44.704673
| 2019-04-30T06:54:41
| 2019-04-30T06:54:41
| 176,063,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,396
|
r
|
osm_exp_2.R
|
# EDA: OpenStreetMap-derived amenity features (schools, trains,
# supermarkets, BBQs) versus Melbourne property prices.
#other scripts
source('project_functions.R')
#new pipeline
source('clean/read_data.R')
source('clean/clean2.R')
# Feature engineering: amenity counts within a radius, and distance to the
# nearest train station. NOTE(review): n_neighbours()/min_dist() are defined
# in the sourced scripts; argument semantics inferred from usage -- confirm
# there.
sdf = n_neighbours(osm_data$school,2000,'nschools_2000')
newdata = merge(property_data,sdf,by='ID')
newdata = merge(newdata,min_dist(osm_data$train,1,'traindist',1000),by='ID')
newdata = merge(newdata,n_neighbours(osm_data$supermarket,1000,'nsuper_1000'),by='ID')
newdata = merge(newdata,n_neighbours(osm_data$bbq,1000,'nbbq_1000'),by='ID')
# Scatter plots of each feature against log price; jitter() and translucent
# points reduce overplotting.
s = newdata[newdata$traindist<1000,]
plot(jitter(s$traindist,2),log(s$price),pch=19,col=rgb(0,0,0,0.2))
plot(jitter(newdata$nbbq_1000,2),log(newdata$price),pch=19,col=rgb(0,0,0,0.2),xlim=c(0,10))
plot(jitter(newdata$nsuper_1000,2),log(newdata$price),pch=19,col=rgb(0,0,0,0.2))
plot(newdata$traindist,log(newdata$price),pch=19,col=rgb(0,0,0,0.2))
plot(jitter(newdata$nschools_2000,2),log(newdata$price),pch=19,col=rgb(0,0,0,0.2))
# Quick univariate regressions on log price.
lm(log(price) ~ nschools_2000,data=newdata) %>% summary
lm(log(price) ~ nbbq_1000,data=newdata) %>% summary
# Re-run the imputation/splitting pipeline, then rebuild the merged data
# (the sourced scripts presumably rebuild property_data -- confirm).
source('eda/imputing_year.R')
source('eda/imputing_ncar.R')
source('eda/imputing_land_area.R')
source('eda/imputing_building_area.R')
source('eda/splitting.R')
source('eda/splitting0.R')
newdata = merge(property_data,sdf,by='ID')
newdata = merge(newdata,n_neighbours(osm_data$bbq,1000,'nbbq_1000'),by='ID')
# Correlations between amenity counts and log price.
cor(newdata$nschools_2000,log(newdata$price))
cor(newdata$nbbq_1000,log(newdata$price))
|
37c7759f167cacd080d79e9dd0d036cf4d5eda91
|
83df92b20aebeeb409aea013078275f77660f4e7
|
/aoc2020_day9.R
|
a254ab0b7b4015e5a004d1762aae0b2eb0e18fa4
|
[] |
no_license
|
nalsalam/aoc2020
|
2ce80f9fe63c52c6142ffe626965c89620d6d607
|
54cc20bb75eb02acb34a849b8d9471f947da11d5
|
refs/heads/main
| 2023-02-01T21:26:34.616763
| 2020-12-22T10:26:53
| 2020-12-22T10:26:53
| 317,584,138
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,065
|
r
|
aoc2020_day9.R
|
# Day 9 - encryption
# Advent of Code 2020, day 9 (XMAS cipher). read_lines(), %>% and map_lgl()
# come from the tidyverse attached below.
library(tidyverse)
### Part 1
### Example
# Worked example with a 5-number preamble: a value is "valid" when it equals
# the sum of some pair of the previous five values.
input <- read_lines(file = "data-naa/input9_test.txt") %>% as.numeric()
valid <-
map_lgl(6:length(input),
~ input[[.x]] %in% combn(input[(.x - 5):(.x - 1)], 2, FUN = sum))
# The invalid entries of the example stream (expected: 127).
input[6:20][!valid]
### Puzzle
# The real puzzle input uses a 25-number preamble.
input <- read_lines(file = "data-naa/input9.txt") %>% as.numeric()
preamble_len <- 25
# Find the first "invalid" number in an XMAS stream (AoC 2020 day 9 part 1).
#
# A number is valid when it equals the sum of some pair drawn from the
# `preamble_len` numbers immediately before it. Returns the first number
# that is not such a pair sum, or NA if every number checks out.
#
# Rewritten in base R (vapply + combn) so the helper has no purrr/dplyr
# dependency, and `!valid` replaces the `valid == FALSE` anti-idiom.
invalid_number <- function(input, preamble_len) {
  candidates <- (preamble_len + 1):length(input)
  # TRUE where input[[i]] equals a pair sum from the preceding window;
  # combn() enumerates all pairs, which makes the check a one-liner.
  valid <- vapply(
    candidates,
    function(i) {
      window <- input[(i - preamble_len):(i - 1)]
      input[[i]] %in% combn(window, 2, FUN = sum)
    },
    logical(1)
  )
  # First of all the invalid numbers; explicit NA when none exist
  # (the original relied on dplyr::first() returning NA for empty input).
  bad <- input[candidates][!valid]
  if (length(bad) == 0) NA_real_ else bad[[1]]
}
# Part 1 answer for the real input (25-number preamble).
part1_answer <- invalid_number(input, 25)
### Part 2
# contiguous set of arbitrary length that adds to the invalid number from Part 1
# search 2, 3, ... at a time
# Re-run on the example data first (preamble of 5, expected answer 127).
input <- read_lines(file = "data-naa/input9_test.txt") %>% as.numeric()
part1_answer <- invalid_number(input, 5)
# Find the encryption weakness (AoC 2020 day 9, part 2).
#
# Scans `input` for a contiguous run of length >= 2 whose values sum to
# `part1_answer`, and returns min(run) + max(run) for the first such run
# found (shortest runs are tried first, matching the original search order).
#
# Fixes the original's acknowledged missing guard ("should guard against no
# answer"): when no run matches, this now stops with a clear error instead
# of eventually indexing past the end of `input`.
search_ans <- function(input, part1_answer) {
  n <- length(input)
  if (n >= 2) {
    for (set_len in 2:n) {
      # every start position that keeps the window inside `input`
      for (start in seq_len(n - set_len + 1)) {
        run <- input[start:(start + set_len - 1)]
        if (sum(run) == part1_answer) {
          return(min(run) + max(run))
        }
      }
    }
  }
  stop("no contiguous run of input sums to ", part1_answer, call. = FALSE)
}
# Example run: the contiguous run summing to 127 is 15,25,47,40 -> 15+47 = 62.
search_ans(input, 127)
### Puzzle
input <- read_lines(file = "data-naa/input9.txt") %>% as.numeric()
preamble_len <- 25
part1_answer <- invalid_number(input, preamble_len)
search_ans(input, part1_answer)
#### Another approach using map_dbl but needed more data to provide the answer
# (this only recovers the window length, not the min+max weakness itself).
set_len <- 2
repeat {
  # Fix: the original referenced an undefined `len`; use length(input).
  # seq_len() is also safe if the difference ever reaches zero.
  if (part1_answer %in%
      map_dbl(seq_len(length(input) - set_len),
              ~ sum(input[.x : (.x + set_len - 1)])
      )) break()
  set_len <- set_len + 1
}
print(set_len) # need other information
|
ef9f91689f0b6e565b1857b4b79a52b7832674d0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/chemometrics/examples/prm.Rd.R
|
95befc604fbae8fcf82b66d0a60bae9437520954
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 163
|
r
|
prm.Rd.R
|
# Auto-extracted example script for chemometrics::prm (robust partial
# least squares regression); mirrors the package help-page example.
library(chemometrics)
### Name: prm
### Title: Robust PLS
### Aliases: prm
### Keywords: multivariate
### ** Examples
# PAC data set ships with the chemometrics package.
data(PAC)
# Robust PLS fit of y on X with a = 5 components.
res <- prm(PAC$X,PAC$y,a=5)
|
f63346c54d2b558387b2af27b310f32cc55b720d
|
d4608310406b4a60580c47c0ccdfaf8c7e58cf22
|
/Paper1_three_way_interaction_graph_jan2021.R
|
a28d1b12c1092841e5685a2f762defd921070e26
|
[] |
no_license
|
marieleyse/paper-Fall-2020
|
d4c511a0cd318e6a10e547e5ce0fb697fb4bfa9d
|
2e543d6c28015dbb2c9255eed0aa19d1588e18a7
|
refs/heads/master
| 2023-05-06T01:58:51.277656
| 2021-06-02T19:23:07
| 2021-06-02T19:23:07
| 297,434,580
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,486
|
r
|
Paper1_three_way_interaction_graph_jan2021.R
|
# Build the analysis data set: load the MAVAN cohort export and standardize
# the continuous covariates.
# NOTE(review): setwd() to a personal folder makes the script non-portable --
# consider relative paths.
setwd("/Users/Marie-Elyse/Downloads")
NEW = read.csv("MAVAN_48M_and_up_jun2020_new.csv")
#CALLED MEAN_CENTERED BUT ZSCORE
# scale() both centers and divides by SD, so these are z-scores despite the
# "mean_centered" prefix; c() drops the matrix attributes scale() returns.
NEW$mean_centered_ADHD = c(scale(NEW$ADHD))
NEW$mean_centered_PRS_0_001_adhd_child = c(scale(NEW$PRS_0_001_adhd_child))
NEW$mean_centered_auc_post_cesd = c(scale(NEW$auc_post_cesd))
NEW$mean_centered_Pren_CESD = c(scale(NEW$Pren_CESD))
NEW$mean_centered_PC1 = c(scale(NEW$PC1))
NEW$mean_centered_PC2 = c(scale(NEW$PC2))
NEW$mean_centered_PC3 = c(scale(NEW$PC3))
NEW$mean_centered_mom_age_birth = c(scale(NEW$mom_age_birth))
# Categorical covariates. NOTE(review): to_factor() presumably comes from
# labelled/sjlabelled -- confirm which package is attached upstream.
NEW$gender_male <- to_factor(NEW$gender_male)
NEW$above_college <- to_factor(NEW$above_college)
NEW$Hamilton <- to_factor(NEW$Hamilton)
NEW$mean_centered_Smoking_During_Pregnancy = c(scale(NEW$Smoking_During_Pregnancy))
NEW$mean_centered_Pren_income4 = c(scale(NEW$Pren_income4))
NEW$mean_centered_conners_mother_hyperactivity_score.72m = c(scale(NEW$conners_mother_hyperactivity_score.72m))
#readr::write_csv(NEW, "/Users/Marie-Elyse/Downloads/MAVAN_SEP_PAPER_2020.csv")
# Print jtools model output with 2 digits.
options("jtools-digits" = 2)
# Three-way interaction model: prenatal depression x postnatal depression
# (AUC of CESD) x child sex on ADHD, adjusting for polygenic risk, genetic
# PCs, maternal age, education, and site.
fit4 <-lm(mean_centered_ADHD ~ mean_centered_Pren_CESD*mean_centered_auc_post_cesd*gender_male + mean_centered_PRS_0_001_adhd_child + mean_centered_PC1 + mean_centered_PC2 + mean_centered_PC3 + mean_centered_mom_age_birth + above_college + Hamilton, data=NEW)
summary(fit4)
# Simple slopes of prenatal depression by sex, faceted by postnatal
# depression (interactions::interact_plot).
# NOTE(review): filename says "points" but plot.points = FALSE -- confirm
# which was intended.
png(filename = 'interaction_pre_points.png', width=1000, height=400)
P=interact_plot(fit4, pred = mean_centered_Pren_CESD, modx = gender_male, mod2 = mean_centered_auc_post_cesd, interval = TRUE, plot.points = FALSE,colors = c("orange", "blue"),
x.label = "Prenatal depression",
y.label = "ADHD",
pred.labels = "Prenatal depression",
modx.labels = c("Girls", "Boys"),
mod2.labels = c(" Low "," Moderate", " High"),
main.title = NULL,
legend.main = "Sex")
# Dotted verticals and the grey rectangle highlight a region of interest on
# the prenatal-depression (z-score) axis.
P +
drop_gridlines() +
# facet_grid(cond ~ mod2) +
geom_vline(aes(xintercept=-0.06),
color="black", linetype="dotted", size=1) +
geom_vline(aes(xintercept= 1.74),
color="black", linetype="dotted", size=1) +
# geom_vline(aes(xintercept=(mean_centered_Pren_CESD==1.74)),
# color="red", linetype="dashed", size=1) +
geom_rect(xmin=-0.06, xmax=1.74, ymin=-2, ymax=4,fill = "gray", colour = NA, alpha=0.02)
dev.off()
#######################
# Summary-statistics helper for ggplot2 (e.g. stat_summary()): returns the
# mean and mean +/- one SD in the (y, ymin, ymax) form ggplot2 expects.
# Fix: the parameter was previously ignored -- the body read a global `x`,
# so the function summarised whatever `x` happened to be in the workspace.
data_summary <- function(x) {
  mu <- mean(x)
  sigma1 <- mu - sd(x)
  sigma2 <- mu + sd(x)
  return(c(y = mu, ymin = sigma1, ymax = sigma2))
}
# Bucket postnatal depression (z-scored AUC CESD) into low/average/high
# using mean +/- 1 SD cutoffs.
# NOTE(review): values exactly equal to a cutoff match no clause and become
# NA -- confirm that is acceptable.
x <- NEW$mean_centered_auc_post_cesd
NEW$auc_post_cesd_3group <-
  case_when(x > mean(x)+sd(x) ~ "high",
            x < mean(x)+sd(x) & x > mean(x)-sd(x) ~ "average",
            x < mean(x)-sd(x) ~ "low")
# mm = mean(moderator)
# sdm = sd(moderator)
# new.df = data.frame(moderator=rep(c(mm-sdm, mm, mm+sdm), times=2),
#                     predictor=rep(c("C","E"), each=3))
# Johnson-Neyman plot of the simple slopes (interactions::sim_slopes).
png(filename = 'jnplot_true.png', width=1000, height=400)
sim_slopes(fit4, pred = mean_centered_Pren_CESD, modx = gender_male, mod2 = mean_centered_auc_post_cesd,
           jnplot = TRUE)
# Fix: was `dev.off` with no parentheses, which only printed the function
# and never closed the png device (leaving later plots on the wrong device).
dev.off()
# Same prenatal-by-sex plot as above but without observed points.
# NOTE(review): both a png and a pdf device are opened here, but only one
# dev.off() is called below -- the plot goes to the pdf (last opened device)
# and the png device is left open. Confirm whether both outputs were wanted.
png(filename = 'interaction_pre_wopoints.png', width=1000, height=400)
pdf("interaction_pre_wopoints.pdf", width=10, height=4)
fit4 <-lm(mean_centered_ADHD ~ mean_centered_Pren_CESD*mean_centered_auc_post_cesd*gender_male + mean_centered_PRS_0_001_adhd_child + mean_centered_PC1 + mean_centered_PC2 + mean_centered_PC3 + mean_centered_mom_age_birth + above_college + Hamilton, data=NEW)
P=interact_plot(fit4, pred = mean_centered_Pren_CESD, modx = gender_male, mod2 = mean_centered_auc_post_cesd, interval = TRUE, plot.points = FALSE,colors = c("orange", "blue"),
x.label = "Prenatal depression",
y.label = "ADHD",
pred.labels = "Prenatal depression",
modx.labels = c("Girls","Boys"),
mod2.labels = c("i) Low","ii) Moderate", "iii) High"),
main.title = "Postnatal depression",
legend.main = "Sex")+
theme(plot.title = element_text(hjust = 0.5))
P +
drop_gridlines() +
# theme(main.title = element_text(hjust = 0.5))+
# facet_grid(cond ~ mod2) +
# facet_wrap(~mod2) +
#facet_wrap(. ~ mean_centered_auc_post_cesd) +
#facet_grid(mean_centered_auc_post_cesd ~ .) +
geom_vline(aes(xintercept=-0.06),
color="black", linetype="dotted", size=1) +
geom_vline(aes(xintercept= 1.74),
color="black", linetype="dotted", size=1)
#+ facet_wrap(. ~ mean_centered_auc_post_cesd)
#labs(title = " \n \n ", x= "", y="", color="", fill="")
# geom_vline(aes(xintercept=(mean_centered_Pren_CESD==1.74)),
# color="red", linetype="dashed", size=1) +
# geom_rect(xmin=-0.06, xmax=1.74, ymin=-2, ymax=4,fill = "gray", colour = NA, alpha=0.02)
dev.off()
# mod2.values
# mean-plus-minus
# plus-minus
# If the moderator is a factor variable and modx.values is NULL, each level of the factor is included. You may specify any subset of the factor levels (e.g., c("Level 1", "Level 3")) as long as there is more than 1.
# ann_text <- data.frame(mpg = 15,wt = 5,lab = "Text",
# cyl = factor(8,levels = c("4","6","8")))
# p + geom_text(data = ann_text,label = "Text")
# ann_text <- data.frame(mpg = c(14,15),wt = c(4,5),lab=c("text1","text2"),
# cyl = factor(c(6,8),levels = c("4","6","8")))
# p + geom_text(data = ann_text,aes(label =lab) )
# types <- c("solid", "4242", "2222", "dotdash", "dotted", "twodash",
# "12223242", "F282", "F4448444", "224282F2", "F1")
# # If faceting by mod2, add that to formula
# if (!is.null(mod2)) {
# facet_form <- paste(facet_form,
# ifelse(facet_form == "~", yes = "mod2_group",
# no = "+ mod2_group"))
# if (!is.null(modgroup)) {
# modgroup <- "modgroup"
# } else {
# modgroup <- "mod2group"
# }
# }
#
# if (do_facets == TRUE) {
# if (!is.null(mod2) & (linearity.check == TRUE | facet.modx == TRUE)) {
# num_unique <- nrow(unique(pm[c("modx_group", "mod2_group")]))
# if (num_unique %in% c(3, 6, 9)) {
# # 1 x 3, 2 x 3, or (most commonly) 3 x 3
# num_cols <- 3
# } else if (num_unique %in% c(4)) {
# # 2 x 2
# num_cols <- 2
# } else { # let ggplot2 decide
# num_cols <- NULL
# }
# } else {num_cols <- NULL}
# p <- p + facet_wrap(as.formula(facet_form), ncol = num_cols)
# }
#
# if (linearity.check == TRUE) {
# p <- p + stat_smooth(data = d,
# aes(x = !! pred, y = !! resp, group = !! grp),
# method = "loess", size = 1,
# show.legend = FALSE, inherit.aes = FALSE,
# se = FALSE, span = 2, geom = "line",
# alpha = 0.6, color = "red")
# }
#######################
#######################
# Swapped moderators: postnatal depression on the x-axis, prenatal
# depression as the moderator, faceted by sex; observed points shown.
png(filename = 'interaction_plot_post_auc_cesd_test_points_BY_SEX_SWITCH.png', width=1000, height=400)
P = interact_plot(fit4, pred = mean_centered_auc_post_cesd, modx = mean_centered_Pren_CESD, mod2 = gender_male, interval = TRUE, plot.points = TRUE,colors = c("orange", "blue"),
x.label = "Postnatal depression",
y.label = "ADHD",
pred.labels = "Postnatal depression",
mod2.labels = c("Girls","Boys"),
modx.labels = c(" Low "," Moderate", " High"),
main.title = NULL,
legend.main = "Prenatal depression")
# Region-of-interest markers on the postnatal-depression (z-score) axis.
P +
drop_gridlines() +
# facet_grid(cond ~ mod2) +
geom_vline(aes(xintercept=-0.06),
color="black", linetype="dotted", size=1) +
geom_vline(aes(xintercept= 1.74),
color="black", linetype="dotted", size=1) +
# geom_vline(aes(xintercept=(mean_centered_Pren_CESD==1.74)),
# color="red", linetype="dashed", size=1) +
geom_rect(xmin=-0.06, xmax=1.74, ymin=-2, ymax=4,fill = "gray", colour = NA, alpha=0.02)
dev.off()
#######################
# Prenatal depression on the x-axis, postnatal depression tertiles as
# moderator, faceted by sex; observed points shown.
png(filename = 'interaction_plot_points_BY_SEX.png', width=1000, height=400)
P = interact_plot(fit4, pred = mean_centered_Pren_CESD, modx = mean_centered_auc_post_cesd, mod2 = gender_male, interval = TRUE, plot.points = TRUE,colors = c("orange", "blue"),
x.label = "Prenatal depression",
y.label = "ADHD",
pred.labels = "Prenatal depression",
mod2.labels = c("Girls","Boys"),
modx.labels = c("i) Low postnatal depression ","ii) Moderate postnatal depression", "iii) High postnatal depression"),
main.title = NULL,
legend.main = "Sex")
# Dotted verticals mark the same prenatal-depression region of interest.
P +
drop_gridlines() +
# facet_grid(cond ~ mod2) +
geom_vline(aes(xintercept=-0.06),
color="black", linetype="dotted", size=1) +
geom_vline(aes(xintercept= 1.74),
color="black", linetype="dotted", size=1)
# geom_vline(aes(xintercept=(mean_centered_Pren_CESD==1.74)),
# color="red", linetype="dashed", size=1) +
#geom_rect(xmin=-0.06, xmax=1.74, ymin=-2, ymax=4,fill = "gray", colour = NA, alpha=0.02)
dev.off()
#######################
# Postnatal depression on the x-axis, sex as moderator, faceted by prenatal
# depression level; observed points shown.
png(filename = 'interaction_plot_post_points.png', width=1000, height=400)
P=interact_plot(fit4, pred = mean_centered_auc_post_cesd,modx = gender_male, mod2 = mean_centered_Pren_CESD, interval = TRUE, plot.points = TRUE, colors = c("orange", "blue"),
x.label = "Postnatal depression",
y.label = "ADHD",
pred.labels = "Postnatal depression",
mod2.labels = c("i) Low prenatal depression ","ii) Moderate prenatal depression", "iii) High prenatal depression"),
modx.labels = c("Girls","Boys"),
main.title = NULL,
legend.main = "Sex")
P +
drop_gridlines() +
# facet_grid(cond ~ mod2) +
geom_vline(aes(xintercept=-0.06),
color="black", linetype="dotted", size=1) +
geom_vline(aes(xintercept= 1.74),
color="black", linetype="dotted", size=1)
# geom_vline(aes(xintercept=(mean_centered_Pren_CESD==1.74)),
# color="red", linetype="dashed", size=1) +
# geom_rect(xmin=-0.06, xmax=1.74, ymin=-2, ymax=4,fill = "gray", colour = NA, alpha=0.02)
dev.off()
#######################
######################
######################
######################
# Same postnatal-by-sex plot as above, without observed points. Note the
# reference lines here use -0.14 / 1.33 (postnatal z-score axis) rather
# than -0.06 / 1.74.
png(filename = 'interaction_plot_post_wopoints.png', width=1000, height=400)
P=interact_plot(fit4, pred = mean_centered_auc_post_cesd,modx = gender_male, mod2 = mean_centered_Pren_CESD, interval = TRUE, plot.points = FALSE, colors = c("orange", "blue"),
x.label = "Postnatal depression",
y.label = "ADHD",
pred.labels = "Postnatal depression",
mod2.labels = c("i) Low","ii) Moderate", "iii) High"),
modx.labels = c("Girls","Boys"),
main.title = "Prenatal depression",
legend.main = "Sex")+
theme(plot.title = element_text(hjust = 0.5))
P +
drop_gridlines() +
# facet_grid(cond ~ mod2) +
geom_vline(aes(xintercept=-0.14),
color="black", linetype="dotted", size=1) +
geom_vline(aes(xintercept=1.33),
color="black", linetype="dotted", size=1)
# geom_vline(aes(xintercept=(mean_centered_Pren_CESD==1.74)),
# color="red", linetype="dashed", size=1) +
# geom_rect(xmin=-0.06, xmax=1.74, ymin=-2, ymax=4,fill = "gray", colour = NA, alpha=0.02)
dev.off()
#######################
#######################
# png(filename = 'interaction_plot_post_auc_cesd_test2_wopoints.png', width=1000, height=400)
# interact_plot(fit4, pred = mean_centered_Pren_CESD, modx = gender_male, mod2 = mean_centered_auc_post_cesd, interval = TRUE, plot.points = FALSE, colors ="Rainbow",theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank()))
# dev.off()
# P +
# drop_gridlines() +
# geom_vline(mean_centered_auc_post_cesd = c(-0.95, -0.95), xintercept=c(-0.06, 1.74), linetype='dashed', size=1) +
# geom_hline(mean_centered_auc_post_cesd = c(-0.95, -0.95), yintercept=c(-0.14, 1.33), linetype='solid', size=1)
# dev.off()
#
# data_vline <- DF(
# xintercept = c(-0.06, 1.74),
# mean_centered_auc_post_cesd = c(-0.95, -0.95)
# )
# data_hline <- DF(
# yintercept = c(-0.14, 1.33),
# mean_centered_auc_post_cesd = c(-0.95, -0.95)
# )
# P +
# drop_gridlines() +
# geom_vline(data = data_vline, aes(xintercept = xintercept), linetype='dashed', size=1) +
# geom_hline(data = data_hline, aes(yintercept = yintercept), linetype='solid', size=1) +
# facet_wrap(~mean_centered_auc_post_cesd)
# dev.off()
#
# geom_vline(data = data_vline, aes(xintercept = xintercept), linetype='dashed', size=1) +
# geom_hline(data = data_hline, aes(yintercept = yintercept), linetype='solid', size=1)
# facet_wrap(~mean_centered_auc_post_cesd)
#
# geom_vline(xintercept = c(-0.06, 1.74), mod2 = c(-0.95, -0.95), aes(xintercept = xintercept), linetype='dashed', size=1)
#
#
# data_vline <- DF(
# xintercept = c(-0.06, 1.74),
# mean_centered_auc_post_cesd = c(-0.95, -0.95)
# )
# data_hline <- DF(
# yintercept = c(-0.14, 1.33),
# mean_centered_auc_post_cesd = c(-0.95, -0.95)
# )
#
# P +
# drop_gridlines() +
# geom_vline(xintercept=c(-0.06, 1.74), linetype='dashed', size=1) +
# geom_hline(yintercept=c(-0.14, 1.33), linetype='solid', size=1)
# dev.off()
#
#
#
# P + geom_vline(xintercept=c(-0.06, 1.74), linetype='dashed', size=.1)
# P + drop_gridlines()
#
# mod2.values="minus",
#
# P + geom_vline(xintercept=c(-0.06, 1.74), linetype='dashed', size=.1)
#
# mod2.values = "terciles",
# mod2.values("-1 SD"),
# mod2.values("-1"), xintercept=c(-0.06, 1.74)
# mod2_group=c("Low M","High M"), xintercept=c(1,3)
# + geom_vline(data=test,aes(xintercept=xintercept))
#
# test <- data.frame(mod2_group=1,xintercept=c(-1.24))
# P + geom_vline(data=test,aes(xintercept=xintercept))
#
# + geom_line()
# + drop_gridlines()
# + geom_vline(xintercept = -1.24)
# # p + geom_vline(xintercept = -1.24)
# # p + geom_vline(xintercept = 3.74)
# # p
# P + geom_vline(xintercept=c(-0.06, 1.74))
#
# geom_vline(xintercept=c(-0.06, 1.74), linetype='solid', size=1) +
# geom_hline(yintercept=c(-0.14, 1.33), linetype='dashed', size=1)
|
35d8a43320ebb75110a484121c037c1eab786836
|
fa6e9410f2b66410e68c906b459574501e040ce8
|
/man/deprivation_decile.Rd
|
51f3c0545cac691e7f035407663c1137e7c135f6
|
[] |
no_license
|
carlganz/fingertipsR
|
9df8df727b86ee73cf3081ca1d1171655d9f7c32
|
4e882aec8fd924011f5e29c9bd5ebbe0c3680c6c
|
refs/heads/master
| 2021-08-30T00:37:00.760310
| 2017-12-15T11:39:36
| 2017-12-15T11:39:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,617
|
rd
|
deprivation_decile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprivation_decile.R
\name{deprivation_decile}
\alias{deprivation_decile}
\title{Deprivation deciles}
\usage{
deprivation_decile(AreaTypeID = 102, Year = 2015)
}
\arguments{
\item{AreaTypeID}{Numeric value, limited to either 102 (counties and unitary
authorities), 101 (local authority districts and unitary authorities) or 7 (General Practice); default is 102}
\item{Year}{Numeric value, representing the year of IMD release to be applied, limited to either 2010 or 2015; default is 2015}
}
\value{
A lookup table providing deprivation decile and area code
}
\description{
Outputs a data frame allocating a deprivation decile to each area code based on the Indices of Multiple Deprivation (IMD) produced by the Department for Communities and Local Government
}
\examples{
# Return 2015 deciles for counties and unitary authorities
deprivation_decile()
# Return 2010 deciles for local authority districts and unitary authorities
deprivation_decile(101, 2010)
}
\seealso{
\code{\link{indicators}} for indicator lookups,
\code{\link{profiles}} for profile lookups
\code{\link{indicator_metadata}} for the metadata for each indicator and
\code{\link{area_types}} for area types and their parent mappings and
\code{\link{category_types}} for category lookups and
\code{\link{indicator_areatypes}} for indicators by area types lookups
Other lookup functions: \code{\link{area_types}},
\code{\link{category_types}},
\code{\link{indicator_areatypes}},
\code{\link{indicator_metadata}},
\code{\link{indicators}}, \code{\link{profiles}}
}
|
f69c45c5a1e68642b70bec596a1c32d8f370668c
|
8089f496afe6bf15f774539d0199331e9ad1e337
|
/man/restrict_coef.Rd
|
934597f19292b6e0f1afe5681132abfc3f9a9232
|
[] |
no_license
|
MHaringa/insurancerating
|
365235e34bc294053502590afb8f3070a99d7aba
|
8ce689ee8f6d0df0902f496a57813889f9c245f9
|
refs/heads/master
| 2023-01-07T14:43:12.863949
| 2022-12-23T09:47:49
| 2022-12-23T09:47:49
| 147,171,263
| 58
| 14
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,620
|
rd
|
restrict_coef.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_refinement.R
\name{restrict_coef}
\alias{restrict_coef}
\title{Restrict coefficients in the model}
\usage{
restrict_coef(model, restrictions)
}
\arguments{
\item{model}{object of class glm/restricted}
\item{restrictions}{data.frame with two columns containing restricted data.
The first column, with the name of the risk factor as column name, must
contain the levels of the risk factor. The second column must contain the
restricted coefficients.}
}
\value{
Object of class restricted.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
Add restrictions, like a bonus-malus structure, on the risk
factors used in the model. \code{restrict_coef()} must always be followed
by \code{update_glm()}.
}
\details{
Although restrictions could be applied either to the frequency or
the severity model, it is more appropriate to impose the restrictions
on the premium model. This can be achieved by calculating the pure
premium for each record (i.e. expected number of claims times the expected
claim amount), then fitting an "unrestricted" Gamma GLM to the pure
premium, and then imposing the restrictions in a final "restricted" Gamma
GLM.
}
\examples{
\dontrun{
# Add restrictions to risk factors for region (zip) -------------------------
# Fit frequency and severity model
library(dplyr)
freq <- glm(nclaims ~ bm + zip, offset = log(exposure), family = poisson(),
data = MTPL)
sev <- glm(amount ~ bm + zip, weights = nclaims,
family = Gamma(link = "log"),
data = MTPL \%>\% filter(amount > 0))
# Add predictions for freq and sev to data, and calculate premium
premium_df <- MTPL \%>\%
add_prediction(freq, sev) \%>\%
mutate(premium = pred_nclaims_freq * pred_amount_sev)
# Restrictions on risk factors for region (zip)
zip_df <- data.frame(zip = c(0,1,2,3), zip_rst = c(0.8, 0.9, 1, 1.2))
# Fit unrestricted model
burn <- glm(premium ~ bm + zip, weights = exposure,
family = Gamma(link = "log"), data = premium_df)
# Fit restricted model
burn_rst <- burn \%>\%
restrict_coef(., zip_df) \%>\%
update_glm()
# Show rating factors
rating_factors(burn_rst)
}
}
\seealso{
\code{\link[=update_glm]{update_glm()}} for refitting the restricted model,
and \code{\link[=autoplot.restricted]{autoplot.restricted()}}.
Other update_glm:
\code{\link{smooth_coef}()}
}
\author{
Martin Haringa
}
\concept{autoplot.restricted}
\concept{update_glm}
|
7e8ff0c39248b75b2974b34a81dba2cee2c50141
|
a1f5c0a6e87880dda823d6f9d73dd9c8becbe660
|
/scripts/archive/modelingNo90CorrCovv2.R
|
c0060eb5cedb9f0005dd2f316f8913ff809706d8
|
[] |
no_license
|
karistenneson/lidarNew
|
da2296cfef98641838035d54550e68cb3c9ce4ec
|
5eeef887d69ee7f734e8a7c844707250d2165304
|
refs/heads/master
| 2021-09-07T12:12:11.493762
| 2018-02-22T17:43:18
| 2018-02-22T17:43:18
| 104,791,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 48,385
|
r
|
modelingNo90CorrCovv2.R
|
### This file is for running the BMA components of the analysis
## Written by MS Patterson (maspatte@uw.edu) and Karis Tenneson (krtenneson@fs.fed.us)
# Last updated: Oct 20, 2017
### Bring in data
#setwd('C:\\Users\\krtenneson\\Desktop\\lidarPaper\\lidarNew\\scripts')
#setwd('\\\\166.2.126.25\\rseat\\Programs\\Reimbursibles\\fy2016\\R3_lidar_equation_transferability\\Analysis\\VersionControl\\lidarNew\\scripts')
#setwd("~/Documents/R/lidarNew/scripts") #Mac
#setwd("~/R/lidarNew/scripts") #WinCampus
### source data
source(file="DataPrepwithZeroesv2.R")
### source functions
source(file="functions.R")
# Treat the ecological response unit label as categorical in the model,
# validation, and independent-validation data sets.
data.mod$R3ERUlabel <- as.factor(data.mod$R3ERUlabel)
data.val$R3ERUlabel <- as.factor(data.val$R3ERUlabel)
data.val.ind$R3ERUlabel <- as.factor(data.val.ind$R3ERUlabel)
### Load required packages
library(cvTools)   # cross-validation utilities
library(BAS)       # Bayesian adaptive sampling / model averaging
library(corrgram)  # correlogram plots
#library(robustbase)
# Quick sanity check of the model-construction data.
head(data.mod); dim(data.mod)
#data.mod <- data.mod[data.mod$STBIOMS != 0 , ]
#data.val <- data.val[data.val$STBIOMS != 0 , ]
#Total area of model construction data set is 713168
# Convert each plot's footprint (fpc) to a percent of the total modeled area,
# then compute per-stratum sample sizes and design weights
# (percent area represented per sampled plot in the stratum, x 100).
data.mod$PercentArea <- (data.mod$fpc / 713168)*100
data.mod$SmplSize <- data.mod$SmplWts <- rep(0, length(data.mod$fpc))
strata<-unique(data.mod$Stratum)
# seq_along() instead of 1:length(): safe if `strata` is ever empty.
for (i in seq_along(strata)){
SampleSize <- length(data.mod$Stratum[data.mod$Stratum == strata[i]])
data.mod$SmplSize[data.mod$Stratum == strata[i]] <- SampleSize
data.mod$SmplWts[data.mod$Stratum == strata[i]] <- data.mod$PercentArea[data.mod$Stratum == strata[i]]/SampleSize* 100
}
## remove these columns for the models:
## 'Site','Forest',
## "PlotSizeAcres", "fpc", "Stratum",
## R3ERUcodeFull, 'R3ERUlabelName'
## > 95% corr with P60: "Elev_ave", "Elev_P40", "Elev_P50","Elev_P70", "Elev_P75", "Elev_P80",
## > 95% corr with P90: "Elev_P95", "Elev_P99",
## > 95% corr with P30: "Elev_P20", "Elev_P25",
## > 95% corr with stddev: "Elev_variance", "Elev_IQ", "Elev_AAD", "Elev_L2",
## > 95% corr with Elev_LCV: "Elev_CV",
## > 95% corr with Elev_Lskewness: "Elev_skewness",
## > 95% corr with pct_all_returns_above_mean: "Pct_first_returns_above_mean", "All_returns_above_mean_div_Total_first_returns_x_100"
## > 95% corr with Pct_all_returns_above_ht: "Pct_first_returns_above_ht",
## > 95% corr with All_returns_above_mode_div_Total_first_returns_x_100: "pct_all_returns_above_mode", "Pct_first_returns_above_mode",
#corrgram(DATA.mod[ , c(1, 3, 13:18)], type="data", lower.panel=panel.shadeNtext, upper.panel=panel.signif, main="height")
#corrgram(DATA.mod[ , c(1, 4:12)], type="data", lower.panel=panel.shadeNtext, upper.panel=panel.signif, main="shape")
#corrgram(DATA.mod[ , c(1, 19:22)], type="data", lower.panel=panel.shadeNtext, upper.panel=panel.signif, main="density")
# Predictor pool for the BMA models: responses (biomass Mg/ha, volume),
# LiDAR height/shape/density metrics (pre-screened for high correlation --
# see the notes above), topography, NDVI amplitude, and ecological unit.
# NOTE(review): the bas.lm formulas below interact Elev_P05, which is NOT in
# this subset -- confirm the intended percentile list before running.
predictorSubset <- c("STBIOMSha", "TCUmha",
"Elev_stddev", "Elev_kurtosis", "Elev_MAD_median", "Elev_MAD_mode", "Elev_L3", "Elev_L4", "Elev_LCV", "Elev_Lskewness", "Elev_Lkurtosis",
"Elev_mode", "Elev_P01", "Elev_P10", "Elev_P30", "Elev_P60", "Elev_P90",
"Pct_all_returns_above_ht", "all_returns_above_ht_div_Total_first_returns_x_100", "pct_all_returns_above_mean", "All_returns_above_mode_div_Total_first_returns_x_100",
"elevation", "aspect", "slope", "NDVI_Amp", "R3ERUlabel")
DATA.mod <- data.mod[ , predictorSubset]
# ".trans" copies shift biomass by +1 so log(STBIOMSha) is defined at zero.
DATA.mod.trans<-DATA.mod
DATA.mod.trans$STBIOMSha <- DATA.mod.trans$STBIOMSha + 1
DATA.val <- data.val[ , predictorSubset]
DATA.val.trans<-DATA.val
DATA.val.trans$STBIOMSha <- DATA.val.trans$STBIOMSha + 1
######################################################
#weights = data.mod$SmplWts,
BioMass.Mod <- bas.lm(STBIOMSha ~ .
+ Elev_mode * Pct_all_returns_above_ht + Elev_mode * pct_all_returns_above_mean + Elev_mode * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_mode * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P01 * Pct_all_returns_above_ht + Elev_P01 * pct_all_returns_above_mean + Elev_P01 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P01 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P05 * Pct_all_returns_above_ht + Elev_P05 * pct_all_returns_above_mean + Elev_P05 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P05 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P10 * Pct_all_returns_above_ht + Elev_P10 * pct_all_returns_above_mean + Elev_P10 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P10 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P30 * Pct_all_returns_above_ht + Elev_P30 * pct_all_returns_above_mean + Elev_P30 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P60 * Pct_all_returns_above_ht +Elev_P60 * pct_all_returns_above_mean + Elev_P60 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P60 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P90 * Pct_all_returns_above_ht + Elev_P90 * pct_all_returns_above_mean + Elev_P90 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P90 * All_returns_above_mode_div_Total_first_returns_x_100
-TCUmha, weights = data.mod$SmplWts,
data=DATA.mod,
prior="hyper-g",
alpha = 3,
modelprior=tr.poisson(10,30),
method="MCMC+BAS")
# Full variable pool, no transform truncated poisson prior, hyper-g
BioMass.Mod.no <- bas.lm(STBIOMSha ~ .
+ Elev_mode * Pct_all_returns_above_ht + Elev_mode * pct_all_returns_above_mean + Elev_mode * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_mode * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P01 * Pct_all_returns_above_ht + Elev_P01 * pct_all_returns_above_mean + Elev_P01 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P01 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P05 * Pct_all_returns_above_ht + Elev_P05 * pct_all_returns_above_mean + Elev_P05 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P05 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P10 * Pct_all_returns_above_ht + Elev_P10 * pct_all_returns_above_mean + Elev_P10 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P10 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P30 * Pct_all_returns_above_ht + Elev_P30 * pct_all_returns_above_mean + Elev_P30 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P60 * Pct_all_returns_above_ht +Elev_P60 * pct_all_returns_above_mean + Elev_P60 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P60 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P90 * Pct_all_returns_above_ht + Elev_P90 * pct_all_returns_above_mean + Elev_P90 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P90 * All_returns_above_mode_div_Total_first_returns_x_100
-TCUmha,
data=DATA.mod,
prior="hyper-g",
alpha = 3,
modelprior=tr.poisson(10,30),
method="MCMC+BAS")
######################################################
#weights = data.mod$SmplWts,
# Log-transformed response (data already shifted by +1 in DATA.mod.trans so
# log() is finite at zero biomass), same interaction pool, WITH sample weights.
# NOTE(review): weights come from data.mod$SmplWts while the model data is
# DATA.mod.trans -- this assumes both frames have the same rows in the same
# order; confirm upstream.
BioMass.Mod.trans <- bas.lm(log(STBIOMSha)~ .
+ Elev_mode * Pct_all_returns_above_ht + Elev_mode * pct_all_returns_above_mean + Elev_mode * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_mode * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P01 * Pct_all_returns_above_ht + Elev_P01 * pct_all_returns_above_mean + Elev_P01 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P01 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P05 * Pct_all_returns_above_ht + Elev_P05 * pct_all_returns_above_mean + Elev_P05 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P05 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P10 * Pct_all_returns_above_ht + Elev_P10 * pct_all_returns_above_mean + Elev_P10 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P10 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P30 * Pct_all_returns_above_ht + Elev_P30 * pct_all_returns_above_mean + Elev_P30 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P60 * Pct_all_returns_above_ht +Elev_P60 * pct_all_returns_above_mean + Elev_P60 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P60 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P90 * Pct_all_returns_above_ht + Elev_P90 * pct_all_returns_above_mean + Elev_P90 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P90 * All_returns_above_mode_div_Total_first_returns_x_100
-TCUmha, weights = data.mod$SmplWts,
data = DATA.mod.trans,
prior="hyper-g",
alpha = 3,
modelprior=tr.poisson(10,30),
method="MCMC+BAS")
# Full variable pool, truncated poisson prior, hyper-g
# Same log-response selection run WITHOUT sample weights.
BioMass.Mod.trans.no <- bas.lm(log(STBIOMSha)~ .
+ Elev_mode * Pct_all_returns_above_ht + Elev_mode * pct_all_returns_above_mean + Elev_mode * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_mode * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P01 * Pct_all_returns_above_ht + Elev_P01 * pct_all_returns_above_mean + Elev_P01 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P01 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P05 * Pct_all_returns_above_ht + Elev_P05 * pct_all_returns_above_mean + Elev_P05 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P05 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P10 * Pct_all_returns_above_ht + Elev_P10 * pct_all_returns_above_mean + Elev_P10 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P10 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P30 * Pct_all_returns_above_ht + Elev_P30 * pct_all_returns_above_mean + Elev_P30 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P60 * Pct_all_returns_above_ht +Elev_P60 * pct_all_returns_above_mean + Elev_P60 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P60 * All_returns_above_mode_div_Total_first_returns_x_100
+ Elev_P90 * Pct_all_returns_above_ht + Elev_P90 * pct_all_returns_above_mean + Elev_P90 * all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P90 * All_returns_above_mode_div_Total_first_returns_x_100
-TCUmha,
data = DATA.mod.trans,
prior="hyper-g",
alpha = 3,
modelprior=tr.poisson(10,30),
method="MCMC+BAS")
# Log-response selection with main effects only (no interaction pool),
# no weights.
BioMass.Mod.trans.noInt <- bas.lm(log(STBIOMSha)~ .
-TCUmha,
data = DATA.mod.trans,
prior="hyper-g",
alpha = 3,
modelprior=tr.poisson(10,30),
method="MCMC+BAS")
####################################################
####################################################
## Look up covariates retained in the Median Probability Model of each BAS
## fit: variables with posterior inclusion probability above 0.5; the [-1]
## drops the intercept. Each call autoprints its character vector.
median_model_vars <- function(fit) {
  fit$namesx[which(fit$probne0 > 0.5)][-1]
}
median_model_vars(BioMass.Mod)
median_model_vars(BioMass.Mod.no)
median_model_vars(BioMass.Mod.trans)
median_model_vars(BioMass.Mod.trans.no)
median_model_vars(BioMass.Mod.trans.noInt)
# Can also call the Median Probability Model like this:
#MPM <- predict(BioMass.Mod.no, estimator="MPM")
#BioMass.Mod.no$namesx[MPM$bestmodel+1][-1]
####################################################
####################################################
## Build 0/1 indicator (dummy) columns for the R3 ERU class labels on both the
## model-fitting and validation frames. The baseline class is the one with all
## four indicators at zero. `%in%` never yields NA, so rows with a missing
## label get 0 -- the same result as the original rep(0)-then-assign version.
## Levels are created in G, F, E, B order to match the original right-to-left
## chained assignment (keeps data.mod's column order identical).
for (eru_level in c('G', 'F', 'E', 'B')) {
  dummy_name <- paste0('R3ERUlabel', eru_level)
  data.mod[[dummy_name]] <- as.numeric(data.mod$R3ERUlabel %in% eru_level)
  data.val[[dummy_name]] <- as.numeric(data.val$R3ERUlabel %in% eru_level)
}
## Modelling column subset: chosen predictors, the ERU dummies, and the
## survey-design bookkeeping columns (fpc, Stratum).
## NOTE(review): this redefines DATA.mod mid-script; the bas.lm fits above
## were run against the earlier definition.
keep_cols <- c(predictorSubset, 'R3ERUlabelB', 'R3ERUlabelE', 'R3ERUlabelF',
               'R3ERUlabelG', 'fpc', 'Stratum')
DATA.mod <- data.mod[, keep_cols]
DATA.val <- data.val[, keep_cols]
## Shifted copies for the log-transformed models: STBIOMSha + 1 keeps log()
## finite for zero-biomass plots.
DATA.mod.trans <- DATA.mod
DATA.mod.trans$STBIOMSha <- DATA.mod.trans$STBIOMSha + 1
DATA.val.trans <- DATA.val
DATA.val.trans$STBIOMSha <- DATA.val.trans$STBIOMSha + 1
## Stratified, unclustered survey designs for the raw and shifted frames.
data.svy <- svydesign(ids = ~1, data = DATA.mod, fpc = DATA.mod$fpc, strata = DATA.mod$Stratum)
data.svy.trans <- svydesign(ids = ~1, data = DATA.mod.trans, fpc = DATA.mod.trans$fpc, strata = DATA.mod.trans$Stratum)
data.svy.val <- svydesign(ids = ~1, data = DATA.val, fpc = DATA.val$fpc, strata = DATA.val$Stratum)
data.svy.val.trans <- svydesign(ids = ~1, data = DATA.val.trans, fpc = DATA.val.trans$fpc, strata = DATA.val.trans$Stratum)
########################################
## No transformation (with Zeroes)
# Refit the BAS median-probability models as design-based GLMs (svyglm) so
# that inference honors the stratified survey design.
# Model #1: untransformed response, variables selected WITH sample weights.
# NOTE(review): one term uses `:` (Elev_P05:Pct_all_returns_above_ht, the
# interaction only) while the rest use `*` (main effects + interaction) --
# confirm that is intentional.
MedianBASModel_Weights <- svyglm(STBIOMSha ~ Elev_L4 + Elev_LCV +
Elev_P05 + Elev_P10 + Elev_P30 + Elev_P90 +
all_returns_above_ht_div_Total_first_returns_x_100 + All_returns_above_mode_div_Total_first_returns_x_100 +
aspect + NDVI_Amp +
Elev_mode*Pct_all_returns_above_ht +
Elev_mode*pct_all_returns_above_mean + Elev_mode*All_returns_above_mode_div_Total_first_returns_x_100 +
Elev_P01*Pct_all_returns_above_ht + Elev_P01*pct_all_returns_above_mean +
Elev_P01*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P01*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P05:Pct_all_returns_above_ht + Elev_P05*pct_all_returns_above_mean + Elev_P05*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P10*Pct_all_returns_above_ht + Elev_P10*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P30*pct_all_returns_above_mean + Elev_P30*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P60*all_returns_above_ht_div_Total_first_returns_x_100 + R3ERUlabelE + R3ERUlabelF + R3ERUlabelG
,
design = data.svy)
# Model #2: untransformed response, variables selected WITHOUT weights.
MedianBASModel_NoWeight <- svyglm(STBIOMSha ~ Elev_MAD_median + Elev_L3 + Elev_Lskewness + Elev_P60 + Elev_P30*pct_all_returns_above_mean + Elev_P90*Pct_all_returns_above_ht,
design = data.svy)
## log transformed models
# Model #3: log response (data shifted +1), selection WITH weights.
MedianBASModel_transWeights <- svyglm(log(STBIOMSha) ~ Elev_MAD_median + Elev_MAD_mode + Elev_L4 + Elev_Lskewness + Elev_P60 + Elev_P90 + all_returns_above_ht_div_Total_first_returns_x_100 + pct_all_returns_above_mean + All_returns_above_mode_div_Total_first_returns_x_100 + elevation + aspect + NDVI_Amp + Elev_mode*Pct_all_returns_above_ht + Elev_mode*pct_all_returns_above_mean + Elev_mode*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_mode*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P01*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30*Pct_all_returns_above_ht + Elev_P30*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P60*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P90*All_returns_above_mode_div_Total_first_returns_x_100 + R3ERUlabelB+ R3ERUlabelE + R3ERUlabelF + R3ERUlabelG,
design = data.svy.trans)
# Model #4: log response, selection WITHOUT weights.
MedianBASModel_transNoWeight <- svyglm(log(STBIOMSha) ~ Elev_stddev + Elev_P05 + Elev_P90 + Pct_all_returns_above_ht + all_returns_above_ht_div_Total_first_returns_x_100 + elevation + aspect + slope + Elev_P60*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P90*Pct_all_returns_above_ht + Elev_P90*All_returns_above_mode_div_Total_first_returns_x_100,
design = data.svy.trans)
# Model #5: log response, main effects only (no interactions in the pool).
MedianBASModel_transNoInt <- svyglm(log(STBIOMSha) ~ Elev_stddev + Elev_LCV + Elev_P05 + Elev_P60 + Elev_P90 + Pct_all_returns_above_ht + elevation + aspect + slope ,
design = data.svy.trans)
##################################
## No transformation
# Hold-out evaluation of the five median-probability models: print fit
# summary and AIC, plot observed vs. predicted with a 1:1 reference line,
# then compute a design-based RMSE as the square root of the survey-weighted
# mean of squared residuals.
# NOTE(review): six column names are assigned to the cbind() result -- this
# assumes the predict.svyglm object contributes both the prediction and its
# SE as columns; with a plain numeric vector this colnames() call would
# error on length mismatch. Confirm.
## #1
summary(MedianBASModel_Weights);
summary(MedianBASModel_Weights)$aic
#Root Mean Square Error
predictedoutput <- predict(MedianBASModel_Weights, newdata=DATA.val)
plot(predictedoutput~(DATA.val $STBIOMSha), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val $STBIOMSha)
frame <- cbind(Resoutput, Resoutput^2, data.val [ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
## #2
summary(MedianBASModel_NoWeight);
summary(MedianBASModel_NoWeight)$aic
#Root Mean Square Error
predictedoutput <- predict(MedianBASModel_NoWeight, newdata=DATA.val)
plot(predictedoutput~(DATA.val $STBIOMSha), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val $STBIOMSha)
frame <- cbind(Resoutput, Resoutput^2, data.val [ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
## log transformed models
# Predictions are back-transformed with exp() - 1 to undo the +1 shift;
# observations are compared on the same (raw biomass) scale.
## #3
summary(MedianBASModel_transWeights);
summary(MedianBASModel_transWeights)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(MedianBASModel_transWeights, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val [ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
## #4
summary(MedianBASModel_transNoWeight);
summary(MedianBASModel_transNoWeight)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(MedianBASModel_transNoWeight, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val [ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
## #5
summary(MedianBASModel_transNoInt);
summary(MedianBASModel_transNoInt)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(MedianBASModel_transNoInt, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val [ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
# Model diagnostics
#plot(BioMass.Mod, ask=F)
#plot(BioMass.Mod.trans, ask=F)
#pull some models out using predict functions
##################################
##################################
# Highest Probability Model: for each BAS fit, pull the single model with the
# largest posterior probability and print its covariate names (intercept
# dropped via [-1]). `HPM` is left bound to the last fit's prediction object,
# exactly as in the original straight-line version.
for (fit in list(BioMass.Mod, BioMass.Mod.no, BioMass.Mod.trans,
                 BioMass.Mod.trans.no, BioMass.Mod.trans.noInt)) {
  HPM <- predict(fit, estimator = "HPM")
  print(fit$namesx[HPM$bestmodel + 1][-1])
}
# Refit the five Highest-Probability-Model variable sets as design-based GLMs.
# Model #1: untransformed response, HPM from the weighted selection run.
HPMlm <- svyglm(STBIOMSha~ Elev_L4 + Elev_LCV + Elev_P01 + Elev_P05 + Elev_P10 + Elev_P30 + Elev_P90 + all_returns_above_ht_div_Total_first_returns_x_100 + All_returns_above_mode_div_Total_first_returns_x_100 + aspect + NDVI_Amp + Elev_mode*Pct_all_returns_above_ht + Elev_mode*pct_all_returns_above_mean + Elev_mode*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P01*Pct_all_returns_above_ht + Elev_P01*pct_all_returns_above_mean + Elev_P01*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P01*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P05*Pct_all_returns_above_ht + Elev_P05*pct_all_returns_above_mean + Elev_P05*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P10*Pct_all_returns_above_ht + Elev_P10*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P30*pct_all_returns_above_mean + Elev_P30*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P60*all_returns_above_ht_div_Total_first_returns_x_100 + R3ERUlabelE + R3ERUlabelF + R3ERUlabelG,
design = data.svy)
# Model #2: untransformed response, HPM from the unweighted selection run.
HPMlm.no <- svyglm(STBIOMSha ~ Elev_MAD_median + Elev_L3 + Elev_Lskewness + Elev_Lkurtosis + Elev_P60 + Elev_P10*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30*pct_all_returns_above_mean + Elev_P90*Pct_all_returns_above_ht,
design = data.svy)
# Model #3: log response (shifted data), HPM from the weighted run.
HPMlm.trans <- svyglm(log(STBIOMSha)~ Elev_MAD_median + Elev_MAD_mode + Elev_L4 + Elev_Lskewness + Elev_P60 + Elev_P90 + all_returns_above_ht_div_Total_first_returns_x_100 + pct_all_returns_above_mean + All_returns_above_mode_div_Total_first_returns_x_100 + elevation + aspect + NDVI_Amp + Elev_mode*Pct_all_returns_above_ht + Elev_mode*pct_all_returns_above_mean + Elev_mode*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_mode*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P01*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30*Pct_all_returns_above_ht + Elev_P30*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P60*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P90*All_returns_above_mode_div_Total_first_returns_x_100 + R3ERUlabelB + R3ERUlabelE + R3ERUlabelF + R3ERUlabelG,
design = data.svy.trans)
# Model #4: log response, HPM from the unweighted run.
HPMlm.trans.no <- svyglm(log(STBIOMSha)~ Elev_stddev + Elev_P05 + Elev_P10 + Elev_P90 + Pct_all_returns_above_ht + all_returns_above_ht_div_Total_first_returns_x_100 + elevation + aspect + slope + Elev_P05*Pct_all_returns_above_ht + Elev_P10*Pct_all_returns_above_ht + Elev_P60*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P90*Pct_all_returns_above_ht,
design = data.svy.trans)
# Model #5: log response, HPM from the no-interaction pool.
HPMlm.trans.noInt <- svyglm(log(STBIOMSha)~ Elev_stddev + Elev_LCV + Elev_P05 + Elev_P60 + Elev_P90 + Pct_all_returns_above_ht + elevation + aspect + slope,
design = data.svy.trans)
##################################
## No transformation
# Hold-out evaluation of the five HPM refits: summary + AIC, observed vs.
# predicted plot with 1:1 line, and design-based RMSE via a survey-weighted
# mean of squared residuals (same recipe as the median-model evaluations).
## #1
summary(HPMlm);
summary(HPMlm)$aic
#Root Mean Square Error
predictedoutput <- predict(HPMlm, newdata=DATA.val)
plot(predictedoutput~(DATA.val $STBIOMSha), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val $STBIOMSha)
frame <- cbind(Resoutput, Resoutput^2, data.val [ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
## #2
summary(HPMlm.no);
summary(HPMlm.no)$aic
#Root Mean Square Error
predictedoutput <- predict(HPMlm.no, newdata=DATA.val)
plot(predictedoutput~(DATA.val $STBIOMSha), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val $STBIOMSha)
frame <- cbind(Resoutput, Resoutput^2, data.val [ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
## log transformed models
# Back-transform log-scale predictions with exp() - 1 before comparing.
## #3
summary(HPMlm.trans);
summary(HPMlm.trans)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(HPMlm.trans, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
## #4
summary(HPMlm.trans.no);
summary(HPMlm.trans.no)$aic
# Keep the raw (log-scale) predictions around for later error summaries.
HPMlm_TransNoWts.res <- predict(HPMlm.trans.no, newdata=DATA.val.trans)
#Root Mean Square Error
predictedoutput <- exp(predict(HPMlm.trans.no, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
## #5
summary(HPMlm.trans.noInt);
summary(HPMlm.trans.noInt)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(HPMlm.trans.noInt, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
##################################
######################################################################################################
##################################
########################################
########################################
## No transformation (After Removing Zeroes)
# Median-probability models re-selected after dropping zero-biomass plots.
# NOTE(review): these svyglm refits still use data.svy / data.svy.trans,
# which were built from the full DATA.mod -- confirm whether zero plots
# should also be excluded from the design here.
MedianBASModel_Weights.no0 <- svyglm(STBIOMSha ~ Elev_L4 + Elev_LCV +
Elev_P01 + Elev_P05 + Elev_P10 + Elev_P30 + Elev_P90 +
all_returns_above_ht_div_Total_first_returns_x_100 + All_returns_above_mode_div_Total_first_returns_x_100 +
aspect + NDVI_Amp +
Elev_mode*Pct_all_returns_above_ht +
Elev_mode*pct_all_returns_above_mean + Elev_mode*All_returns_above_mode_div_Total_first_returns_x_100 +
Elev_P01*Pct_all_returns_above_ht + Elev_P01*pct_all_returns_above_mean +
Elev_P01*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P01*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P05:Pct_all_returns_above_ht + Elev_P05*pct_all_returns_above_mean + Elev_P05*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P10*Pct_all_returns_above_ht + Elev_P10*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P30*pct_all_returns_above_mean + Elev_P30*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P60*all_returns_above_ht_div_Total_first_returns_x_100 + R3ERUlabelE + R3ERUlabelF + R3ERUlabelG
,
design = data.svy)
# Untransformed response, selection without weights.
MedianBASModel_NoWeight.no0 <- svyglm(STBIOMSha ~ Elev_MAD_median + Elev_L3 + Elev_Lskewness + Elev_P60 + Elev_P30*pct_all_returns_above_mean + Elev_P90*Pct_all_returns_above_ht,
design = data.svy)
## log transformed models
MedianBASModel_transWeights.no0 <- svyglm(log(STBIOMSha) ~ Elev_kurtosis + Elev_LCV + Elev_L3 + Elev_Lskewness + Elev_P05 + Elev_P90 + Pct_all_returns_above_ht + all_returns_above_ht_div_Total_first_returns_x_100 + slope + NDVI_Amp + Elev_mode*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P01*pct_all_returns_above_mean + Elev_P01*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P05*Pct_all_returns_above_ht + Elev_P05*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P10*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P10*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P30*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P60*Pct_all_returns_above_ht + Elev_P60*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P90*All_returns_above_mode_div_Total_first_returns_x_100 + R3ERUlabelB + R3ERUlabelE + R3ERUlabelF + R3ERUlabelG,
design = data.svy.trans)
MedianBASModel_transNoWeight.no0 <- svyglm(log(STBIOMSha) ~ Elev_stddev + Elev_P05 + Elev_P90 + Pct_all_returns_above_ht + all_returns_above_ht_div_Total_first_returns_x_100 + elevation + slope + Elev_P90*All_returns_above_mode_div_Total_first_returns_x_100 + R3ERUlabelG,
design = data.svy.trans)
MedianBASModel_transNoInt.no0 <- svyglm(log(STBIOMSha) ~ Elev_stddev + Elev_MAD_median + Elev_LCV + Elev_P05 + Elev_P90 + Pct_all_returns_above_ht + elevation + slope,
design = data.svy.trans)
##################################
## No transformation
# Hold-out evaluation of the zero-removed, weighted median model: summary,
# AIC, observed-vs-predicted plot, and design-based RMSE.
summary(MedianBASModel_Weights.no0);
summary(MedianBASModel_Weights.no0)$aic
#Root Mean Square Error
predictedoutput <- predict(MedianBASModel_Weights.no0, newdata=DATA.val)
plot(predictedoutput~(DATA.val$STBIOMSha), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
# BUG FIX: residuals were computed against DATA.val.trans$STBIOMSha, which is
# shifted by +1 for the log models. This model is untransformed, so compare
# against DATA.val$STBIOMSha (consistent with the plot above and with the
# earlier untransformed evaluations).
Resoutput <- predictedoutput - (DATA.val$STBIOMSha)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec))# remember to Square the Standard Error
# Same evaluation for the zero-removed, unweighted median model.
summary(MedianBASModel_NoWeight.no0);
summary(MedianBASModel_NoWeight.no0)$aic
#Root Mean Square Error
predictedoutput <- predict(MedianBASModel_NoWeight.no0, newdata=DATA.val)
plot(predictedoutput~(DATA.val$STBIOMSha), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
# BUG FIX: untransformed model -- residuals must use DATA.val$STBIOMSha, not
# the +1-shifted DATA.val.trans column.
Resoutput <- predictedoutput - (DATA.val$STBIOMSha)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec))# remember to Square the Standard Error
## log transformed models
# Hold-out evaluation of the three zero-removed log models; predictions are
# back-transformed with exp() - 1 before computing residuals and the
# design-based RMSE.
summary(MedianBASModel_transWeights.no0);
summary(MedianBASModel_transWeights.no0)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(MedianBASModel_transWeights.no0, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec))# remember to Square the Standard Error
summary(MedianBASModel_transNoWeight.no0);
summary(MedianBASModel_transNoWeight.no0)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(MedianBASModel_transNoWeight.no0, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec))# remember to Square the Standard Error
summary(MedianBASModel_transNoInt.no0);
summary(MedianBASModel_transNoInt.no0)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(MedianBASModel_transNoInt.no0, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec))# remember to Square the Standard Error
### models built on data without zeroes.
# Highest-probability-model refits after dropping zero-biomass plots.
# NOTE(review): designs data.svy / data.svy.trans were built from the full
# frames -- confirm whether zero plots should be removed from them too.
HPMlm.no0 <- svyglm(STBIOMSha ~ Elev_L4 + Elev_LCV + Elev_P01 + Elev_P05 + Elev_P10 + Elev_P30 + Elev_P90 + all_returns_above_ht_div_Total_first_returns_x_100 + All_returns_above_mode_div_Total_first_returns_x_100 + aspect + NDVI_Amp + Elev_mode*Pct_all_returns_above_ht + Elev_mode*pct_all_returns_above_mean + Elev_mode*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P01*Pct_all_returns_above_ht + Elev_P01*pct_all_returns_above_mean + Elev_P01*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P01*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P05*Pct_all_returns_above_ht + Elev_P05*pct_all_returns_above_mean + Elev_P05*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P10*Pct_all_returns_above_ht + Elev_P10*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P30*pct_all_returns_above_mean + Elev_P30*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P60*all_returns_above_ht_div_Total_first_returns_x_100 + R3ERUlabelE + R3ERUlabelF + R3ERUlabelG,
design = data.svy)
HPMlm.no.no0 <- svyglm(STBIOMSha ~ Elev_MAD_median + Elev_L3 + Elev_Lskewness + Elev_Lkurtosis + Elev_P60 + Elev_P10*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30*pct_all_returns_above_mean + Elev_P90*Pct_all_returns_above_ht,
design = data.svy)
HPMlm.trans.no0 <- svyglm(log(STBIOMSha)~ Elev_kurtosis + Elev_L3 + Elev_LCV + Elev_Lskewness + Elev_P05 + Elev_P90 + Pct_all_returns_above_ht + all_returns_above_ht_div_Total_first_returns_x_100 + slope + NDVI_Amp + Elev_mode*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P01*pct_all_returns_above_mean + Elev_P01*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P05*Pct_all_returns_above_ht + Elev_P05*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P10*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P10*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P30*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P30*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P60*Pct_all_returns_above_ht + Elev_P60*pct_all_returns_above_mean + Elev_P60*All_returns_above_mode_div_Total_first_returns_x_100 + Elev_P90*All_returns_above_mode_div_Total_first_returns_x_100 + R3ERUlabelB + R3ERUlabelE + R3ERUlabelF + R3ERUlabelG,
design = data.svy.trans)
HPMlm.trans.no.no0 <- svyglm(log(STBIOMSha)~ Elev_stddev + Elev_MAD_median + Elev_P05 + Elev_P90 + Pct_all_returns_above_ht + all_returns_above_ht_div_Total_first_returns_x_100 + elevation + slope + Elev_P60*all_returns_above_ht_div_Total_first_returns_x_100 + Elev_P90*Pct_all_returns_above_ht,
design = data.svy.trans)
HPMlm.trans.noInt.no0 <- svyglm(log(STBIOMSha)~ Elev_stddev + Elev_LCV + Elev_P05 + Elev_P60 + Elev_P90 + Pct_all_returns_above_ht + elevation + slope,
design = data.svy.trans)
##################################
## No transformation
## Model #1
# Hold-out evaluation of the zero-removed HPM refit (weighted selection).
summary(HPMlm.no0);
summary(HPMlm.no0)$aic
#Root Mean Square Error
predictedoutput <- predict(HPMlm.no0, newdata=DATA.val)
plot(predictedoutput~(DATA.val$STBIOMSha), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
# BUG FIX: untransformed model -- residuals must use DATA.val$STBIOMSha,
# not the +1-shifted DATA.val.trans column.
Resoutput <- predictedoutput - (DATA.val$STBIOMSha)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec))# remember to Square the Standard Error
## Model #2
# Hold-out evaluation of the zero-removed HPM refit (unweighted selection).
summary(HPMlm.no.no0);
summary(HPMlm.no.no0)$aic
#Root Mean Square Error
predictedoutput <- predict(HPMlm.no.no0, newdata=DATA.val)
# BUG FIX: this model is untransformed -- plot and residuals should use
# DATA.val$STBIOMSha, not the +1-shifted DATA.val.trans column.
plot(predictedoutput~(DATA.val$STBIOMSha), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val$STBIOMSha)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec))# remember to Square the Standard Error
## log transformed models
# Hold-out evaluation of the zero-removed log-scale HPM refits; predictions
# are back-transformed with exp() - 1 before residuals and design-based RMSE.
## Model #3
summary(HPMlm.trans.no0);
summary(HPMlm.trans.no0)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(HPMlm.trans.no0, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec))# remember to Square the Standard Error
## Model #4
summary(HPMlm.trans.no.no0);
summary(HPMlm.trans.no.no0)$aic
#Root Mean Square Error
predictedoutput <- exp(predict(HPMlm.trans.no.no0, newdata=DATA.val.trans))-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec))# remember to Square the Standard Error
## Model #5
# Hold-out evaluation of the zero-removed, no-interaction log-scale HPM refit.
summary(HPMlm.trans.noInt.no0);
summary(HPMlm.trans.noInt.no0)$aic
#########################
#Root Mean Square Error
# BUG FIX: HPMlm_TransNoWtsInt.no0.res was used below but never defined;
# define it here (log-scale predictions, mirroring HPMlm_TransNoWts.res for
# Model #4 above).
HPMlm_TransNoWtsInt.no0.res <- predict(HPMlm.trans.noInt.no0, newdata=DATA.val.trans)
# Back-transform with exp() and undo the +1 shift used for log(0) safety.
predictedoutput <- exp(HPMlm_TransNoWtsInt.no0.res)-1
plot(predictedoutput~(DATA.val.trans$STBIOMSha-1), xlim = c(0,650), ylim = c(0,650))
abline(a = 0, b = 1, col = 'red')
Resoutput <- predictedoutput - (DATA.val.trans$STBIOMSha-1)
frame <- cbind(Resoutput, Resoutput^2, data.val[ , c('fpc', 'Stratum')])
colnames(frame)<- c("Resoutput", "ResoutputSE", "SqResoutput", "SqResoutputSE", "fpc", "Stratum")
ResSqVec <- svydesign(ids = ~1, data = frame, fpc = frame$fpc, strata = frame$Stratum)
sqrt(svymean(frame$SqResoutput, design = ResSqVec)) # remember to Square the Standard Error
#Mean Summed Error
# BUG FIX: back-transform with exp(...) - 1 so predictions are on the same
# raw-biomass scale as the observations (exp() alone overstates by 1).
mape(DATA.val.trans$STBIOMSha-1, exp(HPMlm_TransNoWtsInt.no0.res)-1, includeSE=TRUE)
#Mean Bias Error
# BUG FIX: compare observations to the back-transformed predictions rather
# than to the log-scale values (the original mixed scales).
sum((DATA.val.trans$STBIOMSha-1) - predictedoutput)/length(predictedoutput)
####################################################
####################################################
# Best Predictive Model, closest to BMA in terms of squared error loss, takes a pretty long time to find.
BPM <- predict(BioMass.Mod, estimator="BPM") #not running, grrrr
BioMass.Mod$namesx[BPM$bestmodel+1][-1]
##################################
## Diagnostics
plot(MPMlm, ask=F) # This model is pretty solid, better redisdual distribution than HPM, and better adjusted R^2
########################################
################################################################################
############################################################################################
############################################################################################
#################################################################################################
######################################################################################################
#################################################################################################
############################################################################################
############################################################################################
########################################
### Same business, but for Total Cubit Feet of wood.
# Full variable pool, truncated poisson prior, hyper-g
TCUFT.Mod <- bas.lm(log(TCUFT)~ . -STBIOMSha,
data=DATA.mod,
prior="hyper-g",
alpha = 3,
modelprior=tr.poisson(10,30),
method="MCMC+BAS")
summary(TCUFT.Mod)
# What are the median model variables
TCUFT.Mod$namesx[which(TCUFT.Mod$probne0>0.5)][-1]
# Highest Probability Model
HPM <- predict(TCUFT.Mod, estimator="HPM")
TCUFT.Mod$namesx[HPM$bestmodel+1][-1]
HPMlm <- lm(log(TCUFT)~ Elev_skewness + Elev_P80 + Pct_first_returns_above_ht + Pct_all_returns_above_ht +
All_returns_above_ht + Pct_first_returns_above_mean + All_returns_above_mean_div_Total_first_returns_x_100 +
Total_first_returns + Total_all_returns + R3ERUCODE + elevation + slope, data=DATA.mod)
summary(HPMlm)
plot(HPMlm, ask=F)
# Median Probability Model
MPM <- predict(TCUFT.Mod, estimator="MPM")
TCUFT.Mod$namesx[MPM$bestmodel+1][-1]
MPMlm <- lm(log(TCUFT)~ Elev_skewness + Elev_P80 + Pct_first_returns_above_ht + Pct_all_returns_above_ht +
All_returns_above_ht + Pct_first_returns_above_mean + All_returns_above_mean_div_Total_first_returns_x_100 +
Total_first_returns + Total_all_returns + R3ERUCODE + Forest + elevation + slope, data=DATA.mod)
summary(MPMlm)
plot(MPMlm, ask=F)
# Best Predictive Model, closest to BMA in terms of squared error loss, takes a pretty long time to find.
BPM <- predict(BioMass.Mod, estimator="BPM") #not running, grrrr
### Combined final model?
cor.test(DATA.mod$STBIOMSha, DATA.mod$TCUFT)
# Standing biomass and volume are 0.98 correlated.
FinModVarFull <- select(DATA.mod, Elev_skewness, Elev_CV, Elev_LCV, Elev_P80, Pct_first_returns_above_ht,
Pct_all_returns_above_ht, All_returns_above_ht, Pct_first_returns_above_mean,
All_returns_above_mean_div_Total_first_returns_x_100, Total_first_returns,
Total_all_returns, R3ERUCODE, elevation, slope, PlotSizeAcres, Forest)
corrgram(FinModVarFull, order=T, lower.panel=panel.ellipse,
upper.panel=panel.cor, text.panel=panel.txt,
main="Lidar Predictor Data in PC2/PC1 Order") # Trim until no correlations above ~0.5
FinModVarTrim <- select(DATA.mod, Elev_skewness, Elev_LCV, Elev_P80, Total_first_returns,
Pct_first_returns_above_ht, R3ERUCODE, elevation, slope, PlotSizeAcres, Forest)
corrgram(FinModVarTrim, order=T, lower.panel=panel.ellipse,
upper.panel=panel.cor, text.panel=panel.txt,
main="Lidar Predictor Data in PC2/PC1 Order") # Trim until no correlations above ~0.5
# Final Model Performance
FinModB <- lm(log(STBIOMSha)~ Elev_skewness + Elev_LCV + Elev_P80 + Total_first_returns +
Pct_all_returns_above_ht + elevation + slope + PlotSizeAcres + Forest + R3ERUCODE ,
data=DATA.mod)
summary(FinModB)
plot(FinModB, ask=F)
FinModT <- lm(log(TCUFT)~ Elev_skewness + Elev_LCV + Elev_P80 + Total_first_returns +
Pct_all_returns_above_ht + elevation + slope + PlotSizeAcres + Forest + R3ERUCODE ,
data=DATA.mod)
summary(FinModT)
plot(FinModT, ask=F)
# Possible Outliers
Outliers <- c("53","1148","1238","1242","2145", "2651")
Outliers <- which(rownames(DATA.mod) %in% Outliers)
DATA.mod[Outliers, c(1,2)] #most low values, though 1238 has a fair bit of Biomass.
DATA.mod.t <- DATA.mod[-Outliers,]
# Final Model Performance
FinModBt <- lm(log(STBIOMSha)~ Elev_skewness + Elev_LCV + Elev_P80 + Total_first_returns +
Pct_all_returns_above_ht + elevation + slope + PlotSizeAcres + Forest + R3ERUCODE ,
data=DATA.mod.t)
summary(FinModBt)
plot(FinModBt, ask=F)
FinModTt <- lm(log(TCUFT)~ Elev_skewness + Elev_LCV + Elev_P80 + Total_first_returns +
Pct_all_returns_above_ht + elevation + slope + PlotSizeAcres + Forest + R3ERUCODE ,
data=DATA.mod.t)
summary(FinModTt)
plot(FinModTt, ask=F)
# ### RObust versions of final model.
#
# # Standing biomass and volume are 0.98 correlated.
#
# FinModB.r <- lmrob(log(STBIOMSha)~ Elev_LCV + Elev_skewness + Elev_P80 + Pct_all_returns_above_ht +
# Total_first_returns + R3ERUCODE + elevation + slope + PlotSizeAcres, data=DATA.mod,
# setting = "KS2014", fast.s.large.n = Inf)
# summary(FinModB.r)
# plot(FinModB.r, ask=F)
#
# FinModT.r <- lmrob(log(TCUFT)~ Elev_LCV + Elev_skewness + Elev_P80 + Pct_all_returns_above_ht +
# Total_first_returns + R3ERUCODE + elevation + slope + PlotSizeAcres, data=DATA.mod,
# setting = "KS2014", fast.s.large.n = Inf)
# summary(FinModT.r)
# plot(FinModT.r, ask=F)
|
2c3b09a67417f42e3b2094691a1fb641ff4d99fc
|
1ae5b7089e9d2c52c81104c89fa1145c6646d881
|
/plot2.R
|
b9ee718cd7e7c6825539831b60b84e4b58651913
|
[] |
no_license
|
ekuchar/ExData_Plotting1
|
2bc5d6376708b474c600461d374db27dad032535
|
de843e4fd8c56ba6fe6c370654093c362b9646e7
|
refs/heads/master
| 2021-01-14T12:40:26.582707
| 2014-05-07T18:58:56
| 2014-05-07T18:58:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 681
|
r
|
plot2.R
|
# Exploratory Data Analysis course project, plot 2: Global Active Power
# over time for 1-2 Feb 2007, from the UCI "Individual household electric
# power consumption" data set.

# Reading from a local copy because of slow/inaccessible internet.
# NOTE(review): setwd() in a script is brittle; kept for compatibility
# with the author's environment.
setwd( "C:/Users/tata/WORK/Data")
# Use English (e.g. for week-day labels) instead of Czech.
Sys.setlocale("LC_TIME","English")
# install.packages("sqldf")  # install if necessary
library(sqldf)
# Filter the two target dates in SQL while reading, so the full (very
# large) file never has to be loaded into memory.
x <- read.csv.sql("household_power_consumption.txt", sep=";", sql = "select * from file where Date in ('1/2/2007', '2/2/2007' )")
# BUG FIX: the time format must end in %S (second as a decimal number,
# 00-61), not %s (seconds since the epoch); with %s strptime() fails to
# parse the timestamps and yields NA for every row.
df <- data.frame( Date_Time = strptime( paste( x$Date, x$Time, sep=" "), "%d/%m/%Y %H:%M:%S"), GAP = as.numeric(x$Global_active_power))
# Render the line plot straight to a 480x480 PNG.
png( "plot2.png",width = 480, height = 480, units = "px")
plot(df$Date_Time, df$GAP, type="l", xlab="", ylab = "Global Active Power (kilowatts)")
dev.off()
|
516abd11f4ba84915f0a001b24e491d16a784a73
|
b8c9bbba211bcb9c2f0caf79d22ccedacd0c9192
|
/GetCleanCourseProject.R
|
fe7f83cddd7bc0dc2cc6342904848943ad34f449
|
[] |
no_license
|
tarekanis/Getting_and_cleaning_data_project
|
ee1c8ded3bc5d55e64b3b6d3fade98ac52d79453
|
c84d7fff3b4168ebb592688cde1c1f9da58e9322
|
refs/heads/master
| 2021-01-19T18:05:55.390182
| 2014-07-27T13:36:33
| 2014-07-27T13:36:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,128
|
r
|
GetCleanCourseProject.R
|
# Getting & Cleaning Data course project: build a tidy summary of the
# UCI HAR (smartphone activity recognition) data set, keeping only
# mean()/std() measurements and averaging them per subject/activity.

# Setup file name variables ----
folderName <- "UCI HAR DataSet"
columnNamesFile <- file.path(folderName, "features.txt")
## Test files
testSub <- file.path(folderName, "test", "subject_test.txt")
testY <- file.path(folderName, "test", "y_test.txt")
testX <- file.path(folderName, "test", "X_test.txt")
## Training files
trainSub <- file.path(folderName, "train", "subject_train.txt")
trainY <- file.path(folderName, "train", "y_train.txt")
trainX <- file.path(folderName, "train", "X_train.txt")

# Read data ----
# features.txt maps column index -> measurement name (column 2).
columnNames <- read.table(columnNamesFile, header = FALSE, colClasses = c("numeric", "character"))
colNames <- columnNames[, 2]
## Test data
testX_df <- read.table(testX, header = FALSE, col.names = colNames)
testY_df <- read.table(testY, header = FALSE, col.names = "activity")
testSub_df <- read.table(testSub, header = FALSE, col.names = "subID")
## Training data
trainX_df <- read.table(trainX, header = FALSE, col.names = colNames)
trainY_df <- read.table(trainY, header = FALSE, col.names = "activity")
trainSub_df <- read.table(trainSub, header = FALSE, col.names = "subID")

# Create ID columns (subject + descriptive activity factor) ----
activityLabels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                    "SITTING", "STANDING", "LAYING")
testID <- cbind(testSub_df, testY_df)
testID$activity <- factor(testID$activity, levels = 1:6, labels = activityLabels)
trainID <- cbind(trainSub_df, trainY_df)
trainID$activity <- factor(trainID$activity, levels = 1:6, labels = activityLabels)

# Merge ID columns with X-data and combine test + train ----
test <- cbind(testID, testX_df)
train <- cbind(trainID, trainX_df)
data <- rbind(train, test)

# Keep only mean()/std() measurement columns, plus subject ID/activity ----
index1 <- grepl("mean()", names(data))
index2 <- grepl("std()", names(data))
temp <- data[, index1 | index2]
index3 <- !grepl("Freq", names(temp))      ### drop meanFreq variables
index4 <- !grepl("fBodyBody", names(temp)) ### drop malformed fBodyBody variables
temp <- temp[, index3 & index4]
data <- cbind(data[, c(1, 2)], temp)

# Rename variables with descriptive labels ----
## Clean up labels using regex substitutions on the raw feature names.
temp <- gsub("\\.\\.\\."," in ",names(data))
temp <- gsub("BodyAcc"," Body Acceleration ",temp)
temp <- gsub("BodyGyro"," Body Gyration ",temp)
temp <- gsub("Mag","Magnitude ",temp)
temp <- gsub("GravityAcc"," Gravity Acceleration ",temp)
temp <- gsub("\\.mean","Mean",temp)
temp <- gsub("\\.std","Standard Diviation",temp)
temp <- gsub("JerkMagnitude","Jerk Magnitude",temp)
temp <- gsub("JerkMean","Jerk Mean",temp)
temp <- gsub("JerkStandard","Jerk Standard",temp)
temp <- gsub("^f ","FFT ",temp)
temp <- gsub("^t ","Time ",temp)
temp <- gsub("\\.\\.","",temp)
temp[1:2] <- c("Subject ID","Activity")
## Set new names
names(data) <- temp

# Create second tidy data set: mean of each variable per subject/activity ----
data2 <- aggregate(data[, 3:62], by = list(data[, 1], data[, 2]), FUN = mean, na.rm = TRUE)
# BUG FIX: the original `names(data2[, 1:2]) <- ...` renamed a temporary
# copy and had no effect, leaving the grouping columns as Group.1/Group.2.
names(data2)[1:2] <- c("Subject ID", "Activity")
write.table(data2, file = "tidyData.txt")
|
8c2bd5160b5134fc4daa521d877b0681afca6bc2
|
4df908dd007b35ebb9c6ead3eb30f30517fae57a
|
/man/dir.create.adf.Rd
|
419b05d0536345c2bb8e71d8562b27695a648834
|
[] |
no_license
|
cran/adfExplorer
|
2d821b94cbd32856671b6fe2b43c1fe40fb07641
|
8d6c0f9eac8bc2b59cd5799b493b202dff5c38e3
|
refs/heads/master
| 2021-09-08T02:04:15.568010
| 2021-09-05T07:50:02
| 2021-09-05T07:50:02
| 108,872,575
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,487
|
rd
|
dir.create.adf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/04fileOperations.r
\docType{methods}
\name{dir.create.adf}
\alias{dir.create.adf}
\alias{dir.create.adf,amigaDisk,character,missing,missing-method}
\alias{dir.create.adf,amigaDisk,character,POSIXt,missing-method}
\alias{dir.create.adf,amigaDisk,character,POSIXt,character-method}
\title{Create a directory on an amigaDisk object}
\usage{
\S4method{dir.create.adf}{amigaDisk,character,missing,missing}(x, path, date, comment)
\S4method{dir.create.adf}{amigaDisk,character,POSIXt,missing}(x, path, date, comment)
\S4method{dir.create.adf}{amigaDisk,character,POSIXt,character}(x, path, date, comment)
}
\arguments{
\item{x}{An \code{\link{amigaDisk}} on which the directory should be created.}
\item{path}{Specify the directory that should be created on \code{x}.
You can specify the full path on the virtual disk conforming to Amiga DOS syntax
(see \code{\link{current.adf.dir}} details). When no full path is specified
the new directory will be created in the current directory. Note that
wild cards are not allowed.}
\item{date}{A \code{\link[base:DateTimeClasses]{POSIXt}} object that will be used as the
directory modification date. When missing the system time will used.}
\item{comment}{An optional \code{character} string that will be included
in the directory header as a comment. Should not be longer than 79 characters.}
}
\value{
Returns an \code{\link{amigaDisk}} object on which the
directory is created.
}
\description{
Create a directory on a virtual Amiga floppy disk represented by
an \code{\link{amigaDisk}} object.
}
\details{
Create a directory on a virtual Amiga floppy disk represented by
an \code{\link{amigaDisk}} object. Make sure that the virtual disk
is DOS formatted.
}
\examples{
\dontrun{
## create a blank DOS disk:
blank.disk <- blank.amigaDOSDisk("blank", "DD", "FFS", TRUE, FALSE, FALSE)
## creating a new directory on the blank disk is easy:
blank.disk <- dir.create.adf(blank.disk, "new_dir")
## in the line above, the directory is placed in the
## current directory (the root in this case). Directories
## can also be created by specifying the full path:
blank.disk <- dir.create.adf(blank.disk, "DF0:new_dir/sub_dir")
## check whether we succeeded:
list.adf.files(blank.disk)
## we can even make it the current dir:
current.adf.dir(blank.disk) <- "DF0:new_dir/sub_dir"
}
}
\author{
Pepijn de Vries
}
|
5399c4ab427b748d8d5c2a02634db6dbab8e6cd6
|
f484b55f35e230b94321beddc3aab06aa5cb55b2
|
/tests/testthat/test-serializeJSON-S4.R
|
99c1f7d95ca36e91301c0ce110953de5710aac3b
|
[
"MIT"
] |
permissive
|
FlexShopper/jsonlite
|
24e59fb1f71c386b665ed890fc549aac49084423
|
633e438608c9798cb3bd00fde71ef006a19dd064
|
refs/heads/master
| 2023-01-16T03:14:29.172849
| 2020-11-30T15:09:57
| 2020-11-30T15:09:57
| 317,259,057
| 0
| 0
|
NOASSERTION
| 2020-11-30T15:09:59
| 2020-11-30T15:04:36
| null |
UTF-8
|
R
| false
| false
| 1,382
|
r
|
test-serializeJSON-S4.R
|
# testthat suite: round-tripping S4 objects through
# jsonlite::serializeJSON() / jsonlite::unserializeJSON().
context("Serializing S4 objects")
# A minimal S4 class with one slot must survive a round trip unchanged.
# removeClass() afterwards keeps the global class table clean between tests.
test_that("Simple S4 serialization", {
  setClass("myClass", slots = list(name = "character"))
  obj <- new("myClass", name = "myName")
  out <- jsonlite::unserializeJSON(jsonlite::serializeJSON(obj))
  expect_identical(obj, out)
  removeClass("myClass")
})
# Slots left at their prototype values (t1: none set, t2: only `times`)
# must also round-trip identically, not just fully-populated objects (t3).
test_that("Serialize optional S4 fields", {
  setClass(
    Class="Trajectories",
    representation = representation(
      times = "numeric",
      traj = "matrix"
    )
  )
  t1 <- new(Class="Trajectories")
  t2 <- new(Class="Trajectories", times=c(1,3,4))
  t3 <- new(Class="Trajectories", times=c(1,3), traj=matrix(1:4,ncol=2))
  expect_identical(t1, unserializeJSON(serializeJSON(t1)))
  expect_identical(t2, unserializeJSON(serializeJSON(t2)))
  expect_identical(t3, unserializeJSON(serializeJSON(t3)))
  removeClass("Trajectories")
})
# An "ANY" slot that was never assigned serializes as a pseudo-null and
# must come back as the same empty slot.
test_that("Serialize pseudo-null (empty slot)", {
  track <- setClass("track", slots = c(x="numeric", y="ANY"))
  t1 <- new("track", x = 1:3)
  t2 <- unserializeJSON(serializeJSON(t1))
  expect_identical(t1, t2)
})
# Deserializing an S4 payload whose class (or declaring package) does not
# exist must raise an informative error rather than fail silently.
test_that("Class loading errors", {
  expect_error(unserializeJSON('{"type":"S4","attributes":{},"value":{"class":"nonExitingClass","package":".GlobalEnv"}}'), "defined")
  expect_error(expect_warning(unserializeJSON('{"type":"S4","attributes":{},"value":{"class":"nonExitingClass","package":"nopackage"}}')), "nopackage")
})
|
ff17daf74200337654207f9a41700a0384db3eb5
|
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
|
/cran/paws.application.integration/man/sns_create_platform_application.Rd
|
7f0eabbc9d8191aea322c76a5c7f79a5b36809b8
|
[
"Apache-2.0"
] |
permissive
|
johnnytommy/paws
|
019b410ad8d4218199eb7349eb1844864bd45119
|
a371a5f2207b534cf60735e693c809bd33ce3ccf
|
refs/heads/master
| 2020-09-14T23:09:23.848860
| 2020-04-06T21:49:17
| 2020-04-06T21:49:17
| 223,286,996
| 1
| 0
|
NOASSERTION
| 2019-11-22T00:29:10
| 2019-11-21T23:56:19
| null |
UTF-8
|
R
| false
| true
| 2,411
|
rd
|
sns_create_platform_application.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sns_operations.R
\name{sns_create_platform_application}
\alias{sns_create_platform_application}
\title{Creates a platform application object for one of the supported push
notification services, such as APNS and FCM, to which devices and mobile
apps may register}
\usage{
sns_create_platform_application(Name, Platform, Attributes)
}
\arguments{
\item{Name}{[required] Application names must be made up of only uppercase and lowercase ASCII
letters, numbers, underscores, hyphens, and periods, and must be between
1 and 256 characters long.}
\item{Platform}{[required] The following platforms are supported: ADM (Amazon Device Messaging),
APNS (Apple Push Notification Service), APNS\\_SANDBOX, and FCM (Firebase
Cloud Messaging).}
\item{Attributes}{[required] For a list of attributes, see
\href{https://docs.aws.amazon.com/sns/latest/api/API_SetPlatformApplicationAttributes.html}{SetPlatformApplicationAttributes}}
}
\description{
Creates a platform application object for one of the supported push
notification services, such as APNS and FCM, to which devices and mobile
apps may register. You must specify PlatformPrincipal and
PlatformCredential attributes when using the \code{CreatePlatformApplication}
action. The PlatformPrincipal is received from the notification service.
For APNS/APNS\\_SANDBOX, PlatformPrincipal is \"SSL certificate\". For
FCM, PlatformPrincipal is not applicable. For ADM, PlatformPrincipal is
\"client id\". The PlatformCredential is also received from the
notification service. For WNS, PlatformPrincipal is \"Package Security
Identifier\". For MPNS, PlatformPrincipal is \"TLS certificate\". For
Baidu, PlatformPrincipal is \"API key\".
}
\details{
For APNS/APNS\\_SANDBOX, PlatformCredential is \"private key\". For FCM,
PlatformCredential is \"API key\". For ADM, PlatformCredential is
\"client secret\". For WNS, PlatformCredential is \"secret key\". For
MPNS, PlatformCredential is \"private key\". For Baidu,
PlatformCredential is \"secret key\". The PlatformApplicationArn that is
returned when using \code{CreatePlatformApplication} is then used as an
attribute for the \code{CreatePlatformEndpoint} action.
}
\section{Request syntax}{
\preformatted{svc$create_platform_application(
Name = "string",
Platform = "string",
Attributes = list(
"string"
)
)
}
}
\keyword{internal}
|
adda6bb69c255004743d71e2fbb9fcd252d8c044
|
1a56429cc99500152d6239b9bc9778c47006f094
|
/data-raw/amp-ad.R
|
95a9130dd9fd12eef1cb22f4de22d25db00247a1
|
[
"MIT"
] |
permissive
|
labsyspharm/driad-website
|
bd019dd1aa7df3c94841f10e13f1d559f7911297
|
9d5bf30e6d30798c9008133aa4a574e52cb2a402
|
refs/heads/master
| 2023-03-29T07:13:42.105040
| 2021-04-05T14:12:00
| 2021-04-05T14:12:00
| 303,443,350
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,007
|
r
|
amp-ad.R
|
# Run on O2
library(DRIAD)
library(tidyverse)
library(here)
library(batchtools)
wd <- file.path("/n", "scratch3", "users", "c", "ch305", "driad")
dir.create(wd)
fnROSMAP <- wrangleROSMAP(tempdir())
# fnMSBB <- Sys.glob(here::here("data-raw", "msbb*.tsv.gz"))
fnMSBB <- wrangleMSBB(tempdir())
prediction_tasks_all <- tribble(
~brain_region, ~dataset, ~path,
"Dorsal prefrontal cortex", "ROSMAP", fnROSMAP,
"BM10", "MSBB", fnMSBB[1],
"BM22", "MSBB", fnMSBB[2],
"BM36", "MSBB", fnMSBB[3],
"BM44", "MSBB", fnMSBB[4],
) %>%
crossing(
comparison = c("AB", "AC", "BC")
) %>%
rowwise() %>%
mutate(
task = prepareTask(path, comparison) %>%
list(),
pairs = preparePairs(task) %>%
list()
) %>%
select(-path) %>%
ungroup() %>%
mutate(
id = paste0("prediction_task_", 1:n())
)
write_rds(
prediction_tasks_all,
here("data", paste0("prediction_tasks_all", ".rds"))
)
# prediction_tasks_all <- read_rds(here("data", paste0("prediction_tasks_all", ".rds")))
pwalk(
prediction_tasks_all,
function(task, pairs, id, ...) {
# browser()
x <- list(task = task, pairs = pairs)
write_rds(
x,
here("data", paste0(id, ".rds"))
# compress = "xz"
)
}
)
prediction_tasks <- prediction_tasks_all %>%
select(-task, -pairs)
write_rds(
prediction_tasks,
here("data", paste0("prediction_tasks", ".rds")),
compress = "xz"
)
valid_gene_symbols <- prediction_tasks_all %>%
rowwise() %>%
mutate(
task = if (dataset == "MSBB") select(task, -(1:6)) %>% list() else select(task, -(1:7)) %>% list()
) %>%
pull(task) %>%
map(colnames) %>%
reduce(intersect) %>%
setdiff(
c("ID", "PMI", "AOD", "CDR",
"Braak", "Barcode", "Label")
)
write_rds(
valid_gene_symbols,
here("data", paste0("valid_gene_symbols", ".rda")),
compress = "xz"
)
gene_set_sizes <- c(
5:29,
seq(30, 300, by = 5)
)
prediction_tasks_gene_sets <- prediction_tasks_all %>%
crossing(
gene_set_size = gene_set_sizes
) %>%
rowwise() %>%
mutate(
background_sets = DRIAD:::genBK(
valid_gene_symbols[1:gene_set_size],
task,
1000
) %>%
set_names(., paste0("BK_", seq_along(.))) %>%
list()
) %>%
ungroup()
write_rds(
prediction_tasks_gene_sets,
here("data", paste0("prediction_tasks_gene_sets", ".rds"))
)
# prediction_tasks_gene_sets <- read_rds(here("data", paste0("prediction_tasks_gene_sets", ".rds")))
prediction_tasks_chunks <- prediction_tasks_gene_sets %>%
mutate(
task_file = normalizePath(here("data", paste0(id, ".rds")))
) %>%
select(-task, -pairs) %>%
split(rep_len(1:500, nrow(.))) %>%
enframe("chunk", "data") %>%
# slice(1) %>%
mutate(
input_file = file.path(wd, paste0("prediction_task_", chunk, ".rds"))
)
pwalk(
prediction_tasks_chunks,
function(input_file, data, ...)
write_rds(data, input_file)
)
# Set up jobs
reg <- makeRegistry(
file.dir = file.path(wd, paste0("registry_", gsub(" ", "_", Sys.time()))),
seed = 1,
conf.file = here("data-raw", "batchtools-conf.R")
)
#reg$cluster.functions <- makeClusterFunctionsSlurm(template = "slurm-simple")
# Batchtools worker: evaluate background gene sets for one chunk of
# prediction tasks and write the results next to the input file.
#
# input_file: path to an .rds holding a tibble of prediction-task rows;
#             each row must carry `task_file` (path to a task/pairs .rds)
#             and `background_sets` (list of gene sets to score).
# n_workers:  NOTE(review): not referenced anywhere in the body --
#             presumably intended for furrr parallelism; confirm before use.
# Returns input_file (invisible side effect: writes the output .rds),
# so batchtools logs which chunk ran.
run_bk_job <- function(input_file, n_workers = 4) {
  library(tidyverse)
  library(DRIAD)
  library(furrr)
  message("Reading input...")
  df <- read_rds(input_file)
  message("Evaluating gene sets...")
  # rowwise(): each row owns its task/pairs objects, so evalGeneSets()
  # runs once per prediction task; results are list-columns.
  out <- df %>%
    rowwise() %>%
    mutate(
      task = read_rds(task_file) %>%
        list(),
      background_auc = DRIAD::evalGeneSets(
        background_sets,
        task[["task"]],
        task[["pairs"]]
      ) %>%
        list()
    ) %>%
    ungroup()
  message("Writing results...")
  # Drop the (large) task objects before saving; output is written as
  # "<input basename>_output.rds" in the same directory as the input.
  write_rds(
    out %>%
      select(-task),
    file.path(
      dirname(input_file),
      paste0(tools::file_path_sans_ext(basename(input_file)), "_output.rds")
    )
  )
  message("Done...")
  input_file
}
batchMap(
fun = run_bk_job,
input_file = prediction_tasks_chunks[["input_file"]]
)
job_table <- findJobs() %>%
# Chunk jobs into a single array job
mutate(chunk = 1)
submitJobs(
job_table,
resources = list(
memory = "8gb",
ncpus = 1L,
partition = "short",
walltime = 5*60*60,
chunks.as.arrayjobs = TRUE,
# For some reason these nodes fail to execute R because of an "illegal instruction"
exclude = "compute-f-17-[09-25]"
)
)
prediction_tasks_outputs <- prediction_tasks_chunks %>%
# rowwise() %>%
mutate(
result = map(
paste0(tools::file_path_sans_ext(basename(input_file)), "_output.rds"),
possibly(read_rds, otherwise = NULL)
)
)
# ungroup()
background_auc_df <- prediction_tasks_outputs %>%
# filter(map_lgl(result, Negate(is.null))) %>%
pull(result) %>%
bind_rows() %>%
arrange(dataset, brain_region, comparison, gene_set_size) %>%
select(dataset, brain_region, comparison, gene_set_size, background_auc) %>%
mutate(
background_auc = map(background_auc, pull, AUC)
)
write_rds(
background_auc_df,
file.path(wd, "background_gene_sets_auc.rds")
)
|
6f00b2cbb9edbb61c52e26214795413b233a8984
|
efc3930c5c08799cb78381d167c9beff0cff20be
|
/05.Data-tranformation/05.Add-new-variables-with-mutate/00.a.mutate.R
|
b251d20fb3a52e95935d7dabca69581fb25ee67f
|
[] |
no_license
|
ReneNyffenegger/R-for-Data-Science
|
5309d5aa07da7f524087b7312b0ab43f228a8795
|
5dc4185176978a1eacc4a560e10a3d70a263a4dd
|
refs/heads/master
| 2020-05-15T18:12:03.492070
| 2019-05-06T19:47:56
| 2019-05-06T19:47:56
| 182,419,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 359
|
r
|
00.a.mutate.R
|
# Demonstrates dplyr::mutate(): derive new columns from existing ones
# (R for Data Science, "Add new variables with mutate()").
library(nycflights13)
library(tidyverse)

# Narrow working copy of the flights table so the derived columns
# are easy to see when printed.
flights_sml <- flights %>%
  select(year:day,
         ends_with("delay"),
         distance,
         air_time)

# mutate() appends new variables to the data frame; a definition may
# reference columns created earlier in the same call (gain_per_hour
# uses both gain and hours).
flights_sml %>%
  mutate(gain = dep_delay - arr_delay,
         hours = air_time / 60,
         gain_per_hour = gain / hours)
|
77a5294f1e04ab8c9e359d254ebc9bc7b4ea6442
|
83f5e78e0446003f9ef164fce4aa3899d8753a68
|
/man/mybin.Rd
|
63b6d7e2c9cf29e18f19b64e019e4bf0d7656271
|
[] |
no_license
|
mclaunts/MATH4753
|
d4df46c12260eb3e2813d891f97f97c1674c3790
|
bac0cb3b9f5bb87ce6817fc989133abbacb259c7
|
refs/heads/master
| 2023-01-18T22:25:04.975915
| 2020-11-17T23:30:16
| 2020-11-17T23:30:16
| 298,446,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 428
|
rd
|
mybin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mybin.R
\name{mybin}
\alias{mybin}
\title{Mybin}
\usage{
mybin(iter = 100, n = 10, p = 0.5)
}
\arguments{
\item{iter}{the number of iterations}
\item{n}{the number of Bernoulli trials}
\item{p}{the probability of success in each trial}
}
\value{
barplot of relative frequencies and a table of the same
}
\description{
Mybin
}
\examples{
mybin()
}
|
2ef4c3ac214d91d8566ced2e62be01a6082b16c0
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlespectrumv1explorer.auto/man/PawsGetSpectrumRequest.Rd
|
b064be457041aace96fb7dae89f2c308e0be9010
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,395
|
rd
|
PawsGetSpectrumRequest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spectrum_objects.R
\name{PawsGetSpectrumRequest}
\alias{PawsGetSpectrumRequest}
\title{PawsGetSpectrumRequest Object}
\usage{
PawsGetSpectrumRequest(antenna = NULL, capabilities = NULL,
deviceDesc = NULL, location = NULL, masterDeviceDesc = NULL,
owner = NULL, requestType = NULL, type = NULL, version = NULL)
}
\arguments{
\item{antenna}{Depending on device type and regulatory domain, the characteristics of the antenna may be required}
\item{capabilities}{The master device may include its device capabilities to limit the available-spectrum response to the spectrum that is compatible with its capabilities}
\item{deviceDesc}{When the available spectrum request is made on behalf of a specific device (a master or slave device), device descriptor information for that device is required (in such cases, the requestType parameter must be empty)}
\item{location}{The geolocation of the master device (a device with geolocation capability that makes an available spectrum request) is required whether the master device is making the request on its own behalf or on behalf of a slave device (one without geolocation capability)}
\item{masterDeviceDesc}{When an available spectrum request is made by the master device (a device with geolocation capability) on behalf of a slave device (a device without geolocation capability), the rules of the applicable regulatory domain may require the master device to provide its own device descriptor information (in addition to device descriptor information for the slave device, which is provided in a separate parameter)}
\item{owner}{Depending on device type and regulatory domain, device owner information may be included in an available spectrum request}
\item{requestType}{The request type parameter is an optional parameter that can be used to modify an available spectrum request, but its use depends on applicable regulatory rules}
\item{type}{The message type (e.g. \code{AVAIL_SPECTRUM_REQ} for this request)}
\item{version}{The PAWS version}
}
\value{
PawsGetSpectrumRequest object
}
\description{
PawsGetSpectrumRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The request message for the available spectrum query protocol which must include the device's geolocation.
}
\seealso{
Other PawsGetSpectrumRequest functions: \code{\link{paws.getSpectrum}}
}
|
2f97612c47659b8c5b45411b0f00621d553368ca
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/clinDR/examples/emaxalt.Rd.R
|
aaec7b8c91a91a09ded91ab0f1963e6909f7ff20
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 598
|
r
|
emaxalt.Rd.R
|
library(clinDR)
### Name: emaxalt
### Title: Fit 4- or 3-parameter Emax model substituting simpler curves if
###   convergence not achieved.
### Aliases: emaxalt
### Keywords: nonlinear

### ** Examples

# BUG FIX: .Random.seed does not exist until the RNG has been used at
# least once in a session, so saving it first thing could error.
# Initialise the generator before saving its state.
if (!exists(".Random.seed", envir = globalenv())) set.seed(NULL)
save.seed <- .Random.seed
set.seed(12357)
# Five-arm dose-response design: dose levels and per-arm sample sizes.
doselev <- c(0, 5, 25, 50, 100)
n <- c(78, 81, 81, 81, 77)
dose <- rep(doselev, n)
### population parameters for simulation
e0 <- 2.465375     # placebo response
ed50 <- 67.481113  # dose giving half-maximal effect
emax <- 15.127726  # maximal effect above placebo
sdy <- 7.967897    # residual standard deviation
pop <- c(log(ed50), emax, e0)
# Simulate normally distributed responses around the Emax mean curve.
meanresp <- emaxfun(dose, pop)
y <- rnorm(sum(n), meanresp, sdy)
# Fit with the default model and with the 4-parameter variant.
simout <- emaxalt(y, dose)
simout2 <- emaxalt(y, dose, modType = 4)
# Restore the RNG state saved above.
.Random.seed <- save.seed
|
3ea33bdafe81d6213836f6a7da8f5bb1a5f4aaa7
|
ed5c385b23b4316e4fd0afb73c6d32aab76bc154
|
/R/resultados_todos.R
|
b4ac475a4717792f5e8fcde0118430331b97c174
|
[] |
no_license
|
loreabad6/analisis_elecciones_EC
|
a7149d5b460b67bae221ea7967f85733b0908074
|
b946067d065e96c571292b74d88f4060d95c9a29
|
refs/heads/main
| 2023-04-01T20:37:36.745974
| 2021-04-12T07:16:31
| 2021-04-12T07:16:31
| 339,191,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,155
|
r
|
resultados_todos.R
|
library(rtweet)
library(tidyverse)
library(lubridate)
library(gganimate)

# Pull Carlos Oporto's tweets reporting official CNE election results
# (retweets excluded). Uses <- and FALSE rather than `=` and the
# reassignable shorthand F.
resultados <- search_tweets(
  q = "Resultados AND Oficiales AND CNE AND carlosoporto",
  include_rts = FALSE
)

resultados_tidy <- resultados %>%
  # Keep only the columns of interest
  select(created_at, text) %>%
  # Convert timestamps to Ecuador's time zone
  mutate(hora_ec = with_tz(created_at, tzone = "America/Bogota")) %>%
  # Keep only tweets reporting results for Yaku, Hervas, Lasso or Arauz
  filter(str_detect(text, "Arauz | Yaku | Lasso | Hervas")) %>%
  # Split each tweet's text on line breaks
  mutate(tmp_chunks = str_split(text, fixed("\n"))) %>%
  # One column per candidate, taken from the matching line of the tweet
  mutate(
    arauz = map_chr(tmp_chunks, function(s) s[which(str_detect(s, "Arauz"))]),
    yaku = map_chr(tmp_chunks, function(s) s[which(str_detect(s, "Yaku"))]),
    lasso = map_chr(tmp_chunks, function(s) s[which(str_detect(s, "Lasso"))]),
    hervas = map_chr(tmp_chunks, function(s) s[which(str_detect(s, "Hervas"))])
  ) %>%
  # Drop the helper columns
  select(-c(text, tmp_chunks, created_at)) %>%
  # Extract the numeric percentage from each candidate line
  # (across(where(...)) replaces the superseded mutate_if)...
  mutate(across(where(is.character), ~ str_extract(.x, "\\d+\\.*\\d*"))) %>%
  # ...and convert it to numeric
  mutate(across(where(is.character), as.numeric))
# Build an animated bar-chart race of the four leading presidential
# candidates over time (one animation state per tweet timestamp).
s = resultados_tidy %>%
  # Long format: one row per (timestamp, candidate, percentage)
  pivot_longer(
    -c(hora_ec),
    names_to = "candidato",
    values_to = "porcentaje"
  ) %>%
  # Rank candidates within each timestamp (1 = highest percentage)
  group_by(hora_ec) %>%
  mutate(rank = rank(-porcentaje, ties.method = "first") * 1) %>%
  ungroup() %>%
  mutate(candidato = as.factor(candidato)) %>%
  # Party colour per candidate
  mutate(color = case_when(
    candidato == "arauz" ~ "#ffa500",
    candidato == "lasso" ~ "deepskyblue3",
    candidato == "yaku" ~ "purple",
    candidato == "hervas" ~ "orangered"
  )) %>%
  # Display name, split over two lines (first\nlast)
  mutate(nombre = case_when(
    candidato == "arauz" ~ "Andrés\nArauz",
    candidato == "lasso" ~ "Guillermo\nLasso",
    candidato == "yaku" ~ "Yaku\nPérez",
    candidato == "hervas" ~ "Xavier\nHervas"
  )) %>%
  ggplot(
    aes(
      x = porcentaje,
      y = as.factor(rank),
      group = candidato,
      fill = color,
      color = color
    )
  ) +
  geom_col(
    show.legend = F, alpha = 1,
    position = 'identity'
  ) +
  # Candidate name drawn just past the end of its bar
  geom_text(
    aes(label = paste(nombre, " ")),
    show.legend = F, nudge_x = 2, size = 5
  ) +
  # Use the literal colour strings stored in the data columns
  scale_color_identity(aesthetics = c("color", "fill")) +
  # Reverse so rank 1 appears at the top
  scale_y_discrete(limits = rev) +
  scale_x_continuous(
    labels = scales::percent_format(scale = 1, accuracy = 0.1)
  ) +
  # Zoom (without dropping data) to the observed percentage range
  coord_cartesian(
    xlim = c(14, 35)
  ) +
  theme_minimal() +
  theme(
    axis.title = element_blank(),
    axis.ticks.y = element_blank(),
    axis.text.y = element_blank(),
    text = element_text(size = 16)
  ) +
  # gganimate: one state per timestamp, tweened linearly between states
  transition_states(
    hora_ec,
    transition_length = 15,
    # state_length = 20,
    wrap = FALSE
  ) +
  ease_aes('linear')+
  enter_fade() +
  exit_fade() +
  labs(
    subtitle = 'Hora y fecha: {closest_state}',
    caption = "Fuente: CNE, @carlosoporto. Visualización: @loreabad6"
  )
# Render the animation and save it as a GIF
anim_save("conteo_top4.gif", end_pause = 15,
          duration = 25,
          animation = s, rewind = F,
          width = 500, height = 400)
|
d6428a85839bc2df8f17d44a435e73c2fa8f61df
|
ebbe08d58a57ae2e9d308a12df500e1e0ef8d098
|
/microbiome/alpha_diversity_gender.R
|
878772e269dd13832f9c5a52a4fdbe61455f6608
|
[] |
no_license
|
Drizzle-Zhang/bioinformatics
|
a20b8b01e3c6807a9b6b605394b400daf1a848a3
|
9a24fc1107d42ac4e2bc37b1c866324b766c4a86
|
refs/heads/master
| 2022-02-19T15:57:43.723344
| 2022-02-14T02:32:47
| 2022-02-14T02:32:47
| 171,384,799
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,095
|
r
|
alpha_diversity_gender.R
|
# Alpha-diversity analysis of microbiome samples: per-dose boxplots at each
# timepoint plus a median-Shannon trend line, restricted to one gender.
# NOTE(review): despite the .csv extension, the estimator summary is read
# tab-separated here while the "dose" section below reads the same file
# comma-separated -- verify which separator matches the actual file.
library(amplicon)
file.in <- '/home/drizzle_zhang/microbiome/result/4.Alpha_Diversity/alpha_index_table/alpha_estimator_summary.csv'
mat.alpha <- read.table(file.in, sep = '\t', header = T, row.names = 1)
type.dose <- c(0, 1, 2, 3)  # overwritten below from the merged metadata
# Sample metadata: one row per sample (Sample, Dose, Time, Gender, ...)
meta.file <- '/home/drizzle_zhang/microbiome/result/meta_sample.out.txt'
df.meta <- read.delim(meta.file, stringsAsFactors = FALSE)
df.meta$Dose <- as.factor(df.meta$Dose)
row.names(df.meta) <- df.meta$Sample
################################### gender
# Restrict the whole analysis below to male samples
gender <- 'male'
df.meta <- df.meta[df.meta$Gender == gender,]
sample.male <- df.meta[, 'Sample']
mat.alpha.male <- mat.alpha[sample.male,]  # NOTE(review): never used afterwards
# Normalize each dose group against its own Time == -1 (baseline) medians
mat.combine <- merge(mat.alpha, df.meta, by = 'row.names')
row.names(mat.combine) <- mat.combine$Row.names
type.dose <- unique(mat.combine$Dose)
mat.alpha.norm <- data.frame()
for (dose in type.dose) {
    # Per-index median of the baseline (Time == -1) samples of this dose
    baseline.alpha <- mat.combine[
        ((mat.combine$Dose == dose) & (mat.combine$Time == -1)),
        c("observed_species", "shannon", "simpson")]
    baseline.alpha <- apply(baseline.alpha, 2, median)
    sub.mat.alpha <- mat.combine[
        mat.combine$Dose == dose, c("observed_species", "shannon", "simpson")]
    sub.alpha.norm <- t(apply(sub.mat.alpha, 1, function(x) {x / baseline.alpha}))
    # NOTE(review): the RAW values are accumulated here; the normalized
    # variant (sub.alpha.norm) is only used by the commented-out line below.
    # Confirm which version is intended.
    mat.alpha.norm <- rbind(mat.alpha.norm, sub.mat.alpha)
    # mat.alpha.norm <- rbind(mat.alpha.norm, sub.alpha.norm)
}
# Per-timepoint boxplots and Shannon medians across the time series
path.plot <- '/home/drizzle_zhang/microbiome/result/4.Alpha_Diversity/alpha_boxplot_gender'
series.time <- unique(df.meta$Time)
# Fixed timepoint order; overrides the unique() result above
series.time <- c(-1, 1, 5, 9, 17, 21, 25, 29, 33, 41, 49, 60, 68, 84)
df.plot.fit <- data.frame()
i = 1  # sequential timepoint index used as the x axis of the trend plot
for (sub.time in series.time) {
    # Metadata rows for this timepoint (the gender filter is redundant:
    # df.meta was already subset to this gender above)
    sel.meta <- df.meta[df.meta$Gender == gender,]
    sel.meta <- sel.meta[sel.meta$Time == sub.time,]
    # sel.meta <- sel.meta[sel.meta$Dose %in% c(0, 3),]
    row.names(sel.meta) <- sel.meta$Sample
    # NOTE(review): rows are pulled from the un-normalized mat.alpha via
    # SampleName -- confirm SampleName (not Sample) matches mat.alpha's
    # row names.
    use.sample <- sel.meta$SampleName
    mat.plot.in <- data.frame()
    # Long format: one block per alpha index (value/type.alpha + metadata)
    for (alpha_index in c("observed_species", "shannon", "simpson")) {
        sub.mat <- data.frame(
            value = mat.alpha[use.sample, alpha_index],
            type.alpha = rep(alpha_index, length(use.sample)),
            row.names = use.sample)
        sub.mat <- cbind(sub.mat, sel.meta)
        mat.plot.in <- rbind(mat.plot.in, sub.mat)
    }
    # Boxplot of each alpha index by dose, one facet per index
    plot.alpha <-
        ggplot(aes(x = Dose, y = value, color = Dose, shape = Dose),
               data = mat.plot.in) +
        geom_boxplot() +
        facet_wrap(. ~ type.alpha, scales = 'free') +
        labs(x = '', y = 'Alpha Diversity Measure') +
        theme(panel.background = element_rect(color = 'gray',
                                              fill = 'transparent'))
    ggsave(plot = plot.alpha, path = path.plot,
           filename = paste0(gender, '_', sub.time, '_0123.png'))
    # ggsave(plot = plot.alpha, path = path.plot,
    #        filename = paste0(gender, '_', sub.time, '_0123_norm.png'))
    # Median Shannon index per dose at this timepoint, for the trend plot
    mat.shannon <- mat.plot.in[
        mat.plot.in$type.alpha == 'shannon', c('value', 'Dose')]
    for (dose in type.dose) {
        sub.shannon <- median(
            mat.shannon[mat.shannon$Dose == dose, 'value'])
        df.plot.fit <-
            rbind(df.plot.fit,
                  data.frame(Shannon = sub.shannon,
                             Dose = dose, Time = i))
    }
    i = i + 1
}
# plot.fit <-
#     ggplot(data = df.plot.fit, aes(x = Time, y = Shannon, color = Dose)) +
#     geom_line() +
#     geom_point()
# Trend of the median Shannon index over time, one line per dose
df.plot.fit$Dose <- as.factor(df.plot.fit$Dose)
ggplot(data = df.plot.fit, aes(x = Time, y = Shannon, color = Dose)) +
    geom_line() +
    geom_point()
################################## dose
# Same analysis as the gender section above, but fixing the dose and
# comparing genders. Note the estimator summary is read comma-separated
# here (tab-separated above) -- verify against the actual file.
file.in <- '/home/drizzle_zhang/microbiome/result/4.Alpha_Diversity/alpha_index_table/alpha_estimator_summary.csv'
mat.alpha <- read.table(file.in, sep = ',', header = T, row.names = 1)
# Sample metadata, re-read from scratch
meta.file <- '/home/drizzle_zhang/microbiome/result/meta_sample.out.txt'
df.meta <- read.delim(meta.file, stringsAsFactors = FALSE)
df.meta$Dose <- as.factor(df.meta$Dose)
row.names(df.meta) <- df.meta$Sample
# Restrict to the highest dose group
dose <- '3'
df.meta <- df.meta[df.meta$Dose == dose,]
sample.sub <- df.meta[, 'Sample']
mat.alpha.sub <- mat.alpha[sample.sub,]
# Normalize each gender against its own Time == 'A' baseline medians.
# NOTE(review): baseline is Time == 'A' here but Time == -1 in the gender
# section -- confirm the Time coding of this input file.
mat.combine <- cbind(mat.alpha.sub, df.meta)
type.gender <- unique(mat.combine$Gender)
mat.alpha.norm <- data.frame()
for (gender in type.gender) {
    baseline.alpha <- mat.combine[
        ((mat.combine$Gender == gender) & (mat.combine$Time == 'A')),
        c("observed_species", "shannon", "simpson")]
    baseline.alpha <- apply(baseline.alpha, 2, median)
    sub.mat.alpha <- mat.combine[
        mat.combine$Gender == gender, c("observed_species", "shannon", "simpson")]
    sub.alpha.norm <- t(apply(sub.mat.alpha, 1, function(x) {x / baseline.alpha}))
    # NOTE(review): as in the gender section, the raw values (not
    # sub.alpha.norm) are what actually get kept.
    mat.alpha.norm <- rbind(mat.alpha.norm, sub.mat.alpha)
    # mat.alpha.norm <- rbind(mat.alpha.norm, sub.alpha.norm)
}
# Per-timepoint boxplots and Shannon medians over the time series
path.plot <- '/home/drizzle_zhang/microbiome/result/4.Alpha_Diversity/alpha_boxplot_dose'
series.time <- unique(df.meta$Time)
df.plot.fit <- data.frame()
i = 1  # sequential timepoint index used as the x axis of the trend plot
for (sub.time in series.time) {
    # Metadata rows for this timepoint (the first assignment is redundant)
    sel.meta <- df.meta
    sel.meta <- df.meta[df.meta$Time == sub.time,]
    # sel.meta <- sel.meta[sel.meta$Dose %in% c(0, 3),]
    row.names(sel.meta) <- sel.meta$Sample
    use.sample <- sel.meta$Sample
    mat.plot.in <- data.frame()
    # Long format: one block per alpha index (value/type.alpha + metadata)
    for (alpha_index in c("observed_species", "shannon", "simpson")) {
        sub.mat <- data.frame(
            value = mat.alpha.norm[use.sample, alpha_index],
            type.alpha = rep(alpha_index, length(use.sample)),
            row.names = use.sample)
        sub.mat <- cbind(sub.mat, sel.meta)
        mat.plot.in <- rbind(mat.plot.in, sub.mat)
    }
    # Boxplot per alpha index; x is Dose, which is constant in this section,
    # so each facet shows a single group
    plot.alpha <-
        ggplot(aes(x = Dose, y = value, color = Dose, shape = Dose),
               data = mat.plot.in) +
        geom_boxplot() +
        facet_wrap(. ~ type.alpha, scales = 'free') +
        labs(x = '', y = 'Alpha Diversity Measure') +
        theme(panel.background = element_rect(color = 'gray',
                                              fill = 'transparent'))
    ggsave(plot = plot.alpha, path = path.plot,
           filename = paste0(dose, '_', sub.time, '.png'))
    # ggsave(plot = plot.alpha, path = path.plot,
    #        filename = paste0(gender, '_', sub.time, '_0123_norm.png'))
    # Median Shannon index per gender at this timepoint
    mat.shannon <- mat.plot.in[
        mat.plot.in$type.alpha == 'shannon', c('value', 'Gender')]
    for (gender in type.gender) {
        sub.shannon <- median(
            mat.shannon[mat.shannon$Gender == gender, 'value'])
        df.plot.fit <-
            rbind(df.plot.fit,
                  data.frame(Shannon = sub.shannon,
                             Gender = gender, Time = i))
    }
    i = i + 1
}
# Trend of the median Shannon index over time, one line per gender
ggplot(data = df.plot.fit, aes(x = Time, y = Shannon, color = Gender)) +
    geom_line() +
    geom_point()
|
d3a72bbd2d936476c723df55c370e8ab15fe2ae5
|
8c0013662db894bbd34454a1fd49506e31377d34
|
/TRS/newer R to segmented and SLM workflow/tests/collect & output exercise data at percent of max WR/collectExeData.r
|
bcb51f9196dbeb8144ead4c9625bed4bb586696e
|
[
"MIT"
] |
permissive
|
tudou2015/DOS-Segmented-Regression-Tools
|
a9285ea3cfc30d269ee353f5186a57778db764c3
|
556744f567b5188f97f43061cdd433ba6b57e236
|
refs/heads/master
| 2020-06-11T07:00:31.346740
| 2016-07-04T17:27:41
| 2016-07-04T17:27:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,635
|
r
|
collectExeData.r
|
# Collect exercise data at a given fraction of the maximal test.
#
# Given `argv`, a fraction in [0, 1] (e.g. 0.5 for 50%), locate the exercise
# time point at that fraction of the total exercise time and average the key
# variables over a window of +/- `span` around it (for the 0 and 1 endpoints
# the window is the first / last 2*span of the test instead).
#
# Relies on objects in the calling environment: the numeric time vector
# `exeTime`, and data frames `W`, `VOK`, `HR`, `VE` with columns
# x (time) and y (value). `span` is 5/60 in the same units as exeTime
# (5 seconds if exeTime is in minutes -- confirm against the data export).
#
# Returns a one-row data frame: AvgW/AvgWstDev, AvgVOK/AvgVOKstDev,
# AvgHR/AvgHRstDev, AvgVE/AvgVEstDev.
collectExeData <- function(argv) {
  # Target time as a fraction of total exercise time
  percentExeTime <- argv * max(exeTime)
  # Half-width of the averaging window
  span <- (5 / 60)
  # Index of the entry in `lookHere` closest to `value`; ties -> first match
  findClosest <- function(value, lookHere) {
    which(abs(value - lookHere) == min(abs(value - lookHere)))[1]
  }
  if (argv == 1) {
    # Maximal work-rate: average over the final 2*span of the test
    equivExeTime <- tail(W, n = 1)[[1]][1]
    indLequivExeTime <- findClosest(equivExeTime - 2 * span, exeTime)
    indUequivExeTime <- which(equivExeTime == exeTime)[1]
  } else if (argv == 0) {
    # Minimal work-rate: average over the first 2*span of the test
    equivExeTime <- head(W, n = 1)[[1]][1]
    indLequivExeTime <- which(equivExeTime == exeTime)[1]
    indUequivExeTime <- findClosest(equivExeTime + 2 * span, exeTime)
  } else {
    # Intermediate fraction: symmetric window of +/- span around the target
    equivExeTime <- exeTime[findClosest(percentExeTime, exeTime)]
    indLequivExeTime <- findClosest(equivExeTime - span, exeTime)
    indUequivExeTime <- findClosest(equivExeTime + span, exeTime)
  }
  idx <- indLequivExeTime:indUequivExeTime
  # Mean and SD of one variable's y column over the window
  avg_sd <- function(v) c(mean(v$y[idx]), sd(v$y[idx]))
  w   <- avg_sd(W)
  vok <- avg_sd(VOK)
  hr  <- avg_sd(HR)
  ve  <- avg_sd(VE)
  data.frame(
    AvgW = w[1], AvgWstDev = w[2],
    AvgVOK = vok[1], AvgVOKstDev = vok[2],
    AvgHR = hr[1], AvgHRstDev = hr[2],
    AvgVE = ve[1], AvgVEstDev = ve[2]
  )
}
# Append a labeled data frame to the run's output CSV.
#
# `argv`  - data frame (e.g. a collectExeData() result) to append
# `label` - section label written on its own line above the data
#
# The destination file is paste0(outputFileName, "Data.csv"), where
# `outputFileName` must exist in the calling environment. No return value;
# the label and data are appended to the CSV.
writeData <- function(argv, label) {
  # Compute the destination path once instead of per write.table() call
  out_file <- paste0(outputFileName, "Data.csv")
  # Section label first, then the data, both appended without row names
  write.table(label, out_file, sep = ",", append = TRUE, row.names = FALSE)
  write.table(argv, out_file, sep = ",", append = TRUE, row.names = FALSE)
}
# Test Code #
# Builds the globals collectExeData() relies on from the first CSV found in
# the working directory.
# NOTE(review): dir()'s `pattern` is a regular expression, not a glob --
# "*.csv" only matches by accident; "\\.csv$" is what is meant.
csv <- dir(pattern="*.csv")
csvExeData <- read.csv(csv[1], header = TRUE)
exeTime <- csvExeData[[1]]
# Columns 7/9/10/13 presumably map to VO2, VE, HR and work-rate -- verify
# against the actual export format of the test file.
VOK <- data.frame(x=exeTime, y=csvExeData[[7]])
VE <- data.frame(x=exeTime, y=csvExeData[[9]])
HR <- data.frame(x=exeTime, y=csvExeData[[10]])
W <- data.frame(x=exeTime, y=csvExeData[[13]])
outputFileName <- "EXO-1 AaAa V3 11-11-11 "
# Spot checks: expected values below are specific to this test file
collectExeData(0) # W should be ~2.8, VOK should be ~8.68, HR should be ~92
collectExeData(0.5) # W should be ~66, VOK should be ~19, HR should be ~139
collectExeData(1) # W should be ~132, VOK should be ~35, HR should be ~196
# Append three summaries to "<outputFileName>Data.csv"
writeData(collectExeData(0),"MinWR")
writeData(collectExeData(0.8),"E80")
writeData(collectExeData(1),"MaxWR")
|
273da2cce81d6074b8fd2457cbd15af3318b3f63
|
3f3a25ade89f7ee32537d959cd312bd4f657c7ac
|
/sto_analysis/shanghai_index/index1.r
|
cb6de2487200a03dc850cfe610d13421a6413ad0
|
[] |
no_license
|
davidyuqiwei/davidyu_v1
|
9b913b835b773888375f3351a2336f04ae2858c9
|
f4906ede802cabeafd50109d80669090211b38cd
|
refs/heads/master
| 2021-10-26T09:01:31.406090
| 2021-10-21T00:29:46
| 2021-10-21T00:29:46
| 138,952,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,685
|
r
|
index1.r
|
# Seasonal decomposition (stl) of Shanghai index volume/close series, plus an
# exploratory look at a fund-holdings file. Order-dependent script; variable
# names (n1, vol2, fit, close1) are reused with different meanings.
setwd("G:/stock/data/shanghai_shenzhen_index")
df1=read.csv("shanghai_index_1991-01-01_2017-11-21.csv")
df1=df1[complete.cases(df1),]
close1=df1[,5]   # column 5 presumed closing price -- verify
vol=df1[,6]      # column 6 presumed volume -- verify
# Keep rows with plausible (positive, non-huge) volume
n1=which(vol>0&vol<10000000)
df2=df1[n1,]
year1=substr(df2[,1],1,4)
month1=substr(df2[,1],6,7)
# n1 is reused: now it indexes the 2012-2016 window of df2
n1=which(year1>=2012&year1<=2016)
vol=df2[n1,6]
close1=df2[n1,5]
# Year x month mean volume, flattened row-wise into a monthly series.
# NOTE(review): start = c(2004, 1) does not match the 2012-2016 window -- verify.
vol2=tapply(vol,list(year1[n1],month1[n1]),function(x)mean(x,na.rm=T))
ser = ts(c(t(vol2)), freq=12, start = c(2004, 1))
fit = stl(ser, s.window="periodic")
plot(fit)
sea1=fit$time.series[,1]   # seasonal component of volume
# Same decomposition for the closing price
vol2=tapply(close1,list(year1[n1],month1[n1]),function(x)mean(x,na.rm=T))
ser = ts(c(t(vol2)), freq=12, start = c(2004, 1))
fit = stl(ser, s.window="periodic")
plot(fit)
sea2=fit$time.series[,1]   # seasonal component of close
# Fund-holdings file: keep rows with a positive numeric second column
setwd("G:/stock/data/owner")
a1=read.csv("zhongyanghuijin_tr.csv",head=F)
n1=which(as.numeric(as.character(a1[,2]))>0)
a2=a1[n1,]
sort(table(a2[,1]))
date1=as.character(a2[,5])
stock_name=as.character(a2[,1])
# Stocks appearing after 2017-08-30 that occur exactly once in the file
stk1=stock_name[date1>"2017-08-30"]
l1=lapply(stk1,function(x)which(stock_name==x))
l2=lapply(l1,function(x)length(x))
stk1[which(unlist(l2)==1)]
# NOTE(review): n1 now indexes a2 (holdings), but vol/close1 come from df2 --
# this cross-indexing looks unintended; verify.
vol1=vol[n1]
plot(vol1,close1[n1])
cl2=close1[n1]
n <- length(cl2);
lrest <- log(cl2[-1]/cl2[-n])   # log returns of the close series
plot(vol1[-1],lrest)
vol2=vol1[-which(vol1>10000000)]
# NOTE(review): close1 (2012-2016 subset) and year1/month1 (full df2) have
# different lengths here, so tapply will recycle/misalign -- verify.
close2=tapply(close1,list(year1,month1),function(x)mean(x,na.rm=T))
return1=diff(log(close1))
re2=return1[2000:length(return1)]
plot(re2)
hist(re2)
# rownames(close2)=c(1991:2017)
# colnames(close2)=c("Jan","Feb","Mar", "Apr", "May", "Jun" , "Jul", "Aug" , "Sep" , "Oct", "Nov", "Dec")
# Long monthly close series; the final point is hard-coded to 4000
close3=c(t(close2))
close3[length(close3)]=4000
ser = ts(c(close3), freq=12, start = c(1991, 1))
fit = stl(ser, s.window="periodic")
plot(fit)
|
bf691da5de1fb5b284301909f495a12f46cb8ad4
|
b5680157471d9eefd15175390cc577036e905fc2
|
/scripts/normalise_matrix_a.R
|
64cb21e2c2ba68841dd443fa45762864bb87a9ab
|
[] |
no_license
|
KIRILLxBREAK/bioinformatics
|
4af57fb0313664acdc5c6a80dea696f05068d172
|
02e001b2268a4ae1db3e6ef9961f948e78d49b4b
|
refs/heads/master
| 2021-01-09T23:36:09.218293
| 2019-01-09T18:29:27
| 2019-01-09T18:29:27
| 73,214,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 451
|
r
|
normalise_matrix_a.R
|
#!/usr/local/bin/Rscript
# Row-center the expression matrix A: load dfA, promote entrezgene_id to row
# names, subtract each row's mean, then persist the result as CSV and RData.
library(magrittr)
library(dplyr)
# Alternative source kept for reference:
# dfA <- read.csv('../analysis/csv/A.csv')
load('../data/temp_rdata/dfA.rd')
# Gene identifiers become row names and are dropped as a column
rownames(dfA) <- dfA[['entrezgene_id']]
dfA <- dplyr::select(dfA, -entrezgene_id)
# Center every row at zero mean
dfA <- dfA - rowMeans(dfA)
# Write the normalized matrix with row and column names, comma-separated
path_to_A_norm <- "../analysis/csv/A_norm.csv"
write.table(dfA, file = path_to_A_norm, sep = ',', row.names = TRUE, col.names = TRUE)
# Snapshot as RData, then clean up the workspace
save(dfA, file = '../data/temp_rdata/dfA_norm.rd')
rm(dfA)
rm(path_to_A_norm)
|
bb37a9603df162a4dae4f08d22fbb57f6f8d2b98
|
6b4aeb90da899e7053191cf20c2617cae0d46fee
|
/plot3.R
|
009f87df8eb401a25f8d290d6a05cb2068d71ba5
|
[] |
no_license
|
tanmayshishodia/ExData_Plotting1
|
1b497c1e6c1320820bbb91500b0fdf48984cfd4c
|
d5f314fe3df040b59ddd5f564ed5815cf3783969
|
refs/heads/master
| 2020-07-25T22:23:35.192891
| 2019-09-15T11:41:19
| 2019-09-15T11:41:19
| 208,441,241
| 0
| 0
| null | 2019-09-14T13:03:06
| 2019-09-14T13:03:06
| null |
UTF-8
|
R
| false
| false
| 864
|
r
|
plot3.R
|
library(data.table)
# Household power consumption data; '?' marks missing values in the raw file
dt <- fread("household_power_consumption.txt", na.strings = "?")
# Combine Date + Time into a POSIXct column. `:=` modifies dt by reference,
# so dt1 here is the same object as dt.
dt1 <- dt[, DateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
# Keep the 2007-02-01 .. 2007-02-02 window.
# NOTE(review): `<= "2007-02-03"` also admits exactly midnight of Feb 3;
# `< "2007-02-03"` would be the cleaner upper bound.
dt1 <- dt[dt$DateTime >= "2007-02-01" & dt$DateTime <= "2007-02-03"]
# 480x480 PNG output device
png("plot3.png", width = 480, height = 480, units = "px")
# Three sub-metering series over time on one axis, with a legend
plot(x = dt1$DateTime, y = dt1$Sub_metering_1, xlab = "", ylab = "Energy Sub metering", type = "l")
lines(x = dt1$DateTime, y = dt1$Sub_metering_2, col = "red")
lines(x = dt1$DateTime, y = dt1$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = 1)
# Close the device so the file is actually written
dev.off()
|
dfd4db06a950167e82f29b1851c978971cdc09fb
|
32413897f497fd21258a034f737dac9d1904e0d6
|
/regression.R
|
f88dd81b7829a89d3e96d9be406a8eb9bef16dd6
|
[] |
no_license
|
liaison/RnD
|
e984c75c33e3d93d84fb8d80fa2a604932e7cc23
|
0c6e2ebff2aa74f72e03b2e86620a4d3c7027677
|
refs/heads/master
| 2021-01-22T10:56:35.714984
| 2017-04-10T09:35:37
| 2017-04-10T09:35:37
| 49,974,551
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,227
|
r
|
regression.R
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#######################
# Regression Models
#######################
# methodologies
#     predict "response" (dependent) variable from one or more "predictor"
# (independent/explanatory) variables.
#     OLS (ordinary least squares) regression, the regression models that
# minimize the sum of square residuals
# (difference between predicted value and actual value)
# e.g:
# Simple linear regression, polynomial regression and multiple linear regression
##############################
# Simple regression on the built-in `women` dataset (height vs weight)
fit <- lm(weight ~ height, data=women)
summary(fit)
women$weight
# get predicted value for all the samples involved in the model.
fitted(fit)
# get the residual values
residuals(fit)
plot(women$height, women$weight,
     xlab="Height (in inches)",
     ylab="Weight (in pounds)")
abline(fit)
# I() function wraps/escapes the arithmetical operators.
# Add a quadratic (power 2) term to the polynomial.
fit2 <- lm(weight ~ height + I(height ^ 2), data = women)
# The significance of the squared term (t=13.89) suggests that the inclusion of
# the quadratic term improves the model fit.
# Indeed, as it shows in the following graph, the fitting line is smoother.
summary(fit2)
plot(women$height, women$weight,
     xlab="Height (in inches)",
     ylab="Weight (in pounds)")
lines(women$height, fitted(fit2))
##############################
# Enhanced scatter plot from the car package
library(car)
# the loess fit is rendered as dashed line (lty=2)
# the pch=19 option displays points as filled circles (default: open circle)
scatterplot(weight ~ height, data=women,
            spread=FALSE, smoother.args=list(lty=2), pch=19,
            main="Women Age 30 - 39",
            xlab="Height (inches)",
            ylab="Weight (lbs.)")
##############################
# multiple linear regression: regression involves multiple variables;
# uses the built-in state.x77 dataset
str(state.x77)
head(state.x77)
states <- as.data.frame(state.x77[, c("Murder", "Population",
                                      "Illiteracy", "Income", "Frost")])
# the bivariate correlations
cor(states)
library(car)  # already attached above; this second call is a no-op
scatterplotMatrix(states, spread=FALSE, smoother.args = list(lty=2),
                  main="Scatter Plot Matrix")
# from the correlation table and the scatter plot, we could tell:
# 1). Murder rates rise with population and illiteracy.
# 2). Murder rates fall with higher income levels and frost
# 3). Colder states have lower illiteracy rates and population and higher income.
b6fa4bf7270a992f30b9255393bd071ea2526a80
|
bdc8c4e780a8c23831cc6b640faa504c4c516679
|
/SVM_cv_demo.R
|
1be4f4d1a098d83e43641cb9b32772471df9cee3
|
[] |
no_license
|
maccalvert/SVM_cv_demo
|
52432f0cf7ad1e92d2e012f9785d13f165f19ebf
|
2c287864d093f4117f8ed58084287d6d307e8ea2
|
refs/heads/main
| 2023-01-12T00:52:26.629252
| 2020-11-09T16:25:04
| 2020-11-09T16:25:04
| 311,397,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,217
|
r
|
SVM_cv_demo.R
|
library(e1071)
library(ggplot2)
# Synthetic 2-class data: n points in 2D, shifted per third so the two
# classes partially overlap
n <- 5000
set.seed(10111)
x <- matrix(rnorm(n*2), ncol = 2)
x[1:round(n/3),] <- x[1:round(n/3),] + 2
x[(round(n/3)+1):(round(n/3)*2),2] <- x[(round(n/3)+1):(round(n/3)*2),2] - 2
x[(round(n/3)+1):(round(n/3)*2),1] <- x[(round(n/3)+1):(round(n/3)*2),1] + 2
# Labels: first two thirds are +1, remaining third is -1 (lengths sum to n).
# NOTE(review): `round(n/3,)` relies on R tolerating an empty trailing
# argument -- tidy to round(n/3).
y <- c(rep(1,round(n/3,)*2), rep(-1,round(n/3)-1))
dat <- data.frame(x=x,y=as.factor(y))
# Plot the full data set
ggplot(data = dat, aes(x = x.2, y = x.1, color = y, shape = y)) +
  geom_point(size = 2) +
  scale_color_manual(values=c("#000000", "#FF0000")) +
  theme(legend.position = "none")
# Plot random subsets of 1000 and 4000 points
samp <- dat[sample(nrow(dat),1000),]
ggplot(data = samp, aes(x = x.2, y = x.1, color = y, shape = y)) +
  geom_point(size = 2) +
  scale_color_manual(values=c("#000000", "#FF0000")) +
  theme(legend.position = "none")
samp <- dat[sample(nrow(dat),4000),]
ggplot(data = samp, aes(x = x.2, y = x.1, color = y, shape = y)) +
  geom_point(size = 2) +
  scale_color_manual(values=c("#000000", "#FF0000")) +
  theme(legend.position = "none")
# One-off hold-out run: train a linear SVM on 4000 points and compare
# train vs test confusion tables. After as.data.frame(table(true, pred))
# with levels (-1, 1), Freq is ordered TN, FN, FP, TP.
samp <- base::sample(nrow(dat),4000)
svmfit = svm(y ~ ., data = dat[samp,], kernel = "linear", cost = 10, scale = FALSE)
tab_train <- as.data.frame(table(true = dat[samp,"y"], pred=predict(svmfit, dat[samp,])))
tab_test <- as.data.frame(table(true = dat[-samp,"y"], pred=predict(svmfit, dat[-samp,])))
# Train-minus-test accuracy gap (an optimism estimate)
acc_diff <- (tab_train$Freq[1]+tab_train$Freq[4])/sum(tab_train$Freq)-(tab_test$Freq[1]+tab_test$Freq[4])/sum(tab_test$Freq)
acc_diff
# specificity (train, then test)
tab_train$Freq[1]/(tab_train$Freq[3]+tab_train$Freq[1])
tab_test$Freq[1]/(tab_test$Freq[3]+tab_test$Freq[1])
# sensitivity (train, then test)
tab_train$Freq[4]/(tab_train$Freq[2]+tab_train$Freq[4])
tab_test$Freq[4]/(tab_test$Freq[2]+tab_test$Freq[4])
# Ok, now for 1000 iterations
# Repeated hold-out cross-validation of an SVM on the global data set `dat`.
#
# For each of `nit` iterations: draw `samp_size` training rows (without
# replacement) from the `n` rows of `dat`, fit an SVM with kernel
# `kernel_func` (cost = 10), and record the train-minus-test gap of
# sensitivity, specificity and accuracy -- demonstrating that larger
# training sets shrink the optimism of the training estimate.
#
# Relies on globals `dat` (data frame with factor column y) and `n` (its row
# count), and on e1071 being attached for svm()/predict().
# Returns a data frame with columns sensitivity, specificity, accuracy.
cv <- function(nit, samp_size, kernel_func) {
  # Preallocate instead of growing vectors inside the loop
  sens <- numeric(nit)
  spec <- numeric(nit)
  acc <- numeric(nit)
  # Sensitivity, specificity and accuracy from a flattened 2x2 confusion
  # table; as.data.frame(table(true, pred)) orders Freq as TN, FN, FP, TP.
  metrics <- function(confusion) {
    freq <- confusion$Freq
    c(sens = freq[4] / (freq[2] + freq[4]),
      spec = freq[1] / (freq[3] + freq[1]),
      acc  = (freq[1] + freq[4]) / sum(freq))
  }
  # seq_len() is safe for nit = 0, unlike 1:nit
  for (i in seq_len(nit)) {
    train <- base::sample(n, samp_size, replace = FALSE)
    svmfit <- svm(y ~ ., data = dat[train, ], kernel = kernel_func, cost = 10)
    m_train <- metrics(as.data.frame(table(true = dat[train, "y"], pred = predict(svmfit, dat[train, ]))))
    m_test <- metrics(as.data.frame(table(true = dat[-train, "y"], pred = predict(svmfit, dat[-train, ]))))
    gap <- m_train - m_test
    sens[i] <- gap[["sens"]]
    spec[i] <- gap[["spec"]]
    acc[i] <- gap[["acc"]]
  }
  data.frame(sensitivity = sens, specificity = spec, accuracy = acc)
}
# Timing wrapper. NOTE(review): `a - Sys.time()` prints a NEGATIVE duration;
# `Sys.time() - a` is what is meant.
a <- Sys.time()
lin4000 <- cv(nit=1000, samp_size = 4000, kernel_func = "linear")
a - Sys.time()
# Histograms of the train-minus-test gap for each metric (4000-sample runs)
ggplot(data=lin4000, aes(x=accuracy))+
  geom_histogram(alpha=0.5, position="identity", binwidth = 0.0025, color ="#00BFC4", fill ="#00BFC4")
# Histograms of sensitivity
ggplot(data=lin4000, aes(x=sensitivity))+
  geom_histogram(alpha=0.5, position="identity", binwidth = 0.0025, color ="#F8766D", fill ="#F8766D")
# Histograms of specificity
ggplot(data=lin4000, aes(x=specificity))+
  geom_histogram(alpha=0.5, position="identity", binwidth = 0.0025, color ="#7CAE00", fill ="#7CAE00")
# Effect of training-set size on the gap.
# NOTE(review): the original comment said "sample size of 10" but 25 is used.
lin10 <- cv(nit=1000, samp_size = 25, kernel_func = "linear")
# sample size of 100
lin100 <- cv(nit=1000, samp_size = 100, kernel_func = "linear")
# sample size of 1000
lin1000 <- cv(nit=1000, samp_size = 1000, kernel_func = "linear")
# Stack the per-size gap distributions for overlaid histograms
lin_accuracy <- data.frame(samp_size = c(rep(25,1000), rep(100,1000), rep(1000,1000), rep(4000,1000)),
                        accuracy = c(lin10$accuracy, lin100$accuracy, lin1000$accuracy, lin4000$accuracy))
lin_accuracy$samp_size <- as.character(lin_accuracy$samp_size)
ggplot(data=lin_accuracy, aes(x=accuracy, color=samp_size, fill=samp_size))+
  geom_histogram(alpha=0.5, position="identity",binwidth = 0.01)
# Sensitivity
lin_sensitivity <- data.frame(samp_size = c(rep(25,1000), rep(100,1000), rep(1000,1000), rep(4000,1000)),
                        sensitivity = c(lin10$sensitivity, lin100$sensitivity, lin1000$sensitivity, lin4000$sensitivity))
lin_sensitivity$samp_size <- as.character(lin_sensitivity$samp_size)
ggplot(data=lin_sensitivity, aes(x=sensitivity, color=samp_size, fill=samp_size))+
  geom_histogram(alpha=0.5, position="identity",binwidth = 0.01)
# Specificity
lin_specificity <- data.frame(samp_size = c(rep(25,1000), rep(100,1000), rep(1000,1000), rep(4000,1000)),
                        specificity = c(lin10$specificity, lin100$specificity, lin1000$specificity, lin4000$specificity))
lin_specificity$samp_size <- as.character(lin_specificity$samp_size)
ggplot(data=lin_specificity, aes(x=specificity, color=samp_size, fill=samp_size))+
  geom_histogram(alpha=0.5, position="identity",binwidth = 0.025)
# Compare the gap for radial vs linear kernels at 4000 samples:
# mean +/- one standard deviation per kernel, zero line for reference
rad4000 <- cv(nit=1000, samp_size = 4000, kernel_func = "radial")
# accuracy
comp_dat <- data.frame(class = c("radial", "linear"),
                       avg=c(mean(rad4000$accuracy),mean(lin4000$accuracy)),
                       stddev=c(sd(rad4000$accuracy), sd(lin4000$accuracy)))
ggplot(comp_dat, aes(x=class, y=avg, color=class)) +
  geom_point(stat="identity",
             position=position_dodge()) +
  geom_errorbar(aes(ymin=avg-stddev, ymax=avg+stddev), width=.2,
                position=position_dodge(.9))+
  ylim(-0.015, 0.015)+
  geom_hline(yintercept=0)
# sensitivity
comp_dat <- data.frame(class = c("radial", "linear"),
                       avg=c(mean(rad4000$sensitivity),mean(lin4000$sensitivity)),
                       stddev=c(sd(rad4000$sensitivity), sd(lin4000$sensitivity)))
ggplot(comp_dat, aes(x=class, y=avg, color=class)) +
  geom_point(stat="identity",
             position=position_dodge()) +
  geom_errorbar(aes(ymin=avg-stddev, ymax=avg+stddev), width=.2,
                position=position_dodge(.9))+
  ylim(-0.015, 0.015)+
  geom_hline(yintercept=0)
# specificity
comp_dat <- data.frame(class = c("radial", "linear"),
                       avg=c(mean(rad4000$specificity),mean(lin4000$specificity)),
                       stddev=c(sd(rad4000$specificity), sd(lin4000$specificity)))
ggplot(comp_dat, aes(x=class, y=avg, color=class)) +
  geom_point(stat="identity",
             position=position_dodge()) +
  geom_errorbar(aes(ymin=avg-stddev, ymax=avg+stddev), width=.2,
                position=position_dodge(.9))+
  ylim(-0.05, 0.05)+
  geom_hline(yintercept=0)
# But what about the raw values of specificity and accuracy?
# Same resampling as cv(), but returns the RAW (undifferenced) metrics for
# both the training split and the held-out test split at each iteration.
#
# Relies on globals `dat` (data frame with factor column y) and `n` (its row
# count), and on e1071 being attached for svm()/predict().
# Returns a data frame with columns train_/test_ sensitivity, specificity
# and accuracy.
cv_raw <- function(nit, samp_size, kernel_func) {
  # Preallocate instead of growing vectors inside the loop
  train_sens <- numeric(nit)
  train_spec <- numeric(nit)
  train_acc <- numeric(nit)
  test_sens <- numeric(nit)
  test_spec <- numeric(nit)
  test_acc <- numeric(nit)
  # Sensitivity, specificity and accuracy from a flattened 2x2 confusion
  # table; as.data.frame(table(true, pred)) orders Freq as TN, FN, FP, TP.
  metrics <- function(confusion) {
    freq <- confusion$Freq
    c(sens = freq[4] / (freq[2] + freq[4]),
      spec = freq[1] / (freq[3] + freq[1]),
      acc  = (freq[1] + freq[4]) / sum(freq))
  }
  # seq_len() is safe for nit = 0, unlike 1:nit
  for (i in seq_len(nit)) {
    train <- base::sample(n, samp_size, replace = FALSE)
    svmfit <- svm(y ~ ., data = dat[train, ], kernel = kernel_func, cost = 10)
    m_tr <- metrics(as.data.frame(table(true = dat[train, "y"], pred = predict(svmfit, dat[train, ]))))
    m_te <- metrics(as.data.frame(table(true = dat[-train, "y"], pred = predict(svmfit, dat[-train, ]))))
    train_sens[i] <- m_tr[["sens"]]
    train_spec[i] <- m_tr[["spec"]]
    train_acc[i] <- m_tr[["acc"]]
    test_sens[i] <- m_te[["sens"]]
    test_spec[i] <- m_te[["spec"]]
    test_acc[i] <- m_te[["acc"]]
  }
  data.frame(train_sensitivity = train_sens, train_specificity = train_spec, train_accuracy = train_acc,
             test_sensitivity = test_sens, test_specificity = test_spec, test_accuracy = test_acc)
}
# Raw (undifferenced) metrics for linear vs radial kernels, train vs test
lin4000_raw <- cv_raw(nit=1000, samp_size = 4000, kernel_func = "linear")
rad4000_raw <- cv_raw(nit=1000, samp_size = 4000, kernel_func = "radial")
# Mean +/- SD of each metric, split by kernel and by train/test split
# accuracy
comp_dat <- data.frame(class = rep(c("radial", "linear"),2),
                       data = c("training", "training", "test", "test"),
                       avg=c(mean(rad4000_raw$train_accuracy),mean(lin4000_raw$train_accuracy), mean(rad4000_raw$test_accuracy), mean(lin4000_raw$test_accuracy)),
                       stddev=c(sd(rad4000_raw$train_accuracy),sd(lin4000_raw$train_accuracy), sd(rad4000_raw$test_accuracy), sd(lin4000_raw$test_accuracy)))
ggplot(comp_dat, aes(x=class, y=avg, color=data, group=data)) +
  geom_point(stat="identity",
             position=position_dodge(0.9)) +
  geom_errorbar(aes(ymin=avg-stddev, ymax=avg+stddev), width=.2,
                position=position_dodge(.9))
#ylim(-0.015, 0.015)+
#geom_hline(yintercept=0)
# sensitivity
comp_dat <- data.frame(class = rep(c("radial", "linear"),2),
                       data = c("training", "training", "test", "test"),
                       avg=c(mean(rad4000_raw$train_sensitivity),mean(lin4000_raw$train_sensitivity), mean(rad4000_raw$test_sensitivity), mean(lin4000_raw$test_sensitivity)),
                       stddev=c(sd(rad4000_raw$train_sensitivity),sd(lin4000_raw$train_sensitivity), sd(rad4000_raw$test_sensitivity), sd(lin4000_raw$test_sensitivity)))
ggplot(comp_dat, aes(x=class, y=avg, color=data, group=data)) +
  geom_point(stat="identity",
             position=position_dodge(0.9)) +
  geom_errorbar(aes(ymin=avg-stddev, ymax=avg+stddev), width=.2,
                position=position_dodge(.9))
#ylim(-0.015, 0.015)+
#geom_hline(yintercept=0)
# specificity
comp_dat <- data.frame(class = rep(c("radial", "linear"),2),
                       data = c("training", "training", "test", "test"),
                       avg=c(mean(rad4000_raw$train_specificity),mean(lin4000_raw$train_specificity), mean(rad4000_raw$test_specificity), mean(lin4000_raw$test_specificity)),
                       stddev=c(sd(rad4000_raw$train_specificity),sd(lin4000_raw$train_specificity), sd(rad4000_raw$test_specificity), sd(lin4000_raw$test_specificity)))
ggplot(comp_dat, aes(x=class, y=avg, color=data, group=data)) +
  geom_point(stat="identity",
             position=position_dodge(0.9)) +
  geom_errorbar(aes(ymin=avg-stddev, ymax=avg+stddev), width=.2,
                position=position_dodge(.9))
#ylim(-0.015, 0.015)+
#geom_hline(yintercept=0)
# plot(svmfit,dat)
# set.seed(10111)
# # sample training data and fit model that is it at a 1:5 ratio
# sens <- vector()
# spec <- vector()
# acc <- vector()
# wrong <- vector()
# #demonstrate how the larger the training data set the lower the risk.
# for(i in 1:1000){
# train <- base::sample(n,n/10, replace = FALSE)
# svmfit <- svm(y~., data = dat[train,], kernel = "radial", cost = 10)
# tab <- as.data.frame(table(true = dat[-train,"y"], pred=predict(svmfit, dat[-train,])))
# sens[i] <- tab$Freq[4]/(tab$Freq[2]+tab$Freq[4])
# spec[i] <- tab$Freq[1]/(tab$Freq[3]+tab$Freq[1])
# acc[i] <- (tab$Freq[1]+tab$Freq[4])/sum(tab$Freq)
# wrong[i] <- sum(tab$Freq[2]+tab$Freq[3])
# }
# hist(acc)
#
# cv <- function(nit, samp_size, kernel_func){
# sens <- vector()
# spec <- vector()
# acc <- vector()
# wrong <- vector()
# bia <- vector()
# #demonstrate how the larger the training data set the lower the risk.
# for(i in 1:nit){
# train <- base::sample(n,samp_size, replace = FALSE)
# svmfit <- svm(y~., data = dat[train,], kernel = kernel_func, cost = 10)
# tab <- as.data.frame(table(true = dat[-train,"y"], pred=predict(svmfit, dat[-train,])))
# sens[i] <- tab$Freq[4]/(tab$Freq[2]+tab$Freq[4])
# spec[i] <- tab$Freq[1]/(tab$Freq[3]+tab$Freq[1])
# acc[i] <- (tab$Freq[1]+tab$Freq[4])/sum(tab$Freq)
# wrong[i] <- sum(tab$Freq[2],tab$Freq[3])
# bia[i] <- tab$Freq[2]-tab$Freq[3]
# }
# error_ests <- data.frame(sensitivity = sens, specificity = spec, accuracy = acc, num_wrong = wrong, bias = bia)
# return(error_ests)
# }
#
# #five fold resampling
#
#
# #linear
# # sample size of 10
# lin10 <- cv(nit=1000, samp_size = 25, kernel_func = "linear")
# # sample size of 100
# lin100 <- cv(nit=1000, samp_size = 100, kernel_func = "linear")
# # sample size of 1000
# lin1000 <- cv(nit=1000, samp_size = 1000, kernel_func = "linear")
#
# #Histograms of bias
# lin_bias <- data.frame(samp_size = c(rep(25,1000), rep(100,1000), rep(1000,1000)),
# bias = c(lin10$bias, lin100$bias, lin1000$bias))
# lin_bias$samp_size <- as.character(lin_bias$samp_size)
# ggplot(data=lin_bias, aes(x=bias, color=samp_size, fill=samp_size))+
# geom_histogram(alpha=0.5, position="identity",binwidth = 50)
#
# #Histograms of accuracy
# lin_accuracy <- data.frame(samp_size = c(rep(25,1000), rep(100,1000), rep(1000,1000)),
# accuracy = c(lin10$accuracy, lin100$accuracy, lin1000$accuracy))
# lin_accuracy$samp_size <- as.character(lin_accuracy$samp_size)
# ggplot(data=lin_accuracy, aes(x=accuracy, color=samp_size, fill=samp_size))+
# geom_histogram(alpha=0.5, position="identity",binwidth = 0.01)+
# xlim(0.6,1)+
# geom_vline(xintercept=0.88)
#
# # hist(lin10$bias)
# # hist(lin100$bias)
# # hist(lin1000$bias)
#
# #what about probability?
# #sample size of 25
# ggplot(data=lin10, aes(x=accuracy))+
# geom_histogram(alpha=0.5, position="identity",binwidth = 0.01, color ="#00BFC4", fill ="#00BFC4")+
# xlim(0.6,1)+
# geom_vline(xintercept=0.85)
# #accuracy below 0.8 is:
# sum(lin10$accuracy > 0.85)/1000
# #0.011
# #100
# ggplot(data=lin100, aes(x=accuracy))+
# geom_histogram(alpha=0.5, position="identity",binwidth = 0.01, color ="#F8766D", fill ="#F8766D")+
# xlim(0.6,1)+
# geom_vline(xintercept=0.85)
# #accuracy below 0.8 is:
# sum(lin100$accuracy > 0.85)/1000
#
# #1000
# ggplot(data=lin1000, aes(x=accuracy))+
# geom_histogram(alpha=0.5, position="identity",binwidth = 0.01, color ="#7CAE00", fill ="#7CAE00")+
# xlim(0.6,1)+
# geom_vline(xintercept=0.85)
# #accuracy below 0.8 is:
# sum(lin1000$accuracy > 0.85)/1000
#
# svmfit = svm(y ~ ., data = dat, kernel = "radial", cost = 10, scale = FALSE)
# tab <- as.data.frame(table(true = dat[,"y"], pred=predict(svmfit, dat)))
# (tab$Freq[1]+tab$Freq[4])/sum(tab$Freq)
# #89
# #radial
# # sample size of 10
# rad10 <- cv(nit=1000, samp_size = 25, kernel_func = "radial")
# # sample size of 100
# rad100 <- cv(nit=1000, samp_size = 100, kernel_func = "radial")
# # sample size of 1000
# rad1000 <- cv(nit=1000, samp_size = 1000, kernel_func = "radial")
#
#
#
# n <- 10000
#
# set.seed(10111)
# x = matrix(rnorm(n, sd = 1.5), n/2, 2)
# y = rep(c(-1, 1), c(n/4, n/4))
# x[y == 1,] = x[y == 1,] + 3
# plot(x, col = y + 3)
#
# dat = data.frame(x, y = as.factor(y))
# svmfit = svm(y ~ ., data = dat, kernel = "linear", cost = 10, scale = FALSE)
# plot(svmfit, dat)
#
# # set pseudorandom number generator
# set.seed(10111)
# # sample training data and fit model that is it at a 1:5 ratio
# sens <- vector()
# spec <- vector()
# acc <- vector()
# wrong <- vector()
# #demonstrate how the larger the training data set the lower the risk.
# for(i in 1:1000){
# train <- base::sample(n,n/10, replace = FALSE)
# svmfit <- svm(y~., data = dat[train,], kernel = "linear", gamma = 1, cost = 1)
# tab <- as.data.frame(table(true = dat[-train,"y"], pred=predict(svmfit, dat[-train,])))
# sens[i] <- tab$Freq[4]/(tab$Freq[2]+tab$Freq[4])
# spec[i] <- tab$Freq[1]/(tab$Freq[3]+tab$Freq[1])
# acc[i] <- (tab$Freq[1]+tab$Freq[4])/sum(tab$Freq)
# wrong[i] <- sum(tab$Freq[2]+tab$Freq[3])
# }
# hist(acc)
#
#
#
# train <- base::sample(n,n/1.25, replace = FALSE)
# svmfit <- svm(y~., data = dat[train,], kernel = "radial", gamma = 1, cost = 1)
# plot(svmfit, dat)
#
# #Sensitivity-specificity analysis
# tab <- as.data.frame(table(true = dat[-train,"y"], pred=predict(svmfit, dat[-train,])))
# sensetivity <- tab$Freq[4]/(tab$Freq[2]+tab$Freq[4])
# specificity <- tab$Freq[1]/(tab$Freq[3]+tab$Freq[1])
#
#
|
c4cd7494d4ecdb354d3fc74ea79fb146e1193ac5
|
26fa9a756f6b769b678ecdf1e2acf91b18ff5ee9
|
/man/show_ex_toc.Rd
|
ac9fe55df4165da78de2dc4196159af77643d889
|
[
"MIT"
] |
permissive
|
petzi53/learnitdown
|
ef14d62cb5890abcb40d93ce86465ccc676b92c9
|
d53732fcaabba5a5d934675b65a88f8de1be9502
|
refs/heads/master
| 2023-06-06T21:02:12.221453
| 2021-07-05T16:45:21
| 2021-07-05T16:45:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 801
|
rd
|
show_ex_toc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/show_ex_toc.R
\name{show_ex_toc}
\alias{show_ex_toc}
\title{Insert a table of content for the exercises at the end of a bookdown chapter}
\usage{
show_ex_toc(header = "", clear.it = TRUE)
}
\arguments{
\item{header}{A Markdown text to place as header of the exercises toc.}
\item{clear.it}{Do we clear the toc list (\code{TRUE} by default)}
}
\value{
The Markdown chunk with the exercises toc.
}
\description{
For the various exercise types (h5p, shiny apps, learnrs & GitHub
assignations) we add toc entries with \code{\link[=h5p]{h5p()}}, \code{\link[=launch_shiny]{launch_shiny()}}, \code{\link[=learnr]{learnr()}},
and \code{\link[=assignation]{assignation()}}, respectively. This function create the exercises toc.
}
|
752620b78fff02ecac86b1212ef4fe78077128c5
|
3fd7e629ef19625f3b2f9db01bb9ab62c17aaf40
|
/Hospital_Care (2).R
|
72656b244815c542ed74ddc7f396f4b262ca274f
|
[] |
no_license
|
erabhay85/Hospital_Care_Analysis_Case_Study
|
2caf3ea847e2476a6747ecaad057f4b75fea459a
|
cb9838529ace796583e779168a6fa993c8a1eba4
|
refs/heads/master
| 2021-08-29T23:07:41.892264
| 2017-12-15T07:27:40
| 2017-12-15T07:27:40
| 114,340,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 603
|
r
|
Hospital_Care (2).R
|
## Hospital fall-injury exploration script (scratch quality).
## NOTE(review): this script mixes two workflows — `data` (read below) and a
## `CTDF` data frame that is never defined in this file, so the CTDF lines
## will error as written.  The 70/30 dev/val split built from CTDF is also
## never used by the random forest.  Verify against the intended data source
## before running.
data <- read.csv("C:/Python27/Fall_Data11.csv",header = TRUE,sep = ",")
View(data)
##data$majorinjury =ifelse(data$InJuryLevel_Key==3,1,0)
str(data)
summary(data)
# Random 70/30 split — no set.seed(), so the split is not reproducible.
CTDF$random <- runif(nrow(CTDF), 0, 1);
CTDF <- CTDF[order(CTDF$random),]
CTDF.dev <- CTDF[which(CTDF$random <= 0.7),]
CTDF.val <- CTDF[which(CTDF$random > 0.7),]
c(nrow(CTDF.dev), nrow(CTDF.val))
# NOTE(review): unconditional install.packages() re-installs on every run;
# prefer guarding with requireNamespace("randomForest", quietly = TRUE).
install.packages("randomForest")
library(randomForest)
# Random forest on the FULL data set (not the dev split) predicting Major_Inc.
RF <- randomForest(as.factor(Major_Inc) ~ ., data = data, ntree=400, mtry = 3, nodesize = 30,importance=TRUE)
print(RF)
plot(RF, main="")
# Negative-index demo: a[c(-1, -3)] drops elements 1 and 3 -> "b" "d" "e".
a <- c("a","b","c","d","e")
b<- c(-1,-3)
a[b]
|
3893f9c56f9f8f5cd339f92cd02955810c4e5cb8
|
a447fc11752764aef2ba535e530255b615b2f6d5
|
/R/wrappers_sparse.r
|
28925178f22182a59077902cc92cf8988de91fef
|
[
"BSD-2-Clause"
] |
permissive
|
wrathematics/coop
|
8dec1727de8c3f007d1d749c24def6c9e9079bbc
|
3a0d91311fc172fda52f4f82a10aaf1691a9460e
|
refs/heads/master
| 2021-11-24T03:46:08.206976
| 2021-11-23T12:20:19
| 2021-11-23T12:20:19
| 44,967,170
| 31
| 7
| null | 2017-06-19T17:33:08
| 2015-10-26T12:35:17
|
C
|
UTF-8
|
R
| false
| false
| 923
|
r
|
wrappers_sparse.r
|
#' @useDynLib coop R_co_sparse
# Internal dispatcher for sparse co-operations on a COO-triplet matrix.
# Coerces the triplet to the storage modes the C kernel expects, applies the
# requested missing-value policy, then hands off to the compiled routine.
#
# Args:
#   n       - matrix dimension, coerced to integer before the .Call
#   a, i, j - COO triplet: values (coerced to double) and row/column
#             indices (coerced to integer)
#   index   - integer flag forwarded to C; exact semantics defined in the
#             C source — confirm there
#   type    - integer code selecting the operation (cosine/pcor/covar);
#             semantics defined in the C source — confirm there
#   use     - NA-handling policy; normalized by check_use() (project helper)
#   inverse - logical flag, validated by check.is.flag() (project helper)
#
# Returns: whatever R_co_sparse produces (presumably the co-operation
# matrix; not verifiable from this file).
co_sparse <- function(n, a, i, j, index, type, use, inverse)
{
check.is.flag(inverse)
# storage.mode<- converts in place only when needed, avoiding a copy when
# the input already has the right mode.
if (!is.double(a))
storage.mode(a) <- "double"
if (!is.integer(i))
storage.mode(i) <- "integer"
if (!is.integer(j))
storage.mode(j) <- "integer"
use <- check_use(use)
# "everything": pass NAs straight through to the C kernel.
if (use == "everything")
{}
# "all.obs": the caller asserted there are no NAs; error if there are.
else if (use == "all.obs")
{
if (anyNA(a))
stop("missing observations in covar/pcor/cosine")
}
### TODO
# else if (use == "complete.obs")
# {
#   if (anyNA(x))
#   {
#     out <- naomit_coo(a, i, j)
#     a <- out[[1]]
#     i <- out[[2]]
#     j <- out[[3]]
#   }
# }
else
stop("unsupported 'use' method")
.Call(R_co_sparse, as.integer(n), a, i, j, as.integer(index), as.integer(type), as.integer(inverse))
}
#' @useDynLib coop R_csc_to_coo
# Convert CSC index structure (row indices + column pointers) to COO format
# by delegating to the compiled routine.  No coercion is done here, so the
# caller is responsible for supplying the types R_csc_to_coo expects —
# presumably integer vectors; confirm against the C source.
csc_to_coo <- function(row_ind, col_ptr)
{
.Call(R_csc_to_coo, row_ind, col_ptr)
}
|
ce34c53175c49729385f7fa62d82e5b0844df301
|
5dc8cb48cfc061d84cab2e1593067b696ea69990
|
/aula7/exemplo6.R
|
8253cc66ac74cf8df876e365bca23b56f9149527
|
[
"MIT"
] |
permissive
|
rodrigoesborges/slides2017
|
838d2eae2ce67dfd3f514ac8628443f47ac4732e
|
434b21d318d49bd7c6ce4677d578ca22bf3c0da3
|
refs/heads/master
| 2021-06-12T10:21:43.072406
| 2017-02-22T17:15:33
| 2017-02-22T17:15:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,431
|
r
|
exemplo6.R
|
library(purrr)
# Block the session for `secs` seconds, then report how long we slept.
# Returns a message string such as "dormi 1 segundo(s)".
dormir_secs <- function(secs = 1L) {
  Sys.sleep(secs)
  sprintf('dormi %d segundo(s)', secs)
}
# ------------------------------------------------------------------
## Progress bar
vetor <- rep(1, 4)
prog <- dplyr::progress_estimated(length(vetor))
dormidas <- map(vetor, ~{
print(prog$tick())
dormir_secs(.x)
})
# Exercício: reescrever com funções anônimas normais
# Exercício: reescrever com uma função apenas
# ------------------------------------------------------------------
## Paralelo com plyr, parallel e doParallel
# preparar clusters
n_cores <- parallel::detectCores()
cl <- parallel::makeCluster(n_cores)
doParallel::registerDoParallel(cl)
## diferença entre paralelo e nao paralelo
system.time({
dormidas2 <- plyr::llply(vetor, dormir_secs, .parallel = TRUE)
})
system.time({
dormidas3 <- plyr::llply(vetor, dormir_secs, .parallel = FALSE)
})
# identical(dormidas, dormidas2)
# identical(dormidas, dormidas3)
# parar clusters (limpa sujeira)
parallel::stopCluster(cl)
# ------------------------------------------------------------------
## Failwith
vetor <- c('acerto', 'erro', 'acerto')
# Fail loudly when given the value 'erro'; any other value succeeds.
# Used below to demonstrate failwith()/error-tolerant mapping.
errar <- function(x) {
  if (x == 'erro') {
    stop('errei')
  }
  'acertei'
}
f <- dplyr::failwith('putz, errei!', errar)
map_chr(vetor, errar)
map_chr(vetor, f)
# ------------------------------------------------------------------
## Estude:
purrr::accumulate
purrr::transpose
|
376d8d52880e4e67dcfc4844bb6cf401044162d4
|
d38fe70c8d6e30c1cf1e9be7f49b4c709b353528
|
/demo/archivist_jss.r
|
18edb2da1e1c1551d44422cd6c3b11dffa768f0c
|
[] |
no_license
|
pbiecek/archivist
|
1c1d4f9880255a67a6fd9d926668504b61958abb
|
18841c9bd216e792299beb4b8d72f287006aee76
|
refs/heads/master
| 2021-06-06T00:03:18.169650
| 2021-05-20T13:39:49
| 2021-05-20T13:39:49
| 12,585,738
| 80
| 23
| null | 2021-05-20T13:28:47
| 2013-09-04T08:23:59
|
HTML
|
UTF-8
|
R
| false
| false
| 4,521
|
r
|
archivist_jss.r
|
# Intro
#This is the replication script for 'archivist: An R Package for Managing, Recording and Restoring Data Analysis Results' (Przemyslaw Biecek, Marcin Kosinski) submitted to JSS.
#First, make sure that `archivist` is installed.
if (!require(archivist)) {
install.packages("archivist")
library(archivist)
}
# Section 2.1
# Creation of hooks to R objects.
# Following lines download R objects from remote repository.
archivist::aread("pbiecek/graphGallery/7f3453331910e3f321ef97d87adb5bad")
archivist::aread("pbiecek/graphGallery/7f34533")
setLocalRepo(system.file("graphGallery", package = "archivist"))
aread("7f3453331910e3f321ef97d87adb5bad")
# regression model
model <- archivist::aread("2a6e492cb6982f230e48cf46023e2e4f")
summary(model)
# Section 2.2
# Retrieval of a list of R objects with given tags.
# Following lines search within remote repositories and download objects with given properties.
models <- asearch("pbiecek/graphGallery", patterns = c("class:lm", "coefname:Sepal.Length"))
lapply(models, coef)
plots <- asearch("pbiecek/graphGallery",
patterns = c("class:gg", "labelx:Sepal.Length"))
length(plots)
library("gridExtra")
do.call(grid.arrange, plots)
# from local
plots <- asearch(patterns = c("class:gg", "labelx:Sepal.Length"))
length(plots)
do.call(grid.arrange, plots)
# Section 2.3
# Retrieval of the object's pedigree.
library("archivist")
library("dplyr")
createLocalRepo("arepo", default = TRUE)
data(iris)
iris %a%
filter(Sepal.Length < 6) %a%
lm(Petal.Length~Species, data=.) %a%
summary() -> tmp
ahistory(tmp)
ahistory(md5hash = "050e41ec3bc40b3004bc6bdd356acae7")
# this is not always this hash
#Session info
sinfo <- asession("050e41ec3bc40b3004bc6bdd356acae7")
head(sinfo$packages)
# Section 3.1
# Repository management.
# Creation of a new empty repository.
# local path
repo <- "arepo"
createLocalRepo(repoDir = repo)
# Deletion of an existing repository
repo <- "arepo"
deleteLocalRepo(repoDir = repo, deleteRoot = TRUE)
# Copying artifacts from other repositories.
repo <- "arepo"
createLocalRepo(repoDir = repo, default = TRUE)
copyRemoteRepo(repoTo = repo, md5hashes= "7f3453331910e3f321ef97d87adb5bad",
user = "pbiecek", repo = "graphGallery", repoType = "github")
# Showing repository statistics
showLocalRepo(repoDir = repo, method = "tags")
summaryRemoteRepo(user="pbiecek", repo="graphGallery")
# Setting default repository
setRemoteRepo(user = "pbiecek", repo = "graphGallery")
# Section 3.2
# Artifact management
# Saving an R object into a repository
library("ggplot2")
repo <- "arepo"
pl <- qplot(Sepal.Length, Petal.Length, data = iris)
saveToLocalRepo(pl, repoDir = repo)
showLocalRepo(repoDir = repo, "tags")
#deleteLocalRepo("arepo", deleteRoot = TRUE)
# Serialization of an object creation event into repository
library("dplyr")
iris %a%
filter(Sepal.Length < 6) %a%
lm(Petal.Length~Species, data=.) %a%
summary() -> tmp
ahistory(tmp)
ahistory(md5hash = "050e41ec3bc40b3004bc6bdd356acae7")
# Loading an object from repository
pl2 <- loadFromRemoteRepo("7f3453331910e3f321ef97d87adb5bad", repo="graphGallery", user="pbiecek",
value=TRUE)
pl3 <- loadFromLocalRepo("7f345333", system.file("graphGallery", package = "archivist"), value=TRUE)
archivist::aread("pbiecek/graphGallery/7f3453331910e3f321ef97d87adb5bad")
setLocalRepo(system.file("graphGallery", package = "archivist"))
pl3 <- loadFromLocalRepo("7f345333", value=TRUE)
archivist::aread("7f345333")
setLocalRepo(system.file("graphGallery", package = "archivist"))
model <- aread("2a6e492cb6982f230e48cf46023e2e4f")
digest::digest(model)
# Removal of an object from repository
rmFromLocalRepo("7f3453331910e3f321ef97d87adb5bad", repoDir = repo)
#Remove all older than 30 days
obj2rm <- searchInLocalRepo(list(dateFrom = "2010-01-01", dateTo = Sys.Date()), repoDir = repo)
rmFromLocalRepo(obj2rm, repoDir = repo, many = TRUE)
# Search for an artifact
# Search in a local/GitHub repository
searchInLocalRepo(pattern = "class:gg",
repoDir = system.file("graphGallery", package = "archivist"))
searchInLocalRepo(pattern = list(dateFrom = "2016-01-01",
dateTo = "2016-02-07" ),
repoDir = system.file("graphGallery", package = "archivist"))
searchInLocalRepo(pattern=c("class:gg", "labelx:Sepal.Length"),
repoDir = system.file("graphGallery", package = "archivist"))
|
321e8081aa42789b6cd15000262bb88a9691d3b8
|
9a5a28781aeeeb1e3629e5c59425802c7ddae77f
|
/man/LR.inference.Rd
|
754ab6a25f399aa39c053947336f0b2cb4ae8c61
|
[] |
no_license
|
jlstiles/sim.papers
|
a2b94682e02ea4b8031a3040ea3dac2b03d416bf
|
380608511079eea2c519f25e22cae81a48c14f4e
|
refs/heads/master
| 2021-10-25T04:19:51.264694
| 2019-03-31T21:01:21
| 2019-03-31T21:01:21
| 95,248,313
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,013
|
rd
|
LR.inference.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LR.inference.R
\name{LR.inference}
\alias{LR.inference}
\title{LR.inference}
\usage{
LR.inference(W, A, Y, Qform, alpha = 0.05, simultaneous.inference = FALSE)
}
\arguments{
\item{W, }{matrix or data.frame of covariates}
\item{A, }{a binary vector of treatment assignments}
\item{Y, }{a binary vector of outcomes}
\item{Qform, }{a formula for Y in terms of the covariates as input in glm}
\item{alpha, }{significance level for the (1-alpha)100 percent CI's. 0.05 is default}
\item{simultaneous.inference, }{TRUE if user wants simultaneous confidence
bounds for both ATE and blip variance at level alpha. default is FALSE}
}
\value{
if simultaneous.inference is specified as TRUE then will return a vector giving
pt estimate, left and right bound for ATE, simultaneous ATE CI, blip variance,
and simultaneous blip variance. Otherwise gives pt estimate, left and right bound for ATE
and blip variance.
}
\description{
Function that gives inference for logistic regression plug-in
estimators of ATE and Blip Variance.
}
\examples{
# using built-in package functions, g0_linear and define Q0_linear to specify
# pscore and outcome model probabilities
g0_linear
Q0_linear = function(A,W1,W2,W3,W4) plogis(A + W1 + W2 + A*(W3 + W4) + W3 + W4)
# get a randomly drawn dataframe under the specified model
data = gendata(1000, g0_linear, Q0_linear)
# get the truth
truth = get.truth(g0_linear, Q0_linear)
truth
# well-specified model
Qform = formula("Y ~ W1 + W2 + A*(W3 + W4)")
# specifying the covariates, treatment and outcome
W = data[,2:5]
A = data$A
Y = data$Y
# should cover each truth 95 percent of the time.
info = LR.inference(W=W,A=A,Y=Y,Qform=Qform, alpha = .05)
info
# should cover each truth 95 percent of the time and both truths
# simultaneously 95 percent of the time for the simultaneous CI's
info1 = LR.inference(W=W,A=A,Y=Y,Qform=Qform, alpha = .05,
simultaneous.inference = TRUE)
info1
}
|
b47dab5f160e7b988230b118c9857183c3d21045
|
7e34930737b7bfe9746a3c4b5340e23a80482691
|
/cachematrix.R
|
cd8e63278a65f005c9a885eb3db1e6e3bc8aa3e1
|
[] |
no_license
|
ehodder/ProgrammingAssignment2
|
1b9b176eac15d6fb4fa41dceb88e32de9d4a4472
|
25b82a2a307010cf35742efcd542c8af620efb3b
|
refs/heads/master
| 2021-01-18T02:11:30.711826
| 2014-04-23T16:26:03
| 2014-04-23T16:26:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,126
|
r
|
cachematrix.R
|
## These functions provide a way to create and cache an inverse of a matrix
## so that the inverse only has to be calculated once. Create a new matrix object
## by calling makeMatrix with the original matrix then call cacheSolve with the new
## matrix object to get the inverse
## makeCacheMatrix builds a special "matrix" object: a list of closures that
## hold a matrix together with a lazily-filled cache of its inverse.
## set()        replaces the stored matrix and invalidates the cache
## get()        returns the stored matrix
## setInverse() stores a computed inverse in the cache
## getInverse() returns the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set <- function(y) {
    x <<- y
    # Any previously cached inverse is now stale — drop it.
    inv_cache <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(solve) {
    inv_cache <<- solve
  }
  getInverse <- function() {
    inv_cache
  }
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it with solve() at most once and caching the
## result inside the object.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if (is.null(inv)) {
    # Cache miss: compute the inverse and stash it for future calls.
    inv <- solve(x$get(), ...)
    x$setInverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
42250e2a276ab6ae850138f48b41274ef4e90d6e
|
7dc5e6dac1531a024b48e1871bfbc8ab923f7e18
|
/getggmap.R
|
4dd5ab3be0afb4c428ab1dede559e364b203db0d
|
[] |
no_license
|
spacetimeecologist/RangeFilling
|
b5c46b6f27614d4a1d788408381ae4606c4d5f44
|
2a20b88411319081e8d0ad1508b4d7435921956a
|
refs/heads/master
| 2020-05-17T17:03:18.742685
| 2015-06-22T15:31:16
| 2015-06-22T15:31:16
| 22,317,074
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,998
|
r
|
getggmap.R
|
require(ggplot2)
require(maptools)
require(plyr)
require(raster)
# raster <- raster('C:/Users/ben/Documents/Data/Output/Yuccabrevifolia/06-24.1705/maxent.1705.envi')
# points <- read.csv('C:/Users/ben/Documents/Data/Output/Yuccabrevifolia/06-24.1705/presence.csv')
# points <- points[, 2:3]
# poly <- shapefile('C:/Users/ben/Documents/Data/USGS_TreeAtlas/yuccbrev/yuccbrev.shp')
# rp <- 14.504343567
# Build a ggplot map of ensemble presence predictions around sampling points.
#
# Args:
#   raster - prediction raster; cell values are treated as counts 0-5 of
#            models predicting presence (see scale_fill_manual breaks below)
#            — confirm against the calling script
#   points - presence coordinates, coercible to a 2-column long/lat frame
#   poly   - a Spatial* range polygon, drawn as a black outline
#
# Side effects: reads several Natural Earth shapefiles from hard-coded local
# Windows paths (C:/Users/ben/...), so this only runs on the original
# author's machine.  Returns a ggplot object (not printed).
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() at the top of the file would fail faster.
getggmap <- function(raster, points, poly) {
require(ggplot2)
require(maptools)
require(plyr)
require(raster)
require(rgeos)
# Import map data
cntry <- shapefile('C:/Users/ben/Documents/Data/50m_cultural/ne_50m_admin_0_countries.shp')
coast <- shapefile('C:/Users/ben/Documents/Data/50m_physical/ne_50m_coastline.shp')
lakes <- shapefile('C:/Users/ben/Documents/Data/50m_physical/ne_50m_lakes.shp')
river <- shapefile('C:/Users/ben/Documents/Data/50m_physical/ne_50m_rivers_lake_centerlines.shp')
state <- shapefile('C:/Users/ben/Documents/Data/50m_cultural/ne_50m_admin_1_states_provinces_lakes.shp')
# raster.nat <- crop(raster('C:/Users/ben/Documents/Data/50m_physical/GRAY_50M_SR_OB/GRAY_50M_SR_OB_crop.asc'), extent(raster))
# p.nat <- rasterToPoints(raster.nat)
# df.nat <- data.frame(p.nat)
# colnames(df.nat) <- c('Longitude', 'Latitude', 'Elevation')
# Fortify a Spatial* layer into a data frame and keep only the features
# falling inside the given bounding box.
quick.subset <- function(x, longlat){
# longlat should be a vector of four values: c(xmin, xmax, ymin, ymax)
x@data$id <- rownames(x@data)
x.f = fortify(x, region="id")
x.join = join(x.f, x@data, by="id")
x.subset <- subset(x.join, x.join$long > longlat[1] & x.join$long < longlat[2] &
x.join$lat > longlat[3] & x.join$lat < longlat[4])
x.subset
}
# NOTE(review): the *1.1 / *0.9 expansion of the raster extent only widens
# the box when longitudes are negative and latitudes positive (western /
# northern hemisphere) — confirm for other study areas.
domain <- c((xmin(raster)*1.1), (xmax(raster)*0.9), (ymin(raster)*0.9), (ymax(raster)*1.1))
coast.subset <- quick.subset(coast, domain)
lakes.subset <- quick.subset(lakes, domain)
river.subset <- quick.subset(river, domain)
state.subset <- quick.subset(state, domain)
# Remove raster pixels outside of coast. Convert to data.frame.
cntry.crop <- crop(cntry, extent(raster))
raster.mask <- mask(raster, cntry.crop)
p <- rasterToPoints(raster.mask)
df <- data.frame(p)
colnames(df) <- c('long', 'lat', 'predictions')
# Convert points to data.frame
n <- data.frame(points)
colnames(n) <- c('long', 'lat')
# Convert poly to data.frame
poly@data$id <- rownames(poly@data)
r <- fortify(poly, region='id')
# Layer order: prediction tiles, state borders, rivers, lakes, coastline,
# range polygon outline, and finally the presence points on top.
ggmap <- ggplot() + geom_tile(data=df, aes(x=long, y=lat, fill=factor(predictions))) +
scale_fill_manual(values= c( "5"="#69BE13", "4"="#85BE4B", "3"="#A5B398",
"2"="#B3AB98", "1"="#BEB4A2", "0"="#CCBFAD"),
breaks= c("5", "4", "3", "2", "1", "0"),
labels= c("Five", "Four", "Three", "Two", "One", "Zero")) +
geom_path(data=state.subset, aes(x=long, y=lat, group=group), colour='#878378', size=.2) +
geom_path(data=river.subset, aes(x=long, y=lat, group=group), colour='#4F5069', size=.3) +
geom_polygon(data=lakes.subset, aes(x=long, y=lat, group=group), fill='#727497') +
geom_path(data=coast.subset, aes(x=long, y=lat, group=group), colour='#4F5069', size=.7) +
geom_path(data=lakes.subset, aes(x=long, y=lat, group=group), colour='#4F5069', size=.3) +
geom_path(data=r, aes(x=long, y=lat, group=group), size=.5) +
geom_point(data=n, aes(x=long, y=lat), color='#EB6200', size=3, shape=16, alpha=.5) +
coord_quickmap(xlim=c(xmin(raster), xmax(raster)),
ylim=c(ymin(raster), ymax(raster))) +
xlab("Longitude") + ylab("Latitude") +
guides(fill=guide_legend(title='# of Models\nPredicting Presence')) +
theme(panel.background = element_rect(fill = '#727497'),
panel.grid.major = element_line(colour='#BFBAAB', size=.2),
panel.grid.minor = element_blank(),
panel.border= element_rect(colour='#403E3B', fill=NA))
return(ggmap)
}
|
4ffcd8a52bdc12c9b6d2bf31860d3750966853fd
|
3fa8c5984304c79e988d32a2ce6eb9c5983e3cba
|
/R_assignment_11/Assignment-11-Vizualization.R
|
88347761999e833a4b0bea3d2f06e31e395899d7
|
[] |
no_license
|
nlad-gmu/Lad_AIT580
|
8a62ae5abbd5ed6ff11ed06ebb060985d7bd83a0
|
e5dc2a6751aacb4eab39f7d64f91bd22ebfe9c7c
|
refs/heads/master
| 2020-07-24T11:29:18.151181
| 2019-12-17T09:36:46
| 2019-12-17T09:36:46
| 207,908,848
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 469
|
r
|
Assignment-11-Vizualization.R
|
###------------------
###Visualization
###------------------
###Students Name:Neha Lad
###GNumber:G01169261
# Interactive assignment script: prompts for a CSV (expects Age and
# MonthlyIncome columns) and draws two ggplot charts.
# NOTE(review): rm(list = ls()) wipes the whole workspace of whoever runs
# this; prefer a fresh R session instead.
rm(list=ls())
# file.choose() opens an interactive picker, so this cannot run unattended.
data <- read.csv(file.choose())
# NOTE(review): unconditional install.packages() re-installs on every run;
# guard with requireNamespace("ggplot2", quietly = TRUE).
install.packages("ggplot2")
library(ggplot2)
#1. Create Histogram for Age using R (10 points)
ggplot(data, aes(x=Age)) + geom_histogram(binwidth=1,color="darkblue", fill="lightblue")
#2. Create Scatter Plot for Age and Monthly Income using R
ggplot(data, aes(x=Age, y=MonthlyIncome)) + geom_point(color="blue")
|
9dc131a97ddfcc536d54e73305b5a31e435a57b0
|
6550e0e725f2e7b6ff9dd55714f37a8a75807980
|
/scripts/regression_experiments.R
|
fac48eef130c8cb785783fdd8168dfd99ae2c129
|
[
"MIT"
] |
permissive
|
tuner/turbine-analysis
|
ee1b4521e45cdf9e79f4577cd76b4d016d88752b
|
652de26ea7690bd9d29390404c206048a37e3bad
|
refs/heads/master
| 2020-03-31T17:21:41.598780
| 2018-12-05T05:56:53
| 2018-12-05T05:56:53
| 152,419,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,118
|
r
|
regression_experiments.R
|
###
# Playing with the data. Just some random hacks..
###
# Exploratory script: regresses Helsinki wind-turbine survey answers against
# census covariates.  All helpers used here (load_data, explain_variable,
# model_and_plot, plot_correlations) are defined in common.R, sourced below
# — their exact behavior is not visible in this file.
# Clear the workspace
# NOTE(review): rm(list = ls()) also clears the caller's workspace when this
# script is sourced; prefer running in a fresh session.
rm(list=ls())
source("common.R")
data <- load_data()
# Single-variable summaries of selected survey questions.
explain_variable(data$turbine$`Local wind power should be produced in Helsinki`)
explain_variable(data$turbine$`I would like to buy local wind power in Helsinki, even if cheaper electricity would be available from elsewhere`)
explain_variable(data$turbine$`Construction of wind turbines would have a positive impact on Helsinki's image.`)
explain_variable(data$turbine$`Construction of wind turbines would have a positive impact on the landscape of Helsinki's.`)
explain_variable(data$turbine$`What do you feel if wind turbines would be built at the outer territorial waters (8-10 km from the coastline) of Helsinki?`)
explain_variable(data$turbine$`How often do you spend your free time on the shores of Helsinki?`)
# Pairwise regressions: census covariate (x) vs. survey answer (y).
model_and_plot(data$census$VIHR, data$turbine$`Construction of wind turbines would have a positive impact on Helsinki's image.`)
model_and_plot(data$census$KOK, data$turbine$`Construction of wind turbines would have a positive impact on Helsinki's image.`)
model_and_plot(data$census$VASL, data$turbine$`Construction of wind turbines would have a positive impact on Helsinki's image.`)
model_and_plot(data$census$`Alko per 1000`, data$turbine$`Construction of wind turbines would have a positive impact on Helsinki's image.`)
model_and_plot(data$census$`Households without cars`, data$turbine$`I would like to buy local wind power in Helsinki, even if cheaper electricity would be available from elsewhere`)
model_and_plot(data$census$KOK, data$turbine$`What do you feel if wind turbines would be built at the outer territorial waters (8-10 km from the coastline) of Helsinki?`)
model_and_plot(data$census$VASL, data$turbine$`What do you feel if wind turbines would be built at the outer territorial waters (8-10 km from the coastline) of Helsinki?`)
model_and_plot(data$census$`Born in Helsinki`, data$turbine$`What do you feel if wind turbines would be built at the outer territorial waters (8-10 km from the coastline) of Helsinki?`)
plot_correlations()
419a193ad45561e15c07038c7d6a9789370c9e1c
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/metafolio/inst/testfiles/est_beta_params/libFuzzer_est_beta_params/est_beta_params_valgrind_files/1612988827-test.R
|
0db2932d4fca6e5eac6116247a3390523ff1798f
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 140
|
r
|
1612988827-test.R
|
# Auto-generated fuzzer replay case: calls the internal function
# metafolio:::est_beta_params with extreme mu/var values captured by
# libFuzzer/valgrind, then prints the structure of the result.
# NOTE(review): uses ::: to reach an unexported function; this breaks if
# metafolio renames or removes that internal.
testlist <- list(mu = 2.99205734114282e+21, var = 4.55931121056418e+169)
result <- do.call(metafolio:::est_beta_params,testlist)
str(result)
|
171b67463c1b6a22af67ac18067b6d25fade9a51
|
1374a5344c2818fa97ba234a37087596664ba0e2
|
/analysis/robustness_results.R
|
3ea5b24b5cb77538a76ac8a0e8fa0c845a593282
|
[] |
no_license
|
kennyjoseph/ORCID_career_flows
|
d0cb36fbd0b713e0b2571f17d49159348cedbc2d
|
bfefedfba52c43ed3b5dec1d50e3c72b8176e5c4
|
refs/heads/master
| 2023-03-17T07:09:47.648304
| 2021-03-22T17:29:55
| 2021-03-22T17:29:55
| 297,714,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,314
|
r
|
robustness_results.R
|
# Robustness check: for each synthetic transition file, fit a logistic
# regression of gender on field-attribution ("fab") change and collect the
# fab coefficients into a CSV.  Relies on data.table/ggplot2 loaded by
# util.R (sourced below — not visible here).
source("util.R")
theme_set(theme_light(20))
# Fit one simulation file and return the coefficient on `fab`.
#
# Args:
#   trans_file - path to a *_transitions.tsv with columns
#                from_matched_field, to_matched_field, and per-gender counts
#                (n_female / presumably n_male) — TODO confirm schema.
# Side effects: reads the matching *_covariates.tsv and prints its column
# count as a sanity check.
run_sim <- function(trans_file){
trans <- fread(trans_file)
trans <- melt(trans, id =c("from_matched_field","to_matched_field"))
# Expand counts into one row per individual transition, tagging gender.
full_trans <- rbindlist(apply(trans, 1,function(m){data.table(from_matched_field=rep(m[1],m[4]),
to_matched_field=rep(m[2],m[4]),
is_fem=rep(ifelse(m[3] == "n_female",1,0),m[4]))}))
cov <- fread(sub("transitions","covariates",trans_file))
print(ncol(cov))
# Attach the covariate of the origin and destination fields.
full_trans <- merge(full_trans, cov, by.x="from_matched_field", by.y="field")
setnames(full_trans, "covariate","from_fab")
full_trans <- merge(full_trans, cov, by.x="to_matched_field", by.y="field")
setnames(full_trans, "covariate","to_fab")
full_trans[, fab := to_fab - from_fab]
# Logistic model with origin-field fixed effects; coef(res)[2] is the
# fab slope because fab is the first term after the intercept.
res <-glm(is_fem ~ 1+fab + from_matched_field , data=full_trans,family="binomial")
return(as.vector(coef(res)[2]))
}
# Run every simulation and write the per-file fab coefficients.
# NOTE(review): sapply() is fine here only because run_sim returns a scalar;
# vapply(all_trans_files, run_sim, numeric(1)) would be safer.
all_trans_files <- Sys.glob("../python/syn_2/*transitions.tsv")
results <- sapply(all_trans_files,run_sim)
res <- data.table(filename=names(results),beta=as.vector(results))
res[, filename := sub("../python/syn_2/","", filename)]
res[, filename := sub("_transitions.tsv","", filename)]
write.csv(res,"../python/simchallenge_results_2.csv",row.names=F)
|
2be7b013ba0e3b0f2ff7aaa59b63b2b32d755f41
|
3fd97955d533167594a314730eafc1a9d13e8b1d
|
/scripts/04_calculate_lsm_NA.R
|
a2304713e1b1b3a9836e615b939f1e77510d5e69
|
[] |
no_license
|
mhesselbarth/Borthwick_et_al_2019_Front_Genet
|
ea59e9c9f181a63f62dffe7645ab2444d61fceae
|
89b90c73188d3f923f36c44f7f4f0a5b0b37344e
|
refs/heads/master
| 2021-07-20T13:23:16.577675
| 2020-05-01T17:32:11
| 2020-05-01T17:32:11
| 153,736,815
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,896
|
r
|
04_calculate_lsm_NA.R
|
# load libraries
library(clustermq)
library(suppoRt) # devtools::install_github("mhesselbarth/helpeR")
library(landscapemetrics)
library(raster)
library(sp)
library(tidyverse)
source(paste0(getwd(), "/scripts/00_calculate_lsm_helper.R"))
source(paste0(getwd(), "/scripts/00_clip_and_calc.R"))
#### Load data ####
# Script purpose: compute landscape-level metrics (landscapemetrics-style
# lsm_l_* set) for raster clippings around pairs of sampling points, submitting
# one job per point pair to an HPC cluster via suppoRt::submit_to_cluster.
# Assumes the working directory is the project root with data/GIS and
# data/Output subfolders.
# # load the clippings
# clippings_pmm_nlcd_NA <- readRDS(paste0(getwd(), "/data/Output/clippings_pmm_nlcd_NA.rds"))
#
# # check if all rasters all loaded in memory
# all(purrr::map_lgl(clippings_pmm_nlcd_NA, raster::inMemory))
#
# # extract names
# names_clippings <- purrr::map_chr(clippings_pmm_nlcd_NA, function(x) names(x))
#
# names_clippings <- stringr::str_split(names_clippings, pattern = "_", simplify = TRUE) # need for local version
# # load input layer
# nlcd_layer_NA <- readRDS(paste0(getwd(), "/data/Output/nlcd_reclassified_NA.rds"))
# load sampling points
sampling_points <- raster::shapefile(paste0(getwd(), "/data/GIS/SSR_17_sites.shp"))
# #### Calculate locally ####
#
# # Calculate metrics locally but overall printing progress
# total_clippigings <- length(clippings_pmm_nlcd_NA)
#
# landscape_metrics <- purrr::map(seq_along(clippings_pmm_nlcd_NA), function(x) {
#
#   print(paste0("Progress: ", x, " from ", total_clippigings))
#
#   result <- calculate_lsm(landscape = clippings_pmm_nlcd_NA[[x]],
#                           level = "landscape",
#                           classes_max = 2,
#                           verbose = FALSE,
#                           progress = FALSE)
#
#   gc(verbose = FALSE, reset = TRUE, full = TRUE)
#
#   return(result)
# })
#### Specify metrics ####
# Landscape-level metrics to compute for every clipping.
landscape_sub <- c("lsm_l_ai",
                   "lsm_l_area_mn",
                   "lsm_l_cai_mn",
                   "lsm_l_condent",
                   "lsm_l_contag",
                   "lsm_l_core_mn",
                   "lsm_l_division",
                   "lsm_l_ed",
                   "lsm_l_ent",
                   "lsm_l_iji",
                   "lsm_l_joinent",
                   "lsm_l_lpi",
                   "lsm_l_lsi",
                   "lsm_l_mesh",
                   "lsm_l_mutinf",
                   "lsm_l_np",
                   "lsm_l_pd",
                   "lsm_l_pladj",
                   "lsm_l_pr",
                   "lsm_l_prd",
                   "lsm_l_rpr",
                   "lsm_l_shdi",
                   "lsm_l_shei",
                   "lsm_l_sidi",
                   "lsm_l_siei",
                   "lsm_l_split",
                   "lsm_l_ta",
                   "lsm_l_te")
#### clustermq (clip_and_calc) ####
# get all combinations
# All unordered pairs of sampling points; one cluster job per pair.
sampling_ids <- suppoRt::expand_grid_unique(x = seq_along(sampling_points),
                                            y = seq_along(sampling_points))
# run metrics
# NOTE(review): `path` is a hard-coded cluster-side absolute path; it must
# match the copy of nlcd_reclassified_NA.rds on the cluster — verify.
landscape_metrics_NA <- suppoRt::submit_to_cluster(fun = clip_and_calc,
                                                   focal_plot = sampling_ids[, 1],
                                                   other_plot = sampling_ids[, 2],
                                                   n_jobs = nrow(sampling_ids),
                                                   log_worker = TRUE,
                                                   const = list(sampling_points = sampling_points,
                                                                # input_layer = nlcd_layer_NA,
                                                                what = landscape_sub,
                                                                classes_max = 2,
                                                                path = "/home/uni08/hesselbarth3/nlcd_reclassified_NA.rds"),
                                                   template = list(job_name = "lsm_clip",
                                                                   log_file = "lsm_clip.log",
                                                                   queue = "medium",
                                                                   walltime = "02:00:00",
                                                                   mem_cpu = "6144",
                                                                   processes = 1))
# Save the raw per-job list before any post-processing.
suppoRt::save_rds(object = landscape_metrics_NA,
                  filename = "landscape_metrics_NA_raw.rds",
                  path = paste0(getwd(), "/data/Output"),
                  overwrite = FALSE)
# bind to one dataframe
landscape_metrics_NA <- dplyr::bind_rows(landscape_metrics_NA)
# replace layer with 1:136
# Re-label `layer` so each pair of points gets a unique id (one id per
# block of metrics; assumes every job returned the same metric set).
landscape_metrics_NA$layer <- rep(x = 1:nrow(sampling_ids),
                                  each = length(unique(landscape_metrics_NA$metric)))
suppoRt::save_rds(object = landscape_metrics_NA,
                  filename = "landscape_metrics_NA.rds",
                  path = paste0(getwd(), "/data/Output"),
                  overwrite = FALSE)
|
77b5b27fba681ed372ed3140668927d654176ddd
|
3b06a15e9a14a27dee2049c10dd2904e792fae1e
|
/R/heuristic_contention_model.R
|
74e6a356bcaefec4b9d9b73a54259026c9c459f0
|
[] |
no_license
|
jorgerodriguezveiga/WildfireResources
|
7cf965e7fcfa45a9921b27fb08d8f73212f50da2
|
73d02124c0ea6001ab2bb88c81d64ebed39a2fe3
|
refs/heads/master
| 2021-05-09T18:11:01.921116
| 2018-05-06T16:52:58
| 2018-05-06T16:52:58
| 119,157,188
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,756
|
r
|
heuristic_contention_model.R
|
# ---------------------------------------------------------------------------- #
# Model 1: Contention_model
# ---------------------------------------------------------------------------- #
#' Contention model
#'
#' @description Solve the contention model.
#'
#' @param W_fix Work matrix.
#' @param S_fix Start matrix.
#' @param params model params.
#' @param M high value for the content_yes_no constraint.
#' @param M_prime penalty.
#' @param solver solver name. Options: 'gurobi', 'lpSolve' or 'Rsymphony'.
#' @param solver_params list of gurobi options. Defaults to list(TimeLimit=600, OutputFlag = 0).
#'
#' @return optimal values.
#'
#' @export
contention_model <- function(W_fix, S_fix, params, M, M_prime, solver="gurobi", solver_params=list(TimeLimit=600, OutputFlag=0)){
  #-----------------------------------------------------------------------------
  # Data
  #-----------------------------------------------------------------------------
  I=params$I
  Periods=params$Periods
  DFP=params$DFP
  FBRP=params$FBRP
  A=params$A
  CTFP=params$CTFP
  C=params$C
  P=params$P
  SP=params$SP
  NVC=params$NVC
  PR=params$PR
  PER=params$PER
  nMax=params$nMax
  nMin=params$nMin
  n=length(I)        # number of resources (aircraft)
  m=length(Periods)  # number of time periods
  #-----------------------------------------------------------------------------
  # Mathematical model 1
  #-----------------------------------------------------------------------------
  # Order of the decision variables (varh1_i maps name+indices -> column):
  # E[i,t] : 0*(n*m)+t+(i-1)*m
  # W[i,t] : 1*(n*m)+t+(i-1)*m
  # U[i,t] : 2*(n*m)+t+(i-1)*m
  # Z[i]   : 3*(n*m)+i
  # mu[t]  : 3*(n*m)+n+t
  # MU[t]  : 3*(n*m)+n+m+t
  # Y[t-1] : 3*(n*m)+n+2*m+t
  # Y[m]   : 3*(n*m)+n+3*m+1
  n_var<-3*(n*m)+n+3*m+1 # number of variables
  n_cons<-(n*m)+(n*m)+(n*m)+1+1+m+n+n+n+m+m # number of constraints
  # Type
  type = c(rep("B", n*m), # E[i,t]
           rep("B", n*m), # W[i,t]
           rep("B", n*m), # U[i,t]
           rep("B", n)  , # Z[i]
           rep("C", m)  , # mu[t]
           rep("C", m)  , # MU[t]
           rep("B", m)  , # Y[t-1]
           rep("B", 1)  ) # Y[m]
  # Objective function
  cost <- numeric(n_var)
  penalization <- numeric(n_var)
  # Constraints
  constr_W <- matrix(0, nrow = n*m, ncol = n_var)
  sense_W <- rep("=", n*m)
  rhs_W <- numeric(n*m)
  constr_C <- matrix(0, nrow = n*m, ncol = n_var)
  sense_C <- rep("=", n*m)
  rhs_C <- numeric(n*m)
  constr_working <- matrix(0, nrow = n*m, ncol = n_var)
  sense_working <- rep(">=", n*m)
  rhs_working <- numeric(n*m)
  constr_no_content <- numeric(n_var)
  sense_no_content <- "="
  rhs_no_content <- 1
  constr_content <- numeric(n_var)
  sense_content <- "<="
  rhs_content <- numeric(1)
  constr_content_yes_no <- matrix(0, nrow = m, ncol = n_var)
  sense_content_yes_no <- rep(">=", m)
  rhs_content_yes_no <- numeric(m)
  constr_one_exit <- matrix(0, nrow = n, ncol = n_var)
  sense_one_exit <- rep("=", n)
  rhs_one_exit <- numeric(n)
  constr_start_end <- matrix(0, nrow = n, ncol = n_var)
  sense_start_end <- rep(">=", n)
  rhs_start_end <- numeric(n)
  constr_flight_time <- matrix(0, nrow = n, ncol = n_var)
  sense_flight_time <- rep("<=", n)
  rhs_flight_time <- DFP-CTFP
  constr_n_aircraft_min <- matrix(0, nrow = m, ncol = n_var)
  sense_n_aircraft_min <- rep(">=", m)
  rhs_n_aircraft_min <- numeric(m)
  constr_n_aircraft_max <- matrix(0, nrow = m, ncol = n_var)
  sense_n_aircraft_max <- rep("<=", m)
  rhs_n_aircraft_max <- numeric(m)
  for(i in 1:n){
    for(t in 1:m){
      #=========================================================================
      # var W {i in I, t in T} = W_fix[i,t] * (sum{j in 1..t} S_fix[i,j]*Z[i] - sum{j in 1..(t-1): j+FBRP<=m} E[i,j+FBRP])
      #-------------------------------------------------------------------------
      constr_W[t+(i-1)*m, varh1_i("W",c(i,t),n,m)] <- 1 # W[i,t]
      constr_W[t+(i-1)*m, varh1_i("Z",c(i),n,m)] <- - W_fix[i,t]*sum(S_fix[i,1:t]) # sum{j in 1..t} W_fix[i,t]*S_fix[i,j]*Z[i]
      # BUG FIX: 1:(t-1) evaluates to c(1, 0) when t == 1, wrongly adding
      # E-coefficients the AMPL sum over j in 1..(t-1) excludes; seq_len(t-1)
      # is empty for t == 1 as required.
      for(j in seq_len(t-1)){
        if(j+FBRP<=m){
          constr_W[t+(i-1)*m, varh1_i("E",c(i,j+FBRP),n,m)] <- W_fix[i,t] # - sum{j in 1..(t-1): j+FBRP<=m} W_fix[i,t]*E[i,j+FBRP])
        }
      }
      #=========================================================================
      #=========================================================================
      # var U {i in I, t in T} = sum{j in 1..t} S_fix[i,j]*Z[i] - sum{j in 1..(t-1)} E[i,j];
      #-------------------------------------------------------------------------
      constr_C[t+(i-1)*m, varh1_i("U",c(i,t),n,m)] <- 1 # var U {i in I, t in T}
      constr_C[t+(i-1)*m, varh1_i("Z",c(i),n,m)] <- -sum(S_fix[i,1:t]) # - sum{j in 1..t} S_fix[i,j]*Z[i]
      # BUG FIX: seq_len(t-1) replaces 1:(t-1) (see note above); the old
      # `if(j>0)` guard only excluded j == 0 but still included j == 1 at t == 1.
      for(j in seq_len(t-1)){
        constr_C[t+(i-1)*m, varh1_i("E",c(i,j),n,m)] <- 1 # + sum{j in 1..(t-1)} E[i,j]
      }
      #=========================================================================
      #=========================================================================
      # subject to working {i in I, t in T}:
      #   U[i,t] >= W[i,t]
      #-------------------------------------------------------------------------
      constr_working[t+(i-1)*m, varh1_i("U",c(i,t),n,m)] <- 1 # U[i,t]
      constr_working[t+(i-1)*m, varh1_i("W",c(i,t),n,m)] <- -1 # - W[i,t]
      #=========================================================================
      #=========================================================================
      # minimize Cost: Coste + Penalization =
      # sum{i in I, t in T} C[i]*U[i,t] + sum{t in T} NVC[t]*Y[t-1] + sum{i in I} P[i]*Z[i]
      # + sum{t in T} M_prime*mu[t] + sum{t in T} M_prime/2*MU[t] + Y[m]
      #-------------------------------------------------------------------------
      cost[varh1_i("U",c(i,t),n,m)] <- C[i] # sum{i in I, t in T} C[i]*U[i,t]
      cost[varh1_i("Y",c(t-1),n,m)] <- NVC[t] # + sum{t in T} NVC[t]*Y[t-1]
      cost[varh1_i("Z",c(i),n,m)] <- P[i] # + sum{i in I} P[i]*Z[i]
      penalization[varh1_i("mu",c(t),n,m)] <- M_prime # + sum{t in T} M_prime*mu[t]
      penalization[varh1_i("MU",c(t),n,m)] <- 10*M_prime # + sum{t in T} 10*M_prime*MU[t]
      penalization[varh1_i("Y", c(m),n,m)] <- 1 # + Y[m]
      #=========================================================================
      #=========================================================================
      # subject to no_content:
      #   Y[0] = 1;
      #-------------------------------------------------------------------------
      constr_no_content[varh1_i("Y", 0,n,m)] <- 1 # Y[0]
      #=========================================================================
      #=========================================================================
      # subject to content:
      #   sum{t in T} PER[t]*Y[t-1] <= sum{i in I, t in T} PR[i,t]*W[i,t]
      #-------------------------------------------------------------------------
      constr_content[varh1_i("Y",c(t-1),n,m)] <- PER[t] # sum{t in T} PER[t]*Y[t-1]
      constr_content[varh1_i("W",c(i,t),n,m)] <- -PR[i,t] # - sum{i in I, t in T} PR[i,t]*W[i,t]
      #=========================================================================
      #=========================================================================
      # subject to content_yes_no {t in T}:
      #   sum{i in I, j in 1..t} PR[i,j]*W[i,j] >= SP[t]*Y[t-1] - M*Y[t]
      #-------------------------------------------------------------------------
      for(i1 in 1:n){
        for(j in 1:t){
          constr_content_yes_no[t, varh1_i("W",c(i1,j),n,m)] <- PR[i1, j] # sum{i in I, j in 1..t} PR[i,j]*W[i,j]
        }
      }
      constr_content_yes_no[t, varh1_i("Y",c(t-1),n,m)] <- -SP[t] # - SP[t]*Y[t-1]
      constr_content_yes_no[t, varh1_i("Y",c(t),n,m)] <- M # + M*Y[t]
      #=========================================================================
      #=========================================================================
      # subject to one_exit {i in I}:
      #   sum{t in T} E[i,t] = Z[i]
      #-------------------------------------------------------------------------
      for(j in 1:m){
        constr_one_exit[i, varh1_i("E",c(i,j),n,m)] <- 1 # sum{t in T} E[i,t]
      }
      constr_one_exit[i, varh1_i("Z",c(i),n,m)] <- -1 # - Z[i]
      #=========================================================================
      #=========================================================================
      # subject to start_end {i in I}:
      #   sum{t in T} t*E[i,t] >= sum{t in T} t*S_fix[i,t]*Z[i]
      #-------------------------------------------------------------------------
      for(j in 1:m){
        constr_start_end[i, varh1_i("E",c(i,j),n,m)] <- j # sum{t in T} t*E[i,t]
      }
      constr_start_end[i, varh1_i("Z",c(i),n,m)] <- -sum((1:m)*S_fix[i,]) # - sum{t in T} t*S_fix[i,t]*Z[i]
      #=========================================================================
      #=========================================================================
      # subject to flight_time {i in I}:
      #   sum{t in T} U[i,t] <= DFP-CTFP[i]
      #-------------------------------------------------------------------------
      for(j in 1:m){
        constr_flight_time[i, varh1_i("U",c(i,j),n,m)] <- 1 # sum{t in T} U[i,t]
      }
      #=========================================================================
      #=========================================================================
      # subject to n_aircraft_min {t in T}:
      #   sum{i in I} W[i,t] >= nMin[t]*Y[t-1] - mu[t]
      #-------------------------------------------------------------------------
      for(i1 in 1:n){
        constr_n_aircraft_min[t, varh1_i("W",c(i1,t),n,m)] <- 1 # sum{i in I} W[i,t]
      }
      constr_n_aircraft_min[t, varh1_i("Y",c(t-1),n,m)] <- -nMin[t] # - nMin[t]*Y[t-1]
      constr_n_aircraft_min[t, varh1_i("mu",c(t),n,m)] <- 1 # + mu[t]
      #=========================================================================
      #=========================================================================
      # subject to n_aircraft_max {t in T}:
      #   sum{i in I} W[i,t] <= nMax*Y[t-1] + MU[t]
      #-------------------------------------------------------------------------
      for(i1 in 1:n){
        constr_n_aircraft_max[t, varh1_i("W",c(i1,t),n,m)] <- 1 # sum{i in I} W[i,t]
      }
      constr_n_aircraft_max[t, varh1_i("Y",c(t-1),n,m)] <- -nMax # - nMax*Y[t-1]
      constr_n_aircraft_max[t, varh1_i("MU",c(t),n,m)] <- -1 # - MU[t]
      #=========================================================================
    }
  }
  # Assemble the full LP/MIP: objective, constraint matrix, senses and RHS.
  obj = cost+penalization
  constr = rbind(constr_W,
                 constr_C,
                 constr_working,
                 constr_no_content,
                 constr_content,
                 constr_content_yes_no,
                 constr_one_exit,
                 constr_start_end,
                 constr_flight_time,
                 constr_n_aircraft_min,
                 constr_n_aircraft_max
  )
  sense = c(sense_W,
            sense_C,
            sense_working,
            sense_no_content,
            sense_content,
            sense_content_yes_no,
            sense_one_exit,
            sense_start_end,
            sense_flight_time,
            sense_n_aircraft_min,
            sense_n_aircraft_max
  )
  rhs = c(rhs_W,
          rhs_C,
          rhs_working,
          rhs_no_content,
          rhs_content,
          rhs_content_yes_no,
          rhs_one_exit,
          rhs_start_end,
          rhs_flight_time,
          rhs_n_aircraft_min,
          rhs_n_aircraft_max
  )
  # Dispatch to whichever solver is requested AND installed.
  require_gurobi=require("gurobi")
  if(solver=="gurobi" &
     requireNamespace("slam", quietly = TRUE) &
     require_gurobi){
    heuristic<-list()
    heuristic$A<-constr
    heuristic$obj<-obj
    heuristic$sense<-sense
    heuristic$rhs<-rhs
    heuristic$vtypes<-type
    heuristic$lb<-numeric(n_var)
    heuristic$modelsense<-"min"
    sol <- gurobi::gurobi(heuristic, solver_params)
    x<-sol$x
    obj_value <- sol$objval
    sol_result<-sol$status
  }else if(solver=="lpSolve" &
           requireNamespace("lpSolveAPI", quietly = TRUE)){
    heuristic <- lpSolveAPI::make.lp(n_cons, n_var)
    lpSolveAPI::set.objfn(heuristic, obj)
    for(j in 1:n_cons) lpSolveAPI::set.row(heuristic, j, constr[j,])
    lpSolveAPI::set.rhs(heuristic, rhs)
    lpSolveAPI::set.constr.type(heuristic, sense)
    type_C <- which(type=="C")
    type_B <- which(type=="B")
    lpSolveAPI::set.type(heuristic, type_C, "real")
    lpSolveAPI::set.type(heuristic, type_B, "binary")
    resolver <- lpSolveAPI::solve(heuristic)
    if(resolver==0){
      sol_result <-"OPTIMAL"
    }else{
      sol_result <- "INFEASIBLE"
    }
    obj_value <- lpSolveAPI::get.objective(heuristic)
    x <- lpSolveAPI::get.variables(heuristic)
  }else if(solver=="Rsymphony" &
           requireNamespace("Rsymphony", quietly = TRUE)){
    sense[sense=="="] <- "=="
    sol <- Rsymphony::Rsymphony_solve_LP(obj, constr, sense, rhs, types = type, max = FALSE)
    obj_value <- sol$objval
    x <- sol$solution
    resolver <- sol$status
    if(resolver==0){
      sol_result <-"OPTIMAL"
    }else{
      sol_result <- "INFEASIBLE"
    }
  }
  if(sol_result=="OPTIMAL"){
    # Unpack the flat solution vector back into named matrices (layout above).
    # E[i,t] : 0*(n*m)+t+(i-1)*m
    E = matrix(x[1:(n*m)],nrow = n, ncol = m, byrow = T)
    row.names(E) <- I
    colnames(E) <- Periods
    # W[i,t] : 1*(n*m)+t+(i-1)*m
    W = matrix(x[1*(n*m)+1:(n*m)],nrow = n, ncol = m, byrow = T)
    row.names(W) <- I
    colnames(W) <- Periods
    # U[i,t] : 2*(n*m)+t+(i-1)*m
    U = matrix(x[2*(n*m)+1:(n*m)],nrow = n, ncol = m, byrow = T)
    row.names(U) <- I
    colnames(U) <- Periods
    # Z[i] : 3*(n*m)+i
    Z = matrix(x[3*(n*m)+1:(n)],nrow = n, byrow = T)
    row.names(Z) <- I
    # mu[t] : 3*(n*m)+n+t
    mu = matrix(x[3*(n*m)+n+1:(m)], ncol = m, byrow = T)
    colnames(mu) <- Periods
    # MU[t] : 3*(n*m)+n+m+t
    MU = matrix(x[3*(n*m)+n+m+1:(m)], ncol = m, byrow = T)
    colnames(MU) <- Periods
    # Y[t-1] : 7*(n*m)+n+m+t
    # Y[m]   : 7*(n*m)+n+m+m+1
    Y = matrix(x[3*(n*m)+n+2*m+1:(m+1)], ncol = m+1, byrow = T)
    colnames(Y) <- c("0",Periods)
    results <- list(sol_result=sol_result,
                    obj=obj_value,
                    cost=t(cost)%*%x,
                    penalty=t(penalization)%*%x,
                    E=E,
                    W=W,
                    U=U,
                    Z=Z,
                    mu=mu,
                    MU=MU,
                    Y=Y)
    return(results)
  }else{
    return(list(sol_result="INFEASIBLE"))
  }
}
|
da24fc23cb2e521f2711bd0ab99e300e86d4a91a
|
cb24924846351f3ac98aa4bcd96a7288917622f8
|
/204.R
|
9be4116114dd30f810d6bc176fe5f0ee6f202fcb
|
[] |
no_license
|
Miffka/RAnalysis1
|
e6a2400783a9870d52d3dafc86efbff07ac8ed8f
|
867b84f1a8953ae605af6298447bf8aacc3092d1
|
refs/heads/master
| 2020-03-28T18:35:31.624730
| 2018-09-15T11:01:50
| 2018-09-15T11:01:50
| 148,894,199
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,667
|
r
|
204.R
|
# NOTE(review): hard-coded setwd() makes this course script non-portable.
setwd(as.character("C:/Users/miffka/Documents/!DataMining/RAnalysis1"))
getwd()
library(psych)
library(ggplot2)
library(Hmisc)
# Custom functions
# Sum of two numbers; internal variables stay local to the function body.
my_calc <- function(x, y) {
  x + y
}
result <- my_calc(x = 10, y = 15)
# A function returning several values wraps them in a vector.
my_calc1 <- function(x, y) {
  c(x + y, x - y)
}
# Default argument values can be supplied in the definition (z defaults to 10).
my_calc3 <- function(x, y, z = 10) {
  c(x + y + z, x - y - z)
}
my_calc3(1, 2)
# First trial function: impute missing values in a numeric vector.
distr1 <- rnorm(100)
hist(distr1)
distr1[1:30] <- NA
# Replace missing values with the mean of the remaining observations.
distr1[is.na(distr1)] <- mean(distr1, na.rm = T)
# Now wrap the same logic in a function that works on whatever is passed in.
# Returns x with NAs replaced by the mean; prints a message (and returns the
# message string) when the input is not numeric.
my_na_rm <- function(x){
  if (is.numeric(x)){
    x[is.na(x)] <- mean(x, na.rm = T)
    return(x)
  }
  else {
    print("X is not numeric")
  }
}
distr1 <- my_na_rm(x = distr1)
hist(distr1)
# What happens if we pass a different type into the function?
my_na_rm(x = c("2", "3", NA))
# Without the type check everything breaks;
# done properly, the function degrades gracefully.
# Next: modify the function so that, when the distribution deviates strongly
# from normal, missing values are replaced with the median instead of the
# mean, and make it report whether the distribution was normal or not.
# Impute missing values in a numeric vector: use the mean when a Shapiro-Wilk
# test does not reject normality (p > 0.05), the median otherwise, and report
# which replacement was used.
#
# @param x numeric vector (other types are rejected with a message).
# @return x with NAs imputed; for non-numeric input, the printed message string.
my_na_rm <- function(x){
  if (is.numeric(x)){
    # NOTE: shapiro.test() accepts NAs but needs 3-5000 non-missing values.
    stat_test <- shapiro.test(x)
    if (stat_test$p.value > 0.05) {
      x[is.na(x)] <- mean(x, na.rm = TRUE)   # TRUE instead of the unsafe alias T
      print("NA values were replaced with mean")
    }
    else {
      x[is.na(x)] <- median(x, na.rm = TRUE)
      print("NA values were replaced with median")
    }
    return(x)
  }
  else {
    print("X is not numeric")
  }
}
# Apply the mean/median imputation to fresh normal and uniform samples.
distr1 <- rnorm(1000)
distr1[1:30] <- NA
distr1 <- my_na_rm(distr1)
d1 <- rnorm(2000)
d2 <- runif(2000) # uniform distribution
d1[1:10] <- NA
d2[1:10] <- NA
d1 <- my_na_rm(d1)
head(d1)
d2 <- my_na_rm(d2)
hist(d2)
# Move the function into its own file (saved there as a script)
# and load it back with source().
source("my_na_rm.R") # a full path could be given here as well
# NOTE(review): my_na_rm() with no argument errors ('x' is missing) —
# presumably meant to be called on a vector; confirm intent.
my_na_rm()
# Task 1: return the index positions of missing observations in a vector.
NA.position <- function(x) {
  which(is.na(x))
}
task1 <- rnorm(10)
task1[c(2, 4, 8)] <- NA
NA.position(task1)
# Task 2: count the number of missing values in a vector.
NA.counter <- function(x) {
  sum(is.na(x))
}
NA.counter(task1)
# A function that combines several files into one data frame.
dir(pattern = "*.csv") # list all csv files
grants <- data.frame()
# First, the straightforward approach.
# NOTE(review): rbind() inside a loop re-copies the accumulated data frame on
# every iteration (O(n^2)); acceptable for a course demo.
for (i in dir(pattern = "*.csv")){
  temp_df <- read.csv(i)
  grants <- rbind(temp_df, grants)
}
setwd(as.character("C:/Users/miffka/Documents/!DataMining/RAnalysis1/Grants_data"))
# Reads every *.csv in the working directory, row-binds them and reports how
# many files were combined.
# NOTE(review): `number <<- 0` deliberately writes the counter into the global
# environment so it survives the call — a side effect callers should be aware of.
read_data <- function(){
  df <- data.frame()
  number <<- 0
  for (i in dir(pattern = "*.csv")){
    df1 <- read.csv(i)
    df <- rbind(df1, df)
    number <<- number + 1
  }
  print(paste(as.character(number), "files were combined"))
  return(df)
}
grants2 <- read_data()
# If we want the function to persist internal variables, we use <<-.
# This function could be extended further.
# Task 3:
# takes a vector with missing, positive and negative
# values and returns the sum of the positive elements of the vector.
# Task 3: given a vector that may contain missing, positive and negative
# values, return the sum of its strictly positive elements (0 when none).
#
# @param x numeric vector, possibly containing NA.
# @return single numeric: sum of the positive elements.
filtered.sum <- function(x){
  # Logical subsetting with an explicit NA guard replaces the old
  # which()-index dance; sum(numeric(0)) is 0, so empty input is safe.
  sum(x[x > 0 & !is.na(x)])
}
filtered.sum(c(1, -2, 3, NA, NA))
# Task 4:
# write a function that finds and removes outliers using the boxplot rule.
# An outlier is an observation further than 1.5 * IQR from the quartiles,
# where IQR is the interquartile range.
task4 <- rnorm(100)
boxplot(task4)
#task4 <- outliers.rm(task4)
#boxplot(task4)
#task41 <- quantile(task4, probs = c(0.25, 0.75))
#task42 <- IQR(task4)
# First, work everything out outside of a function.
# Remove the elements of `searched_vector` from `full_vector`, respecting
# multiplicity: each element of `searched_vector` cancels at most one matching
# occurrence in `full_vector` (an order-preserving multiset difference).
#
# @param full_vector vector to filter.
# @param searched_vector values to remove (one occurrence per value).
# @return the surviving elements of `full_vector`, or NULL when none survive
#   (matching the original grow-with-c() behaviour).
exclude_val <- function(full_vector, searched_vector){
  remaining <- searched_vector
  keep <- logical(length(full_vector))  # preallocated instead of growing with c()
  for (idx in seq_along(full_vector)) {
    hit <- match(full_vector[idx], remaining)
    if (is.na(hit)) {
      keep[idx] <- TRUE       # no unconsumed match left: element survives
    } else {
      remaining[hit] <- NA    # consume exactly one matching occurrence
    }
  }
  result <- full_vector[keep]
  if (length(result) == 0) NULL else result
}
#task43 <- task4[which(task4 < 1.5 * IQR(task4) * quantile(task4, probs = 0.25))]
#task44 <- task4[which(task4 > 1.5 * IQR(task4) * quantile(task4, probs = 0.75))]
#task45 <- append(task43, task44)
#task4 <- exclude_val(task4, task45)
#task4 <- setdiff(task4, task43)
#task4 <- setdiff(task4, task44)
# Scratch exploration of the fence logic on fresh data.
x <- rnorm(100)
# NOTE(review): the "lower fence" below is Q1 + 1.5*IQR, not Q1 - 1.5*IQR;
# the final outliers.rm() below uses the correct minus sign.
task43 <- c(x < 1.5 * IQR(x) + quantile(x, probs = 0.25))
task44 <- c(x > 1.5 * IQR(x) + quantile(x, probs = 0.75))
task45 <- as.logical(task43 + task44)
x1 <- x[!task45]
# Now write it in general form.
# Remove boxplot outliers: keep only the observations lying inside
# [Q1 - 1.5 * IQR, Q3 + 1.5 * IQR].
outliers.rm <- function(x){
  spread <- IQR(x)
  lower <- quantile(x, probs = 0.25) - 1.5 * spread
  upper <- quantile(x, probs = 0.75) + 1.5 * spread
  x[!(x < lower | x > upper)]
}
# Elegant variant: keep a point when its distance from the midhinge
# ((Q1 + Q3) / 2) is at most 2 * IQR — algebraically the same interval
# as [Q1 - 1.5 * IQR, Q3 + 1.5 * IQR].
outliers.rm <- function(x){
  midhinge <- (quantile(x, 0.25) + quantile(x, 0.75)) / 2
  x[abs(x - midhinge) <= 2 * IQR(x)]
}
# This is the source vector.
t1 <- c(1.34, 0.32, -11.57, 0.32, 0.31, 23.03, 0.14, -1.28, 1.5, -3.01, 1.43, 0.8, 0.32, 0.78, -2.44, 0.28, -3.5, -0.39, -0.18, -0.02, -0.76, 0.42, -2.74, -0.75, -0.98, -60.76, 0.33, 41.99, -1.12, -3.92)
# This is the expected answer.
t2 <- c(1.34, 0.32, 0.32, 0.31, 0.14, -1.28, 1.5, -3.01, 1.43, 0.8, 0.32, 0.78, -2.44, 0.28, -3.5, -0.39, -0.18, -0.02, -0.76, 0.42, -2.74, -0.75, -0.98, 0.33, -1.12)
# This is my answer.
t3 <- c(0.32, 0.32, 0.31, 0.14, -1.28, -3.01, 0.8, 0.32, 0.78, -2.44, 0.28, -0.39, -0.18, -0.02, -0.76, 0.42, -2.74, -0.75, -0.98, 0.33, -1.12)
# BUG FIX: the original called outlier(), which is not defined anywhere in
# this script; the outlier-removal function defined above is outliers.rm().
t2 == outliers.rm(t1)
summary(t1)
boxplot(t1)
boxplot(t2)
boxplot(t3)
histogram(t1)
histogram(t2)
histogram(t3)
|
f83e7b4643e725d13b64234fe0863fbbe2aee963
|
2e5b80db0556d23b88870d1afab2f75ad84f18f3
|
/published-data-extraction/R/combine-recruit-data.R
|
7fd7bf383d69a8de0f07bb068f42970dd8aad684
|
[
"MIT"
] |
permissive
|
MRCIEU/ewascatalog
|
dfbdc1e57dc65fa066810eb1a208bd9fc3d32add
|
90f95fc663ca86a56696d633d5049c2e09463318
|
refs/heads/main
| 2023-06-25T08:39:25.506215
| 2023-06-11T18:34:17
| 2023-06-11T18:34:17
| 329,965,100
| 2
| 0
| null | 2021-01-15T18:16:33
| 2021-01-15T16:28:55
|
R
|
UTF-8
|
R
| false
| false
| 4,247
|
r
|
combine-recruit-data.R
|
# Combine per-recruit EWAS-catalog extraction spreadsheets into one studies
# file (with and without recruiter names) and collect the recruits'
# results files into a shared results directory.
# pkgs
library(tidyverse) # tidy code and data
library(readxl) # reading in excel files
library(openxlsx) # writing out excel file
## CHANGE ME
date_of_extraction <- "2022-07-25"
mkdir <- function(path) system(paste("mkdir -p", path))
# Ensure the output folders for this extraction date exist.
mkdir(paste0("recruits-data/combined-data/", date_of_extraction))
mkdir(paste0("recruits-data/combined-data/", date_of_extraction, "/results"))
### This should be done after results are manually scanned
# 1. read in recruits
recruits <- trimws(readLines("recruits-data/recruits.txt"))
# a. remove individual who is gathering author names
ind <- c("Paul Yousefi") # REMEMBER TO ADD ABI HERE FOR 2022-04-25!!!
recruits <- recruits[!recruits %in% ind]
# 2. make function that reads in the studies files for a given recruit
# a. and moves results files into the results folder
# Read each recruit's studies.xlsx (recruits-data/<recruit>/<date>/), tag rows
# with the recruit's name, copy their results files/dirs into the combined
# results folder, and row-bind everything into one tibble.
# Recruits with no studies file (warned) or an empty one contribute no rows.
combine_studies <- function(recruits, date)
{
	map_dfr(recruits, function(rec) {
		print(rec)
		rec_path <- file.path("recruits-data", rec, date)
		studies_file <- file.path(rec_path, "studies.xlsx")
		if (!file.exists(studies_file)) {
			warning("The studies file: ", studies_file, " does not exist. Check to make sure this is intentional.")
			return(NULL)
		}
		studies <- read_xlsx(studies_file)
		if (nrow(studies) == 0) return(NULL)
		studies$recruit <- rec
		# Coerce to character so bind_rows() never hits mixed-type columns.
		studies$Date <- as.character(studies$Date)
		studies$PMID <- as.character(studies$PMID)
		# copy results over
		res_path <- file.path(rec_path, "results")
		res_files <- list.files(res_path)
		res_dirs <- list.dirs(path = res_path, full.names = TRUE, recursive = FALSE)
		out_path <- file.path("recruits-data/combined-data", date, "results")
		if (length(res_dirs) > 0) {
			# get dir names
			dir_nams <- gsub(paste0(res_path, "/"), "", res_dirs)
			# copy files into correct directories
			lapply(dir_nams, sort_dir, res_path = res_path, out_path = out_path)
			# remove directory from file list
			res_files <- res_files[!res_files %in% dir_nams]
		}
		x <- file.copy(from = file.path(res_path, res_files), 
					   to = out_path, 
					   overwrite = TRUE, 
					   recursive = FALSE, 
					   copy.mode = TRUE)
		# file.copy returns one logical per file; abort if any copy failed.
		stopifnot(sum(x) == length(res_files))
		# return studies
		return(studies)
	})
}
# NOTE(review): leftover debugging line — assigns a single recruit but is
# never used below; presumably safe to delete. Confirm before removing.
rec <- recruits[6]
# 2 b. make function to sort out the directories in any results folder
# Recreate one sub-directory of a recruit's results folder under out_path
# and copy all of its files across, aborting if any copy fails.
sort_dir <- function(dir_nam, res_path, out_path)
{
	# make dir in new file path
	mkdir(file.path(out_path, dir_nam))
	# move files there
	res_dir_files <- list.files(file.path(res_path, dir_nam))
	x <- file.copy(from = file.path(res_path, dir_nam, res_dir_files), 
				   to = file.path(out_path, dir_nam), 
				   overwrite = TRUE, recursive = FALSE, 
				   copy.mode = TRUE)
	stopifnot(sum(x) == length(res_dir_files))
}
# 3. combine studies data (and make sure people are attributed to each row)
comb_studies <- combine_studies(recruits, date = date_of_extraction)
# 4. remove missing rows
# Drop rows where every column except the recruit tag is NA.
comb_studies <- comb_studies[rowSums(is.na(comb_studies)) != ncol(comb_studies) - 1, ]
# 5. read in jotform studies
jotform_file <- file.path("recruits-data/combined-data", date_of_extraction, "studies-jotform.xlsx")
if (file.exists(jotform_file)) {
	jotform_studies <- read_xlsx(jotform_file)
	jotform_studies$recruit <- "jotform"
	jotform_studies$PMID <- as.character(jotform_studies$PMID)
	if (ncol(comb_studies) == 0) {
		studies_out <- jotform_studies
	} else {
		comb_studies$PMID <- as.character(comb_studies$PMID)
		if (!all(colnames(jotform_studies) %in% colnames(comb_studies))) stop("column names of 'studies-jotform.xlsx' does not match those from recruit uploaded 'studies.xlsx'")
		studies_out <- bind_rows(comb_studies, jotform_studies)
	}
} else {
	studies_out <- comb_studies
}
## Remove age group category definitions
# Strip any trailing " (...)" definition from the Age_group labels.
studies_out$Age_group <- gsub(" \\(.*", "", studies_out$Age_group)
# remove recruit names from one set of studies
studies_no_names <- studies_out %>%
	dplyr::select(-recruit)
# 6. write out the combined studies file
write.xlsx(studies_no_names, 
		   file = file.path("recruits-data/combined-data", date_of_extraction, "studies.xlsx"), 
		   sheetName = "data")
write.xlsx(studies_out, 
		   file = file.path("recruits-data/combined-data", date_of_extraction, "studies-with-names.xlsx"), 
		   sheetName = "data")
|
a49dafeb39dc7ff7a4ae999392a9f9354f5bc42f
|
d7ff455d68ab5aec6f90cc7f2a0d26dd594e4348
|
/R/big_main_simulation_additional.R
|
8b906972acd455bfa35c3e5297d4ddc2ace19eb9
|
[] |
no_license
|
stijnmasschelein/complementarity-simulation
|
5f4c6c9794b820e88291e4b9f4140ad73a5ad0e0
|
e9a2f88a3c358585762aba3accbeff063b068a73
|
refs/heads/master
| 2021-06-04T01:58:52.011673
| 2020-02-04T10:55:21
| 2020-02-04T10:55:21
| 147,630,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
big_main_simulation_additional.R
|
if (!require("remotes")){
install.packages("remotes")
library(remotes)
}
if (!require(simcompl2)){
remotes::install_github("stijnmasschelein/simcompl2",
ref = "only_files")
library(simcompl2)
}
source("R/big_parameters.R")
data_params_1 <- data_params
data_params_1$b1 = list(c(.5, .5, .5, 0))
data_params_1$d[[2]] = list(c(.25, .25, 0))
do.call(run_simulation, list(data_params = data_params_1,
test_params = test_params,
sim_params = sim_params))
|
2ee3bb29c648fee99f6366152eb97410db99b583
|
524847bb282dc11701351f268b83f2642a95fc7b
|
/plot2.R
|
f58b3b58b83cf0775c2a45dd9b7609e570ab0c32
|
[] |
no_license
|
paras1605/ExData_Plotting1
|
25316f0687d174ddd2a40c303062dba35c48c95d
|
af694c3922294a1f0770f5cc0b6b04860b7ea0ea
|
refs/heads/master
| 2020-04-30T00:54:59.735121
| 2019-03-20T17:45:55
| 2019-03-20T17:45:55
| 176,514,709
| 0
| 0
| null | 2019-03-19T13:10:47
| 2019-03-19T13:10:46
| null |
UTF-8
|
R
| false
| false
| 588
|
r
|
plot2.R
|
# Plot 2 (Exploratory Data Analysis course): Global Active Power over time for
# 2007-02-01 and 2007-02-02, saved as plot2.png (480x480).
table <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Keep only the two target days (dates are stored as d/m/Y strings).
subsetdata <- table[table$Date %in% c("1/2/2007","2/2/2007"), ]
subsetdata$Date <- as.Date(subsetdata$Date, format="%d/%m/%Y")
# Date has to be used with time and hence combining date and time as one
datetime <- paste(subsetdata$Date, subsetdata$Time)
subsetdata$datetime <- as.POSIXct(datetime)
with(subsetdata, plot(Global_active_power ~ datetime, type = "l", xlab = "", ylab = "Global Active Power (Kilowatts)"))
# NOTE(review): dev.copy() duplicates the screen device; writing directly with
# png(); plot(); dev.off() would avoid any screen/file rendering differences.
dev.copy(png, "plot2.png", width = 480, height = 480)
dev.off()
|
8674d2bdb5ef71233283341f26ee8b422a4c71b5
|
799c09fde62544672faadbf5ea81242e6f5d001f
|
/Binom Max Lik.R
|
cb48ee31fa88d2985ff94131b684c0b267ceb57b
|
[] |
no_license
|
dcossyleon/Stats-Simulations
|
6494901abec5db3c6e5bbe79aff3c8039c5375fc
|
99227ba624fbbca68f3d10be79e7b436da0bc1fe
|
refs/heads/master
| 2020-04-25T00:19:38.828044
| 2019-02-24T18:33:25
| 2019-02-24T18:33:25
| 172,373,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 860
|
r
|
Binom Max Lik.R
|
#Jan 22, 2018
#Maximum likelihood Binomial distrib
#1) Generate binomial data: y ~ Bernoulli(logistic(B0 + B1*x)).
n=1000
size=1
X1 <- runif(n, min=0, max=1)
B0 <- 1
B1 <- 2
Y <- rbinom(n=n, size=size, prob=(exp(B0+B1*X1)/(1+(exp(B0+B1*X1))))) #not good
Y2 <-rbinom(n=n, size=size, prob=(1/(1+exp(-(B0+B1*X1)))))
# NOTE(review): Z is referenced here but only defined on the next line —
# this errors in a fresh session; move the Z assignment above this call.
Y3 <-rbinom(n=n, size=size, prob=(1/(1+exp(-(Z)))))
Z <- B0+B1*X1
PR <- 1/(1+(exp(-Z)))
Y1 <- rbinom(n, 1, PR)
d <- data.frame(x1=X1, y=Y)
hist(d$y)
#GLM
# Fit logistic regressions; estimates should be close to B0 = 1, B1 = 2.
summary(glm(Y~X1, family = binomial))
summary(glm(Y2~X1, family = binomial))
summary(glm(Y3~X1, family = binomial))
summary(glm(Y1~X1, family = binomial))
#log likelihood
# Negative log-likelihood of the logistic-regression model for binary data.
# theta = c(intercept, slope). Relies on the global `size` (number of
# Bernoulli trials) and — via data-frame partial matching — on data$x
# resolving to the x1 column of `d`.
opt.input.binom <- function(data, theta){
  intercept <- theta[1]
  slope <- theta[2]
  eta <- intercept + slope * data$x
  p <- exp(eta) / (exp(eta) + 1)
  -sum(dbinom(data$y, size = size, prob = p, log = TRUE))
}
optim(c(0,1),opt.input.binom, data=d, method="BFGS")
|
607e45b1e6113364a7ae9e65a21e9a6a188a10a5
|
7075471c1b29d89c7d08cc3c72ef888609e76dd1
|
/server.R
|
7b120f4a5467b0c5193bb8b5be9b82a190df41ff
|
[] |
no_license
|
manshrestha/Shiny-Application-and-Reproducible-Pitch
|
ddee2a7157a4666f70710a205aebad701007854e
|
f340a695ef3566abe56b68b6dd5dee4c4aca9d99
|
refs/heads/master
| 2020-12-30T22:32:18.419172
| 2016-05-12T11:11:18
| 2016-05-12T11:11:18
| 58,634,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,184
|
r
|
server.R
|
# Perform all necessary initialization
# One-time app setup: attaches shiny, loads the mtcars data set and fixes
# the RNG seed for reproducibility. Called once at server start-up.
init <- function(){
    library(shiny)  # load Shiny Library
    data(mtcars)    # load mtcars dataset
    set.seed(888)   # set seed for reproducibility
}
# Build the mpg prediction model: recode transmission as labelled factor,
# convert the discrete columns to factors, fit mpg on all predictors and
# reduce the model by backward stepwise selection (AIC).
regression <- function(){
    mtcars$am <- ifelse(mtcars$am == 0, "Automatic", "Manual")
    for (col in c("cyl", "vs", "am", "gear", "carb")) {
        mtcars[[col]] <- as.factor(mtcars[[col]])
    }
    full_fit <- lm(mpg ~ ., data = mtcars)
    step(full_fit, direction = "backward")
}
init()
# Backward-selected mpg model, fitted once and reused by every session.
model <- regression()
# Server logic: validate the user-entered car specs against the ranges seen
# in mtcars and, when all inputs are valid, predict mpg with a 95% interval.
shinyServer(
    function(input, output, session){
        # One flag per input; 1 marks an invalid value.
        invalid <- reactiveValues(cyl = 0, hp = 0, wt = 0)
        cyl <- reactive(input$cyl)
        hp <- reactive(input$hp)
        wt <- reactive(input$wt)
        am <- reactive(input$am)
        # Check if Cylinder is out of range or not
        observe({
            if((cyl() %% 1 != 0) | (cyl() %% 2 != 0)){
                output$cyl <- renderPrint({"Cylinder is Invalid."})
                invalid$cyl <- 1
            }else if((cyl() < min(mtcars$cyl)) | (cyl() > max(mtcars$cyl))){
                output$cyl <- renderPrint({"Number of Cylinder is Out of Range."})
                invalid$cyl <- 1
            }else {
                output$cyl <- renderPrint({input$cyl})
                invalid$cyl <- 0
            }
        }, 
        label = "cyl", priority = 2)
        # Check if Horse Power is out of range or not
        observe({
            if((hp() < min(mtcars$hp)) | (hp() > max(mtcars$hp))){
                output$hp <- renderPrint({"Horse Power is Out of Range"})
                invalid$hp <- 1
            } else {
                output$hp <- renderPrint({input$hp})
                invalid$hp <- 0
            }
        }, 
        label = "hp", priority = 2)
        # Check if Weight is out of range or not
        observe({
            if((wt() < min(mtcars$wt)) | (wt() > max(mtcars$wt))){
                output$wt <- renderPrint({"Weight is Out of Range"})
                invalid$wt <- 1
            } else {
                output$wt <- renderPrint({input$wt})
                invalid$wt <- 0
            }
        }, 
        label = "wt", priority = 2)
        output$am <- renderPrint({input$am})
        # Perform prediction with the regression model from mtcar dataset
        # using the parameters that the user entered.
        # NOTE(review): when inputs are invalid only mpg_avg is refreshed, so
        # mpg_min may show a stale value — confirm whether that is intended.
        observe({
            if(!(invalid$cyl | invalid$hp | invalid$wt)){
                carspecs <- data.frame(cyl = cyl(), hp = hp(), 
                                       wt = wt(), am = am())
                # cyl must be a factor to match the levels used when fitting.
                carspecs$cyl <- as.factor(carspecs$cyl)
                mpg_predict <- predict(model, carspecs, 
                                       interval = "predict", 
                                       level = 0.95)
                # fit = point estimate, lwr = lower bound of the 95% interval.
                output$mpg_avg <- renderPrint({mpg_predict[1]})
                output$mpg_min <- renderPrint({mpg_predict[2]})
            } else{
                output$mpg_avg <- renderPrint({"Invalid Car Specs. Please check your inputs"})
            }
        }, 
        label = "predict", priority = 1)
    }
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.