blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a536a3f952033528d009471264325d08fe53247e
|
aa30f641d17b4658b33ecb28571ad46076128361
|
/plot3.R
|
3cdc4fcfa87ca4a9c3c662e126ef501939718a63
|
[] |
no_license
|
zubayer16/ExData_Plotting1
|
d9b9b47251d9468a9714d04fa49f65244b73a388
|
f1793c324d7ccd6b00b68ba6d0a07d593cdf81ee
|
refs/heads/master
| 2022-07-11T18:01:17.980736
| 2020-05-17T17:10:06
| 2020-05-17T17:10:06
| 264,683,629
| 0
| 0
| null | 2020-05-17T14:19:11
| 2020-05-17T14:19:10
| null |
UTF-8
|
R
| false
| false
| 1,331
|
r
|
plot3.R
|
## plot3.R
## Reads the UCI "household power consumption" data, keeps the two target
## days (2007-02-01 and 2007-02-02), plots the three sub-metering series
## against time, then copies the screen plot to plot3.png (480x480 default).

# Read the raw text file; fields are semicolon-separated with a header row.
powercon <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")

# Keep rows for the two dates (stored as d/m/Y strings).
# FIX: the original select list contained "Sub_metering_1" twice, producing a
# duplicated column; the duplicate entry has been removed.
powercon1 <- subset(powercon,
                    Date == "1/2/2007" | Date == "2/2/2007",
                    select = c("Date", "Time", "Global_active_power",
                               "Global_reactive_power", "Voltage",
                               "Global_intensity", "Sub_metering_1",
                               "Sub_metering_2", "Sub_metering_3"))

# Combine Date + Time into POSIXlt datetimes for the x-axis.
df <- strptime(paste(powercon1$Date, powercon1$Time, sep = " "),
               "%d/%m/%Y %H:%M:%S")

# Sub-metering columns may have been read as factors; convert via character
# so values are not silently replaced by factor level codes.
metering  <- as.numeric(as.character(powercon1$Sub_metering_1))
metering1 <- as.numeric(as.character(powercon1$Sub_metering_2))
metering2 <- as.numeric(as.character(powercon1$Sub_metering_3))

# Base plot: Sub_metering_1 as a black line, no x label.
plot(df, metering, type = "l", xlab = "", ylab = "Energy Sub metering")
# Overlay the remaining two series on the same axes.
lines(df, metering1, col = "red")
lines(df, metering2, col = "blue")
# Legend in the top-right corner identifying the three series.
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2, col = c("black", "red", "blue"))

# Copy the current device to a PNG (png() default is 480x480) and close it.
dev.copy(png, file = "plot3.png")
dev.off()
|
055ce406e5f991c0c5b1bd26115eb68d601c6b38
|
c82d2e02c0f8eb75857d01073e5a7bdf499b753b
|
/R/sc_sensitivity_analysis.R
|
d9b5fa2206af19b6294737f2362513065bece2a9
|
[
"MIT"
] |
permissive
|
passt/hsc-division-patterns
|
d680f917eeacaf2ca952ea1b180ade1c4ed4604d
|
2e93b0a910435e67aa7dcdce5566e44c84747e54
|
refs/heads/master
| 2022-12-19T04:46:39.928254
| 2020-09-29T08:52:19
| 2020-09-29T08:52:19
| 294,457,747
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,361
|
r
|
sc_sensitivity_analysis.R
|
# LIME variable importances for the h2o deep-learning model.
# LIME parameters optimized to give good local explanation of the model, and
# consistent interpretations across different cells.
library(lime)
library(h2o)
library(gplots)

# Start (or connect to) a local H2O cluster using all available threads.
localH2O <- h2o.init(nthreads = -1)

##############################################################################################################
# Import model - model generated by h2o.deeplearning command with parameters
# specified in the manuscript.
model.path <- "ANN"
model <- h2o.loadModel(model.path)

# Import PDC data: three CSV exports row-bound into one frame.
PDC_data8 <- read.csv("~/Desktop/PDC8.csv", header = FALSE)
PDC_data4 <- read.csv("~/Desktop/PDC4.csv", header = FALSE)
PDC_data18 <- read.csv("~/Desktop/PDCm18.csv", header = FALSE)
PDC_data <- rbind(PDC_data4, PDC_data8, PDC_data18)

# Import training data; keep the first 96 columns only.
training_data <- read.csv("~/Desktop/TFull.csv", header = FALSE)
training_data <- training_data[, 1:96]

# Predict class labels for the PDC cells.
P <- predict(model, as.h2o(PDC_data))
P <- as.data.frame(P)
L <- P$predict

# Keep only labels predicted for at least 50 cells.
# FIX: the original used plyr::count() without loading plyr anywhere in this
# file; base table() gives the same label frequencies.
label_counts <- table(L)
UL <- names(label_counts)[label_counts >= 50]

##############################################################################################################
# LIME explainer - 2 bins for interpretability, gene is high or low
explainer <- lime(x = training_data, model = model, n_bins = 2, bin_continuous = TRUE, quantile_bins = FALSE)

##############################################################################################################
# Run explainer on a random sample of up to 20 PDCs from each retained class.
ncells <- 20
for (j in 1:length(UL)) {
  idx <- which(L == UL[j], arr.ind = T)
  NumC <- length(idx)
  nc <- min(NumC, ncells)          # sample size capped by class size
  s <- sample(1:NumC, nc)
  if (j == 1) {
    test_data <- PDC_data[idx[s], ]
  } else {
    temp_data <- PDC_data[idx[s], ]
    test_data <- rbind(temp_data, test_data)
  }
}

# Explain each sampled cell using its 10 most informative features.
nfeatures <- 10
explanation <- lime::explain(
  x = as.data.frame(test_data),
  explainer = explainer,
  n_features = nfeatures,
  dist_fun = "euclidean",
  kernel_width = 1.3,
  feature_select = "forward_selection",
  n_labels = 1)

# Discard explanations with a poor local fit (R^2 <= 0.25).
idx <- which(explanation$model_r2 > 0.25)
explanation <- explanation[idx, ]

##############################################################################################################
# Clean explainer output: merge the "less than"/"greater than" descriptions of
# each feature, flipping the weight sign for the "less than" form.
Ftrs <- unique(explanation$feature)
nfeat <- length(Ftrs)
for (j in 1:nfeat) {
  st <- paste(Ftrs[j], "\\b", sep = "")
  idx <- which(grepl(st, explanation$feature_desc)) # indices associated with this feature
  U <- unique(explanation$feature_desc[idx])        # feature descriptions
  ndesc <- length(U)                                # number of descriptions (could be two)
  for (k in 1:ndesc) {
    I <- which(grepl(U[k], explanation$feature_desc)) # indices for the k-th description
    if (grepl("V", substring(U[k], 1, 1)) == TRUE) {
      explanation$feature_weight[I] <- -explanation$feature_weight[I] # "less than": switch sign
      if (ndesc == 2) {
        kswitch <- setdiff(c(1, 2), k) # switch description if an alternative "greater than" form appears
        explanation$feature_desc[I] <- U[kswitch]
      }
    }
  }
}

##############################################################################################################
# Most informative features per class.
# Explainer renames labels: "." is "+" and "..1" is "-".
ClassLabels <- unique(explanation$label)
vectorOfYf <- vector(mode = "list", length = length(ClassLabels))
Mout <- matrix(0, length(ClassLabels), nfeatures)
for (j in 1:length(ClassLabels)) {
  idx <- which(explanation$label == ClassLabels[j], arr.ind = T)
  nclass_cells <- length(unique(explanation$case[idx]))
  Xf <- explanation$feature_desc[idx]
  # feature-description frequency table, normalized per cell in the class
  Yf <- as.data.frame(sort(table(Xf), decreasing = T))
  Yf[, 2] <- Yf[, 2] / (nclass_cells)
  vectorOfYf[[j]] <- Yf
  d <- explanation[idx, ]
  v <- vectorOfYf[[j]]
  v <- v[[1]]
  Mouttemp <- matrix(0, 1, nfeatures)
  for (i in 1:nfeatures) {
    I <- which(d$feature_desc == v[i], arr.ind = T)
    Mouttemp[i] <- sum(sign(d$feature_weight[I])) # net direction of the feature's weights
  }
  Mout[j, ] <- Mouttemp
}

##############################################################################################################
# Venn diagram of top features across classes.
# NOTE(review): the original passes vectorOfYf[[3]] three times; sets 4 and 5
# were probably meant to be [[4]] and [[5]] (or only 3 sets were intended) —
# preserved as-is, confirm before relying on this figure.
V <- venn(list(vectorOfYf[[1]]$Xf[1:nfeatures], vectorOfYf[[2]]$Xf[1:nfeatures], vectorOfYf[[3]]$Xf[1:nfeatures], vectorOfYf[[3]]$Xf[1:nfeatures], vectorOfYf[[3]]$Xf[1:nfeatures]))
|
b95bb07234d8c3f2ad3412421903d25ab45ef4c7
|
58b373c005dd500abac956cd56282bb6bb0c9cdd
|
/models/lmmodels/multiple linear model regression.R
|
106bd0607dcb316c3c022c33b93be1d225c95264
|
[] |
no_license
|
Manjunathambv/Data-Science_-_Insofe_R_programming-
|
6555974b924ce21783672e66c40a71706c970189
|
a9ab6d5b0b3731905b5e52bf31005349a9ce7b50
|
refs/heads/master
| 2022-11-14T05:09:30.046663
| 2020-06-20T03:59:19
| 2020-06-20T03:59:19
| 273,633,155
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,558
|
r
|
multiple linear model regression.R
|
## Multiple linear regression on housing data:
## impute -> standardize -> fit lm -> stepAIC -> VIF-based pruning -> evaluate.
# NOTE: the original script began with rm(list = ls()); removed, because wiping
# the caller's global environment is a side effect scripts should never have.

# Load the data and inspect its structure.
data <- read.csv("housing_data.csv")
dim(data)
names(data)
head(data, 10)
str(data)

# Separate target, categorical, and numeric attribute names.
attribute_names <- colnames(data)
target_names <- "MV"
independent_attributes <- setdiff(attribute_names, target_names)
character_names <- c("CHAS", "RAD")
numeric_variable <- setdiff(independent_attributes, character_names)
target_names
independent_attributes
numeric_variable
character_names

# CHAS and RAD are categorical codes; store them as factors.
data[, character_names] <- lapply(data[, character_names], as.factor)
str(data)
head(data)

## Split the data into train (70%) and test (30%) sets.
library(caret)
set.seed(1234)
splitting <- createDataPartition(y = data$MV, p = 0.7, list = FALSE)
head(splitting)
train_data <- data[splitting, ]
test_data <- data[-splitting, ]
head(train_data)
tail(test_data)

# Data preprocessing: check for missing values.
summary(train_data)
colSums(is.na(train_data))

## Median-impute numeric missing values (imputation model fit on train only,
## then applied to both train and test to avoid leakage).
imputed_model <- preProcess(x = train_data, method = "medianImpute")
sum(is.na(train_data))
train_imputed <- predict(object = imputed_model, newdata = train_data)
sum(is.na(train_imputed))
colSums(is.na(train_imputed))
test_imputed <- predict(object = imputed_model, newdata = test_data)
sum(is.na(test_imputed))
colSums(is.na(test_imputed))

# Central imputation for the remaining (categorical) missing values.
library(DMwR)
train <- centralImputation(train_imputed)
test <- centralImputation(test_imputed)
colSums(is.na(train))
colSums(is.na(test))

## Relationship between numeric predictors and the target.
library(corrplot)
corrplot::corrplot(cor(train[, c(numeric_variable, target_names)],
                       use = "complete.obs"), method = "number")

# Standardize numeric columns using train statistics only.
std_model <- preProcess(train[, numeric_variable], method = c("center", "scale"))
train[, numeric_variable] <- predict(object = std_model, newdata = train[, numeric_variable])
test[, numeric_variable] <- predict(object = std_model, newdata = test[, numeric_variable])

# Full multiple linear regression model and diagnostic plots.
mlr_model <- lm(formula = MV ~ ., data = train)
summary(mlr_model)
par(mfrow = c(2, 2))
plot(mlr_model)

# Stepwise model selection by AIC (both directions).
library(MASS)
stepAIC_model <- stepAIC(mlr_model, direction = "both")
summary(stepAIC_model)
par(mfrow = c(2, 2))
plot(stepAIC_model)

# Multicollinearity diagnostics (variance inflation factors).
library(car)
vif(mlr_model)
vif(stepAIC_model)

# Refit after dropping high-VIF predictors.
mlr_model_VIF <- lm(formula = MV ~ CRIM + ZN + CHAS + NOX + RM + DIS + RAD + PT + B + LSTAT,
                    data = train)
summary(mlr_model_VIF)
vif(mlr_model_VIF)

# Evaluate on train and test with regression error metrics (DMwR::regr.eval).
train_pred <- predict(mlr_model_VIF, train[, independent_attributes])
test_pred <- predict(mlr_model_VIF, test[, independent_attributes])
regr.eval(trues = train$MV, preds = train_pred)
regr.eval(trues = test$MV, preds = test_pred)
|
a602ed3a4dbd84fc229fcc7ce2ab8c57c575a47d
|
f624d93869a9c8404bf10cf48cd21db6f7993a8a
|
/data-raw/ch_12_work.R
|
540d7682d987762b7728bdcb647bd6b1b0183533
|
[] |
no_license
|
clayford/bme
|
470ba4f0ff078d1c0f963455f89727e3a3a76616
|
38909ccddb301fc8c3c7b3abab93ad672ea2dbcd
|
refs/heads/master
| 2021-01-21T11:40:24.124585
| 2019-02-01T21:39:35
| 2019-02-01T21:39:35
| 56,441,842
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,531
|
r
|
ch_12_work.R
|
# Biostats in epidemiology
# ch 12 work
# 12.2 --------------------------------------------------------------------
# Example 12.2
library(epitools)
schizophrenia
# crude death rate and directly standardized death rate
# (scaled to deaths per 1,000 person-years)
with(schizophrenia,
ageadjust.direct(count = cohort.deaths, pop = cohort.py, stdpop = alberta.pop)) * 10^3
# crude death rates
# Ra: cohort crude rate = age-specific rates weighted by cohort person-years
Ra <- sum((schizophrenia$cohort.py/sum(schizophrenia$cohort.py)) *
(schizophrenia$cohort.deaths/schizophrenia$cohort.py))
# Rb: Alberta (standard population) crude rate
Rb <- sum((schizophrenia$alberta.pop/sum(schizophrenia$alberta.pop)) *
(schizophrenia$alberta.deaths/schizophrenia$alberta.pop))
# directly standardized death rate
# Ras: cohort age-specific rates re-weighted by the standard (Alberta) population
Ras <- sum((schizophrenia$alberta.pop/sum(schizophrenia$alberta.pop)) *
(schizophrenia$cohort.deaths/schizophrenia$cohort.py))
Rbs <- sum((schizophrenia$alberta.pop/sum(schizophrenia$alberta.pop)) *
(schizophrenia$alberta.deaths/schizophrenia$alberta.pop))
# same as Rb
# crude rate ratio
CRR <- Ra/Rb
# standardized rate ratio
SRR <- Ras/Rb
# var of standardized death rate
# shorthand per age stratum k: D* = deaths, N* = person-years / population
Nsk <- schizophrenia$alberta.pop
Ns <- sum(schizophrenia$alberta.pop)
Dak <- schizophrenia$cohort.deaths
Nak <- schizophrenia$cohort.py
Dbk <- schizophrenia$alberta.deaths
Nbk <- schizophrenia$alberta.pop
# sqrt(sum((Nsk/Ns)^2 * (Dak/(Nak^2))))
varRas <- sum((Nsk/Ns)^2 * (Dak/(Nak^2)))
varRbs <- sum((Nsk/Ns)^2 * (Dbk/(Nbk^2)))
# variance of log(SRR) via the delta method
varlogSRR <- varRas/(Ras^2) + varRbs/(Rb^2)
# 95 CI for SRR
exp(log(SRR) + c(-1,1) * qnorm(0.975) * sqrt(varlogSRR))
# function for this
# std.rate.ratio: crude and directly standardized rate ratios with a CI.
#   count    - event counts per stratum in the study population
#   pop      - person-years (or population) per stratum, study population
#   stdcount - event counts per stratum in the standard population
#   stdpop   - population per stratum in the standard population
#   conf.level - confidence level for the SRR interval (default 0.95)
# Returns a list with the standardized rate ratio, its CI, the crude rate
# ratio, and the directly standardized rate.
std.rate.ratio <- function(count, pop, stdcount, stdpop, conf.level = 0.95) {
  # stratum weights and age-specific rates for each population
  w_study <- pop / sum(pop)
  w_std <- stdpop / sum(stdpop)
  rate_study <- count / pop
  rate_std <- stdcount / stdpop
  # crude rates: own-population weights; standardized: standard weights
  Ra <- sum(w_study * rate_study)
  Rb <- sum(w_std * rate_std)
  Ras <- sum(w_std * rate_study)
  crude_ratio <- Ra / Rb
  std_ratio <- Ras / Rb
  # delta-method variance of log(SRR)
  var_ras <- sum(w_std^2 * (count / pop^2))
  var_rbs <- sum(w_std^2 * (stdcount / stdpop^2))
  var_log_srr <- var_ras / Ras^2 + var_rbs / Rb^2
  z <- qnorm(1 - (1 - conf.level) / 2)
  ci <- exp(log(std_ratio) + c(-1, 1) * z * sqrt(var_log_srr))
  list("Standardize rate ratio" = std_ratio,
       "95% CI for SRR" = ci,
       "Crude rate ratio" = crude_ratio,
       "directly standardized death rate" = Ras)
}
# Apply the helper to the schizophrenia data set.
with(schizophrenia, std.rate.ratio(count = cohort.deaths, pop = cohort.py,
stdcount = alberta.deaths, stdpop = alberta.pop))
# 12.3 --------------------------------------------------------------------
# Example 12.3
str(schizophrenia)
# observed cohort deaths
Da <- sum(schizophrenia$cohort.deaths)
# standard (Alberta) age-specific death rates
Rsk <- schizophrenia$alberta.deaths/schizophrenia$alberta.pop
Nak <- schizophrenia$cohort.py
# expected deaths if the cohort experienced the standard rates
Ea <- sum(Rsk*Nak)
# standardized mortality ratio
SMR <- Da/Ea
varSMR <- SMR/Ea
# 95% CI
SMR + c(-1,1)*qnorm(0.975)*sqrt(varSMR)
# This returns a slightly different CI
ageadjust.indirect(count = schizophrenia$cohort.deaths,
pop = schizophrenia$cohort.py,
stdcount = schizophrenia$alberta.deaths,
stdpop = schizophrenia$alberta.pop)
# exact CI (if Ea < 5)
# use 10.9 and 10.10 with n = Ea and d = Da (time = Ea and status = Da)
# NOTE(review): exact.rate.test() is not defined in this file - presumably
# sourced from the chapter 10 code; confirm it is in scope before running.
exact.rate.test(time = Ea, status = Da)$conf.int
exact.rate.test(time = Ea, status = Da)
# For the 10-19 age group with Da = 2 and Ea = 0.376
exact.rate.test(time = 0.376, status = 2)
# hypothesis of no mortality difference between cohort and the standard pop'n:
STATISTIC <- ((Da - Ea)^2)/Ea
# smr.test: standardized mortality ratio test packaged as an "htest" object.
#   count    - observed deaths per age stratum (study cohort)
#   pop      - person-years per stratum (study cohort)
#   stdcount - deaths per stratum in the standard population
#   stdpop   - population per stratum in the standard population
#   conf.level - confidence level for the CI (default 0.95)
# Uses an exact test when the expected death count Ea < 5, otherwise a
# chi-square test. Depends on exact.rate.test(), which is not defined in
# this file (presumably from the chapter 10 code) - confirm it is in scope.
smr.test <- function(count, pop, stdcount, stdpop, conf.level = 0.95){
# data label shown by print.htest
dname <- paste("\n ", deparse(substitute(count)),
"\n ", deparse(substitute(pop)),
"\n ", deparse(substitute(stdcount)),
"\n ", deparse(substitute(stdpop)))
alternative <- "two.sided"
# observed deaths, standard rates, and expected deaths under standard rates
Da <- sum(count)
Rsk <- stdcount/stdpop
Nak <- pop
Ea <- sum(Rsk*Nak)
# standardized mortality ratio
est <- Da/Ea
names(est) <- "Standardized Mortality Ratio"
null <- 1
names(null) <- names(est)
if(Ea < 5){
# small expected count: exact CI and p-value
CINT <- exact.rate.test(time = Ea, status = Da)$conf.int
attr(CINT, "conf.level") <- conf.level
p.value <- exact.rate.test(time = Ea, status = Da)$p.value
METHOD <- paste("Exact test of no mortality difference between cohort and standard population")
RVAL <- list(p.value = p.value,
estimate = est, null.value = null,
conf.int = CINT, alternative = alternative,
method = METHOD,
data.name = dname)
} else {
# large-sample: normal-approximation CI and chi-square statistic
varSMR <- est/Ea
# 95% CI
alpha <- (1-conf.level)/2
CINT <- est + c(-1,1)*qnorm(1 - alpha)*sqrt(varSMR)
attr(CINT, "conf.level") <- conf.level
STATISTIC <- ((Da - Ea)^2)/Ea
p.value <- pchisq(q = STATISTIC, df = 1, lower.tail = FALSE)
names(STATISTIC) <- "X-squared"
METHOD <- paste("Test of no mortality difference between cohort and standard population")
RVAL <- list(statistic = STATISTIC, parameter = c(df = 1), p.value = p.value,
estimate = est, null.value = null,
conf.int = CINT, alternative = alternative,
method = METHOD,
data.name = dname)
}
# print via the standard htest machinery
class(RVAL) <- "htest"
return(RVAL)
}
# Large-sample SMR test on the full cohort.
with(schizophrenia,
smr.test(count = cohort.deaths,
pop = cohort.py,
stdcount = alberta.deaths,
stdpop = alberta.pop))
# Exact test (expected number of deaths < 5)
with(subset(schizophrenia,age.group=="10-19"),
smr.test(count = cohort.deaths,
pop = cohort.py,
stdcount = alberta.deaths,
stdpop = alberta.pop))
# top <- schizophrenia[,1:3]
# top$exp <- 1
# bot <- schizophrenia[,c(1,4,5)]
# bot$exp <- 2
# names(top) <- names(bot) <- c("age.group","deaths","pop", "exp")
# dat <- rbind(top,bot)
#
# schizL <- melt(schizophrenia, id.vars = c("age.group","cohort.deaths"))
#
# lrt.homogeneity(time, status, exposure, strata)
# 12.4 --------------------------------------------------------------------
# age-period-cohort analysis
# NOTE(review): 'females' (apparently a year-by-age-group rate matrix) is not
# defined in this file - presumably loaded elsewhere; confirm before running.
# Fig 12.1(a): one curve per age group, death rate over calendar time.
year <- as.integer(rownames(females))
plot(year, females[,1], type="b", ylim = c(0,50),
ylab = "Death Rate (per 100,000)", xlab="Year")
lines(year, females[,2], type = "b", pch = 2)
lines(year, females[,3], type = "b", pch = 3)
lines(year, females[,4], type = "b", pch = 4)
lines(year, females[,5], type = "b", pch = 5)
legend("topright", legend = colnames(females), pch = 1:5, title = "Age group")
# Fig 12.2(b): one curve per time period, death rate over age group.
ag <- unclass(factor(colnames(females)))
plot(ag, females[1,], type="b", ylim = c(0,50),
ylab = "Death Rate (per 100,000)", xlab="Age group", axes=F)
lines(ag, females[2,], type = "b", pch = 2)
lines(ag, females[3,], type = "b", pch = 3)
lines(ag, females[4,], type = "b", pch = 4)
lines(ag, females[5,], type = "b", pch = 5)
legend("top", legend = rownames(females), pch = 1:5, title = "Time period")
axis(1, at=1:5, labels = colnames(females))
axis(2, at=seq(0,50,10), labels = seq(0,50,10))
# Fig 12.2(c): birth-cohort diagonals, extracted with getDiag() below.
# diag(females[1:3,3:5])
# k <- pmin(nrow(females) - 1, ncol(females) - 3)
# diag(females[1:(1 + k),3:(3 + k)])
# getDiag: extract down-right diagonals of matrix m starting at (i[n], j[n]).
# Each diagonal is clipped to the matrix bounds. Returns a single list of
# length 2 * length(i): first the column-index vectors for every start, then
# the corresponding diagonal values.
getDiag <- function(m, i, j) {
  # number of steps each diagonal can take before leaving the matrix
  steps <- pmin(nrow(m) - i, ncol(m) - j)
  cols <- lapply(seq_along(i), function(n) j[n]:(j[n] + steps[n]))
  vals <- lapply(seq_along(i), function(n) {
    row_idx <- i[n]:(i[n] + steps[n])
    col_idx <- j[n]:(j[n] + steps[n])
    diag(m[row_idx, col_idx])
  })
  c(cols, vals)
}
# row and column coordinates for getDiag function
# (each (i[n], j[n]) pair is the starting cell of one birth-cohort diagonal)
i <- c(3,2,1,1,1)
j <- c(1,1,1,2,3)
dr <- getDiag(females,i,j)
# first five list elements are x coordinates, last five the diagonal rates
plot(dr[[1]], dr[[6]], type="b", xlim = c(1,5), ylim = c(0,50),
ylab = "Death Rate (per 100,000)", xlab="Age group", axes=F)
lines(dr[[2]], dr[[7]], type="b", pch=2)
lines(dr[[3]], dr[[8]], type="b", pch=3)
lines(dr[[4]], dr[[9]], type="b", pch=4)
lines(dr[[5]], dr[[10]], type="b", pch=5)
axis(1, at=1:5, labels = colnames(females))
axis(2, at=seq(0,50,10), labels = seq(0,50,10))
legend("top", legend = seq(1930,1970,10), pch = 1:5, title = "Birth Cohort")
|
58afb698922f852dd1fb9329601ad89ffcb9bc41
|
24b46b2f61e37af930fb8d20cf3873ac57b42abb
|
/04_Codes/simulation_experi/phase 1.R
|
002ea7cec2de63c93ee39b59bc98feb5e6788cb3
|
[] |
no_license
|
anqiChen9306/Territory-Mangement
|
1e1e0c169b4179a3a0eddb14da81eeb4795c4636
|
011f8dda27cce4328cc57fd1c67a12144136558c
|
refs/heads/master
| 2020-03-06T14:41:58.250533
| 2018-07-25T12:24:38
| 2018-07-25T12:24:38
| 126,940,383
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,288
|
r
|
phase 1.R
|
# Phase-1 simulation: randomly generate decision/management combinations,
# filter them against business constraints, then score the survivors.
# NOTE(review): global.R and backupofdata_0.RData supply the helpers
# (get.data1/2/3, calculation, run_for_results) and objects (salesmen_list,
# pp_data1, pp_data2) used later in this script - confirm they load cleanly.
library(tidyr)
library(dplyr)
library(DT)
library(data.table)
library(reshape2)
library(stringr)
library(parallel)
library(foreach)
library(doSNOW)
setwd("D:\\Rachel\\WorkMaterial\\Pharbers_git\\Territory Mangement\\04_Codes\\simulation_experi")
source("global.R")
load("backupofdata_0.RData")
## set the random number generator seed
set.seed(1000)
## you can lay the 60 options as a list with length equal to 60 here
# option universe sampled below:
#   a = salesman codes, b = budget levels, c = target factors,
#   d = training/field-work hours, e = product contact hours
universe <- list(a = letters[1:5],
b = seq(0,15,3), #6
c = c(0.95,1,1.1,1.2),
d = c(4, 6, 8),
e = seq(4,24,4)) #9
## please set a container to store the combinations
# 118 columns = 10 hospitals x 10 slots + 18 management slots
contain_phase1 <- matrix(ncol = 118)
cl <- makeCluster(4, outfile="") # number of cores. Notice 'outfile'
registerDoSNOW(cl)
iterations <- 100000
# progress bar wired into doSNOW via 'opts'
pb <- txtProgressBar(min = 1, max = iterations, style = 3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress = progress)
## here is the loop to do the work
# Each iteration draws one random 118-element combination: for each of the 10
# hospitals a salesman, a budget, and (target factor, contact hours) for three
# products plus two zero placeholders; then 18 management time slots.
system.time(
contain_phase1 <- foreach(i = 1:iterations, .combine = rbind,
.options.snow = opts) %dopar% {
tmp<- c(universe[[1]][sample(1:5, 1)], ##hosp 1 salesmen
universe[[2]][sample(1:6, 1)], # budget
universe[[3]][sample(1:4, 1)], # target factor
universe[[5]][sample(1:6, 1)], # product value
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0,
universe[[1]][sample(1:5, 1)], ##hosp 2
universe[[2]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0,
universe[[1]][sample(1:5, 1)], ##hosp 3
universe[[2]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0,
universe[[1]][sample(1:5, 1)], ##hosp 4
universe[[2]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0,
universe[[1]][sample(1:5, 1)], ##hosp 5
universe[[2]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0,
universe[[1]][sample(1:5, 1)], ##hosp 6
universe[[2]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0,
universe[[1]][sample(1:5, 1)], ##hosp 7
universe[[2]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0,
universe[[1]][sample(1:5, 1)], ##hosp 8
universe[[2]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0,
universe[[1]][sample(1:5, 1)], ##hosp 9
universe[[2]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0,
universe[[1]][sample(1:5, 1)], ##hosp 10
universe[[2]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:4, 1)],
universe[[5]][sample(1:6, 1)],
universe[[3]][sample(1:3, 1)],
universe[[5]][sample(1:6, 1)],
0,
0, ##10
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)], ## field work
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)], ##product training
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)], ##sales training
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)],
universe[[4]][sample(1:3, 1)])
# if (any(apply(contain_phase1, 2, function(x) identical(x, tmp)))) {
# next
# } else {
# contain_phase1 <- rbind(contain_phase1, tmp)
# }
# # if you want 100000, you can change the 100 to 100000
# if (nrow(contain_phase1) == 10000) break
# contain_phase1 <- rbind(contain_phase1, tmp)
setTxtProgressBar(pb, i)
return(tmp)
}
)
close(pb)
stopCluster(cl)
### filter
# Convert the sampled matrix to a data frame and attach descriptive column
# names matching the 118-slot layout produced above.
tmist_container_phase1 <-
as.data.frame(contain_phase1, stringsAsFactors = FALSE)
# part1: hospital decision columns (salesmen/budget for j == 1, then
# per-product target factor and contact hours for products 1..4)
part1 <- NULL
for (i in 1:10) {
for (j in 1:4) {
if ( j == 1) {
tmp <- c(paste("hosp_",i,"_salesmen",sep=""),
paste("hosp_",i,"_budget",sep=""),
paste("hosp_",i,"_prod_",j,"_target_factor",sep=""),
paste("hosp_",i,"_prod_",j,"_prod_hours",sep=""))
} else {
tmp <- c(paste("hosp_",i,"_prod_",j,"_target_factor",sep=""),
paste("hosp_",i,"_prod_",j,"_prod_hours",sep=""))
}
part1 <- c(part1, tmp)
}
}
# part2: per-salesman management columns plus three shared manager slots
part2 <- NULL
for (i in 1:5) {
tmp <- c(paste("man_salesmen_",i,"_field_work",sep=""),
paste("man_salesmen_",i,"_product_training",sep=""),
paste("man_salesmen_",i,"_sales_training",sep=""))
part2 <- c(part2,tmp)
}
part2 <- c(part2,"man_admin_work","man_kpi_analysis","man_meetings_with_team")
colnames(tmist_container_phase1) <- c(part1, part2)
### processing each combination, including data manipulation & filtering & calculation
## filter combinations within restriction
# restriction 1 : sum of arranged worktime of each salesmen &flm <=100
# restriction 2 : no arrangement of time & budget in hospital is without salesmen
# restriction 3 : sum of arranged budget <=100
# restriction 4 : field work time <= arranged contact time
# restriction 5 : contact time of each salesmen >0
# tmist_container_phase1m <- vector("list", nrow(tmist_container_phase1))
cl <- makeCluster(4, outfile="") # number of cores. Notice 'outfile'
registerDoSNOW(cl)
iterations <- nrow(tmist_container_phase1)
pb <- txtProgressBar(min = 1, max = iterations, style = 3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress = progress)
# NOTE(review): the loop below runs 1:1000 although 'iterations' was just set
# to nrow(tmist_container_phase1) - looks like a leftover from testing;
# confirm the intended range before a full run.
tmist_container_phase1m <- foreach(i = 1:1000, #.combine = c,
.options.snow = opts,
.packages = c("dplyr", "tidyr", "DT",
"data.table", "reshape2",
"stringr")) %dopar% {
# reshape one combination row to long form; split hospital vs management rows
tmp <- tmist_container_phase1[i,] %>%
gather(name, value) %>%
mutate(split = ifelse(substr(name, 1, 4) == "hosp", 1, 2))
## extract account decision part
# extract hosp code
decision_input_m <- subset(tmp, split == "1", select = c(name, value)) %>%
mutate(hosp_code = substr(name,
sapply(name,function(x)str_locate_all(x, "_")[[1]][1,1]+1),
sapply(name,function(x)str_locate_all(x, "_")[[1]][2,1]-1)),
others = substr(name,sapply(name,function(x)str_locate_all(x, "_")[[1]][2,1]+1), str_length(name)))
# extract salesment & budget
decision_input_m1 <- subset(decision_input_m, others %in% c("salesmen","budget"),
select = c(hosp_code, others, value)) %>%
spread(others, value)
# extract others
decision_input_m2 <- subset(decision_input_m, !(others %in% c("salesmen","budget")),
select = c(hosp_code, others, value)) %>%
mutate(prod_code = substr(others, 6, 6),
others = substr(others, 8, str_length(others))) %>%
spread(others, value)
decision_input <- decision_input_m1 %>%
left_join(decision_input_m2, by = "hosp_code")
# map salesman letter codes a..e to salesman names
decision_input$salesmen <- sapply(decision_input$salesmen, function(x) {
switch(x,
a = "小宋",
b = "小兰",
c = "小木",
d = "小白",
e = "小青")
})
# coerce the character-typed columns back to numeric
decision_input$budget <- as.numeric(decision_input$budget)
decision_input$prod_hours <- as.numeric(decision_input$prod_hours)
decision_input$target_factor <- as.numeric(decision_input$target_factor)
decision_input$phase <- 1
decision_input$hosp_code <- as.numeric(decision_input$hosp_code)
decision_input$prod_code <- as.numeric(decision_input$prod_code)
## extract management decision part
# extract salesmen
management_input_m <- subset(tmp, split == "2", select = c(name,value)) %>%
mutate(is.personel = ifelse(substr(name,5,9) == "sales", 1, 2),
value = as.numeric(value))
# extract personal training
management_input_m1 <- subset(management_input_m, is.personel == 1,
select = c(name, value)) %>%
mutate(salesmen = substr(name, 14, 14),
name = substr(name, 16, str_length(name))) %>%
spread(name, value)
# extract shared team time
management_input_m2 <- subset(management_input_m, is.personel != 1,
select = c(name, value)) %>%
mutate(name = substr(name, 5, str_length(name))) %>%
spread(name, value)
management_input <- data.frame(management_input_m1,
management_input_m2)
management_input$phase <- 1
# map salesman index 1..5 to the same names used above
management_input$salesmen <- sapply(management_input$salesmen, function(x) {
switch(x,
"1" = "小宋",
"2" = "小兰",
"3" = "小木",
"4" = "小白",
"5" = "小青")})
## promotional fee <= 100
restrict_test1 <- decision_input %>%
select(hosp_code, budget) %>%
distinct()
## worktime <= 100
decision_part <- decision_input %>%
group_by(salesmen) %>%
summarise(contact_time = sum(as.numeric(prod_hours, na.rm = T)))
management_part <- data.frame(salesmen = management_input$salesmen,
other_time = apply(subset(management_input,
select = -field_work), 1, function(x) sum(as.numeric(x[2:6]))))
total_time <- decision_part %>%
left_join(management_part, by = "salesmen") %>%
mutate(total_worktime = contact_time +other_time)
## field worktime <= contract time
field_work_part <- data.frame(salesmen = management_input$salesmen,
field_work_time = management_input$field_work)
## flm worktime <= 100
flm_worktime <- sum(as.numeric(management_input$field_work),
as.numeric(management_input$sales_training),
as.numeric(management_input$admin_work[1]),
as.numeric(management_input$kpi_analysis[1]),
as.numeric(management_input$meetings_with_team[1]))
# apply the restrictions; invalid combinations become a reason string,
# valid ones become a two-element list used by the scoring stage
if (length(unique(decision_input$salesmen[decision_input$salesmen%in%salesmen_list$salesmen])) != 5 ) {
tmist_container_phase1m <- "salesmen restrict"
} else if (sum(as.numeric(restrict_test1$budget)) > 100) {
tmist_container_phase1m <- "budget restrict"
} else if (any(total_time$total_worktime > 100)) {
tmist_container_phase1m <- "work time restrict"
} else if (any(as.numeric(field_work_part$field_work_time) > as.numeric(decision_part$contact_time))) {
tmist_container_phase1m <- "field work restrict"
} else if (flm_worktime > 100) {
tmist_container_phase1m <- "flm work time"
} else {
tmist_container_phase1m <- list("decision_input" = decision_input,
"management_input" = management_input)
}
return(tmist_container_phase1m)
}
close(pb)
stopCluster(cl)
# keep only the combinations that passed all restrictions (lists, not strings)
tmist_container_phase1m1 <- tmist_container_phase1m[sapply(tmist_container_phase1m, is.list)]
## calculation
cl <- makeCluster(4, outfile="") # number of cores. Notice 'outfile'
registerDoSNOW(cl)
iterations <- length(tmist_container_phase1m1)
pb <- txtProgressBar(min = 1, max = iterations, style = 3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress = progress)
# NOTE(review): pp_data1 / pp_data2 are not created anywhere in this file -
# presumably loaded from backupofdata_0.RData or global.R; confirm they exist.
# get.data1/2/3, calculation and run_for_results also come from global.R.
tmist_output_phase1 <- foreach(i = 1:iterations, #.combine = c,
.options.snow = opts,
.packages = c("dplyr", "tidyr", "DT",
"data.table", "reshape2",
"stringr")) %dopar% {
tmp <- tmist_container_phase1m1[[i]]
cp_data1 <- get.data1(tmp$decision_input)
cp_data2 <- get.data2(tmp$management_input)
flm_data <- get.data3(cp_data2)
data_to_use <- calculation(pp_data1,
pp_data2,
cp_data1,
cp_data2)
out <- run_for_results(data_to_use, flm_data)
out$phase =1
out$comb_no = i
# keep only the columns of interest from the intermediate data
inter_data_m <- data_to_use %>%
select(hosp_name,
hosp_code,
prod_name,
prod_code,
real_revenue,
real_volume,
sr_sales_performance,
deployment_quality_index,
customer_relationship_index,
promotional_support_index,
sales_performance,
offer_attractiveness,
acc_offer_attractiveness,
salesmen,
sales_level,
real_revenue_by_sr,
real_volume_by_sr,
sr_acc_revenue,
sales_skills_index,
product_knowledge_index,
motivation_index,
sr_acc_field_work,
target_revenue_realization_by_sr)
list(inter_data = inter_data_m,
result = out)}
# collect the per-combination result tables and bind into one data frame
tmist_output_phase1m <- lapply(tmist_output_phase1, function(x) {
x$result})
tmist_output_phase1m1 <- bind_rows(tmist_output_phase1m)
|
a449977ed97fdbfb2d216360580f86b7a613aeee
|
9ba83814967597f5b58cf0b91f15bd2f3895e379
|
/man/tictocify.Rd
|
bbe84bceb04d9512bec25e11fc66c9210fd9cafd
|
[] |
no_license
|
visuelledata/frite
|
a71f297167caa6b5261d1d724fa5f62ef88322d8
|
090d392bbc54a7efc47a2ab54eda26f905d8d09c
|
refs/heads/master
| 2020-03-20T12:43:14.430397
| 2018-07-03T03:28:47
| 2018-07-03T03:28:47
| 137,438,851
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,097
|
rd
|
tictocify.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tictocify.R
\name{tictocify}
\alias{tictocify}
\title{Creates a function that functions identically to the original, but gives execution time}
\usage{
tictocify(..f, tic_args = NULL, toc_args = NULL)
}
\arguments{
\item{..f}{Any function that isn't primitive (sum, list, etc...)}
\item{tic_args}{A list of arguments to be passed into tic()}
\item{toc_args}{A list of arguments to be passed into toc()}
}
\value{
function
}
\description{
Takes any function and returns a function that will behave in a nearly identical
manner, but will also return the function's execution time.
}
\details{
Creates a wrapper function around a function call, preceded by tic() and
toc(). The wrapper will be given the same arguments as the original function and
work identically to the original function.
}
\examples{
set.seed(1)
stuff <- rnorm(n = 100000)
lapply_new <- tictocify(lapply)
lapply_new(stuff, function(x) x > 0)
is.output.same(lapply_new(stuff, function(x) x > 0), lapply)
}
\seealso{
\code{\link{is.output.same}}
}
|
c2eb0e7b8d7160d46898ccc81795ce859fec13fb
|
8ec0decfa8bab54bce5eb1feffb4b89bdcadf2f2
|
/app.R
|
a8aafd86a9c796e7e1083bc171dd9bb95b8ef8a5
|
[] |
no_license
|
usfviz/jenniferzhu-hw2
|
f39e4bb695293602ed5ac70dfd5683bfdd40a486
|
b2f304484076bf0888fd106e95a91058e0663fd6
|
refs/heads/master
| 2021-01-19T19:47:28.558807
| 2017-04-18T05:46:16
| 2017-04-18T05:46:16
| 88,446,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,336
|
r
|
app.R
|
library(shiny)
library(ggvis)
library(reshape2)
library(dplyr)
# UI: region radio buttons (left), ggvis bubble chart (right) and an
# animatable year slider (bottom). The ggvis outputs are bound in server()
# via bind_shiny("ggvis", "ggvis_ui").
ui <- fluidPage(
  headerPanel('HW2 by Jennifer Zhu'),

  fluidRow(
    column(3,
           wellPanel(
             # Values must match the Region strings in the World Bank
             # country metadata; "All" disables the highlight filter.
             radioButtons("region", "Region",
                          c("All" = "All",
                            "East Asia & Pacific" = "East Asia & Pacific",
                            "Europe & Central Asia" = "Europe & Central Asia",
                            "Latin America & Caribbean" = "Latin America & Caribbean",
                            "Middle East & North Africa" = "Middle East & North Africa",
                            "North America" = "North America",
                            "South Asia" = "South Asia",
                            "Sub-Saharan Africa" = "Sub-Saharan Africa")
             )
           )
    ),
    column(3,
           mainPanel(
             # ggvis renders into this pair of placeholders.
             uiOutput("ggvis_ui"),
             ggvisOutput("ggvis")
           )
    )
  ),

  fluidRow(
    shiny::column(4, offset = 5,
                  wellPanel(
                    # World Bank series cover 1960-2014; the animation
                    # advances one year every 100 ms when played.
                    sliderInput("year",
                                "Year",
                                min = 1960,
                                max = 2014,
                                value = 1,
                                animate = animationOptions(interval = 100))
                  )
    )
  )
)
# Server: loads World Bank life-expectancy, fertility, region and population
# data, merges them into one long data frame, and renders a Gapminder-style
# ggvis bubble chart filtered by the selected year and highlighted region.
server <- function(input, output) {
  ## Process data into a single data frame
  # Load life expectancy (wide format: one column per year). `header` is a
  # logical; the original passed 5, which only worked because any non-zero
  # number is truthy.
  le <- read.csv('API_SP.DYN.LE00.IN_DS2_en_csv_v2/API_SP.DYN.LE00.IN_DS2_en_csv_v2.csv', header = TRUE, skip = 4, stringsAsFactors = FALSE)
  # Keep name/code plus the year columns, dropping trailing junk columns.
  le <- le[, c(1:2, 5:(ncol(le)-3))]
  # Make the wide table long: one row per country-year.
  le <- melt(le, id.vars = colnames(le)[1:2])
  colnames(le) <- c('Country.Name', 'Country.Code', 'Year', 'Life.Expectancy')

  # Load fertility rate, same wide-to-long treatment.
  fr <- read.csv('API_SP.DYN.TFRT.IN_DS2_en_csv_v2/API_SP.DYN.TFRT.IN_DS2_en_csv_v2.csv', header = TRUE, skip = 4, stringsAsFactors = FALSE)
  fr <- fr[, c(1:2, 5:(ncol(fr)-3))]
  fr <- melt(fr, id.vars = colnames(fr)[1:2])
  colnames(fr) <- c('Country.Name', 'Country.Code', 'Year', 'Fertility.Rate')

  # Country metadata: code -> region lookup.
  ct <- read.csv('API_SP.DYN.TFRT.IN_DS2_en_csv_v2/Metadata_Country_API_SP.DYN.TFRT.IN_DS2_en_csv_v2.csv', header = TRUE, stringsAsFactors = FALSE)[, 1:2]
  # Drop aggregate rows (they have an empty Region).
  ct <- ct[ct$Region != "",]

  # Population, wide-to-long.
  pl <- read.csv('population.csv', header = TRUE, stringsAsFactors = FALSE)
  pl <- pl[, c(2, 5:(ncol(pl)-2))]
  pl <- melt(pl, id.vars = 'Country.Code')
  colnames(pl) <- c('Country.Code', 'Year', 'Population')

  # Merge all series into one table keyed by country (and year).
  dat <- merge(le, fr, by = c('Country.Name', 'Country.Code', 'Year'))
  dat <- merge(dat, ct, by = 'Country.Code')
  dat <- merge(dat, pl, by = c('Country.Code', 'Year'))
  # Remove incomplete rows. (Bug fix: the original `dat[!is.na(dat), ]`
  # indexed rows with a logical *matrix*, which does not select complete
  # rows; complete.cases() is the row-wise test that was intended.)
  dat <- dat[complete.cases(dat), ]
  # Year columns were melted as factor levels like "X1960"; strip the "X".
  dat$Year <- as.integer(as.character(substring(dat$Year, 2)))
  # Stable per-row id: ggvis key and hover lookup.
  dat$id <- 1:nrow(dat)
  # One colour per region; indexing the factor by region index yields the
  # hex labels in region order.
  defaultColors <- factor(c("#dc3912", "#ff9900", "#109618", "#990099", "#0099c6", "#3366cc", "#dd4477"))
  dat$color <- defaultColors[as.numeric(factor(dat$Region))]
  # Baseline opacity; the selected region is raised to 0.7 in yearData().
  dat$Opacity <- 0.2

  # Tooltip callback: country name of the hovered point.
  all_values <- function(x) {
    if(is.null(x)) return(NULL)
    row <- dat[dat$id == x$id, ]
    paste(row$Country.Name)
  }

  # Reactive slice of the data for the selected year, with the selected
  # region highlighted via opacity.
  yearData <- reactive({
    dat2 <- dat
    if(input$region == 'All') dat2$Opacity <- 0.7
    else{
      dat2$Opacity[dat2$Region == input$region] <- 0.7
    }
    # Filter to the desired year; sort by region so colours group nicely.
    df <-
      dat2 %>%
      filter(Year == input$year) %>%
      select(Country.Name, Fertility.Rate, Life.Expectancy,
             Region, Population, id, color, Opacity) %>%
      arrange(Region)
    return(df)
  })

  # Bubble chart: x = life expectancy, y = fertility, size ~ population.
  ggvis(yearData, ~Life.Expectancy, ~Fertility.Rate, size := ~Population / 500000, key := ~id, fill = ~Region, opacity := ~Opacity) %>%
    add_tooltip(all_values, "hover") %>%
    layer_points() %>%
    hide_legend('fill') %>%
    hide_legend('size') %>%
    add_axis("x", title = 'Life expectancy', orient = "bottom") %>%
    add_axis("y", title = 'Fertility rate', orient = "left") %>%
    scale_numeric("x", domain = c(0, 90), nice = FALSE, clamp = TRUE) %>%
    scale_numeric("y", domain = c(0.5, 9), nice = FALSE, clamp = TRUE) %>%
    bind_shiny("ggvis", "ggvis_ui")
}
shinyApp(ui = ui, server = server)
|
4ca137d1e8cbf6e21dbfdc69414a58c1851ff7dc
|
fd733b41910324b1cd829c07963b01e89148dca1
|
/man/soql_add_endpoint.Rd
|
62a939414f6a62ea3b36ad7317f2332022de8ab1
|
[] |
no_license
|
cran/soql
|
a4d99c0d7a48b9132d8b6f374f9aba27bae09085
|
bfdb7859f0d4c42278a43c7ecbb28eb8e62537bb
|
refs/heads/master
| 2016-08-11T15:21:34.027062
| 2016-04-01T10:41:49
| 2016-04-01T10:41:49
| 54,423,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,080
|
rd
|
soql_add_endpoint.Rd
|
\name{soql_add_endpoint}
\alias{soql_add_endpoint}
\title{
Add SODA API endpoint
}
\description{
Add an endpoint to an already-existing \code{soql} object.
}
\usage{
soql_add_endpoint(soql_list, endpoint)
}
\arguments{
\item{soql_list}{
The \code{soql} object. If you don't have one yet, use the \code{soql()} function first. This can be piped in.
}
\item{endpoint}{
The endpoint should be the URL of the data, without any parameters.
}
}
\value{
Returns a new \code{soql} object, with the endpoint added, for use in other functions.
}
\references{
\href{https://dev.socrata.com/docs/endpoints.html}{Socrata's documentation on what an endpoint is}
}
\seealso{
\code{\link{soql}}
}
\examples{
if (require(magrittr)) \{
# With pipes
my_url <- soql() \%>\%
soql_add_endpoint("https://fake.soda.api/resource.json") \%>\%
as.character()
\} else \{
# Without pipes
soql_chain <- soql()
soql_chain <- soql_add_endpoint(soql_chain, "https://fake.soda.api/resource.json")
my_url <- as.character(soql_chain)
\}
}
|
42d13227e252036975c3e25f1e51f6b3bb7b5bc7
|
07689fc1a6e6cf546706f1642942fab4cb4e075b
|
/R/SpeedFilter.R
|
e7a3ee37b8825aa8f4c0a9f92995f8bd980a4cda
|
[] |
no_license
|
dvm1607/TrajDataMining
|
48e35418a9b18f2c8d81d81ee21d1a76744993e7
|
166baeeafa341fd4b0f8e6fc9137f3cdf294a6a4
|
refs/heads/master
| 2021-01-10T12:23:44.692776
| 2017-06-16T14:01:43
| 2017-06-16T14:01:43
| 52,975,737
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 742
|
r
|
SpeedFilter.R
|
# Generic for speedFilter(): filter a Track by a speed threshold.
# NOTE(review): .loadPackages() is a package-internal helper that attaches
# dependencies as a side effect every time the generic is called -- unusual
# placement (normally done in .onLoad); confirm it is intentional.
setGeneric(
  name = "speedFilter",
  def = function(A1,speed)
  {
    .loadPackages()
    standardGeneric("speedFilter")
  }
)
# speedFilter method for Track/numeric: keep the first and last points of
# the track plus every point whose outgoing connection speed is below
# `speed`, then rebuild a Track from those indices via the package-internal
# helper .IndexToTrack(). (Fix: removed the dead no-op statement
# `pointIndexsToKeep[1]` present in the original.)
setMethod(
  f = "speedFilter",
  signature = c("Track", "numeric"),
  definition = function(A1, speed)
  {
    # Degenerate input: nothing to filter.
    if (is.null(A1) || length(A1@sp) < 3) {
      return(A1)
    }
    # Seed the keep-list with the first and last point indices; duplicates
    # are possible (index 1 may be added again below), matching the
    # original behaviour.
    pointsToKeep <- list(1, length(A1@sp))
    # A track with n points has n - 1 connections; connection i starts at
    # point i. Keep point i when its connection speed is under the limit.
    for (i in seq_len(length(A1@sp) - 1)) {
      if (A1@connections$speed[i] < speed) {
        pointsToKeep <- c(pointsToKeep, i)
      }
    }
    .IndexToTrack(A1, pointsToKeep)
  }
)
|
a442d9b7653df230c6267f34c61de78c51164da3
|
45c9bbfd9290f210b7812b1c596ef8bbf71d766a
|
/R/two_by_two_contingency_table_test.R
|
1fb26beaf015e4cace9a27a18828d5689c68cb13
|
[] |
no_license
|
anhnguyendepocen/stat0002
|
0ecac4af4e85c6b5db05df10d49bd9bee1f21a69
|
66a6372043ef39cec18465d0cf339bb50e02bdf9
|
refs/heads/master
| 2023-01-02T01:12:46.374718
| 2020-10-16T20:27:34
| 2020-10-16T20:27:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,137
|
r
|
two_by_two_contingency_table_test.R
|
# ============================== two_by_two_movie =============================
#' Test for lack of association in a 2 by 2 contingency table
#'
#' A movie to study the distribution of the Pearson chi-squared test statistic
#' used to test for lack of association in a 2 by 2 contingency table.
#'
#' @param data A numeric 2 by 2 matrix, giving the observed frequencies of
#' a 2 by 2 contingency table.
#' @param bin_width A numeric scalar. The width of the bins in the histogram
#' of the test statistics plotted on the bottom on the movie.
#' @param pos A numeric integer. Used in calls to \code{\link{assign}}
#' to make information available across successive frames of a movie.
#' By default, uses the current environment.
#' @param envir An alternative way (to \code{pos}) of specifying the
#' environment. See \code{\link{environment}}.
#' @details The movie is split into three sections.
#' In the top left is a table displaying the contingency table based on
#' the frequencies in \code{data}, with row totals, column totals and the
#' grand total added. If \code{data} has row and column names then
#' only the first letters of these are added to the table.
#' In the top right is a similar table containing frequencies based on
#' simulated data. The simulated data has the same grand total as
#' \code{data}. The data are simulated under the assumption that the
#' value of the variable in the row of the table is not associated with
#' the value of the variable in the column of the table.
#' See Section 7.1.2 of the STAT0002 notes for details.
#'
#' Under each of these tables the calculation of the Pearson
#' chi-squared test statistic is given. Every time a new simulated dataset
#' is produced the value of the test statistic is added to a histogram
#' containing all the test statistics of simulated data produced.
#' The most recent simulated test statistic is indicated in this plot
#' with a red circle.
#' The test statistic produced from the real data is indicated in this plot
#' with a blue circle.
#' The p.d.f. of a chi-squared random variable with
#' one degree of freedom is superimposed on the plot.
#' If the expected frequencies based on the real data are sufficiently
#' large then the distribution of the test statistic under the null
#' hypothesis of no association has approximately this distribution.
#'
#' Three radio buttons enable the user to choose whether to simulate
#' 1, 100 or 1000 datasets.
#' @return Nothing is returned, only the animation is produced.
#' @seealso \code{\link{stat0002movies}}: general information about the movies.
#' @examples
#' # Ignore department
#' sex_outcome <- apply(UCBAdmissions, 2:1, FUN = sum)
#' colnames(sex_outcome) <- c("A", "R")
#' rownames(sex_outcome) <- c("M", "F")
#' two_by_two_movie(data = sex_outcome)
#'
#' # Conditon on department 1
#' sex_outcome_1 <- UCBAdmissions[, , 1]
#' colnames(sex_outcome_1) <- c("A", "R")
#' rownames(sex_outcome_1) <- c("M", "F")
#' two_by_two_movie(data = sex_outcome_1)
#'
#' # Conditon on department 2
#' sex_outcome_2 <- UCBAdmissions[, , 2]
#' colnames(sex_outcome_2) <- c("A", "R")
#' rownames(sex_outcome_2) <- c("M", "F")
#' two_by_two_movie(data = sex_outcome_2)
#' @export
two_by_two_movie <- function(data, bin_width = 0.25,
                             pos = 1, envir = as.environment(pos)) {
  # --- Input validation ---------------------------------------------------
  if (is.null(data)) {
    stop("data must be supplied")
  }
  # Check the shape first so non-matrix input gets the matrix error message
  # instead of failing inside a numeric comparison.
  if (!is.matrix(data)) {
    stop("data must be a matrix")
  }
  if (nrow(data) != 2 || ncol(data) != 2) {
    stop("data must be a matrix with 2 rows and 2 columns")
  }
  # anyNA() runs first so `data < 0` never sees missing values; the
  # is.infinite() test enforces the "finite" part of the message, which the
  # original check omitted (Inf slipped through).
  if (anyNA(data) || any(data < 0) || any(is.infinite(data))) {
    stop("all entries of data must be non-negative and finite")
  }
  if (sum(data) == 0) {
    stop("at least one entry of data must be positive")
  }
  # Pearson chi-squared test on the observed table, no continuity
  # correction; small-expected-count warnings are deliberately suppressed.
  correct <- FALSE
  real_test_res <- suppressWarnings(stats::chisq.test(data, correct = correct))
  # Abbreviate dimnames to their first letter so they fit the plotted table.
  if (!is.null(colnames(data))) {
    colnames(data) <- substr(colnames(data), 1, 1)
  }
  if (!is.null(rownames(data))) {
    rownames(data) <- substr(rownames(data), 1, 1)
  }
  # Add row and column sums and the grand total for display.
  real_data <- add_sums(data)
  # Sample size.
  n <- sum(data)
  # Cell probabilities under the null of no association: outer product of
  # the row and column marginals, scaled by n^2.
  p_hat <- (rowSums(data) %o% colSums(data)) / n ^ 2
  # Accumulator for simulated test statistics; stored in `envir` so that
  # successive movie frames (two_by_two_plot) can append to it.
  sim_test_stats <- NULL
  assign("sim_test_stats", sim_test_stats, envir = envir)
  tbt_panel <- rpanel::rp.control("2 x 2 contingency table simulation",
                                  real_data = real_data,
                                  real_test_res = real_test_res,
                                  n = n, p_hat = p_hat,
                                  bin_width = bin_width, correct = correct,
                                  envir = envir)
  nsim <- 1
  rpanel::rp.radiogroup(tbt_panel, nsim, c("1", "100", "1000"),
                        action = two_by_two_plot,
                        title = "Choose the number of 2 x 2 tables to simulate")
  rpanel::rp.do(tbt_panel, two_by_two_plot)
  return(invisible())
}
# Augment a 2 x 2 (or any) frequency matrix with a "total" column of row
# sums, a "total" row of column sums, and the grand total in the
# bottom-right cell.
add_sums <- function(x) {
  with_row_totals <- cbind(x, total = rowSums(x))
  rbind(with_row_totals, total = c(colSums(x), sum(x)))
}
# Annotate the current plot with the Pearson chi-squared calculation for a
# 2 x 2 table: the four (O - E)^2 / E terms plus the resulting statistic.
# `x` is an htest-like object with $observed, $expected and $statistic.
# The expression is written as plotmath starting at (x_loc, y_loc), split
# over two lines 0.25 user units apart; expected counts are shown rounded
# to 1 d.p. and the statistic to 2 d.p.
add_chi_squared_calc <- function(x_loc, y_loc, x) {
  txt_cex <- 1.35
  # Transpose before flattening so cells read in row-major (table) order.
  obs <- c(t(x$observed))
  exp_r <- c(round(t(x$expected), 1))
  first_line <- substitute(frac((a1 - b1) ^ 2, b1) +
                             frac((a2 - b2) ^ 2, b2),
                           list(a1 = obs[1], b1 = exp_r[1],
                                a2 = obs[2], b2 = exp_r[2]))
  graphics::text(x_loc, y_loc, first_line, cex = txt_cex, pos = 4)
  second_line <- substitute(+ frac((a3 - b3) ^ 2, b3) +
                              frac((a4 - b4) ^ 2, b4) == test_stat,
                            list(a3 = obs[3], b3 = exp_r[3],
                                 a4 = obs[4], b4 = exp_r[4],
                                 test_stat = round(x$statistic, 2)))
  graphics::text(x_loc, y_loc - 0.25, second_line, cex = txt_cex, xpd = TRUE,
                 pos = 4)
  invisible()
}
# Function to be called by two_by_two_sim_movie().
# Redraws one frame of the movie: real-data table (top left), the latest
# simulated table (top right), and a histogram of all simulated test
# statistics with the chi-squared(1) density (bottom).
# `panel` is the rpanel control object; real_data, real_test_res, n, p_hat,
# bin_width, correct, nsim and envir are all fields of it (see
# two_by_two_movie), which is why they are visible inside with(panel, ...).
two_by_two_plot <- function(panel) {
  with(panel, {
    # Save and restore graphics state so repeated frames don't leak par().
    old_par <- graphics::par(no.readonly = TRUE)
    on.exit(graphics::par(old_par))
    # Create layout of
    # 1. contingency table of real data on the top left
    # 2. contingency table of simulated data on the top right
    # 3. histogram and chi-squared density on the bottom
    graphics::layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE),
                     widths = c(1, 1), heights = c(1, 1))
    graphics::par(oma = c(0, 0, 0, 0), mar = c(2.5, 2, 1, 2) + 0.1)
    # 1. Produce the table on the top left
    dum_x <- c(0, 0, 1, 1)
    dum_y <- c(0, 1, 0, 1) # summary data for plot
    graphics::plot(dum_x, dum_y, type = "n", ann = FALSE, axes = FALSE)
    my_table <- real_data
    plotrix::addtable2plot(0.5, 0.8, my_table, cex = 1.5, bty = "n",
                           display.rownames = TRUE, display.colnames = TRUE,
                           hlines = TRUE, vlines = TRUE, title = "",
                           xpad = 0.5, ypad = 1.2, xjust = 0.5, yjust = 0.5,
                           text.col = 1:5)
    add_chi_squared_calc(0., 0.275, real_test_res)
    # 2. Produce the table on the top right
    # Simulate nsim 2 x 2 tables under the null hypothesis that the margins
    # are independent
    # Helper: chi-squared statistic only, for one flattened 2 x 2 table.
    chisq_test_fun <- function(x) {
      temp <- suppressWarnings(stats::chisq.test(matrix(x, nrow = 2),
                                                 correct = correct)$statistic)
      return(temp)
    }
    # NOTE(review): full_chisq_test_fun is defined but never used in this
    # frame -- possibly left over from an earlier version.
    full_chisq_test_fun <- function(x) {
      temp <- suppressWarnings(stats::chisq.test(matrix(x, nrow = 2),
                                                 correct = correct))
      return(temp)
    }
    # Each column of big_sim_data is one simulated table (multinomial under
    # the null, cell probabilities p_hat); the last column is displayed.
    big_sim_data <- stats::rmultinom(nsim, n, p_hat)
    sim_data <- matrix(big_sim_data[, ncol(big_sim_data)], nrow = 2)
    add_sim_test_stats <- apply(big_sim_data, 2, chisq_test_fun)
    # Store the new values of the test statistics
    sim_test_stats <- c(sim_test_stats, add_sim_test_stats)
    # Repeat the most recent test for displaying on the plot
    sim_test_res <- suppressWarnings(stats::chisq.test(sim_data))
    # Persist the accumulated statistics back into the caller's environment
    # so the next frame can append to them.
    assign("sim_test_stats", sim_test_stats, envir = envir)
    graphics::plot(dum_x, dum_y, type = "n", ann = FALSE, axes = FALSE)
    graphics::title(main = "simulated 2 x 2 table", line = -0.25)
    # Add row and column sums and the total frequency
    my_table <- add_sums(sim_data)
    plotrix::addtable2plot(0.5, 0.725, my_table, cex = 1.5, bty = "n",
                           display.rownames = FALSE, display.colnames = FALSE,
                           hlines = TRUE, vlines = TRUE, title = "",
                           xpad = 0.5, ypad = 1.2, xjust = 0.5, yjust = 0.5,
                           text.col = 1:5)
    # Performs chi-squared test on the real data
    add_chi_squared_calc(0, 0.275, sim_test_res)
    # 3. Produce the bottom plot
    # Axis extends to at least 10 so the chi-squared(1) tail is visible.
    big_val <- max(10, ceiling(max(sim_test_stats)))
    my_breaks <- seq(0, big_val, by = bin_width)
    max_dens <- max(graphics::hist(sim_test_stats, plot = FALSE,
                                   breaks = my_breaks)$density)
    max_y <- max(max_dens, 1.1)
    graphics::hist(sim_test_stats, probability = TRUE, col = 8,
                   xlim = c(0, big_val), ylim = c(0, max_y), main = "",
                   breaks = my_breaks,
                   axes = FALSE, ann = FALSE)
    graphics::axis(1, pos = 0, mgp = c(3, 0.5, 0))
    graphics::axis(2, pos = 0)
    graphics::title(xlab = "sum of squared Pearson residuals (test statistic)",
                    line = 1.2, cex.lab = 1.5)
    graphics::title(ylab = "density", line = 0.6, cex.lab = 1.5)
    graphics::curve(stats::dchisq(x, df = 1), from = 0, to = big_val,
                    n = 500, lty = 1, lwd = 2, add = TRUE)
    # Legend wording changes when the real statistic is off the x-axis.
    if (real_test_res$statistic > big_val) {
      graphics::legend("topright",
                       legend = c(expression(paste(chi[1]^2," density")),
                                  "last simulated", "real (off the scale!)"),
                       lty = c(1, -1, -1), lwd = c(2, 0, 0),
                       pch = c(-1, 16, 16), col = c("black", "red", "blue"),
                       pt.cex = 2, cex = 1.5)
    } else {
      graphics::legend("topright",
                       legend = c(expression(paste(chi[1]^2," density")),
                                  "last simulated", "real"),
                       lty = c(1, -1, -1), lwd = c(2, 0, 0),
                       pch = c(-1, 16, 16), col = c("black", "red", "blue"),
                       pt.cex = 2, cex = 1.5)
    }
    # Mark the most recent simulated statistic (red) and the real one (blue).
    graphics::points(sim_test_res$statistic, 0, pch = 16, cex = 2,
                     col = "red")
    graphics::points(real_test_res$statistic, 0, pch = 16, cex = 2,
                     col = "blue")
  })
  return(invisible(panel))
}
|
183b51473ef361f3ba82c9da098168f989040a96
|
460adc8d1918d26cff8f4c00124d65193d561ce2
|
/R/dataTableModule.R
|
34c996e68d521424c1f32856e8dbf9e2a8757cf9
|
[
"Apache-2.0"
] |
permissive
|
paul-shannon/shinyModules
|
74ad8ee492e9cb9c9d82119b616296028369c8d3
|
faecba65fb8d77149f4b9c34441b17e67bf35f82
|
refs/heads/master
| 2022-12-06T13:55:59.807356
| 2020-09-12T19:34:08
| 2020-09-12T19:34:08
| 272,457,191
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,160
|
r
|
dataTableModule.R
|
#----------------------------------------------------------------------------------------------------
#' the UI for a DataTable shiny module
#'
#' @import shiny
#'
#' @param id the html document's widget id
#'
#' @aliases dataTableUI
#' @rdname dataTableUI
#'
#' @export
#'
# Module UI: a single DT table output whose id is namespaced under `id`,
# matching output$dataTable in dataTableServer().
dataTableUI <- function(id) {
  ns <- NS(id)
  tagList(DT::DTOutput(ns("dataTable")))
}
#----------------------------------------------------------------------------------------------------
#' the server for a DataTable shiny module
#'
#' @param input enviroment provided by shiny
#' @param output enviroment provided by shiny
#' @param session enviroment provided by shiny
#' @param tbl data.frame
#' @param selectionPolicy character string, "none", "single", or "multiple"
#' @param wrapLongTextInCells logical, TRUE or FALSE
#' @param searchString character string, selects all rows with this, default "" (no search)
#' @param rownames.to.display character vector, default "all",
#'
#' @aliases dataTableServer
#' @rdname dataTableServer
#'
#' @export
#'
# Module server for a DT data table.
# `tbl` and all option arguments are reactives; `input`/`output`/`session`
# are accepted only for backward compatibility with the old callModule()
# style -- moduleServer() supplies its own copies.
# Returns a reactive giving the row names of the currently selected rows.
dataTableServer <- function(id, input, output, session,
                            tbl,
                            selectionPolicy=reactive("multiple"),
                            wrapLongTextInCells=reactive(TRUE),
                            searchString=reactive(""),
                            rownames.to.display=reactive("all")
                            ){

   moduleServer(id, function(input, output, session){

      output$dataTable <- DT::renderDataTable({
         tbl.sub <- tbl()
         rownames <- rownames.to.display()
         # Empty selection -> render an empty table; "all" -> full table;
         # otherwise subset to the requested (and existing) row names.
         if(length(rownames) == 0){
            tbl.sub <- data.frame()
         }else{
            if(rownames[1] == "all")
               tbl.sub <- tbl()
            else{
               # Bug fix: `tbl` is a reactive, so the row names live on
               # tbl(), not on the function object itself. The original
               # rownames(tbl) was always NULL, which emptied the
               # intersection and silently showed the full table.
               rownames <- intersect(rownames, rownames(tbl()))
               if(length(rownames) > 0)
                  tbl.sub <- tbl()[rownames,]
               } # else
            } # major else
         selectionOption <- list(mode=selectionPolicy(), selected=NULL)
         searchString <- searchString()
         # " - " acts as a sentinel meaning "no initial search"
         # (NOTE(review): confirm callers rely on this convention).
         searchOptions <- list()
         if(searchString != " - ")
            searchOptions <- list(search=searchString, caseInsensitive=TRUE)
         # "nowrap" keeps long cell text on one line with a scrollbar.
         DTclass <- "display"
         if(!wrapLongTextInCells())
            DTclass <- paste0(DTclass, " nowrap")
         DT::datatable(tbl.sub,
                       rownames=TRUE,
                       class=DTclass,
                       options=list(dom='<lfip<t>>',
                                    scrollX=TRUE,
                                    search=searchOptions,
                                    lengthMenu = c(3,5,10,50),
                                    pageLength = 5,
                                    paging=TRUE),
                       selection=selectionOption)
         }) # renderDataTable

      # Reactive result: row names of the user-selected rows.
      tableSelection <- reactive({
         rownames(tbl())[input$dataTable_rows_selected]
         })

      return(tableSelection)

      }) # moduleServer

} # dataTableServer
#----------------------------------------------------------------------------------------------------
# printf-style console helper: sprintf() formatting printed without quotes.
printf <- function(...) {
  formatted <- noquote(sprintf(...))
  print(formatted)
}
|
66413ad72ca854af4600a14d492e9eba06e7d08c
|
28908345906995f998f6585f7b4d4bc8dd0d467f
|
/docker/Rpackages.R
|
ff8a19e16a6a336342ae8105d63c9bddbd58da35
|
[] |
no_license
|
OpenMS/usage_plots
|
2447120f2f14e82fe727efdd823cadb4fbf99fff
|
c3526c49b3effa76fbc2fb0a4b420196d98a50c4
|
refs/heads/master
| 2023-05-11T01:23:01.425200
| 2023-05-08T13:13:55
| 2023-05-08T13:13:55
| 95,778,185
| 2
| 4
| null | 2023-05-08T13:13:56
| 2017-06-29T13:02:26
|
Python
|
UTF-8
|
R
| false
| false
| 155
|
r
|
Rpackages.R
|
# Docker build step: install the CRAN packages required by the usage-plot
# scripts, then quit the session. NOTE(review): q() uses the default save
# behaviour -- fine under Rscript/R CMD (non-interactive), where no
# workspace prompt is raised.
install.packages(c('ggplot2','scholar','knitr','rmarkdown','leaflet','rworldmap','lattice','RColorBrewer','sp'), repos='http://cran.us.r-project.org')
q()
|
c839f66f14915e47812de2d13d9d9aa3d31fcc11
|
4e03620bcddf4662f1871f7fe2b61e5f5aceef3d
|
/R/RcppExports.R
|
7a49e33cfc400eb287f62bb849bcd114b4f32b47
|
[] |
no_license
|
pitakakariki/lineprof
|
c898eca166584c8d14536662383d15454c739a3e
|
499a79780d58f58337b3ef78b8f31f5c750e68e6
|
refs/heads/master
| 2021-01-12T20:49:34.025621
| 2015-01-15T05:16:28
| 2015-01-15T05:16:28
| 29,280,271
| 1
| 0
| null | 2015-01-15T04:13:30
| 2015-01-15T04:13:30
| null |
UTF-8
|
R
| false
| false
| 758
|
r
|
RcppExports.R
|
# This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# NOTE(review): auto-generated wrappers around compiled lineprof routines;
# regenerating with Rcpp::compileAttributes() will discard hand edits.

# Wrapper for the C++ routine lineprof_contains (implementation not
# visible here; presumably a membership test -- confirm in src/).
contains <- function(haystack, needle) {
    .Call('lineprof_contains', PACKAGE = 'lineprof', haystack, needle)
}

# Wrapper for lineprof_firstTRUE (presumably the index of the first TRUE
# element -- confirm in src/).
firstTRUE <- function(x) {
    .Call('lineprof_firstTRUE', PACKAGE = 'lineprof', x)
}

# Wrapper for lineprof_parseLineProfileRefs (C++ parser for profiler
# source references).
parseLineProfileRefs <- function(input) {
    .Call('lineprof_parseLineProfileRefs', PACKAGE = 'lineprof', input)
}

#' Pause execution
#'
#' This is similar to \code{\link{Sys.sleep}} but is captured during
#' profiling, making it useful when generating simple examples.
#'
#' @export
#' @param sec Number of seconds to pause (millsecond resolution).
pause <- function(sec) {
    invisible(.Call('lineprof_pause', PACKAGE = 'lineprof', sec))
}
|
050a6ba57cebfdfd065f0b22cd489c8a77ef3ee2
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/8642_1/rinput.R
|
ec7746ef519424cea428f628b40614d9b991effc
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot the Newick phylogenetic tree in 8642_1.txt and write the unrooted
# copy alongside it (used downstream by codeml/PAML, per the file path).
library(ape)
testtree <- read.tree("8642_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8642_1_unrooted.txt")
|
79cfd74b6cccd0ffb656da83cd1deff9ecee8eb9
|
7056b32ad7ae1d92599304606793afe43ae195d4
|
/R/baseliner_quick.R
|
1727e7b0cabe400a84ba43e6f2cf68b8e1b6d8ee
|
[] |
no_license
|
ahopple/PREMIS-sapflow
|
3eb0f451365ad9703903f809db8bd7b3e6d6dde4
|
ae637ee2aebbb3895efcde901fc2f9d4f3977df4
|
refs/heads/master
| 2023-02-20T04:46:36.444707
| 2021-01-21T21:25:22
| 2021-01-21T21:25:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,978
|
r
|
baseliner_quick.R
|
# Script to process sapflow and PAR/VPD data for use in Baseliner
# Stephanie Pennington | Created August 27, 2019
# Reads the east/west CR1000 logger tables and the HOBO met file, builds a
# regular 30-min time series joined with PAR and VPD, and writes one
# Baseliner-format CSV per plot (plot id 1 = east, 2 = west).

# Load packages
library(readr)
library(lubridate)
library(tibble)
library(tidyr)
library(dplyr)

# BC files are tab delim
# NOTE(review): despite the comment above, both files are read with
# read_csv() (comma-separated) -- and the east file has a .dat extension.
# Confirm the delimiter actually is a comma.
west <- read_csv("../BC/CR1000 BC WEST_Table1_201908.csv",
                 col_names = c("Timestamp", "Record", "W1", "W2", "W3", "W4", "W5", "W6", "W7", "W8"),
                 skip = 4)
west$Timestamp <- ymd_hms(west$Timestamp)

east <- read_csv("../BC/CR1000 BC EAST_Table1_201908.dat",
                 col_names = c("Timestamp", "Record", "E1", "E2", "E3", "E4", "E5", "E6", "E7", "E8"),
                 skip = 4)
east$Timestamp <- ymd_hms(east$Timestamp)

# met data
met_BC <- read_csv("../BC/BC13_MET_June_24_2019.csv", skip = 1)
met_BC$Timestamp <- mdy_hms(met_BC$'Date Time, GMT-07:00')

# pull out met data needed for Baseliner
# Split the timestamp into Date and HHMM (Baseliner wants time as "HHMM"),
# then rebuild a proper POSIXct for joining.
east_time <- east %>%
  separate(Timestamp, into = c("Date", "time"), sep = " ") %>%
  separate(time, into = c("h", "m"), sep = ":")
east_time$time <- paste0(east_time$h, east_time$m)
east_time$Timestamp <- as.POSIXct(paste0(east_time$Date, " ", east_time$h,":", east_time$m, ":00"))

# VPD from temperature and RH. NOTE(review): the saturation vapour
# pressure formula uses 273.3 in the denominator; the standard Tetens
# constant is 237.3 degC -- confirm this is intentional and not a typo.
# The /10 converts to the units Baseliner expects (presumably kPa).
wx_BC <- tibble(Timestamp = ymd_hms(met_BC$Timestamp),
                PAR = met_BC$`PAR, µmol/m²/s (LGR S/N: 20418939, SEN S/N: 20419083)`,
                es = (0.6108 * exp((17.27 * met_BC$`Temp, °C (LGR S/N: 20418939, SEN S/N: 20411961)`)/(273.3 + met_BC$`Temp, °C (LGR S/N: 20418939, SEN S/N: 20411961)`)))/10,
                VPD = ((100 - met_BC$`RH, % (LGR S/N: 20418939, SEN S/N: 20411961)`) * es)/100)

# Regular 30-min grid spanning the east record; gaps become NA rows after
# the join (converted to NaN below for Baseliner).
t.series.east <-tibble(Timestamp = seq(from=as.POSIXct(first(east$Timestamp)),
                                       to=as.POSIXct(last(east$Timestamp)),
                                       by="30 min"))

ebl <- left_join(t.series.east, east_time, by = "Timestamp")
east.bl <- left_join(ebl, wx_BC)

# Baseliner column layout: plot id, Year, DOY, HHMM, VPD, PAR, 8 sensors.
east.bl <- tibble(Plot.ID = rep(1, nrow(east.bl)),
                  Year = year(east.bl$Timestamp),
                  DOY = yday(east.bl$Timestamp),
                  Time = east.bl$time,
                  VPD = east.bl$VPD,
                  PAR = east.bl$PAR,
                  E1 = east.bl$E1,
                  E2 = east.bl$E2,
                  E3 = east.bl$E3,
                  E4 = east.bl$E4,
                  E5 = east.bl$E5,
                  E6 = east.bl$E6,
                  E7 = east.bl$E7,
                  E8 = east.bl$E8)

east.bl[is.na(east.bl)] <- NaN

##### WEST #####
# Same pipeline as the east plot, repeated for the west logger.

west_time <- west %>%
  separate(Timestamp, into = c("Date", "time"), sep = " ") %>%
  separate(time, into = c("h", "m"), sep = ":")
west_time$time <- paste0(west_time$h, west_time$m)
west_time$Timestamp <- as.POSIXct(paste0(west_time$Date, " ", west_time$h,":", west_time$m, ":00"))

t.series.west <-tibble(Timestamp = seq(from=as.POSIXct(first(west$Timestamp)),
                                       to=as.POSIXct(last(west$Timestamp)),
                                       by="30 min"))

wbl <- left_join(t.series.west, west_time, by = "Timestamp")
west.bl <- left_join(wbl, wx_BC, by = "Timestamp")

west.bl <- tibble(Plot.ID = rep(2, nrow(west.bl)),
                  Year = year(west.bl$Timestamp),
                  DOY = yday(west.bl$Timestamp),
                  Time = west.bl$time,
                  VPD = west.bl$VPD,
                  PAR = west.bl$PAR,
                  W1 = west.bl$W1,
                  W2 = west.bl$W2,
                  W3 = west.bl$W3,
                  W4 = west.bl$W4,
                  W5 = west.bl$W5,
                  W6 = west.bl$W6,
                  W7 = west.bl$W7,
                  W8 = west.bl$W8)

west.bl[is.na(west.bl)] <- NaN

# Baseliner input files have no header row.
write_csv(east.bl, "../BC/east_baseliner.csv", col_names = FALSE)
write_csv(west.bl, "../BC/west_baseliner.csv", col_names = FALSE)
|
76bfbca553c73f642cf06a24965443552fad0d12
|
fff8e5b247a2ec63521c878b17355b6ccefd9e28
|
/habitat - pca.R
|
919cb8ca40e58ce1cb54c4bb2cbc72a31a7eeef8
|
[] |
no_license
|
coricarver/urbanization-index
|
4e44e47f4d87535fc5776f41618235efb884935e
|
3670aa6b3f9872922c8d0f9adb4742bbe61e0db5
|
refs/heads/master
| 2021-12-12T13:06:44.917307
| 2017-01-13T17:31:07
| 2017-01-13T17:31:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,206
|
r
|
habitat - pca.R
|
############################
## habitat - pca.R
##
## Creating the Urbanization Index
############################

#########
## Data
#########

# Load and combine all GIMP-created data files
hab <- do.call('rbind',
               lapply(list.files(path = "./data/", pattern = ".txt$", full.names=T),
                      FUN = function(x) read.csv(x)))
head(hab)

# If you wish, combine different parameters to get values of natural vegetation
hab$natural <- hab$trees + hab$bushes + hab$naturalgrass
hab <- hab[, -grep("trees|bushes|naturalgrass", names(hab))]

# Make each a percentage of the total
# NOTE(review): the zero-total sanity check below runs *after* this
# division, so any row with total == 0 has already produced NaN here.
hab$total <- rowSums(hab[, -1])
hab[, -grep("ID|total", names(hab))] <- hab[, -grep("ID|total", names(hab))] / hab$total

# Make sure all worked (should show nothing)
hab[hab$total == 0,]

# Get a data frame with the variables of interest for creating a pca index
hab.pca <- hab[, c("natural", "grass", "pavement", "buildings")]

## /*---------*/
##' ## PCA
## /*---------*/

# PCA on scaled/centred cover fractions; PC1 becomes the urbanization index.
pca <- prcomp(~., data = hab.pca, scale = TRUE, center = TRUE)
pca
summary(pca)

# Broken-stick scree plot to judge how many components are meaningful.
library(vegan)
screeplot(pca, bstick = TRUE)

##' Save the index
hab$hab <- predict(pca)[,1]

write.csv(hab, "./pca.csv", row.names = FALSE)
|
abb38d6aab7447cfa8707667971841546a4b3094
|
8c0b0e9a59c7230f3a23dbe8b29d2cde68733524
|
/man/gard_reptiles.Rd
|
724fa4f5c482844ad4fb1fd204bcf97b21f93fbf
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-other-copyleft",
"GPL-3.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later",
"GPL-3.0-only"
] |
permissive
|
RS-eco/rasterSp
|
33754976f86858511ccd6562fd16d47205a9e0df
|
8c007900ceb2679f8aee04aec4d936648df5191b
|
refs/heads/main
| 2023-01-11T11:49:02.325118
| 2023-01-09T08:00:37
| 2023-01-09T08:00:37
| 225,630,913
| 19
| 7
|
MIT
| 2021-07-07T07:15:15
| 2019-12-03T13:48:02
|
R
|
UTF-8
|
R
| false
| true
| 424
|
rd
|
gard_reptiles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rasterSp-package.R
\docType{data}
\name{gard_reptiles}
\alias{gard_reptiles}
\title{Latin names of Reptile Species}
\format{
A \code{data.frame} with 10064 observations and 8 variables.
}
\description{
Latin names of Reptile Species
}
\details{
This dataset contains all Latin Names of Reptiles
that were derived from GARD ranges, version 1.1.
}
|
19a2d84567aa9b4e4d17128751b20270c2755f03
|
ce6bfb815cf9f86c4fa051f2dd5a821c4dfdf451
|
/plot2.R
|
c67fab6d55b19c39336a3c711543a6647a06301b
|
[] |
no_license
|
lshizue/ExData_Plotting1
|
76f4c7fa4c79e009e2369f89ff5abde8b866d9e0
|
533eb1a3f48d77eb7677281289bdbce3dc074924
|
refs/heads/master
| 2021-01-12T20:40:52.288017
| 2015-01-10T18:48:52
| 2015-01-10T18:48:52
| 29,036,006
| 0
| 0
| null | 2015-01-09T21:23:40
| 2015-01-09T21:23:37
| null |
UTF-8
|
R
| false
| false
| 587
|
r
|
plot2.R
|
# Render plot2.png: Global Active Power (kW) over time for the course
# power-consumption dataset. Relies on the global data frame `dados`
# created by loadData() in load.R.
plot2 <- function(){
    #loading data if not available
    if(!exists("dados")) {
        source("load.R")
        loadData()
    }
    # create png file
    png(filename="plot2.png",
        width=480,
        height=480,
        units="px",
        bg="transparent")
    # NOTE(review): library(datasets) appears unused here.
    library(datasets)
    #ploting empty canvas
    # par() is not restored, but the device is closed below so it only
    # affects this png.
    par(mar= c(1, 4.1, 4.1, 2.1))
    # NOTE(review): x-axis uses dados$Date only -- if the assignment needs
    # Date+Time combined into a datetime, confirm load.R builds it that way.
    plot(dados$Date,
         dados$Global_active_power,
         type="n",
         xlab="",
         ylab="Global Active Power (kilowatts)")
    # plotting line
    lines(dados$Date, dados$Global_active_power)
    # closing png file
    dev.off()
}
plot2()
|
0a1c8b26f5ce572fa0211281814dec2a04eed9d0
|
0251ab23f6bbdfd51f101706d95480831cc3408e
|
/tests/testthat/test-skeleton_weight.R
|
58034bda54c7f3fc096d3b41f1e6edaa65459f27
|
[
"MIT"
] |
permissive
|
EcoNum/coral.growth
|
36462d26240bb3c6c73adca0ea7a77c31851700f
|
87a02ee2f00d4b402508fadbad9c8c181a59db9c
|
refs/heads/master
| 2020-05-24T08:24:25.171782
| 2020-05-04T11:33:18
| 2020-05-04T11:33:18
| 187,184,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 185
|
r
|
test-skeleton_weight.R
|
context("test-skeleton_weight")

# skeleton_weight() must reject a non-numeric buoyant weight.
test_that("a character variable in skeleton_weight()", {
  expect_error(skeleton_weight(buoyant_weight = "a", S = 35, T = 25,rho_aragonite = 2930))
})
|
9762ee9a44e73a22fc7729c4529bba095d0dbfa2
|
e02d552c115e3da97a085598414507d0813a7591
|
/figure.1/generate.miRNA.density.R
|
177d5f169ee386d61fd68132bf44ea5678bb7b5b
|
[] |
no_license
|
Nodine-Group/sRNA-spike-ins
|
f7a22b11b1398d4a2da9d2a439deb55a20392915
|
ab1bdacaf310ade9c8fc76a38bc08b8b8fb271c8
|
refs/heads/master
| 2020-03-13T07:39:43.037831
| 2018-02-26T13:12:59
| 2018-02-26T13:12:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,931
|
r
|
generate.miRNA.density.R
|
#generate.miRNA.density.R
#generates density plot for just miRNA fams in col0_fb1 for Fig. 1D

# NOTE(review): install.packages() runs on every execution and setwd()
# hard-codes a machine-specific path -- both make the script
# non-reproducible as-is (the inline comment below acknowledges the path).
install.packages("extrafont")
library("extrafont")
loadfonts()

setwd('/Volumes/nodine/lab/members/michael/compFiles/sRNAs/projects/apps/smallRNA_spikeIns/scripts.for.ms/') #user will have to change this to their working directory
# Estimate absolute molecule counts for small-RNA families in one sample.
# `sample` is a sample-name prefix (two bioreps, suffixes 1 and 2); `type`
# selects the per-family rpm file (e.g. "miR_fams"). For each biorep a
# log-log linear model is fit to the spike-in dose-response table and used
# to convert rpm to molecules; families with >= 1 rpm in both reps are
# returned as a matrix of per-biorep molecule estimates (per million).
getVals <- function(sample,type){
  sample_1 = paste(sample,'1',sep='')
  sample_2 = paste(sample,'2',sep='')
  # Spike-in dose-response tables (known molecules vs observed rpm).
  data_1 = read.table(paste("methods.data.analysis/small.RNA.Seq/",sample_1,"/smRNA_spikeIns/doseResponseTable_noTransform",sep=""), header=TRUE, sep="\t", row.names=1, strip.white=T)
  data_2 = read.table(paste("methods.data.analysis/small.RNA.Seq/",sample_2,"/smRNA_spikeIns/doseResponseTable_noTransform",sep=""), header=TRUE, sep="\t", row.names=1, strip.white=T)
  #now fit linear model and use to predict mpe
  fit_spike_log_1 = lm(log(data_1$mlcs,10) ~ log(data_1$rpm..mean.,10))
  fit_spike_log_2 = lm(log(data_2$mlcs,10) ~ log(data_2$rpm..mean.,10))
  #extract individual miR/tasiR families and take average of individual ones with at these 1 rpm in both
  # sample_1/sample_2 are reused here as family-level file names.
  sample_1 = paste(sample,'1','_',type,sep='')
  sample_2 = paste(sample,'2','_',type,sep='')
  vals_1 = read.table(paste("methods.data.analysis/small.RNA.Seq/data.for.graphs/",sample_1,sep=""), header=TRUE, row.names=1, sep="\t", strip.white=T, quote="")
  vals_2 = read.table(paste("methods.data.analysis/small.RNA.Seq/data.for.graphs/",sample_2,sep=""), header=TRUE, row.names=1, sep="\t", strip.white=T, quote="")
  #select "expressed" small RNAs; i.e. at least 1 rpm
  #also take log10 as this will be used for estimations
  sub_1 = subset(vals_1, rpm >= 1)
  sub_2 = subset(vals_2, rpm >= 1)
  sub_1 = log(sub_1,10)
  sub_2 = log(sub_2,10)
  sub_1 = as.data.frame(sub_1)
  sub_2 = as.data.frame(sub_2)
  # Keep only families expressed in BOTH bioreps (inner merge on row names).
  sub = merge(sub_1,sub_2, by = "row.names")
  row.names(sub) = sub$Row.names
  names(sub) = c('Row.names','biorep1','biorep2')
  sub = subset(sub, select = c(biorep1,biorep2))
  #estimate individual molecules
  # Apply each biorep's regression: log10(molecules) = b0 + b1*log10(rpm).
  coeffs_1 = coefficients(fit_spike_log_1)
  coeffs_2 = coefficients(fit_spike_log_2)
  sub_matrix = cbind(coeffs_1[1] + coeffs_1[2]*sub$biorep1, coeffs_2[1] + coeffs_2[2]*sub$biorep2) #, mean(c(coeffs_1[1] + coeffs_1[2]*sub$biorep1,coeffs_2[1] + coeffs_2[2]*sub$biorep2)))
  dimnames(sub_matrix)=list(c(row.names(sub)),c("biorep1","biorep2"))
  #take exponential values and divide by one million for more meaningful values
  final_matrix = (10 ^ sub_matrix)/1000000
  return(final_matrix)
}
col0_fb_miR = getVals('col0_fb','miR_fams')
col0_fb_miR_vals = log(rowMeans(col0_fb_miR) * 1000000,10)
pdf("figure.1/fb_miRNA_density_log10.pdf",family='Arial', useDingbats=F)
plot(density(col0_fb_miR_vals), type="h", col="grey", main="miRNA levels in flowers", xlab="Molecules per ug", ylab="Density", las=1)
abline(v=median(col0_fb_miR_vals),lty=2,lwd=2,col='black')
dev.off()
|
43cf971f1da378f10b86951fbb91da5910f3dc10
|
57b2f7991b35074babf10e600183a255525a8053
|
/stimuli/NS01stimuli.R
|
9acbbc949c2f77ae8fe21c636e12f23d211a8556
|
[] |
no_license
|
Nian-Jingqing/NS01
|
8d1b4ffe1ed7db12975aa0c4d0955b9f59cf5549
|
59166f3201207a7fc597c5838ef61e1d6e3c6779
|
refs/heads/master
| 2023-03-17T19:40:11.710487
| 2020-04-07T16:11:25
| 2020-04-07T16:11:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,742
|
r
|
NS01stimuli.R
|
# NS01 Generate choices and get stimuli.
# Original data from Tim Mullett
# C. E. R. Edmunds - Created - 7-12-2018
# Setup --------------------------------------------------------------------------------------------
rm(list=ls()) # Clear all variables in the workspace
require(data.table)
femaleRatings <- fread("IAPSratings/femratings.csv")
maleRatings <- fread("IAPSratings/maleratings.csv")
allRatings <- fread("IAPSratings/allratings.csv")
maxValue <- 7
minValue <- 5
halfRange <- (maxValue-minValue)/2
midPoint <- halfRange + minValue
nPpts <- 80
nTrials <- 100
NS01ratings <- data.table(IAPS = as.integer(femaleRatings$IAPS),
fValue = femaleRatings$valmn,
mValue = maleRatings$valmn,
aValue = allRatings$valmn)
picturesMissing <- c(3005, 2745, 2055, 2375)
pornographic <- c(4698, 4693, 4677, 4672, 4666, 4650, 4604)
aspectRatioWrong <- c(1121, 1313, 1661, 2320, 2381, 2385, 2394, 2485, 2487, 2495, 2499, 2518, 2580,
2600, 4601, 4605, 4606, 4609, 7236, 7249, 7281, 7283, 7284, 7285, 7289, 7402,
7481, 7504, 8220, 8241, 8178, 8050)
NS01ratings <- NS01ratings[!IAPS %in% c(picturesMissing, pornographic, aspectRatioWrong),]
# Subset to get the items we want (i.e. within (minValue, maxValue), and with less than 1.5 diff
# between genders
NS01ratings[, keep:=ifelse(abs(fValue-midPoint) < halfRange &
abs(mValue-midPoint) < halfRange &
abs(aValue-midPoint) < halfRange &
abs(mValue-fValue) < 1.5, 1, 0)]
# Remove unwanted items
NS01ratings <- NS01ratings[!keep==0,]
# Clear unneeded data from workspace
rm(allRatings, femaleRatings, maleRatings)
NS01choices <- data.table(ParticipantNo = rep(1:nPpts, each=nTrials),
lChoice = 0L,
lValue = 0,
rChoice = 0L,
rValue = 0)
# Generate choices
for(iPpt in 1:nPpts){
itemSample <- sample(1:nrow(NS01ratings), 2*nTrials, replace=F)
NS01choices[ParticipantNo==iPpt, c("lChoice", "lValue", "rChoice", "rValue") :=
.(NS01ratings$IAPS[itemSample[1:nTrials]],
NS01ratings$aValue[itemSample[1:nTrials]],
NS01ratings$IAPS[itemSample[(nTrials+1):(2*nTrials)]],
NS01ratings$aValue[itemSample[(nTrials+1):(2*nTrials)]])]
}
# Saving choices
fwrite(NS01choices, "NS01choices.csv")
fwrite(NS01choices, "../experimentCode/Data/NS01choices.csv")
filenames <- paste("IAPSpictures/",unique(c(NS01choices$lChoice, NS01choices$rChoice)), ".jpg",
sep="")
file.copy(filenames, "../experimentCode/Stimuli")
|
06726eb7d6fa36e35fcd56f9d498e69379f2ffa9
|
bc3a58c0f3abd24f4f64f641152c09b79efefe38
|
/man/GDSFileResourceResolver.Rd
|
cffeb7c75017b411ae124c9e4134dbc86572328a
|
[
"MIT"
] |
permissive
|
isglobal-brge/dsOmics
|
96aa2594cbe009f2899d99fdc5be43a96f50d6bf
|
78fee19320cdf360db7ec1aed2fb07ee4c533951
|
refs/heads/master
| 2023-04-07T09:23:17.202083
| 2023-03-15T09:31:40
| 2023-03-15T09:31:40
| 158,839,360
| 1
| 12
|
MIT
| 2021-02-02T10:21:06
| 2018-11-23T13:55:17
|
R
|
UTF-8
|
R
| false
| true
| 2,522
|
rd
|
GDSFileResourceResolver.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GDSFileResourceResolver.R
\docType{class}
\name{GDSFileResourceResolver}
\alias{GDSFileResourceResolver}
\title{GDS file resource resolver}
\format{
A R6 object of class GDSFileResourceResolver
}
\description{
Build a GDS resource client from a resource object describing access to a
GDS file or a VCF file to be converted into a GDS file.
}
\section{Super class}{
\code{\link[resourcer:ResourceResolver]{resourcer::ResourceResolver}} -> \code{GDSFileResourceResolver}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-GDSFileResourceResolver-isFor}{\code{GDSFileResourceResolver$isFor()}}
\item \href{#method-GDSFileResourceResolver-newClient}{\code{GDSFileResourceResolver$newClient()}}
\item \href{#method-GDSFileResourceResolver-clone}{\code{GDSFileResourceResolver$clone()}}
}
}
\if{html}{\out{
<details open><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="resourcer" data-topic="ResourceResolver" data-id="initialize"><a href='../../resourcer/html/ResourceResolver.html#method-ResourceResolver-initialize'><code>resourcer::ResourceResolver$initialize()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-GDSFileResourceResolver-isFor"></a>}}
\if{latex}{\out{\hypertarget{method-GDSFileResourceResolver-isFor}{}}}
\subsection{Method \code{isFor()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{GDSFileResourceResolver$isFor(x)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-GDSFileResourceResolver-newClient"></a>}}
\if{latex}{\out{\hypertarget{method-GDSFileResourceResolver-newClient}{}}}
\subsection{Method \code{newClient()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{GDSFileResourceResolver$newClient(x)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-GDSFileResourceResolver-clone"></a>}}
\if{latex}{\out{\hypertarget{method-GDSFileResourceResolver-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{GDSFileResourceResolver$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
b0c04d44137b892da21a990124d9437f55ec7dc3
|
9d58b6ea030df648ad75ec9089c12dd315294ebe
|
/Loan_Calculation.R
|
ca5da2235bde263088b991ce75157e4610b704fd
|
[] |
no_license
|
jwcb1025/est-loan-balance
|
11c5fb3f5932b538b9b7b0fbb989fd92de8d6b1b
|
46eea4618aab1378a648c8d005c1bbef5c3b4456
|
refs/heads/master
| 2020-03-24T16:26:32.456305
| 2018-07-30T05:26:17
| 2018-07-30T05:26:17
| 142,824,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,128
|
r
|
Loan_Calculation.R
|
# Import Libraries
library(dplyr)
# Import Data
df.DataTree <- read.csv("~/Models/DataTree/Raw/06065.csv", sep="|", na.strings=c(""," ","NA", "\\N"), stringsAsFactors = FALSE, fill=TRUE, quote="", colClasses = "character")
df.InterestRates <- read.csv("~/Models/Annual 30 yr interest rate.csv")
# Extract residential properties
## Generate Property Type(SFR/Condo) (SFR=1004), (1004=Condo), (1009=PUD)
df.DataTree$PropertType <- NA
lut <- c("1001" = "SFR", "1004" = "Condo", "1009" = "Condo", "1010" = "Condo")
df.DataTree$PropertyType <- lut[df.DataTree$StdLandUseCode]
df.DataTree <- filter(df.DataTree, PropertyType == "SFR" | PropertyType == "Condo")
rm(lut)
#Merge Interest Rate Data
##Obtain merge id - "year"
df.DataTree$Year <- as.numeric(substr(df.DataTree$CurrSaleRecordingDate, 1, 4))
df.DataTree <- left_join(df.DataTree,df.InterestRates, by = "Year")
#Est current loan balance
##Add first and second mortgage
df.DataTree$loanTotal<- as.numeric(df.DataTree$ConCurrMtg1LoanAmt) + as.numeric(df.DataTree$ConCurrMtg2LoanAmt)
#Obtain number of years owned
df.DataTree$yearsOwned <- 2019 - as.numeric(df.DataTree$Year)
#Replace missing ConCurrMtg1Term with year(ConCurrMth1DueDate) - year(CurrSaleRecordingDate)
df.DataTree$ConCurrMtg1Term[is.na(df.DataTree$ConCurrMtg1Term)] <- (2019 - as.numeric(substr(df.DataTree$CurrSaleRecordingDate, 1,4)))
#Obtain number of months remaining on mortgage
df.DataTree$monthsremaining <- as.numeric(df.DataTree$ConCurrMtg1Term) - (df.DataTree$yearsOwned * 12)
df.DataTree$monthsremaining[df.DataTree$monthsremaining < 0] <- 0
#Obtain monthly mortgage rate & payment
df.DataTree$monthlyrate <- df.DataTree$Average.Rate / 12
df.DataTree$monthlyPayment <- df.DataTree$loanTotal * df.DataTree$monthlyrate / (1 - 1 / (1 + df.DataTree$monthlyrate) ^ as.numeric(df.DataTree$ConCurrMtg1Term))
#Caluclate Outstanding Balance
df.DataTree$loanBalanceCurrent <- df.DataTree$monthlyPayment * (1 - 1 / (1 + df.DataTree$monthlyrate) ^ df.DataTree$monthsremaining) / df.DataTree$monthlyrate
#df$loanBalance <- df$loanTotal * (1 - 1 / ((1 + df$monthlyrate) ^ df$monthsremaing)) / df$monthlyrate
|
25de8488491ae263c4235cc60506301e5f679c1b
|
63b2af2879f69bf0eb494a206ecd245059e266ab
|
/SimLin_Repos.R
|
60eb53234e7382d2d1aaa7fbf716afd5b9327845
|
[
"MIT"
] |
permissive
|
MCnu/R_sim_scripts
|
4e0e7363acd586c602027348f26f15b5f47f91c4
|
f69b466c8b9163f0e4b0e705dee00412b897ca76
|
refs/heads/master
| 2021-08-17T19:28:34.279460
| 2021-05-25T16:08:41
| 2021-05-25T16:08:41
| 299,398,051
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,739
|
r
|
SimLin_Repos.R
|
# SIMULATED REPOSITIONING ANALYSIS
sim_lin <-
function(delta_start = "follow",
time_min = 25,
time_max = 450,
peri_rad = 0.85,
velo = T,
JDD = F,
Direct = T,
vacf = T,
ctrl_summ = F,
summarize = T,
contact = "first") {
# create LUT
LUT_ttp_vec <- c()
for (i in 1:length(Repos_List)) {
float_mat <- Repos_List[[i]]
# (nrow(float_mat[float_mat[,"D2O"] > peri_rad,]))
float_ttp <- float_mat[float_mat[, "D2O"] > peri_rad, ]
if (!is.null(nrow(float_ttp))) {
if (nrow(float_ttp) >= 1) {
LUT_ttp_vec[i] <- float_ttp[1, "time"]
} else {
LUT_ttp_vec[i] <- NA
}
} else {
LUT_ttp_vec[i] <- NA
}
}
LUT_ttpv_logic <-
(LUT_ttp_vec > time_min & LUT_ttp_vec < time_max & !is.na(LUT_ttp_vec))
print("Fraction of used trajectories:")
print(mean(LUT_ttpv_logic))
JDD_DF <-
data.frame(
cID = character(),
cont_num = numeric(),
position = character(),
displacement = numeric()
)
DIRECT_DF <- data.frame()
pridist_vec <- c()
if(velo==T) tot_velo_vec <- c()
if(summarize==T) summ_frame <- data.frame()
for (i in 1:length(LUT_ttpv_logic)) {
if (!LUT_ttpv_logic[i]) {
next
}
float_mat <- Repos_List[[i]]
float_mat <- float_mat[float_mat[, "time"] <= LUT_ttp_vec[i], ]
step_count <- nrow(float_mat)
# print(precon_float_direct_matrix)
float_direct_frame <- data.frame()
if (delta_start == "follow") {
follow_float_frame <- data.frame()
endp <- float_mat[step_count, ]
for (s in 2:step_count) {
prip <- float_mat[(s - 1), ]
secp <- float_mat[s, ]
pridist <-
sqrt(((prip[1]) - (endp[1]))^2 + ((prip[2]) - (endp[2]))^2)
secdist <-
sqrt(((secp[1]) - (endp[1]))^2 + ((secp[2]) - (endp[2]))^2)
interdist <-
sqrt(((prip[1]) - (secp[1]))^2 + ((prip[2]) - (secp[2]))^2)
delta_dist_to_end <- unlist(pridist - secdist)
if (s == 2) {
pridist_vec <- c(pridist_vec, as.numeric(pridist))
}
# positive means it used to be further away
if (pridist != secdist) {
# law of cosines, baby
deviation_angle <-
unlist(acos((pridist^2 + interdist^2 - secdist^2) / (2 * pridist *
interdist)))
# print(deviation_angle)
if (is.nan(deviation_angle)) {
deviation_angle <- 0
}
}
if (pridist == secdist) {
deviation_angle <- 0
}
follow_float_frame <-
rbind(
follow_float_frame,
data.frame(
cID = i,
cont_num = 1,
step = (s - 1),
delta_dist = delta_dist_to_end,
delta_theta = deviation_angle
)
)
}
float_direct_frame <-
rbind(float_direct_frame, follow_float_frame)
if (velo == T) {
# vector of velocities derived from the D2P and time per frame
precontact_steps <- as.vector(float_mat[2:nrow(float_mat), "D2P"])
time_per_frame <- float_mat[2,"time"] - float_mat[1,"time"]
tot_velo_vec <-
c(tot_velo_vec, c(precontact_steps / time_per_frame))
}
if (summarize == T) {
summ_frame <-
rbind(
summ_frame,
data.frame(
cID = i,
cont_num = "First",
mean_delta_dist = mean(float_direct_frame$delta_dist),
mean_delta_theta = mean(float_direct_frame$delta_theta),
time = step_count * (float_mat[2, "time"] - float_mat[1, "time"])
)
)
}
}
DIRECT_DF <- rbind(DIRECT_DF, float_direct_frame)
}
if (JDD == T) {
sum_filt <-
data.frame(
cID = c(),
cont_num = c(),
position = c(),
med_dis = c()
)
for (i in 1:length(unique(JDD_DF$cID))) {
filt_jdf <- filter(JDD_DF, cID == unique(JDD_DF$cID)[i])
filt_pre <- filter(filt_jdf, position == "PRE")
filt_pos <- filter(filt_jdf, position == "POS")
prebind <-
data.frame(
cID = filt_pre[1, 1],
cont_num = filt_pre[1, 2],
position = filt_pre[1, 3],
med_dis = median(filt_pre[, 4])
)
posbind <-
data.frame(
cID = filt_pos[1, 1],
cont_num = filt_pos[1, 2],
position = filt_pos[1, 3],
med_dis = median(filt_pos[, 4])
)
sum_filt <- rbind(sum_filt, prebind, posbind)
}
jplot <- ggplot() +
geom_violin(data = JDD_DF, aes(x = cID, y = displacement, color = position)) +
geom_hline(yintercept = sum_u3_d2p[3]) +
geom_point(data = sum_filt, aes(x = cID, y = med_dis, color = position))
# print(jplot)
}
if (delta_start == "follow") {
global_direct_sim <<- DIRECT_DF
pdvec <<- pridist_vec
}
if (velo == T) {
tvv <<- tot_velo_vec
# print(plot(density(tot_velo_vec)))
}
if (summarize == T) {
global_summary_sim <<- summ_frame
sumplot <- ggplot() +
coord_cartesian(xlim = c(-0.01, 0.01), ylim = c(0, 3.14)) +
geom_text(
data = summ_frame,
aes(x = mean_delta_dist, y = mean_delta_theta, label = time)
)
# print(sumplot)
}
}
|
16b65b7af232dddf3268b54a59212bf7f8eec915
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/mixtox/R/NOEC.R
|
4c43cb0af8fa33662c8e7130f82d3ecc96bfe1ba
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 837
|
r
|
NOEC.R
|
NOEC <- function(x, expr, sigLev = 0.05){
# NOEC and LOEC calculation using Dunnett's test
## Dunnett, C.W., 1964. New tables for multiple comparisons with controls. Biometrics 30, 482-491
n <- nrow(expr)
m <- ncol(expr)
C <- sum(expr)^2 / ((n + 1) * m)
Tj <- rowSums(expr)
SSB <- sum(Tj^2) / m - C
SST <- sum(expr^2) - C
SSW <- SST - SSB
SW <- sqrt((SST - SSB) / ((n + 1) * (m - 1)))
tj <- (Tj / m) / (SW * sqrt(1 / m + 1 / m))
probF <- qf(1 - sigLev / 2, (n + 1) * (m - 1), n)
noecSign <- sign(abs(tj) - probF)
idx.one <- which(noecSign == -1)
if(length(idx.one) == 0) noec = NULL else noec = x[idx.one[length(idx.one)]]
idx.two <- which(noecSign == 1)
if(length(idx.two) == 0) loec = NULL else loec = x[idx.two[1]]
mat <- cbind(x, tj, probF, noecSign)
list(mat = mat, no = noec, lo = loec)
}
|
cf3d41a89f864d2894e1976caba6f9628bfa98d0
|
b58784428b3c9b553f1ade5b57ab9e69a954f897
|
/Pubmed API search/GetPubMed.R
|
f5eea628e0e5ae30b6844ff2e08652a75e728d96
|
[] |
no_license
|
ritadvi/R-projects
|
8c6de361fb998adcc9be1ccafe390447d5bb1e23
|
0dbca18005ac46f7aaaa5b3b554fe8d67f70c2cb
|
refs/heads/master
| 2022-04-27T00:00:36.210061
| 2020-04-28T10:14:33
| 2020-04-28T10:14:33
| 257,618,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,182
|
r
|
GetPubMed.R
|
#install the RISmed package
install.packages("RISmed")
library(RISmed)
library(tidyverse)
# Use getwd() to check current working directory
# Import data from input.csv
setwd("~/R-projects/Pubmed API search")
# Leave a header for the column name Genes / Proteins
data <- read.csv(file = 'input.csv', TRUE, ",", stringsAsFactors = FALSE)
# For each gene / protein
for (value in 1:nrow(data)){
# Concatenate (paste) gene / protein + CFTR
searchKey = paste(data[value,1],"%26CFTR")
# Print Query
cat(value," )",data[value,1],"CFTR","\n",file="outfile.txt",append=TRUE)
# Query PubMed
res <- EUtilsSummary(searchKey, type='esearch', db='pubmed')
# Print results
cat("Results:",attr(res,'count'),"\n\n",file="outfile.txt",append=TRUE)
# If Count Results > 0 print Ids
if(attr(res,'count')>0){
# If results > 20, show only top 20
if(length(attr(res,'PMID'))>20){
cat("Latest 20 Article Ids:",attr(res,'PMID')[1:20],"\n",file="outfile.txt",sep="\n",append=TRUE)
}
# Else, if results < 20 show all
else {
cat("Article Ids:",attr(res,'PMID'),"\n",file="outfile.txt",sep="\n",append=TRUE)
}
}
}
|
d5b16a1f4775199e7555b952a6150b5a5f260960
|
a9b5691574a5883c9f5dacd42df81b0c851ddc07
|
/Exploratory Data Analysis/Peer Assignment 1/Peer 1/plot1.R
|
00cd769fe2eed5505227be2a135dccf3d56f4fa7
|
[] |
no_license
|
AlexKitov/datasciencecoursera
|
b0420c9c25a674444b0e7e1e3299ec58b804bbd0
|
61c35f98434534332780a330114e9a4b494e8486
|
refs/heads/master
| 2016-09-07T18:53:11.424122
| 2015-08-30T14:14:26
| 2015-08-30T14:14:26
| 19,503,728
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 674
|
r
|
plot1.R
|
# Locale definition
Sys.setlocale("LC_TIME", "English")
library(sqldf)
# File definition
file <- file("household_power_consumption.txt")
# Data load
DF <- sqldf("select * from file where Date = '1/2/2007' or
Date = '2/2/2007'",
file.format = list(sep = ";", header = TRUE)
)
close(file)
# Date transform
DF <- transform(DF, Date_Time = strptime(
paste(Date, Time, sep = " "), "%d/%m/%Y %H:%M:%S")
)
# Plot 1 generation
png(file = "plot1.png", width = 480, height = 480, bg = "transparent")
hist(DF$Global_active_power, col = "red", main = "Global Active Power",
xlab ="Global Active Power (kilowatts)")
dev.off()
|
501805047f4e37dd3b1ae582168c04baf23df392
|
23f7e209d1eaefb5477034d57c89679de915d186
|
/rscripts/scores_visualization.R
|
cfc54e289c632937cf6446a0496e3a0db07a9bd1
|
[] |
no_license
|
kalmSveta/AS
|
3d4b3a68f0dde8dc719ecd93867fe17bc994e13e
|
549c9d20925af35bf5d1ba5815a6b111c1fb287d
|
refs/heads/master
| 2020-05-29T20:59:23.152415
| 2019-11-30T10:26:46
| 2019-11-30T10:26:46
| 189,366,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 969
|
r
|
scores_visualization.R
|
library(data.table)
library(ggplot2)
dt <- fread('../python_scripts/folding_pretty_copy/out/folding/panhandles_preprocessed.tsv')
filtered <- fread('../python_scripts/folding_pretty_copy/out/folding/panhandles_preprocessed_filtered.tsv')
dt <- dt[dt$id %in% filtered$id, ]
known.ids <- c(127812, 70913, 579130, 106707,107608, 107609)
names(known.ids) <- c('SF1', 'ENAH', 'DNM1', 'ATE1_1', 'ATE1_2', 'ATE1_3')
dt[dt$gene_name == 'GFRA1',]$gene_name <- 'ATE1_2'
for(score in c("SplicingScore", "mutational_score", "conservation_score", "kmerScore", "Energy_and_freq_Score")){
svg(paste0('~/Desktop/Thesis_pictures/Results/', score, '.svg'), width = 7, height = 7)
known.scores <- dt[dt$id %in% known.ids, ][[score]]
names(known.scores) <- dt[dt$id %in% known.ids, ]$gene_name
hist(dt[[score]], xlab = 'score', main = score)
abline(v = known.scores, col = 'red')
text(known.scores, 60000, names(known.scores), cex = 0.6, pos = 1, col = "red")
dev.off()
}
|
49e8a880567ba5834872fe11ea337e4ffcc7bdd4
|
6924a89998d69059eefe91ffd1661c88017c12f4
|
/man/layer_gin_conv.Rd
|
c16e238524b7ffbff6d12f339957b388d26d4c82
|
[] |
no_license
|
rdinnager/rspektral
|
dffffac8345f32d5a86af91f8802da0166e8e828
|
f72836cc79732bfeb70f5580314b92b38fd54114
|
refs/heads/master
| 2023-05-26T23:13:08.888846
| 2021-06-08T12:50:46
| 2021-06-08T12:50:46
| 289,394,618
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,290
|
rd
|
layer_gin_conv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layers_conv.R
\name{layer_gin_conv}
\alias{layer_gin_conv}
\title{GINConv}
\usage{
layer_gin_conv(
object,
channels,
epsilon = NULL,
mlp_hidden = NULL,
mlp_activation = "relu",
activation = NULL,
use_bias = TRUE,
kernel_initializer = "glorot_uniform",
bias_initializer = "zeros",
kernel_regularizer = NULL,
bias_regularizer = NULL,
activity_regularizer = NULL,
kernel_constraint = NULL,
bias_constraint = NULL,
...
)
}
\arguments{
\item{channels}{integer, number of output channels}
\item{epsilon}{unnamed parameter, see
\href{https://arxiv.org/abs/1810.00826}{Xu et al. (2018)}, and the equation above.
By setting \code{epsilon=None}, the parameter will be learned (default behaviour).
If given as a value, the parameter will stay fixed.}
\item{mlp_hidden}{list of integers, number of hidden units for each hidden
layer in the MLP (if None, the MLP has only the output layer)}
\item{mlp_activation}{activation for the MLP layers}
\item{activation}{activation function to use}
\item{use_bias}{bool, add a bias vector to the output}
\item{kernel_initializer}{initializer for the weights}
\item{bias_initializer}{initializer for the bias vector}
\item{kernel_regularizer}{regularization applied to the weights}
\item{bias_regularizer}{regularization applied to the bias vector}
\item{activity_regularizer}{regularization applied to the output}
\item{kernel_constraint}{constraint applied to the weights}
\item{bias_constraint}{constraint applied to the bias vector.}
}
\description{
\loadmathjax
A Graph Isomorphism Network (GIN) as presented by
\href{https://arxiv.org/abs/1810.00826}{Xu et al. (2018)}.
\strong{Mode}: single, disjoint.
\strong{This layer expects a sparse adjacency matrix.}
This layer computes for each node \mjeqn{i}{}:
\mjdeqn{ Z_i = \textrm{MLP}\big( (1 + \epsilon) \cdot X_i + \sum\limits_{j \in \mathcal{N}(i)} X_j \big) }{}
where \mjeqn{\textrm{MLP}}{} is a multi-layer perceptron.
\strong{Input}
\itemize{
\item Node features of shape \verb{(N, F)};
\item Binary adjacency matrix of shape \verb{(N, N)}.
}
\strong{Output}
\itemize{
\item Node features with the same shape of the input, but the last dimension
changed to \code{channels}.
}
}
|
d142ba4cbef5688d8e1106f7385a117a6f9e1736
|
971db158564345d25e66821f97d3257272daa3ea
|
/man/plotAlleleByPosition.Rd
|
6da34085518f0cdc5f36d8d7107239a34b9708e8
|
[] |
no_license
|
cran/cancerTiming
|
0c40bb4ce4a36abfbb270ff166b33b94d38527eb
|
20a684b40ae92542ec883f6758bfb545524722be
|
refs/heads/master
| 2021-01-17T09:05:07.366273
| 2016-04-03T00:22:46
| 2016-04-03T00:22:46
| 17,694,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,608
|
rd
|
plotAlleleByPosition.Rd
|
\name{plotAlleleByPosition}
\alias{plotAlleleByPosition}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot allele frequencies by position
}
\description{
Plot observed allele frequencies from sequencing data against their location on the chromosome.
}
\usage{
plotAlleleByPosition(mutData, segmentData = NULL,
whChr = 1:22, chromosomeId = "chr",
sampleName = NULL, sample = FALSE, tumorAFId, positionId, type = "mutation",
startId = "start", endId = "end", segFactorId, tCNId, MarkId, segColors,
normCont = NULL, addData = NULL, addColor="red",col="black",pch=1,lwd=2,
xlim,ylab="Allele Frequency",...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{mutData}{ data.frame with mutation data set to be plotted
%% ~~Describe \code{mutData} here~~
}
\item{segmentData}{ (optional) segmentation data
%% ~~Describe \code{segmentData} here~~
}
\item{whChr}{ which chromosome to plot
%% ~~Describe \code{whChr} here~~
}
\item{chromosomeId}{ column name for chromosome (must be same in all data.frames)
%% ~~Describe \code{chromosomeId} here~~
}
\item{sampleName}{id printed on the plot to identify the sample
%% ~~Describe \code{sampleName} here~~
}
\item{sample}{logical. If true, take only a random sample of 10,000 locations for the chromosome. Can speed up for plotting SNPs.
%% ~~Describe \code{sample} here~~
}
\item{tumorAFId}{ column name for the allele frequency in mutData
%% ~~Describe \code{tumorAFId} here~~
}
\item{positionId}{column name for the allele location in mutData
%% ~~Describe \code{positionId} here~~
}
\item{type}{type of allele frequency plotted (passed to `allAF' in order to create the lines for the expected AF)
%% ~~Describe \code{type} here~~
}
\item{startId}{column name for the start of the segmentation (in segData)
%% ~~Describe \code{startId} here~~
}
\item{endId}{column name for the end of the segmentation (in segData)
%% ~~Describe \code{endId} here~~
}
\item{segFactorId}{column name for the factor for segmentations (in segData).
%% ~~Describe \code{segFactorId} here~~
}
\item{tCNId}{column name that gives the total copy number for the segmentation (in segData); needed if give normCont to calculated expected AF
%% ~~Describe \code{tCNId} here~~
}
\item{MarkId}{column name of a column with logical values that identifies segments that should be marked up with hash marks.
%% ~~Describe \code{LOHId} here~~
}
\item{segColors}{vector of colors for the segmentations. Should be as long as the number of levels of segFactorId
%% ~~Describe \code{segColors} here~~
}
\item{normCont}{percent normal contamination. If missing, then lines for the expected AF will not be calculated.
%% ~~Describe \code{normCont} here~~
}
\item{addData}{ data.frame with another set (example germline SNPs) to be plotted in red
%% ~~Describe \code{germlineSNP} here~~
}
\item{addColor}{ color for the additional data
%% ~~Describe \code{germlineSNP} here~~
}
\item{lwd}{line width of the lines for the expected AF
%% ~~Describe \code{lwd} here~~
}
\item{ylab}{label for y-axis
%% ~~Describe \code{lwd} here~~
}
\item{xlim}{xlim boundaries. If missing, will be calculated.
%% ~~Describe \code{xlim} here~~
}
\item{col}{col for the mutData points
%% ~~Describe \code{xlim} here~~
}
\item{pch}{pch for the mutData points
%% ~~Describe \code{xlim} here~~
}
\item{\dots}{arguments passed to initial plotting command.
%% ~~Describe \code{\dots} here~~
}
}
\value{
returns invisibly the vector of colors for the segmentations, useful for making legends (see the example)
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\author{
Elizabeth Purdom}
\examples{
data(mutData)
#only mutations in the CNLOH region
onlyMuts<-subset(mutData,is.na(rsID) & position <= 1.8E7)
snps<-subset(mutData,!is.na(rsID) )
segData<-data.frame(chromosome="17",start=c(0,1.8e7+1),
end=c(1.8e7,max(mutData$position)),
totalCpy=c(2,NA),markRegion=c(FALSE,TRUE))
out<-plotAlleleByPosition(onlyMuts,whChr=17, segmentData=segData,
tCNId="totalCpy",normCont=0.22, addData=snps,pch=19,
addColor="grey",MarkId="markRegion",
segColors="pink",xaxt="n",xlab="Position", segFactorId="totalCpy",
chromosomeId = "chromosome",tumorAFId="allelefreq",
positionId="position",type="mutation")
axis(1,line=1,tick=FALSE)
legend("topright",legend=c(names(out),"unknown"),fill=c(out,NA),
title="Total Copy Number")
}
|
2bb793d4ae83ddb729c0c2efb700a8d8e5a8c5a9
|
d9f981d50d5476226ceffb9a4f75498ed3fd6127
|
/grangertest.R
|
d7257cdca67c9f5f5094209d1bf170348e194688
|
[] |
no_license
|
bwriteLeaf/guochushiyan
|
509df750338ac988c8d050c790e273e02eb365f6
|
af9b3fd48874fcc18ea964c90d8a8994137a9523
|
refs/heads/master
| 2021-04-15T08:57:52.308169
| 2018-04-04T07:54:43
| 2018-04-04T07:54:43
| 126,176,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 475
|
r
|
grangertest.R
|
# Granger-causality check: does the WTI oil price help predict sentiment?
# Input is an HP-filtered CSV with columns `sentiment` and `wti`.
library(lmtest)

# Switch between the daily and the weekly series.
use_weekly <- FALSE
# use_weekly <- TRUE

infile <- if (use_weekly) {
  "C:/Users/user/Documents/YJS/国储局Ⅱ期/实验/guochushiyan/winequality/wti-senti-week-hp.csv"
} else {
  "C:/Users/user/Documents/YJS/国储局Ⅱ期/实验/guochushiyan/winequality/wti-senti-hp.csv"
}

dat <- read.csv(infile, sep = ",", header = TRUE)
senti <- dat[["sentiment"]]
wti <- dat[["wti"]]

# Test the wti -> senti direction with 20 lags; the reverse direction is
# kept below for reference.
grangertest(senti ~ wti, order = 20, data = dat)
# grangertest(wti ~ senti, order = 1, data = dat)
|
152d0a7b5ac6596dfae629430e796b3f76d6b686
|
c6b10aa493cbfa8fdcaa76304c5b1bf0604317a3
|
/man/snp_ppa.Rd
|
eb5de2496625b7837129057d2bc3faf2d434075d
|
[] |
no_license
|
zerland/moloc
|
3b895d9f06ac91bfba0ecd7a11cc7a0091578b0c
|
f083d8e44577c9a0c3a562f0d99957851f50fa68
|
refs/heads/master
| 2023-03-18T17:23:23.491381
| 2021-01-20T15:20:24
| 2021-01-20T15:20:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 770
|
rd
|
snp_ppa.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_moloc.R
\name{snp_ppa}
\alias{snp_ppa}
\title{snp_ppa}
\usage{
snp_ppa(ABF, n_files, config_ppas)
}
\arguments{
\item{ABF}{is an array containing the single and coloc logBFs}
\item{n_files}{is one number, i.e. the number of traits to be analyzed}
\item{config_ppas}{is the prior for one variant to be associated with 1 trait and with each additional trait}
}
\value{
A data frame containing the likelihoods and posteriors for each configuration combination
}
\description{
Posterior probability that each SNP is THE causal variant for a shared signal
}
\examples{
snp <- snp_ppa(ABF, n_files=3, config_ppas=lkl[[1]])
}
\author{
Jimmy Liu, Claudia Giambartolomei
}
\keyword{internal}
|
3d3daf37577d1670d284b4453f14dc3e7f4aac2b
|
702464534ec7388d8b44a1100a8db2a466e6bacd
|
/man/catches.Rd
|
7af851f368573da0da5164eacdb5886030b66c0b
|
[] |
no_license
|
klarrieu/ESM262PackageAssignment
|
1900dfb18200d1805cdaac53440244d6836e697d
|
2504e4e3511ae4bcd17a688cd30bbcefe0a8b40d
|
refs/heads/master
| 2020-03-19T10:18:35.271509
| 2018-06-06T20:33:51
| 2018-06-06T20:33:51
| 136,270,785
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 395
|
rd
|
catches.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/catches.R
\docType{data}
\name{catches}
\alias{catches}
\title{Sample data for fisheries_summary function}
\format{A matrix with rownames as fish species names and columnnames as fishery names}
\usage{
catches
}
\description{
catches - A matrix with number of each fish species caught by fishery
}
\keyword{datasets}
|
114fc1f26bbc8b88242d8de6b9dec112d4384f82
|
d072433fb4facac496d0337b4cf22b5e00cf6853
|
/man/rpp.glm.Rd
|
611916ad0760977d3db518b49dfcc080054ccaec
|
[] |
no_license
|
cran/asuR
|
3e3d745182d938acba825b723fc09b14cc16dbaa
|
578f3e0693d29fb8fd3056d52fa8913d5039cc79
|
refs/heads/master
| 2020-12-24T15:05:44.493897
| 2007-06-01T00:00:00
| 2007-06-01T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,097
|
rd
|
rpp.glm.Rd
|
\name{rpp.glm}
\alias{rpp.glm}
\title{Partial Residual Plots}
\description{
Plots the partial residuals of a glm-model versus each predictor
}
\usage{
\method{rpp}{glm}(mymodel, id=c("all", "none"), ...)
}
\arguments{
\item{mymodel}{an object of class \code{glm}, usually, a result of a
call to the function \code{glm}.}
\item{id}{a character string or numeric value; in which panel should
it be possible to interactively identify points; not yet available}
\item{...}{further arguments}
}
\details{
A partial residual plot allows one to study the effect of a focal predictor
while taking the other predictors into account. This helps to find an
appropriate transformation for the focal predictor.
***expected pattern:\cr
linear scatter of points along the regression line
***Question:\cr
Is the relationship linear? otherwise:\cr
1. change the transformations of the predictor
}
\value{
A plot and a vector with the row index of the identified values
in the original data.
}
\author{\email{thomas.fabbro@unibas.ch}}
\seealso{\code{\link{inspect}}}
\examples{
#
}
\keyword{models}
|
2e8aa7e84f9e67b6ba3e45b79a75fc68de6dff4e
|
bce2c5ccb8e41f81d2105900c4e5ea329e8a4d1d
|
/Current-stuff/R/mclust/man/predict.densityMclust.Rd
|
0c8e8b107e7899a19c718213c2e77755b303b988
|
[] |
no_license
|
tonyc2700/phd
|
7165b21ff2ae4fb756e5cad5ac3e599c515ac847
|
bf87f0023e4163e44175fdf6cc75c7e73ef79883
|
refs/heads/master
| 2021-01-17T13:58:17.472303
| 2016-11-09T13:50:39
| 2016-11-09T13:50:39
| 33,205,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,854
|
rd
|
predict.densityMclust.Rd
|
\name{predict.densityMclust}
\alias{predict.densityMclust}
\title{Density estimate of multivariate observations by Gaussian finite mixture modeling}
\description{Compute density estimation for multivariate observations based on Gaussian finite mixture models estimated by \code{\link{densityMclust}}.}
\usage{
\method{predict}{densityMclust}(object, newdata, what = c("dens", "cdens"), \dots)
}
\arguments{
\item{object}{an object of class \code{"densityMclust"} resulting from a call to \code{\link{densityMclust}}.}
\item{newdata}{a vector, a data frame or matrix giving the data. If missing the density is computed for the input data obtained from the call to \code{\link{densityMclust}}.}
\item{what}{a character string specifying what to retrieve: \code{"dens"} returns a vector of values for the mixture density, \code{cdens} returns a matrix of component densities for each mixture component (along the columns).}
\item{\dots}{further arguments passed to or from other methods.}
}
% \details{}
\value{
Returns a vector or a matrix of densities evaluated at \code{newdata} depending on the argument \code{what} (see above).
}
\references{
C. Fraley and A. E. Raftery (2002).
Model-based clustering, discriminant analysis, and density estimation.
\emph{Journal of the American Statistical Association 97:611:631}.
C. Fraley, A. E. Raftery, T. B. Murphy and L. Scrucca (2012).
mclust Version 4 for R: Normal Mixture Modeling for Model-Based
Clustering, Classification, and Density Estimation.
Technical Report No. 597, Department of Statistics, University of Washington.
}
\author{Luca Scrucca}
% \note{}
\seealso{\code{\link{Mclust}}.}
\examples{
x = faithful$waiting
dens = densityMclust(x)
x0 = seq(50, 100, by = 10)
d0 = predict(dens, x0)
plot(dens)
points(x0, d0, pch = 20)
}
\keyword{multivariate}
|
15b35c86fc9eccd8ad38c5ffaabcb59ac85fa466
|
aec2efc1d566edce379275ad493b2ef3514eb345
|
/test/analysis.R
|
2e68a049a8e6b11f3321fab6707154f6c1d8ad40
|
[] |
no_license
|
hirenj/ms-checker
|
0368b9874bc3b355664136a3db8463f7540db253
|
2e79ab8c355f9a36b48b949cac0abcadc2dc9507
|
refs/heads/master
| 2023-04-13T15:54:18.992789
| 2022-11-26T19:45:38
| 2022-11-26T19:45:38
| 32,934,491
| 0
| 0
| null | 2023-04-11T23:46:31
| 2015-03-26T15:09:47
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,891
|
r
|
analysis.R
|
# Compare a manually curated quantitation table (args[1]) against the
# program-generated one (args[2]), classify each (sequence, glyco) key as
# a true/false positive/negative, and optionally write the breakdown to an
# Excel workbook (args[3]).
args <- commandArgs(trailingOnly = TRUE)

manual_data <- read.table(args[1],sep='\t',header=TRUE)
my_data <- read.table(args[2],sep='\t',header=TRUE)

manual_data$quant <- as.numeric(manual_data$quant)
manual_data$original_quant <- as.numeric(manual_data$original_quant)
# Keep the raw annotation text ("potential ...", "conflict ...") before the
# numeric coercion turns those entries into NA.
my_data$annot <- my_data$quant
my_data$quant <- as.numeric(my_data$quant)

# A (sequence, glyco-site) pair identifies one measurement in both tables.
manual_data$key <- paste( manual_data$sequence, manual_data$glyco,sep=' ')
my_data$key <- paste( my_data$sequence, my_data$glyco,sep=' ')

# Singlet quantitations carry the sentinel ratios 1e-05 / 1e05 (matching
# the 0.00001 / 100000 tests on the manual side below).
# FIX: the original wrote `1e0-5`, which is 1 - 5 == -4, not 1e-05.
my_singlets <- my_data[my_data$quant == 1e-05 | my_data$quant == 1e05 | grepl("potential", my_data$annot ) | grepl("conflict",my_data$annot) ,]

# Full outer merge; .x columns come from the manual table, .y from ours.
merged <- merge(manual_data,my_singlets,by='key',all.x=TRUE,all.y=TRUE)

false_negatives <- subset(merged,!is.na(uniprot.x) & (quant.x == 0.00001 | quant.x == 100000) & is.na(uniprot.y))
positives <- subset(merged, ! is.na(uniprot.x) & quant.x == quant.y)
false_positives <- subset(merged, ! key %in% positives$key & ! is.na(uniprot.y) & !is.na(uniprot.x) & quant.y != quant.x & quant.y == original_quant & (original_quant == 1e05 | original_quant == 1e-05 ))
new_quants <- subset(merged, ! key %in% c( unique(positives$key), unique(false_positives$key) ) & ! is.na(quant.y) & original_quant == quant.y & (original_quant == 1e05 | original_quant == 1e-05))
invalid_negatives <- subset(merged, ! is.na(uniprot.x) & grepl("potential",annot) & quant.x < 100000 & quant.x > 0.00001 )
valid_negatives <- subset(merged, ! is.na(uniprot.x) & grepl("potential",annot) & (quant.x == 100000 | quant.x == 0.00001 ))
new_potentials <- subset(merged,is.na(quant.x) & grepl("potential",annot))

# One sheet per bucket when an output path was supplied.
# NOTE(review): `write_excel` is not defined in this file -- presumably
# provided by a package loaded in the calling environment; confirm.
if (!is.na(args[3])) {
	write_excel(header=TRUE,filename=args[3],
		false_negatives=false_negatives,
		false_positives=false_positives,
		positives=positives,
		new_singlets=new_quants,
		new_potentials=new_potentials,
		invalid_negatives=invalid_negatives,
		valid_negatives=valid_negatives)
}
|
c9fc29a2951e24bbf45df7020ef18b6d5e297474
|
12cc9e21827aee98ba8eec485c4798fc62f2ebd5
|
/man/targetsketch.Rd
|
ca9405a5827c22f0b5934e7fd4afa7abf32fa654
|
[
"MIT"
] |
permissive
|
tomsing1/targetsketch
|
22d446bdfdb8f7b0c8e623f98230fbb0c9b267ea
|
8b495debc9589861eab84eccf8f70f99c991065d
|
refs/heads/main
| 2023-02-07T05:18:26.827658
| 2020-10-13T13:01:21
| 2020-10-13T13:01:21
| 324,883,295
| 0
| 0
|
NOASSERTION
| 2020-12-28T01:28:37
| 2020-12-28T01:28:37
| null |
UTF-8
|
R
| false
| true
| 335
|
rd
|
targetsketch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{targetsketch}
\alias{targetsketch}
\title{Launch the targetsketch app}
\usage{
targetsketch()
}
\description{
Launch an interactive web application for
creating and visualizing \code{targets} pipelines.
}
\examples{
\dontrun{
targetsketch()
}
}
|
0715a18e8b25d83610f2e0a9cb0fa539ea7aabde
|
b522c234364dc52c19cd58e36f92ad8ca3281f3a
|
/plot1.R
|
4798fa42fff20dbf87d8acd1a46db318407c8799
|
[] |
no_license
|
ningkon/ExData_Plotting1
|
5f2cfd44a3d4855f50f2433d8d6e3a86b082c3fc
|
dcdc10df6874d55d387bd623e245d6ae55c93906
|
refs/heads/master
| 2020-04-23T01:17:07.759051
| 2019-02-17T08:51:52
| 2019-02-17T08:51:52
| 170,807,657
| 0
| 0
| null | 2019-02-15T05:38:15
| 2019-02-15T05:38:14
| null |
UTF-8
|
R
| false
| false
| 1,298
|
r
|
plot1.R
|
# Exploratory Data Analysis course project, plot 1: histogram of Global
# Active Power for 2007-02-01 and 2007-02-02.
# Expects household_power_consumption.txt at the path below ("?" marks NA).
mydf <- read.table("./Coursera/data/household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?")

library(dplyr)

# Keep only the observations from the two target days.
mydata <- mydf[mydf$Date == '1/2/2007' | mydf$Date == '2/2/2007', ]

# Parse the day/month/year Date strings into Date objects, then combine
# calendar date and clock time into a single POSIXct timestamp column.
mydata$Date <- as.Date(factor(mydata$Date), format = "%d/%m/%Y")
mydata$New_Date <- as.POSIXct(paste(mydata$Date, mydata$Time, sep = " "))

# Render the histogram to a 480x480 PNG.
png( filename = "./Coursera/Assignments/Exploratory Data Analysis Assngmt 1/plot1.png", width = 480, height = 480)
hist(mydata$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "")
dev.off()
|
fc0fbca3a19fc27965ff2f17147d58ed7cb1ae03
|
59034b909c69f1658e30d405b438ea31cddaf4db
|
/covid/app.R
|
24e6a4decda510217ef8f68a5a2500c042bc1c73
|
[] |
no_license
|
chrishebbes/covid-19
|
8e4ae38ea36333f5af64a3041ed61a591095070d
|
1b4275da5fd76fa1ba18fb2d3684a87dd58ece79
|
refs/heads/master
| 2023-01-10T15:17:58.193997
| 2020-11-05T12:53:15
| 2020-11-05T12:53:15
| 303,705,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,175
|
r
|
app.R
|
#
# COVID-19 dashboard Shiny app for England.
# Downloads daily case/death figures from the Public Health England
# coronavirus API (ukcovid19 package) and shows a scatter plot of new
# cases plus value boxes for daily deaths, new cases and the selected date.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(devtools);
library(rsconnect);
#devtools::install_github("publichealthengland/coronavirus-dashboard-api-R-sdk");
library(shiny)
library(shinydashboard)
library(ukcovid19);
library(ggplot2)
library(tidyverse);
library(lubridate);

# Restrict the API query to the England nation-level series.
query_filters <- c(
  'areaType=nation',
  'areaName=England'
)

# Fields requested from the API, keyed by the column names used below.
cases_and_deaths = list(
  date = "date",
  areaName = "areaName",
  areaCode = "areaCode",
  newCasesByPublishDate = "newCasesByPublishDate",
  cumCasesByPublishDate = "cumCasesByPublishDate",
  newDeaths28DaysByPublishDate = "newDeaths28DaysByPublishDate",
  cumDeaths28DaysByPublishDate = "cumDeaths28DaysByPublishDate"
)

# One API call at startup; `data` holds one row per published day.
data <- get_data(
  filters = query_filters,
  structure = cases_and_deaths
)

# Layout: three value boxes, the cases plot, and a row-index slider.
# NOTE(review): the slider addresses rows 1..100 of `data`; rows beyond
# 100 are unreachable if the API returns a longer series -- confirm.
frow1<-fluidRow(valueBoxOutput("value1"),valueBoxOutput("value2"),valueBoxOutput("value3"));
frow2<-fluidRow(box(plotOutput("plot1", height = 250)));
frow3<-fluidRow(box(title = "Date",sliderInput("slider", "Date", 1, 100, 50)));
body<-dashboardBody(frow1,frow2,frow3);

ui <- dashboardPage(
  dashboardHeader(title="COVID-19 Dashboard"),
  dashboardSidebar(),
  body
)

server <- function(input, output) {
  # Parse the ISO date strings once for plotting.
  data<-data %>% mutate(ymddate=parse_date(date,"%Y-%m-%d"));
  output$plot1<-renderPlot({ggplot(data,aes(x=ymddate,y=newCasesByPublishDate))+geom_point()+xlab("Date")+ylab("Cases");})

  # Values from the row of `data` selected by the slider.
  dailyval<-reactive({data$newDeaths28DaysByPublishDate[input$slider]});
  newcase<-reactive({data$newCasesByPublishDate[input$slider]});
  datesel<-reactive({data$date[input$slider]});

  output$value1<-renderValueBox({
    valueBox(
      dailyval(), "Daily Deaths", icon = icon("skull-crossbones"),
      color = "red"
    )
  })
  output$value2<-renderValueBox({
    valueBox(
      newcase(), "New Cases", icon = icon("stethoscope"),
      color = "blue"
    )
  })
  output$value3<-renderValueBox({
    valueBox(
      # FIX: this box displays the selected date, not a case count --
      # the original subtitle "New Cases" was a copy-paste mistake.
      datesel(), "Date", icon = icon("calendar"),
      color = "green"
    )
  })
}

shinyApp(ui, server)
|
570fcdafa90d7589d7c64db9620dbd823bb04dc2
|
8a476df8f065da042e23ea00a9d1cd86ae63ad4e
|
/man/jn.var.Rd
|
98c27b88e844cc9df1379ebde87666708cba2fd0
|
[] |
no_license
|
cran/skewtools
|
b40ab0ce0f1a3a1dd210105c0a8659ccebcd6881
|
91c35886083fd03f4af94ff3ad5d86b483a2bb88
|
refs/heads/master
| 2021-01-18T14:24:05.220898
| 2012-07-21T00:00:00
| 2012-07-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,196
|
rd
|
jn.var.Rd
|
\name{jn.var}
\alias{jn.var}
\title{
Jackknife estimator for the variance of the Heteroscedastic Nonlinear
Regression Growth model parameters
}
\description{
This function computes the estimated variance of the Heteroscedastic Nonlinear Regression Growth
Model using Jackknife and Weighted Jackknife described in Wu (1986)
}
\usage{
jn.var(data, betas, rho, sigma2, shape, nu, weight = FALSE,
method = "MLE", model = "VB", type = "ST", m.type = "power")
}
\arguments{
\item{data}{a numeric matrix of \code{data.frame} type with labels \code{x} (ages) and \code{y} (lengths)}
\item{betas}{a initial numeric vector of Growth model parameters}
\item{rho}{a initial numeric value of mean residual parameter}
\item{sigma2}{a initial numeric value of variance residual parameter}
\item{shape}{a initial numeric value of shape residual parameter}
\item{nu}{a initial numeric value of freedom degree residual parameter}
\item{weight}{a logical value to use the weighted Jackknife method. By default is \code{FALSE} (usual method)}
\item{method}{a string to select the method of estimation. For maximum likelihood: \code{MLE}, for the \code{\link{nls}} function;
\code{EM}, for the \code{\link{HNL.skew}} function}
\item{model}{a string related to growth model. It is available for: \code{VB} (Von Bertalanffy, by default),
\code{Gompertz}, \code{Logistic} and \code{Richards}}
\item{type}{a string related to residual distribution. It is available for: \code{ST} (Skew-t, by default), \code{SN} (Skew-Normal),
\code{T} (T-Student) and \code{N} (Normal)}
\item{m.type}{a string related to heteroscedastic residual modelation. It is available for: \code{power} (power type,
by default) and \code{exp} (exponencial type). See \code{\link{HNL.skew}} function for details}
}
\value{
\item{V}{a numeric matrix of estimated variance}
}
\references{
Wu, C. F. J. (1986). Jackknife, Bootstrap and Other Resampling Methods in Regression Analysis. \emph{Annals of Statistics}, 14, 4, 1261-1295.
}
\author{
Javier E. Contreras-Reyes
}
\seealso{
\code{\link{boot.var}}, \code{\link{HNL.skew}}, \code{\link{nls}}
}
\examples{
data(merluzaChile)
x <- merluzaChile$edad
y <- merluzaChile$long
plot(x, y, main="Age-Length", ylab="Length (cm)", xlab="Age (years)")
beta <- c(80, 0.08, -0.187)
rho <- -0.1
sigma2 <- 3.2726
shape <- 0.1698
nu <- 11
X = data.frame(list(x=merluzaChile$edad, y=merluzaChile$long))
p=round(nrow(X)/2,0)
# Run:
# bvar <- jn.var(data = X, beta, rho, sigma2, shape, nu, weight=FALSE,
# method = "MLE", model = "VB", type = "T", m.type="power")
# bvar$V
st = c(b1=beta[1], b2=beta[2], b3=beta[3])
ml <- nls(formula=y~b1*(1-exp(-b2*(x-b3))), data=X, start=st)
sqrt(diag(vcov(ml)))
# Run:
# bvar2 <- jn.var(data = X, beta, rho, sigma2, shape, nu, weight=FALSE,
# method = "MLE", model = "VB", type = "T", m.type="power")
# bvar2$V
modelVB <- HNL.skew(y, x, beta, rho, sigma2, shape, nu,
loglik = FALSE, model = "VB", type = "T",
m.type = "power", error = 0.00001)
sqrt(diag(modelVB$V))
}
\keyword{Variance}
\keyword{Estimation}
\keyword{skewtools}
|
c665d58839504c0cee146a4bb431275f62448884
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/EAinference/R/RcppExports.R
|
99c1017436ba1c352fe537b7716f43228132862f
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 317
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Thin R wrapper around the compiled `_EAinference_grlasso` routine (a
# group-lasso solver implemented in C++). All arguments are forwarded to
# the native code unchanged; see the package's C++ sources for their
# exact semantics.
# NOTE(review): this file is auto-generated -- any edits made here will be
# overwritten the next time Rcpp::compileAttributes() is run.
grlasso <- function(X, Y, XY, weights, group, lbd, Gamma, initBeta, eps) {
    .Call('_EAinference_grlasso', PACKAGE = 'EAinference', X, Y, XY, weights, group, lbd, Gamma, initBeta, eps)
}
|
b847f829410619524752caa2ba457470fafed24d
|
fe6ccdbfc41f2a820d0f911289e1ff5d15472e7d
|
/man/BubbleChartSpec.Rd
|
1114faf52cf87f6ad4323f06b9385f229d55b045
|
[] |
no_license
|
bradgwest/googleSheetsR
|
b5b7cd4c3a34fceeecb25894398fe87148f87cea
|
48de991ee571f11b02f2254d3456551bfdbcd76f
|
refs/heads/master
| 2020-03-27T11:21:13.423181
| 2018-10-12T19:52:59
| 2018-10-12T19:52:59
| 146,481,101
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,418
|
rd
|
BubbleChartSpec.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{BubbleChartSpec}
\alias{BubbleChartSpec}
\title{BubbleChartSpec Object}
\usage{
BubbleChartSpec(series = NULL, legendPosition = NULL,
bubbleOpacity = NULL, bubbleSizes = NULL, domain = NULL,
bubbleTextStyle = NULL, bubbleBorderColor = NULL, groupIds = NULL,
bubbleLabels = NULL, bubbleMinRadiusSize = NULL,
bubbleMaxRadiusSize = NULL)
}
\arguments{
\item{series}{The data containing the bubble y-values}
\item{legendPosition}{Where the legend of the chart should be drawn}
\item{bubbleOpacity}{The opacity of the bubbles between 0 and 1}
\item{bubbleSizes}{The data containing the bubble sizes}
\item{domain}{The data containing the bubble x-values}
\item{bubbleTextStyle}{The format of the text inside the bubbles}
\item{bubbleBorderColor}{The bubble border color}
\item{groupIds}{The data containing the bubble group IDs}
\item{bubbleLabels}{The data containing the bubble labels}
\item{bubbleMinRadiusSize}{The minimum radius size of the bubbles, in pixels}
\item{bubbleMaxRadiusSize}{The max radius size of the bubbles, in pixels}
}
\value{
BubbleChartSpec object
}
\description{
BubbleChartSpec Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A <a href='/chart/interactive/docs/gallery/bubblechart'>bubble chart</a>.
}
\concept{BubbleChartSpec functions}
|
01145d6cb9e908bdae82e9c1a218015d403f8e4c
|
f2bcc38a5ece9b14a3d8b68c5d4a09edff06314d
|
/man/CamApi.Rd
|
9fbb3b71de31e5f716b492b178ef5f72453fbb46
|
[
"Apache-2.0"
] |
permissive
|
charlieccarey/monarchr.biolink
|
8eb618c9c61091ac3371e7d67636520a07d3db6b
|
5b488a06cedcafc9df44368e2d665555bc5cd53d
|
refs/heads/master
| 2020-03-09T08:40:56.748770
| 2018-04-11T04:33:54
| 2018-04-11T04:33:54
| 128,694,973
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,105
|
rd
|
CamApi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CamApi.r
\docType{data}
\name{CamApi}
\alias{CamApi}
\title{Cam operations}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
CamApi
}
\description{
swagger.Cam
}
\section{Fields}{
\describe{
\item{\code{path}}{Stores url path of the request.}
\item{\code{apiClient}}{Handles the client-server communication.}
\item{\code{userAgent}}{Set the user agent of the request.}
}}
\section{Methods}{
\describe{
get_activity_collection Returns list of models
get_instance_object Returns list of matches
get_model_collection Returns list of ALL models
get_model_contibutors Returns list of all contributors across all models
get_model_instances Returns list of all instances
get_model_object Returns a complete model
get_model_properties Returns list of all properties used across all models
get_model_property_values Returns list property-values for all models
get_model_query Returns list of models matching query
get_physical_interaction Returns list of models
}
}
\keyword{datasets}
|
56714b8d5070c87aab43d69babd697abeac2b83b
|
45b46cbea9f36c9f1e44ba01c925e34ff24cfdbc
|
/Cachematrix.r
|
7ac82c8c22a66d9036decd556c3baf3f10113b88
|
[] |
no_license
|
mostorga/ProgrammingAssignment2
|
e314a9ffca333c6487e8e24ea03e91d622a67094
|
bcdb5023be204e212fe690fdc4c5fa16705cdf06
|
refs/heads/master
| 2020-03-28T11:53:08.539733
| 2016-06-04T16:37:54
| 2016-06-04T16:37:54
| 59,960,073
| 0
| 0
| null | 2016-05-29T19:25:34
| 2016-05-29T19:25:34
| null |
UTF-8
|
R
| false
| false
| 606
|
r
|
Cachematrix.r
|
## Build a cache-aware matrix object: a list of closures that hold a
## matrix together with a memoised copy of its inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  set <- function(y) {
    x <<- y
    inverse <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() {
    x
  }
  setmatrix <- function(solve) {
    inverse <<- solve
  }
  getmatrix <- function() {
    inverse
  }
  list(set = set, get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## Return the inverse of the special "matrix" `x` (built by
## makeCacheMatrix), computing it only on the first call and serving the
## cached copy on subsequent calls.
cachesolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setmatrix(inverse)
  inverse
}
|
142bf1e23ab17d9de39d8f0f8c3cb7e444033b28
|
9126fddfd4e75e9015a9edf488062fd2c182e3e1
|
/recommender.R
|
d4129ba0af84874567512b655a510f4b71ba6dbd
|
[] |
no_license
|
mandixbw/netflix
|
394e225f0c6d790f1efff43c99b83bc48025f692
|
adb8afc7b98e907fcb582ee28ed1db4279d9220f
|
refs/heads/master
| 2021-10-26T15:08:25.359453
| 2019-04-13T10:57:16
| 2019-04-13T10:57:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,685
|
r
|
recommender.R
|
## Netflix-Prize-style recommender: matrix factorization via `recosystem`.
## Reads the training file, tunes/trains a latent-factor model, predicts
## ratings for the probe set, and reports exact-match accuracy and RMSE.

## SETTING UP A WORKING DIRECTORY ##
setwd("insert your working directory here")

## LOADING THE NECESSARY PACKAGES
library(dplyr)
library(zoo)
library(recosystem)

## DATA PREPARATION -- TRAINING DATA
nf <- read.table("C:/Users/Andini Eka F/Documents/R/Business Analytics EDX Course/combined_data_1.txt",sep=",",header=FALSE,fill=TRUE)
head(nf)
# Raw layout: rows like "1:" carry the movie ID; the rows that follow
# carry user ID (V1), rating (V2) and rating date (V3) for that movie.
# Reshape into tidy columns: movie_id, user_id, rating, date.
nf$V4 <- nf$V1                      # V1 will hold movie IDs, V4 user IDs
nf$V1[!grepl(":",nf$V1)] <- NA      # only "id:" rows contain a movie ID
nf$V1 <- gsub(":","",nf$V1)         # strip the trailing ":"
nf$V1 <- na.locf(nf$V1)             # carry each movie ID down to its ratings
nf <- filter(nf,!grepl(":",nf$V4))  # drop the "id:" header rows themselves
nf <- nf[,c("V1","V4","V2","V3")]
names(nf) <- c("movie_id", "user_id","rating","date")

nf$movie_id <- as.numeric(nf$movie_id)
nf$user_id <- as.numeric(nf$user_id)
nf$rating <- as.numeric(nf$rating)
summary(nf)

## DATA PREPARATION - TESTING DATA (probe set: movie/user pairs only)
test <- read.table("C:/Users/Andini Eka F/Documents/R/Business Analytics EDX Course/probe.txt",sep=",",header=FALSE,fill=TRUE)
test$V2 <- test$V1
test$V1[!grepl(":",test$V1)] <- NA
test$V1 <- gsub(":","",test$V1)
head(test)
test$V1 <- na.locf(test$V1)
test <- filter(test,!grepl(":",test$V2))
names(test) <- c("movie_id","user_id")
# FIX: convert the IDs to numeric BEFORE the join with `nf` below (join
# key types must match), and derive user_id from user_id -- the original
# assigned `as.numeric(test$movie_id)` to user_id.
test$movie_id <- as.numeric(test$movie_id)
test$user_id <- as.numeric(test$user_id)
# FIX: no placeholder `test$rating <- NA` -- it made the later join emit
# rating.x/rating.y, so `test$rating` no longer resolved to a column.

## TUNING THE MATRIX FACTORIZATION ALGORITHM TO FIND THE BEST PARAMETERS
set.seed(145)
r=Reco()
opts <- r$tune(data_memory(nf$user_id,nf$movie_id, rating=nf$rating, index1=TRUE), opts=list(dim=c(5,10), lrate=c(0.05,0.1, 0.15), niter=5, nfold=5, verbose=FALSE))
# NOTE: tuned over dim in {5, 10} and three learning rates with 5-fold CV;
# only 5 iterations here for speed -- increase with more compute.
opts$min #best option attained during the tuning process
# FIX: train on `nf` (there is no `nf_3` object) and close the call's
# parenthesis, which was missing in the original.
r$train(data_memory(nf$user_id, nf$movie_id, rating=nf$rating, index1=TRUE), opts=c(opts$min, nthread=1, niter=50))
res <- r$output(out_memory(), out_memory()) #store the latent vectors
pred <- r$predict(data_memory(test$user_id,test$movie_id, rating=NULL, index1=TRUE),out_memory()) #predict ratings for the probe set
test$pred_rating <- pred
test$pred_rating <- round(test$pred_rating,0)

# Retrieve the actual ratings for comparison with the predictions.
test <- left_join(test, nf, by= c("movie_id", "user_id"))
str(test)
test$compare <- ifelse(test$rating == test$pred_rating,1,0)
# na.rm guards pairs that have no actual rating in the training slice.
mean(test$compare, na.rm = TRUE)
## previously reported: 0.5339242 -- 53.4% of predicted ratings exact

# FIX: compute RMSE on `test` (there is no `join` object).
sqrt(mean((test$pred_rating-test$rating)^2, na.rm = TRUE))
# Benchmark: per https://www.netflixprize.com/faq.html, Cinematch scored
# an RMSE of 0.9474 on the same data.
bca996deb762ddd8d6656f075c92677c0887410d
|
f85bbc68b6a74c430db3d7923d558b2376e83b19
|
/resources/R_packages.R
|
4f75b66842f29a6def2244cc629368270e36b878
|
[
"MIT"
] |
permissive
|
skin-science/CKG
|
3e3dad9962d79ed8009896c4d685fdc990af34f1
|
3763686739066eb4fb50f065f07aa0d8f0137368
|
refs/heads/master
| 2023-06-14T09:12:29.755334
| 2021-07-12T23:05:32
| 2021-07-13T08:05:07
| 274,251,530
| 0
| 0
|
MIT
| 2020-06-22T22:03:43
| 2020-06-22T22:03:42
| null |
UTF-8
|
R
| false
| false
| 291
|
r
|
R_packages.R
|
# One-off setup script: installs the R dependencies used by the project
# (Bioconductor annotation/preprocessing packages, WGCNA and friends from
# CRAN, and the Jupyter R kernel).
install.packages('BiocManager')
BiocManager::install()
# Bioconductor packages required as WGCNA prerequisites.
BiocManager::install(c('AnnotationDbi', 'GO.db', 'preprocessCore', 'impute'))
install.packages(c('devtools', 'tidyverse', 'flashClust', 'WGCNA', 'samr'), dependencies=TRUE, repos='http://cran.rstudio.com/')
# Kernel so R can be used from Jupyter notebooks.
install.packages('IRkernel')
|
685464ab7c59693f1a3607dec847cc6dc1c83ff3
|
127fc4ab3ed4eadd8a5605eb688efbaa09675e97
|
/R/dists.R
|
4bef7f2875791d933b89350dd097a1437a9129b2
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
beckyfisher/ssdtools
|
aa3d83b2debd80df704ab81b451b0dfc05e3639d
|
297dfa8adcd0202cd5535f7ec7f64be9459179d4
|
refs/heads/master
| 2022-07-21T07:55:51.034739
| 2022-03-21T21:17:51
| 2022-03-21T21:17:51
| 233,015,874
| 0
| 1
| null | 2020-01-10T09:46:22
| 2020-01-10T09:46:21
| null |
UTF-8
|
R
| false
| false
| 2,135
|
r
|
dists.R
|
# Copyright 2021 Environment and Climate Change Canada
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Species Sensitivity Distributions
#'
#' Gets a character vector of the names of the available distributions.
#'
#' @param bcanz A flag or NULL specifying whether to only include distributions in the set that is approved by BC, Canada, Australia and New Zealand for official guidelines.
#' @param tails A flag or NULL specifying whether to only include distributions with both tails.
#' @param npars A whole numeric vector specifying which distributions to include based on the number of parameters.
#' @return A unique, sorted character vector of the distributions.
#' @family dists
#' @export
#'
#' @examples
#' ssd_dists()
#' ssd_dists(bcanz = TRUE)
#' ssd_dists(tails = FALSE)
#' ssd_dists(npars = 5)
ssd_dists <- function(bcanz = NULL, tails = NULL, npars = 2:5) {
  # Validate arguments up front (same order as before, so error
  # messages stay stable for callers).
  chk_null_or(bcanz, vld = vld_flag)
  chk_null_or(tails, vld = vld_flag)
  chk_whole_numeric(npars)
  chk_not_any_na(npars)
  chk_range(npars, c(2L, 5L))

  candidates <- ssdtools::dist_data
  # Apply each optional filter only when the caller supplied it.
  if (!is.null(bcanz)) {
    candidates <- candidates[candidates$bcanz == bcanz, ]
  }
  if (!is.null(tails)) {
    candidates <- candidates[candidates$tails == tails, ]
  }
  candidates <- candidates[candidates$npars %in% npars, ]
  candidates$dist
}
#' All Species Sensitivity Distributions
#'
#' Gets a character vector of the names of all the available distributions.
#'
#' @return A unique, sorted character vector of the distributions.
#' @family dists
#' @export
#'
#' @examples
#' ssd_dists_all()
ssd_dists_all <- function() {
  # Widest possible selection: no bcanz/tails filtering, all 2-5
  # parameter families (these are also ssd_dists()'s defaults).
  ssd_dists(npars = 2:5)
}
|
a8b686377579248a2091025cbbd30b16bb713300
|
ebe5203b2fdca46fba7f1e484ebd65dfcd72dd21
|
/270m/elevation sz and mat for 270m BCM points.R
|
2e30c4cc11a9c547518bb574e2678169d07e90f9
|
[] |
no_license
|
JosephStewart/Tracking-Climate-For-Seed-Zones
|
26303516dcb76e3fdf3e298d75619323701c6fd3
|
86037e57098d2cff32d7adfd218b23e2431955aa
|
refs/heads/master
| 2020-04-18T15:01:44.070322
| 2019-11-13T03:50:27
| 2019-11-13T03:50:27
| 167,603,312
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,162
|
r
|
elevation sz and mat for 270m BCM points.R
|
library(dismo);library(rgdal);library(ggplot2);library(data.table)
#
#
a = Sys.time()
r = raster("/Users/joaaelst/Documents/GIS_Lib/_climate/BCM_Surfaces_2015/1981_2010/tavg1981_2010_ave_HST.tif")
ca_bound = readOGR("/Users/joaaelst/Documents/GIS_Lib/Boundaries/California_Boundary_2013census/","ca_bound")
ca_bound = spTransform(ca_bound, CRS(projection(r)))
# r = crop(r, ca_bound)
# values(r) = 1
#
# Pts = rasterToPoints(r, spatial=T) # NA values are skipped, so make no values NA
# Pts$elev = extract(elev_r, Pts) # takes a few minutes
#
# # saveRDS(Pts,"/Users/joaaelst/Dropbox/SeedTransferProject/Tracking Climate For Seed Zones/270m/elevation for 270m BCM points.RDS")
# Pts = readRDS("/Users/joaaelst/Dropbox/SeedTransferProject/Tracking Climate For Seed Zones/270m/elevation for 270m BCM points.RDS")
#
# Pts@data = data.frame(elev = Pts@data$elev)
# Pts@data$elev[Pts@data$elev < -5e37] = NA
# Pts@data$elev = round(Pts@data$elev* 3.28084) # convert m to ft
#
#
#
#
#
# values(r) = Pts@data$elev
#
# plot(r)
# plot(ca_bound,add=T)
#
#
# sz = readOGR("/Users/joaaelst/Dropbox/SeedTransferProject/GIS Lib/Seed zones/","seed zones")
# sz = spTransform(sz, CRS(projection(r)))
# sz@data = data.frame(SEED_ZONE = sz@data$SEED_ZONE)
#
# o <- Pts %over% sz
# str(o)
# Pts@data$sz = o$SEED_ZONE
#
# str(Pts)
# saveRDS(Pts,"/Users/joaaelst/Dropbox/SeedTransferProject/Tracking Climate For Seed Zones/270m/elevation and seed zone for 270m BCM points.RDS")
# Pre-computed SpatialPointsDataFrame: one point per 270 m BCM cell with
# elevation (ft) and seed zone already attached (built by the commented-out
# steps above).
Pts = readRDS("/Users/joaaelst/Dropbox/SeedTransferProject/Tracking Climate For Seed Zones/270m/elevation and seed zone for 270m BCM points.RDS")
data = Pts@data
# Round a value up to the next multiple of 500 (upper bound of its 500 ft band).
ceiling_500 <- function(x) {
  500 * ceiling(x / 500)
}
# Upper bound of each point's 500 ft elevation band.
data$el_bnd_mx = ceiling_500(data$elev)
# Human-readable band label, e.g. "500 — 1000ft".
data$el_bnd = paste0(data$el_bnd_mx -500, " — ", data$el_bnd_mx, "ft")
e = seq(0, 14500, 500)
# Order the band labels from low to high elevation (factor levels drive plots).
data$el_bnd = factor(data$el_bnd, levels = paste0(e -500, " — ", e, "ft"))
# Keep only seed zone and elevation band, as a data.table.
data = data.table(data[,c("sz","el_bnd")])
# Hist Clim -------------------------------------------------------------------
# Mean annual temperature (MAT) for a period = pixel-wise mean of the BCM
# minimum (tmn) and maximum (tmx) temperature rasters, cropped to California.
# The original code repeated the same stack/mean/crop block five times with
# hard-coded paths; read_mat_hist() factors that out.
hist_dir <- "/Users/joaaelst/Documents/GIS_Lib/_climate/BCM_Surfaces_2015/Averages_10y_30y/"

# Read one historical MAT surface for a period label such as "1981_2010".
read_mat_hist <- function(period) {
  mat <- mean(stack(paste0(hist_dir, "tmn", period, "_ave_HST.tif"),
                    paste0(hist_dir, "tmx", period, "_ave_HST.tif")))
  crop(mat, ca_bound)
}

mat_1981_2010 <- read_mat_hist("1981_2010")
data$mat_1981_2010 <- values(mat_1981_2010)
plot(mat_1981_2010)

mat_1921_1950 <- read_mat_hist("1921_1950")
data$mat_1921_1950 <- values(mat_1921_1950)
# plot(mat_1921_1950)

mat_1951_1980 <- read_mat_hist("1951_1980")
data$mat_1951_1980 <- values(mat_1951_1980)
plot(mat_1951_1980)

mat_1961_1970 <- read_mat_hist("1961_1970")
data$mat_1961_1970 <- values(mat_1961_1970)
plot(mat_1961_1970)

mat_2009_2018 <- read_mat_hist("2009_2018")
data$mat_2009_2018 <- values(mat_2009_2018)
plot(mat_2009_2018)
# future scenarios -------------------------------------------------------------
# Future MAT surfaces come from two GCMs (miroc_esm, cnrm_cm5) under RCP 8.5
# and RCP 4.5, for three 30-year windows. The original code repeated the same
# four lines twelve times; the loops below add the identical columns in the
# identical order (miroc85, miroc45, cnrm85, cnrm45 — the later
# gather(mat_1981_2010:mat_2085_cnrm45) depends on this column order).
fut_dir <- "/Users/joaaelst/Documents/GIS_Lib/_climate/BCM_Surfaces_2015/Future/"

# Read one future MAT surface, e.g. read_mat_future("miroc_esm", "85", "2010_2039").
read_mat_future <- function(model, rcp, period) {
  scen_dir <- paste0(fut_dir, model, "/rcp", rcp, "_", period, "/")
  suffix <- paste0(period, "_ave_", model, "_rcp", rcp, ".tif")
  mat <- mean(stack(paste0(scen_dir, "tmn", suffix),
                    paste0(scen_dir, "tmx", suffix)))
  crop(mat, ca_bound)
}

# Map each 30-year window to the mid-point label used in the column names.
period_mid <- c("2010_2039" = "2025", "2040_2069" = "2055", "2070_2099" = "2085")
for (model in c("miroc_esm", "cnrm_cm5")) {
  model_short <- if (model == "miroc_esm") "miroc" else "cnrm"
  for (rcp in c("85", "45")) {
    for (period in names(period_mid)) {
      col <- paste0("mat_", period_mid[[period]], "_", model_short, rcp)
      # set() adds the column to the data.table by reference.
      set(data, j = col, value = values(read_mat_future(model, rcp, period)))
    }
  }
}
# Keep only cells that have a seed zone, a future-climate value and an
# elevation band.
data <- data[!is.na(data$sz) & !is.na(data$mat_2025_miroc85) & !is.na(data$el_bnd), ]
head(data)
saveRDS(data, "/Users/joaaelst/Dropbox/SeedTransferProject/Tracking Climate For Seed Zones/270m/elevation mat and seed zone for 270m BCM points.RDS", compress = F)
# Elapsed run time. Fixed: the original `a - Sys.time()` reported a negative
# duration (start minus now); elapsed time is now minus start.
Sys.time() - a
# Subsample every 20th row to keep the Shiny payload small.
# (Fixed: the original comment said "every 100th value" but the code takes
# every 20th.)
data <- data[seq(1, nrow(data), 20), ]
library(tidyr)
head(data)
# Reshape to long format: one row per (cell, climate scenario).
data_long <- gather(data, period, mat, mat_1981_2010:mat_2085_cnrm45, factor_key=F)
# Recode raster column names to display labels in one lookup instead of 17
# separate assignments. In the labels, miroc -> "HD", cnrm -> "WW",
# rcp85 -> "HE", rcp45 -> "LE".
period_labels <- c(
  mat_1921_1950    = "1921-1950",
  mat_1951_1980    = "1951-1980",
  mat_1961_1970    = "1961-1970",
  mat_1981_2010    = "1981-2010",
  mat_2009_2018    = "2009-2018",
  mat_2025_miroc85 = "2010-2039 HDHE",
  mat_2055_miroc85 = "2040-2069 HDHE",
  mat_2085_miroc85 = "2070-2099 HDHE",
  mat_2025_miroc45 = "2010-2039 HDLE",
  mat_2055_miroc45 = "2040-2069 HDLE",
  mat_2085_miroc45 = "2070-2099 HDLE",
  mat_2025_cnrm85  = "2010-2039 WWHE",
  mat_2055_cnrm85  = "2040-2069 WWHE",
  mat_2085_cnrm85  = "2070-2099 WWHE",
  mat_2025_cnrm45  = "2010-2039 WWLE",
  mat_2055_cnrm45  = "2040-2069 WWLE",
  mat_2085_cnrm45  = "2070-2099 WWLE"
)
data_long$period <- unname(period_labels[data_long$period])
unique(data_long$period)
class(data_long)
str(data_long)
# dir.create("/Users/joaaelst/Dropbox/SeedTransferProject/Tracking Climate For Seed Zones/shiny/CA_Seed_Zone_CC_BCM/lib/uncompressed_ss100/")
saveRDS(data_long, "/Users/joaaelst/Dropbox/SeedTransferProject/Tracking Climate For Seed Zones/shiny/CA_Seed_Zone_CC_BCM/lib/uncompressed_ss100/data_long_mat.RDS", compress = F)
# saveRDS(data_long, "/Users/joaaelst/Dropbox/SeedTransferProject/Tracking Climate For Seed Zones/shiny/CA_Seed_Zone_CC_BCM/lib/data_long_mat.RDS")
# Elapsed run time (fixed sign: original was `a - Sys.time()`).
Sys.time() - a
head(data_long)
# Example plot: distribution of MAT by 500 ft elevation band for one seed
# zone, comparing two climate periods as paired boxplots. Wrapped in
# system.time() to measure how long one plot takes to build.
system.time({
  scenarios = c("1961-1970", "2009-2018")
  d = data_long[data_long$period %in% scenarios,]
  sz = "526"
  d = d[d$sz == sz,]
  str(d)
  # Fixed user-facing typo in the y-axis label: "anual" -> "annual".
  ggplot(aes(y = mat, x = el_bnd, fill = period), data = d) + geom_boxplot() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ggtitle(paste("Seed Zone", sz)) + xlab("elevational band") +
    ylab("mean annual temperature [°C]") +
    theme(axis.title = element_text(size = rel(1.5))) +
    theme(axis.text = element_text(size = rel(1.3))) +
    theme(legend.text = element_text(size = rel(1.3))) +
    theme(legend.title = element_text(size = rel(1.3)))
})
table(d$el_bnd)
|
9b1af496bb9ea75ddb9dccb04c61094ef45954d3
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googledatastorev1beta3.auto/man/GqlQueryParameter.Rd
|
1fc8c18af15a35a0700be474a9106f79b07c1ed8
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 509
|
rd
|
GqlQueryParameter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datastore_objects.R
\name{GqlQueryParameter}
\alias{GqlQueryParameter}
\title{GqlQueryParameter Object}
\usage{
GqlQueryParameter(value = NULL, cursor = NULL)
}
\arguments{
\item{value}{A value parameter}
\item{cursor}{A query cursor}
}
\value{
GqlQueryParameter object
}
\description{
GqlQueryParameter Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A binding parameter for a GQL query.
}
|
87a98ffede8c3909ee29686238e07fafbbd27a5f
|
c5bb677e0639c892319362332515769f9d1270fd
|
/AmyloGram/functions.R
|
1dfa2dafa631585dd59a35834821b8e71c321716
|
[] |
no_license
|
michbur/prediction_amyloidogenicity_ngram
|
ce90675c6387405118617657080e9bfee685d00f
|
e3b41e18fbeb6a667ebc3b5c7eaf395c526c7323
|
refs/heads/master
| 2020-04-06T07:10:29.026876
| 2016-09-05T07:22:44
| 2016-09-05T07:22:44
| 60,834,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,993
|
r
|
functions.R
|
library(signalHsmm)
library(biogram)
library(ranger)
# Train an AmyloGram amyloidogenicity model.
#
# Args:
#   seqs_list: list of amino-acid sequences (character vectors); sequences
#              longer than max_len are dropped from training.
#   ets:       target labels, one per element of seqs_list.
#   max_len:   maximum sequence length retained for training.
#   aa_group:  reduced amino-acid alphabet (encoding) used to degenerate
#              sequences before n-gram counting.
#
# Returns a list with the fitted ranger probability forest (`rf`), the
# selected n-gram features (`imp_features`) and the encoding (`enc`).
make_AmyloGram <- function(seqs_list, ets, max_len, aa_group) {
  seq_lengths <- lengths(seqs_list)
  # Pad kept sequences with NA to a common width and lower-case them.
  seqs_m <- tolower(t(sapply(seqs_list[seq_lengths <= max_len], function(i)
    c(i, rep(NA, max_len - length(i)))
  )))
  # Slide a 6-residue window over each sequence; `id` tracks the source protein.
  gl <- do.call(rbind, lapply(1L:nrow(seqs_m), function(i) {
    res <- do.call(rbind, strsplit(decode_ngrams(seq2ngrams(seqs_m[i, ][!is.na(seqs_m[i, ])], 6, a()[-1])), ""))
    cbind(res, id = paste0("P", rep(i, nrow(res))))
  }))
  # Count 1-, 2- and 3-grams (with the listed gap patterns) on the reduced
  # alphabet; column 7 of `gl` is the protein id, so it is excluded.
  bitrigrams <- as.matrix(count_multigrams(ns = c(1, rep(2, 4), rep(3, 3)),
                                           ds = list(0, 0, 1, 2, 3, c(0, 0), c(0, 1), c(1, 0)),
                                           seq = degenerate(gl[, -7], aa_group),
                                           u = as.character(1L:length(aa_group))))
  # Binarize counts to presence/absence.
  fdat <- bitrigrams > 0
  storage.mode(fdat) <- "integer"
  fets_raw <- ets[seq_lengths <= max_len]
  # Each protein yields (length - 5) hexapeptide windows; replicate its label
  # once per window so targets line up with rows of `fdat`.
  flens <- seq_lengths[seq_lengths <= max_len] - 5
  fets <- unlist(lapply(1L:length(flens), function(i) rep(fets_raw[i], flens[i])))
  # Keep n-grams associated with the target (first p-value bin: (0, 0.05],
  # unadjusted since adjust = NULL).
  test_bis <- test_features(fets, fdat, adjust = NULL)
  imp_bigrams <- cut(test_bis, breaks = c(0, 0.05, 1))[[1]]
  train_data <- data.frame(as.matrix(fdat[, imp_bigrams]), tar = factor(fets))
  list(rf = ranger(tar ~ ., train_data, write.forest = TRUE, probability = TRUE),
       imp_features = imp_bigrams,
       enc = aa_group)
}
# Predict amyloidogenicity with a model produced by make_AmyloGram().
#
# Args:
#   model:     list with `rf`, `imp_features` and `enc` (see make_AmyloGram).
#   seqs_list: named list of amino-acid sequences; each must be >= 6 residues.
#
# Returns a data.frame with one row per input sequence: `Name` and
# `Probability`, where Probability is the maximum window-level probability
# across the sequence's hexapeptide windows.
predict_AmyloGram <- function(model, seqs_list) {
  if(min(lengths(seqs_list)) < 6) {
    stop("Sequences shorter than 6 residues cannot be processed.")
  }
  # Pad to a common width and lower-case, mirroring the training pipeline.
  seqs_m <- tolower(t(sapply(seqs_list, function(i)
    c(i, rep(NA, max(lengths(seqs_list)) - length(i))))))
  # Slide a 6-residue window over each sequence; `id` tracks the source protein.
  gl <- do.call(rbind, lapply(1L:nrow(seqs_m), function(i) {
    res <- do.call(rbind, strsplit(decode_ngrams(seq2ngrams(seqs_m[i, ][!is.na(seqs_m[i, ])], 6, a()[-1])), ""))
    cbind(res, id = paste0("P", rep(i, nrow(res))))
  }))
  # Same n-gram counting as in training, using the model's stored encoding.
  bitrigrams <- as.matrix(count_multigrams(ns = c(1, rep(2, 4), rep(3, 3)),
                                           ds = list(0, 0, 1, 2, 3, c(0, 0), c(0, 1), c(1, 0)),
                                           seq = degenerate(gl[, -7], model[["enc"]]),
                                           u = as.character(1L:length(model[["enc"]]))))
  test_ngrams <- bitrigrams > 0
  storage.mode(test_ngrams) <- "integer"
  # Number of windows contributed by each protein.
  test_lengths <- lengths(seqs_list) - 5
  # Per-window probability of the positive class (column 2 of the forest's
  # probability predictions), tagged with the protein index.
  preds <- data.frame(prob = predict(model[["rf"]], data.frame(test_ngrams[, model[["imp_features"]]]))[["predictions"]][, 2],
                      prot = unlist(lapply(1L:length(test_lengths), function(i) rep(i, test_lengths[i])))
  )
  # Sequence-level score: the maximum window probability.
  data.frame(Name = names(seqs_list),
             Probability = vapply(unique(preds[["prot"]]), function(single_prot)
               max(preds[preds[["prot"]] == single_prot, "prob"]),
               0)
  )
}
# Attach a yes/no amyloidogenicity call at the given probability cutoff.
#
# Args:
#   x:      data.frame with a numeric `Probability` column.
#   cutoff: probabilities strictly greater than this are called "yes".
#
# Returns `x` with an added `Amyloidogenic` factor column.
make_decision <- function(x, cutoff) {
  calls <- ifelse(x[["Probability"]] > cutoff, "yes", "no")
  data.frame(x, Amyloidogenic = factor(calls))
}
|
e6cee0c354efbb536d1f4917f728dba424f78431
|
7b2cb501e52fdc01040b685b7d245c50bb4dc6b8
|
/finemap_data_1000G.R
|
471cdde4febd5eb4e2db7d7ff0ab6e95d48d12bb
|
[] |
no_license
|
hsowards/finelymapped
|
f9d0b31604d62b707811cc386b9983a2f60f4b47
|
f604d548c7b6fc1af059ed9b51326229ccfb55a7
|
refs/heads/main
| 2023-06-26T11:41:29.733037
| 2021-08-02T15:19:42
| 2021-08-02T15:19:42
| 350,479,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,944
|
r
|
finemap_data_1000G.R
|
source("/data/Brown_lab/dap/source.R") #this reads in the library and my functions I've written
args <- commandArgs(trailingOnly=T)
#1000G data processing for dap-g
sumstats_tongwu <- readr::read_tsv(paste0("/data/Brown_lab/dap/sumstats/", args[4], ".GWAS.txt"))
ld_tongwu <- readr::read_tsv(paste0("/data/Brown_lab/dap/LD_1000G/", args[4], ".LD.gz"), col_names = F)
#pulling the arguments and setting the region
pos <- as.numeric(args[2])
dist <- as.numeric(args[3])
start <- (pos - dist)
end <- (pos + dist)
#Clean sumstats
gwas <- sumstats_tongwu %>%
filter(pos > start & pos < end)
#bim
bim <- readr::read_tsv(paste0("/data/Brown_lab/dap/LD_1000G/", args[4], ".LD.gz"), col_names = F) %>%
select(X1, X2, X3, X4, X5) %>%
filter(X2 > start & X2 < end)
gwas.bim <- merge(bim, gwas, by.x = "X2", by.y = "pos", sort=F)
#Find out how many mismatches there are
length(which(gwas.bim$A1!=gwas.bim$ref))
#Flip z-scores
gwas.bim$Z_adjusted <- gwas.bim$Z_fixed
# Subtract the z-score from 0 for the SNPs which are mismatched with the LD reference
gwas.bim$Z_adjusted[which(gwas.bim$A1!=gwas.bim$ref)] <- 0-gwas.bim$Z_adjusted[which(gwas.bim$A1!=gwas.bim$ref)]
sumstats <- gwas.bim %>%
filter(!is.na(zscore))
##this is the cleanLD function from my aource file adjjusted for 1000G
##this code aligns the summary statistics and LD matrix
#CLEAN LD
ld_headers <- ld_tongwu %>%
select(-X1, pos = X2, -X3, -X4, -X5)
ld_headers$pos <- paste("X", ld_headers$pos, sep="")
headers <- ld_headers$pos
ld_parable <- ld_headers %>%
select(-pos) %>%
set_names(headers) %>%
mutate(pos = ld_tongwu$X2) %>%
select(pos, contains("X"))
#defining extra snps
R_extras <- anti_join(ld_tongwu, sumstats, by = c("X2")) #if there are no extras? dont proceed?
ex_snps <- R_extras$X2 #pos of extra snps
ex_snps_cols <- paste("X", ex_snps, sep="") #column of extra snps
ld_filter <- ld_parable %>%
filter(pos %in% ex_snps) #keeping missing snp rows
#clean LD matrix to region (rows)
ld_clean <- ld_parable %>%
anti_join(ld_filter, by = c('pos')) %>% #antijoin filters out positions that are in R_filter from R_parable
select(-all_of(ex_snps_cols)) %>% #selecting out missing snp columns
filter(pos > start & pos < end)
#clean LD matrix to region (cols)
region_snps <- ld_clean$pos
region_snps_col <- paste("X", region_snps, sep = "")
R_clean <- ld_clean %>%
select(region_snps_col)
R <- as.matrix(R_clean)
bps <- ld_clean$pos
sumstats_filtered <- sumstats %>% #dataset with snps in LD matrix
filter(X2 %in% bps) %>%
distinct(X2, .keep_all = T) #removing one duplicated position
#Writing for dap-g
sumstats_filtered %>%
select(snp_name_i = rsnum, z_i = zscore) %>%
write_tsv(paste0("/data/Brown_lab/dap/dap_input/sumstats/", args[4], "_sumstats_1000G.txt"))
#Writing for dap-g
R %>%
as.data.frame() %>%
write_tsv(paste0("/data/Brown_lab/dap/dap_input/ld/", args[4], "_LD_1000G.txt"), col_names = F)
|
fbc8322e6a03b7b337ac0043d244238c322b42d2
|
9a7792bae32e2512e0a74aed94b8c0aef8433d6c
|
/inst/doc/stars4.R
|
f35f7495aba88130515942076ecd56cea593e072
|
[] |
no_license
|
cran/stars
|
0c7109e878c92e02464ed13ecf23ffe90279c54a
|
9c98ae0f331962170bd1f4b72e668c7c882cee1d
|
refs/heads/master
| 2023-09-04T04:24:36.219481
| 2023-08-11T02:30:02
| 2023-08-11T04:30:48
| 145,904,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,946
|
r
|
stars4.R
|
# R code extracted from the stars "stars4" vignette (the "## ----" markers are
# knitr chunk delimiters from the purl output). Appears to be autogenerated —
# edit the vignette source rather than this file.
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE, collapse = TRUE, dev = "png")
set.seed(13579)
knitr::opts_chunk$set(fig.height = 4.5)
knitr::opts_chunk$set(fig.width = 6)
## ----fig.width=4.5, fig.height=4----------------------------------------------
suppressPackageStartupMessages(library(stars))
m = matrix(1:20, nrow = 5, ncol = 4)
dim(m) = c(x = 5, y = 4) # named dim
(s = st_as_stars(m))
## -----------------------------------------------------------------------------
dim(s[[1]])
## ----fig.width=4.5, fig.height=4----------------------------------------------
image(s, text_values = TRUE, axes = TRUE)
## ----fig.width=4.5, fig.height=4----------------------------------------------
attr(s, "dimensions")[[2]]$delta = -1
image(s, text_values = TRUE, axes = TRUE)
## -----------------------------------------------------------------------------
tif = system.file("tif/L7_ETMs.tif", package = "stars")
st_dimensions(read_stars(tif))["y"]
## -----------------------------------------------------------------------------
str(attr(st_dimensions(s), "raster"))
## -----------------------------------------------------------------------------
attr(attr(s, "dimensions"), "raster")$affine = c(0.1, 0.1)
plot(st_as_sf(s, as_points = FALSE), axes = TRUE, nbreaks = 20)
## -----------------------------------------------------------------------------
atan2(0.1, 1) * 180 / pi
## -----------------------------------------------------------------------------
attr(attr(s, "dimensions"), "raster")$affine = c(0.1, 0.2)
plot(st_as_sf(s, as_points = FALSE), axes = TRUE, nbreaks = 20)
## -----------------------------------------------------------------------------
atan2(c(0.1, 0.2), 1) * 180 / pi
## -----------------------------------------------------------------------------
x = c(0, 0.5, 1, 2, 4, 5) # 6 numbers: boundaries!
y = c(0.3, 0.5, 1, 2, 2.2) # 5 numbers: boundaries!
(r = st_as_stars(list(m = m), dimensions = st_dimensions(x = x, y = y)))
st_bbox(r)
image(r, axes = TRUE, col = grey((1:20)/20))
## -----------------------------------------------------------------------------
x = c(0, 0.5, 1, 2, 4) # 5 numbers: offsets only!
y = c(0.3, 0.5, 1, 2) # 4 numbers: offsets only!
(r = st_as_stars(list(m = m), dimensions = st_dimensions(x = x, y = y)))
st_bbox(r)
## -----------------------------------------------------------------------------
x = c(0, 1, 2, 3, 4) # 5 numbers: offsets only!
y = c(0.5, 1, 1.5, 2) # 4 numbers: offsets only!
(r = st_as_stars(list(m = m), dimensions = st_dimensions(x = x, y = y)))
st_bbox(r)
## -----------------------------------------------------------------------------
x = st_as_stars(matrix(1:9, 3, 3),
st_dimensions(x = c(1, 2, 3), y = c(2, 3, 10), cell_midpoints = TRUE))
## ----eval=FALSE---------------------------------------------------------------
# install.packages("starsdata", repos = "http://pebesma.staff.ifgi.de", type = "source")
## -----------------------------------------------------------------------------
(s5p = system.file("sentinel5p/S5P_NRTI_L2__NO2____20180717T120113_20180717T120613_03932_01_010002_20180717T125231.nc", package = "starsdata"))
## ----echo=FALSE---------------------------------------------------------------
EVAL = s5p != ""
## ----eval=EVAL----------------------------------------------------------------
subs = gdal_subdatasets(s5p)
subs[[6]]
## ----eval=EVAL----------------------------------------------------------------
gdal_metadata(subs[[6]], "GEOLOCATION")
## ----eval=EVAL----------------------------------------------------------------
nit.c = read_stars(subs[[6]])
threshold = units::set_units(9e+36, mol/m^2)
nit.c[[1]][nit.c[[1]] > threshold] = NA
nit.c
## ----eval=EVAL----------------------------------------------------------------
plot(nit.c, breaks = "equal", reset = FALSE, axes = TRUE, as_points = TRUE,
pch = 16, logz = TRUE, key.length = 1)
maps::map('world', add = TRUE, col = 'red')
## ----eval=EVAL----------------------------------------------------------------
plot(nit.c, breaks = "equal", reset = FALSE, axes = TRUE, as_points = FALSE,
border = NA, logz = TRUE, key.length = 1)
maps::map('world', add = TRUE, col = 'red')
## ----eval=EVAL----------------------------------------------------------------
(nit.c_ds = stars:::st_downsample(nit.c, 8))
plot(nit.c_ds, breaks = "equal", reset = FALSE, axes = TRUE, as_points = TRUE,
pch = 16, logz = TRUE, key.length = 1)
maps::map('world', add = TRUE, col = 'red')
## ----eval=EVAL----------------------------------------------------------------
plot(nit.c_ds, breaks = "equal", reset = FALSE, axes = TRUE, as_points = FALSE,
border = NA, logz = TRUE, key.length = 1)
maps::map('world', add = TRUE, col = 'red')
## ----eval=EVAL----------------------------------------------------------------
w = st_warp(nit.c, crs = 4326, cellsize = 0.25)
plot(w)
|
a45b3268579ad1ded800995b671fa67ae94e91e4
|
8691943a2547990118c85beefecdc43312371618
|
/man/textcolor.Rd
|
304a16bbeaa024570cd3c4fa783a9a68774abd1a
|
[] |
no_license
|
Flavjack/GerminaR
|
f0d97cbf735520db702538601e2cf511527cf66c
|
3535a1aea0729abe5ba79114386885d487f946f4
|
refs/heads/master
| 2022-09-04T10:21:15.391932
| 2022-05-29T16:19:10
| 2022-05-29T16:19:10
| 49,505,163
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 840
|
rd
|
textcolor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/textcolor.R
\name{textcolor}
\alias{textcolor}
\title{Colourise text for display in the terminal.}
\usage{
textcolor(text, fg = "red", bg = NULL)
}
\arguments{
\item{text}{character vector}
\item{fg}{foreground colour, defaults to red}
\item{bg}{background colour, defaults to transparent}
}
\description{
If R is not currently running in a system that supports terminal colours
the text will be returned unchanged.
}
\details{
Allowed colours are: black, blue, brown, cyan, dark gray, green, light
blue, light cyan, light gray, light green, light purple, light red,
purple, red, white, yellow
}
\examples{
print(textcolor("Red", "red"))
cat(textcolor("Red", "red"), "\n")
cat(textcolor("White on red", "white", "red"), "\n")
}
\author{
testthat package
}
|
e0ce68f7e72674094c8e1b8959ad49ba85f9f62e
|
163ceeb94d49b70d43cd707cbc5de03164a1ce50
|
/R/rollmean.R
|
b923dfef8e4b3add26436cdce9eb74bc507cb15e
|
[] |
no_license
|
privefl/bigutilsr
|
e8cce921638d1327a1038f6ac9b237eae9ca87de
|
bb760d109193d2163e869d9d231a8fdcba2ac96e
|
refs/heads/master
| 2022-12-27T01:39:56.076386
| 2022-12-20T14:36:53
| 2022-12-20T14:36:53
| 199,856,656
| 11
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 808
|
r
|
rollmean.R
|
################################################################################

#' Gaussian smoothing
#'
#' Smooths a numeric vector with Gaussian weights over a centered window of
#' `2 * floor(size) + 1` points.
#'
#' @param x Numeric vector.
#' @param size Radius of the smoothing (smaller than half of the length of `x`).
#'   If using `size = 0`, it returns `x`.
#'
#' @return Numeric vector of the same length as `x`, smoothed.
#' @export
#'
#' @examples
#' (x <- rnorm(10))
#' rollmean(x, 3)
rollmean <- function(x, size) {
  # Validate `size` before using it; previously the negativity check ran only
  # after `size` had already been used to compute the window length.
  if (size < 0) stop("Parameter 'size' must be positive.")
  if (size == 0) return(x)
  len <- 2 * floor(size) + 1
  if (len >= length(x)) stop("Parameter 'size' is too large.")
  # Gaussian weights evaluated at `len` evenly spaced standard-normal
  # quantiles, giving a symmetric kernel that tapers toward the edges.
  lims <- qnorm(range(ppoints(len)))
  weights <- dnorm(seq(lims[1], lims[2], length.out = len))
  roll_mean(x, weights)
}

################################################################################
|
c6668f85ab1a7dfb37d06008f8730a2da1466435
|
737d67e2884b33324003a7303351f5abf2c419e6
|
/HivePlots/WGCNA/FromChen/moduleCandidate.R
|
08166734eeb3b9405b535cee194914d7d1602c03
|
[
"Apache-2.0"
] |
permissive
|
bwbai/ATLANTiC
|
ad576ec305120a8b5012d08cad7f11a0c4ab217e
|
c831ac3e0c6ae0ed9be130a1959b50a6bc41ce2f
|
refs/heads/master
| 2022-04-19T18:42:15.671658
| 2020-04-21T11:41:52
| 2020-04-21T11:41:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,779
|
r
|
moduleCandidate.R
|
# Build a supplementary table of WGCNA module candidates for both cell-line
# panels (NCI60, CRC65), annotating each phosphosite with its gene, kinase
# status, sequence window and position.
library(stringr)
library(fastmatch)
# Reference kinase list (used to flag genes as kinases).
kin <- read.delim("Dat/Uniprot/Annotation_Kinase_list.txt", stringsAsFactors = FALSE)
# Phosphosite (tryp) and full-proteome (fp) annotations.
tryp <- readRDS('/media/kusterlab/users_files/martin/postdoc/projects/phosphoproject/dat/preproc/crc65_psites_annotation.rds')
# NOTE(review): `fp` is loaded but never used below — confirm it is needed.
fp <- readRDS('/media/kusterlab/users_files/martin/postdoc/projects/phosphoproject/dat/preproc/crc65_fp_annotation.rds')
# WGCNA module assignments per panel.
mod_crc <- read.csv("/media/kusterlab/users_files/martin/postdoc/projects/phosphoproject/res/20180302_wgcna_allsites_allproteins_incnas_crc65_nonas_crc65/CSVs/wgcna_NoImputation.csv", stringsAsFactors = FALSE)
mod_nci <- read.csv("/media/kusterlab/users_files/martin/postdoc/projects/phosphoproject/res/20180308_wgcna_allsites_allproteins_incnas_nci60_nonas_nci60/CSVs/wgcna_NoImputation.csv", stringsAsFactors = FALSE)
# col - "Gene", "id", "sequence window", "position", "isKinase", "panel"
# Gene symbol is the part of the id before the first underscore.
mod_crc$gene <- str_split_fixed(mod_crc$id, "_", 2)[, 1]
mod_crc$isKinase <- ""
mod_crc$isKinase[mod_crc$gene %in% kin$Name] <- "TRUE"
mod_crc$sequenceWindow <- tryp$`Sequence window`[fmatch(mod_crc$id, tryp$label)]
mod_crc$position <- tryp$Positions[fmatch(mod_crc$id, tryp$label)]
mod_crc$panel <- "CRC65"
mod_nci$gene <- str_split_fixed(mod_nci$id, "_", 2)[, 1]
mod_nci$isKinase <- ""
mod_nci$isKinase[mod_nci$gene %in% kin$Name] <- "TRUE"
# NOTE(review): NCI60 windows/positions are looked up in the CRC65 annotation
# object (`tryp`) — confirm this is intentional and not a copy-paste slip.
mod_nci$sequenceWindow <- tryp$`Sequence window`[fmatch(mod_nci$id, tryp$label)]
mod_nci$position <- tryp$Positions[fmatch(mod_nci$id, tryp$label)]
mod_nci$panel <- "NCI60"
# Combine panels and order/select the output columns.
mod <- rbind(mod_nci, mod_crc)
mod <- mod[, c("gene", "isKinase", "id", "moduleColor", "sequenceWindow", "position", "panel")]
head(mod)
write.table(mod, file = "Res/20180314_wgcnaSupTables/moduleCandidates.txt",
            col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
|
219b2755da95400e8e0f0203ede6283c31497b13
|
438e15c93ab0264fcb168cd099b13e8ae8c9d4f5
|
/Machine Learning Script.R
|
27db94f3c6e5a28507e77f6825bc1d20af0d3499
|
[] |
no_license
|
Lunatico9/R-ML-Movielens
|
d857b04350a9b9e946d3fc77bcccba6f6acf380b
|
ceb4beb4e50c94bed976c455f5c962a095a0199f
|
refs/heads/master
| 2020-09-20T12:28:57.846194
| 2019-12-06T23:46:12
| 2019-12-06T23:46:12
| 224,477,415
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,396
|
r
|
Machine Learning Script.R
|
################################
# Create edx set, validation set
################################
# Note: this process could take a couple of minutes
# Course-supplied boilerplate: download MovieLens 10M and split it into the
# working set (edx) and a held-out validation set.
# NOTE(review): require() + install.packages() is install-on-demand at runtime.
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat is "::"-delimited; convert to tabs and read with fread.
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
library(caret)
library(tidyverse)
###########################
#Machine Learning Algorithm
###########################
#create training and test sets from edx (90% / 10% split for tuning lambda)
train_ind = createDataPartition(edx$rating, p = 0.9, list = FALSE)
train_edx = edx[train_ind,]
test_edx = edx[-train_ind,]
#remove from test set users and movies not present in training set
# (otherwise their biases would be undefined at prediction time)
test_edx <- test_edx %>%
semi_join(train_edx, by = "movieId") %>%
semi_join(train_edx, by = "userId")
#define RMSE output function for solution evaluation
# Root mean squared error between observed and predicted ratings (numeric
# vectors of equal length); lower is better.
RMSE <- function(true_ratings, predicted_ratings) {
  squared_errors <- (true_ratings - predicted_ratings)^2
  sqrt(mean(squared_errors))
}
# lambda_RMSE: test-set RMSE of the regularized movie-effect + user-effect
# model for a given penalty `lambda`. Larger lambda shrinks the per-movie
# and per-user effects toward zero (denominator n() + lambda).
# Relies on the globals `train_edx` and `test_edx` created above and the
# RMSE() helper.
#optimize tuning parameter lambda validating on the test set
lambda_RMSE = function(lambda) {
#calculate overall rating mean
mu <- mean(train_edx$rating)
#calculate regularized movie bias using lambda (residual sum shrunk by n + lambda)
b_i <- train_edx %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+lambda))
#calculate regularized user bias using lambda, after removing the movie bias
b_u <- train_edx %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+lambda))
#predict ratings on the test set: overall mean + movie bias + user bias
predicted_ratings <-
test_edx %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
pull(pred)
return(RMSE(test_edx$rating, predicted_ratings))
}
# Two-stage grid search for the penalty: a coarse pass over [1, 10], then a
# fine pass in a +/-0.25 neighborhood of the coarse optimum.
#try lambdas from 1 to 10 by increments of 0.25
lambdas = seq(1,10,0.25)
rmses = sapply(lambdas, lambda_RMSE)
#pick best lambda
lambda = lambdas[which.min(rmses)]
#try lambdas in the neighborhood of the previous optimum by increments of 0.005
lambdas = seq(lambda-0.25,lambda+0.25, 0.005)
rmses = sapply(lambdas, lambda_RMSE)
#pick best lambda
lambda = lambdas[which.min(rmses)]
###################################
#Final prediction on validation set
###################################
# Refit the bias terms on ALL of edx (not just the internal training split)
# using the tuned lambda, then score once on the held-out validation set.
#calculate overall rating mean
mu <- mean(edx$rating)
#calculate regularized movie bias with optimized lambda
b_i <- edx %>%
group_by(movieId) %>%
summarize(b_i = sum(rating - mu)/(n()+lambda))
#calculate regularized user bias with optimized lambda
b_u <- edx %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarize(b_u = sum(rating - b_i - mu)/(n()+lambda))
#predict ratings on the validation set
predicted_ratings <-
validation %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
pull(pred)
#display resulting RMSE
RMSE(validation$rating, predicted_ratings)
|
13a11f0636f349d6ac7ad6d0f39b466a0e46a138
|
5fed6d674f165c35b61f675caed58a59b510e1c3
|
/Replication/mps_analysis.R
|
445e5f34a89dc9ebd808daebb9ae493a0a42d93b
|
[] |
no_license
|
abyanka/cpt
|
9ba41edaad4dac2ed819f2f20f628fd5f96bc9a6
|
1b68d40162927db87efd50453e8e71f39e5e7fd1
|
refs/heads/master
| 2020-04-14T21:02:10.593272
| 2018-10-20T19:57:20
| 2018-10-20T19:57:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,067
|
r
|
mps_analysis.R
|
##############################################################################################################
# Description: The program replicates all the figures, tables and results
# of the MPs for Sale: Eggers and Hainmueller (2009) data application
#
#
##############################################################################################################
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- a known
# anti-pattern in scripts; kept here because the original relies on it.
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
##########################################################
# RDD plot figure: CPT vs energy and crossmatch
##########################################################
# Loads the MPs.analysis object used below (expected in the working dir).
load("MPs.rda")
### Extract window sizes, observation counts and the per-test p-values
### (crossmatch, energy, forest/CPT) from an analysis object into one
### results matrix with a row per window.
### FIX: iterate with seq_len() instead of 1:length(), which would have
### iterated c(1, 0) and errored on a zero-window input.
getresults = function(analysis)
{
n_windows = length(analysis$windowsizes)
results = matrix(NA, n_windows, 5)
colnames(results) = c("window", "observations", "CrossMatch", "Energy", "CPT")
for (i in seq_len(n_windows))
{
results[i, ] = c(analysis$windowsizes[i],
analysis$observations[i],
analysis$analyses["crossmatch", i],
analysis$analyses["energy", i],
analysis$analyses["forest", i])
}
return(results)
}
### MPs for sale: ###
# Reshape the results matrix to long form (one row per window x test type)
# and plot p-values against window size for each test.
dp = as.data.frame(getresults(MPs.analysis))
df=gather(dp[,-1], value = pv ,key = test.type, -observations )
p.cpt <- ggplot(df,aes(x=observations,y=pv,col=test.type, shape=test.type))+
geom_point(size=2.5)+geom_line()+
labs(
title = "",
y = "P-value \n" ,
x = "Number of Obs. in window \n")+
theme_bw()+
theme(panel.border = element_blank(),axis.line = element_line(colour = "black"))+
geom_hline(yintercept = 0.05, lty=2,col="black")+
scale_colour_grey()+
guides(col=guide_legend(title="Test type: "),shape=guide_legend(title="Test type: "))+ # adding legend title
theme(legend.position="bottom") # legend position
# Write the figure to PDF, marking the Eggers & Hainmueller window choice
# (164 observations) with a dashed vertical line.
pdf("output/figures/MPs_rdd_window.pdf",width=8,height=7)
p.cpt+geom_vline(xintercept = 164, col="grey",lwd=1,lty=2)+
annotate("text", x = 205, y = 1,
label = paste("EH choosen window")
,col="black")
dev.off()
|
cfa9d817e5be58e2aa7d2a2840a26863d1b01702
|
89dfa63f311c4e6f6afefd64d0650a19f5e9a81a
|
/server.R
|
b953cad5010a1d40c80ec40a99183743d53f1716
|
[
"Apache-2.0"
] |
permissive
|
sdu-ncsu/st558-project3
|
3ea01b05011067fa86feba08e07612c6f2e57958
|
d4695b7a54e2a061348683eb59ad9cfe5fabc291
|
refs/heads/master
| 2023-01-19T18:00:16.633332
| 2020-11-18T02:29:44
| 2020-11-18T02:29:44
| 311,433,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,519
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tidyverse);
library(readxl);
library(plotly);
library(caret);
# Load the Chicago air-quality data, dropping identifier/date columns and
# any rows with missing values; `air` is shared by all server outputs below.
air <- read_delim("Chicago.csv", delim = ",") %>% select(-c(X, city, date, time, season, year)) %>% drop_na();
# Define server logic required to draw a histogram
# Server logic for the Chicago air-quality explorer. Uses the module-level
# `air` data frame and exposes: a filtered table + CSV download, variable
# summaries (text + box plot), a k-means scatter plot, and linear / random
# forest models of `death` with predictions.
shinyServer(function(input, output) {

    # Subset `air` on the chosen column and threshold. Uses the tidy-eval
    # .data[[...]] pronoun instead of eval(parse(...)).
    getFilteredData <- reactive({
        if (input$logical == 'gt') {
            air %>% filter(.data[[input$subsetVar]] > input$filterValue)
        } else {
            air %>% filter(.data[[input$subsetVar]] < input$filterValue)
        }
    })

    # Single column chosen as the linear-model predictor.
    getLmColData <- reactive({
        air %>% select(input$xlmcol)
    })

    # Single column chosen for the summary panel.
    getColData <- reactive({
        air %>% select(input$varSum)
    })

    output$summaryText <- renderText({
        summary(getColData())
    })

    # Fit a random forest predicting `death` (triggered by the mtry input);
    # returns the fitted model plus its hold-out performance.
    getRfFit <- eventReactive(input$mtry, {
        set.seed(92)
        trainIndex <- createDataPartition(air$death,
                                          p = 0.8, list = FALSE)
        airTrain <- air[as.vector(trainIndex), ]
        airTest <- air[-as.vector(trainIndex), ]
        my_grid <- expand.grid(mtry = input$mtry:input$mtry)
        rfFit <- train(death ~ ., data = airTrain,
                       preProcess = c("center", "scale"),
                       method = "rf",
                       tuneGrid = my_grid,
                       trControl = trainControl(method = "cv", number = 4))
        testPerformance <- predict(rfFit, newdata = airTest)
        testResult <- postResample(testPerformance, obs = airTest$death)
        list("fit" = rfFit, "testResult" = testResult)
    })

    # NOTE: the original also bound output$rfResults to renderDataTable();
    # that binding was immediately overwritten by the renderUI below, so the
    # dead code has been removed.

    # Cross-validated RF metrics.
    output$rfResults <- renderUI({
        withMathJax(
            paste0("RSME = ", getRfFit()[[1]]$results[[2]]),
            br(),
            paste0("\\( R^2 = \\)", getRfFit()[[1]]$results[[3]]),
            br(),
            paste0("\\( MAE = \\)", getRfFit()[[1]]$results[[4]])
        )
    })

    # Hold-out RF metrics.
    output$rfTestResults <- renderUI({
        withMathJax(
            paste0("RSME = ", getRfFit()[[2]][[1]]),
            br(),
            paste0("\\( R^2 = \\)", getRfFit()[[2]][[2]]),
            br(),
            paste0("\\( MAE = \\)", getRfFit()[[2]][[3]])
        )
    })

    # RF prediction for user-supplied covariate values (`death` column is a
    # placeholder required by the model formula's terms).
    output$rfPredictResults <- renderUI({
        newValues <- data.frame("death" = 100,
                                "temp" = input$rfPredictTemp,
                                "dewpoint" = input$rfPredictDewpoint,
                                "pm10" = input$rfPredictPm10,
                                "o3" = input$rfPredictO3)
        withMathJax(
            paste0("Death = ", predict(getRfFit()[[1]], newValues))
        )
    })

    # Simple linear regression of death on the chosen predictor, plus a
    # prediction at the user-supplied value.
    # BUG FIX: the original if/else chain had an empty 'o3' branch, so no
    # prediction was rendered for o3. reformulate() plus a dynamically named
    # newdata frame handles every predictor uniformly.
    output$lmResults <- renderUI({
        fit <- lm(reformulate(input$xlmcol, response = "death"), air)
        newdata <- setNames(data.frame(input$lmPrediction), input$xlmcol)
        withMathJax(
            h3('Linear Regression Information'),
            paste0(
                "Adj. \\( R^2 = \\) ", round(summary(fit)$adj.r.squared, 3),
                ", \\( \\beta_0 = \\) ", round(fit$coef[[1]], 3),
                ", \\( \\beta_1 = \\) ", round(fit$coef[[2]], 3)
            ),
            br(),
            h3('Prediction'),
            paste0("Death = ", predict(fit, newdata))
        )
    })

    # k-means cluster scatter plot on two user-chosen columns.
    output$plot1Plotly <- renderPlotly({
        selectedClusterData <- air %>% select(input$xcol, input$ycol)
        selectedClusterData$cluster <- kmeans(selectedClusterData, input$clusters)$cluster
        # BUG FIX: the original did colors <- palette(c(...)); palette()
        # returns the *previous* palette, so the intended colors were never
        # used. Use the color vector directly and index it by cluster.
        colors <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
                    "#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999")
        selectedClusterData$color <- colors[selectedClusterData$cluster]
        # (debug print of the data frame removed)
        plot_ly(selectedClusterData,
                x = selectedClusterData[[input$xcol]],
                y = selectedClusterData[[input$ycol]],
                mode = "markers",
                showlegend = FALSE,
                hoverinfo = "x+y+text",
                text = paste("Cluster:", selectedClusterData$cluster),
                marker = list(opacity = 0.4,
                              color = selectedClusterData$color,
                              size = 12,
                              line = list(color = "#262626", width = 1)))
    })

    # Box plot of the chosen variable, optionally restricted to days with
    # more than 100 deaths. Replaces a five-way if/else chain with direct
    # column indexing.
    output$summaryPlot <- renderPlotly({
        newAir <- if (input$overHundred) air %>% filter(death > 100) else air
        plot_ly(y = newAir[[input$varSum]], type = "box", quartilemethod = "exclusive")
    })

    output$filteredTable <- renderDataTable({
        getFilteredData()
    })

    # CSV download of the current filtered subset.
    output$downloadData <- downloadHandler(
        filename = function() {
            paste("filtered_data", ".csv", sep = "")
        },
        content = function(file) {
            write.csv(getFilteredData(), file, row.names = FALSE)
        }
    )
})
|
44d167904233718ec2c06ec76715913dfc22ba7e
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/1,2-Dichloroethane_(.R
|
4c69cc277eef8beaad74aee27c08ca7e3481b072
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
1,2-Dichloroethane_(.R
|
# Render the glossary entry's Rmd to HTML via knitr; the commented lines
# show the older knit -> markdownToHTML -> pandoc pipeline that was replaced
# by the single knit2html() call.
library("knitr")
library("rgl")
#knit("1,2-Dichloroethane_(.Rmd")
#markdownToHTML('1,2-Dichloroethane_(.md', '1,2-Dichloroethane_(.html', options=c("use_xhml"))
#system("pandoc -s 1,2-Dichloroethane_(.html -o 1,2-Dichloroethane_(.pdf")
knit2html('1,2-Dichloroethane_(.Rmd')
|
78d4bd78c480776340b3f2842b4a0622edd754d6
|
fded4ae8aa91861b1882fde7b28f6fdc32ebafed
|
/R/coursera/week4.R
|
cd16d1db96a98f991cf21071855058b861a2f307
|
[] |
no_license
|
carloshpf/sandbox
|
11133d1c26db83927830beef5f20f76328ba4161
|
fd6def51c1f110f6696351f7875ca98e17f98cee
|
refs/heads/master
| 2020-04-12T01:34:43.409021
| 2019-03-07T20:49:32
| 2019-03-07T20:49:32
| 33,043,063
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,049
|
r
|
week4.R
|
# Course notes: exploring objects with str().
# The str Function
# What is in this object?
# Alternative to summary
# Compactly display the contents of lists
str(str)
# function (object, ...)
f <- gl(40, 10)
str(f)
summary(f)
head(airquality)
str(airquality)
m <- matrix(rnorm(100), 10, 10)
str(m)
s <- split(airquality, airquality$Month)
str(s)
# -----------------------
# Two Gaussian point clouds with different means, stacked and plotted.
a <- matrix(rnorm(100, 1, 0.25), ncol = 2)
b <- matrix(rnorm(100, 2, 0.25), ncol = 2)
c <- rbind(a, b)
plot(c)
# create a factor for each one, perhaps a third column
# -----------------------
# Simulation ------
# rnorm, dnorm, pnorm, rpois
# d for density
# r for random number generation
# p for cumulative distribution
# q for quantile function
# Every distribution has these four types of functions
set.seed(1)
rpois(10, 1)
ppois(2, 2)
# Profiler
#
# Build the n x n Hilbert matrix, whose (r, c) entry is 1 / (r + c - 1).
hilbert <- function(n) {
  i <- 1:n
  denom <- outer(i, i, "+") - 1
  1 / denom
}
# Time the SVD of a 500x500 Hilbert matrix.
x <- hilbert(500)
system.time(svd(x))
# Simulate a simple linear model y = 0.5 + 2x + noise.
set.seed(10)
x <- rep(0:1, each = 5)
e <- rnorm(10, 0, 20)
y <- 0.5 + 2 * x + e
library(datasets)
Rprof()
# NOTE(review): x1 and x2 are not defined anywhere in this script, so this
# lm() call errors as written -- presumably `y ~ x` was intended; verify.
fit <- lm(y ~ x1 + x2)
Rprof(NULL)
|
e1b871c58501570da095e725328a221f18827f89
|
6c1926b99503f6304d35ba383538c9c365242bb1
|
/man/attributes.to.long.Rd
|
82cb6a7ed8e4268df49995e1cd89f54d69c32c62
|
[] |
no_license
|
smorisseau/dhstools
|
56e1451de1124ac0f7943c7710a03a13b5fcca22
|
a8ba0addb7cae06cf085ebe08e9136bef04ed87f
|
refs/heads/master
| 2021-01-17T15:33:10.641739
| 2014-03-25T15:37:50
| 2014-03-25T15:37:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,191
|
rd
|
attributes.to.long.Rd
|
\name{attributes.to.long}
\alias{attributes.to.long}
\title{attributes.to.long}
\usage{
attributes.to.long(data, attribute.prefix,
ego.vars = NULL, keep.na = FALSE, idvar = NULL,
regexp = "^(.+)([\\.|_])(\\d+$)", regexp.index.gp = 3,
regexp.vname.gp = 1)
}
\arguments{
\item{data}{the wide-form dataset to convert}
\item{attribute.prefix}{a vector whose entries have the
prefixes of the names of variables in the dataframe
\code{data} that pertain to each alter. if you'd like
these to be re-named in the long form data, then the
variable names you'd like to use in the long form should
be the names of each entry in this vector. in the example
above, we would use
\code{attribute.prefix=c("age"="emage", "sex"="emsex")}.
see \code{regexp}, below, to understand how these
prefixes are used to match columns of the dataset; by
default, we assume that the variables match
<prefix><either '.' or '_'><number>.}
\item{ego.vars}{if not NULL, the names of columns in the
dataset that refer to the egos and so should not be
converted to long-form. you can specify that they should
be renamed in the same way as with
\code{attribute.prefix}. in the example above, we would
use \code{ego.vars=c("degree"="resp.d.hat")}.}
\item{keep.na}{if FALSE, remove columns in the resulting
dataset that are all NAs}
\item{idvar}{the index or name of the variable in the
data that has the respondent id. if NULL, then new ids
which correspond to the rows in data are created.}
\item{regexp}{the regular expression which describes the
wide-form variable names the default is anything that
ends in a "." or a "_" and a number. if you specify your
own regular expression, it should use groups (specified
with "(" and ")"). the default is
\code{"^(.+)([\\.|_])(\\d+$)"}, which specifies three
groups (see below for how these groups are used).}
\item{regexp.index.gp}{the group(s) in regexp which
should match the wide-form variable name prefixes
specified in \code{attribute.prefix}. in the default,
this is the first group, so \code{regexp.index.gp=1}.}
\item{regexp.vname.gp}{the group(s) in regexp which
should vary over the different alters; in the default,
this is the third group, so \code{regexp.vname.gp=3}.}
}
\value{
a long-form dataframe with the attributes reported for
all of the alters. the dataframe will include an
alter.id variable which is formed using <respondent
id>.<alter number>
}
\description{
Start with a wide-form dataframe reported about alters
using network method questions and convert it into a
long-form dataset. For example, after a network survey of
out-migrants, there might be variables about sex and age
of each emigre reported to be connected to each
respondent. In a study that encountered a maximum of 3
reported emigres across all respondents, this wide-form
dataframe might look like:\cr \tabular{ccccccccc}{
resp.id\tab resp.d.hat\tab emage.1\tab emage.2\tab
emage.3\tab emsex.1\tab emsex.2\tab emsex.3\cr 1\tab
100\tab 24\tab NA\tab NA\tab M\tab NA\tab NA\cr 2\tab
110\tab NA\tab NA\tab NA\tab NA\tab NA\tab NA\cr 3\tab
140\tab 33\tab 23\tab 53\tab F\tab M\tab F\cr ... \cr }
The \code{attributes.to.long} function could convert that
into a long-form dataframe that looks like this:\cr
\tabular{ccc}{ degree\tab age\tab sex\cr 100\tab 24\tab
M\cr 140\tab 33\tab F\cr 140\tab 23\tab M\cr 140\tab
53\tab F\cr \tab...\tab\cr } (Note that we make no
guarantees about the order in which the reshaped data
will be returned.)\cr \itemize{ \item{TODO - }{for now,
this converts any factors into characters. this is
obviously not ideal. eventually, data type should be
preserved...} \item{TODO - }{handle the case of "" more
effectively. Right now, we *assume* that all structural
missings (eg, I only report one alter, though there are
three columns for me to do so) are NA} \item{TODO -
}{look at the code in the middle of the function that's
commented out and be sure we know that the order of the
rows will be the same, to that we can cbind them
together.} }
}
\examples{
\dontrun{
## TODO add example
}
}
|
26d503708b26d2968dd52152f3a10f87858eba07
|
4362380c7c3bf62ab837f47cb63db1ad3bf2372d
|
/sparsity_analysis/elastic-net/14_DGN-WB_CV_elasticNet.r
|
89579a639ac3a5fdf1779050c364fc46d0cf5007
|
[] |
no_license
|
lmogil/GeneExp_h2
|
2d6823655595e8b014410ded161729fd3a2aee49
|
91e022196348d62cf30172d5a4e9011ee9648302
|
refs/heads/master
| 2021-06-20T00:26:34.161973
| 2017-06-01T13:55:08
| 2017-06-01T13:55:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,287
|
r
|
14_DGN-WB_CV_elasticNet.r
|
####by Heather E. Wheeler 20150108####
date <- Sys.Date()
# Command-line argument 1 = chromosome number to process.
args <- commandArgs(trailingOnly=T)
#args <- '22'
# Infix string-concatenation operator used throughout for building paths.
"%&%" = function(a,b) paste(a,b,sep="")
###############################################
### Directories & Variables
#pre <- "/Users/heather/Dropbox/elasticNet_testing"
pre <- ""
my.dir <- pre %&% "/group/im-lab/nas40t2/hwheeler/PrediXcan_CV/"
ct.dir <- pre %&% "/group/im-lab/nas40t2/hwheeler/PrediXcan_CV/cis.v.trans.prediction/"
gt.dir <- pre %&% "/group/im-lab/nas40t2/hwheeler/PrediXcan_CV/cis.v.trans.prediction/DGN-WB_genotypes/by.chr/"
en.dir <- pre %&% "/group/im-lab/nas40t2/hwheeler/PrediXcan_CV/EN/hapmap2/transcriptome-DGN-WB/"
k <- 10 ### k-fold CV
n <- 1 #number of k-fold CV replicates, remove nrep loop for this implementation
tis <- "DGN-WB"
chrom <- as.numeric(args[1])
chrname <- "chr" %&% chrom
##alpha = The elasticnet mixing parameter, with 0≤α≤ 1. The penalty is defined as
#(1-α)/2||β||_2^2+α||β||_1.
#alpha=1 is the lasso penalty, and alpha=0 the ridge penalty.
#alphalist <- 0:20/20 #vector of alphas to test in CV
alphalist <- c(0.05,0.95)
################################################
### Functions & Libraries
library(glmnet)
#library(doMC)
#registerDoMC(4)
#getDoParWorkers()
# Standard error of the mean of x (NOTE: this masks base::stderr()).
stderr <- function(x) {
  sqrt(var(x, na.rm = TRUE) / length(x))
}
# Lower / upper bounds of the 95% empirical interval (2.5% / 97.5% quantiles).
lower <- function(x) {
  quantile(x, 0.025, na.rm = TRUE)
}
upper <- function(x) {
  quantile(x, 0.975, na.rm = TRUE)
}
## convenience function to select best lambda over 1 k-fold cv replicate for
## a linear model, returning for each alpha: non-zero betas, best CV-MSE,
## best-lambda row index, best lambda, cross-validated predictions, alpha.
## Output is a flat list of 6 elements per alpha, concatenated in order.
## BUG FIX: the loop originally iterated over the *global* `alphalist`
## instead of the `alpha.set` argument; they happened to match in this
## script, but the function now honors its own parameter.
glmnet.select <- function(response, covariates, nfold.set = 10, alpha.set, foldid, ...) {
  require(glmnet)
  fullout <- list()
  for (h in seq_along(alpha.set)) {
    pred.matrix <- matrix(0, nrow = dim(covariates)[1], ncol = 1)
    ## parallel=T is slower on tarbell, not sure why (original note)
    glmnet.fit <- cv.glmnet(covariates, response, nfolds = nfold.set,
                            alpha = alpha.set[h], foldid = foldid[, 1],
                            keep = TRUE, parallel = FALSE)
    new.df <- data.frame(glmnet.fit$cvm, glmnet.fit$lambda,
                         glmnet.fit$glmnet.fit$df,
                         seq_along(glmnet.fit$lambda))
    # needs to be min or max depending on cv measure (MSE min, AUC max, ...)
    best.lam <- new.df[which.min(new.df[, 1]), ]
    cvm.best <- best.lam[, 1]   # best CV-MSE
    nrow.max <- best.lam[, 4]   # row position of best lambda
    pred.matrix[, 1] <- glmnet.fit$fit.preval[, nrow.max]  # predicted values for best lambda
    ret <- as.data.frame(glmnet.fit$glmnet.fit$beta[, nrow.max])  # vector of all betas
    ret[ret == 0.0] <- NA
    ret.vec <- as.vector(ret[which(!is.na(ret)), ])        # vector of non-zero betas
    names(ret.vec) <- rownames(ret)[which(!is.na(ret))]    # names (rsID) of non-zero betas
    min.lambda <- glmnet.fit$glmnet.fit$lambda[nrow.max]   # best lambda
    output <- list(ret.vec, cvm.best, nrow.max, min.lambda, pred.matrix, alpha.set[h])
    fullout <- c(fullout, output)
  }
  return(fullout)
}
################################################
# Load expression data (samples x genes) for the tissue.
rpkmid <- ct.dir %&% tis %&% ".exp.ID.list"
expid <- scan(rpkmid,"character")
rpkmgene <- ct.dir %&% tis %&% ".exp.GENE.list"
geneid <- scan(rpkmgene,"character")
rpkmfile <- ct.dir %&% tis %&% ".exp.IDxGENE"
expdata <- scan(rpkmfile)
expdata <- matrix(expdata, ncol = length(geneid), byrow=TRUE)
rownames(expdata) <- expid
colnames(expdata) <- geneid
t.expdata <- expdata #don't need to transpose DGN
# Gene annotation, restricted to protein-coding genes on this chromosome.
gencodefile <- my.dir %&% 'gencode.v12.V1.summary.protein.nodup.genenames'
gencode <- read.table(gencodefile)
rownames(gencode) <- gencode[,6]
gencode <- gencode[gencode[,1]==chrname,] ##pull genes on chr of interest
t.expdata <- t.expdata[,intersect(colnames(t.expdata),rownames(gencode))] ###pull gene expression data w/gene info
expsamplelist <- rownames(t.expdata) ###samples with exp data###
# Genotype metadata (plink .bim/.fam) and the genotype dosage matrix.
bimfile <- gt.dir %&% "DGN.hapmap2.QC.chr" %&% chrom %&% ".bim" ###get SNP position information###
bim <- read.table(bimfile)
rownames(bim) <- bim$V2
famfile <- gt.dir %&% "DGN.hapmap2.QC.chr" %&% chrom %&% ".fam" ###samples with gt data###
fam <- read.table(famfile)
samplelist <- intersect(fam$V1,expsamplelist)
exp.w.geno <- t.expdata[samplelist,] ###get expression of samples with genotypes###
explist <- colnames(exp.w.geno)
gtfile <- gt.dir %&% tis %&% '.gt.chr' %&% chrom %&% '.IDxSNP'
gtX <- scan(gtfile)
gtX <- matrix(gtX, ncol = length(bim$V2), byrow=TRUE)
colnames(gtX) <- bim$V2
rownames(gtX) <- fam$V1
X <- gtX[samplelist,]
# Pre-assigned CV fold memberships per sample.
grouplist <- read.table(ct.dir %&% tis %&% '.10reps.10fold.group.list',header=T)
rownames(grouplist) <- grouplist[,1]
groupid <- grouplist[,2:dim(grouplist)[2]]
# Results containers: one row per gene; incremental "working_" files are
# appended to as each gene finishes so partial runs are recoverable.
resultsarray <- array(0,c(length(explist),8))
dimnames(resultsarray)[[1]] <- explist
resultscol <- c("gene","alpha","cvm","lambda.iteration","lambda.min","n.snps","R2","pval")
dimnames(resultsarray)[[2]] <- resultscol
workingbest <- "working_" %&% tis %&% "_exp_" %&% k %&% "-foldCV_" %&% n %&% "-reps_elasticNet_bestAlpha_hapmap2snps_predictionInfo_chr" %&% chrom %&% "_" %&% date %&% ".txt"
write(resultscol,file=workingbest,ncolumns=8)
allR2array <- array(0,c(length(explist),1,length(alphalist)))
dimnames(allR2array)[[1]] <- explist
dimnames(allR2array)[[2]] <- c("R2")
dimnames(allR2array)[[3]] <- alphalist
allR2col <- c("gene",dimnames(allR2array)[[3]])
workingall <- "working_" %&% tis %&% "_exp_" %&% k %&% "-foldCV_" %&% n %&% "-reps_elasticNet_eachAlphaR2_hapmap2snps_chr" %&% chrom %&% "_" %&% date %&% ".txt"
write(allR2col,file=workingall,ncolumns=22)
allParray <- array(0,c(length(explist),length(alphalist)))
dimnames(allParray)[[1]] <- explist
dimnames(allParray)[[2]] <- alphalist
# Main loop: for each gene, pull its cis-SNPs (+/- 1Mb), run elastic-net CV
# at each alpha, record R2/p-value per alpha, and write the betas of the
# best-R2 model for PrediXcan.
for(i in 1:length(explist)){
cat(i,"/",length(explist),"\n")
gene <- explist[i]
geneinfo <- gencode[gene,]
chr <- geneinfo[1]
# NOTE: `c` here masks base::c() inside this loop body -- a naming hazard.
c <- substr(chr$V1,4,5)
start <- geneinfo$V3 - 1e6 ### 1Mb lower bound for cis-eQTLS
end <- geneinfo$V4 + 1e6 ### 1Mb upper bound for cis-eQTLs
chrsnps <- subset(bim,bim[,1]==c) ### pull snps on same chr
cissnps <- subset(chrsnps,chrsnps[,4]>=start & chrsnps[,4]<=end) ### pull cis-SNP info
cisgenos <- X[,intersect(colnames(X),cissnps[,2])] ### pull cis-SNP genotypes
if(is.null(dim(cisgenos))){
bestbetas <- data.frame() ###effectively skips genes with <2 cis-SNPs
}else{
minorsnps <- subset(colMeans(cisgenos), colMeans(cisgenos,na.rm=TRUE)>0) ###pull snps with at least 1 minor allele###
minorsnps <- names(minorsnps)
cisgenos <- cisgenos[,minorsnps]
if(is.null(dim(cisgenos)) | dim(cisgenos)[2] == 0){###effectively skips genes with <2 cis-SNPs
bestbetas <- data.frame() ###effectively skips genes with <2 cis-SNPs
}else{
exppheno <- exp.w.geno[,gene] ### pull expression data for gene
exppheno <- scale(exppheno, center=T, scale=T) ###need to scale for fastLmPure to work properly
exppheno[is.na(exppheno)] <- 0
rownames(exppheno) <- rownames(exp.w.geno)
##run Cross-Validation over alphalist
cv <- glmnet.select(exppheno,cisgenos,nfold.set=k,alpha.set=alphalist,foldid=groupid) ###run glmnet k-fold CV once determine best lambda & betas
# Unpack the flat list returned by glmnet.select (6 elements per alpha).
allbetas <- list() ##non-zero betas for each alpha 1:length(alpha.set)
allcvm <- vector() ##minimum cross-validated MSE for each alpha
allnrow.max <- vector() ##best lambda's vector position for each alpha
alllambdas <- vector() ##best lambda for each alpha
pred.mat <- matrix(NA,nrow=length(exppheno),ncol=length(alphalist)) ##predicted values at each alpha
# NOTE: by operator precedence this is (1:length(alphalist))*6, so j takes
# the values 6, 12, ... -- the index of the LAST element in each alpha's
# 6-item group; j-5 ... j-1 then address the other members of that group.
for(j in 1:length(alphalist)*6){
allbetas <- c(allbetas,cv[j-5])
allcvm <- c(allcvm,cv[[j-4]])
allnrow.max <- c(allnrow.max,cv[[j-3]])
alllambdas <- c(alllambdas,cv[[j-2]])
pred.mat[,j/6] <- cv[[j-1]]
}
indexbestbetas <- which.min(allcvm)
bestbetas <- allbetas[[indexbestbetas]] ###how many SNPs in best predictor?
}
}
if(length(bestbetas) > 0){
for(a in 1:length(alphalist)){
pred.en <- pred.mat[,a] ##k-fold CV predictions for each alpha
cvm <- allcvm[a]
### calculate correlation between predicted and observed expression
res <- summary(lm(exppheno~pred.en))
genename <- as.character(gencode[gene,6])
allR2array[gene,1,a] <- res$r.squared
allParray[gene,a] <- res$coef[2,4]
}
## output R2's
workingR2 <- c(gene,allR2array[gene,,])
write(workingR2,file=workingall,append=T,ncolumns=22)
idxR2 <- which.max(allR2array[gene,1,]) ##determine alpha that gives max R2
bestbetas <- allbetas[[idxR2]] ##may differ from min cvm betas
##for the best alpha, find output
resultsarray[gene,1] <- genename
resultsarray[gene,2] <- alphalist[idxR2]
resultsarray[gene,3] <- allcvm[idxR2] ###add mean minimum cvm (cross-validated mean-squared error) to results
resultsarray[gene,4] <- allnrow.max[idxR2] ###add mean of best lambda iteration to results
resultsarray[gene,5] <- alllambdas[idxR2] ###add best lambda to results
resultsarray[gene,6] <- length(bestbetas) ###add #snps in prediction to results
resultsarray[gene,7] <- allR2array[gene,1,idxR2] ###lm R2
resultsarray[gene,8] <- allParray[gene,idxR2] ###lm p-value
### output best shrunken betas for PrediXcan
bestbetalist <- names(bestbetas)
bestbetainfo <- bim[bestbetalist,]
betatable<-as.matrix(cbind(bestbetainfo,bestbetas))
betafile<-cbind(betatable[,2],betatable[,5],betatable[,7]) ###middle column: [,6] for GEUVADIS, [,5] for GTEx/other plink bed/bim/bam files
colnames(betafile) <- c("SNP","eff.allele","beta")
rownames(betafile) <- bestbetalist
write.table(betafile, file=en.dir %&% gene %&% "-" %&% tis %&% ".txt",quote=F,row.names=F,sep="\t")
}else{
# Gene skipped (too few usable cis-SNPs): record NA row with 0 SNPs.
genename <- as.character(gencode[gene,6])
resultsarray[gene,1] <- genename
resultsarray[gene,2:8] <- c(NA,NA,NA,NA,0,NA,NA)
}
write(resultsarray[gene,],file=workingbest,ncolumns=8,append=T)
}
# Final consolidated outputs for the chromosome.
write.table(resultsarray,file=tis %&% "_exp_" %&% k %&% "-foldCV_" %&% n %&% "-reps_elasticNet_bestAlpha_hapmap2snps_predictionInfo_chr" %&% chrom %&% "_" %&% date %&% ".txt",quote=F,row.names=F)
write.table(allR2array, file=tis %&% "_exp_" %&% k %&% "-foldCV_" %&% n %&% "-reps_elasticNet_eachAlphaR2_hapmap2snps_chr" %&% chrom %&% "_" %&% date %&% ".txt",quote=F)
|
1f2a75181bbb434beaf939b493d39682dbe9ba70
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/segmag/R/calc_segmentation_magnitude.r
|
05b8efac4ffc8e14dd3ad15fa954c077684df738
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,722
|
r
|
calc_segmentation_magnitude.r
|
calc_segmentation_magnitude <- function(segmag)
{
# Builds a vector holding the segmentation magnitude over time.
# A Gaussian is placed around each keypress; the Gaussian is precomputed
# with a cutoff (segmag$gauss_values / gauss_n_indexes_per_side).
# Per participant an envelope function is applied so each participant's
# maximal contribution is bounded.
# Computation is done in array indices rather than in time, because it is
# faster; everything is converted via time_steps, i.e. 1 / time_steps
# array cells per second.
if (! is.segmag(segmag)) stop("segmag must be an object of class segmag")
# Vector of segmentation magnitude over time, indexed in time_steps
segmentation_magnitude_overall <- numeric(segmag$index_time_max+1)
for (id in levels(segmag$ids))
{
index_keypresses <- segmag$index_keypresses[segmag$ids == id]
calc_segmentation_magnitude_impl(segmentation_magnitude_overall,index_keypresses,segmag$gauss_values,segmag$gauss_n_indexes_per_side,segmag$indexes_gauss_offset)
}
# as.numeric(as.character()) and plyr::round_any: Fix floating point issue causing problems in addressing specific time points (round_any and as.numeric(as.character()) fix different occurances of the issue)
# Example:
# tmp <- segmag(factor(c(1)),c(0),gauss_sd = 0.8)
# tmp$data$segmentation_magnitude[tmp$data$time==0.00]
# Before Fix returns: numeric(0)
# After Fix returns: [1] 0.4986779
return( data.frame(
time=as.numeric(as.character(
plyr::round_any(
seq(
segmag$time_min,
segmag$time_min + (segmag$index_time_max*segmag$time_steps),
segmag$time_steps
),
segmag$time_steps
)
)),
segmentation_magnitude=segmentation_magnitude_overall)
)
}
|
4c176210769b6568013370ab47e861c9108ff602
|
fa9a3f2faf618b2bf12409fe69341031ee1de9ac
|
/dev_suite/clean_all_time_space_data.R
|
23ea90c80d79abac8cf8d2cb1a0abdf18d4d86e8
|
[] |
no_license
|
silverer/covid_nyc_map
|
c1cbc0a4fbc8a29be95634123c1160b8c590da94
|
2d08dd8394853652f39c0b0ab3bce67db40526a3
|
refs/heads/master
| 2022-11-16T18:22:27.566032
| 2020-07-15T19:19:03
| 2020-07-15T19:19:03
| 265,358,933
| 0
| 0
| null | 2020-06-21T17:54:05
| 2020-05-19T20:30:30
|
R
|
UTF-8
|
R
| false
| false
| 7,749
|
r
|
clean_all_time_space_data.R
|
library(dplyr)
library(ggplot2)
library(plotly)
library(stats)
library(gtools)
library(stringr)
library(RColorBrewer)
# NOTE(review): setwd() in a script is fragile -- it assumes this exact
# local directory layout; kept as in the original.
setwd("~/Documents/covid_nyc_map")
source('./src/data_paths.R')
source('./dev_suite/clean_temporal_data.R')
# ACS demographics per ZCTA (zip-code tabulation area); `new_data` comes
# from data_paths.R sourced above.
acs <- read.csv(paste(new_data, 'acs_data_nyc.csv', sep = ''),
stringsAsFactors = F)
acs$ZCTA <- as.character(acs$ZCTA)
acs <- acs %>%
select(-c(X)) %>%
mutate(percent_non_white = 100 - percent_white)
# Cumulative COVID counts per ZCTA over time; compute testing rate per
# 100k residents and rename columns for display.
all_time <- read.csv(paste(all_time_data, 'all_time_covid_data.csv', sep = ''),
stringsAsFactors = F)
all_time <- all_time %>%
filter(!is.na(MODIFIED_ZCTA) & MODIFIED_ZCTA != '') %>%
mutate(ZCTA = as.character(MODIFIED_ZCTA),
COVID_TEST_RATE = (COVID_TEST_COUNT/POP_DENOMINATOR)*100000) %>%
select(-c(MODIFIED_ZCTA, MODZCTA, Positive, Total)) %>%
rename(`Total tests` = COVID_TEST_COUNT,
`Percent positive tests` = PERCENT_POSITIVE,
`Total deaths` = COVID_DEATH_COUNT,
`Positive tests` = COVID_CASE_COUNT,
`Death rate` = COVID_DEATH_RATE,
`Testing rate` = COVID_TEST_RATE,
`Case rate` = COVID_CASE_RATE)
merged_all <- dplyr::left_join(all_time, acs, by = 'ZCTA')
# Poverty categories:
# - Low: <10% of residents in ZCTA living below the FPT
# - Medium: 10% to <20%
# - High: 20% to <30%
# - Very high: ≥30% residents living below the FPT
# Other demographic variables are binned into quartiles via gtools::quantcut;
# factor levels are reversed so plots order from highest to lowest.
merged_cats <- merged_all %>%
mutate(
`Poverty rate` = case_when(
poverty_rate < 10 ~'Low',
poverty_rate >= 10 & poverty_rate < 20 ~ 'Medium',
poverty_rate >= 20 & poverty_rate < 30 ~ 'High',
poverty_rate >= 30 ~'Very high'
),
`Poverty rate` = factor(`Poverty rate`, levels = rev(c('Low', 'Medium',
'High', 'Very high'))),
`Percent Black` = quantcut(percent_black), #Generate quartiles and assign as factors
`Percent Black` = factor(`Percent Black`,
levels = rev(levels(`Percent Black`))),#Reverse levels for readability
`Percent non-white` = quantcut(percent_non_white, q = 4),
`Percent non-white` = factor(`Percent non-white`,
levels = rev(levels(`Percent non-white`))),
`Income bracket (thousands)` = quantcut(median_income/1000, q = 4),
`Income bracket (thousands)` = factor(`Income bracket (thousands)`,
levels = rev(levels(`Income bracket (thousands)`))),
`Percent Hispanic and/or Latino` = quantcut(percent_hispanic_latino, q = 4),
`Percent Hispanic and/or Latino` = factor(`Percent Hispanic and/or Latino`,
levels = rev(levels(`Percent Hispanic and/or Latino`))),
`Percent uninsured` = quantcut(percent_uninsured, q = 4),
`Percent uninsured` = factor(`Percent uninsured`,
levels = rev(levels(`Percent uninsured`))),
`Percent rec. public assistance` = quantcut(percent_receiving_public_assistance, q = 4),
`Percent rec. public assistance` = factor(`Percent rec. public assistance`,
levels = rev(levels(`Percent rec. public assistance`)))) %>%
mutate(commit_date = as.Date(commit_date),
Date = as.Date(actual_date))
# Plot the mean of a cumulative COVID rate over time, colored by a grouping
# variable (e.g. poverty-rate category).
#
# Args:
#   merged_df: data frame with Date plus the rate and grouping columns
#              (e.g. merged_cats built above).
#   grp_var:   column name (string) to group/color by.
#   cov_var:   rate column to plot; one of 'Death rate', 'Case rate',
#              'Testing rate' (default 'Death rate').
# Returns: a ggplot object.
plot_disparities_over_time <- function(merged_df, grp_var,
cov_var = 'Death rate'){
# Mean of all three rates per date x group; only cov_var is plotted.
mean_df = merged_df %>%
filter(!is.na(.data[[cov_var]])) %>%
group_by(Date, .data[[grp_var]]) %>%
summarise_at(vars('Death rate', 'Case rate', 'Testing rate'), mean)
ylabel = str_to_lower(cov_var)
p = ggplot(as.data.frame(mean_df),
aes(x = Date, y = .data[[cov_var]],
color = .data[[grp_var]]))+
geom_point()+
scale_color_brewer(palette="Spectral",name = grp_var)+
labs(x = '', y = paste('Cumulative', ylabel))+
theme(panel.background = element_blank(),
axis.line = element_line(colour = "black"),
text = element_text(size = 16),
axis.text.x = element_text(size = 14),
axis.text.y = element_text(size = 14),
axis.title = element_text(size = 14),
legend.title = element_blank(),
legend.text = element_text(size = 10))
return(p)
}
# Build a legend title from a variable name: three- or four-word names are
# wrapped onto two lines after the second word (any words past the fourth
# are dropped); names shorter than three words pass through unchanged.
get_legend_text <- function(grp_var){
  words <- unlist(strsplit(grp_var, " "))
  n_words <- length(words)
  if (n_words < 3) {
    leg_title <- grp_var
  } else if (n_words == 3) {
    leg_title <- paste0(words[1], " ", words[2], "\n", words[3])
  } else {
    leg_title <- paste0(words[1], " ", words[2], "\n", words[3], " ", words[4])
  }
  return(leg_title)
}
# Interactive (plotly) versions of the disparity plots for several
# demographic groupings and rates; legend titles wrapped via get_legend_text.
test_plot <- plot_disparities_over_time(merged_cats, 'Income bracket (thousands)')
legend_title <- get_legend_text('Income bracket (thousands)')
ggplotly(test_plot)%>%
layout(legend = list(y = 0.5, title = list(text=legend_title,
font=list(size=14))))
test_plot <- plot_disparities_over_time(merged_cats, 'Income bracket (thousands)',
cov_var = 'Case rate')
ggplotly(test_plot)%>%
layout(legend = list(y = 0.5, title = list(text="Income bracket\n(thousands)")))
test_plot <- plot_disparities_over_time(merged_cats, 'Poverty rate',
cov_var = 'Case rate')
ggplotly(test_plot)
test_plot <- plot_disparities_over_time(merged_cats, 'Percent uninsured',
cov_var = 'Case rate')
ggplotly(test_plot)
test_plot <- plot_disparities_over_time(merged_cats, 'Percent uninsured',
cov_var = 'Testing rate')
ggplotly(test_plot)
test_plot <- plot_disparities_over_time(merged_cats, 'Percent Black',
cov_var = 'Case rate')
ggplotly(test_plot)
test_plot <- plot_disparities_over_time(merged_cats, 'Percent Black',
cov_var = 'Death rate')
ggplotly(test_plot)
test_plot <- plot_disparities_over_time(merged_cats, 'Percent Hispanic and/or Latino',
cov_var = 'Case rate')
legend_title <- get_legend_text('Percent Hispanic and/or Latino')
ggplotly(test_plot)%>%
layout(legend = list(y = 0.5, title = list(text=legend_title,
font=list(size=14))))
# Build a named vector mapping pretty display names -> original column
# names, read from pretty_column_names.csv (located via the `new_data`
# path sourced from data_paths.R -- TODO confirm).
#
# Args:
#   rename_list: optional named vector of overrides; names are display
#                names, values are the original column names to map to.
# Returns: named character vector (names = display names, values = original
# column names), suitable for renaming choro_inputs columns.
rename_columns <- function(rename_list = NULL){
pretty_columns = read.csv(paste(new_data, 'pretty_column_names.csv', sep = ''),
stringsAsFactors = FALSE)
pretty_columns = pretty_columns %>%
filter(!is.na(split_word))
# Empty second-line labels become a single space so paste() stays aligned.
pretty_columns$l2[pretty_columns$l2 == ''] <- ' '
pretty_columns = pretty_columns %>%
mutate(formatted_name = paste(l1, l2, sep = "\n"),#build names that need to have a newline
formatted_name = str_trim(formatted_name, side = 'both'), #remove leading/trailing whitespace
format_1l = paste(l1, l2, sep = ' '), #build names to go on one line
format_1l = str_trim(format_1l, side = 'both'))
#plot_names is used to rename the choro_inputs columns
plot_names = as.vector(pretty_columns$original_name)
names(plot_names) = as.vector(pretty_columns$format_1l)
if(!is.null(rename_list)){
for(i in 1:length(rename_list)){
plot_names[names(rename_list)[i]] = rename_list[i]
}
}
return(plot_names)
}
get_daily_counts <- function(count_vec){
# Convert a cumulative count series into daily increments.
#
# Args:
#   count_vec: numeric vector of cumulative counts, one entry per day.
#
# Returns:
#   A vector the same length as `count_vec` whose i-th entry is
#   count_vec[i] - count_vec[i-1]; the first entry is NA (no prior day).
#   A zero-length input is returned unchanged.
#
# Fixes the original loop over 2:length(count_vec), which ran backwards
# (2:1, 2:0) and failed for inputs of length 0 or 1.
if (length(count_vec) == 0) {
return(count_vec)
}
c(NA, diff(count_vec))
}
|
a80f819186bbc0d7baabdbd21278a679b56dcd32
|
f7a5ba5af2006f6f24c538ffa1cea6b96b811dc5
|
/src/runAnalysis.R
|
08a8f3499848febdf76d2d7caf75747699c9f812
|
[] |
no_license
|
xut006/single_cell_UCMAIT
|
eaa58150bd9547830e5c7a49bf3b5c0490e76b2c
|
2ed97ed37c3703faafb07e7b0f25291f2431e216
|
refs/heads/master
| 2023-03-13T17:48:12.819005
| 2021-03-08T23:59:43
| 2021-03-08T23:59:43
| 198,322,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
runAnalysis.R
|
#### Run Analysis ####
# Renders the insulin cluster-report controller Rmd to a dated PDF.
# `repo_dir` must be set to the repository root before running.
# (An HTML output variant existed previously; to render HTML instead,
# switch output_format to "html_document" and the extension to ".html".)
repo_dir <- "/single_cell_insulin"

## Date stamp (YYYY_MM_DD) used in the output filename.
date <- gsub("-", "_", Sys.Date())

#### render to pdf ####
## Controller script and the PDF it produces.
ctrl_path <- paste(repo_dir, "/src/controller/insulin_filtered_clusterReports_violins.Rmd", sep = "")
output_path <- paste(repo_dir, "/results/insulin_analysis_", date, ".pdf", sep = "")

## Render the report.
rmarkdown::render(
input = ctrl_path,
output_format = "pdf_document",
output_file = output_path)
|
34deeeb3c0e70b7fc4f62ffecca2d0dac996edbd
|
0ffbd1f7e505f8c8633f6856c3949f49da8a8200
|
/R_code/PackagesInstall.R
|
d5b7f0fdc9011465f6b9fbd4da24708e156df9d3
|
[] |
no_license
|
michelegargiulo/UNISA2020_StatisticalDataAnalysis
|
7c84606b9f75fbb7ebae6d1c1369258cb39c9dcf
|
9ef5c09ca9bbd876ce28a70009f486327017b0ab
|
refs/heads/main
| 2023-01-31T23:30:21.370822
| 2020-12-18T13:54:35
| 2020-12-18T13:54:35
| 320,288,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 308
|
r
|
PackagesInstall.R
|
# One-off environment setup: install every package used by the project.
# install.packages() accepts a character vector, so install them in one call.
project_packages <- c(
"devtools", "roxygen2", "readxl", "anchors", "RSQLite",
"mice", "leaps", "glmnet", "pls", "tidyverse", "caret"
)
install.packages(project_packages)
|
f82607b39c011964365ab2eafaf4a681e64699e7
|
7374303c14e64c42bed64be1c8aff78e9aefa3d8
|
/man/kde.test.Rd
|
bcd923951fdbce71193338d27fa3bc14c87e4cdd
|
[] |
no_license
|
cran/ks
|
cd7d27f9a0d865f577c0bc4e857dbeca09ed55a6
|
f571ffa28e9dbc5ab649b4f6ac30879cf8fad43c
|
refs/heads/master
| 2022-11-30T03:47:41.411752
| 2022-11-24T02:40:02
| 2022-11-24T02:40:02
| 17,696,943
| 6
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,128
|
rd
|
kde.test.Rd
|
\name{kde.test}
\alias{kde.test}
\title{Kernel density based global two-sample comparison test}
\description{
Kernel density based global two-sample comparison test for 1- to 6-dimensional data.}
\usage{
kde.test(x1, x2, H1, H2, h1, h2, psi1, psi2, var.fhat1, var.fhat2,
binned=FALSE, bgridsize, verbose=FALSE)
}
\arguments{
\item{x1,x2}{vector/matrix of data values}
\item{H1,H2,h1,h2}{bandwidth matrices/scalar bandwidths. If these are
missing, \code{Hpi.kfe}, \code{hpi.kfe} is called by default.}
\item{psi1,psi2}{zero-th order kernel functional estimates}
\item{var.fhat1,var.fhat2}{sample variance of KDE estimates evaluated at x1, x2}
\item{binned}{flag for binned estimation. Default is FALSE.}
\item{bgridsize}{vector of binning grid sizes}
\item{verbose}{flag to print out progress information. Default is FALSE.}
}
\value{
A kernel two-sample global significance test is a list with fields:
\item{Tstat}{T statistic}
\item{zstat}{z statistic - normalised version of Tstat}
\item{pvalue}{\eqn{p}{p}-value of the double sided test}
\item{mean,var}{mean and variance of null distribution}
\item{var.fhat1,var.fhat2}{sample variances of KDE values evaluated at data points}
\item{n1,n2}{sample sizes}
\item{H1,H2}{bandwidth matrices}
\item{psi1,psi12,psi21,psi2}{kernel functional estimates}
}
\details{The null hypothesis is \eqn{H_0: f_1 \equiv f_2}{H_0: f_1 = f_2} where \eqn{f_1, f_2}{f_1, f_2}
are the respective density functions. The measure of discrepancy is
the integrated squared error (ISE)
\eqn{T = \int [f_1(\bold{x}) - f_2(\bold{x})]^2 \, d \bold{x}}{int [ f_1(x) - f_2(x)]^2 dx}. If
we rewrite this as \eqn{T = \psi_{0,1} - \psi_{0,12} - \psi_{0,21} + \psi_{0,2}}{T = psi_0,1 - psi_0,12 - psi_0,21 + psi_0,2}
where \eqn{\psi_{0,uv} = \int f_u (\bold{x}) f_v (\bold{x}) \, d \bold{x}}{psi_0,uv = int f_u(x) f_v(x) dx},
then we can use kernel functional estimators. This test statistic has a null
distribution which is asymptotically normal, so no bootstrap
resampling is required to compute an approximate \eqn{p}{p}-value.
If \code{H1,H2} are missing then the plug-in selector \code{\link{Hpi.kfe}}
is automatically called by \code{kde.test} to estimate the
functionals with \code{kfe(, deriv.order=0)}. Likewise for missing
\code{h1,h2}.
For \pkg{ks} \eqn{\geq}{>=} 1.8.8, \code{kde.test(,binned=TRUE)} invokes binned
estimation for the computation of the bandwidth selectors, and not the
test statistic and \eqn{p}{p}-value.
}
\references{
Duong, T., Goud, B. & Schauer, K. (2012) Closed-form density-based framework for automatic detection of cellular morphology changes. \emph{PNAS}, \bold{109}, 8382-8387.
}
\seealso{\code{\link{kde.local.test}}}
\examples{
set.seed(8192)
samp <- 1000
x <- rnorm.mixt(n=samp, mus=0, sigmas=1, props=1)
y <- rnorm.mixt(n=samp, mus=0, sigmas=1, props=1)
kde.test(x1=x, x2=y)$pvalue ## accept H0: f1=f2
library(MASS)
data(crabs)
x1 <- crabs[crabs$sp=="B", c(4,6)]
x2 <- crabs[crabs$sp=="O", c(4,6)]
kde.test(x1=x1, x2=x2)$pvalue ## reject H0: f1=f2
}
\keyword{test}
|
9497253abc72f9fd5f79301ee7432c24ab655a6f
|
817aef9be7c091e8f4966b72f8359426b425f6ee
|
/R/liftTable.R
|
0a8240639c3e5f84e515ef4e27598c9d8c982325
|
[] |
no_license
|
cran/CustomerScoringMetrics
|
07e47dbdeb77b087fb61d1f66cee13ff82de3c2b
|
09f407d8ba99ad86e146dba7af98d4ad0c2d9189
|
refs/heads/master
| 2020-03-08T20:27:40.716795
| 2018-04-06T09:39:01
| 2018-04-06T09:39:01
| 128,382,390
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,978
|
r
|
liftTable.R
|
#' Calculate lift table
#'
#' Calculates a lift table, showing for different percentiles of predicted scores how much
#' more the characteristic or action of interest occurs than for the overall sample.
#'
#' @param predTest Vector with predictions (real-valued or discrete)
#' @param depTest Vector with true class labels
#' @param resolution Value for the determination of percentile intervals. Default 1/10 (10\%).
#' @return A lift table.
#' @import stats
#' @export
#' @author Koen W. De Bock, \email{kdebock@@audencia.com}
#' @references Berry, M.J.A. and Linoff, G.S. (2004): "Data Mining Techniques: For Marketing, Sales, and
#' Customer Relationship Management - Second Edition". John Wiley & Sons.
#' @seealso \code{\link{topDecileLift}}, \code{\link{liftIndex}}, \code{\link{liftChart}}
#' @examples
#' ## Load response modeling predictions
#' data("response")
#' ## Apply liftTable function to obtain lift table for test sample results and print
#' ## results
#' lt<-liftTable(response$test[,2],response$test[,1])
#' print(lt)
#'
liftTable <- function (predTest,depTest,resolution=1/10) {
# Compute a lift table: for each score percentile interval, how much more the
# positive class occurs among the top-scored cases than in the overall sample.
# NOTE(review): ties at the percentile cut are broken by random shuffling via
# sample(), so repeated calls can differ unless the RNG seed is fixed.
checkDepVector(depTest)
tmp <- unique(depTest)
depvalues <- tmp[order(tmp)]
# Column 1: class label recoded by as.factor (1 = negative, 2 = positive,
# given a sorted two-level dependent); column 2: prediction score.
yp = cbind(as.factor(depTest),predTest)
# Percentile cut points, e.g. 0.1, 0.2, ..., 1.0 for resolution = 1/10.
perc_list <- seq(0,1,resolution)[-1]
lift = array(0,length(perc_list))
ratio = array(0,length(perc_list))
nr_per_percentile <- array(0,length(perc_list))
# P / N: counts of positive / negative cases in the full sample.
P<- sum((yp[,1]==2)*1)
N<- sum((yp[,1]==1)*1)
# Sort by score descending, then drop rows with missing values.
yp_s <- yp[cbind(order(yp[,2],decreasing=TRUE)),]
yp_s <- as.data.frame(yp_s[complete.cases(yp_s),])
for (perc_idx in 1:length(perc_list)) {
#[tmp,idx] = sort(yp(:,2),'descend');
lift_percentage <- perc_list[perc_idx];
# Score value at the boundary of the current top percentile.
cut_value <- yp_s[round(nrow(yp_s)*lift_percentage),2];
#rm(idx);
# Cases strictly above the cut are always included; ties at the cut value
# are shuffled randomly before taking as many as needed.
consideration_table_part1 <- yp_s[(yp_s[,2]>cut_value),];
consideration_table_part2 <- yp_s[yp_s[,2]==cut_value,];
idx <- sample(1:nrow(consideration_table_part2),nrow(consideration_table_part2),replace=FALSE);
consideration_table_part2_s <- consideration_table_part2[idx,];
consideration_table <- rbind(consideration_table_part1,consideration_table_part2_s);
rm(consideration_table_part1)
rm(consideration_table_part2)
rm(consideration_table_part2_s)
# Observed positive-class incidence within the top percentile slice.
ratio[perc_idx] <- sum((consideration_table[1:round(nrow(yp_s)*lift_percentage),1]==2)*1,na.rm=TRUE)/floor(nrow(yp_s)*lift_percentage);
#if (ratio == 0)
# ratio = sum(yp_s(yp_s(:,2)==max(yp_s(:,2)),1)==1)/sum(yp_s(:,2)==max(yp_s(:,2)),1);
#end
# Lift = observed incidence over the base rate P/(P+N).
lift[perc_idx] <- (ratio[perc_idx] / (P/(P+N)));
# Positives falling inside this percentile band only (the small epsilon
# guards the ceiling() index against floating-point round-off).
nr_per_percentile[perc_idx] = sum(consideration_table[ceiling(nrow(yp_s)*(lift_percentage-perc_list[1])+0.000001):floor(nrow(yp_s)*lift_percentage),1]==2);
}
# Assemble result: percentile, cumulative lift, expected and observed
# incidence, and the positive count per percentile band.
liftTable<- as.data.frame(cbind(perc_list*100,lift,(P/(P+N)),ratio,nr_per_percentile))
colnames(liftTable)<-c("Percentile","TopPercentileLift","expectedIncidence","trueIncidence","nrel")
return(liftTable)
}
|
a3d67687dcb8a0a6fe14b0a0a83f2edc820ffc83
|
ee8e8a21162e555196fc1d91f477bc81caf58764
|
/man/balance.Rd
|
39da3e5a62e95da0d18f3e1294fc525d7c2f798c
|
[
"MIT"
] |
permissive
|
rz6/DIADEM
|
8c97928d08b0ff9c2c71b8ce8f1e05bf7a89a31f
|
76b8701925a1d0b880625a3167d8606388433ad8
|
refs/heads/master
| 2020-04-09T01:05:36.276112
| 2019-12-25T20:55:30
| 2019-12-25T20:55:30
| 159,891,550
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 905
|
rd
|
balance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{balance}
\alias{balance}
\title{Inserts 0 columns and rows after last row/column to symmetrize matrix.}
\usage{
balance(mtx, N = NULL)
}
\arguments{
\item{mtx}{matrix in dense format to be symmetrized}
\item{N}{positive integer; additional argument for symmetrizing matrix to desired N x N dimension; N need not be larger than \code{ncol(mtx)} or \code{nrow(mtx)}, in which case submatrix \code{mtx[1:N,1:N]} will be extracted}
}
\value{
N by N matrix which is either submatrix of \code{mtx} or \code{mtx} extended with 0's row and/or columns
}
\description{
Inserts 0 columns and rows after last row/column to symmetrize matrix.
}
\examples{
mtx1 <- matrix(1:24, ncol = 4)
mtx2 <- matrix(1:24, nrow = 4)
print(mtx1)
print(mtx2)
balance(mtx1)
balance(mtx2)
balance(mtx1, N = 8)
balance(mtx1, N = 3)
}
|
b281c5078cb575faa89aee79dffb7fcd5c9995e0
|
b7cd0766d2f808c64b20da60e983468240bdee48
|
/man/lfpcr_boot_oneunit.Rd
|
86a4e9960948b61776b6caced4260a08d0b7e28b
|
[] |
no_license
|
seonjoo/Lpredict
|
b168f83de72072aa1731d775fbb15ab6a3319d7a
|
2cf365b4c2b0ca44645221157b00562ae4c43c13
|
refs/heads/master
| 2021-06-10T05:46:57.051361
| 2017-09-19T19:43:49
| 2017-09-19T19:43:49
| 56,447,015
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 992
|
rd
|
lfpcr_boot_oneunit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lfpcr_boot_oneunit.R
\name{lfpcr_boot_oneunit}
\alias{lfpcr_boot_oneunit}
\title{Run bootstrapping for one sample unit}
\usage{
lfpcr_boot_oneunit(datct = NULL, idlist = NULL, seednum = 1000,
idvar = NULL, timevar = NULL, inYrs = FALSE, predvarlist = NULL,
outcomevar = NULL, covariates = NULL, varthresh = 0.85,
lambdalist = 2^(c(-8:8)), penalty.factor = NULL, nfold = 10)
}
\arguments{
\item{idlist}{List of sample unit}
\item{seednum}{random seed number}
\item{idvar}{ID variable name from the dataset}
\item{timevar}{time variable name from the dataset in days from the baseline}
\item{inYrs}{whether the time variable needs to be converted in years, default FALSE}
\item{predvarlist}{columns of the predictors}
\item{outcomevar}{outcome variable}
\item{varthresh}{threshold for LFPCA}
}
\description{
Run bootstrapping for one sample unit
}
\references{
TBA
}
\author{
Seonjoo lee \email{sl2670@cumc.columbia.edu}
}
|
e7cdab0703b24c9662321b6732090249f3896bae
|
2f31f71c1370e3597c9697ba06bb195950884e57
|
/ex15.R
|
323393cf69de1de2287264f5a8c07df54c392527
|
[] |
no_license
|
272-burger/statistical-methods
|
a7b81d11a4feb795df6da9807f5dc8cab9b76a68
|
409cc72f50a4c712664b5da2a14c4ee18029d043
|
refs/heads/main
| 2023-07-03T07:53:49.934794
| 2021-08-09T10:30:55
| 2021-08-09T10:30:55
| 366,439,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,914
|
r
|
ex15.R
|
# assigning data
mydata <- matrix(c(16, 19, 18, 20, 24, 26, 30, 32, 31, 34, 5, 6,
7, 8, 9, 11, 12, 13, 14, 15, 2, 1.9, 4, 5.6,
6.1, 6.2, 7, 7.2, 8, 9), nrow = 10, ncol = 3)
colnames(mydata) <- c("매출액", "광고비", "설비투자")
(mydata <- data.frame(mydata))
attach(mydata)
# scatter plot
## aspect ratio 45도 가이드라인; 선형성 판단
plot(매출액~광고비)
plot(매출액~설비투자)
# scatter plot matrix
pairs(mydata)
## lattice 패키지 사용
windows()
library(lattice)
splom(mydata)
# 상관계수; 선형 방향과 강도 파악
cor(mydata)
## 산점도에서는 매출액과 설비투자가 곡선경향이 있으나
## 상관계수는 0.917로 두 변수의 선형성이 높다고 할 수 있다.
## 다중공선성 판단 정도.. 는 ..
# 단순선형회귀
par(mfrow = c(1,3))
plot(매출액~광고비)
lm1 <- lm(매출액~광고비)
summary(lm1)
curve(lm1$coefficient[1]+lm1$coefficient[2]*x, add = T)
title(main = expression(paste(hat("y"), " =6.63+1.83x p값 = 0.000 R-sq = 0.97")))
plot(매출액~설비투자)
lm2 <- lm(매출액~설비투자)
summary(lm2)
curve(lm2$coefficient[1]+lm2$coefficient[2]*x, add=T)
title(main = expression(paste(hat("y"), " =10.72+2.50x p값 = 0.00018 R-sq = 0.84")))
plot(설비투자~광고비)
cor(설비투자, 광고비)
title(main="r = 0.95")
# 다중회귀: 매출액 = f(광고비, 설비투자)
lm12 <- lm(매출액 ~ 광고비 + 설비투자)
lm12
summary(lm12)
confint(lm12)
## 설비투자 변수를 제거하고 회귀분석 다시
## confint 0 포함여부
# residual plot
par(mfrow = c(1,3))
plot(lm12$fitted, lm12$residuals, xlab = "매출액 - hat", ylab = "잔차", main = "잔차그림")
abline(0,0)
plot(광고비, lm12$residuals, xlab="광고비", ylab="잔차", main="잔차그림")
abline(0,0)
plot(설비투자, lm12$residuals, xlab="설비투자", ylab="잔차", main="잔차그림")
abline(0,0)
## 이 경우 자료의 수가 적어 어떤 패턴을 단정적으로 말하기는 어렵긴 하나
# dummy variable
설비투자가변수 <- c(rep(0,4), rep(1,6))
dummy.data <- data.frame(매출액, 광고비, 설비투자가변수)
pairs(dummy.data)
## 설비투자가변수가 0일때 매출액이 적고, 1일때 매출액이 큼
mylm <- lm(매출액 ~ 광고비 + 설비투자가변수)
mylm
summary(mylm)
## 설비투자가변수가 비유의적이어서 회귀식에서
## 제거해야 하지만 여기서는 예시의 목적으로 아래 그림들을 추가로 그려본다
par(mfcol=c(1,3))
plot(매출액[설비투자가변수==0] ~ 광고비[설비투자가변수==0],
xlim=c(5,15), ylim=c(16,34), pch=19, xlab="광고비", ylab="매출액")
title("설비투자가변수=0")
lines(x <- c(4,9), y=8.04+1.57*x)
text(9, 18, "y=8.04+1.57x")
plot(매출액[설비투자가변수==1] ~ 광고비[설비투자가변수==1],
xlim=c(5,15), ylim=c(16,34), xlab="광고비", ylab="매출액")
title("설비투자가변수=1")
lines(x <- c(8,16), y=10.13+1.57*x, lty=3)
text(9.5, 30, "y=10.13+1.57x")
plot(매출액 ~ 광고비, xlab="광고비", ylab="매출액")
title("전체자료")
points(광고비[설비투자가변수==0], 매출액[설비투자가변수==0], pch=19)
lines(x <- c(4,9), y=8.04+1.57*x)
lines(x <- c(8, 16), y=10.13+1.57*x, lty=3)
text(9.5, 30, "y=10.13+1.57x")
## 두 직선의 절편 차 = 더미 계수의 절편값 = 2.088
# 다항회귀
# assigning data
x <- c(250, 260, 270, 280, 290, 300, 310, 320, 330, 340)
y <- c(45, 51, 56, 70, 72, 86, 81, 67, 53, 40)
plot(x,y, xlab = "온도", ylab ="강도")
# 이 예제의 산점도는 곡선형태를 분명히 보이므로 1차 회귀 없이 바로 이차 회귀를
# 시도할 수 있음. 여기서는 잔차도를 비교하기 위하여 1차 회귀를 적합함
# 산점도에서는 쉽게 보이지 않던 곡선형태가 잔차도에서는 뚜렷이 보일 수 있음
# 우선 1차 회귀해보면
mylm1 <- lm(y~x)
anova(mylm1)
summary(mylm1)
curve(mylm1$coeff[1] + mylm1$coeff[2]*x, add=T)
title(main="1차 회귀")
windows()
plot(mylm1$fitted, mylm1$residuals, xlab = "y-hat", ylab = "r", main = "1차 회귀 잔차그림")
abline(0, 0)
# 이차항 추가
x2 <- x*x
mylm2 <- lm(y~x+x2)
anova(mylm2)
summary(mylm2)
# 이차항의 계수가 유의적임
# 특별한 경우를 젤외하고 제일 고차항의 계수가 유의적이라면
# 낮은 차수의 항은 관례적으로 추가로 검정하지 않는다 (절편항 포함)
# 낮은 차수의 항이 비유의적이라 하더라도 회귀식에서 제외하지 않음
windows()
plot(x, y, xlab="온도", ylab="강도")
curve(mylm2$coeff[1] + mylm2$coeff[2]*x + mylm2$coeff[3]*x2,
add=T)
title(main = "2차 다항회귀")
windows()
plot(mylm2$fitted, mylm2$residuals, xlab = "y-hat",
ylab = "r", main = "2차 다항회귀 잔차그림")
abline(0, 0)
## 일차 다항회귀 잔차그림 vs 이차 다항회귀 잔차그림
## 상당히 fitting이 개선되었음을 비교해볼 수 있음
# 삼차항 추가
x3 <- x^3
mylm3 <- lm(y~x+x2+x3)
windows()
plot(x, y, xlab="온도", ylab="강도", main="3차 다항회귀")
curve(mylm3$coeff[1] + mylm3$coeff[2]*x + mylm3$coeff[3]*x^2
+ mylm3$coeff[4]*x^3, add=T)
summary(mylm3)
plot(mylm3$fitted, mylm3$residuals)
abline(0,0)
## 3차 회귀식에서의 잔차들은 (-7, 6) 범위로 2차 회귀식보다 0쪽으로 줄었으나
## 줄어든 정도는 아주 많이 둔화됨
## 3차 잔차그림; 2차 회귀식의 경우보다 오히려 더 뚜렷한 곡선형태가 나타남
## 따라서 이 경우 2차 회귀분석이 가장 적합하다는 결론을 내릴 수 있음
## 그러나 2차 다항곡선과 3차 다항곡선을 적합하여 보면 3차가 더 적합해 보임
## if 예측력 관점이라면 3차 곡선이 나을것
## 통계적 관례로는 2차 곡선이 나음
## 둘 다 제시하는 것이 바람직!
|
8c7aa8360daf8d133da11e2bd75dcaaa83076e11
|
ce6cc316a7dc9bc2ecce50616da50bd1689eccb2
|
/rankall.R
|
4ce2cfa962834f8d141b7f458fb15b007d71c46f
|
[] |
no_license
|
fmonera/ProgrammingAssignment3
|
97e3f4dc9747a5a482efb259209b27859da4c7b3
|
0615c46f81df058d34e6a19b96cc6ae0e7a4095d
|
refs/heads/master
| 2020-05-16T22:01:45.399989
| 2015-08-30T14:28:37
| 2015-08-30T14:28:37
| 41,631,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,320
|
r
|
rankall.R
|
rankall <- function( outcome, num = "best") {
## For each state, return the hospital at rank `num` for 30-day mortality
## of `outcome` ("heart attack", "heart failure", "pneumonia").
## `num` is "best", "worst", or a rank number; NA when the state has fewer
## hospitals than the requested rank.
## Returns a data.frame with columns hospital and state, rownames = state.

## Read outcome data (all columns as character; rates parsed explicitly below).
dt <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
states <- unique(dt[,7])

## Map the requested outcome to its mortality-rate column.
outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
if (!outcome %in% names(outcome_cols)) stop("invalid outcome")
cl <- outcome_cols[[outcome]]

## Keep hospital name, state, and the chosen rate; parse rate to numeric
## ("Not Available" becomes NA, warnings suppressed intentionally).
selc <- dt[, c(2, 7, cl)]
colnames(selc) <- c("hospital", "state", "outcome")
selc[, 3] <- suppressWarnings(as.numeric(selc[, 3]))

## Rank by outcome ascending, ties broken alphabetically by hospital
## (single order() call is equivalent to the two stable sorts it replaces),
## then drop rows with missing rates and split by state.
sorted <- selc[order(selc$outcome, selc$hospital), , drop = FALSE]
nona <- na.omit(sorted)
listbystates <- split(nona, nona$state)

## One hospital per state, preallocated via vapply (no rbind-in-loop).
hospitals <- vapply(states, function(currentstate) {
stdf <- listbystates[[currentstate]]
## State absent after na.omit: no ranked hospitals at all.
if (is.null(stdf)) return(NA_character_)
numhospitals <- nrow(stdf)
rsel <- if (identical(num, "best")) {
1
} else if (identical(num, "worst")) {
numhospitals
} else {
num
}
if (rsel > numhospitals) NA_character_ else stdf[rsel, 1]
}, character(1))

## Assemble, order alphabetically by state, label rows by state.
result <- data.frame(hospital = hospitals, state = states,
stringsAsFactors = FALSE)
result <- result[order(result$state), , drop = FALSE]
rownames(result) <- result$state
result
}
|
c7fd2b1e2d75fd077abb00a0cba660a97eae6e68
|
771c05fa7b58f8f2dab7938da389e9e72b3cf3d4
|
/Rvasp/man/print.dosdata.Rd
|
10401c7277627b3bcfaecef9306a8384eacf4ac8
|
[
"MIT"
] |
permissive
|
gokhansurucu/Rvasp
|
56a75b10daa606768791935530bd108204d92f4f
|
8983440a96ca8acf017f47af8dbfd3f32faaad22
|
refs/heads/master
| 2020-04-08T21:14:33.155967
| 2014-03-14T20:08:59
| 2014-03-14T20:08:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 233
|
rd
|
print.dosdata.Rd
|
\name{print.dosdata}
\alias{print.dosdata}
\title{Print a dosdata object}
\usage{
print.dosdata(dosdata, ...)
}
\arguments{
\item{dosdata}{object of type dosdata}
}
\description{
\code{print.dosdata} prints a dosdata object.
}
|
173e821c61243f3e4f4287bf8763b6138cf3855d
|
413aac01b62ea0e6d47d09517e7acb4fac35fada
|
/0-library.R
|
570ab9781a7572841f001099235364c958ac514c
|
[] |
no_license
|
bgulbis/Dexmedetomidine_MUE_2015_2016
|
1e69e06eb9b5455d5942903d8cea0c80adefce67
|
c170831d99bdf0f49ef9c32184e9ad178f06065c
|
refs/heads/master
| 2021-01-21T04:43:20.587622
| 2016-06-16T21:30:32
| 2016-06-16T21:30:32
| 51,957,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
0-library.R
|
# 0-library.R
# Shared project setup: load packages, source directory paths, and gzip
# data directories. Sourced at the top of the project's analysis scripts.
library(dplyr)
library(BGTools)
library(stringr)
library(lubridate)
library(tidyr)
# Defines the dir.* path variables used below.
source("0-dirs.R")
# Compress the contents of each project data directory (BGTools helper).
gzip_files(dir.proposal)
gzip_files(dir.screen)
gzip_files(dir.data)
# Retired helper kept for reference: looked up a patient's unit location
# at a given timestamp from data.locations.
# lookup_location <- function(pt, start) {
# x <- filter(data.locations, pie.id == pt,
# start >= arrive.datetime,
# start <= depart.datetime)
#
# if (length(x$location) < 1) {
# "Unable to match location"
# } else {
# x$location
# }
# }
|
d07dd48da2f9502af4c43efa8cdf788f69540e52
|
3b223c25e6dea9aeb8441556b9e54f726ffaf587
|
/experiments/elisa_paper_relevant/interactiveReferenceGame/results/rscripts/app/app_data_prep.R
|
bb2bc07b87a5ed05a580878b59ea176f2e117b6c
|
[
"MIT"
] |
permissive
|
thegricean/overinformativeness
|
4d53a0cc5f60d3cadc3e7a6435a60a240965a9ae
|
d20b66148c13af473b57cc4d1736191a49660349
|
refs/heads/master
| 2021-07-08T06:11:18.648504
| 2020-07-16T15:28:35
| 2020-07-16T15:28:35
| 33,973,028
| 1
| 2
|
MIT
| 2021-05-07T03:19:41
| 2015-04-15T04:25:13
|
HTML
|
UTF-8
|
R
| false
| false
| 2,503
|
r
|
app_data_prep.R
|
# Data preparation for the app: merge model predictives (with added noise),
# empirical reference probabilities, and mean typicality ratings into one
# CSV consumed downstream.
library(dplyr)
library(ggplot2)
library(bootstrap)
library(lme4)
library(tidyr)
theme_set(theme_bw(18))
# NOTE(review): hard-coded setwd() ties this script to one machine; consider
# relative paths or here::here() instead.
setwd("/Users/elisakreiss/Documents/Stanford/overinformativeness/experiments/elisa_paper_relevant/interactiveReferenceGame/results/rscripts/app")
# Mean typicality ratings; build an object key "Color_Item" for joining.
typ <- read.table(file="data/meantyp_short.csv",sep=",", header=T,check.names = FALSE)
typ$obj = paste(typ$Color,typ$Item,sep = "_")
# df_nonoise <- read.table(file="data/visualizationPredictives.csv",sep=",", header=T,check.names = FALSE)
# df_nonoise$obj = df_nonoise$target
# Empirical reference probabilities, keyed on the same object column.
empRef <- read.table(file="data/empiricalReferenceProbs.csv",sep=",", header=T,check.names = FALSE)
empRef$obj <- empRef$target
empRef <- empRef[,c('uttType','condition','empiricProb','obj')]
# Model predictives were generated in five shards; read and stack them.
df_addnoise1 <- read.table(file="data/vizNoiseAddPredictives_1.csv",sep=",", header=T,check.names = FALSE)
df_addnoise2 <- read.table(file="data/vizNoiseAddPredictives_2.csv",sep=",", header=T,check.names = FALSE)
df_addnoise3 <- read.table(file="data/vizNoiseAddPredictives_3.csv",sep=",", header=T,check.names = FALSE)
df_addnoise4 <- read.table(file="data/vizNoiseAddPredictives_4.csv",sep=",", header=T,check.names = FALSE)
df_addnoise5 <- read.table(file="data/vizNoiseAddPredictives_5.csv",sep=",", header=T,check.names = FALSE)
df_addnoise <- rbind(df_addnoise1, df_addnoise2, df_addnoise3, df_addnoise4, df_addnoise5)
# df_nonoise$noiseRate <- 0
# df_nonoise$noise <- 1
# df_nonoise <- df_nonoise[,c('condition','obj', 'alpha','colorCost','typeCost','lengthWeight','typWeight','uttType','modelPrediction','noise','noiseRate')]
# noise flag: 1 = no noise added (rate 0), 2 = noise added.
df_addnoise$noise <- ifelse(df_addnoise$noiseRate == 0, 1, 2)
df_addnoise <- df_addnoise[,c('condition','obj', 'alpha','colorCost','typeCost','lengthWeight','typWeight','uttType','modelPrediction','noise','noiseRate')]
# full_df <- rbind(df_addnoise,df_nonoise)
# no pink
# full_df <- df_addnoise[!(df_addnoise$obj == 'pink_carrot' | df_addnoise$obj == 'orange_carrot' | df_addnoise$obj == 'brown_carrot'),]
# full_df <- full_df[!(full_df$obj == 'pink_tomato' | full_df$obj == 'red_tomato' | full_df$obj == 'green_tomato'),]
full_df <- df_addnoise
# Join empirical probabilities by shared columns, then attach typicality
# ratings by object key and keep only the columns the app needs.
df <- left_join(full_df,empRef)
df$Typicality = typ$Typicality[match(df$obj, typ$obj)]
df <- df[,c('condition','alpha','colorCost','typeCost','lengthWeight','typWeight','uttType','modelPrediction','noise','noiseRate','empiricProb','Typicality')]
# write.csv(df, "data/completeDataPredictives_nopink.csv", row.names = FALSE)
write.csv(df, "data/completeDataPredictives.csv", row.names = FALSE)
|
e6e2b6c18d5443e0ee609ca750af395ce4e2e21f
|
c44650689eec32af888d8eaa38aa40ceb76d5689
|
/R/timeseries_plots.R
|
747ffce5009f7fc3534b326cee07302ccd8abbd0
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
jsta/glmtools
|
6923c56f97bbd36ec5a50f146bbee47d0107b2fd
|
ea744cc56e93bf31be9ae2bf780de1197e0f319b
|
refs/heads/master
| 2021-01-14T08:30:44.896709
| 2020-05-13T15:18:18
| 2020-05-13T15:18:18
| 286,780,574
| 0
| 0
|
NOASSERTION
| 2020-08-11T15:29:20
| 2020-08-11T15:29:19
| null |
UTF-8
|
R
| false
| false
| 2,056
|
r
|
timeseries_plots.R
|
.plot_nc_heatmap <- function(file, var_name, reference, num_cells=100, palette, ...){
# Extract `var_name` from the NetCDF output on an evenly spaced depth grid
# (0 to the deepest surface height, `num_cells` points) and render it as a
# time-by-depth heatmap via .plot_df_heatmap().
surface <- get_surface_height(file)
depth_grid <- seq(0, max(surface[, 2]), length.out = num_cells)
var_data <- get_var(file, z_out = depth_grid, var_name = var_name,
reference = reference)
bar_label <- .unit_label(file, var_name)
.plot_df_heatmap(var_data, bar_label, num_cells, palette, ...)
}
#' @importFrom graphics .filled.contour
#' @importFrom grDevices colorRampPalette
#' @importFrom utils head
.plot_df_heatmap <- function(data, bar_title, num_cells, palette, title_prefix=NULL, overlays=NULL, xaxis=NULL, col_lim){
# Draw a filled-contour heatmap of a wide time-by-depth data frame
# (column 1 = datetime, remaining columns = values at depth offsets),
# with axes, optional overlays, and a color key labelled `bar_title`.
z_out <- rLakeAnalyzer::get.offsets(data)
# Column names starting "elv" indicate elevations measured from the bottom;
# otherwise depths are referenced from the surface.
reference = ifelse(substr(names(data)[2],1,3) == 'elv', 'bottom', 'surface')
# Default color range: span of all values; default palette: rainbow ramp.
if (missing(col_lim))
col_lim = range(data[, -1], na.rm = TRUE)
if (missing(palette))
palette <- colorRampPalette(c("violet","blue","cyan", "green3", "yellow", "orange", "red"),
bias = 1, space = "rgb")
# col_subs: tick labels for the color key; levels: finer contour breaks
# guaranteed to include every labelled tick.
col_subs <- head(pretty(col_lim, 6), -1)
levels <- sort(unique(c(col_subs, pretty(col_lim, 15))))
colors <- palette(n = length(levels)-1)
dates <- data[, 1]
matrix_var <- data.matrix(data[, -1])
if(is.null(xaxis)){
xaxis <- get_xaxis(dates)
}
yaxis <- get_yaxis_2D(z_out, reference, prefix=title_prefix)
# Drawing order matters: layout first, then the heatmap, then overlays,
# then axes on top, and finally the color key.
plot_layout(xaxis, yaxis, add=TRUE)
.filled.contour(x = dates, y = z_out, z =matrix_var,
levels= levels,
col=colors)
overlays # will plot any overlay functions
axis_layout(xaxis, yaxis) #doing this after heatmap so the axis are on top
color_key(levels, colors, subs=col_subs, col_label = bar_title)
}
#' @importFrom graphics points
.plot_nc_timeseries <- function(file, var_name){
# Scatter a single NetCDF variable against time with labelled axes.
axis_label = .unit_label(file, var_name)
series <- get_var(file, var_name = var_name)
x_layout <- get_xaxis(series[, 1])
y_layout <- get_yaxis(series[, 2], title = axis_label)
plot_layout(x_layout, y_layout, add = TRUE)
points(series)
axis_layout(x_layout, y_layout)
}
|
0becf17c160a2d1f41a575ec2f65b36075f8c968
|
e83edc59f6bb2424ab86375762b0c116000d0167
|
/R/raster_to_filenames.R
|
ace2260a67bbd7ae9058ad2852a79db35297926c
|
[] |
no_license
|
gearslaboratory/spatial.tools
|
5f919f4474ef0792c37ba88bf47ce60229f65221
|
905954e12ed3092a56afaaa6f356d3f0a7e572c1
|
refs/heads/master
| 2021-01-04T02:51:21.597561
| 2020-02-13T19:34:33
| 2020-02-13T19:34:33
| 240,344,310
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,350
|
r
|
raster_to_filenames.R
|
#' Extract filenames from all Raster* objects.
#'
#' @param x Raster*. A Raster* object (even one without values/in memory) to determine the filename(s).
#' @param unique Logical. Only return unique filenames? If FALSE, one filename per layer.
#'
#' @return Character vector of filenames.
#' @author Jonathan A. Greenberg
#' @seealso \code{\link[raster]{filename}}
#' @details This is an expansion of filename() that allows for RasterStacks, in-memory Raster*s,
#' and Raster*s without values. If a filename is not found, the entry will be "".
#'
#' @examples {
#' library("raster")
#' tahoe_highrez <- brick(system.file("external/tahoe_highrez.tif", package="spatial.tools"))
#' raster_to_filenames(tahoe_highrez)
#' raster_to_filenames(tahoe_highrez,unique=TRUE)
#' nodata <- raster()
#' raster_to_filenames(nodata)
#' }
#' @import raster
#' @export
raster_to_filenames <- function(x,unique=FALSE)
{
	# A Raster* without values, or held entirely in memory, has no backing file.
	if(!hasValues(x)) return("")
	if(inMemory(x)) return("")
	# vapply (not sapply) guarantees a character vector even for edge cases.
	filenames <- vapply(X=seq(nlayers(x)),
			FUN=function(X,raster)
			{
				raster_layer <- raster(raster,layer=X)
				if(!hasValues(raster_layer)) return("")
				if(inMemory(raster_layer)) return("")
				else
					return(filename(raster_layer))
			},
			FUN.VALUE=character(1),
			raster=x)
	if(unique)
	{
		# Collapse duplicates, e.g. all bands of one multi-band file.
		filenames <- unique(filenames)
	}
	return(filenames)
}
|
08e8119664b5c71f1ed23f1e3ac6d29e54840e2d
|
551d820cf59055378590fa27a26049c11970b794
|
/lesson2_keypoint.R
|
64ffa6dcfd158e45b14b1f67f7d8e84542987e53
|
[] |
no_license
|
GMfatcat/R-tutorial-lesson2
|
d9439d12a2eb4509f05533ca971d740c498b25f7
|
e44eeb38a66074fae8813827b52bda9b4c54d20f
|
refs/heads/master
| 2021-04-03T12:22:47.688788
| 2020-03-18T22:09:14
| 2020-03-18T22:09:14
| 248,352,492
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 529
|
r
|
lesson2_keypoint.R
|
# Run the code section by section
# The original file had three syntax errors (unquoted strings), fixed below.

# ggplot2 install: the package name must be a quoted string
install.packages("ggplot2")
# ggplot2 include
library(ggplot2)
#-----------------
# ggplot histogram (fill colour is a quoted hex string)
ggplot(data)+geom_histogram(aes(x=name,y=name2),fill="#BADDCC")
# ggplot histogram with multiple data (both colour names quoted)
ggplot(data)+geom_histogram(aes(x=name),fill="red")+
geom_histogram(aes(x=name),fill="blue")
# check colors built in R
c1 <- colors()
c1[1:length(c1)]
# data SPLIT UP: one panel per diamond colour grade
ggplot(diamonds,aes(x=carat,y=price))+geom_point(aes(color=color))+facet_wrap(~color)
|
fb5cd5a47d499113c9789f461cce1bd003e88c31
|
e9d6628fc6b6fcc7cd57aa84ac1ae875ccec0aa3
|
/R/AllClasses_old.R
|
fe9052aa3ce9e08028d571832af971f582fb6baf
|
[
"MIT"
] |
permissive
|
bayesiandemography/demarray
|
f925b3a4c10550f65cf7262d5cf3ca93902ec8fa
|
7bade2c7be5e2cc6d5e78ed4484fd0f67afdf7d9
|
refs/heads/master
| 2021-12-30T09:32:28.303845
| 2021-12-17T03:07:50
| 2021-12-17T03:07:50
| 201,564,235
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,225
|
r
|
AllClasses_old.R
|
## DemographicArray ----------------------------------------------------------------------
## Validity function for DemographicArray objects.
## Runs the demcheck checks in sequence and returns TRUE on success,
## or the first failing check's message string otherwise.
validity_DemographicArray <- function(object) {
    dims <- dim(object)
    dimtypes <- object@dimtypes
    ## the array must hold numeric data
    check <- demcheck::chk_is_numeric(x = object,
                                      name = "object")
    if (!isTRUE(check))
        return(check)
    ## dimnames must be present, fully named, and complete
    check <- demcheck::chk_names_dimnames_complete(x = object,
                                                   name = "object")
    if (!isTRUE(check))
        return(check)
    check <- demcheck::chk_dimnames_complete(x = object,
                                             name = "object")
    if (!isTRUE(check))
        return(check)
    dn <- dimnames(object)
    dn_names <- names(dn)
    ## dimtypes must be valid (pairing of origin/destination and
    ## parent/child dimensions is not yet enforced here)
    check <- demcheck::chk_member_dimtype(x = dimtypes,
                                          name = "dimtypes")
    if (!isTRUE(check))
        return(check)
    check <- demcheck::chk_no_names(x = dimtypes,
                                    name = "dimtypes")
    if (!isTRUE(check))
        return(check)
    check <- demcheck::chk_dimtypes_mutually_compatible(dimtypes)
    if (!isTRUE(check))
        return(check)
    check <- demcheck::chk_names_pairs_suffix(dimtypes = dimtypes,
                                              names = dn_names)
    if (!isTRUE(check))
        return(check)
    ## exactly one dimtype per array dimension
    check <- demcheck::chk_length_same(x1 = dimtypes,
                                       x2 = dims,
                                       name1 = "dimtypes",
                                       name2 = "dim")
    if (!isTRUE(check))
        return(check)
    TRUE
}
#' An S4 class to represent a demographic array
#'
#' Differences from ordinary array:
#'
#' \itemize{
#'   \item \code{drop} is \code{FALSE} by default
#' }
#'
#'
#' @slot dimtypes Character vector giving the dimtype of each array
#' dimension.  Per \code{validity_DemographicArray} it must be unnamed,
#' contain recognised dimtypes only, and have one entry per dimension.
#'
#' @export
setClass("DemographicArray",
         contains = "array",
         slots = c(dimtypes = "character"),
         validity = validity_DemographicArray)
## Tag subclass: differs from DemographicArray only by class
## (no extra slots or validity of its own).
#' @rdname DemographicArray
#' @export
setClass("Counts",
         contains = "DemographicArray")
## Tag subclass: differs from DemographicArray only by class
## (no extra slots or validity of its own).
#' @rdname DemographicArray
#' @export
setClass("Values",
         contains = "DemographicArray")
## Labels -------------------------------------------------------------------
#' S4 classes to represent information in labels
#'
#' S4 classes to hold information extracted from labels.
#' End users would not normally interact directly with
#' these classes.
#'
#' @slot values Vector holding information for each label.
#' @slot include_na Logical. Whether to append an
#' \code{NA} to the labels.
#'
#' @keywords internal
#'
#' @name Labels-class
NULL
#' @rdname Labels-class
setClass("Labels",
contains = "VIRTUAL",
slots = c(values = "vector",
include_na = "logical"),
validity = function(object) {
include_na <- object@include_na
val <- demcheck::chk_is_logical_flag(x = include_na,
name = "include_na")
if (!isTRUE(val))
return(val)
TRUE
})
## Categories -----------------------------------------------------------------
## HAS_TESTS
#' @rdname Labels-class
setClass("Categories",
contains = "Labels",
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_categories(values)
if (!isTRUE(val))
return(val)
TRUE
})
## Specialised classes --------------------------------------------------------
## HAS_TESTS
#' @rdname Labels-class
setClass("Triangles",
contains = "Categories",
prototype = prototype(values = c("Lower", "Upper")),
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_triangles(values)
if (!isTRUE(val))
return(val)
TRUE
})
## HAS_TESTS
#' @rdname Labels-class
setClass("Direction",
contains = "Categories",
prototype = prototype(values = c("In", "Out")),
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_direction(values)
if (!isTRUE(val))
return(val)
TRUE
})
## HAS_TESTS
#' @rdname Labels-class
setClass("Quantiles",
contains = "Categories",
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_quantiles(values)
if (!isTRUE(val))
return(val)
TRUE
})
## Numeric --------------------------------------------------------------------
## HAS_TESTS
#' @rdname Labels-class
setClass("Integers",
contains = "Labels",
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_integers(values)
if (!isTRUE(val))
return(val)
TRUE
})
## HAS_TESTS
#' @rdname Labels-class
setClass("Quantities",
contains = "Labels",
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_quantities(values)
if (!isTRUE(val))
return(val)
TRUE
})
## HAS_TESTS
#' @rdname Labels-class
setClass("Intervals",
contains = "Labels",
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_intervals(values)
if (!isTRUE(val))
return(val)
TRUE
})
## Dates ----------------------------------------------------------------------
## HAS_TESTS
#' @rdname Labels-class
setClass("Quarters",
contains = "Labels",
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_quarters(values)
if (!isTRUE(val))
return(val)
TRUE
})
## HAS_TESTS
#' @rdname Labels-class
setClass("Months",
contains = "Labels",
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_months(values)
if (!isTRUE(val))
return(val)
TRUE
})
## HAS_TESTS
#' @rdname Labels-class
setClass("Dates",
contains = "Labels",
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_dates(values)
if (!isTRUE(val))
return(val)
TRUE
})
## HAS_TESTS
#' @rdname Labels-class
setClass("DateRanges",
contains = "Labels",
validity = function(object) {
values <- object@values
val <- demcheck::chk_label_values_dateranges(values)
if (!isTRUE(val))
return(val)
TRUE
})
####################
## Shared validity logic for calendar-time label classes: both the
## `break_min` and `break_max` slots must fall on the first day of the
## given time unit (checked by demcheck::chk_first_day_unit_scalar).
## Returns TRUE, or the failing check's message string.
validity_Calendar_unit <- function(object, unit) {
    for (name in c("break_min", "break_max")) {
        x <- methods::slot(object, name)
        val <- demcheck::chk_first_day_unit_scalar(x = x,
                                                   name = name,
                                                   unit = unit)
        if (!isTRUE(val))
            return(val)
    }
    TRUE
}
## Breaks must be first days of calendar quarters.
validity_CalendarQuarters <- function(object) {
    validity_Calendar_unit(object, unit = "quarter")
}
## HAS_TESTS
#' @rdname Labels-class
setClass("LabCalendarQuarters",
         contains = "LabCalendar",
         validity = validity_CalendarQuarters)
## Breaks must be first days of calendar months.
validity_CalendarMonths <- function(object) {
    validity_Calendar_unit(object, unit = "month")
}
## HAS_TESTS
#' @rdname Labels-class
setClass("LabCalendarMonths",
         contains = "LabCalendar",
         validity = validity_CalendarMonths)
## Durations ------------------------------------------------------------------
## contains the breaks between intervals
## Validity for duration-label classes.  Checks, in order:
##   1. break_min and break_max are length-1, non-NA scalars
##   2. open_last is a single logical flag
##   3. break_max >= break_min
##   4. a zero-width range (break_min == break_max) is only allowed when
##      the final interval is open
## Returns TRUE, or a (translatable) message string describing the failure.
validity_Durations <- function(object) {
    break_min <- object@break_min
    break_max <- object@break_max
    open_last <- object@open_last
    for (name in c("break_min", "break_max")) {
        x <- methods::slot(object, name)
        val <- demcheck::chk_length_1(x = x,
                                      name = name)
        if (!isTRUE(val))
            return(val)
        val <- demcheck::chk_not_na_scalar(x = x,
                                           name = name)
        if (!isTRUE(val))
            return(val)
    }
    val <- demcheck::chk_is_logical_flag(x = open_last,
                                         name = "open_last")
    if (!isTRUE(val))
        return(val)
    ## ordering constraint between the two breaks
    if (break_max < break_min)
        return(gettextf("'%s' [%s] less than '%s' [%s]",
                        "break_max", break_max, "break_min", break_min))
    ## degenerate range: only valid with an open final interval
    if (break_min == break_max) {
        if (!open_last)
            return(gettextf("'%s' [%s] equals '%s' but '%s' is %s",
                            "break_min",
                            break_min,
                            "break_max",
                            "open_last",
                            "FALSE"))
    }
    TRUE
}
## Virtual class for duration labels: integer break points plus a flag for
## whether the final interval is open-ended.
#' @rdname Labels-class
setClass("LabDurations",
         contains = c("Labels",
                      "VIRTUAL"),
         slots = c(break_min = "integer",
                   break_max = "integer",
                   open_last = "logical"),
         validity = validity_Durations)
## HAS_TESTS
## Quarter-based durations (per the class name); adds nothing beyond LabDurations.
#' @rdname Labels-class
setClass("LabDurationsQuarters",
         contains = "LabDurations")
## HAS_TESTS
## Month-based durations (per the class name); adds nothing beyond LabDurations.
#' @rdname Labels-class
setClass("LabDurationsMonths",
         contains = "LabDurations")
|
8b90620468c5e8d93c15f1e1b6bdc66c5a9f9498
|
ae551ea3266ace856a2029e675c255c3a1ffaf19
|
/R/genmovnet.R
|
7a7886165af8b6584f0c384e6b0bcfc2cb292225
|
[] |
no_license
|
robchoudhury/INApreliminary
|
c2bd15e61512841ea2c6d7442c46820cb71c2fb1
|
593e0009111b8d29cd8784f363e49c1146b5d4fe
|
refs/heads/master
| 2022-11-12T05:03:57.691687
| 2020-07-08T17:13:08
| 2020-07-08T17:13:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,780
|
r
|
genmovnet.R
|
#' Generate network adjacency matrix for movement
#'
#' This function generates an adjacency matrix for movement, assumed symmetric (in this version). It is used by functions including \code{INAscene}. The movement adjacency matrix is composed of 1s and 0s only if lktype="pa" option is used
#'
#' Updated 2020-06-02
#' @param distf the function of distance used to estimate movement probability - 'random' (not related to distance) or 'powerlaw' (inverse power law) or 'exp' (negative exponential, to be added)
#' @param iplot if T, generates igraph plot of adjacency matrix
#' @param lktype link type, pa is presence/absence (unweighted, occurence/non-occurence), pr is a probability of occurence, wtd1 is general weight
#' @param pla inverse power law parameter a in ad^(-b)
#' @param plb inverse power law parameter b in ad^(-b)
#' @param randp random matrix with entries binomial with probability p
#' @param tlink threshold for whether link exists
#' @param xymat the matrix of xy coordinates for node locations, used when the probability of a link is a function of distance (note that the distance between each pair of locations is assumed to be greater than 1)
#' @keywords dispersal
#' @export
#' @examples
#' x1 <- genmovnet(j <- genlocs(extx=c(0,50), exty=c(0,50), nn=50, rand=TRUE), distf='random', randp=0.01, lktype='pa', tlink=0.05, iplot=T)
#' x2 <- genmovnet(j <- genlocs(extx=c(0,50), exty=c(0,50), nn=100, rand=TRUE), distf='random', randp=0.02, lktype='pa', tlink=0.05, iplot=T)
#' x7 <- genmovnet(xymat=matrix(c(1,1, 1,2, 1,3, 2,1, 2,2, 2,3),ncol=2,byrow=T), distf='powerlaw', pla=2, plb=1, lktype='pa', tlink=0.9, iplot=T)
#' x8 <- genmovnet(j <- genlocs(nn=30, extx = c(0, 10), exty = c(0, 10)), distf='powerlaw', pla=2, plb=1, lktype='pa', tlink=0.9, iplot=T)
#' x9 <- genmovnet(j <- genlocs(nn=300, extx = c(0, 10), exty = c(0, 100)), distf='powerlaw', pla=2, plb=1, lktype='pa', tlink=0.95, iplot=T)
genmovnet <- function(xymat, distf, iplot=FALSE, lktype, randp, pla, plb, tlink){
  # Number of nodes = number of coordinate rows
  n.nodes <- dim(xymat)[1]
  if (distf == 'powerlaw') { # link weight a*d^(-b)
    tdist <- as.matrix(dist(xymat, method = "euclidean", diag = TRUE, upper = TRUE))
    linkmat <- pla * tdist^(-plb)
  } else if (distf == 'random') {
    # Bernoulli(randp) links, symmetrized by copying the upper triangle down
    linkmat <- matrix(rbinom(n = n.nodes * n.nodes, size = 1, prob = randp), nrow = n.nodes)
    linkmat[lower.tri(linkmat)] <- t(linkmat)[lower.tri(linkmat)]
  } else {
    # Previously an unsupported distf (e.g. 'exp') failed later with an
    # obscure "object 'linkmat' not found"; fail fast and clearly instead.
    stop("Unsupported 'distf' value: ", distf, " (use 'powerlaw' or 'random')")
  }
  # Presence/absence links for 'powerlaw': keep links above the threshold tlink
  # (the 'random' matrix is already 0/1)
  if (lktype == 'pa' && distf == 'powerlaw') linkmat <- linkmat > tlink
  # No self-links; this also coerces a logical matrix back to numeric 0/1
  diag(linkmat) <- 0
  if (iplot) {
    # igraph is only needed for plotting, so require it lazily rather than
    # attaching it as a side effect of calling this function
    if (!requireNamespace("igraph", quietly = TRUE))
      stop("Package 'igraph' is required when iplot = TRUE")
    linkmati <- igraph::graph.adjacency(linkmat)
    plot(linkmati, edge.arrow.size = 0, vertex.color = 'skyblue')
  }
  linkmat
}
|
81a33f21d097bd0431c06cc97db9c08e4136ec58
|
5529bde1e41c21bb4b63a8f2c1addf295735f34b
|
/man/clip_read1_obj.Rd
|
1bf1a855dd3ae32e7d1a91b98cd75bf3cdae85ac
|
[] |
no_license
|
NikNakk/objectclipboard
|
075336dc603de38d9fd2cfeb8a7a49fa3b0d1c43
|
1b9201e53967933a32ca62290e3a347eae8e90a8
|
refs/heads/master
| 2020-04-07T13:11:10.789456
| 2018-11-20T13:45:56
| 2018-11-20T13:45:56
| 158,396,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 368
|
rd
|
clip_read1_obj.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clip_read1_obj.R
\name{clip_read1_obj}
\alias{clip_read1_obj}
\title{Read first object from clipboard written with clip_write_obj}
\usage{
clip_read1_obj()
}
\value{
object from clipboard (could be of any type)
}
\description{
Read first object from clipboard written with clip_write_obj
}
|
a621eee0af893c11bf02df10b97473f66dfbcbc9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/remedy/examples/listr.Rd.R
|
c2c492d18593b42f4d1fadcf5060ef92432ba2ef
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
listr.Rd.R
|
library(remedy)
### Name: listr
### Title: Convert to list
### Aliases: listr olistr
### ** Examples
## Not run:
##D #unordered list
##D remedy_example(c('line 1','line 2'),listr)
##D
##D #ordered list
##D remedy_example(c('line 1','line 2'),olistr)
## End(Not run)
|
54767eec8b4a464494bb3ba9c2b5470f3b9f3820
|
2f680317ef881d9255a68eb603c8f3a6a5383909
|
/man/dplyr-ggvis.Rd
|
19e3522e494ac60b4e2213dedd33e959aaaeac79
|
[] |
no_license
|
BAAQMD/ggvis
|
bd95b8501dc529387f7131217047859a0dc6f198
|
6e7083411ef710b214027f0452b9bb438478afe3
|
refs/heads/master
| 2021-01-20T16:28:13.546638
| 2014-09-29T19:01:38
| 2014-09-29T19:01:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,047
|
rd
|
dplyr-ggvis.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{dplyr-ggvis}
\alias{arrange_.ggvis}
\alias{arrange_.reactive}
\alias{dplyr-ggvis}
\alias{filter_.ggvis}
\alias{filter_.reactive}
\alias{group_by_.ggvis}
\alias{group_by_.reactive}
\alias{groups.ggvis}
\alias{groups.reactive}
\alias{mutate_.ggvis}
\alias{mutate_.reactive}
\alias{select_.ggvis}
\alias{select_.reactive}
\alias{summarise_.ggvis}
\alias{summarise_.reactive}
\alias{ungroup.ggvis}
\alias{ungroup.reactive}
\title{Dplyr verbs for ggvis.}
\usage{
groups.ggvis(x)
group_by_.ggvis(.data, ..., .dots, add = FALSE)
ungroup.ggvis(x)
summarise_.ggvis(.data, ..., .dots)
mutate_.ggvis(.data, ..., .dots)
arrange_.ggvis(.data, ..., .dots)
select_.ggvis(.data, ..., .dots)
\method{filter_}{ggvis}(.data, ..., .dots)
groups.reactive(x)
ungroup.reactive(x)
group_by_.reactive(.data, ..., .dots, add = FALSE)
summarise_.reactive(.data, ..., .dots)
mutate_.reactive(.data, ..., .dots)
arrange_.reactive(.data, ..., .dots)
select_.reactive(.data, ..., .dots)
\method{filter_}{reactive}(.data, ..., .dots)
}
\description{
Reactive components must be wrapped in \code{eval} - this makes it
possible to separate out the non-standard evaluation of dplyr and ggvis.
}
\examples{
library(dplyr)
base <- mtcars \%>\% ggvis(~mpg, ~cyl) \%>\% layer_points()
base \%>\% group_by(cyl) \%>\% summarise(mpg = mean(mpg)) \%>\%
layer_points(fill := "red", size := 100)
base \%>\% filter(mpg > 25) \%>\% layer_points(fill := "red")
base \%>\% mutate(cyl = jitter(cyl)) \%>\% layer_points(fill := "red")
\dontrun{
# Dynamically restrict range using filter
mtcars \%>\% ggvis(~disp, ~mpg) \%>\%
filter(cyl > eval(input_slider(0, 10))) \%>\%
layer_points()
# Dynamically compute box-cox transformation with mutate
bc <- function(x, lambda) {
if (abs(lambda) < 1e-6) log(x) else (x ^ lambda - 1) / lambda
}
bc_slider <- input_slider(-2, 2, 1, step = 0.1)
mtcars \%>\%
ggvis(~disp, ~mpg) \%>\%
mutate(disp = bc(disp, eval(bc_slider))) \%>\%
layer_points()
}
}
\keyword{internal}
|
0dab566c8846728caf8176196c98712ff120d64a
|
8f603f7a9f4cd16df4365874307beaeced2b9286
|
/bird_sankey.R
|
35bead2cc942297105052818a6e63023eec42945
|
[] |
no_license
|
rhaefer/bird_map
|
199eded4fb376c0fdbfb32cdf98668a5ae46293e
|
167868abaf3f1c9d3ce6adabdd8600e7ad8fa544
|
refs/heads/master
| 2021-01-19T10:54:14.888591
| 2017-02-23T04:27:09
| 2017-02-23T04:27:09
| 82,233,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,746
|
r
|
bird_sankey.R
|
# Load the personal eBird export and keep (species, county) pairs seen at
# least 8 times, renamed to the source/target/value columns expected by the
# riverplot helper below.
# NOTE(review): setwd() to a hard-coded absolute path makes this script
# non-portable; consider running from the project directory instead.
setwd("/Users/reid/Documents/Data Projects/birds")
library(dplyr)
birds<-read.csv("MyEBirdData.csv",header=T)
# Total count per (species, county), keeping combinations with Frequency >= 8
bird_select<-data.frame(birds %>% select(Common.Name, Count, County) %>%group_by(Common.Name, County) %>%
                          summarise(Frequency=sum(Count)) %>% filter(Frequency>=8))
# Rename into the source/target/value triple used for the Sankey diagram
bird_select$source<-bird_select$Common.Name
bird_select$target<-bird_select$County
bird_select$value<-bird_select$Frequency
bird_select<-bird_select%>% select(source, target, value)
# Build a two-column riverplot (Sankey) object from two factor columns.
#
# Args:
#   data - data.frame containing the two columns named by var1/var2
#   var1 - column name (string) for the left-hand nodes; levels() is used,
#          so the column is expected to be a factor
#   var2 - column name (string) for the right-hand nodes (also a factor)
# Returns: a riverplot object from makeRiver(), ready for plot()/riverplot().
#
# NOTE(review): `count(edges)` appears to rely on plyr::count() semantics
# (frequency of each unique row).  With dplyr attached -- as done both here
# and at the top of this script -- dplyr::count() would mask it and return a
# single total-row tally instead; confirm which package supplies count() at
# run time.
# NOTE(review): brewer.pal() only supports 3-9 colours for "Set1", so this
# presumably assumes 3-9 levels per variable -- verify.
makeRivPlot <- function(data, var1, var2) {
  require(dplyr)
  require(riverplot) # Does all the real work
  require(RColorBrewer) # To assign nice colours
  # Node labels come from the factor levels of each column
  names1 <- levels(data[, var1])
  names2 <- levels(data[, var2])
  # Recode both columns as integer codes; var2 codes are shifted past the
  # largest var1 code so the two node sets get distinct IDs
  var1 <- as.numeric(data[, var1])
  var2 <- as.numeric(data[, var2])
  edges <- data.frame(var1, var2 + max(var1, na.rm = T))
  edges <- count(edges)
  colnames(edges) <- c("N1", "N2", "Value")
  # One node per level: x = 1 places var1 nodes on the left, x = 2 var2 on the right
  nodes <- data.frame(
    ID = c(1:(max(var1, na.rm = T) +
                max(var2, na.rm = T))),
    x = c(rep(1, times = max(var1, na.rm = T)),
          rep(2, times = max(var2, na.rm = T))),
    labels = c(names1, names2) ,
    col = c(brewer.pal(max(var1, na.rm = T), "Set1"),
            brewer.pal(max(var2, na.rm = T), "Set1")),
    stringsAsFactors = FALSE)
  # Append "95" as a hex alpha suffix, making node colours semi-transparent
  nodes$col <- paste(nodes$col, 95, sep = "")
  return(makeRiver(nodes, edges))
}
# Build the river object, preview it on the active device, then render the
# same plot into a PDF.
a <- makeRivPlot(bird_select, "source", "target")
plot(x=a, srt = 45,lty=1,plot_area=0.9,nsteps=50,nodewidth=3)
pdf("test1.pdf",width=18,height=10)
riverplot(x=a, srt = 45,lty=1,plot_area=0.9,nsteps=50,nodewidth=3)
dev.off()
#source http://stats.stackexchange.com/questions/56322/graph-for-relationship-between-two-ordinal-variables
|
4a3a57219cf1128517e76c77fb2fd60a7cdd4726
|
6b62017399a34214f029abae65aa42f026da8efd
|
/new_ase_code/rscripts/utils/combine.genots.r
|
0cec5468002f94bac4749510984133ee494ee14b
|
[] |
no_license
|
kundajelab/Personal_genome_mapping
|
839369054c43bdb0acf0f725568e864f0d4d5d49
|
5f2117489ff1c76ff68bcd31be878ec0cd12bad3
|
refs/heads/master
| 2020-12-25T09:00:30.090789
| 2014-08-13T05:09:58
| 2014-08-13T05:09:58
| 23,176,657
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,940
|
r
|
combine.genots.r
|
# Merge SNP genotype calls from the non-San and San consensus call sets
# (plus the separately-called GM19193) into a single genotype matrix `genot`
# whose rows parallel the SNP coordinates in `snp.pos` and whose columns are
# individuals; then explore population structure with a neighbour-joining
# tree and a PCA restricted to exonic, variable SNPs.
# NOTE(review): cBind()/rBind() are old Matrix-package verbs that were later
# deprecated/removed in favour of plain cbind()/rbind() -- confirm the
# Matrix version this script targets.
rm(list=ls())
library(Matrix)
library(ggplot2)
library(ape)
library(geiger)
source('utils/deseq.utils.r')
# Each RData file is loaded into its own environment so the two call sets'
# `snp.pos` / `genot` objects do not clobber one another
non.san = new.env()
load('../../rawdata/variants/all/snps/allNonSan/allNonSan.snps.RData', non.san)
non.san.genot = new.env()
load('../../rawdata/variants/all/snps/allNonSan/all_genot.RData', non.san.genot)
san = new.env()
load('../../rawdata/variants/sanConsensus/snps/san.snps.RData', san)
san.genot = new.env()
load('../../rawdata/variants/sanConsensus/snps/all_genot.RData', san.genot)
# Overlaps between non-san and san SNPs: for each non-San SNP, the index of
# the first overlapping San SNP (NA if none)
ov = findOverlaps(snps.to.ranges(non.san$snp.pos), snps.to.ranges(san$snp.pos), select = 'first', ignore.strand = T)
genot = non.san.genot$genot
genot.cols = colnames(genot)
# For each San individual, append a column with its genotypes at the non-San
# SNP positions (0 where the San call set has no overlapping SNP)
for(i in 1:ncol(san.genot$genot)){
  cat(colnames(san.genot$genot)[i], '\n')
  new.genot = array(0, dim = c(length(ov), 1))
  new.genot[!is.na(ov)] = san.genot$genot[ov[!is.na(ov)], i]
  genot = cBind(genot, new.genot)
  genot.cols = append(genot.cols, colnames(san.genot$genot)[i])
}
# San-specific SNPs that haven't been added so far
ov = findOverlaps(snps.to.ranges(san$snp.pos), snps.to.ranges(non.san$snp.pos), select = 'first', ignore.strand = T)
non.ov = is.na(ov)
snp.pos = non.san$snp.pos
snp.pos = rbind(snp.pos, san$snp.pos[non.ov, ])
genot2 = san.genot$genot[non.ov, ]
genot2.cols = colnames(genot2)
for(i in 1:ncol(non.san.genot$genot)){
  cat(colnames(non.san.genot$genot)[i], '\n')
  new.genot = array(0, dim = c(sum(non.ov), 1)) # These are not present in the non-San, so just zeros
  genot2 = cBind(genot2, new.genot)
  genot2.cols = append(genot2.cols, colnames(non.san.genot$genot)[i])
}
# Reorder genot2's columns to match genot before stacking the San-specific rows
genot = rBind(genot, genot2[, match(genot.cols, genot2.cols)])
# Finally, add GM19193
# (gm19193.snps.RData supplies gm$snp.pos; GM19193.snps.RData supplies
# `geno.info` with maternal/paternal haplotype calls in the global env)
gm = new.env()
load('../../rawdata/variants/novelCalls/filtered/snps/gm19193.snps.RData', gm)
load('../../rawdata/variants/novelCalls/filtered/snps/GM19193.snps.RData')
gm.genot = geno.info$mat + geno.info$pat
ov = findOverlaps(snps.to.ranges(gm$snp.pos), snps.to.ranges(snp.pos), select = 'first', ignore.strand = T)
new.gm.genot = array(0, dim = c(nrow(snp.pos), 1))
new.gm.genot[ov[!is.na(ov)]] = gm.genot[!is.na(ov)]
genot = cBind(genot, new.gm.genot)
genot.cols = append(genot.cols, 'GM19193')
colnames(genot) = genot.cols
# GM19193-specific SNPs: zero for everyone else, GM19193 in the last column
snp.pos = rbind(snp.pos, gm$snp.pos[is.na(ov), ])
tmp.genot = array(0, dim = c(sum(is.na(ov)), ncol(genot)))
tmp.genot[, ncol(genot)] = gm.genot[is.na(ov)]
genot = rBind(genot, tmp.genot)
save(genot, file = '../../rawdata/variants/all_Mar13/genot.RData')
save(snp.pos, file = '../../rawdata/variants/all_Mar13/snps.RData')
# ---- Downstream analysis (can restart from here using the saved files) ----
load('../../rawdata/variants/all_Mar13/genot.RData')
load('../../rawdata/variants/all_Mar13/snps.RData')
load('../../rawdata/transcriptomes/gencode.v13.annotation.noM.flat.RData')
# SNPs on exons
ov = findOverlaps(snps.to.ranges(snp.pos), regions.to.ranges(gene.meta), select = 'first', ignore.strand = T)
sel = !is.na(ov)
# Random 100k exonic SNPs, excluding two individuals
# NOTE(review): sample() is not seeded, so the tree/PCA below are not
# reproducible run-to-run.
genot.sample = genot[sel, ][sample(1:sum(sel), 100000), colnames(genot) != 'GM19193' & colnames(genot) != 'GM12890']
colnames(genot.sample) = fix.indiv.names(colnames(genot.sample))
# Neighbour-joining tree on Manhattan distances between individuals
nj.tree = nj(dist(t(genot.sample), method = 'manhattan'))
edges = nj.tree$edge
edge.len = as.integer(nj.tree$edge.length * 100 / max(nj.tree$edge.length))
# Label edges between internal nodes with scaled length > 10
# NOTE(review): the internal-node cutoff compares against ncol(genot) rather
# than ncol(genot.sample) (the actual tip count) -- verify this is intended.
sel.edges = edges[, 1] > ncol(genot) & edges[, 2] > ncol(genot) & edge.len > 10
edge.lab = array('', dim = c(nrow(edges), 1))
edge.lab[sel.edges] = edge.len[sel.edges]
pdf('../../rawdata/variants/all_Mar13/genot_pca_noGM19193_noGM12890_exons_nj.pdf')
plot(nj.tree, 'u', cex = 1, edge.width = 0.5, no.margin = T, lab4ut='axial', label.offset = 0.5, tip.col = get.pop.col(get.pop(colnames(genot.sample))))
edgelabels(edge.lab, frame = 'none', adj = c(1, 0.5), cex = 0.9)
dev.off()
# Select variable sites: drop SNPs where nearly all individuals (all but
# at most 3) share the same genotype
sel = rowSums(genot.sample == 0) < ncol(genot.sample) - 3 & rowSums(genot.sample == 1) < ncol(genot.sample) - 3 & rowSums(genot.sample == 2) < ncol(genot.sample) - 3
#genot.sample[genot.sample == 2] = 1 # Trios have much fewer homozygous alternative calls. This causes biases
genot.norm = scale(genot.sample[sel, ])
colnames(genot.norm) = colnames(genot.sample)
# PCA is fitted excluding GM12878 and GM19240; all individuals are then
# projected onto the fitted rotation for plotting
pca.fit = prcomp(t(genot.norm[, !(colnames(genot.sample) %in% c('GM12878', 'GM19240'))]), center = F, scale = F)
p=plot.pcs(t(genot.norm) %*% pca.fit$rotation, pca.fit$rotation, pca.fit$sdev, labels = array('', dim=c(ncol(genot.sample),1)), groups = get.pop(colnames(genot.sample)), all = F, ndim = 2)
ggsave('../../rawdata/variants/all_Mar13/genot_pca_noGM19193_noGM12890_exonsVariable_small.pdf', p$p1, width = 4, height = 3)
save(genot.norm, pca.fit, file = '../../rawdata/variants/all_Mar13/genot_pca_noGM19193_exonsVariable_pca.RData')
ggsave('../../rawdata/variants/all_Mar13/genot_pca_noGM19193_exonsVariable.pdf', p$p1, width = 13.6, height = 11.8)
ggsave('../../rawdata/variants/all_Mar13/genot_eigen_noGM19193_exonsVariable.pdf', p$p2, width = 6.5, height = 5.6)
|
77876f274901bfe7a3045f8dad386810cc6eab22
|
70e015e71ce31e129c141ddfbcdbf5b200c52df4
|
/Content/examples_code/ZIP_Nmixture_SwissGreatTits/ZIP_Nmixture_SwissGreatTits_setup.R
|
7400c4f6a764456c36af706e2e42702e8623c216
|
[] |
no_license
|
lponisio/Vogelwarte_NIMBLE_workshop
|
ec258a845381621c303b779bb72c5b72924bbdd6
|
323a8ab63ba5b0199b2e3b368dfe2f51bfa17e1f
|
refs/heads/master
| 2020-06-04T00:58:16.528340
| 2018-04-25T11:31:03
| 2018-04-25T11:31:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,206
|
r
|
ZIP_Nmixture_SwissGreatTits_setup.R
|
# This example is adapted for NIMBLE from the AHM book by Jacob Levine and Perry de Valpine
# 6.11.1 Bayesian fitting of the basic ZIP N-mixture model
# ------------------------------------------------------------------------
# From section 6.9.2 - for data creation/organization:
if(!exists("DO_PLOT"))
DO_PLOT <- FALSE
library(AHMbook)
## Code modified to use the SwissTits data set included in the AHMbook package
data(SwissTits)
str(SwissTits)
SwissTits$species # Available species
# Select Great tit and covariate data from 2013 and
# drop 4 sites not surveyed in 2013
y0 <- SwissTits$counts[, , '2013', 'Great tit']
( NA.sites <- which(rowSums(is.na(y0)) == 3) ) # Unsurveyed sites
y <- y0[-NA.sites, ] # Drop them from the count data
tits <- SwissTits$sites[-NA.sites, ] # Also drop from the site covariates
str(y) # Check the matrix of count data
# Get date and duration data for 2013, without the NA.sites rows:
date <- SwissTits$date[-NA.sites, , '2013']
dur <- SwissTits$dur[-NA.sites, , '2013']
# Plot observed data: counts vs survey date (Fig. 6-9)
if(DO_PLOT) matplot(t(date), t(y), type = "l", lwd = 3, lty = 1, frame = F, xlab = "Survey data (1 = April 1)", ylab = "Count of Great Tits")
# Load unmarked, create unmarked data frame and inspect result
library(unmarked)
time <- matrix(rep(as.character(1:3), nrow(y)), ncol = 3, byrow = TRUE)
umf <- unmarkedFramePCount(y = y,
siteCovs=data.frame(elev=scale(tits[,"elev"]), forest=scale(tits[,"forest"]), iLength=1/tits[,"rlength"]),
obsCovs=list(time = time, date = scale(date), dur = scale(dur)))
summary(umf) # Summarize unmarked data frame
summary(apply(y, 1, max, na.rm = TRUE)) # Summarize max counts
elev <- umf@siteCovs$elev ; elev2 <- elev^2
forest <- umf@siteCovs$forest ; forest2 <- forest^2
date <- matrix(umf@obsCovs$date, ncol = 3, byrow = TRUE)
dur <- matrix(umf@obsCovs$dur, ncol = 3, byrow = TRUE)
date[is.na(date)] <- 0 ; date2 <- date^2
dur[is.na(dur)] <- 0 ; dur2 <- dur^2
iRoute <- umf@siteCovs$iLength
# Design matrix for abundance model (no intercept)
lamDM <- model.matrix(~ elev + elev2 + forest + forest2 + elev:forest + elev:forest2 + iRoute)[,-1]
# Initial values
Nst <- apply(y, 1, max, na.rm = T) + 1
Nst[is.na(Nst)] <- round(mean(y, na.rm = TRUE))
Nst[Nst == "-Inf"] <- round(mean(y, na.rm = TRUE))
# Generator of MCMC initial values: latent abundances N start at Nst
# (computed above from the observed max counts), mean.p (presumably the
# per-survey detection probability) at 0.5, and all regression
# coefficients at zero.
# NOTE(review): runif(7, 0, 0) / runif(13, 0, 0) return all zeros but still
# advance the RNG stream; rep(0, n) would be clearer, though swapping it in
# would change the sequence of subsequent random draws.
SGT_inits <- function(){ list(N = Nst,
                             beta0 = 0,
                             mean.p = rep(0.5, 3),
                             beta = runif(7, 0, 0),
                             alpha = runif(13, 0, 0)
)}
# Bundle data and choose to fit simple ZIP model (model 1)
SGT_data1 <- list(y = y,
lamDM = lamDM,
elev = elev,
date = date,
dur = dur,
elev2 = elev2,
date2 = date2,
dur2 = dur2,
e = 1e-06,
hlam.on = 0,
hp.site.on = 0,
hp.survey.on = 0,
nsite = 263,
nrep = 3)
|
13b765234c6f82141583c80e3b84a98ce9aea166
|
089f560b12e6de236bc52852a05c6ad6c09df17e
|
/R/delphi-states.R
|
5c151a79228cce31834fd0670ffdcdcf488b18a7
|
[
"MIT"
] |
permissive
|
mlamias/delphiepidata
|
e392c518f6f4c0c0d1c8d102c62845243e75ec30
|
7dc9eb67a3530cc027e66d3ccc7e31003cd7f536
|
refs/heads/master
| 2022-01-15T20:01:27.577372
| 2019-05-29T12:18:00
| 2019-05-29T12:18:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
delphi-states.R
|
#' Supported States
#'
#' Two-letter USPS abbreviations for the 50 US states plus the
#' District of Columbia.
#'
#' @docType data
#' @export
delphi_states <- c(
  'AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL',
  'GA', 'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA',
  'MD', 'ME', 'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE',
  'NH', 'NJ', 'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'RI',
  'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VT', 'WA', 'WI', 'WV', 'WY'
)
|
18205f3646909fad5c5131f1d2e4b7556afeed3b
|
782dfc9dbe0efa627fccdfac7bf4ce5efaafe00d
|
/R/get_degree_distribution.R
|
03f08e3f1ce9f20ad1f913efa232efbeb2757733
|
[] |
no_license
|
KID4978/DiagrammeR
|
a2943b8a97c5f68c70a08a9bfde5d33d33565262
|
c2c060a67e9b606fedb2f9b653d0361a09573a80
|
refs/heads/master
| 2021-01-23T01:17:11.540769
| 2017-03-22T16:43:32
| 2017-03-22T16:43:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,297
|
r
|
get_degree_distribution.R
|
#' Get degree distribution data for a graph
#' @description Get degree distribution data for
#' a graph. Graph degree is represented as a
#' frequency of degree values over all nodes in
#' the graph.
#' @param graph a graph object of class
#' \code{dgr_graph}.
#' @return a named vector of degree frequencies
#' where the degree values serve as names.
#' @examples
#' # Create a random, directed graph with 18 nodes
#' # and 22 edges
#' graph <-
#'   create_random_graph(
#'     n = 18, m = 22,
#'     set_seed = 23)
#'
#' # Get degree distribution data for `random_graph`
#' graph %>% get_degree_distribution()
#' #>          0          1          2          3
#' #> 0.05555556 0.22222222 0.22222222 0.22222222
#' #>          4
#' #> 0.27777778
#' @importFrom igraph degree_distribution
#' @export get_degree_distribution
get_degree_distribution <- function(graph) {

  # Validation: Graph object is valid
  if (!graph_object_valid(graph)) {
    stop("The graph object is not valid.")
  }

  # Convert the graph to an igraph object
  ig_graph <- to_igraph(graph)

  # Get the relative frequency of each degree value across all nodes
  deg_dist <- degree_distribution(ig_graph)

  # Name each frequency by its degree; `seq_along(deg_dist) - 1` is safe
  # even for a zero-length result, whereas `seq(0, length(deg_dist) - 1)`
  # would yield c(0, -1) and break the names assignment
  names(deg_dist) <- seq_along(deg_dist) - 1

  deg_dist
}
|
e306e114d2dfb4b90e428256d1514571d404628f
|
e58cb0a3ce95401501f0f0441a492529632b41f7
|
/analysis/getTranscriptSeqs.R
|
5a220b4e5a1c875d18fa77852ee32b61960df186
|
[] |
no_license
|
larsgr/GRewdPipeline
|
ea451c75b5f4d91d4f92a941e3b2f3461566ee98
|
77a7d5b17373a2139d34327942fcec300b62fb40
|
refs/heads/master
| 2020-12-29T02:19:46.034273
| 2019-01-15T10:24:33
| 2019-01-15T10:24:33
| 30,870,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,787
|
r
|
getTranscriptSeqs.R
|
####
#
# get the sequence of the "conserved" genes
#
#
# get grpIDs for the genes that are significantly DE in all species
#
source("~/GRewd/pipeline/R/orthoGrpTools.R")
DEmat <- readRDS("~/GRewd/pipeline/data/DEmat.RDS") # from: superGeneModel.Rmd
dim(DEmat$peak$pVal)
sigMatUp <- lapply(DEmat, with, { ifelse(is.na(pAdj), F, pAdj < 0.05 & lfc > 1) })
sigMatDown <- lapply(DEmat, with, { ifelse(is.na(pAdj), F, pAdj < 0.05 & lfc < -1) })
# get genes with significant peak/ramp up/down in all species
grpIDs <- unique(c(names(which(apply(sigMatUp$ramp,1,all))),
names(which(apply(sigMatUp$peak,1,all))),
names(which(apply(sigMatDown$ramp,1,all))),
names(which(apply(sigMatDown$peak,1,all)))))
#
# get sequences
#
####
#
# extractFromFasta
#
library("RLinuxModules")
moduleInit()
module("load samtools")
# Extract sequences from an indexed FASTA file via `samtools faidx`.
#
# Args:
#   fastaFile - path to the FASTA file (samtools needs/creates a .fai index)
#   seqIDs    - character vector of sequence IDs to extract
#   outFile   - optional path; if NULL the FASTA text is returned as a
#               character vector, otherwise output is redirected to the file
#
# All paths are shell-quoted (the original only quoted seqIDs), so spaces
# or shell metacharacters in file names cannot break or inject into the
# command line.
extractFromFasta <- function(fastaFile, seqIDs, outFile=NULL){
  cmd <- paste("samtools faidx", shQuote(fastaFile),
               paste(shQuote(seqIDs), collapse = " "))
  if (is.null(outFile)) {
    # capture stdout: the extracted FASTA records
    return(system(cmd, intern = TRUE))
  }
  # write to file; returns the shell exit status
  system(paste(cmd, ">", shQuote(outFile)))
}
####
# get seqIDs from orthoGrps
orthoPath <- "/mnt/NOBACKUP/mariansc/share/orthos"
grps <- loadOrthoGrpsArray(file.path(orthoPath,"splitGroups/goodGroups.txt"))
spcs <- c("BrDi","HoVu","MeNu1","NaSt","StLa")
spcs <- setNames(spcs,spcs)
# look up the table to get the original transcriptIDs
# Map ORF sequence IDs back to Trinity transcript IDs by grepping the
# per-species "longestORFs" lookup table.
#
# Args:
#   seqIDs - character vector of ORF sequence IDs (used as grep patterns)
#   spc    - species code (e.g. "BrDi"); selects the <spc>.tbl lookup file
# Returns: named character vector of transcript IDs ("TRnnn|cX_gY_iZ"),
#          extracted from column 2 of the matching rows.
#
# NOTE(review): each seqID is interpolated unquoted into a shell `grep`;
# IDs with shell metacharacters would break the command -- confirm the IDs
# are always plain tokens.  sapply() also assumes exactly one matching line
# per seqID; multiple hits would change the result's shape.
seqID2transcriptID <- function(seqIDs, spc){
  filename <- file.path(orthoPath,"longestORFs",paste0(spc,".tbl"))
  seqIDtbl <- sapply(seqIDs,function(seqID){
    system(paste("grep",seqID,filename),intern = T)
  })
  # Column 2 holds "cds.<transcript>|m.<n>"; strip the "cds." prefix and
  # the "|m.<n>" suffix to recover the transcript ID
  sapply(strsplit(seqIDtbl,split = "\t"), function(x){
    sub("cds\\.(TR[0-9]+\\|c[0-9]+_g[0-9]+_i[0-9]+)\\|m\\.[0-9]+","\\1",x[2])
  })
}
#
#
# Store sequence
#
# Copy the per-group alignment files into `outDir`, then write one FASTA
# file per (group, species, paralog) with the full transcript sequence.
outDir = "seqs"
dir.create(outDir)
# get alignment files:
alnFiles <- file.path(orthoPath,"grpAligned",paste0(sub("\\.[0-9]+","",grpIDs),".aln"))
cdsalnFiles <- file.path(orthoPath,"pal2nal",paste0(sub("\\.[0-9]+","",grpIDs),".cds.aln"))
# copy them to the outDir
file.copy(alnFiles,outDir)
file.copy(cdsalnFiles,outDir)
# for each grp (lapply used for its side effects: directory + file creation)
lapply(setNames(grpIDs,grpIDs),function(grpID){
  # create path for grp
  grpDir <- file.path(outDir,grpID)
  dir.create(grpDir)
  #for each spc in grp
  lapply(spcs,function(spc){
    seqIDs <- grps[[grpID,spc]]
    transcriptIDs <- seqID2transcriptID(seqIDs,spc)
    fastaFile <- file.path("/mnt/NOBACKUP/mariansc/share/trinity",spc,paste0(spc,".fasta"))
    # for each paralogous seq, write <grpDir>/<spc>_<seqID>.fa
    for(i in seq_along(transcriptIDs)){
      outFile <- file.path(grpDir,paste0(spc,"_",names(transcriptIDs)[i],".fa"))
      extractFromFasta(fastaFile = fastaFile, seqIDs = transcriptIDs[i],
                       outFile = outFile)
    }
  })
})
|
359da36799e789837b39e2f3537dd7aaa243ed0d
|
cc4dedb1efc2abcdc2176ad763a624aea83cf978
|
/to_reprex.R
|
e35e0dbd75c11bccd18af5679be2d94fdb17289c
|
[] |
no_license
|
nmolanog/AB_material
|
f39b17af0ffc2a9423778d3cacd7c6c1fa383181
|
c22dfe6e04140ae45b0de79c6d3e9b2c3d284870
|
refs/heads/master
| 2022-06-14T01:19:34.959414
| 2022-06-08T19:48:20
| 2022-06-08T19:48:20
| 149,158,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,556
|
r
|
to_reprex.R
|
###############
# Reprex: draw the same bivariate normal density (mean my_mean, covariance V)
# twice -- as a base-graphics persp() surface and as a ggplot2 contour plot --
# then place them side by side with cowplot::plot_grid().
rm(list=ls())            # NOTE(review): clearing the workspace inside a script is discouraged
options(max.print=999999)
library(pacman)
p_load(tidyverse)
p_load(mvtnorm)          # dmvnorm()
p_load(cowplot)          # plot_grid()
p_load(gridGraphics)     # lets plot_grid() embed the recorded base plot
p_load(GA)               # jet.colors() palette
my_mean<-c(25,65)
mycors<-seq(-1,1,by=.25) # candidate correlations; only mycors[3] (-0.5) is used below
sd_vec<-c(5,7)
i<-3
temp_cor<-matrix(c(1,mycors[i],
                   mycors[i],1),
                 byrow = T,ncol=2)
V<-sd_vec %*% t(sd_vec) *temp_cor  # covariance matrix from sds and correlation
my_x<-seq(my_mean[1]-3*sd_vec[1], my_mean[1]+3*sd_vec[1], length.out=20)
my_y<-seq(my_mean[2]-3*sd_vec[2], my_mean[2]+3*sd_vec[2], length.out=20)
temp_f<-function(a,b){dmvnorm(cbind(a,b), my_mean,V)}
my_z<-outer(my_x, my_y,temp_f)     # density evaluated on the 20x20 grid
nlevels<-20
my_zlim <- range(my_z, finite = TRUE)
my_levels <- pretty(my_zlim, nlevels)
# Facet colors: average the four corner heights of each grid cell...
zz <- (my_z[-1, -1] + my_z[-1, -ncol(my_z)] + my_z[-nrow(my_z), -1] + my_z[-nrow(my_z),
                                                                           -ncol(my_z)])/4
cols <- jet.colors(length(my_levels) - 1)
zzz <- cut(zz, breaks = my_levels, labels = cols)  # ...and bin them into the palette
persp(my_x, my_y, my_z, theta = -25, phi = 45, expand = 0.5,xlab="x",ylab="y",zlab="f(x,y)",col = as.character(zzz))
p1 <- recordPlot()
# Finer 200x200 grid for the ggplot contour rendering of the same density.
data.grid <- expand.grid(x = seq(my_mean[1]-3*sd_vec[1], my_mean[1]+3*sd_vec[1], length.out=200),
                         y = seq(my_mean[2]-3*sd_vec[2], my_mean[2]+3*sd_vec[2], length.out=200))
q.samp <- cbind(data.grid, prob = dmvnorm(data.grid, mean = my_mean, sigma = V))
# NOTE(review): `..level..` and geom_contour(size=) are deprecated in current
# ggplot2 (use after_stat(level) / linewidth) -- confirm the installed version.
p2<-ggplot(q.samp, aes(x, y, z = prob)) +
  geom_contour(aes(color = ..level..), bins = 11, size = 1) +
  scale_color_gradientn(colours = jet.colors(11)) +
  theme_bw()
plot_grid(p1, p2)
|
fe6e1ac2cf160eb48390813231628e587f3fb781
|
f96d2b4516dda1e1b5007fb953671ed301ba0210
|
/boosting_final.R
|
61b0f2e057b2a0ef7b226617bcddd880a5dead3d
|
[] |
no_license
|
simonzhangzp/ml3
|
c7180bc05f8f807136be1dd2cbc1769e04f99a17
|
b5ecdc3b42479e376c23d44e95fbc9a176367dfc
|
refs/heads/master
| 2020-12-25T12:40:23.179610
| 2017-04-08T18:13:39
| 2017-04-08T18:13:39
| 83,711,926
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,953
|
r
|
boosting_final.R
|
# Customer-churn classification: gradient boosting (gbm) with two predictor
# sets and two shrinkage rates, plus an AdaBoost (ada) model for comparison.
# Input: Lab3Data.csv with a "Churn" column ("Yes"/"No") and a customerID.
rm(list=ls())  # NOTE(review): wiping the global environment in a script is discouraged
library(MASS)
library(gbm)
library(randomForest)
set.seed(1)
customer = read.csv("Lab3Data.csv", header=TRUE)
customer=na.omit(customer)
customer$Churn <- ifelse(customer$Churn=="Yes", 1, 0)  # recode target to 0/1
test = sample(1:nrow(customer),1000)                   # 1000-row holdout
customer.train=customer[-test,]
customer.test = customer[test,"Churn"]                 # holdout labels only
summary(customer)
boost.customer = gbm(Churn ~.-customerID, #formula
                     data = customer.train,
                     #training dataset
                     distribution = 'multinomial', # "bernoulli" (logistic regression for 0-1 outcomes),"multinomial"(classification when there are more than 2 classes),
                     #'gaussian' for regression models, 'bernouli' for classification
                     n.trees = 5000, #number of trees
                     interaction.depth = 4, #depth of each tree or number of leaves
                     shrinkage = 0.001 #0.001 default value
)
#### calculate test error rate
yhat.boost <- predict(boost.customer, newdata = customer[test,], n.trees = 5000,type="response") # Use 5000 trees again for test set
p.pred<- apply(yhat.boost, 1, which.max) # assign the column number of which has a bigger probility
yhat.pred <- ifelse(p.pred=="2", 1, 0)   # column 2 corresponds to class "1" (churn)
x=table(yhat.pred,customer.test)
as.numeric(x["1",]["1"]+x["0",]["0"])/1000 # accuracy rate
1-as.numeric(x["1",]["1"]+x["0",]["0"])/1000 # error rate
# Tried different combination of interaction.depth, shrinkage rate, predictors, and got the best error rate of 0.192
# when using TotalCharges+MonthlyCharges+Contract+tenure+InternetService+PaymentMethod as predictors.(Better than use all of the predictors))
boost.customer = gbm(Churn ~TotalCharges+MonthlyCharges+Contract+tenure+InternetService+PaymentMethod-customerID, #formula
                     data = customer.train,
                     #training dataset
                     distribution = 'multinomial', # "bernoulli" (logistic regression for 0-1 outcomes),"multinomial"(classification when there are more than 2 classes),
                     #'gaussian' for regression models, 'bernouli' for classification
                     n.trees = 5000, #number of trees
                     interaction.depth = 4, #depth of each tree or number of leaves
                     shrinkage = 0.001 #0.001 default value
)
#### calculate test error rate
yhat.boost <- predict(boost.customer, newdata = customer[test,], n.trees = 5000,type="response") # Use 5000 trees again for test set
p.pred<- apply(yhat.boost, 1, which.max) # assign the column number of which has a bigger probility
yhat.pred <- ifelse(p.pred=="2", 1, 0)
x=table(yhat.pred,customer.test)
as.numeric(x["1",]["1"]+x["0",]["0"])/1000 # accuracy rate
1-as.numeric(x["1",]["1"]+x["0",]["0"])/1000 # error rate
# NOTE(review): yhat.boost is an n x 2 (x 1) probability array, not a 0/1
# vector -- subtracting the labels mixes both class probabilities via
# recycling, so this "MSE" is dimensionally suspect; confirm intent.
gbm.mse <- mean((yhat.boost -customer.test)^2)
gbm.mse
set.seed(1)
boost.customer1 = gbm(Churn ~.-customerID, #formula
                      data = customer.train,
                      #training dataset
                      distribution = 'multinomial', #'gaussian' for regression models, 'bernouli' for classification
                      n.trees = 5000, #number of trees increasing slightly decreases mse
                      interaction.depth = 4, #depth of each tree or number of leaves
                      shrinkage = 0.0001, #0.001 default value
                      verbose = F
)
#### calculate test error rate
yhat.boost1 <- predict(boost.customer1, newdata = customer[test,], n.trees = 5000,type="response") # Use 5000 trees again for test set
p.pred<- apply(yhat.boost1, 1, which.max) # assign the column number of which has a bigger probility
yhat.pred <- ifelse(p.pred=="2", 1, 0)
x=table(yhat.pred,customer.test)
as.numeric(x["1",]["1"]+x["0",]["0"])/1000 # accuracy rate
1-as.numeric(x["1",]["1"]+x["0",]["0"])/1000 # error rate
# NOTE(review): same dimensional concern as gbm.mse above.
gbm.mse1 <- mean((yhat.boost1 -customer.test)^2)
gbm.mse1
library(C50)
library(ggplot2)
library(ada)
set.seed(1)
adafit <- ada(Churn ~.-customerID, #formula
              data = customer.train, #training data set
              iter = 50, #number of tree iterations
              bag.frac = 0.5, #Randomly samples the churnTrain set. Value of 1 equivalent to bagging
              rpart.control(maxdepth=30,minsplit=20,cp=0.01,xval=10)
)
#maxdepth controls depth of trees (leaves), minsplit is the minimum number of observations in a node before attempting split (20) and that split must decrease the overall error by 0.01 (cp controls complexity)
print(adafit)
varplot(adafit)  # variable importance plot
prtrain <- predict(adafit, newdata=customer[test,])
#table(churnTrain[,"churn"], prtrain , dnn=c("Actual", "Predicted"))
round(100* table(customer.test, prtrain,dnn=c("% Actual", "% Predicted"))/length(prtrain),1)
|
e718f3900d66f64258b4390911a2168822b2b942
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/MarkEdmondson1234/autoGoogleAPI/genomics_functions.R
|
da1a23bdc96c50c2f0fc9d12321e19ab72652fbf
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,663
|
r
|
genomics_functions.R
|
#' Genomics API
#' Upload, process, query, and search Genomics data in the cloud.
#'
#' Auto-generated code by googleAuthR::gar_create_api_skeleton
#' at 2017-03-05 19:53:15
#' filename: /Users/mark/dev/R/autoGoogleAPI/googlegenomicsv1alpha2.auto/R/genomics_functions.R
#' api_json: api_json
#'
#' @details
#' Authentication scopes used are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' \item https://www.googleapis.com/auth/compute
#' }
#'
#' @docType package
#' @name genomics_googleAuthR
#'
NULL
## NULL
#' A helper function that tests whether an object is either NULL _or_
#' a list of NULLs
#'
#' @keywords internal
is.NullOb <- function(x) {
  # || short-circuits: a plain NULL never reaches the element scan.
  # vapply() pins the element results to logical, unlike sapply().
  # Empty input is safe: all(logical(0)) is TRUE, so list() counts as "all NULL".
  is.null(x) || all(vapply(x, is.null, logical(1)))
}
#' Recursively step down into list, removing all such objects
#'
#' @keywords internal
rmNullObs <- function(x) {
  # Drop NULL-ish elements at this level, then recurse into any sub-lists.
  kept <- Filter(function(el) !is.NullOb(el), x)
  lapply(kept, function(el) {
    if (is.list(el)) rmNullObs(el) else el
  })
}
#' Sets status of a given operation. Any new timestamps (as determined bydescription) are appended to TimestampEvents. Should only be called by VMscreated by the Pipelines Service and not by end users.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param SetOperationStatusRequest The \link{SetOperationStatusRequest} object to pass to this method
#' @importFrom googleAuthR gar_api_generator
#' @family SetOperationStatusRequest functions
#' @export
pipelines.setOperationStatus <- function(SetOperationStatusRequest) {
    # Endpoint: genomics.pipelines.setOperationStatus (PUT with a request body).
    endpoint <- "https://genomics.googleapis.com/v1alpha2/pipelines:setOperationStatus"
    put_status <- googleAuthR::gar_api_generator(endpoint, "PUT",
                                                 data_parse_function = function(resp) resp)
    # Validate the body class before issuing the request.
    stopifnot(inherits(SetOperationStatusRequest, "gar_SetOperationStatusRequest"))
    put_status(the_body = SetOperationStatusRequest)
}
#' Deletes a pipeline based on ID.Caller must have WRITE permission to the project.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param pipelineId Caller must have WRITE access to the project in which this pipeline
#' @importFrom googleAuthR gar_api_generator
#' @export
pipelines.delete <- function(pipelineId) {
    # Endpoint: genomics.pipelines.delete -- DELETE /v1alpha2/pipelines/{pipelineId}.
    endpoint <- sprintf("https://genomics.googleapis.com/v1alpha2/pipelines/%s", pipelineId)
    delete_pipeline <- googleAuthR::gar_api_generator(endpoint, "DELETE",
                                                      data_parse_function = function(resp) resp)
    delete_pipeline()
}
#' Gets controller configuration information. Should only be calledby VMs created by the Pipelines Service and not by end users.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param operationId The operation to retrieve controller configuration for
#' @param validationToken
#' @importFrom googleAuthR gar_api_generator
#' @export
pipelines.getControllerConfig <- function(operationId = NULL, validationToken = NULL) {
    # Endpoint: genomics.pipelines.getControllerConfig -- GET with optional
    # query parameters; NULL parameters are stripped before the call.
    endpoint <- "https://genomics.googleapis.com/v1alpha2/pipelines:getControllerConfig"
    query <- rmNullObs(list(operationId = operationId,
                            validationToken = validationToken))
    get_config <- googleAuthR::gar_api_generator(endpoint, "GET", pars_args = query,
                                                 data_parse_function = function(resp) resp)
    get_config()
}
#' Lists pipelines.Caller must have READ permission to the project.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param namePrefix Pipelines with names that match this prefix should be
#' @param pageToken Token to use to indicate where to start getting results
#' @param pageSize Number of pipelines to return at once
#' @param projectId Required
#' @importFrom googleAuthR gar_api_generator
#' @export
pipelines.list <- function(namePrefix = NULL, pageToken = NULL, pageSize = NULL,
    projectId = NULL) {
    # Endpoint: genomics.pipelines.list -- GET with paging/filter query
    # parameters; NULL parameters are stripped before the call.
    endpoint <- "https://genomics.googleapis.com/v1alpha2/pipelines"
    query <- rmNullObs(list(namePrefix = namePrefix, pageToken = pageToken,
                            pageSize = pageSize, projectId = projectId))
    list_pipelines <- googleAuthR::gar_api_generator(endpoint, "GET", pars_args = query,
                                                     data_parse_function = function(resp) resp)
    list_pipelines()
}
#' Creates a pipeline that can be run later. Create takes a Pipeline thathas all fields other than `pipelineId` populated, and then returnsthe same pipeline with `pipelineId` populated. This id can be usedto run the pipeline.Caller must have WRITE permission to the project.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param Pipeline The \link{Pipeline} object to pass to this method
#' @importFrom googleAuthR gar_api_generator
#' @family Pipeline functions
#' @export
pipelines.create <- function(Pipeline) {
    # Endpoint: genomics.pipelines.create -- POST /v1alpha2/pipelines.
    endpoint <- "https://genomics.googleapis.com/v1alpha2/pipelines"
    post_pipeline <- googleAuthR::gar_api_generator(endpoint, "POST",
                                                    data_parse_function = function(resp) resp)
    # Validate the body class before issuing the request.
    stopifnot(inherits(Pipeline, "gar_Pipeline"))
    post_pipeline(the_body = Pipeline)
}
#' Runs a pipeline. If `pipelineId` is specified in the request, thenrun a saved pipeline. If `ephemeralPipeline` is specified, then runthat pipeline once without saving a copy.The caller must have READ permission to the project where the pipelineis stored and WRITE permission to the project where the pipeline will berun, as VMs will be created and storage will be used.If a pipeline operation is still running after 6 days, it will be canceled.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/compute
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/compute, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param RunPipelineRequest The \link{RunPipelineRequest} object to pass to this method
#' @importFrom googleAuthR gar_api_generator
#' @family RunPipelineRequest functions
#' @export
pipelines.run <- function(RunPipelineRequest) {
    # Endpoint: genomics.pipelines.run -- POST /v1alpha2/pipelines:run.
    endpoint <- "https://genomics.googleapis.com/v1alpha2/pipelines:run"
    run_pipeline <- googleAuthR::gar_api_generator(endpoint, "POST",
                                                   data_parse_function = function(resp) resp)
    # Validate the body class before issuing the request.
    stopifnot(inherits(RunPipelineRequest, "gar_RunPipelineRequest"))
    run_pipeline(the_body = RunPipelineRequest)
}
#' Retrieves a pipeline based on ID.Caller must have READ permission to the project.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param pipelineId Caller must have READ access to the project in which this pipeline
#' @importFrom googleAuthR gar_api_generator
#' @export
pipelines.get <- function(pipelineId) {
    # Endpoint: genomics.pipelines.get -- GET /v1alpha2/pipelines/{pipelineId}.
    endpoint <- sprintf("https://genomics.googleapis.com/v1alpha2/pipelines/%s", pipelineId)
    get_pipeline <- googleAuthR::gar_api_generator(endpoint, "GET",
                                                   data_parse_function = function(resp) resp)
    get_pipeline()
}
#' Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. Clients may use Operations.GetOperation or Operations.ListOperations to check whether the cancellation succeeded or the operation completed despite cancellation.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param CancelOperationRequest The \link{CancelOperationRequest} object to pass to this method
#' @param name The name of the operation resource to be cancelled
#' @importFrom googleAuthR gar_api_generator
#' @family CancelOperationRequest functions
#' @export
operations.cancel <- function(CancelOperationRequest, name) {
    # BUG FIX: the URL template "{+name}" was passed to sprintf() without a
    # format specifier, so `name` was silently ignored (sprintf ignores extra
    # arguments) and the literal "{+name}" ended up in the request URL.
    # Substitute the operation name with %s instead.
    url <- sprintf("https://genomics.googleapis.com/v1alpha2/%s:cancel", name)
    # genomics.operations.cancel
    f <- googleAuthR::gar_api_generator(url, "POST", data_parse_function = function(x) x)
    stopifnot(inherits(CancelOperationRequest, "gar_CancelOperationRequest"))
    f(the_body = CancelOperationRequest)
}
#' Lists operations that match the specified filter in the request.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation collection
#' @param pageSize The maximum number of results to return
#' @param filter A string for filtering Operations
#' @param pageToken The standard list page token
#' @importFrom googleAuthR gar_api_generator
#' @export
operations.list <- function(name, pageSize = NULL, filter = NULL, pageToken = NULL) {
    # BUG FIX: "{+name}" contained no sprintf() format specifier, so the
    # operation collection name never made it into the URL; use %s so `name`
    # is actually substituted.
    url <- sprintf("https://genomics.googleapis.com/v1alpha2/%s", name)
    # genomics.operations.list -- NULL query parameters are stripped below.
    pars <- list(pageSize = pageSize, filter = filter, pageToken = pageToken)
    f <- googleAuthR::gar_api_generator(url, "GET", pars_args = rmNullObs(pars),
                                        data_parse_function = function(x) x)
    f()
}
#' Gets the latest state of a long-running operation. Clients can use thismethod to poll the operation result at intervals as recommended by the APIservice.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://cloud.google.com/genomics}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://www.googleapis.com/auth/cloud-platform
#' \item https://www.googleapis.com/auth/genomics
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param name The name of the operation resource
#' @importFrom googleAuthR gar_api_generator
#' @export
operations.get <- function(name) {
    # BUG FIX: sprintf() had no %s placeholder, so `name` was ignored and the
    # request URL contained the literal "{+name}". Substitute it properly.
    url <- sprintf("https://genomics.googleapis.com/v1alpha2/%s", name)
    # genomics.operations.get
    f <- googleAuthR::gar_api_generator(url, "GET", data_parse_function = function(x) x)
    f()
}
|
6cc25afc1999e54913c9d15f85385875a45fe3af
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/KFAS/examples/ldl.Rd.R
|
23b0e599e3387e202bdee1990ccb69536ff91763
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 347
|
r
|
ldl.Rd.R
|
# Extracted example code from ?KFAS::ldl -- checks that the LDL decomposition
# reconstructs the original matrix.
library(KFAS)
### Name: ldl
### Title: LDL Decomposition of a Matrix
### Aliases: ldl
### ** Examples
# Positive semidefinite matrix, example matrix taken from ?chol
x <- matrix(c(1:5, (1:5)^2), 5, 2)
x <- cbind(x, x[, 1] + 3*x[, 2])  # third column is a linear combination -> m is singular
m <- crossprod(x)                 # t(x) %*% x: symmetric, positive semidefinite
l <- ldl(m)                       # ldl() returns L and D packed into one matrix
d <- diag(diag(l))                # D: diagonal part
diag(l) <- 1                      # L: unit lower triangular
all.equal(l %*% d %*% t(l), m, tol = 1e-15)  # should reconstruct m exactly
|
89e252b224199350fb1cc254b74d1655dd656079
|
4f355faa933fe9653d7b33585cc687cff17d0ba9
|
/day2/trees.R
|
1356b29e8c69a3e9effa4743146bb632e03c0f97
|
[] |
no_license
|
chandulal/data-science-with-R
|
b25c07b71088fe0cde9ac3aef096310cb996c689
|
be051934c65bfcd7ac4c3bc1c02e26d446b06431
|
refs/heads/master
| 2016-08-11T17:43:07.240309
| 2016-03-12T10:44:25
| 2016-03-12T10:44:25
| 53,643,025
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,719
|
r
|
trees.R
|
# Boston dataset variables:
# LON and LAT are the longitude and latitude of the
# center of the census tract.
# MEDVisthemedianvalueofowner-occupied homes, in thousands of dollars
# CRIM is the per capita crime rate
# ZN is related to how much of the land is zoned for large residential properties
# INDUS is proportion of area used for industry
# CHAS is 1 if the census tract is next to the Charles River
# NOX is the concentration of nitrousoxides in the air
# RM is the average number of rooms per dwelling
# AGE is the proportion of owner-occupied units built before 1940
# DIS is a measure of how far the tract is from centers of employment in Boston
# RAD is a measure of closeness to important highways
# TAX is the property tax rate per$10,000 of value
# PTRATIO is the pupil-teacher ratio by town
# how air polution affects the house prices
#------ reading file ---------
boston <- read.csv("dataset/house/boston.csv")
str(boston)
summary(boston)
# Map of tract centers (note: LAT is plotted on x, LON on y throughout).
plot(boston$LAT, boston$LON)
avg <- mean(boston$CHAS)
# CHAS is 0/1, so CHAS >= mean(CHAS) selects the riverside tracts (blue).
points(boston$LAT[boston$CHAS >= avg], boston$LON[boston$CHAS >= avg], col="blue", pch=19)
summary(boston$NOX)
avg <- mean(boston$NOX)
# Above-average pollution in red.
points(boston$LAT[boston$NOX >= avg], boston$LON[boston$NOX >= avg], col="red", pch=19)
summary(boston$MEDV)
avg <- mean(boston$MEDV)
# Above-average home value in yellow.
points(boston$LAT[boston$MEDV >= avg], boston$LON[boston$MEDV >= avg], col="yellow", pch=19)
avg <- mean(boston$RAD)
points(boston$LAT[boston$RAD >= avg], boston$LON[boston$RAD >= avg], col="green", pch=19)
#------------
# NOTE(review): using boston$ inside the formula defeats data=; prefer
# MEDV ~ LAT + LON with data = boston.
latLotModel <- lm(boston$MEDV ~ boston$LAT + boston$LON, data=boston)
summary(latLotModel)
#visualize the output
plot(boston$LAT, boston$LON)
# what our model predicts more than overage
avg <- mean(boston$MEDV)
points(boston$LAT[boston$MEDV >= avg], boston$LON[boston$MEDV >= avg], col="yellow", pch=19)
# What our model predicts more than average
latLotModel$fitted.values
points(boston$LAT[latLotModel$fitted.values >= avg], boston$LON[latLotModel$fitted.values >=avg], col="gray", pch=19)
#-------------Trees --------------
library(rpart)
library(rpart.plot)
latLonTree <- rpart(boston$MEDV ~ boston$LAT + boston$LON, data=boston)
prp(latLonTree)  # plot the fitted regression tree
# what our model predicts more than overage
plot(boston$LAT, boston$LON)
avg <- mean(boston$MEDV)
points(boston$LAT[boston$MEDV >= avg], boston$LON[boston$MEDV >= avg], col="yellow", pch=19)
# What our model predicts more than average
points(boston$LAT[predict(latLonTree) >= avg], boston$LON[predict(latLonTree) >=avg], col="blue", pch=19)
# Coarser tree: require at least 50 observations per terminal node.
newLatLonTree <- rpart(boston$MEDV ~ boston$LAT + boston$LON, data =boston, minbucket = 50)
prp(newLatLonTree)
plot(boston$LAT, boston$LON)
abline(h= -71)    # longitude reference line (y axis)
abline(v= 42.07)  # latitude reference line (x axis)
#----linear regression
|
e5ad706c64ae4279a64b1127836fe40c84b6dc03
|
9c0f9a7675dd9ff2fe0f6e2eff94b8fe153dd7e0
|
/dms.R
|
bd1c050d1036292b743dc6880b1310cadf32aad5
|
[] |
no_license
|
pmoracho/R
|
8a6eef57c282c18b44f03ab2d5341a2e91f12505
|
4b7adb1104abed5f6205079f5ee905839bd24175
|
refs/heads/master
| 2022-07-07T00:14:48.361838
| 2022-07-02T22:34:05
| 2022-07-02T22:34:05
| 91,250,857
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,361
|
r
|
dms.R
|
# Pull DMS document profiles from SQL Server, count documents per author,
# and plot the top-10 authors with ggplot2.
library(RODBC)
library(ggplot2)
start.time <- Sys.time()
# SECURITY(review): credentials are hard-coded in the connection string;
# move uid/pwd into environment variables or a protected DSN.
cn<-odbcDriverConnect("DRIVER={SQL Server};SERVER=momdb2;Database=master;uid=plussistemas;pwd=plus")
# Profiles joined to typist (P1) and author (P2) people plus document type
# and matter ("asunto"); duplicated column names become USER_ID.1 /
# FULL_NAME.1 in the resulting data frame.
df <- sqlQuery(cn, "
SELECT top 1000000
P.SYSTEM_ID,
P1.USER_ID,
P1.FULL_NAME,
P2.USER_ID,
P2.FULL_NAME,
DT.DESCRIPTION,
CREATION_DATE,
LAST_EDIT_DATE,
LAST_ACCESS_DATE,
A.ASUNTO_ID,
A.ASUNTO_DESC
FROM LIB_ASUNTOS.DOCSADM.PROFILE P
LEFT JOIN LIB_ASUNTOS.DOCSADM.PEOPLE P1
ON P.TYPIST = P1.SYSTEM_ID
LEFT JOIN LIB_ASUNTOS.DOCSADM.PEOPLE P2
ON P.AUTHOR = P2.SYSTEM_ID
LEFT JOIN LIB_ASUNTOS.DOCSADM.DOCUMENTTYPES DT
ON DT.SYSTEM_ID = P.DOCUMENTTYPE
LEFT JOIN LIB_ASUNTOS.DOCSADM.MOM_ASUNTOS A
ON A.SYSTEM_ID = P.MOM_ASUNTOS")
close(cn)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
# NOTE(review): this loads a cached workspace on top of the freshly queried
# df -- confirm dms.RData holds the same object.
load("dms.RData")
summary(df)
df[df$SYSTEM_ID == min(df$SYSTEM_ID), ]  # profile row with the lowest id
# Documents per author (FULL_NAME.1 comes from the P2/AUTHOR join).
autores <- aggregate(df$FULL_NAME.1, by=list(df$FULL_NAME.1), length)
autores <- autores[with(autores, order(-x)), ]  # sort by count, descending
colnames(autores) <- c("AUTHOR", "Cant")
top <- 10
autoresTop <- autores[1:top,]
# Lump everyone outside the top 10 into a single "Resto" row.
autoresTop <- rbind(autoresTop, data.frame(AUTHOR="Resto", Cant=sum(autores[-c(1:top),2])))
autoresTop
median(autores$Cant)
mean(autores$Cant)
var(autores$Cant)
sd(autores$Cant)
ggplot(autoresTop, aes(x=AUTHOR, y=Cant)) +
  geom_bar(stat='identity', aes(fill=AUTHOR), width=.5) +
  geom_text(aes(label=Cant), position=position_dodge(width=0.9), vjust=-0.25) +
  scale_y_continuous(name="Documentos", labels = scales::comma) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(fill="Autores",
       x=NULL,
       y=NULL,
       title="Autores DMS (TOP 10)",
       caption=paste0("fuente: LIB_ASUNTOS (",sum(autoresTop$Cant)," documentos) al 1/12"))
# NOTE(review): df has no DOCUMENTTYPE column (the query returns it as
# DESCRIPTION), so df$DOCUMENTTYPE is likely NULL here -- verify.
tiposdoc <- aggregate(df$DOCUMENTTYPE, by=list(df$DOCUMENTTYPE), length)
tiposdoc <- tiposdoc[with(tiposdoc, order(-x)), ]
# Clients:
# Clients from Central (ClienteId and RazonSocial) not already in the DMS are migrated
# Company-name (razon social) changes are propagated as well
#
# Matters (asuntos):
# Matters are migrated from legales.dbo.LegalesDbAsuntos
# If the matter is judicial its court caption is used, otherwise the billing one
# The related clients are those of the matter
# The flag flagAsuntomigradodms is checked
|
923cbda6e15c75ff328103b1eb9bd3c8e233abbb
|
a8d39817671c2aad5994396f839f4d527b42ce4d
|
/app.R
|
66c85d0199b58c19890078cb97c0e4561c0bc29e
|
[] |
no_license
|
chrisselig/nycdogs
|
b193727ed67c869f62f9dbef9d5169cfdfbd1780
|
6d8f836fc996a6041840b282c7c4575127c730eb
|
refs/heads/main
| 2022-12-11T11:25:39.832646
| 2020-09-14T20:36:54
| 2020-09-14T20:36:54
| 294,489,439
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,619
|
r
|
app.R
|
#
# Created by Chris Selig of BIDAMIA INC. ----
# https://www.linkedin.com/in/chris-selig/
# bidamia.ca
# App for exploring and forecasting dog bites in NYC ----
# September 2020 v1.0
#
# * Libraries ----
# ** Shiny libraries ----
library(shiny)
library(shinyjs)
library(shinyWidgets)
library(shinythemes)
# ** Data manipulation libraries ----
library(tidyverse)
library(lubridate)
# ** Data library ----
library(nycdogs)
# * Source Scripts ----
source('01_scripts/01_get_clean_data.R')
# * Load Data ----
bites_tbl <- bites_function()
# Define UI ----
ui <-
tagList(
# * CSS ----
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "styles.css"),
tags$link(href="https://fonts.googleapis.com/css?family=Old+Standard+TT:400,700&display=swap",
rel="stylesheet"
)
),
# * JS ----
shinyjs::useShinyjs(),
navbarPage(
# Application title
title = "Exploring NYC Dog Data",
collapsible = TRUE,
theme = shinytheme("flatly"),
# * Exploring Bites Tab ----
tabPanel(
class = "tabPanel",
title = "Exploring Dog Bites",
# ** Visualization Panel ----
column(
width = 8
),
# ** Filter Panel ----
column(
width = 4,
fluidRow(
h2('Filters'),
# *** Borough Filter ----
pickerInput(
inputId = 'bitesBurough',
label = 'Burough',
choices = sort(unique(bites_tbl$borough)),
selected = c('Bronx','Brooklyn', 'Manhattan','Queens','Staten Island','Other'),
multiple = TRUE,
options = list(
`actions-box` = TRUE,
`multiple-separator` = " | "
)
),
br(),
# * Breed Filter ----
pickerInput(
inputId = 'bitesBreed',
label = 'Breed',
choices = sort(unique(bites_tbl$breed)),
selected = bites_tbl %>% select(breed_rc) %>% pull(),
multiple = TRUE,
options = list(
`actions-box` = TRUE,
liveSearch = TRUE,
size = 1200,
`multiple-separator` = " | "
)
)
)
)
),
# * Exploring Names Tab ----
tabPanel(
class = "tabPanel",
title = "Exploring Dog Names"
),
# * Forecasting Dog Bites Tab ----
tabPanel(
class = "tabPanel",
title = "Forecasting Dog Bites"
)
)
)
# Define server logic
server <- function(input, output) {
}
# Run the application
shinyApp(ui = ui, server = server)
|
7d71e54e0cb8d2558ae2a11b46179ccb32e0e397
|
6a704b8808392875f192dae75f4816e5b0de3d61
|
/bd_serve.R
|
4f3c40f9d1a9addad3131a91a3fb4b21dd66753a
|
[] |
no_license
|
vudat081299/stat101
|
fe85609050e82e550f4fe7f0d5d119f2b1f2b2b1
|
3313cac2391e1d30c19af28ba453d0d9984f4e93
|
refs/heads/master
| 2023-01-30T18:25:14.832491
| 2020-12-17T09:01:35
| 2020-12-17T09:01:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 114
|
r
|
bd_serve.R
|
# Launch bookdown's local preview server for the book in the current
# directory, writing rendered output to _book; in_session = TRUE runs the
# server inside this R session.
bookdown::serve_book(dir = ".", output_dir = "_book",
                     preview = TRUE, in_session = TRUE, quiet = FALSE)
|
f38e8cc4ceeea2393da4d4cc8be0cc3022556c12
|
3524de329d1f28a6df15093155ea6c2df9c37f54
|
/TurtleBackTest(v0.1) .r
|
60015792cafdea8a02af21068be51548eefd883b
|
[] |
no_license
|
SidGor/turtle_project
|
324580276d8c57b7b5939f919fb7f48115458298
|
47f972d3c7aef2903e302081c9ac9a110094c71d
|
refs/heads/master
| 2021-01-23T16:17:48.310002
| 2017-06-19T07:12:19
| 2017-06-19T07:12:19
| 93,289,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,205
|
r
|
TurtleBackTest(v0.1) .r
|
##########################Packages#############################################
library(quantex)     # market data download
library(rlist)       # handy list utilities such as list.stack
library(data.table)
library(dplyr)       # left_join and %>% used during data cleaning
library(zoo)         # rollapplyr for rolling-window statistics
###############################################################################
#####################Input & Lists ############################################
product_ids <- c("rb", "cu", "al")          ##########################
start_date <- 20160601                      ##########  DATA  ########
end_date <- 20161231                        ########  LOADING  #######
frequency <- "day"                          ##########################
vm <- c(products$rb$volume_multiple,        # contract multipliers (units per lot)
        products$cu$volume_multiple,
        products$al$volume_multiple)
account <- 100000000        # initial account equity
acc_origin <- account       # while account exceeds this, no shadow adjustment is needed
shadow_account <- account   # equity used to size Units; capped at initial equity
cash <- account             # initial cash
slippage <- c(2*products$rb$price_tick,     # slippage assumption: 2 price ticks
              2*products$cu$price_tick,
              2*products$al$price_tick)
fee.rate <- c(products$rb$cost_ratio,       # commission rates per product
              products$cu$cost_ratio,
              products$al$cost_ratio)
system.selection <- 2 #choose sys1 or sys2  # which turtle entry system to use
position <- rep(0,length(product_ids))      # units held per product
holding <- rep(0,length(product_ids))       # contracts held per product
corr_mat <- list(                           # correlation matrices used for risk limits
  clscorr = matrix(c(1,1,0,1,1,0,0,0,1),3,3,
                   dimnames = list(product_ids, product_ids)
  ),
  lslcorr = matrix(c(1,0,0,0,1,0,0,0,1),3,3,
                   dimnames = list(product_ids, product_ids)
  ))
close_sys <- NULL            # later selects which exit rule is applied
data <- list()               # per-product price data
trade_in <- list()           # entry (buy) records
trade_out <- list()          # exit (sell) records
standing_contract <- list()  # open-position records
asset_sheet <- list()        # equity/asset history
# Very handy bar()/pre() helpers that save a lot of boilerplate:
# Accessor for the current bar: bar("close") returns the value of column `w`
# at the row indexed by the loop pointer `ptr`.
# Relies on `cdt` (the merged wide table) and `ptr` existing in the
# surrounding environment.
bar <- function(w) {
  column <- cdt[[w]]
  column[ptr]
}
# Accessor for past bars: pre("close") returns the previous bar's value of
# column `w`; pre("close", n) the value n bars back from the pointer `ptr`.
# Relies on `cdt` and `ptr` existing in the surrounding environment.
#
# Args:
#   w: column name in `cdt`, e.g. "close".
#   n: lookback in bars; must be >= 1 (n = 1 is the previous bar).
pre <- function(w, n = 1){
  if(n <= 0){
    # BUG FIX: the message used to say "greater than 1" although the guard
    # actually enforces n >= 1.
    stop("pre(): lookback n must be at least 1")
  } else {
    # ptr - 1 - abs(n - 1) equals ptr - n for any integer n >= 1.
    cdt[[w]][ptr - 1 - abs(n-1)]
  }
}
#####################End of "Input & Lists"####################################
####################Data Cleaning##############################################
# Download daily bars for every product and derive the indicator columns the
# turtle rules need: true range (TR), Wilder-smoothed ATR, N*DPP (dollar
# volatility per contract), and the 10/20/55-day Donchian channel extremes.
for (i in seq_along(product_ids)) {
  # Download dominant-contract bars (helper from the `quantex` package).
  data[[i]] <- query_dominant_future_bars(product_id = product_ids[i],
                                          trading_day = start_date ~ end_date,
                                          type = frequency)
  # Need >= 22 rows: 20 TR values to seed the ATR plus the one-bar lag below.
  if (nrow(data[[i]]) < 22) {
    stop(paste(product_ids[[i]],"doesn't contain over 21 rows for calculation"))
  }
  # Keep only the columns used downstream; drop rows with NAs here (the
  # left_join below may reintroduce NAs on unmatched dates).
  data[[i]] <- na.omit(data[[i]][, .(date = trading_day,
        code = instrument_id, open, high, low, close, volume)])
  # Replace the specific contract code with the bare product id.
  data[[i]][, code := product_ids[i]]
  # True range via pmax() over the three classic components.
  data[[i]][, TR := pmax(high-low,
                         high-shift(close,1,type = "lag"),
                         shift(close,1,type = "lag")-low)]
  # Wilder smoothing: seed with the mean of the first 20 valid TRs (rows
  # 2:21; row 1 TR is NA), then ATR_t = (19*ATR_{t-1} + TR_t) / 20.
  s <- mean(data[[i]][2:21, TR])
  v <- rep(NA, nrow(data[[i]]))
  k <- data[[i]][, TR]
  v[21] <- s
  for (j in 1:(nrow(data[[i]]) - 21)){
    v[21 + j] <- (v[21 + j - 1]*19 + k[21 + j])/20
  }
  # Lag ATR one bar so decisions on bar t only use information through t-1
  # (avoids look-ahead; also lets the main loop skip warm-up rows via NA).
  data[[i]][, ATR := shift(v, 1, type = "lag")]
  # Dollar volatility per contract: N * dollars-per-point.
  # BUG FIX: this previously read vm[1]*ATR, applying rb's volume multiple
  # to every product; each product must use its own multiple vm[i].
  data[[i]][, NxDPP := vm[i]*ATR]
  # 10- and 20-day Donchian channel extremes, lagged one bar (system 1).
  data[[i]][, max10high := shift(rollapplyr(high, width = 10, FUN = max,
                                            fill = NA ), 1, type = "lag")]
  data[[i]][, min10low := shift(rollapplyr(low, width = 10, FUN = min,
                                           fill = NA ), 1, type = "lag")]
  data[[i]][, max20high := shift(rollapplyr(high, width = 20, FUN = max,
                                            fill = NA ), 1, type = "lag")]
  data[[i]][, min20low := shift(rollapplyr(low, width = 20, FUN = min,
                                           fill = NA ), 1, type = "lag")]
  # 55-day channel extremes, lagged one bar (system 2).
  data[[i]][, max55high := shift(rollapplyr(high, width = 55, FUN = max,
                                            fill = NA ), 1, type = "lag")]
  data[[i]][, min55low := shift(rollapplyr(low, width = 55, FUN = min,
                                           fill = NA ), 1, type = "lag")]
}#end of product data downloading loop
names(data) <- product_ids
###################Test hooks (kept from the original author)##################
# Simulate missing data to check that the merge below still works:          #
# data$al <- data$al[1:(.N)-1,]   # drop the tail row of one product        #
# data$cu <- data$cu[-21,]        # drop a middle row of one product        #
###############################################################################
# Merge the per-product tables by date via repeated left_join; unmatched
# dates become NA, which the main loop skips.
data_bind <- data %>%
  Reduce(function(dtf1,dtf2) left_join(dtf1,dtf2,by="date"), .)
data_dt <- as.data.table(data_bind) # the wide table consumed by the main loop
cdt <- copy(data_dt)
####################End of Data Cleaning#######################################
#####################Main Loop#################################################
# Iterate bar-by-bar over the merged table `cdt`; `ptr` indexes the current
# row and is the pointer read by bar()/pre(). Several sections below are
# scaffolding the original author left unimplemented.
for (ptr in 1:nrow(cdt)){ #start of main loop
  # Skip warm-up rows where the 55-day channel is not yet defined.
  if(is.na(cdt[ptr,max55high])) next
#####################Asset Monitor#############################################
  # Turtle-style capital scaling: shrink the sizing capital after drawdowns,
  # and restore it after recoveries, but never above the original equity.
  if (account < 0.9*shadow_account) {
    shadow_account = shadow_account*0.8
  } else if (account >(1/0.9)*shadow_account & shadow_account < acc_origin) {
    shadow_account = 1.25*shadow_account
  }
  # Compute the Unit size per product: 1% of sizing capital / N*DPP.
  NxDPPs <- rep(NA,length(product_ids))
  for (j in 1:length(product_ids)){
    # Extract each product's N*DPP column by its position in `cdt`.
    # NOTE(review): `15*j-5` hard-codes the merged column layout; extracting
    # by column name would be far less fragile -- confirm before changing.
    NxDPPs[j] <- as.numeric(cdt[[15*j-5]][ptr])
  }
  units <- 0.01*shadow_account/NxDPPs # Unit size per product on this bar
  units <- floor(units)               # round down to whole contracts
  # Four scratch test vectors (scaffolding; never filled in below).
  test1 <- rep(NA,length(product_ids))
  test2 <- rep(NA,length(product_ids))
  test3 <- rep(NA,length(product_ids))
  test4 <- rep(NA,length(product_ids))
  # If no position is held there is nothing to resize; the else branch
  # (position adjustment against the new Unit size) is unimplemented.
  if(all(position == 0)) {
    test1
    test2
    test3
    test4
  } else {}
  # TODO (original author): subset the products that need adjustment,
  # then close/adjust each selected product's position once.
#####################End of Asset Monitor######################################
####################Open Position##############################################
  #system2
  #generate long/short signal
  sav_long <- vector()
  sav_short <- vector()
  sig_long <- vector()
  sig_short <- vector()
  for (j in 1:length(product_ids)){ #extract the high,55high, low,55low
    sav_long <- append(sav_long,c(cdt[[4+(j-1)*15]][ptr],cdt[[15+(j-1)*15]][ptr]))#get the high and 55high
    sav_short <- append(sav_short,c(cdt[[5+(j-1)*15]][ptr],cdt[[16+(j-1)*15]][ptr]))
  }
  #Then we will need a vector to see if channels been broke
  for (j in 1:length(product_ids)){
    sig_long[j] <- sav_long[(2*j - 1)] >sav_long[2*j]
    sig_short[j] <- sav_short[(2*j - 1)] < sav_short[2*j]
  }
  #how many units there has to be according to the signal
  #the key here is to have the ALLOWANCE matrix for each asset, it dictates how many units in maximum we can hold in each period.
  #should have a vector indicating by units, the other shows the exact amount of contracts(position * vm)
####################End of Open Position#######################################
#####################Close Position############################################
#####################End of Close Position#####################################
#####################Profit Taking#############################################
#####################End of Profit Taking######################################
#####################Asset Chart Update########################################
#####################End of Asset Chart Update#################################
}# end of main loop
######################End of Main Loop#########################################
|
43bf75c6cc78613d1d66fa582b87d10fc07de0cd
|
b30559e9ebad86aa28d32359b662457207cd3f23
|
/4-meta_analyze.R
|
d52d13290bfce5754b9dd331f0bf4a3f063609b2
|
[
"MIT"
] |
permissive
|
jaydennord/glmm_variance
|
24b4e68e60b63b5020fe65c05d39dde5808efbca
|
ced58822dbce3fbc6aa89d3799fae58ccf258953
|
refs/heads/master
| 2022-12-07T11:46:10.463894
| 2020-09-04T22:01:12
| 2020-09-04T22:01:12
| 236,608,998
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,935
|
r
|
4-meta_analyze.R
|
library(data.table)
library(tidyverse)
library(latex2exp)
# d <- read_csv("results_full.csv", guess_max = 5e5)
# Load the full simulation results; fread() is used for speed, then the
# result is converted to a tibble for the dplyr pipeline below.
d <- fread("results_full.csv", sep = ",") %>%
  as_tibble()
# NOTE(review): this `grps` (which includes `term`) is overwritten further
# down before it is ever used; kept only as a record of the earlier grouping.
grps <- quos(nblk, neu, blk_sd, mu, gen_method, ana_method, term)
# Keep only the block-variance rows: drop treatment / block-by-eu terms,
# rows with no term, and rows whose group is a block-by-eu interaction.
d2 <- d %>%
  filter(
    !(
      str_detect(term, "(trt)|(blk.eu)") |
        is.na(term)
    )
  ) %>%
  filter(
    !str_detect(group, "blk.eu") |
      is.na(group)
  ) %>%
  # patch code to deal with misnamed columns: the CI bounds live in either
  # conf.low/conf.high or c025/c975 depending on the analysis method, so
  # coalesce them into one pair of columns.
  mutate(
    conf.low = case_when(
      !is.na(conf.low) & is.na(c025) ~ conf.low,
      is.na(conf.low) & !is.na(c025) ~ c025,
      TRUE ~ NA_real_
    ),
    conf.high = case_when(
      !is.na(conf.high) & is.na(c975) ~ conf.high,
      is.na(conf.high) & !is.na(c975) ~ c975,
      TRUE ~ NA_real_
    )
  )
# d2 %>% count(!!! grps) %>% View()
#
# wtf <- d2 %>% filter(
# nblk == 4,
# neu == 6,
# blk_sd == .50,
# mu == "m10",
# gen_method == "pois_normal",
# ana_method == "lme4_fit_normal"
# )
#
# Simulation design factors to aggregate over.
grps <- quos(nblk, neu, blk_sd, mu, gen_method, ana_method)

# Relabel a design variable's levels as LaTeX expressions for the facet
# strips, e.g. tex_factor(4, "$n_b = ") -> level "$n_b = 4$" run through
# latex2exp::TeX(). Levels keep their order of first appearance (as_factor).
tex_factor <- function(x, prefix) {
  f <- as_factor(x)
  levels(f) <- TeX(paste0(prefix, levels(f), "$"))
  f
}

# Per-cell coverage and relative bias of the block-variance estimate, with
# bias clamped to [-0.1, 0.1] (out-of-range cells flagged via pt_shape so
# the plot can show them as open points at the axis limit).
d3 <- d2 %>%
  mutate(
    cover = as.numeric(conf.low <= blk_sd & blk_sd <= conf.high),
    bias_rep = (estimate - blk_sd) / blk_sd
  ) %>%
  group_by(!!! grps) %>%
  summarize(
    coverage = mean(cover),
    bias = mean(bias_rep)
  ) %>%
  ungroup() %>%
  mutate(
    pt_shape = case_when(
      bias > .1 | bias < -.1 ~ 2L,
      TRUE ~ 1L
    ),
    bias = case_when(
      bias > .1 ~ .1,
      bias < -.1 ~ -.1,
      TRUE ~ bias
    ),
    # Facet-strip labels: the same relabel idiom was previously repeated
    # four times inline; tex_factor() produces identical labels.
    nblk = tex_factor(nblk, "$n_b = "),
    neu = tex_factor(neu, "$n_e = "),
    blk_sd = tex_factor(blk_sd, "$\\tau^2 = "),
    # mu is coded like "m10"; strip the leading "m" before labelling.
    mu = tex_factor(sub("^m", "", mu), "$\\mu = "),
    gen_method = factor(
      gen_method,
      levels = c("pois_gamma", "pois_normal"),
      labels = c("Gamma", "Normal")
    )
  )
# Coverage bar chart: one bar per analysis method, faceted by the full
# simulation design (nblk x neu rows, blk_sd x mu x gen_method columns);
# facet labels are plotmath expressions, hence label_parsed.
coverage <- ggplot(data = d3, aes(x = ana_method, y = coverage, fill = ana_method)) +
  geom_bar(stat = "identity") +
  scale_fill_discrete(labels = c(
    "lme4 - NB",
    "lme4 - Norm",
    "stan - NB",
    "stan - Norm"
  )) +
  facet_grid(
    nblk + neu ~ blk_sd + mu + gen_method,
    labeller = label_parsed
  ) +
  labs(
    y = "Coverage",
    fill = "Analytic method"
  ) +
  theme(
    # axis.text.x = element_text(angle = 90)
    axis.text.x = element_blank(),
    axis.title.x = element_blank()
  )
# Relative-bias dot plot: shaded bands mark |bias| > .05; clamped cells
# (pt_shape == 2, set upstream) render as open circles at the +/-0.1 limit.
bias <- ggplot(data = d3, aes(x = ana_method, color = ana_method, y = bias, shape = factor(pt_shape))) +
  geom_rect(xmin = -Inf, xmax = Inf, ymin = .05, ymax = Inf, alpha = .075) +
  geom_rect(xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = -.05, alpha = .075) +
  geom_hline(yintercept = 0) +
  geom_point(size = 2) +
  scale_shape_manual(values = c(19, 1), name = NULL, labels = NULL, guide = "none") +
  # scale_color_manual(values = 1:4, labels = letters[1:4]) +
  # scale_color_discrete(labels = letters[1:4]) +
  scale_color_discrete(labels = c(
    "lme4 - NB",
    "lme4 - Norm",
    "stan - NB",
    "stan - Norm"
  )) +
  ylim(-.1, .1) +
  facet_grid(
    nblk + neu ~ blk_sd + mu + gen_method,
    labeller = label_parsed
  ) +
  # theme_bw() +
  labs(
    shape = NULL,
    color = "Analytic method",
    y = "Relative bias"
  ) +
  theme(
    # axis.text.x = element_text(angle = 90)
    axis.text.x = element_blank(),
    axis.title.x = element_blank(),
    panel.border = element_rect(fill = NA)
  )
# Export poster-sized PNGs (dimensions in inches at 72 dpi).
ggsave("pres_coverage.png", coverage, width = 22.56, height = 14.24, dpi = 72)
ggsave("pres_bias.png", bias, width = 22.56, height = 14.24, dpi = 72)
#
#
# d2 <- d %>%
# mutate(
# bias = (estimate - blk_sd) / blk_sd,
# # reject = as.numeric(between(blk_sd, c025, c975))
# reject = as.numeric(c025 <= blk_sd & blk_sd <= c975)
# ) %>%
# filter(
# !is.na(c025) & !is.na(c975),
# abs(bias) < 1,
# str_detect(ana_method, "wald_", negate = TRUE) & is.na(error) |
# str_detect(ana_method, "wald_") & sas_status == 0 & sas_g_mat == 1
# )
# d3 <- d2 %>%
# group_by(!!! grps) %>%
# summarize(
# bias_ugh = mean(bias),
# bias_lo = mean(bias) - 1.96 * sd(bias) / sqrt(n()),
# bias_hi = mean(bias) + 1.96 * sd(bias) / sqrt(n()),
# cover = mean(reject),
# cover_lo = mean(reject) - 1.96 * sd(reject) / sqrt(n()),
# cover_hi = mean(reject) + 1.96 * sd(reject) / sqrt(n())
# )
#
# ggplot(d3, aes(x = ana_method, y = bias_ugh, ymin = bias_lo, ymax = bias_hi)) +
# geom_point() +
# geom_errorbar() +
# facet_grid(nblk + neu ~ blk_sd + mu + gen_method) +
# labs(title = "Bias")
#
# ggplot(d3, aes(x = ana_method, y = cover, ymin = cover_lo, ymax = cover_hi)) +
# geom_point() +
# geom_errorbar() +
# facet_grid(nblk + neu ~ blk_sd + mu + gen_method) +
# labs(title = "Coverage")
#
#
# # ggplot(d2, aes(x = ana_method, y = reject)) +
# # stat_summary(fun.y = "mean", geom = "bar") +
# # facet_grid(nblk + neu ~ blk_sd + mu + gen_method) +
# # labs(title = "Coverage")
# #
# # ggplot(d2, aes(x = ana_method, y = bias)) +
# # stat_summary(fun.y = "mean", geom = "point") +
# # facet_grid(nblk + neu ~ blk_sd + mu + gen_method) +
# # labs(title = "Bias")
# #
# #
|
cb79b287867969b3d256ad8a200cd0ed4928fc99
|
c459dd32d88158cb064c3af2bc2ea8c7ab77c667
|
/tumor_subcluster/plotting/heatmap/heatmap_emt_markers_by_individualsample_cluster_highlight.R
|
b1ace3efaf80e2c3c2a12d5f20c78c52998b3ab9
|
[] |
no_license
|
ding-lab/ccRCC_snRNA_analysis
|
d06b8af60717779671debe3632cad744467a9668
|
ac852b3209d2479a199aa96eed3096db0b5c66f4
|
refs/heads/master
| 2023-06-21T15:57:54.088257
| 2023-06-09T20:41:56
| 2023-06-09T20:41:56
| 203,657,413
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,450
|
r
|
heatmap_emt_markers_by_individualsample_cluster_highlight.R
|
# set up libraries and output directory -----------------------------------
# Heatmap of kidney-specific EMT marker expression across per-sample tumor
# clusters. Depends on project helper scripts sourced below (load_pkgs,
# functions incl. makeOutDir, variables, plotting).
## set working directory
# NOTE(review): hard-coded personal path + setwd(); fine for a one-off
# analysis script, but not portable.
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
source("./ccRCC_snRNA_analysis/plotting.R")
## set run id (date-stamped, versioned output folder)
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input the average expression calculated (SCT): genes x (aliquot_cluster_cellgroup) columns
avgexp_df <- fread(input = "./Resources/Analysis_Results/average_expression/averageexpression_sct_usescale_byindividualcluster_bycellgroup7_byaliquot_on_katmai/20200917.v1/avgexp.SCT.bycellgroup.byaliquot.bycluster.31_aliquot_integration.20200917.v1.tsv", data.table = F)
## input the barcode-cell-type table
barcode2celltype_df <- fread(input = "./Resources/Analysis_Results/annotate_barcode/annotate_barcode_with_major_cellgroups/20200917.v2/31Aliquot.Barcode2CellType.20200917.v2.tsv", data.table = F)
## barcode 2 individual cluster id
barcode2cluster_df <- fread(data.table = F, input = "./Resources/Analysis_Results/data_summary/fetch_data/fetch_data_by_individual_sample/20200717.v1/Barcode2MetaData.20200717.v1.tsv")
## input id meta data (maps snRNA aliquot ids to WU sample ids)
idmetadata_df <- fread(data.table = F, input = "./Resources/Analysis_Results/sample_info/make_meta_data/20200716.v1/meta_data.20200716.v1.tsv")
# specify genes to filter -------------------------------------------------
## input kidney-specific EMT genes
# emt_genes_df <- fread(data.table = F, input = "./Resources/Analysis_Results/dependencies/combine_pt_with_emt_markers/20200911.v1/Kidney_Specific_EMT_Genes.20200911.v1.tsv")
emt_genes_df <- fread(data.table = F, input = "./Resources/Analysis_Results/dependencies/combine_pt_with_emt_markers_all/20200920.v1/Kidney_Specific_EMT_Genes.20200920.v1.tsv")
## add name for the marker groups (row-split strip text in the heatmap)
emt_genes_df <- emt_genes_df %>%
  mutate(Text_Gene_Group = ifelse(Gene_Group2 == "Tumor cells",
                                  "Tumor-cell\nmarkers", paste0(Gene_Group2, "\nmarkers")))
# Keep all marker genes (commented lines below are earlier, narrower choices).
genes2filter <- emt_genes_df$Gene
# genes2filter <- emt_genes_df$Gene[emt_genes_df$Gene_Group2 %in% "Mesenchymal"]
# genes2filter <- emt_genes_df$Gene[emt_genes_df$Gene_Group2 %in% c("Mesenchymal", "Epithelial")]
# genes2filter <- emt_genes_df$Gene[emt_genes_df$Gene_Group2 %in% c("Mesenchymal", "Epithelial") & !(emt_genes_df$Gene %in% c("MMP2", "MMP9", "MMP15"))]
# genes2filter <- emt_genes_df$Gene[!(emt_genes_df$Gene %in% c("MMP2", "MMP9", "MMP15"))]
# genes2filter <- emt_genes_df$Gene[emt_genes_df$Gene_Group2 %in% c("Mesenchymal", "Epithelial") & !(emt_genes_df$Gene %in% c("MMP2", "MMP9", "MMP15"))]
# genes2filter <- emt_genes_df$Gene[!(emt_genes_df$Gene %in% c("MMP2", "MMP9", "MMP15"))]
# count cell number and filter clusters -----------------------------------
# Attach per-barcode cluster ids, then count cells per
# aliquot_cluster_cellgroup so small clusters can be dropped later.
barcode2celltype_df <- merge(barcode2celltype_df, barcode2cluster_df, by.x = c("orig.ident", "individual_barcode"), by.y = c("aliquot", "individual_barcode"), all.x = T)
barcode2celltype_df <- barcode2celltype_df %>%
  mutate(id_bycluster_bycellgroup_byaliquot = paste0(orig.ident, "_", seurat_cluster_id, "_",Cell_group7))
cellcount_bycluster_df <- barcode2celltype_df %>%
  select(id_bycluster_bycellgroup_byaliquot) %>%
  table() %>%
  as.data.frame() %>%
  rename(id_bycluster_bycellgroup_byaliquot_original = ".") %>%
  # Mirror the make-names munging that the expression matrix went through
  # (spaces and hyphens become dots) so the two id columns join cleanly.
  mutate(id_bycluster_bycellgroup_byaliquot = gsub(x = id_bycluster_bycellgroup_byaliquot_original, pattern = " |\\-", replacement = "."))
# format expression data --------------------------------------------------
# Long-format expression for the marker genes; the column names encode
# aliquot_cluster_cellgroup, split apart below.
plot_data_long_df <- avgexp_df %>%
  filter(V1 %in% genes2filter) %>%
  melt() %>%
  mutate(id_bycluster_bycellgroup_byaliquot = gsub(x = variable, pattern = "SCT.", replacement = "")) %>%
  mutate(aliquot = str_split_fixed(string = id_bycluster_bycellgroup_byaliquot, pattern = "_", n = 3)[,1]) %>%
  mutate(id_cluster = str_split_fixed(string = id_bycluster_bycellgroup_byaliquot, pattern = "_", n = 3)[,2]) %>%
  mutate(cellgroup = str_split_fixed(string = id_bycluster_bycellgroup_byaliquot, pattern = "_", n = 3)[,3])
plot_data_long_df$Cell_count <- mapvalues(x = plot_data_long_df$id_bycluster_bycellgroup_byaliquot, from = cellcount_bycluster_df$id_bycluster_bycellgroup_byaliquot, to = as.vector(cellcount_bycluster_df$Freq))
plot_data_long_df$Cell_count <- as.numeric(as.vector(plot_data_long_df$Cell_count))
# Keep clusters with >= 30 cells and only tumor-related cell groups.
plot_data_long_df <- plot_data_long_df %>%
  dplyr::filter(Cell_count >= 30) %>%
  dplyr::filter(cellgroup %in% c("Tumor.cells", "Transitional.cells", "Tumor.like.cells"))
# Relabel aliquots with human-readable WU sample ids for column names.
plot_data_long_df$id_aliquot_wu <- mapvalues(x = plot_data_long_df$aliquot, from = idmetadata_df$Aliquot.snRNA, to = as.vector(idmetadata_df$Aliquot.snRNA.WU))
plot_data_long_df <- plot_data_long_df %>%
  dplyr::mutate(id_bycluster_bycellgroup_byaliquot_new = paste0(id_aliquot_wu, "_", id_cluster, "_", cellgroup))
## make matrix (genes x cluster-columns)
plot_data_wide_df <- dcast(data = plot_data_long_df, formula = V1 ~ id_bycluster_bycellgroup_byaliquot_new, value.var = "value")
plot_data_mat <- as.matrix(plot_data_wide_df[,-1])
rownames(plot_data_mat) <- plot_data_wide_df$V1
plot_data_mat[1:5, 1:5]
# filter genes based on variation -----------------------------------------
# Rank genes by their SD across cluster-columns; keep all mesenchymal
# markers, the 3 tumor-cell markers, and the 10 most variable
# epithelial/proximal-tubule markers.
sd_bygene_df <- data.frame(SD = apply(plot_data_mat,1, sd, na.rm = TRUE), gene = rownames(plot_data_mat))
sd_bygene_df$Cell_Group2 <- mapvalues(x = sd_bygene_df$gene, from = emt_genes_df$Gene, to = as.vector(emt_genes_df$Gene_Group2))
sd_bygene_df <- sd_bygene_df %>%
  arrange(desc(SD))
genes_plot_mesenchymal <- as.vector(sd_bygene_df$gene[sd_bygene_df$Cell_Group2 == "Mesenchymal"])
genes_plot_mesenchymal
# sd_bygene_other_df <- sd_bygene_df %>%
#   filter(Cell_Group2 != "Mesenchymal") %>%
#   group_by(Cell_Group2) %>%
#   top_n(n = 2, wt = SD)
# genes_plot_other <- as.vector(sd_bygene_other_df$gene)
genes_plot_tumormarkers <- c("PAX8", "PAX2", "CA9")
sd_bygene_epithelial_df <- sd_bygene_df %>%
  filter(Cell_Group2 %in% c("Epithelial", "Proximal tubule")) %>%
  arrange(desc(SD))
genes_plot_epithelial <- head(x = as.vector(sd_bygene_epithelial_df$gene), n = 10)
genes_plot_other <- c(genes_plot_tumormarkers, genes_plot_epithelial)
genes_plot <- c(genes_plot_mesenchymal, genes_plot_other)
# Subset the matrix to the final gene set (row order = plot order).
# NOTE(review): this indexing errors if any listed gene is absent from the
# matrix -- assumed present in the marker table; confirm on new inputs.
plot_data_mat <- plot_data_mat[genes_plot,]
## make mesenchymal score: mean scaled expression of 5 canonical genes
genes_mesenchymal_score <- c("FN1", "CDH2", "VIM", "FOXC2", "SNAI2")
scores_mesenchymal <- colMeans(plot_data_mat[genes_mesenchymal_score,])
## make epithelial score: mean of the top-5 most variable epithelial genes
# genes_epithelial_score <- c("KRT19", "CLDN10", "GPX3", "SLC5A12")
genes_epithelial_score <- head(x = genes_plot_epithelial, n = 5)
scores_epithelial <- colMeans(plot_data_mat[genes_epithelial_score,])
# specify colors ----------------------------------------------------------
## specify color for NA values
color_na <- "grey50"
## make color function for heatmap body colors (blue-white-red, clipped at +/-1.5)
color_blue <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[2]
color_red <- RColorBrewer::brewer.pal(n = 3, name = "Set1")[1]
# summary(as.vector(plot_data_mat))
colors_heatmapbody = colorRamp2(c(-1.5,
                                  0,
                                  1.5),
                                c(color_blue, "white", color_red))
## make colors for mesenchymal score (purple-white-orange)
summary(scores_mesenchymal)
color_orange <- RColorBrewer::brewer.pal(n = 5, name = "Set1")[5]
color_purple <- RColorBrewer::brewer.pal(n = 5, name = "Set1")[4]
colors_scores_mesenchymal = colorRamp2(c(-0.5, 0, 0.5),
                                       c(color_purple, "white", color_orange))
## make colors for epithelial score (blue-white-yellow)
summary(scores_epithelial)
color_yellow <- RColorBrewer::brewer.pal(n = 9, name = "YlGnBu")[1]
color_blue2 <- RColorBrewer::brewer.pal(n = 9, name = "YlGnBu")[9]
colors_scores_epithelial = colorRamp2(c(-1, 0, 1),
                                      c(color_blue2, "white", color_yellow))
# get column ids ----------------------------------------------------------
# Split the composite column names back into sample id and cluster id.
columnnames_plot <- colnames(plot_data_mat)
ids_aliquot_wu <- str_split_fixed(string = columnnames_plot, pattern = "_", n = 3)[,1]; ids_aliquot_wu
ids_cluster <- str_split_fixed(string = columnnames_plot, pattern = "_", n = 3)[,2]; ids_cluster
# make row split ----------------------------------------------------------
# Rows are split into the four marker groups, in a fixed display order.
row_split_vec <- mapvalues(x = genes_plot, from = emt_genes_df$Gene, to = as.vector(emt_genes_df$Text_Gene_Group))
row_split_vec
row_split_factor <- factor(x = row_split_vec, levels = c("Mesenchymal\nmarkers", "Epithelial\nmarkers", "Proximal tubule\nmarkers", "Tumor-cell\nmarkers"))
# make column annotation --------------------------------------------------
## make highlighted samples: specific sample_cluster columns get callout labels
index_highlight <- which(columnnames_plot %in% c("C3N-01200-T2_5_Transitional.cells", "C3N-01200-T1_5_Tumor.cells", "C3N-01200-T3_2_Transitional.cells", "C3N-00495-T1_10_Transitional.cells",
                                                 "C3L-00079-T1_7_Transitional.cells", "C3L-00790-T1_7_Transitional.cells", "C3N-00495-T1_9_Transitional.cells"))
texts_aliquot_cluster <- paste0(ids_aliquot_wu, "_C", ids_cluster)
texts_highlight <- texts_aliquot_cluster[index_highlight]; texts_highlight
## make column annotation object: score tracks + callout marks below the heatmap
colanno_obj = HeatmapAnnotation(MesenchymalScore = anno_simple(x = scores_mesenchymal, col = colors_scores_mesenchymal, height = unit(1, "cm")),
                                EpithelialScore = anno_simple(x = scores_epithelial, col = colors_scores_epithelial, height = unit(1, "cm")),
                                link = anno_mark(at = index_highlight, labels = texts_highlight, labels_gp = gpar(fontsize = 15), side = "bottom"),
                                annotation_name_gp = gpar(fontsize = 20, fontface = "italic"), annotation_name_side = "left")
# make column order --------------------------------------------------
# Order columns by decreasing mesenchymal score.
column_order_vec <- order(scores_mesenchymal, decreasing = T)
# plot ------------------------------------------------------------
# Build the heatmap: rows = marker genes split by marker group, columns =
# sample clusters ordered by mesenchymal score (no clustering), with the
# score/callout annotation below. The shared legend is drawn separately.
p <- ComplexHeatmap::Heatmap(matrix = plot_data_mat,
                             col = colors_heatmapbody,
                             na_col = color_na, border = "black",
                             show_row_names = TRUE, row_names_gp = gpar(fontsize = 15, fontface = "italic"),
                             row_split = row_split_factor,
                             row_title_rot = 0, row_title_gp = gpar(fontsize = 20, fontface = "bold"),
                             # row_labels = factor_cellgroup,
                             cluster_row_slices = FALSE, show_row_dend = FALSE,
                             # column_km = 8, column_km_repeats = 150,
                             show_column_dend = FALSE, cluster_columns = FALSE,
                             column_order = column_order_vec,
                             # FIX: was `bottom =`, which only worked via R's
                             # partial argument matching; spell out the real
                             # argument name. Also T/F -> TRUE/FALSE.
                             bottom_annotation = colanno_obj, show_column_names = FALSE, column_title = NA,
                             show_heatmap_legend = FALSE)
p
# make legend list --------------------------------------------------------
# Manually built legends (heatmap's own legend is suppressed above) for the
# expression scale and the two per-column scores.
list_lgd = list(
  Legend(col_fun = colors_heatmapbody,
         title = "Scaled snRNA expression",
         title_gp = gpar(fontsize = 15, fontface = "bold"),
         legend_width = unit(6, "cm"),
         legend_height = unit(3, "cm"),
         direction = "horizontal"),
  Legend(col_fun = colors_scores_mesenchymal,
         title = "Mesenchymal score",
         title_gp = gpar(fontsize = 15, fontface = "bold"),
         legend_width = unit(6, "cm"),
         legend_height = unit(3, "cm"),
         direction = "horizontal"),
  Legend(col_fun = colors_scores_epithelial,
         title = "Epithelial score",
         title_gp = gpar(fontsize = 15, fontface = "bold"),
         legend_width = unit(6, "cm"),
         legend_height = unit(3, "cm"),
         direction = "horizontal"))
# write output ------------------------------------------------------------
# Render the same figure to PNG (raster) and PDF (vector) in the run folder.
file2write <- paste0(dir_out, "EMT_Genes_by_tumorcluster", ".png")
png(file2write, width = 3000, height = 1200, res = 150)
draw(object = p,
     annotation_legend_side = "top", annotation_legend_list = list_lgd)
dev.off()
file2write <- paste0(dir_out, "EMT_Genes_by_tumorcluster", ".pdf")
pdf(file2write, width = 20, height = 9, useDingbats = F)
draw(object = p,
     annotation_legend_side = "top", annotation_legend_list = list_lgd)
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.