blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1ae900f0ccd22b40f59d0260cfc4a31cf45186e6 | fc60224dd32db5536c7fd3ab65c2bd523393eaba | /man/ndbc_fix.Rd | 504a0fc9d082af01b25512f4cb1dac12bb5fb59c | [] | no_license | evmo/ndbc | a491995a381cf961c5bfd2d6448301b8bc8be5e1 | 240a5b339b7cc93fc4e5462fbd23dc0cfafc3203 | refs/heads/master | 2021-01-19T02:55:20.413150 | 2020-10-25T05:35:56 | 2020-10-25T05:35:56 | 78,153,128 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 286 | rd | ndbc_fix.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{ndbc_fix}
\alias{ndbc_fix}
\title{Fix variations in older historical data}
\usage{
ndbc_fix(data)
}
\arguments{
\item{data}{}
}
\value{
}
\description{
Fix variations in older historical data
}
|
db92ec041317d7374b6985bee9debcea4330e116 | 8eb66f70525aad1284cfd5a33ec7a38c06b1955c | /list8/EA_predict.R | 1310fa228f894c3aaba2c70d7d2c665de115dc24 | [] | no_license | iloscnd/data-mining | 09857e4bc1efefbb8341a83504d9fdf024c3ad58 | 1f2a90a4de532cddbd867531cecf357269de693f | refs/heads/master | 2020-04-01T18:41:59.934349 | 2019-02-21T00:16:34 | 2019-02-21T00:16:34 | 153,508,797 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,560 | r | EA_predict.R | library("forecast")
library("TTR")
plotForecastErrors <- function(forecasterrors) {
  # Draw a red histogram of the forecast errors with an overlaid blue
  # N(0, sd) density line, so departures from normality are easy to spot.
  #
  # Args:
  #   forecasterrors: numeric vector of forecast residuals (NAs allowed).
  #
  # Side effect: draws on the active graphics device; no useful return value.
  mybinsize <- IQR(forecasterrors, na.rm=TRUE)/4
  mysd <- sd(forecasterrors, na.rm=TRUE)
  # Guard against a degenerate bin size (IQR of 0 when most errors are
  # identical), which would make seq() below fail.
  if (mybinsize == 0) mybinsize <- max(mysd/4, .Machine$double.eps)
  mymin <- min(forecasterrors, na.rm=TRUE) - mysd*5
  mymax <- max(forecasterrors, na.rm=TRUE) + mysd*3
  # generate normally distributed data with mean 0 and standard deviation mysd
  mynorm <- rnorm(10000, mean=0, sd=mysd)
  mymin2 <- min(mynorm)
  mymax2 <- max(mynorm)
  if (mymin2 < mymin) { mymin <- mymin2 }
  if (mymax2 > mymax) { mymax <- mymax2 }
  # Extend the break sequence one bin past mymax: seq() stops at or before
  # its upper limit, so without the extra bin hist() can error with
  # "some 'x' not counted; maybe 'breaks' do not span range of 'x'".
  mybins <- seq(mymin, mymax + mybinsize, mybinsize)
  # make a red histogram of the forecast errors, with the normally distributed data overlaid:
  hist(forecasterrors, col="red", freq=FALSE, breaks=mybins)
  # freq=FALSE ensures the area under the histogram = 1
  # bin the simulated normal sample on the same breaks (no plot)
  myhist <- hist(mynorm, plot=FALSE, breaks=mybins)
  # plot the normal curve as a blue line on top of the histogram of forecast errors:
  points(myhist$mids, myhist$density, type="l", col="blue", lwd=2)
}
# Read the EA series and coerce it to a time-series object.
ea <- scan("list8/EA.dat")
eats <- ts(ea)
plot(eats)
##EAcomponents <- decompose(eats)
# Smooth with a 4-period simple moving average to inspect the trend.
eatsSMA <- SMA(eats, n=4)
plot.ts(eatsSMA)
## no seasonality -> Holt's exponential smoothing (gamma disabled)
eaForecast <- HoltWinters(eats, gamma=FALSE)
eaForecast
plot(eaForecast)
# Forecast 10 periods ahead and run residual diagnostics
# (autocorrelation, Ljung-Box test, error distribution).
eaForecast2 <- forecast(eaForecast, h=10)
plot(eaForecast2)
acf(eaForecast2$residuals, lag.max=20, na.action=na.pass)
Box.test(eaForecast2$residuals, lag=20, type="Ljung-Box")
plot.ts(eaForecast2$residuals)
plotForecastErrors(eaForecast2$residuals)
### same model without the beta (trend) term
## no seasonality
eaForecast <- HoltWinters(eats, beta=FALSE, gamma=FALSE)
eaForecast
plot(eaForecast)
eaForecast2 <- forecast(eaForecast, h=10)
plot(eaForecast2)
acf(eaForecast2$residuals, lag.max=20, na.action=na.pass)
Box.test(eaForecast2$residuals, lag=20, type="Ljung-Box")
plot.ts(eaForecast2$residuals)
plotForecastErrors(eaForecast2$residuals)
#### ARIMA
# Second-order differencing to achieve stationarity.
eadiff <- diff(eats, differences=2)
plot.ts(eadiff) ## hmm, not sure about this one
acf(eadiff, lag.max=20)
acf(eadiff, lag.max=20, plot=FALSE) ## 1
pacf(eadiff, lag.max=20)
pacf(eadiff, lag.max=20, plot=FALSE) ## 2
# ARIMA(1,2,1) chosen from the ACF/PACF cut-offs above.
eaarima <- arima(eats, order=c(1,2,1))
eaarima
eaarimaforecast <- forecast(eaarima, h=10)
plot(eaarimaforecast)
acf(eaarimaforecast$residuals, lag.max=20)
Box.test(eaarimaforecast$residuals, lag=20, type="Ljung-Box")
plot.ts(eaarimaforecast$residuals)
plotForecastErrors(eaarimaforecast$residuals)
|
5c4a06d6f6c830bcff634f4f84d925661e5f53c6 | 4a9747c24015dd65a3bf460a62d01109a8058c43 | /R/hhcartr_base_functions.R | ac01b64e4fbdcc35ad7bcee1a9b08c877c6e567e | [] | no_license | cran/hhcartr | 76b301038254e80ed795316949aa9163b686b20c | 55a061c1fe0ce90e8298372f39c40d6e5d8cc1fb | refs/heads/master | 2023-06-07T04:26:11.104290 | 2021-07-02T06:00:08 | 2021-07-02T06:00:08 | 382,391,912 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 12,116 | r | hhcartr_base_functions.R | # source: hhcartr_base_functions.R
#
# Contains functions that are common to all hhcartr models.
# hhcartr models - HHDecisionTree
# - HHRandomForest
# base functions - validate_parameters
# - gini_
# - compute_max_features
# - find_number_valid_feature_columns
# - check_package
# - notin
# - is.not.null
# - mni.control
# - prune.control
# Validate the user-supplied hyper-parameters shared by all hhcartr models
# (HHDecisionTree and HHRandomForest).  Each checkmate assertion signals an
# error on the first invalid value; on success nothing useful is returned.
#
# NOTE(review): max_features doubles as a model discriminator here - it is
# NA for HHDecisionTree (which forces n_trees to be exactly 1) and non-NA
# for HHRandomForest (which allows up to 10000 trees).  Confirm callers
# rely on that convention.
validate_parameters <- function(response,
                                n_min,
                                n_folds,
                                n_trees,
                                min_node_impurity,
                                sample_size,
                                testSize,
                                sampleWithReplacement,
                                useIdentity,
                                pruning,
                                parallelize,
                                use_smote = FALSE,
                                useOOBEE = FALSE,
                                calcVarImp = FALSE,
                                max_features = NA){
  # validate response parameter
  # valid parameter values
  response_choices <- c("classify", "regressor")
  # validate values of response parameter
  checkmate::assertChoice(response, response_choices)
  # validate n_min parameter
  checkmate::assertInt(n_min, lower = 1)
  # validate n_folds parameter
  checkmate::assertInt(n_folds, lower = 1)
  # validate n_trees parameter
  # use max_features to determine if using HHRandomForest or HHDecisionTree; if its not NA then
  # using HHRandomForest so can specify an upper limit of more than one.
  if(is.na(max_features)){
    checkmate::assertInt(n_trees, lower = 1, upper = 1)
  } else {
    checkmate::assertInt(n_trees, lower = 1, upper = 10000)
  }
  # validate min_node_impurity parameter:
  # either the literal string "auto" or a number in [0, 1].
  if(typeof(min_node_impurity) == "character"){
    checkmate::assert_character(min_node_impurity, pattern = "auto")
  } else {
    checkmate::assert_number(min_node_impurity, lower = 0.0, upper = 1.0)
  }
  # validate sample_size parameter
  checkmate::assert_number(sample_size, lower = 0.10, upper = 1.0)
  # validate testSize parameter
  checkmate::assert_number(testSize, lower = 0.0, upper = 0.75)
  # valid parameter values for all the logical flags below
  choices <- c(TRUE, FALSE)
  # validate values of sampleWithReplacement parameter
  checkmate::assertChoice(sampleWithReplacement, choices)
  # validate values of useIdentity parameter
  checkmate::assertChoice(useIdentity, choices)
  # validate values of parallelize parameter
  checkmate::assertChoice(parallelize, choices)
  # validate values of pruning parameter
  checkmate::assertChoice(pruning, choices)
  # validate values of use_smote parameter
  checkmate::assertChoice(use_smote, choices)
  # validate values for the use_oobee parameter
  checkmate::assertChoice(useOOBEE, choices)
  # validate values for the calcVarImp parameter
  checkmate::assertChoice(calcVarImp, choices)
  if(!is.na(max_features)){
    # validate max_features parameter, can have the
    # following values: "sqrt", "log2", None, an int or
    # a type float.
    if(is.integer(max_features)){
      checkmate::assertInteger(max_features, lower = 1)
    } else if(is.double(max_features)){
      checkmate::assert_number(max_features, lower = 1.0)
    } else {
      choices <- c("sqrt", "log2", "None")
      checkmate::assertChoice(max_features, choices)
    }
  }
  # validate values of the classify parameter
  #classify_choices <- c(TRUE, FALSE)
  #checkmate::assertChoice(classify, classify_choices)
  # validate values of the modelName parameter
  #modelName_choices <- c("HHDecisionTreeClassifier", "HHDecisionTreeRegressor")
  #checkmate::assertChoice(modelName, modelName_choices)
}
# Calculate the Gini index of a non-empty node.
# Gini impurity is 1 - sum(p^2) over the class frequencies p, which is
# equivalent to sum(p * (1 - p)) since the frequencies sum to 1.
#   y           : vector of class labels at the node
#   length_of_y : number of samples at the node (i.e. length(y))
gini_ <- function(y, length_of_y){
  class_counts <- as.vector(table(y))
  class_freqs <- class_counts / length_of_y
  1.0 - sum(class_freqs^2)
}
compute_max_features <- function(max_features, n_features_){
  # For random forests: compute how many features to randomly select from
  # the total number of features in the training set.
  #
  #   max_features : "None" (use all), "sqrt", "log2", or a numeric count
  #   n_features_  : total number of features available
  #
  # Returns an integer count of features to sample.
  #
  # Fix: validate_parameters() accepts "log2" as a valid choice, but this
  # function previously had no "log2" branch and silently fell through to
  # the default (all features).
  if(identical(max_features, "None")){
    return(n_features_)
  } else if(identical(max_features, "sqrt")){
    return(as.integer(sqrt(n_features_)))
  } else if(identical(max_features, "log2")){
    return(as.integer(log2(n_features_)))
  } else if(is.numeric(max_features)){
    # covers both integer and double counts
    return(as.integer(max_features))
  } else {
    if(pkg.env$show_progress){
      message("compute_max_features() max_features unknown, defaulting to n_features_.")
    }
    return(n_features_)
  }
}
find_number_valid_feature_columns <- function(X, n_features_){
  # Return the indices of feature columns in X that are not constant
  # (columns with a single unique value carry no split information).
  #
  #   X           : data frame / matrix of features
  #   n_features_ : number of feature columns to inspect
  #
  # Returns an integer vector of usable column indices; integer(0) when no
  # column is usable.
  #
  # Improvements over the original loop: seq_len() is safe for
  # n_features_ == 0 (the old 1:n_features_ iterated over c(1, 0)), and the
  # dead debug-message branch (debug_msg was hard-coded FALSE) is removed.
  usable <- vapply(seq_len(n_features_),
                   function(k) length(unique(X[, k])) > 1,
                   logical(1))
  which(usable)
}
# Attach each package named in `pkgname`, trying require() first and
# falling back to library() when require() returns FALSE.
# NOTE(review): require() returns FALSE rather than erroring, and the
# install.packages() fallback is commented out, so a genuinely missing
# package still errors inside library() - confirm that is intended.
check_package <- function(pkgname){
  package.check <- lapply(pkgname, FUN = function(x) {
    if (!require(x, character.only = TRUE)) {
      # no ! - can't install packages in my package.
      #install.packages(x, dependencies = TRUE)
      library(x, character.only = TRUE)
    }
  })
}
# %notin%: the negation of %in% - TRUE for elements of x absent from table.
`%notin%` <- function(x, table) !(x %in% table)

# is.not.null: TRUE when x is anything other than NULL.
is.not.null <- function(x) !is.null(x)
#################################################################################################
#'
#' mni.control
#'
#' Internal helper that validates the settings passed via the control
#' parameter and returns them as a list.
#'
#' The following parameters are supported:
#'
#' @param mni_trials Number of times the whole n-fold cross-validation process
#'   is repeated; the resulting min_node_impurity is the mean over these
#'   trials. The default value is 1.
#' @param mni_n_folds Number of folds used when evaluating candidate
#'   min_node_impurity values. Any integer from 1 up to the number of training
#'   observations is accepted. The default value is 10.
#' @param mni_n_trees Number of trees used per evaluation; currently only 1 is
#'   allowed. The default value is 1.
#' @param mni_size Step added to the previous candidate to obtain the next
#'   min_node_impurity value; accepted range 0.001 to 0.10. The default value
#'   is 0.01.
#' @param mni_start First min_node_impurity value evaluated; accepted range
#'   0.001 to 1.0. The default value is 0.05.
#' @param mni_numvals Number of min_node_impurity values to try; accepted
#'   range 1 to 1000. The default value is 50.
#' @param ... parameter catch all.
#'
#' @return Returns a list of all validated parameters, in the order given
#'   above.
#'
#' @export
mni.control <- function(mni_trials = 1,
                        mni_n_folds = 10,
                        mni_n_trees = 1,
                        mni_size = 0.01,
                        mni_start = 0.05,
                        mni_numvals = 50,
                        ...){
  # Check each setting in turn; checkmate raises an error on the first
  # violation, preserving the original validation order.
  checkmate::assertInt(mni_trials, lower = 1)
  checkmate::assertInt(mni_n_folds, lower = 1)
  checkmate::assertInt(mni_n_trees, lower = 1, upper = 1)
  checkmate::assert_number(mni_size, lower = 0.001, upper = 0.10)
  checkmate::assert_number(mni_start, lower = 0.001, upper = 1.0)
  checkmate::assertInt(mni_numvals, lower = 1, upper = 1000)
  # All checks passed: hand the validated values back as an (unnamed) list.
  list(mni_trials,
       mni_n_folds,
       mni_n_trees,
       mni_size,
       mni_start,
       mni_numvals)
}
#################################################################################################
#'
#' prune.control
#'
#' Internal helper that validates the settings passed via the prune_control
#' parameter and returns them as a list.
#'
#' The following parameters are supported:
#'
#' @param prune_type The pruning strategy. Only 'ccp' and 'all' are accepted
#'   in this version ('stochastic' is not yet supported). The default value
#'   is 'ccp'.
#' @param prune_stochastic_max_nodes Number of internal nodes to randomly
#'   sample on each stochastic pruning draw. The sample is split evenly
#'   between left and right internal nodes, so the value must be an even
#'   integer between 2 and 24. The default value is 10.
#' @param prune_stochastic_max_depth Maximum decision-tree depth from which
#'   internal nodes may be sampled; nodes deeper than this are not eligible.
#'   Any integer of at least 1 is accepted. The default value is 10.
#' @param prune_stochastic_samples Number of times internal nodes are sampled
#'   from the current tree; accepted range 1 to 10000. The default value
#'   is 100.
#' @param ... parameter catch-all.
#'
#' @return Returns a list of validated parameters, in the order given above.
#'
#' @export
prune.control <- function(prune_type = "ccp",
                          prune_stochastic_max_nodes = 10,
                          prune_stochastic_max_depth = 10,
                          prune_stochastic_samples = 100,
                          ...){
  # Only "ccp" and "all" are supported in this version.
  checkmate::assertChoice(prune_type, c("ccp", "all"))
  checkmate::assertInt(prune_stochastic_max_nodes, lower = 2, upper = 24)
  # the sampled nodes are split evenly left/right, so the count must be even
  checkmate::assert(prune_stochastic_max_nodes %% 2 == 0)
  checkmate::assertInt(prune_stochastic_max_depth, lower = 1)
  checkmate::assertInt(prune_stochastic_samples, lower = 1, upper = 10000)
  # All checks passed: hand the validated values back as an (unnamed) list.
  list(prune_type,
       prune_stochastic_max_nodes,
       prune_stochastic_max_depth,
       prune_stochastic_samples)
}
|
7e86c75f94e361a508031f2ba0c7981738ce9d82 | 3cea5014d1d3500d3dc6748d438896e08187a518 | /Módulo 1 - Tratamiento de datos en R/Código/1.4. Limpieza y Transformación de Datos.R | b748e6e12d85b2cc74efbf5d2c1c030f9d6de7d1 | [] | no_license | MAchineLearningTrIT/Machine-Learning-programacion-de-algoritmos-en-R-y-Python | ed6dd99c3b120dfd433460fab2bdb04f4146272b | e9e0d9d32d55d7b1e5fab1229eb7a36655f78f6c | refs/heads/master | 2022-04-23T19:57:05.747335 | 2020-04-19T20:14:56 | 2020-04-19T20:14:56 | 257,051,539 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,923 | r | 1.4. Limpieza y Transformación de Datos.R | # LIMPIEZA DE DATOS
x <- c(10, 20, NA, 30, NA, 40) # vector containing NA (Not Available) values
is.na(x) # this function identifies the NA values
x[!is.na(x)] # exclude the NA values
mean(x,na.rm=TRUE) # many functions can ignore NA values via na.rm
# Example dataset
data("airquality")
df = airquality
# Number of NA values
sum(is.na(df))
summary(df)
colSums(is.na(df))
# Impute with the median
df$Ozone[is.na(df$Ozone)] <- median(df$Ozone, na.rm = TRUE)
colSums(is.na(df))
df$Solar.R[is.na(df$Solar.R)] <- median(df$Solar.R, na.rm = TRUE)
colSums(is.na(df))
# Drop rows containing NA values
df2 = na.omit(airquality)
# OUTLIER CORRECTION
boxplot(df)
par(mfrow=c(1,2))
boxplot(df$Ozone,col = "antiquewhite3",main = "Boxplot of Ozone",outcol="Blue",outpch=19,boxwex=0.7,range = 1.5)
hist(df$Ozone,col = "antiquewhite3",main = "Histogram of Ozone", xlab = "Observations",breaks = 15)
# Values outside the interquartile range
ric <- IQR(df$Ozone)
q2 <- quantile(df$Ozone,0.75)
sum(df$Ozone > q2+1.5*IQR(df$Ozone))
df[df$Ozone > q2+1.5*IQR(df$Ozone),]
# TRANSFORMATION OF CATEGORICAL VARIABLES
rm(list=ls())
data("Titanic")
df = as.data.frame(Titanic)
summary(df)
unique(df$Class)
# Print the class of each column
for (col in names(df)){
  print(c(col,class(df[[col]])))
}
# Transform the categorical columns to numeric
df$Class_num <- ifelse(df$Class=="1st",1,
                       ifelse(df$Class=="2nd",2,
                              ifelse(df$Class=="3rd",3,4)))
head(df)
df$Sex_num <- ifelse(df$Sex=="Male",1,0)
df$Age_num <- ifelse(df$Age=="Child",1,0)
df$Survived_num <- ifelse(df$Survived=="No",0,1)
head(df)
class(df$Freq)
df2 <- df[,c("Class_num","Sex_num","Age_num","Survived_num","Freq")]
df2
# One-hot encoding
# NOTE(review): the 'dummies' package is installed/loaded here at runtime -
# confirm it is still available on your package repository before running.
install.packages("dummies")
library(dummies)
df3 <- dummy.data.frame(df)
head(df3)
df4 <- cbind(df3[,1:4],df2[,2:5])
df4
c5541955d7a669de1bd7ad08615c7820058ba02d | 30f69e17253e3ccba7654766f790430a23dfd608 | /R/analyse_gvc_indicators.R | a78a14da5ef3d0e5471eba3b6916d1ca521fc089 | [] | no_license | amrofi/GVCs-LICs | bd535ef059e70bd9e9fc9d20934937e303ffe871 | 47ac9e2ee56854d9163746bef4c7bf6b58d1331d | refs/heads/master | 2020-07-24T21:17:08.647680 | 2017-02-21T08:28:24 | 2017-02-21T08:28:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,345 | r | analyse_gvc_indicators.R | # analyse_gvc_indicators.R
# Bastiaan Quast
# bquast@gmail.com
# load the data (objects: gvc_indicators, nrca, country_chars, extra,
# w1995_2011 - loaded into the global environment)
load(file = 'data/new_gvc_indicators.RData')
load(file = 'data/nrca_df.RData')
# load("data/country_vars.RData") # NOT USING THIS ONE, USING COUNTRY_CHARS
load(file = 'data/country_chars.RData')
load(file = 'data/extra_vars.RData')
load(file = 'data/w1995_2011.RData')
# load("data/trca.RData")
# load required libraries
library(dplyr)
library(ggvis)
library(magrittr)
# country_vars <- extra
# normalise the ISO3 codes so the merges below match on case
country_chars$iso3 <- tolower(country_chars$iso3)
# merge NRCA and country chars
nrca %<>% merge(country_chars, by.x = 'country', by.y = 'iso3')
# merge wwz and country chars
w1995_2011 %<>% merge(country_chars, by.x = 'Exporting_Country', by.y = 'iso3')
# merge gvc indicators and country chars (all.x keeps indicator rows with
# no matching country characteristics)
gvc_indicators %<>% merge(country_chars, by.x = 'country', by.y = 'iso3', all.x = TRUE)
# order gvc_indicators by country, sector, then year
gvc_indicators <- gvc_indicators[order(gvc_indicators$country, gvc_indicators$sector, gvc_indicators$year),]
# year as numeric (factor -> character -> numeric to avoid level codes)
gvc_indicators$year <- as.numeric(as.character(gvc_indicators$year))
# # remove values close to zero
# gvc_indicators[which(gvc_indicators$e2r <= 0.01),]$e2r <- NA
# gvc_indicators[which(gvc_indicators$e2rL <= 0.01),]$e2rL <- NA
# gvc_indicators[which(gvc_indicators$e2rLM <= 0.01),]$e2rLM <- NA
# gvc_indicators[which(gvc_indicators$e2rUM <= 0.01),]$e2rUM <- NA
# gvc_indicators[which(gvc_indicators$e2rH <= 0.01),]$e2rH <- NA
# below does NOT work
# create fd
# gvc_indicators %<>%
#   group_by(sector) %>%
#   mutate(fdloge2r   = log(e2r)   - lag(log(e2r)),
#          fdloge2rL  = log(e2rL)  - lag(log(e2rL)),
#          fdloge2rLM = log(e2rLM) - lag(log(e2rLM)),
#          fdloge2rUM = log(e2rUM) - lag(log(e2rUM)),
#          fdloge2rH  = log(e2rH)  - lag(log(e2rH)),
#          fdlogi2e   = log(i2e)   - lag(log(i2e)),
#          fdlogi2eL  = log(i2eL)  - lag(log(i2eL)),
#          fdlogi2eLM = log(i2eLM) - lag(log(i2eLM)),
#          fdlogi2eUM = log(i2eUM) - lag(log(i2eUM)),
#          fdlogi2eH  = log(i2eH)  - lag(log(i2eH))  )
# save the merged panel for downstream scripts
save(gvc_indicators, file = 'data/gvc_indicators_merged.RData')
# plot some NRCA: one line per country, yearly country totals
## Low income
nrca %>%
  group_by(country, year) %>%
  filter(class == 'L') %>%
  summarise(nrca = sum(nrca, na.rm=TRUE)) %>%
  ggvis(~year, ~nrca, stroke=~factor(country)) %>%
  layer_lines()
## Lower middle income
nrca %>%
  group_by(country, year) %>%
  filter(class == 'LM') %>%
  summarise(nrca = sum(nrca, na.rm=TRUE)) %>%
  ggvis(~year, ~nrca, stroke=~factor(country)) %>%
  layer_lines()
# merge gvc indicators and nrca
gvc_indicators %<>% merge(nrca, by.x = c("ctry", "isic", "year"), by.y = c("country", "industry", "year") )
# NOTE(review): the load("data/trca.RData") call near the top of the file is
# commented out, so `trca` is undefined here and this merge will fail unless
# trca is created elsewhere - confirm before running.
gvc_indicators %<>% merge(trca, by = c("ctry", "isic", "year") )
# # create logical gdp var
# gvc_indicators$lic <- gvc_indicators$avg_gdppc <= 6000
# gvc_indicators$lmic <- gvc_indicators$avg_gdppc <= 12000
# gvc_indicators$hic <- gvc_indicators$avg_gdppc >= 12000
# yearly log totals of each indicator; the fdlog* columns are turned into
# first differences of logs in place just below
gvc_indicators %>%
  group_by(year) %>% ###### REMOVE CLASS HERE THEN RUN AGAIN AND PLOT
  summarise(fdloge2r = log(sum(e2r, na.rm=TRUE)),
            fdloge2rL = log(sum(e2rL, na.rm=TRUE)),
            fdloge2rLM = log(sum(e2rLM, na.rm=TRUE)),
            fdloge2rUM = log(sum(e2rUM, na.rm=TRUE)),
            fdloge2rH = log(sum(e2rH, na.rm=TRUE)),
            fdlogi2e = log(sum(i2e, na.rm=TRUE)),
            fdlogi2eL = log(sum(i2eL, na.rm=TRUE)),
            fdlogi2eLM = log(sum(i2eLM, na.rm=TRUE)),
            fdlogi2eUM = log(sum(i2eUM, na.rm=TRUE)),
            fdlogi2eH = log(sum(i2eH, na.rm=TRUE)) ) -> logged
# first-difference each logged series (year-on-year log growth)
logged$fdloge2r <- logged$fdloge2r - lag(logged$fdloge2r)
logged$fdloge2rL <- logged$fdloge2rL - lag(logged$fdloge2rL)
logged$fdloge2rLM <- logged$fdloge2rLM - lag(logged$fdloge2rLM)
logged$fdloge2rUM <- logged$fdloge2rUM - lag(logged$fdloge2rUM)
logged$fdloge2rH <- logged$fdloge2rH - lag(logged$fdloge2rH)
logged$fdlogi2e <- logged$fdlogi2e - lag(logged$fdlogi2e)
logged$fdlogi2eL <- logged$fdlogi2eL - lag(logged$fdlogi2eL)
logged$fdlogi2eLM <- logged$fdlogi2eLM - lag(logged$fdlogi2eLM)
logged$fdlogi2eUM <- logged$fdlogi2eUM - lag(logged$fdlogi2eUM)
logged$fdlogi2eH <- logged$fdlogi2eH - lag(logged$fdlogi2eH)
# the first year has no lag, so drop it
logged <- subset(logged, year!=1995)
# e2r plot: overall series plus the four income-class series
logged %>%
  ggvis(~year, ~fdloge2r, stroke='e2r') %>%
  layer_lines() %>%
  layer_lines(~year, ~fdloge2rL, stroke='e2r (L)') %>%
  layer_lines(~year, ~fdloge2rLM, stroke='e2r (LM)') %>%
  layer_lines(~year, ~fdloge2rUM, stroke='e2r (UM)') %>%
  layer_lines(~year, ~fdloge2rH, stroke='e2r (H)')
# i2e plot: overall series plus the four income-class series
logged %>%
  ggvis(~year, ~fdlogi2e, stroke='i2e') %>%
  layer_lines() %>%
  layer_lines(~year, ~fdlogi2eL, stroke='i2e (L)') %>%
  layer_lines(~year, ~fdlogi2eLM, stroke='i2e (LM)') %>%
  layer_lines(~year, ~fdlogi2eUM, stroke='i2e (UM)') %>%
  layer_lines(~year, ~fdlogi2eH, stroke='i2e (H)')
## create basic summaries
# The original code repeated the same group_by(year) / summarise(sum) /
# ggvis line-plot pattern for every indicator column; that duplication is
# factored into one helper.  Each call below reproduces one of the original
# plots (same yearly totals, same line chart), with the y-axis titled after
# the indicator.
plot_yearly_sum <- function(df, col, log_scale = FALSE) {
  # yearly totals of one column (NAs propagate, as in the original sum())
  totals <- tapply(df[[col]], df$year, sum)
  if (log_scale) totals <- log(totals)
  axis_title <- if (log_scale) paste0(col, "log") else col
  data.frame(year = as.numeric(names(totals)), value = as.numeric(totals)) %>%
    ggvis(~year, ~value) %>%
    add_axis("y", title = axis_title) %>%
    layer_lines()
}

# e2r, overall and split by income class of the partner economy
plot_yearly_sum(gvc_indicators, "e2r")
plot_yearly_sum(gvc_indicators, "e2rL")
plot_yearly_sum(gvc_indicators, "e2rLM")
plot_yearly_sum(gvc_indicators, "e2rUM")
plot_yearly_sum(gvc_indicators, "e2rH")

## create basic LOG summaries
plot_yearly_sum(gvc_indicators, "e2r", log_scale = TRUE)
plot_yearly_sum(gvc_indicators, "e2rL", log_scale = TRUE)
plot_yearly_sum(gvc_indicators, "e2rLM", log_scale = TRUE)
plot_yearly_sum(gvc_indicators, "e2rUM", log_scale = TRUE)
plot_yearly_sum(gvc_indicators, "e2rH", log_scale = TRUE)

# i2e
plot_yearly_sum(gvc_indicators, "i2e")

#### in paper
# i2e with the share of exports on a second (right-hand) axis; kept
# verbatim because it is the figure used in the paper
gvc_indicators %>%
  group_by(year) %>%
  summarise(i2e = sum(i2e) ) %>%
  mutate(divided = c(17.3, 20.5, 22.1, 23.2, 21.0, 22.4, 23.2)) %>%
  ggvis(x=~year) %>%
  layer_lines(y=~i2e) %>%
  add_axis('y', orient = 'left', title = 'i2e') %>%
  add_axis('y', 'ydiv' , orient = 'right',
           title= 'i2e as Percentage of exports (red)', grid=F, title_offset = 50,
           properties = axis_props(labels = list(fill = 'red')) ) %>%
  layer_lines( prop('y', ~divided, scale='ydiv'), stroke:='red' )

# NOTE(review): melt() and ggplot() below need reshape2 and ggplot2, which
# are not loaded at the top of this script - confirm they are attached
# elsewhere before running this chunk.
gvc_indicators %>%
  group_by(year) %>%
  summarise(total = sum(i2e) ) %>%
  mutate(share = c(17.3, 20.5, 22.1, 23.2, 21.0, 22.4, 23.2)) %>%
  melt(id.vars = 'year') %>%
  ggplot(aes(x=year, y=value)) %+%
  facet_grid(variable ~ ., scale='free') %+%
  geom_area(aes(fill=variable)) %+%
  scale_fill_brewer(palette='Set1')

# i2e split by income class of the partner economy
plot_yearly_sum(gvc_indicators, "i2eL")
plot_yearly_sum(gvc_indicators, "i2eLM")
plot_yearly_sum(gvc_indicators, "i2eUM")
plot_yearly_sum(gvc_indicators, "i2eH")

# i2e log summaries
plot_yearly_sum(gvc_indicators, "i2e", log_scale = TRUE)
plot_yearly_sum(gvc_indicators, "i2eL", log_scale = TRUE)
plot_yearly_sum(gvc_indicators, "i2eLM", log_scale = TRUE)
plot_yearly_sum(gvc_indicators, "i2eUM", log_scale = TRUE)
plot_yearly_sum(gvc_indicators, "i2eH", log_scale = TRUE)
# Yearly sums of the first-differenced log indicators, dropping the base
# year 1995 (no lagged value exists for it).
# NOTE(review): the fdlog* columns are only created in a block that is
# commented out above ("below does NOT work"), so these summaries will fail
# unless those columns were added elsewhere - confirm before running.
# fdloge2r
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdloge2r = sum(fdloge2r, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdloge2r ) %>%
  layer_lines()
# fdloge2r class == L
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdloge2rL = sum(fdloge2rL, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdloge2rL ) %>%
  layer_lines()
# fdloge2r class == LM
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdloge2rLM = sum(fdloge2rLM, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdloge2rLM ) %>%
  layer_lines()
# fdloge2r class == UM
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdloge2rUM = sum(fdloge2rUM, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdloge2rUM ) %>%
  layer_lines()
# fdloge2r class == H
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdloge2rH = sum(fdloge2rH, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdloge2rH ) %>%
  layer_lines()
# fdlogi2e
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdlogi2e = sum(fdlogi2e, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdlogi2e ) %>%
  layer_lines()
# fdlogi2e class == L
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdlogi2eL = sum(fdlogi2eL, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdlogi2eL ) %>%
  layer_lines()
# fdlogi2e class == LM
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdlogi2eLM = sum(fdlogi2eLM, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdlogi2eLM ) %>%
  layer_lines()
# fdlogi2e class == UM
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdlogi2eUM = sum(fdlogi2eUM, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdlogi2eUM ) %>%
  layer_lines()
# fdlogi2e class == H
gvc_indicators %>%
  filter(year > 1995) %>%
  group_by(year) %>%
  summarise(fdlogi2eH = sum(fdlogi2eH, na.rm=TRUE) ) %>%
  ggvis(~year, ~fdlogi2eH ) %>%
  layer_lines()
# add PVC
# add intermediate imports (?)
# plot gvc_length
# RDV: returned domestic value added as a share of total exports
w1995_2011 %>%
  group_by(year) %>%
  summarise( RDV = ( sum(RDV_INT) + sum(RDV_FIN) + sum(RDV_FIN2) ) / sum(texp) ) %>%
  ggvis(~year, ~RDV) %>%
  layer_lines()
# PDC: pure double counting as a share of total exports
w1995_2011 %>%
  group_by(year) %>%
  summarise( PDC = ( sum(DDC_FIN) + sum(DDC_INT) + sum(ODC) + sum(MDC) ) / sum(texp) ) %>%
  ggvis(~year, ~PDC) %>%
  layer_lines()
# both RDV and PDC on one chart
w1995_2011 %>%
  group_by(year) %>%
  summarise( RDV = ( sum(RDV_INT) + sum(RDV_FIN) + sum(RDV_FIN2) ) / sum(texp),
             PDC = ( sum(DDC_FIN) + sum(DDC_INT) + sum(ODC) + sum(MDC) ) / sum(texp) ) %>%
  ggvis(~year, ~RDV, stroke="RDV") %>%
  layer_lines() %>%
  layer_lines(~year, ~PDC, stroke="PDC")
a1c5245eab7e7b9deeaf89527cb916916d90dcc2 | aba7e326509430108d36bbdd4840dc3145887f87 | /bin/RScripts/test.R | a5729fa3a561865fe5645065ad7d120b668c1bc8 | [] | no_license | thakurmm/popgen | e202eb68307b2897d534616af30080316169737a | d3a6a468de8987568e391d45b0eb1d6ac9995039 | refs/heads/master | 2020-03-17T02:33:01.801412 | 2018-10-14T18:08:44 | 2018-10-14T18:08:44 | 133,194,358 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 8,421 | r | test.R | source("/home/greg/School/popgen/bin/RScripts/Chr_kmer_InformativeSNPs_Haploid_HMM.R")
# Record wall-clock start time for the run.
start.time <- Sys.time()
### 10 samples / 40 rows with some modifications
# homologous_filename = "/home/greg/School/popgen/data/chr22.phase3.ASW_CEU_YRI.SNPs.homologous_trimmed40rows_10samples.txt"
# all_admixture_filename = "/home/greg/School/popgen/data/admixture/all.chr.ASW_CEU_YRI.SNPs.homologous.25k.2_10samples.Q"
# admixture_filename = "/home/greg/School/popgen/data/admixture/chr22.ASW_CEU_YRI.SNPs.homologous.25k.2_10samples.Q"
### 10 samples/ 40 rows with no other modifications
# homologous_filename = "/home/greg/School/popgen/data/chr22.phase3.ASW_CEU_YRI.SNPs.homologous_trimmed40rows_10samples.txt"
# all_admixture_filename = "/home/greg/School/popgen/data/admixture/all.chr.ASW_CEU_YRI.SNPs.homologous.25k.2_10samples_correct.Q"
# admixture_filename = "/home/greg/School/popgen/data/admixture/chr22.ASW_CEU_YRI.SNPs.homologous.25k.2_10samples_correct.Q"
### full dataset. start: 11:49
homologous_filename = "/home/greg/School/popgen/data/chr22.phase3.ASW_CEU_YRI.SNPs.homologous.txt"
all_admixture_filename = "/home/greg/School/popgen/data/admixture/all.chr.ASW_CEU_YRI.SNPs.homologous.25k.2.Q"
admixture_filename = "/home/greg/School/popgen/data/admixture/chr22.ASW_CEU_YRI.SNPs.homologous.25k.2.Q"
ASW_ids_filename = "/home/greg/School/popgen/SampleIDs/ASW_Sample_IDs_haploid.txt"
out_filename = "/home/greg/School/popgen/data/R_out.txt"
# k-mer size used by the sourced HMM script (presumably consumed further
# down or by Chr_kmer_InformativeSNPs_Haploid_HMM.R - TODO confirm)
kmer = 5
# Read the homologous SNP table (one row per SNP, one column per strand).
ASW_Chr_Strands <- read.table(homologous_filename, head=T, sep="\t")
# Drop every row whose ID occurs more than once (both occurrences removed,
# not just the later duplicates).
ASW_Chr_Strands <- subset(ASW_Chr_Strands,!(duplicated(ASW_Chr_Strands$ID)|duplicated(ASW_Chr_Strands$ID,fromLast = TRUE)))
###### Function to Create Test Chromosomes ######
Create_Test_Chromosomes <- function(Chr_Strands,Pops,Ref_IDs,Num_Switch){
  # Build a synthetic test chromosome with known local ancestry by stitching
  # together segments of reference strands, switching strand at Num_Switch
  # randomly sampled SNP positions.
  #
  #   Chr_Strands : data frame with a POS column and one column per strand
  #   Pops        : population labels, one per reference strand
  #   Ref_IDs     : column names in Chr_Strands to copy segments from
  #   Num_Switch  : number of random switch points (uses sample(); set a
  #                 seed beforehand for reproducibility)
  #
  # Returns list(Test_Chr, True_Chr): the stitched genotype vector and the
  # true population label for every SNP row.
  #
  # Fixes over the original: match() replaces sapply(which(...)) - it is
  # vectorised and returns exactly one index per position even when a POS
  # value is duplicated (which() would return several and break sapply's
  # simplification); an empty trailing segment (a switch point on the last
  # row) is now skipped instead of writing past the end of the vectors.
  Test_Pos_Chromosome <- sort(sample(Chr_Strands$POS, Num_Switch, replace=FALSE))
  # row index of each switch point
  stop_idx <- match(Test_Pos_Chromosome, Chr_Strands$POS)
  start_idx <- c(0L, stop_idx)
  stop_idx <- c(stop_idx, nrow(Chr_Strands))
  Test_Chr <- numeric(nrow(Chr_Strands))
  True_Chr <- character(nrow(Chr_Strands))
  # Alternate segments between the reference strands/populations:
  # segment i is drawn from strand ((i - 1) mod length(Pops)) + 1.
  for (j in seq_along(Pops)){
    for (i in seq(j, length(start_idx), length(Pops))){
      if (start_idx[i] >= stop_idx[i]) next  # empty segment, nothing to copy
      rows <- (start_idx[i] + 1):stop_idx[i]
      Test_Chr[rows] <- Chr_Strands[rows, Ref_IDs[j]]
      True_Chr[rows] <- Pops[j]
    }
  }
  list(Test_Chr, True_Chr)
}
#### Test Strand 1 ####
# Build two synthetic haploid strands with known (ground-truth) ancestry
# switch points.  Each strand alternates segments copied from a CEU-derived
# reference ("Pop2") and a YRI-derived reference ("Pop1").
print("Creating Test Strands")
### for testing
# Known_Sample1 <- Create_Test_Chromosomes(ASW_Chr_Strands,Pops = c("Pop2","Pop1"),Ref_IDs = c("NA06984_1","NA06994_2"),Num_Switch = 4)
### TODO: use this when using full dataset
Known_Sample1 <- Create_Test_Chromosomes(ASW_Chr_Strands,Pops = c("Pop2","Pop1"),Ref_IDs = c("NA06984_1","NA18486_1"),Num_Switch = 4)
# Element 1 = spliced genotypes, element 2 = true population label per SNP.
Test_1 <- Known_Sample1[[1]]
True_1 <- Known_Sample1[[2]]
### for testing
# Known_Sample2 <- Create_Test_Chromosomes(ASW_Chr_Strands,Pops = c("Pop2","Pop1"),Ref_IDs = c("NA06984_1","NA06994_2"),Num_Switch = 6)
### TODO: use this when using full dataset
Known_Sample2 <- Create_Test_Chromosomes(ASW_Chr_Strands,Pops = c("Pop2","Pop1"),Ref_IDs = c("NA06984_2","NA18486_2"),Num_Switch = 4)
Test_2 <- Known_Sample2[[1]]
True_2 <- Known_Sample2[[2]]
# Ground-truth local-ancestry table: one row per SNP position; start and end
# coincide because the truth is known per single SNP.
True_LA <- data.frame(POS_Start=ASW_Chr_Strands$POS,
                      POS_End=ASW_Chr_Strands$POS,True_1,True_2)
#### Read in Admixture results for all chromosomes ####
# Genome-wide ADMIXTURE proportions; columns are reordered so that Pop1 is
# the component with the largest mean proportion across samples.
# header=FALSE spelled out: the original `head=F` relied on partial argument
# matching and the reassignable shortcut F.
All_ADMIX2 <- read.table(all_admixture_filename, header=FALSE)
num_pops <- ncol(All_ADMIX2)
All_ADMIX2_ordered <- All_ADMIX2[,order(colMeans(All_ADMIX2),decreasing=TRUE)]
colnames(All_ADMIX2_ordered) <- paste0("Pop",1:ncol(All_ADMIX2_ordered))
#### Read in Admixture (k=2) results for single chromosome ####
print("Loading Admixture")
# Single-chromosome proportions; columns are aligned with the genome-wide
# ordering by minimizing the summed difference against genome-wide Pop1.
ADMIX2_unordered <- read.table(admixture_filename, header=FALSE)
Admix_chr_diff_all <- apply(ADMIX2_unordered, 2, function(x) {sum(All_ADMIX2_ordered$Pop1-x)})
ADMIX2 <- ADMIX2_unordered[,order(Admix_chr_diff_all, decreasing=FALSE)]
colnames(ADMIX2) <- paste0("Pop",1:ncol(ADMIX2))
# Sample strand columns start at column 10 of the homologous table
# (columns 1-9 are the VCF-style metadata columns).
rownames(ADMIX2) <- colnames(ASW_Chr_Strands)[10:ncol(ASW_Chr_Strands)]
# Admixture proportions for the two synthetic strands are the fraction of
# SNPs assigned to each population in the ground truth.
Test_ADMIX <- rbind((table(True_LA$True_1)/nrow(True_LA)),(table(True_LA$True_2)/nrow(True_LA)))
Test_IDs <- c("Test_1","Test_2")
row.names(Test_ADMIX) <- Test_IDs
ADMIX2_with_Test <- rbind(ADMIX2,Test_ADMIX)
#### Subset ASW from full data ####
print("Subsetting - ASW Only")
ASW_ids <- read.table(ASW_ids_filename, header=FALSE)
# Keep the metadata columns plus only the ASW sample strands.
ASW_Only_Chr_Strands <- ASW_Chr_Strands[,c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT",as.character(ASW_ids$V1))] ### TODO: uncomment this for full data
# ASW_Only_Chr_Strands <- ASW_Chr_Strands
ADMIX2_with_Test_ASW_Only <- ADMIX2_with_Test[c(as.character(ASW_ids$V1),"Test_1","Test_2"),] ### TODO: readd this
# ADMIX2_with_Test_ASW_Only <- ADMIX2_with_Test
Prob_Pops <- (colSums(ADMIX2_with_Test_ASW_Only)/nrow(ADMIX2_with_Test_ASW_Only)) # Prob(Pop1) & Prob(Pop2), respectively
# #### Update ASW_Chr_Strands with Test Individuals ####
# Append the two synthetic strands as extra sample columns.
ASW_Chr_Strands_with_Test <- data.frame(ASW_Only_Chr_Strands,Test_1 = Test_1, Test_2 = Test_2)
# print("ASW_Chr_Strands_with_Test:")
# print(ASW_Chr_Strands_with_Test)
# #### Select the most informative SNPs ####
# SelectInformativeSNPs is defined elsewhere in this project; it keeps the
# SNPs whose allele-frequency difference between populations is in the top
# diff_quantile fraction.
### TODO: was originally 0.1
Chr_Strands_with_Test_top <- SelectInformativeSNPs(ASW_Chr_Strands_with_Test, Prob_Pops, ADMIX2_with_Test_ASW_Only, diff_quantile = 0.1)
# Chr_Strands_with_Test_top <- SelectInformativeSNPs(ASW_Chr_Strands_with_Test, Prob_Pops, ADMIX2_with_Test_ASW_Only, diff_quantile = 0.3)
# print("Chr_Strands_with_Test_top:")
# print(Chr_Strands_with_Test_top)
# #### Create new data frame with k consecutive snps together (k-mers) ####
# CreateKmerHaplotypes (project function) groups kmer consecutive SNPs into
# haplotype substrings.
Chr_Strand_with_Test_Substrings <- CreateKmerHaplotypes(Chr_Strands_with_Test_top, kmer)
# print("Chr_Strand_with_Test_Substrings:")
# print(Chr_Strand_with_Test_Substrings)
# #### Create Emission Matrices for each k-mer ####
# All 2^kmer possible 0/1 haplotype strings.  permutations() comes from the
# gtools package (assumed attached elsewhere in the project).
kmer_haplotypes <- apply(permutations(n = 2, r = kmer, v = c("0","1"), repeats=TRUE), 1, paste, collapse="")
# print("kmer_haplotypes:")
# print(kmer_haplotypes)
# log2-scale emission probabilities per k-mer (project function).
log2_emissionmatrices <- CreateEmissionMatrix(Chr_Strand_with_Test_Substrings, ADMIX2_with_Test_ASW_Only, Prob_Pops, kmer_haplotypes)
# print("log2_emissionmatrices:")
# print(log2_emissionmatrices)
# #### Local Ancestry Inference ####
# Run the forward/backward HMM-style local-ancestry inference for the two
# test strands (project function).
Train_with_Test_LA <- BothDirectionsLocalAncestry_Prob(Chr_Strand_with_Test_Substrings, ADMIX2_with_Test_ASW_Only, Test_IDs, log2_emissionmatrices, kmer, recomb_rate = 0.001)
# #### Assemble results and write output ####
# First list element holds the combined both-directions inference.
Train_with_Test_LA_Both <- Train_with_Test_LA[[1]]
# Ground-truth labels per SNP, restricted to the k-mer start positions that
# survived informative-SNP selection.
True_Full <- data.frame(POS=ASW_Only_Chr_Strands$POS, True_1, True_2)
True_Substrings <- merge(Chr_Strand_with_Test_Substrings[,1:3], True_Full, by.x="POS_Start", by.y="POS")
# Interleave inferred and true ancestry for the two test strands.
Test_LA_Reordered <- data.frame(Train_with_Test_LA_Both[,1:3], True_1=True_Substrings$True_1, Test_2=Train_with_Test_LA_Both[,4], True_2=True_Substrings$True_2)
print("Test_LA_Reordered:")
print(head(Test_LA_Reordered, n=10))
print(tail(Test_LA_Reordered, n=10))
# quote=FALSE / row.names=FALSE spelled out instead of the reassignable F.
write.table(Test_LA_Reordered, out_filename, quote=FALSE, sep="\t", row.names=FALSE)
# sub() anchored to the file extension: the original gsub("txt", "png", ...)
# would also have rewritten "txt" occurring anywhere else in the path.
out_png_filename <- sub("\\.txt$", ".png", out_filename)
end.time <- Sys.time()
time.taken <- end.time - start.time
print("time taken:")
print(time.taken)
# Render the local-ancestry plot (project function).
PlotLocalAncestry(LocalAncestryDataFrame = Test_LA_Reordered, kmer, imagename=out_png_filename)
|
5876a3720cbe6c5db8f3804152135a4d3e09adc0 | 75db3ebe5afac158914d0be184d231dfcfda83c4 | /plot3.R | c42ba6d9ffa2dd246811bf5ce0c92a6a9fd0347b | [] | no_license | kyleppierce/ExData_Plotting1 | 348fef7193f2e4163ca077bbdfab8f97b5bf9aee | d2fefa68b14567bd356d39103cc01b55c7153c60 | refs/heads/master | 2020-12-25T07:24:30.077864 | 2015-09-11T17:16:29 | 2015-09-11T17:16:29 | 42,232,026 | 0 | 0 | null | 2015-09-10T08:22:29 | 2015-09-10T08:22:29 | null | UTF-8 | R | false | false | 1,189 | r | plot3.R | # Download and extract the data file if it does not already exist in the working directory
if (!file.exists("household_power_consumption.txt")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                "household_power_consumption.zip", method="curl")
  unzip("household_power_consumption.zip")
}
# Read file into R; "?" marks missing values in this dataset.
# (<- replaces the original `=` top-level assignments.)
x <- read.csv("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?")
# Combine Date and Time into a single POSIXct timestamp.  POSIXct is used
# instead of the original POSIXlt because list-based POSIXlt columns are
# fragile inside data frames.
x$Timestamp <- as.POSIXct(strptime(paste(as.character(x$Date), as.character(x$Time)), "%d/%m/%Y %H:%M:%S"))
# Subset to the dates 2007-02-01 and 2007-02-02 (character bounds are
# converted in the local time zone, matching the strptime conversion above).
x <- x[x$Timestamp >= "2007-02-01" & x$Timestamp < "2007-02-03", ]
# Create PNG plot
# Open PNG graphics device
png(filename="plot3.png", width=480, height=480)
# Plot the three sub-metering series and a legend
with(x, plot(Timestamp, Sub_metering_1, type="l", xlab="", ylab="Energy sub metering"))
with(x, lines(Timestamp, Sub_metering_2, col="red"))
with(x, lines(Timestamp, Sub_metering_3, col="blue"))
legend("topright", lty=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Close PNG graphics device
dev.off()
72a03af45a0d638643ddc445628ceb1c24aee541 | 8e83d9415e9e4ffe7e9287a4cbe06080905cfcc3 | /R/Rank_Countries.R | 6cda0921e952b8630e8d29eeb711ac32601f0857 | [] | no_license | unimi-dse/c1377f4b | 5fc30dab792f79e14ca197d81ca141cf8df93f3e | fbe7294d5863143c8307b5c071b5a14854dffdca | refs/heads/master | 2020-12-23T20:47:25.710468 | 2020-02-16T22:58:30 | 2020-02-16T22:58:30 | 237,269,820 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 667 | r | Rank_Countries.R | #'Rank_Countries
#'
#'@description The function shows the number of universities for each country.
#'
#'@return plot
#'
#'@export
Rank_Countries <- function() {
library(dplyr)
library(ggplot2)
#create a data frames with the all universities in Ranking and count them by country
RankingCount <- cwur.data %>% group_by(country) %>% summarise(count=n())
#create the ggplot containing
f1 <- ggplot(RankingCount, aes(x=reorder(country, -count), y=count, fill=country))+geom_bar(stat="identity")+coord_flip()+ theme(legend.position="none")+ labs(x="Country",y="Count")+ ggtitle("Number of universities for each country from 2012 to 2015")
return(f1)
}
|
28657005b17ef29210e05a21b3b80ca2dfb3883f | 6cadd782369f62eba18711238587efaa52b9b7b8 | /tableanalysis.R | dda9585a0c0d54f017e15899324c3eb9ffd8a930 | [] | no_license | derekfranks/tableanalysis | db9b16dbd9aa6e220165daa1351811038cf2ff32 | 23e6e9b552443457c7d1adf4f411fd941f40057d | refs/heads/master | 2021-01-19T16:50:33.167591 | 2014-06-04T00:14:24 | 2014-06-04T00:14:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,672 | r | tableanalysis.R | data.analysis <- function(arguments){
}
# Print the raw contingency table of var (rows) by segment (columns),
# followed by the column-wise proportions rounded to two decimals.  The
# rounded proportion table is returned invisibly (value of the last print).
make.tables <- function(var, segment) {
  counts <- table(var, segment)
  print(counts)
  col_props <- prop.table(counts, margin = 2)
  print(round(col_props, digits = 2))
}
# Chi-squared test of independence on a contingency table, followed (when
# significant) by pairwise column proportion tests for every row.
#
# Args:
#   table: a contingency table (rows = dependent variable categories,
#     columns = groups/segments to compare).
#   alpha: significance level for both the chi-squared gate and the
#     pairwise tests.
#   detail: if TRUE, also print the column-pair index table and each
#     individual prop.test result.
#   correction: multiple-comparison adjustment method passed to p.adjust().
#
# NOTE(review): combinations() is not base R — presumably gtools::combinations;
# confirm the package is attached by the caller.
table.analysis <- function(table, alpha=.05, detail=FALSE, correction="holm") {
  #run chisq.test against table
  tab_1 <- table
  print(tab_1)
  print(round(prop.table(tab_1, 2), 2))
  tab_1_chisq <- chisq.test(tab_1)
  print(tab_1_chisq)
  #if pvalue of chisq is < alpha, then proceed with pairwise prop comparisons
  if (tab_1_chisq$p.value < alpha) {
    # perm <- permutations(ncol(tab_1),2)
    # All unordered column pairs; one row per pair.
    comb <- combinations(ncol(tab_1),2)
    #create a matrix with the combinations based on number of columns
    # comp_tab accumulates, per cell, the letters of the columns this
    # column's proportion is significantly GREATER than.
    comp_tab <- matrix(nrow=nrow(tab_1),ncol=ncol(tab_1))
    # comp_tab <- as.table(matrix(nrow=nrow(tab_1),ncol=ncol(tab_1)))
    comp_tab[is.na(comp_tab)] <- ""
    # colnames(comp_tab) <- LETTERS[1:ncol(comp_tab)]
    # rownames(comp_tab) <- rownames(tab_1)
    #loop through each row of the table and conduct a prop test for EACH column pairing
    for (i in 1:nrow(tab_1)) {
      for (k in 1:nrow(comb)) {
        # Successes = this row's counts in the two columns; trials = the
        # two column totals.
        counts <- c(tab_1[i,comb[k,1]], tab_1[i,comb[k,2]])
        totals <- c(sum(tab_1[,comb[k,1]]), sum(tab_1[,comb[k,2]]))
        result <- prop.test(counts, totals, correct=F)
        # Adjust for the number of pairs compared (per row).
        result$p.value <- p.adjust(result$p.value, method=correction, n=nrow(comb))
        #build comp_table that summarizes the results of the pairwise prop tests
        # Append the losing column's letter to the winning column's cell.
        if (result$p.value <= alpha & result$estimate[[1]] < result$estimate[[2]]) {
          comp_tab[i,comb[k,2]] <- paste(comp_tab[i,comb[k,2]], letters[comb[k,1]], sep="")
        }
        if (result$p.value <= alpha & result$estimate[[1]] > result$estimate[[2]]) {
          comp_tab[i,comb[k,1]] <- paste(comp_tab[i,comb[k,1]], letters[comb[k,2]], sep="")
        }
      }
    }
    print(comp_tab)
    if (detail==TRUE) {
      print("Pair comparison table:")
      print(comb)
    }
    #loop through each row of the table and conduct a prop test for EACH UNIQUE column pairing
    # NOTE(review): this second loop recomputes the exact same prop tests as
    # the first, purely so the detailed results can be printed after the
    # summary matrix; consider caching results instead.
    for (i in 1:nrow(tab_1)) {
      for (k in 1:nrow(comb)) {
        counts <- c(tab_1[i,comb[k,1]], tab_1[i,comb[k,2]])
        totals <- c(sum(tab_1[,comb[k,1]]), sum(tab_1[,comb[k,2]]))
        result <- prop.test(counts, totals, correct=F)
        result$p.value <- p.adjust(result$p.value, method=correction, n=nrow(comb))
        if (detail==TRUE) {
          print(paste("Row:",as.character(i)," ", "Pair comparison detail:", as.character(k),sep=" "))
          print(result)
        }
      }
    }
  }
  # Chi-squared not significant: stop rather than run pairwise tests.
  else {stop("Variables are independent of each other")
  }
}
4a37e8f9e1f1137dcf99cd81c7063927162ffc54 | 0f8d1c1435f1417aaf3b01bfdb1fba0a6dce6942 | /man/delete.Rd | 2ec1c6f680a31200eb62343b32b204939d2721e3 | [] | no_license | jburel/rOMERO-gateway | 281711608447c74139096b344bba729dacefd88b | c1f0119675f566b4ae7ab0f91ea65783f5dcd159 | refs/heads/master | 2022-09-26T21:06:34.710460 | 2022-09-07T17:11:38 | 2022-09-07T17:11:38 | 45,210,719 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 290 | rd | delete.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OMERO.R
\name{delete}
\alias{delete}
\title{Deletes an OME object}
\usage{
delete(omero)
}
\arguments{
\item{omero}{The OME object}
}
\description{
Deletes an OME object
}
\examples{
\dontrun{
delete(object)
}
}
|
ec0d1d4dc256d2dcd598c9d0b4005fa9756c8ab6 | 8b707fe9f766df90ed3020daa4b0b403ef830a83 | /man/reactome.ext_name.Rd | abd3785a2ff7732be31f4e9aae447519a96e5688 | [] | no_license | utnesp/Easy-bioMart | 2899b599ff78ad0dee6f6f9f630af265520a0259 | a8d6f34e7b64bab3f25674da7d48e91ca6f93985 | refs/heads/master | 2021-07-20T14:48:34.759649 | 2021-03-18T12:12:17 | 2021-03-18T12:12:17 | 85,055,811 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 821 | rd | reactome.ext_name.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/easybiomart.R
\name{reactome.ext_name}
\alias{reactome.ext_name}
\alias{reactome.ext_name}
\title{Biomart conversion}
\usage{
reactome.ext_name(ext_name.diff.exp, return.annotated.counts = F, ranks = 0,
biomart = mart, pvalueCutoff = 0.05, readable = T, df2 = "",
by.x = "external_gene_name", by.y = "external_gene_name", all = F,
org = "human", universe = "")
reactome.ext_name(ext_name.diff.exp, return.annotated.counts = F, ranks = 0,
biomart = mart, pvalueCutoff = 0.05, readable = T, df2 = "",
by.x = "external_gene_name", by.y = "external_gene_name", all = F,
org = "human", universe = "")
}
\arguments{
\item{ext_name.diff.exp}{input}
\item{ext_name.diff.exp}{input}
}
\examples{
reactome.ext_name()
reactome.ext_name()
}
|
87fe71019b99da717ffd6f3a3ec9401c10209846 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /stream/R/DSD_ReadCSV.R | 2c3411c13df8961e6d9b02adfe5569ab643a624b | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,082 | r | DSD_ReadCSV.R | #######################################################################
# stream - Infrastructure for Data Stream Mining
# Copyright (C) 2013 Michael Hahsler, Matthew Bolanos, John Forrest
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
### FIXME: test class and take
# Constructor for a data-stream source (DSD) backed by a CSV-style file.
#
# Args:
#   file: a file path (opened as a connection) or an already-open connection.
#   k, o: optional number of clusters / outliers represented in the stream.
#   take: column names or indices to keep (NULL = all columns).
#   class: column (name, or index relative to take) holding class labels.
#   outlier: column holding logical outlier flags; must be given with o.
#   loop: restart from the beginning when exhausted (needs seekable input).
#   sep, header, skip, colClasses, ...: forwarded to read.table().
DSD_ReadCSV <- function(file, k=NA, o=NA,
                        take=NULL, class=NULL, outlier=NULL, loop=FALSE,
                        sep=",", header=FALSE, skip=0, colClasses = NA, ...) {
  env <- environment()
  # o and outlier must be supplied together.
  if(is.na(o) && !is.null(outlier))
    stop("The outlier column is defined, but the number of outliers is not supplied")
  if(!is.na(o) && is.null(outlier))
    stop("The number of outliers is supplied, but the outlier column was not supplied")
  # if the user passes a string, create a new connection and open it
  # (base is.character()/inherits() replace methods::is())
  if (is.character(file)) file <- file(file)
  # error if no string or connection is passed
  if (!inherits(file, "connection")) stop("Please pass a valid connection!")
  # open the connection if its closed
  if (!isOpen(file)) open(file)
  # filename, used in the stream description
  filename <- basename(summary(file)$description)
  # looping requires the ability to rewind
  if(loop && !isSeekable(file)) stop("Loop only allowed for seekable connections!")
  # read first point to figure out structure!
  if(skip>0) readLines(file, n=skip)
  point <- read.table(text=readLines(con=file, n=1+header),
                      sep=sep, header=header, colClasses = colClasses, ...)
  # reset stream if possible (otherwise first point is lost)
  if(isSeekable(file)) {
    seek(file, where=0)
    if(skip>0) readLines(file, n=skip)
    if(header) readLines(file, n=1)
  }
  # select columns take (names are partially matched against the header)
  if(!is.null(take)) {
    if(is.character(take)) take <- pmatch(take, colnames(point))
    if(any(is.na(take))) stop("Invalid column name specified in take!")
    point <- point[,take]
  }
  # data dimensions: the class column, if any, does not count
  d <- ncol(point) - !is.null(class)
  # remember column names only when a header row exists
  if(header) header <- colnames(point)
  else header <- NULL
  # per-column types inferred from the probe row
  colClasses <- sapply(point[1,], class)
  ### integer -> numeric, factor -> character
  colClasses[colClasses == "integer"] <- "numeric"
  colClasses[colClasses == "factor"] <- "character"
  # resolve the class column to a numeric index (relative to take)
  if(is.character(class)) {
    if(is.null(header)) stop("Only numeric column index allowed if no headers are available!")
    class <- pmatch(class, header)
    if(is.na(class)) stop("No matching column name for class!")
  } else if(!is.null(class)) {
    if(!is.null(take)) class <- match(class, take)
    if(is.na(class)) stop("Invalid class column index!")
  }
  # resolve the outlier column likewise
  if(is.character(outlier)) {
    if(is.null(header)) stop("Only numeric column index allowed if no headers are available!")
    outlier <- pmatch(outlier, header)
    if(is.na(outlier)) stop("No matching column name for outlier indicators!")
  } else if(!is.null(outlier)) {
    if(!is.null(take)) outlier <- match(outlier, take)
    if(is.na(outlier)) stop("Invalid outlier column index!")
  }
  # outlier flags must be logical; !is.logical() replaces the fragile
  # class(x) != "logical" comparison, which fails for multi-class objects
  if(!is.null(outlier) && !is.logical(point[1,outlier]))
    stop("Outlier column must have logical values!")
  # creating the DSD object
  l <- list(
    description = paste('File Data Stream (', filename, ')', sep=''),
    d = d,
    k = k,
    o = o,
    file = file,
    sep = sep,
    take = take,
    header = header,
    colClasses = colClasses,
    read.table.args = list(...),
    class = class,
    outlier = outlier,
    loop = loop,
    skip = skip,
    env = env)
  class(l) <- c("DSD_ReadCSV", "DSD_R", "DSD_data.frame", "DSD")
  l
}
## it is important that the connection is OPEN
# Read the next n data points from a DSD_ReadCSV stream.
#
# Args:
#   x: DSD_ReadCSV object (its $file connection must be open).
#   n: number of points requested.
#   outofpoints: behavior when fewer than n points remain and loop=FALSE:
#     "stop" (error, read position restored if seekable), "warn", "ignore".
#   cluster/class/outlier: attach cluster attribute / class column /
#     outlier attribute to the returned data frame.
# Returns: a data frame with up to n rows (0 rows when no data is left).
get_points.DSD_ReadCSV <- function(x, n=1,
  outofpoints=c("stop", "warn", "ignore"),
  cluster = FALSE, class = FALSE, outlier=FALSE, ...) {
  .nodots(...)
  .DEBUG <- TRUE
  #.DEBUG <- FALSE
  outofpoints <- match.arg(outofpoints)
  # msg is the condition function matching the requested out-of-points
  # policy; noop silently ignores.
  noop <- function(...) {}
  msg <- switch(outofpoints,
    "stop" = stop,
    "warn" = warning,
    "ignore" = noop
  )
  n <- as.integer(n)
  ## remember position (so a failed "stop" read can be undone)
  if(!isSeekable(x$file)) pos <- NA else pos <- seek(x$file)
  d <- NULL
  eof <- FALSE
  ## only text connections can do read.table without readLine (would be faster)
  #if(summary(x$file)$text == "text"){
  #  suppressWarnings(
  #    try(d <- do.call(read.table, c(list(file=x$file, sep=x$sep, nrows=n,
  #      colClasses=x$colClasses), x$read.table.args)),
  #      silent = TRUE))
  #}
  # Read raw lines first, then parse; malformed input leaves d NULL.
  try(lines <- readLines(con=x$file, n=n), silent = !.DEBUG)
  ## EOF?
  if(length(lines) < 1) eof <- TRUE
  else {
    suppressWarnings(
      try(d <- do.call(read.table,
        c(list(text=lines, sep=x$sep, nrows=n,
          colClasses=x$colClasses), x$read.table.args)),
        silent = !.DEBUG))
  }
  if(eof) msg("The stream is at its end (EOF)!")
  ## loop? — handle short reads: either complain (no loop) or rewind and
  ## keep reading until n rows are collected.
  if(is.null(d) || nrow(d) < n || eof) {
    if(!x$loop) {
      ## try to undo read in case of stop
      if(outofpoints == "stop" && !is.na(pos)) seek(x$file, pos)
      if(!eof) msg("Not enough points in the stream!")
    } else { ## looping
      while(nrow(d) < n) {
        reset_stream(x)
        try(lines <- readLines(con=x$file, n=n-nrow(d)), silent = !.DEBUG)
        ## EOF?
        if(length(lines) == 0) eof <- TRUE
        else {
          d2 <- NULL
          suppressWarnings(
            try(d2 <- do.call(read.table,
              c(list(text=lines, sep=x$sep, nrows=n,
                colClasses=x$colClasses), x$read.table.args)),
              silent = !.DEBUG))
          # NOTE(review): nrow(d2 > 0) looks like it was meant to be
          # nrow(d2) > 0; it works by accident because the comparison
          # preserves the row count. Confirm and simplify upstream.
          if(!is.null(d2) && nrow(d2 > 0)) d <- rbind(d, d2)
          else msg("Read failed (use smaller n for unreliable sources)!")
        }
      }
    }
  }
  ## no data! — build a 0-row data frame matching the stream's column types
  if(is.null(d)) {
    if(!eof) msg("Read failed (use smaller n for unreliable sources)!")
    ## create conforming data.frame with 0 rows
    d <- data.frame()
    for(i in 1:length(x$colClasses))
      d[[i]] <- do.call(x$colClasses[i], list(0))
  } else {
    ## take columns
    if(!is.null(x$take)) d <- d[,x$take, drop=FALSE]
  }
  ## remove additional columns from a bad line
  if(ncol(d) > x$d+!is.null(class)) d <- d[, 1:(x$d+!is.null(class)), drop=FALSE]
  if(nrow(d)>0) {
    if(!is.null(x$header)) colnames(d) <- x$header
    # removals collects indices of metadata columns (class/outlier) that
    # are extracted into cl/out and then dropped from the data.
    removals <- c()
    ### handle missing cluster/class info
    if(!is.null(x$class)) {
      cl <- d[,x$class]
      removals <- c(x$class)
    }else{
      if(cluster || class) {
        cl <- rep(NA_integer_, nrow(d))
      }
    }
    ### handle outlier info
    if(!is.null(x$outlier)) {
      out <- d[,x$outlier]
      removals <- c(removals, x$outlier)
    }else{
      out <- rep(FALSE, nrow(d))
    }
    if(length(removals)>0) d <- d[,-removals, drop=FALSE]
    if(class) d <- cbind(d, class = cl)
    if(cluster) attr(d, "cluster") <- cl
    if(outlier) attr(d, "outlier") <- out
  }
  d
}
# Rewind a DSD_ReadCSV stream so the next read starts at data point `pos`
# (1-based, counted after any skipped lines and the header row).
#
# Args:
#   dsd: DSD_ReadCSV object whose $file connection must be seekable.
#   pos: data row to reset to; defaults to the start of the stream.
reset_stream.DSD_ReadCSV <- function(dsd, pos=1) {
  pos <- as.integer(pos)
  # Typo fixed in the error message ("conneciton" -> "connection").
  if(!isSeekable(dsd$file)) stop("Underlying connection does not support seek!")
  # Rewind to byte 0, then consume the skip lines, the header (if any) and
  # the first pos-1 data rows so the next readLines() returns row pos.
  seek(dsd$file, where=0)
  if(dsd$skip>0) readLines(dsd$file, n=dsd$skip)
  if(!is.null(dsd$header)) readLines(dsd$file, n=1)
  if(pos>1) readLines(dsd$file, n=pos-1L)
  invisible(NULL)
}
# Close the file connection underlying a DSD_ReadCSV stream.
#
# Args:
#   dsd: a DSD_ReadCSV object; anything else raises an error.
close_stream <- function(dsd) {
  # inherits() is the base-R class test; the original used methods::is(),
  # which requires the methods package to be attached.
  if(!inherits(dsd, "DSD_ReadCSV"))
    stop("'dsd' is not of class 'DSD_ReadCSV'")
  close(dsd$file)
}
|
a1099cfce6b2618ce53ff401290abb53f82d3fe1 | 83810add04e2c27e7c787157a949ddf73b87d14e | /man/dice.Rd | cee1ab2337b66b605f9d737e6b45a9b1afba3911 | [] | no_license | cran/clttools | 021f0d6bcdffd9cfe9d738baebcd1d9580db469f | 1c57ad403228168c8257c0de33f219afc98cc6c3 | refs/heads/master | 2021-01-21T21:48:30.238305 | 2016-02-19T08:49:57 | 2016-02-19T08:49:57 | 41,270,500 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 659 | rd | dice.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/clttools.R
\name{dice}
\alias{dice}
\title{Theoretical Probability Distribution of Rolling Dice}
\usage{
dice(n, prob = NULL)
}
\arguments{
\item{n}{number of trials}
\item{prob}{probability assigned to each possible outcome}
}
\value{
Mean value and corresponding probabilities for all possible outcomes.
}
\description{
Mean and probability of rolling fair or loaded dice
}
\details{
The default probability of each outcome equals 1/n. All assigned probabilities must be between 0 and 1.
}
\examples{
dice(n = 4)
dice(2, c(0.1, 0.2, 0.2, 0.1, 0.3, 0.1))
}
|
261bf44ca2a10b4286debff76d7133bad9dd4cb0 | e637cebd1ee538624cdbde83146d380f9d266687 | /R/WindNinja/MeshCount/meshCountAnalyzer.R | e89ba404fe30f742151d95a2f3fbb5b68a17d44c | [] | no_license | latwood/fireLabWork | 3809b1c0cfc60a61b764fea6e17d2af8a2354de9 | fe99852e8279fa53bfc512682c433ddcf343b2bc | refs/heads/master | 2021-01-19T16:20:53.764982 | 2018-07-26T05:57:52 | 2018-07-26T05:57:52 | 101,000,951 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,539 | r | meshCountAnalyzer.R | #command to run this program (includes the current file location)
#source('C:\\Users\\latwood\\Documents\\MyDocuments\\Windninja\\meshcount_project\\meshCountAnalyzer.R')
#the 'rm(list = ls())' command deletes everything in the workplace
rm(list=ls())
#IMPORTANT!!!! always call 'rm(list = ls())' before running on any files with different iteration counts and stuff.
#All the old information is kept, so matrices still have the old values. This can throw off the plot dimensions
#Might need to delete some files in the directories before running this program!
#record start time
programstarttime <- proc.time()[3]
#import libraries
library(raster)
library(rasterVis)
library(grid)
#section for main variables that change each time
#this is the path to the main plot outputs. Is the folder tree for the different folder paths holding the files to be processed
mainFileLocation <- "C:\\Users\\latwood\\Documents\\MyDocuments\\Windninja\\meshcount_project\\BigButteSmall\\"
#these are the individual folders in the folder tree holding each of the files to be processed. Currently these names are used as plot titles as well
processingFolderNames <- "20,000 cells"
processingFolderNames[2] <- "50,000 cells"
processingFolderNames[3] <- "100,000 cells"
processingFolderNames[4] <- "Coarse"
processingFolderNames[5] <- "150,000 cells"
processingFolderNames[6] <- "200,000 cells"
processingFolderNames[7] <- "300,000 cells"
processingFolderNames[8] <- "400,000 cells"
processingFolderNames[9] <- "500,000 cells"
processingFolderNames[10] <- "Medium"
processingFolderNames[11] <- "600,000 cells"
processingFolderNames[12] <- "700,000 cells"
processingFolderNames[13] <- "800,000 cells"
processingFolderNames[14] <- "900,000 cells"
processingFolderNames[15] <- "1,000,000 cells"
processingFolderNames[16] <- "Fine"
#this sets up a numeric vector for the manipulated variable. Is a numeric representation of the individual plot titles used as the x axis for the residual plots
numericXlabels <- 20000
numericXlabels[2] <- 50000
numericXlabels[3] <- 100000
numericXlabels[4] <- 100000 #coarse
numericXlabels[5] <- 150000
numericXlabels[6] <- 200000
numericXlabels[7] <- 300000
numericXlabels[8] <- 400000
numericXlabels[9] <- 500000
numericXlabels[10] <- 500000 #medium
numericXlabels[11] <- 600000
numericXlabels[12] <- 700000
numericXlabels[13] <- 800000
numericXlabels[14] <- 900000
numericXlabels[15] <- 1000000
numericXlabels[16] <- 1000000 #fine
#these are the specific file names to be processed in each folder. Currently this is set up to be the same filenames across each folder
velocityAscFileName <- "big_butte_small_220_10_31m_vel.asc"
angleAscFileName <- "big_butte_small_220_10_31m_ang.asc"
#This is the folder name with the converged data to use for all comparisons
convergedName <- processingFolderNames[16]
#these are for controlling output plot size for all regular plots
graphHeight <- 2000
graphWidth <- 2000
#these are for controlling output plot size for all raster plots. Depending on the length and width of the elevation file of interest, this greatly affects general spacing for these plots
#I made the height smaller than the width for the salmon river. Everything else has so far been the same height and width
rasterGraphHeight <- 2000
rasterGraphWidth <- 2000
#section for main variables that don't change each time but could
#this is the number of files to be processed and controls a for loop during the program. It is important that numericXlabels and processingFolderNames are of the same length
filecount <- length(processingFolderNames)
#include functions in this section
# Convert a wind speed and meteorological direction (degrees) to the u
# (east-west) vector component.  Adapted from the windtools package.
speed2u <- function(s, d) {
  -s * sin(d * pi / 180)
}
# Convert a wind speed and meteorological direction (degrees) to the v
# (north-south) vector component.  Adapted from the windtools package.
speed2v <- function(s, d) {
  -s * cos(d * pi / 180)
}
# Recover the wind speed (vector magnitude) from u and v components.
# Adapted from the windtools package.
uv2speed <- function(u, v) {
  (u * u + v * v)^0.5
}
# Bin a numeric vector of wind speeds into four labelled brackets covering
# [0, range], returned as a factor whose levels run fastest-first (the order
# the plot legend expects).  Adapted from the windtools package.
#
# Args:
#   speedVector: numeric vector of speeds.
#   range: upper end of the binning scale; the default 20 preserves the
#     original hard-coded behaviour.
binSpeeds <- function(speedVector, range = 20) {
  b1 <- round(0.25 * range, digits = 1)
  b2 <- round(0.5 * range, digits = 1)
  b3 <- round(0.75 * range, digits = 1)
  lo <- 0
  hi <- range
  # Bracket labels reproduce the original formatting exactly (the outer
  # brackets use paste() with spaces, the inner ones paste0()).
  labels <- c(paste(lo, "-", b1), paste0(b1, "-", b2),
              paste0(b2, "-", b3), paste(b3, "-", hi))
  # cut() replaces the original element-wise loop; right = FALSE matches the
  # strict "< upper bound" comparisons.  NA speeds now yield NA instead of
  # erroring inside if().
  b <- cut(speedVector, breaks = c(-Inf, b1, b2, b3, Inf),
           labels = labels, right = FALSE)
  # Reorder the factor levels fastest-first for the legend.
  factor(b, levels = rev(labels))
}
# Flatten a velocity/angle raster pair into a plotting data frame with
# columns meshcount, lon, lat, spd, dir (WGS84 lon/lat).  Cells are
# coarsened 8x so the arrow plot stays readable.
rasterPrep <- function(rVelocity, rAngle, theTitle) {
  # Stack the two layers and coarsen by a factor of 8 (mean aggregation).
  layers <- aggregate(brick(rVelocity, rAngle), fact = 8, fun = mean)
  # Convert cells to spatial points and reproject to WGS84 lon/lat.
  pts <- rasterToPoints(layers, spatial = TRUE)
  colnames(pts@data) <- c("spd", "dir")
  pts <- spTransform(pts, CRS("+proj=longlat +datum=WGS84"))
  # Assemble a plain data frame: coordinates first, then speed/direction.
  out <- cbind(pts@coords, pts@data)
  colnames(out) <- c("lon", "lat", "spd", "dir")
  # Tag every row with the run title; used as the facet variable downstream.
  meshcount <- rep(theTitle, length(out$spd))
  cbind(meshcount, out)
}
#this is a function adapted from Natalie's wind tools for plotting with arrows
# Render a faceted wind-vector map: equal-length arrows oriented by wind
# direction, coloured by binned speed, drawn over a Google/Stamen basemap.
#
# Args:
#   df: data frame from rasterPrep() with columns meshcount, lon, lat,
#     spd, dir.
#   lat, lon, zoom, maptype: basemap parameters passed to ggmap::get_map().
#   theTitle: plot title.
#   colorscale: 'discrete' (binned speeds) or anything else for a gradient.
#   axis_labels: show lat/lon tick labels when TRUE.
# Returns: a ggplot object (one facet per meshcount value).
bigRasterToVectorMap <- function(df, lat, lon, zoom, maptype,theTitle="Main", colorscale='discrete',
                                 axis_labels=FALSE){
  stopifnot(require("ggmap"))
  stopifnot(require("grid"))
  myMap<-get_map(location = c(lon=lon, lat=lat), zoom=zoom, maptype=maptype)
  p <- ggmap(myMap)
  if(colorscale=='discrete'){
    #scale u and v so that speed = 1, maintaining u:v ratio
    #this will allow us to plot vectors of equal length, but oriented in the correct direction
    u_scaled<-mapply(speed2u, 2, df$dir)
    v_scaled<-mapply(speed2v, 2, df$dir)
    speed_bracket <- binSpeeds(df$spd)
    df <- cbind(df, u_scaled, v_scaled, speed_bracket)
    # Each arrow is centred on its cell and offset by the scaled components
    # (divided by 1500 to convert to degrees of lon/lat on the map).
    p <- p + geom_segment(data=df,aes(x=lon+u_scaled/1500.0, y=lat+v_scaled/1500.0,
      xend = lon-u_scaled/1500.0, yend = lat-v_scaled/1500.0,
      colour = speed_bracket), arrow = arrow(ends="first", length = unit(0.2, "cm")), size = 0.7) +
      scale_colour_manual(values = c("red", "darkorange", "darkgreen", "blue"), name="Speed (mph)") +
      labs(title=theTitle) + facet_wrap( ~ meshcount)
  }
  # NOTE(review): this continuous branch references df$u, df$v and
  # obs_speed, none of which are produced by rasterPrep(); it looks stale
  # relative to the discrete branch — confirm before using
  # colorscale != 'discrete'.
  else{
    p <- p + geom_segment(data=df, aes(x=lon+u/1500.0, y=lat+v/1500.0,
      xend = lon-u/1500.0, yend = lat-v/1500.0,
      colour = obs_speed), arrow = arrow(ends="first", length = unit(0.2, "cm")), size = 0.7) +
      scale_colour_gradient(limits=c(min(df$spd),max(df$spd)), name="Speed (m/s)", low="blue", high="red")
  }
  # Enlarge titles/legend and facet strips for the 2000x2000 px output.
  p <- p + theme(plot.title=element_text(size=32))
  p <- p + theme(legend.title=element_text(size=28))
  p <- p + theme(legend.text=element_text(size = 24))
  p <- p + theme(legend.key.size=unit(2.25,"cm"))
  p <- p + theme(strip.text=element_text(face="bold",size=rel(1.5)))
  p <- p + theme(strip.text.x=element_text(size = 10))
  p <- p + theme(strip.text.y=element_text(size = 10))
  p <- p + xlab("") + ylab("")
  if(axis_labels == TRUE){
    p <- p + theme(axis.text.x = element_text(size = 16), axis.text.y = element_text(size = 16))
  }
  else{
    # Hide axis text and ticks entirely when labels are not wanted.
    p <- p + theme(axis.text.x = element_blank())
    p <- p + theme(axis.ticks.x = element_blank())
    p <- p + theme(axis.text.y = element_blank())
    p <- p + theme(axis.ticks.y = element_blank())
  }
  p <- p + theme(plot.margin=unit(c(0,0,0,0),"cm"))
  return(p)
}
# Print the elapsed runtime since programstarttime, choosing seconds,
# minutes or hours depending on magnitude.
getTime <- function(programstarttime) {
  elapsed <- proc.time()[3] - programstarttime
  if (elapsed < 60) {
    cat(paste("The current runtime is:", elapsed, "seconds\n"))
  } else if (elapsed < 3600) {
    cat(paste("The current runtime is:", elapsed, "seconds =", elapsed / 60, "minutes\n"))
  } else {
    cat(paste("The current runtime is:", elapsed, "seconds =", elapsed / 3600, "hours\n"))
  }
}
#now run the program
#just setting up the converged information from input variables
convergedVelocityName <- paste(mainFileLocation,convergedName,"\\",velocityAscFileName, sep="")
convergedAngleName <- paste(mainFileLocation,convergedName,"\\",angleAscFileName, sep="")
#set up the converged rasters from the files
convergedVelocity <- raster(convergedVelocityName)
names(convergedVelocity) <- paste("Converged",convergedName)
rVelocityStack <- stack(convergedVelocity)
convergedAngle <- raster(convergedAngleName)
names(convergedAngle) <- paste("Converged",convergedName)
# Process the velocity/angle rasters for every run folder, accumulating
# per-run velocity statistics and a combined data frame for plotting.
# Preallocate the per-run statistic vectors (one slot per folder) rather
# than growing length-1 vectors by indexed assignment inside the loop.
beforemaxVelocities <- numeric(filecount)
beforeminVelocities <- numeric(filecount)
beforemeanVelocities <- numeric(filecount)
# NOTE(review): the "after" vectors are initialised here but never filled
# in this loop -- confirm whether they are still needed.
aftermaxVelocities <- numeric(filecount)
afterminVelocities <- numeric(filecount)
aftermeanVelocities <- numeric(filecount)
# Map settings used by the plot below; constants originally set while
# processing the first file.
zoom <- 13
maptype <- "terrain"
# Collect each run's prepared data frame here and bind once after the loop
# (avoids the O(n^2) cost of rbind-ing inside the loop).
dfPieces <- vector("list", filecount)
for (i in seq_len(filecount)) {
  # Echo progress for debugging.
  print(i)
  # Load the velocity and angle rasters for this run folder.
  rVelocityName <- paste0(mainFileLocation, processingFolderNames[i], "\\", velocityAscFileName)
  rAngleName <- paste0(mainFileLocation, processingFolderNames[i], "\\", angleAscFileName)
  rVelocity <- raster(rVelocityName)
  names(rVelocity) <- processingFolderNames[i]
  rAngle <- raster(rAngleName)
  names(rAngle) <- processingFolderNames[i]
  # Per-run velocity statistics before any adjustment.
  beforemaxVelocities[i] <- cellStats(rVelocity, stat = "max")
  beforeminVelocities[i] <- cellStats(rVelocity, stat = "min")
  beforemeanVelocities[i] <- cellStats(rVelocity, stat = "mean")
  dfPieces[[i]] <- rasterPrep(rVelocity, rAngle, processingFolderNames[i])
  if (i == 1) {
    # Centre the map on the extent of the first run's data.
    lat <- (min(dfPieces[[1]]$lat) + max(dfPieces[[1]]$lat)) / 2
    lon <- (min(dfPieces[[1]]$lon) + max(dfPieces[[1]]$lon)) / 2
  }
  # This part of the program has completed; let the user know (helps debugging).
  cat(paste("The program has finished processing your", processingFolderNames[i], "file\n"))
  getTime(programstarttime)
}
# Combine all per-run data frames in one pass.
df <- do.call(rbind, dfPieces)
cat("Now plotting windninja outputs\n")
filename <- "MeshCountResults"
theTitle <- "Mesh Count Results"
plotOutput <- paste0(mainFileLocation, "\\", filename, ".png")
# Open the PNG device, and guarantee it is closed again even if the
# plotting call fails (otherwise an error would leave the device open
# and lock the output file).
png(filename = plotOutput, width = graphWidth, height = graphHeight)
tryCatch({
  p <- bigRasterToVectorMap(df, lat, lon, zoom, maptype, theTitle, axis_labels = FALSE)
  print(p)
}, finally = dev.off())
cat("finished windninja plots\n")
getTime(programstarttime)
# Final wrap-up: announce completion and report the total elapsed runtime
# in the most readable unit (seconds, minutes, or hours).
cat("Finished all plots\n")
cat("\nThe program has finished\n")
endtime <- proc.time()[3] - programstarttime
if (endtime >= 3600) {
  cat(paste("The total program runtime is:", endtime/3600, "hours\n"))
} else if (endtime >= 60) {
  cat(paste("The total program runtime is:", endtime/60, "minutes\n"))
} else {
  cat(paste("The total program runtime is:", endtime, "seconds\n"))
}
cat("Have a nice day!\n\n")
line <- readline("Press [enter] or [space] to continue: ") |
405e36bc6be6e3b1dbcc39b10f24b4f9e358eb4e | 102ed19a754cd881c2036f4301398c331818e06c | /STUDENTS-DATA-FRAME-OFFICE-HOURS (2) COMPLETE.R | 0829908186d8438b753de2de6c8f5ef747d3bc72 | [] | no_license | nishkamurthy/My-Projects.github.io | 0bc1cf8857044b70031e673ff6b7ec7f4abe4a1f | 0f1d2a3eee952c440c2194e349ec62a89a3b1950 | refs/heads/master | 2022-12-04T15:45:50.386057 | 2020-08-20T23:54:43 | 2020-08-20T23:54:43 | 281,810,827 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,034 | r | STUDENTS-DATA-FRAME-OFFICE-HOURS (2) COMPLETE.R | #We usually create a data frame with vectors. Create 3 vectors with 2 elements
nm = c("Angela", "Shondra") #Example: Names of patients
ag = c(37, 43) #Add Age of patients\
ins = c(TRUE, TRUE) #Add Insurance - do they have insurance True/False values
#Create a data frame named patients and pass the 3 vectors nm, ag, ins
--code
patients=data.frame(nm,ag,ins)
print(patients)
##ANSWER should like below
## nm ag ins
## 1 Angela 27 TRUE
## 2 Shondra 36 TRUE
#We can also create a data frame with different column names and this is how you do it. Run the statement
patients = data.frame("names"=nm, "ages" = ag, "insurance"=ins)
print(patients)
## names ages insurance
## 1 Angela 27 TRUE
## 2 Shondra 36 TRUE
#We may wish to add rows or columns to our data. We can do this with: #rbind() #cbind(). remember it as r(row)bind..
#For example we can go back to our patient data and say we wish to add another #patient we could just do the following
newPatient = c(names="Liu Jie", age=45, insurance=TRUE)
patients=rbind(patients, newPatient)
print(patients)
## names ages insurance
## 1 Angela 27 TRUE
## 2 Shondra 36 TRUE
## 3 Liu Jie 45 TRUE
## You may get a Warning in `[=.factor`(`*tmp*`, ri, value = "Liu Jie"): invalid factor
## level, NA generated. OR it may not have added the row..
#This warning serves as a reminder to always know what your data type is.
# In this case R has read our data in as a factor so use as.character
# patients$names = as.character(patients$names) #this is ensuring all patients names as character
# patients = rbind(patients, newPatient)
# patients
#if we decide to place another column of data in we could use cbind function
# Next appointments
next.appt = c("09/23/2016", "04/14/2016", "02/25/2016")
#Lets R know these are dates
next.appt = as.Date(next.appt, "%m/%d/%Y")
next.appt
print(patients)
## [1] "2016-09-23" "2016-04-14" "2016-02-25"
#We then have a vector of dates which we can cbind (column bind) in R.
patients = cbind(patients, next.appt)
patients
## names ages insurance next.appt
## 1 Angela 27 TRUE 2016-09-23
## 2 Shondra 36 TRUE 2016-04-14
## 3 Liu Jie 45 TRUE 2016-02-25
##### getting information on a particular column you the format dataframeName$columnName
patients$names
# [1] "Angela" "Shondra" "Liu Jie"
#print ages of all patients using the example above
patients$ages
## "37" "43" "45"
############################TITANIC: Accessing Data Frames
#we will use built in data on Titanic from R. In this case, lets create a new data frame from Titanic
library(datasets)
Titanic
titanic = data.frame(Titanic)
print(summary(titanic))
titanic
#Print Summary of the Titanic dataframe
# Answer
# Class Sex Age Survived Freq
# 1st :8 Male :16 Child:16 No :16 Min. : 0.00
# 2nd :8 Female:16 Adult:16 Yes:16 1st Qu.: 0.75
# 3rd :8 Median : 13.50
# Crew:8 Mean : 68.78
# 3rd Qu.: 77.00
# Max. :670.00
#We can look at the different columns that we have in the data set:
colnames(titanic)
## [1] "Class" "Sex" "Age" "Survived" "Freq"
#print value of Age column using the syntax dataframeName$columNname
titanic$Age
#ANS
# [1] Child Child Child Child Child Child Child Child Adult Adult Adult Adult Adult Adult Adult Adult Child Child Child Child Child Child Child
# [24] Child Adult Adult Adult Adult Adult Adult Adult Adult
# Levels: Child Adult
#Observe the Levels information above Levels: Child Adult. This means how many unique values are there for a particular column.
#Similar to unique() function. Let's use this instead. Use unique(pass the column name.
# Remember: use the syntax dataframeName$columNname for refer to a column.
uniqueAge = unique(titanic$Age)
uniqueAge
# ANS
#Levels: Child Adult
#Now let's print information based on various conditions
# if you need all rows or all columns, simply pass ',' without quotes. else provide indexes based on the
#rules learned during vectors
#to print all rows and all columns.
titanic[,]
#Print first 2 rows of data with all columns using indexing and :.
titanic[2,]
# titanic[2,]
# Class Sex Age Survived Freq
# 2 2nd Male Child No 0
#Print all rows of data with first 3-4th columns using indexing and :.
titanic[,3:4]
#Print first 3, 9, 6th rows and 2nd, 3rd and 5th column. Use the indexing rules for vectors.
#when you want non-continuous columns or rows, you need to pass it in a vector
titanic[c(3,6,9), c(2,3,5)]
#Ans
#Sex Age Freq
#3 Male Child 35
#9 Male Adult 118
#6 Female Child 0
#Print all rows with class, sex and age columns
titanic[, c("Class", "Sex", "Age")]
#Print start of the data using head()
head(titanic)
#Print last few rows of the data using tail()
tail(titanic)
#Access the age information, using the column number
titanic[,3]
## ANS [1] Child Child Child Child Child Child Child Child Adult Adult Adult
## [12] Adult Adult Adult Adult Adult Child Child Child Child Child Child
## [23] Child Child Adult Adult Adult Adult Adult Adult Adult Adult
## Levels: Child Adult
#Use the column name for age instead to get the information:
colnames(titanic)[3]
## ANS [1] Child Child Child Child Child Child Child Child Adult Adult Adult
## [12] Adult Adult Adult Adult Adult Child Child Child Child Child Child
## [23] Child Child Adult Adult Adult Adult Adult Adult Adult Adult
## Levels: Child Adult
#Change column names using index position. change column name for Sex to Gender. Hint - Sex is 3rd column.
colnames(titanic)[3] = "Age"
colnames(titanic)
#Ans[1] "Class" "Sex" "Gender" "Survived" "Freq"
##### Create subset of data frames
## create a data frame using 4-8th row and 2nd and 3rd column of Titanic
d1=data.frame(titanic[4:8, 2:3])
d1
## your turn
#Create a data frame with 2, 3, 4th row and 1st, 2nd, and 3rd column
d2=data.frame(titanic[2:4, 1:3])
d2
#Ans
# Class Sex Age
# 2 2nd Male Child
# 3 3rd Male Child
# 4 Crew Male Child
###################################CHALLENGE and OPTIONAL: ##########################
titanic=data.frame(Titanic)
#####WASNt ABLE TO GET PAST FIRST PROBLEM
#PROBLEM:We wish to know information about a particular class. Find all information on the 1st class passengers
titanic["1st", ] #did this work? NO.. you probably got the below information
## Class Sex Age Survived Freq
## NA <NA> <NA> <NA> <NA> NA
#HINT: What we need to do instead is to use comparison symbol == and pass the condition where Class is equal to 1st" at the row position..
#HINT: Remember to use the column name as titanic$COLUMNAME.try!
titanic= data.frame(titanic$class=="1st")
titanic["1st", ]
(titanic$Class == "1st")
#ANS
# Class Sex Age Survived Freq
# 1 1st Male Child No 0
# 5 1st Female Child No 0
# 9 1st Male Adult No 118
# 13 1st Female Adult No 4
# 17 1st Male Child Yes 5
# 21 1st Female Child Yes 1
# 25 1st Male Adult Yes 57
# 29 1st Female Adult Yes 140
dataset
### What is the frequency of the first class passengers? use "1st" and "Freq" and the comparison operator..
### Hint: in the above example, you had all column. in this question, we are asking you to print only "Freq"
first.class.freq = titanic[titanic$Class=="1st", "Freq"]
first.class.freq
### Another challenge: how many total passengers in first class?
### No hints;) it's really simple. Was taught in the first class to use the a built-in function
### I wasnt kidding when I said, whatever you learned in first class will be used as well...
# Ans
#[1] 325
### Ok! now that you got the hang of it, calculate how many TOTAL male passengers were on the ship called Titanic
--code..
## Ans
## [1] 1731
#PROBLEM: Change column name using names; Change column name for Freq to Frequency. This is a trick problem, but we have done it twice in class similar problems
--code here
colnames(titanic)
# [1]ANS "Class" "Sex" "Age" "Survived" "Frequency"
###CURIOUS TO LEARN MORE? Since we talked about this in class! Let's try to change the row names for Titanic.
### let's try to give rows for 1st class passengers are "First Class"
###
rownames(patients)[patients$ages=="1st"]="First Class"
# we get the following error. So row names are possible, and can be changed but only for certain datasets.
# duplicate 'row.names' are not allowed
# non-unique value when setting 'row.names': ‘First Class’
###### USE of which() function. It returns the indices/positions when the condition is TRUE
which(titanic$Freq>100)
# [1] 9 10 11 12 28 29
which(titanic$Class=="1st")
# [1] 1 5 9 13 17 21 25 29
### THINk!! could you have used this for stock.prices problem? YES! and it would have been so much easier.
## Which() is a useful function to get the column position.
## We will use the concept to change the column name of hp to Horse Power in two steps.
#Find what is the column position of 'hp' in mtcars
mtcars=datasets::mtcars
colPositionForhp = which(names(mtcars)== 'hp')
colPositionForhp
#Ans [1] 4
#Now use this column position to change the name of the column- Simple?? eh?
colnames(mtcars)[colPositionForhp] = "Horse Power"
#test your code by printing the new column names. Does it show you "Horse Power" as a column, yes!
colnames(mtcars)
#[1] "mpg" "cyl" "disp" "Horse Power" "drat" "wt" "qsec" "vs"
#[9] "am" "gear" "carb"
#
#Another use of Which
#Find how many first class passengers rode Titanic.
a=which(titanic$Class=="1st") #Return all positions in Titanic
sum(titanic$Freq[a]) #Sums all the Frequency value of the position which is for 1st
################# IF YOU FINISHED ALL PROBLEMS ABOVE THEN YOU ALREADY KNOW MORE THAN MOST ON DATA FRAMES AT YOUR LEVEL
## VISUALIZATION IS NEXT!!
|
784c1f214ebc510485f0e90344f540d08ec40326 | dda72d9f8b5b08a1f066d9c4188042aaf9dd7596 | /election-analysis-usa.R | dc491d78578c956b4ca182f3b72422170c1f5b9e | [] | no_license | paikalla/bayes-trials | 4ce8055ab35869eb3ab3c7e9c26b4f63096ee16e | f08b96e111aaa217ef3d148ca8c159ecd21ce067 | refs/heads/main | 2023-03-05T07:03:34.116626 | 2021-02-17T13:39:51 | 2021-02-17T13:39:51 | 339,095,663 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,460 | r | election-analysis-usa.R | #install.packages('foreign')
#install.packages('tidyverse')
library(foreign)
library(tidyverse)
library(dplyr)
library(VGAM)
# USA 2008 Presidential Pre-election polling data (STATA dta-file) http://www.stat.columbia.edu/~gelman/book/data/pew_research_center_june_elect_wknd_data.dta
# USA 2008 Presidential Election results http://www.stat.columbia.edu/~gelman/book/data/2008ElectionResult.csv
# setwd("~/.../...")
# --- meet the data --- #
pre_election_polls <- read.dta('pew_research_center_june_elect_wknd_data.dta')
election_results <- read.csv(file = '2008ElectionResult.csv')
# check if datasets have same states in same order (purkka)
col1 <- cbind(summary(pre_election_polls$state))
col2 <- cbind(election_results$state)
cbind(col1, col2)
# remove missing values, Alaska (n=0), Hawaii (n=1)
election_polls_ <- subset(pre_election_polls,
state != "alaska" & state != "hawaii")
# --- prepare the data --- #
# pick ideological voters by state
data_by_state_ideo <- election_polls_ %>%
group_by(state, ideo) %>%
summarise(count = n())
# pick only very liberal voters by state
data_by_state_veryliberal <- subset(data_by_state_ideo,
ideo == "very liberal",
state != "alaska")
# drop factor levels -> int, (count of poll answers per state)
data_by_state_veryliberal$polls_per_state <- as.integer(table(droplevels(election_polls_$state)))
# ratio per state (very liberals / number of poll responses per state)
data_by_state_veryliberal$mean <- data_by_state_veryliberal$count/data_by_state_veryliberal$polls_per_state
# add proportional Obama voters from election data by region (state)
election <- subset(election_results, state != "Alaska" & state != "Hawaii") # remove alaska, hawaii
data_by_state_veryliberal$obama_voters_prop_per_state <- election$vote_Obama_pct / 100
# data ready to use
View(data_by_state_veryliberal)
# --- visualise the data --- #
# scatter plot 1:
# x=: proportion of very liberal poll participants ~ y=: proportion of Obama voters per region
plot(data_by_state_veryliberal$mean,
data_by_state_veryliberal$obama_voters_prop_per_state,
xlab = "Poll: Voters who describe their views as very liberal (proportion per state)",
ylab = "Obama voters (proportional per state)",
col = "green",
main = "Presidential elections of USA 2008",
text(data_by_state_veryliberal$mean,
data_by_state_veryliberal$obama_voters_prop_per_state,
labels = data_by_state_veryliberal$state,
cex= 0.5, pos = 2))
# scatter plot 2:
# x=: number of poll participants ~ y=: proportion of very liberals per region
plot(data_by_state_veryliberal$polls_per_state,
data_by_state_veryliberal$mean,
xlab = "Number of participants of the poll in the state",
ylab = "proportion of very liberals per state",
col = "orange",
main = "Presidential elections of USA 2008",
text(data_by_state_veryliberal$polls_per_state,
data_by_state_veryliberal$mean,
labels = data_by_state_veryliberal$state,
cex= 0.5, pos = 1))
# --- likelihood and empirical Bayes analysis --- #
" Using a little shortcut known called ´empirical Bayes´, not setting priors for the prior parameters,
but estimating the prior parameters from the data.
-> observed number of ’very liberals’ with random variables Y1, . . . Y49 as samples from binomial distributions
-> own parameters θj for each state, and these parameters are a sample from the common beta distribution."
# Drop levels & reform factors to integers
data_by_state_veryliberal <- (droplevels(data_by_state_veryliberal))
data_by_state_veryliberal$count <- as.integer(data_by_state_veryliberal$count)
data_by_state_veryliberal$polls_per_state <- as.integer(data_by_state_veryliberal$polls_per_state) # number of poll answers per state
# negative log likelihood of data given alpha; beta.
# [Likelihood] Beta-binomial PDF for P(X|θ). Modeling how the data X will look like given the parameter θ.
# f(y) ~ Beta-Bin(n, alpha, beta)
ll <- function(alpha, beta) {
-sum(dbetabinom.ab(data_by_state_veryliberal$count,
data_by_state_veryliberal$polls_per_state,
alpha, beta,
log = TRUE)) # log = TRUE takes log of dbetabinom
}
mm <- mle(ll, start = list(alpha = 1, beta = 10), method = "L-BFGS-B")
(alpha0 <- coef(mm)[1])
(beta0 <- coef(mm)[2])
cat("ML estimate: alpha0 = ", alpha0, "\n")
cat("ML estimate: beta0 = ", beta0, "\n")
# mean of Beta(alpha, beta)
mean_beta <- function(alpha, beta) {
alpha / (alpha + beta)
}
# histogram of all state level proportions of very liberal participants
# plotted together with Beta(alpha0, beta0) density
hist(data_by_state_veryliberal$mean,
breaks = 10, col = "light blue",
probability = TRUE,
ylim = c(0,40),
xlab = "Proportion of very liberal poll participants",
main = "Informative prior estimated from the data" )
x <- seq(0,0.1, by = .001)
lines(x,dbeta(x, alpha0, beta0), lwd = 2, col = "darksalmon")
legend('topright', inset = 0.1,
legend = paste0('Beta(', round(alpha0, 1), ', ', round(beta0,1), ')'),
col = "darksalmon", lwd = 4)
# --- Posterior --- #
# posterior means on state level
data_by_state_veryliberal$posterior_means <-
mean_beta(alpha0 + data_by_state_veryliberal$count,
beta0 + data_by_state_veryliberal$polls_per_state - data_by_state_veryliberal$count)
# scatterplot: sample size vs posterior mean
p2 <- plot(data_by_state_veryliberal$polls_per_state,
data_by_state_veryliberal$posterior_means,
type = 'n',
xlab = 'Number of participants',
ylab = 'Ideological ground: Very liberal (proportion, posterior mean)',
ylim = c(0,0.2))
text(data_by_state_veryliberal$polls_per_state,
data_by_state_veryliberal$posterior_means,
labels = data_by_state_veryliberal$state, cex= 0.5, pos = 1)
# Let's compare data scatter plot and posterior plot into one overall graph
par(mfrow=c(1,2))
plot(data_by_state_veryliberal$polls_per_state,
data_by_state_veryliberal$mean,
xlab = "Number of participants per state",
ylab = "proportion of very liberals per state",
col = "orange",
main = "Presidential elections of USA 2008",
text(data_by_state_veryliberal$polls_per_state,
data_by_state_veryliberal$mean,
labels = data_by_state_veryliberal$state,
cex= 0.5, pos = 1))
plot(data_by_state_veryliberal$polls_per_state,
data_by_state_veryliberal$posterior_means,
type = 'n',
xlab = 'Number of participants per state',
ylab = 'Ideological ground: Very liberal (proportion, posterior mean)',
ylim = c(0,0.10),
main = "Posterior results, USA 2008")
text(data_by_state_veryliberal$polls_per_state,
data_by_state_veryliberal$posterior_means,
labels = data_by_state_veryliberal$state,
cex= 0.5, pos = 2, col = "blue")
"The observations are now scattered on a smaller region on y-axis.
We see that especially the locations of states with small participant numbers
have changed. The prior Beta(15.6, 317.5) is quite informative and therefore,
influences the estimates most when the amount of observations ni
is small. California stays almost at the same location while Washington D.C
moved quite close to others"
|
00068fc9ffa8a7bcfba50c313cd836d7cc8313ae | b41dc6bf2a4a5c6d5eb1216b2f53c7c4b1857d27 | /nanoR/man/volcanoPlot.Rd | 97ee49bb1c9d3f3bb87d053533dd62c50d0ec624 | [] | no_license | acolajanni/GENEXPRESSO | 1fedb045b5d3322fad00bac8cca53c2b9df817fc | 086c8346faeae3d8dbb17299c01f94d4e4c6c3c5 | refs/heads/main | 2023-08-31T10:03:01.119557 | 2023-08-21T08:13:47 | 2023-08-21T08:13:47 | 368,509,036 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 471 | rd | volcanoPlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/volcanoPlot.R
\name{volcanoPlot}
\alias{volcanoPlot}
\title{Volcano Plot}
\usage{
volcanoPlot(dge.result, contrast)
}
\arguments{
\item{nano}{The nano object}
\item{groups}{Group information for the columns of the count matrix}
}
\description{
Generates Volcano Plots for every contrast given
}
\examples{
plotPCA()
}
\keyword{analysis}
\keyword{component}
\keyword{pca}
\keyword{principal}
|
cbd892b56a7399f7265c81f72dcd3a8e03e0315a | 5450042e2cdca616a2c4b07cdeb8ff41fe534f29 | /man/rsaga.close.gaps.Rd | 222457d9bf27deb26b7de58c4aec4eba471b59e7 | [] | no_license | GertS/RSAGA | 78f4b0aefe2e2deed447452f1600be47139d15dc | e2f6b296c45adcb23c364e32eea3cf1baea59251 | refs/heads/master | 2021-01-16T19:57:34.833107 | 2013-07-23T00:00:00 | 2013-07-23T00:00:00 | 31,158,444 | 1 | 0 | null | 2015-02-22T09:32:46 | 2015-02-22T09:32:45 | null | UTF-8 | R | false | false | 2,248 | rd | rsaga.close.gaps.Rd | \name{rsaga.close.gaps}
\alias{rsaga.close.gaps}
\alias{rsaga.close.one.cell.gaps}
\title{SAGA Modules Close Gaps and Close One Cell Gaps}
\usage{
rsaga.close.gaps(in.dem, out.dem, threshold = 0.1, ...)
rsaga.close.one.cell.gaps(in.dem, out.dem, ...)
}
\arguments{
\item{in.dem}{input: digital elevation model (DEM) as
SAGA grid file (default file extension: \code{.sgrd})}
\item{out.dem}{output: DEM grid file without no-data
values (gaps). Existing files will be overwritten!}
\item{threshold}{tension threshold for adjusting the
interpolator (default: 0.1)}
\item{...}{optional arguments to be passed to
\code{\link{rsaga.geoprocessor}}, including the
\code{env} RSAGA geoprocessing environment}
}
\value{
The type of object returned depends on the \code{intern}
argument passed to the \code{\link{rsaga.geoprocessor}}.
For \code{intern=FALSE} it is a numerical error code (0:
success), or otherwise (default) a character vector with
the module's console output.
}
\description{
Close (Interpolate) Gaps
}
\details{
\code{rsaga.close.one.cell.gaps} only fill gaps whose
neighbor grid cells have non-missing data.
In \code{rsaga.close.gaps}, larger tension thresholds can
be used to reduce overshoots and undershoots in the
surfaces used to fill (interpolate) the gaps.
}
\note{
This function uses modules 7 (\code{rsaga.close.gaps} and
6 \code{rsaga.close.one.cell.gaps} from the SAGA library
\code{grid_tools}.
SAGA GIS 2.0.5+ has a new additional module \code{Close
Gaps with Spline}, which can be accessed using
\code{\link{rsaga.geoprocessor}} (currently no R wrapper
available). See \code{rsaga.get.usage("grid_tools","Close
Gaps with Spline")} or in version 2.1.0+ call
\code{rsaga.html.help("grid_tools","Close Gaps with
Spline")}.
}
\examples{
\dontrun{
# using SAGA grids:
rsaga.close.gaps("rawdem.sgrd","dem.sgrd")
# using ASCII grids:
rsaga.esri.wrapper(rsaga.close.gaps,in.dem="rawdem",out.dem="dem")
}
}
\author{
Alexander Brenning (R interface), Olaf Conrad (SAGA
module)
}
\seealso{
\code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
}
\keyword{interface}
\keyword{spatial}
|
151ac0ba5c52b7268a97842d0345d4fe63792df7 | c5de5d072f5099e7f13b94bf2c81975582788459 | /R Extension/RMG/Projects/CollateralAdHoc/ConvertAprimeData.R | 05eb9029aad320d7e527c3767cd05a33f72d4127 | [] | no_license | uhasan1/QLExtension-backup | e125ad6e3f20451dfa593284507c493a6fd66bb8 | 2bea9262841b07c2fb3c3495395e66e66a092035 | refs/heads/master | 2020-05-31T06:08:40.523979 | 2015-03-16T03:09:28 | 2015-03-16T03:09:28 | 190,136,053 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,482 | r | ConvertAprimeData.R | memory.limit( 4095 )
require( reshape )
require( RODBC )
source("H:/user/R/RMG/Projects/CollateralAdHoc/StressFramework.R")
asOfDate = .getAsOfDate()
asOfDate = as.Date("2009-01-02")
aprimeFile = paste( "S:/Risk/Temporary/CollateralAllocation/",
format( asOfDate, "%Y%m%d"), "/SourceData/AllPos_AGMTHScale_preibt_",
format( asOfDate, "%d%b%y"), ".csv", sep="" )
aprimeData = read.csv( aprimeFile )
megaData = .getMegaData( asOfDate )
marginableAgreements = sort( megaData$"Netting Agreement"[which(megaData$CSA != "")] )
nymexAgreements = megaData$"Netting Agreement"[which(megaData$ShortName=="NYMEX")]
marginableAgreements = sort( c( as.character(marginableAgreements),
as.character( nymexAgreements ) ) )
aprime09 = subset( aprimeData, ContractYear=="2009" )
aprime09 = aprime09[,sort(names(aprime09))]
test = subset( aprime09, credit_nettingagreement %in%
marginableAgreements )
aggregate(test$pos_adj, list(test$commodity_grp),
function(x){round( sum(x, na.rm=TRUE ) ) } )
test3 = aprime09[which(aprime09$Marginal != "UnMargin"),]
aggregate(test3$pos_adj, list(test3$commodity_grp),
function(x){round( sum(x, na.rm=TRUE ) ) } )
aprimesMarginableAgreements = sort(unique(subset( aprimeData, Marginal == "Marginal")$credit_nettingagreement))
aprimeMarginableDifferences = setdiff( aprimesMarginableAgreements, marginableAgreements )
print( aprimeMarginableDifferences )
raftMarginableDifferences = setdiff( marginableAgreements, aprimesMarginableAgreements )
print( raftMarginableDifferences )
unique(aprimeData$counterparty[which(aprimeData$credit_nettingagreement %in% aprimeMarginableDifferences[-1])])
aprimesLCHeldAgreements = sort(unique(subset( aprimeData, Marginal == "LCHeld")$credit_nettingagreement))
setdiff( aprimesLCHeldAgreements, marginableAgreements )
aprimeNonMargined = c(aprimesLCHeldAgreements, aprimesMarginableAgreements)
setdiff( aprimeNonMargined, marginableAgreements)
setdiff( marginableAgreements, aprimeNonMargined)
aprimeMap = unique(aprimeData[,c("credit_nettingagreement", "Marginal")])
aprimeMap = aprimeMap[do.call(order, aprimeMap),]
raftMap = unique( megaData[,c("Netting Agreement", "CSA") ])
raftMap$Marginal = ifelse( raftMap$CSA=="", "UnMargin", "Marginal" )
raftMap$CSA = NULL
names( aprimeMap ) = names( raftMap ) = c("netting_agreement", "marginal")
finalMap = merge( raftMap, aprimeMap, by="netting_agreement", suffix=c(".raft", ".aprime"), all=TRUE ) |
6f310a8dc4c6b8e19ab2112091135378cb055017 | d68f5d00ca28a65ce9d01572199c8971045c188e | /R/utils.R | 914fdbc60711d0e9326db76df4171790e311c33e | [
"MIT"
] | permissive | jjesusfilho/tjsp | 84586aa3d6c6df5d33a9b999fadbefe1a7b548e2 | a95a06813873ae1d2d9eb69765d47e3ebf836bf8 | refs/heads/usp | 2023-08-30T17:11:58.951730 | 2019-09-28T08:02:48 | 2019-09-28T08:02:48 | 84,059,211 | 52 | 22 | NOASSERTION | 2019-09-22T07:16:17 | 2017-03-06T10:17:12 | R | UTF-8 | R | false | false | 127 | r | utils.R | is_defined <- function(sym) {
sym <- deparse(substitute(sym))
env <- parent.frame()
exists(sym, env, inherits = FALSE)
}
|
1cd8c4db9bcbc50af4ee262a4ab2474888c1f227 | b14ac1f5eafc0c7ab16d369c74d042fa77952534 | /make_local_repo.R | 11b9bd08ffc34c0e9052aab35c383c02085784dd | [
"MIT"
] | permissive | shinichinamba/R-package-installer | e823940a59ec6e8f7be56b6b9cddd6c0ff0f483d | 3f8f5e32a594f5b73cd66e8f78552c9f8c1c3356 | refs/heads/master | 2023-03-22T16:41:41.370517 | 2021-01-06T06:14:37 | 2021-01-06T06:14:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,707 | r | make_local_repo.R | #!/usr/bin/env Rscript
if (!requireNamespace("optparse", quietly = TRUE)) install.packages("optparse")
suppressPackageStartupMessages(library(optparse))
opts_list <- list(
make_option(c("-q", "--quiet"),
action = "store_true",
default = FALSE,
help = "Print little output"),
make_option(c("-o", "--output"),
type = "character",
default = "r_repos",
help = "An output directory where packages will be downloaded. [Default: %default]"),
make_option(c("-r", "--r_ver"),
type = "character",
default = paste(R.version$major, R.version$minor, sep = "."),
help = "The version of R in the computer you want to install packages.
[Default: the version of R where this script is running (%default)]"),
make_option(c("-b", "--bioc_ver"),
type = "character",
default = if (requireNamespace("BiocManager", quietly = TRUE)) as.character(BiocManager::version()) else NA_character_,
help = "The version of BiocManager in the computer you want to install packages.
[Default: the version of BiocManager in the computer where this script is running (%default)]"),
make_option(c("-t", "--type"),
type = "character",
default = "source",
help = "One of 'source', 'mac.binary', 'mac.binary.el-capitan', 'win.binary'.
If you got error when you specified one of the binary types,
please see 'https://cloud.r-project.org/bin/' to check whether packages are available for your R version.
[Default: %default]"),
make_option(c("-c", "--cran"),
type = "character",
default = "https://cran.rstudio.com/",
help = "Path to CRAN repository. [Default: %default]"
)
)
# Optparse
parser <- OptionParser(
usage = "usage: %prog [options] package1 package2 ...",
option_list = opts_list,
description = "
R package installer: clone R packages you need and their dependencies for computers with no internet access.
CRAN and Bioconductor packages can be specified. GitHub packages are currently not available.")
OPTS <- parse_args(parser, positional_arguments = c(1, Inf))
# set the CRAN repository
opt_repos <- getOption("repos")
opt_repos <- c(opt_repos[!names(opt_repos) == "CRAN"], c(CRAN = OPTS$options$cran))
options(repos = opt_repos)
if (!requireNamespace("miniCRAN", quietly = TRUE)) install.packages("miniCRAN")
suppressPackageStartupMessages(library(miniCRAN))
# get URLs of R package repositories
if (!requireNamespace("BiocManager", quietly = TRUE)) install.packages("BiocManager")
repos <- BiocManager::repositories(version = OPTS$options$bioc_ver)
# get a matrix for available packages
avail_pkgs_list <- vector("list", length(repos))
for (i in seq_along(repos)) {
avail_pkgs_list[[i]] <- pkgAvail(repos[[i]], type = OPTS$options$type, Rversion = OPTS$options$r_ver, quiet = OPTS$options$quiet)
}
avail_pkgs <- do.call(rbind, avail_pkgs_list)
rm("avail_pkgs_list")
# solve package dependencies
req_pkgs <- pkgDep(OPTS$args, availPkgs = avail_pkgs, quiet = OPTS$options$quiet)
# clone dependencies
dir.create(OPTS$options$output)
repo_pkgs <-
makeRepo(req_pkgs,
path = OPTS$options$output,
type = OPTS$options$type,
repos = repos,
Rversion = OPTS$options$r_ver,
quiet = OPTS$options$quiet)
# check
repo_pkgs_names <- sub("_.*", "", basename(repo_pkgs))
failed_pkgs <- setdiff(req_pkgs, repo_pkgs_names)
if (length(failed_pkgs) == 0L) {
if (!OPTS$options$quiet) message("==============================\nAll packages were successfully downloaded.")
} else {
warning(sprintf("%i packages were failed to download: %s", length(failed_pkgs), paste0(failed_pkgs, collapse = ", ")))
}
# download stringi (NO-INTERNET version)
# see https://github.com/gagolews/stringi/blob/master/INSTALL
if (OPTS$options$type == "source" && "stringi" %in% repo_pkgs_names) {
stringi_path <- repo_pkgs[repo_pkgs_names == "stringi"]
repo_dir <- dirname(stringi_path)
stringi_nonet_zip <- file.path(repo_dir, "stringi.zip")
download.file("https://github.com/gagolews/stringi/archive/master.zip", stringi_nonet_zip, quiet = OPTS$options$quiet)
unzip(stringi_nonet_zip, exdir = repo_dir)
file.remove(stringi_path)
cwd <- getwd()
setwd(repo_dir)
Rbuildignore <- readLines("stringi-master/.Rbuildignore")
Rbuildignore <- grep("icu../data", Rbuildignore, invert = TRUE, value = TRUE)
writeLines(Rbuildignore, "stringi-master/.Rbuildignore")
system2("R", c("CMD", "build", "stringi-master"))
setwd(cwd)
file.remove(stringi_nonet_zip)
file.remove(file.path(repo_dir, "stringi-master", list.files(file.path(repo_dir, "stringi-master"), recursive = TRUE, all.files = TRUE)))
file.remove(rev(list.dirs(file.path(repo_dir, "stringi-master"), full.names = TRUE, recursive = TRUE)))
if (!OPTS$options$quiet) message("==============================\n'stringi' was replaced to the NO-INTERNET version.")
}
x <- updateRepoIndex(OPTS$options$output, type = OPTS$options$type, Rversion = OPTS$options$r_ver)
if (!OPTS$options$quiet) message("==============================")
out_dir_path <- file.path(getwd(), OPTS$options$output)
message(
sprintf(
"You can install R packages by 'install.packages(c(%s), type = '%s', repos = '%s')' after you copy '%s' to the computer where you want to install R packages.",
paste0(paste0("'", OPTS$args, "'"), collapse = ", "),
OPTS$options$type,
paste0("file://<PATH_TO_LOCAL_REPOSITORY>/", OPTS$options$output),
OPTS$options$output
)
)
|
a645786bd77fbf71d5a5d6bdaf1be59b98bd9b0d | fdcd0b34d9980a5f080048a1bcc0b56b13df5e7f | /3Stock Price Processing.R | 19bb8efd258507ea997117cfdbb6e716de2486fe | [] | no_license | sidayang/Stock-Performance-Forecasting-with-Decision-Tree | e382f91f02b8c5059f03581883f8fc03dcccb035 | 9796fe599194fddd79f2c0d2b75d3c3e4a2b6475 | refs/heads/master | 2020-03-19T12:54:26.468096 | 2018-06-08T01:16:09 | 2018-06-08T01:16:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,669 | r | 3Stock Price Processing.R | rm(list=ls(all=TRUE))
# Label each stock "Good"/"Bad" depending on whether it beat the index on at
# least `setwinrate` of the trading days inside the date window.
# Reads index.csv / allstocks.csv / stockno.csv and writes markdata.csv.
# (Rlab is kept loaded for compatibility with the rest of the project, but
# its count() is no longer needed: base sum() on a logical counts the TRUEs.)
if (!require(Rlab)) install.packages('Rlab')
library(Rlab)
#Set winrate####
startdate <- "2015-01-01"
enddate <- "2015-12-31"
setwinrate <- 0.5
#Read the data####
index <- read.csv("/Users/ystar/Desktop/backup/index.csv")
index[,"date"] <- as.Date(index[,"date"])
index <- subset(index, index$indexreturn != Inf & index$date>=as.Date(startdate) & index$date<=as.Date(enddate))
index <- index[,c("date","indexreturn")]
allstocks <- read.csv("/Users/ystar/Desktop/backup/allstocks.csv")
allstocks[,"date"] <- as.Date(allstocks[,"date"])
allstocks <- subset(allstocks, allstocks$return != Inf & allstocks$date>=as.Date(startdate) & allstocks$date<=as.Date(enddate))
allstocks <- allstocks[,c("date","id","return")]
stockno <- read.csv("/Users/ystar/Desktop/backup/stockno.csv")[,1]
#Merge the daily return of allstocks and index####
data <- merge(allstocks, index, by = "date")
#Good stock TRUE: beat the index that day, vice versa####
data$mark <- as.character(data$return > data$indexreturn)
#Show the result####
showdata <- data[order(data$id, data$date), ]
#Calculate the number of trading days####
trade <- nrow(index)
#Calculate the win rate of each stock####
# Vectorised count of winning days per stock: sum() on a logical counts the
# TRUE values, replacing both Rlab::count() and the previous loop that grew
# `winno` element by element over 1:length(stockno) (unsafe when empty).
winno <- vapply(seq_along(stockno),
                function(i) sum(data$mark == "TRUE" & data$id == stockno[i]),
                numeric(1))
winrate <- winno/trade
#Separate the good from the bad by setwinrate####
mark <- (winrate >= setwinrate)
#Finish the preparation for data mining####
final <- as.data.frame(cbind(stockno, winno, winrate, mark))
colnames(final)[1] <- "id"
final[which(mark == 1), "mark"] <- "Good"
final[which(mark == 0), "mark"] <- "Bad"
write.csv(final, "/Users/ystar/Desktop/backup/markdata.csv")
|
33c54244b371483f613eb721e36cef2ce8833541 | 794ba46c97e3a67ea7d64a7d1978a9b878018fa1 | /ui.R | 479f3c7ec432f3b3ec09c1e2aec67016edd8d02a | [] | no_license | dpr5177/Rejections-Regions | 1a9df6f72de5223f2baff0d7be4ac2a708c20b73 | 2708e46fe0518190496aaef25fc09725be3d3525 | refs/heads/master | 2021-01-22T04:10:54.049815 | 2017-05-25T20:12:51 | 2017-05-25T20:12:51 | 92,440,279 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,570 | r | ui.R |
library(shiny)
library(shinydashboard)
dashboardPage(skin="blue",
#Title
dashboardHeader(title="Rejection Regions",titleWidth=450),
#Sidebar
dashboardSidebar(
width = 260,
sidebarMenu(
menuItem("Overview", tabName = "over", icon = icon("university")),
menuItem("Plot", tabName = "plot", icon = icon("area-chart")),
menuItem("Z Table", tabName = "zTab", icon =icon("table")),
menuItem("Review", tabName = "post", icon = icon("pencil-square"))
)),
#define the body of the app
dashboardBody(
tabItems(
#actual app layout
tabItem(tabName = "over",
fluidRow(
column(5,br(),br(),
img(src="PSU.png")
)
)
),
tabItem(tabName = "plot",
fluidRow(
withMathJax(),
column(4,
br(),
radioButtons("dist","Distribution:",c("Z","t"),selected="Z"),
sliderInput("alpha","Significance level (alpha):",0.05,min=0.005,max=0.250,step=0.005),
radioButtons("tails","Tails type:",c("Two tailed"='two',"Left tailed"='left',"Right tailed"='right')),
conditionalPanel(
condition="input.dist == 't'",
numericInput("tdf","Degrees of Freedom",15)
)
),
column(8,
fluidRow(
plotOutput("plot")
)
)
)
),
tabItem(tabName = "zTab",
fluidRow(
img(src = "NormalTable.png")
)
),
tabItem(tabName = "post",
fluidRow(
#add in latex functionality if needed
withMathJax(),
#two columns for each of the two items
column(6,
#what did they learn
h1("Learning Objectives:"),
selectizeInput("topic","Select the topic you would like to look at. ",choices = c("Population Proportion", "Population Mean", "Difference"), multiple = FALSE),
conditionalPanel(condition = "input.topic =='Population Proportion'",
selectizeInput("ques","Question: ", c("1","2","3","4","5"))
),
conditionalPanel(condition = "input.topic =='Population Mean'",
selectizeInput("ques","Question: ", c("1","2","3","4","5"))
),
br(),br(),br(),
conditionalPanel(condition = "input.topic =='Difference'",
selectizeInput("ques","Question: ", c("1","2","3","4","5"))
)
),
column(6,
#continuation
h1("Also"),
#box to contain description
box(background="blue",width=12,
h4("lksadjfl;kasdfh;lkhfghklshld. ")
)
)
)
)
)
)
)
|
d7a2715467e2cdb43436817ff04286d599a4e0a2 | a89f86b52a65e02b80a464adbbc476f650b6e9d7 | /man/getFolds.Rd | 87df4c6a904c9322a1f814ecad6d52ccb359c592 | [] | no_license | michaelzxu/deepforest | 54150fa758df8fe78178fbf046fd2169df3cd768 | 5833ccbf75317f3e33aff65fb29767f744e8bd31 | refs/heads/master | 2021-01-23T01:18:08.335305 | 2017-05-01T01:51:23 | 2017-05-01T01:51:23 | 85,896,834 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 317 | rd | getFolds.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deepforest_utils.R
\name{getFolds}
\alias{getFolds}
\title{Get n-folds}
\usage{
getFolds(y, nfold = 5, recycle = FALSE, list = TRUE)
}
\description{
Shamelessly taken from \code{\link[caret]{createFolds}}
https://topepo.github.io/caret/
}
|
199a2ab39c32693adb54acde0fc8b12268141e73 | 990e73495105153b48f69b3ba067d0b217dae2ad | /Code/htmlStrip.R | e456a94be206356aa62e928b77768e3db2ada6ca | [] | no_license | JH27/inst737 | c0b140d3d94063c80fcb2f45f23a022d982f0fb8 | f8ceaec1fd580439c9fa2192bcf120fedbeafd02 | refs/heads/master | 2021-05-05T10:43:33.961119 | 2017-12-15T05:14:19 | 2017-12-15T05:14:19 | 104,408,639 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,184 | r | htmlStrip.R | # First install the library
# First install the package. The name must be quoted: the previous unquoted
# form install.packages(tm.plugin.webmining) evaluated the name as a variable
# and failed with "object 'tm.plugin.webmining' not found".
install.packages("tm.plugin.webmining")
# Then load the library
library(tm.plugin.webmining)
# While loading, the library might throw an error. If it does, you most likely have to
# update your version of java on your computer, or install the 64-bit version of java in case you have
# the 32-bit version. You can install the 64-bit version on top of the 32-bit version. This
# assumes of course that you have a 64-bit OS.
# For reference, I used this link:
# https://stackoverflow.com/questions/17376939/problems-when-trying-to-load-a-package-in-r-due-to-rjava
# when I ran into my issue while installing the package. The accepted answer worked for me after I installed the
# 64-bit version of java.
# After that read in the sample data and I extracted one post into the variable c. Specifically the body of postid:31133870
# Then use the library on the post like this:
c <- extractHTMLStrip(c)
# The result in the variable c still kept things like "\n" in the post.
# So I used the following gsub function to get rid of all "\n" characters in the post:
c <- gsub('\\n', ' ', c)
# The result are acceptable enough to do some text mining on the data.
|
52bd8da36d62e7b79ab9113d7fab55e343ef7538 | b134222e604d1220467993d91baf56925fffd6e7 | /man/partition_by_snp.Rd | 30e5daf538409a61bea797bc75d323bbdf1857f1 | [
"MIT"
] | permissive | welch16/UWCCC.GWAS | 976d1fd722cea31d415f2a61bb766f099d9bc9c4 | 9ffc05b3f65df58e21c6ba6d2f10291c4fbcdcb6 | refs/heads/main | 2023-07-16T13:30:35.362370 | 2021-09-01T01:18:17 | 2021-09-01T01:18:17 | 298,447,389 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 832 | rd | partition_by_snp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partition_loci.R
\name{partition_by_snp}
\alias{partition_by_snp}
\title{Separates a \code{tibble} with the summary statistics of a GWAS experiment with
columns id, pos, and p (snp id, snp position, and p.value)}
\usage{
partition_by_snp(data, snp_dist = 250000, pval_thr = 1e-08)
}
\arguments{
\item{data}{A \code{tibble} with at least id, pos and p columns}
\item{snp_dist}{an integer to find all SNPs within that distance from the
significant snp. By default 250 kbps.}
\item{pval_thr}{p-value significance threshold. By default 1e-8.}
}
\value{
a nested \code{tibble} with the separated loci
}
\description{
Separates a \code{tibble} with the summary statistics of a GWAS experiment with
columns id, pos, and p (snp id, snp position, and p.value)
}
|
4afbaeba9f179cb9b582e1e16b32cd25e7e4f7a8 | 12ae74bd0ba9d5494d7301b521b45d1bfa5ff84a | /man/set_CRAN_mirror.Rd | 5fd7d52537994c63d7dc10f40c60765d21a55ba5 | [] | no_license | cran/do | 62b609a0f0cc0f0c0cc879adb821b1d9d95b6632 | fa0d7c8f9799326ffa6f0763f490c2873597131b | refs/heads/master | 2021-08-15T11:59:00.793187 | 2021-08-03T10:40:02 | 2021-08-03T10:40:02 | 206,034,685 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 284 | rd | set_CRAN_mirror.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mirror.R
\name{set_CRAN_mirror}
\alias{set_CRAN_mirror}
\title{set CRAN mirror}
\usage{
set_CRAN_mirror(url)
}
\arguments{
\item{url}{mirror url}
}
\value{
set CRAN mirror
}
\description{
set CRAN mirror
}
|
69f25a3cec14bf643dfb71f60a39a59e051dbe7a | c1ecfc09c31b9ce336ea140b56e1b61f7ec9157a | /R/scanr.R | 026552b771ce7b1d7e8a41f2a19e0a7d097c8023 | [] | no_license | fmichonneau/scanr | ce5360f1861e247e367c3f70a4071910927c0c78 | a8690b70f431fc75def4c4ce0d3168329974b295 | refs/heads/master | 2021-01-23T14:12:31.174872 | 2017-06-03T14:29:01 | 2017-06-03T14:29:01 | 93,246,218 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,414 | r | scanr.R | ##' Returns the functions for each file in
##' @title Scan your scripts folders for the functions you are looking
##' for
##' @param pattern a regular expression that matches what you are
##' looking for
##' @param path where to look for your R files
##' @param file_extension the extension for your R files (or other
##' regular expression) to match some files
##' @param ... additional arguments to be passed to \code{grepl} to
##' match the regular expression for what you are looking for
##' @return a tibble
##' @export
##' @importFrom purrr map_df
##' @importFrom tibble data_frame
##' @importFrom dplyr %>% filter mutate
scanr <- function(pattern, path = "R", file_extension = "(r|R)$", ...) {
    ## Source every matching script into a throwaway environment and
    ## record, per file, the names of the objects it defines.
    ## NOTE(review): the default `file_extension` matches any filename that
    ## merely ends in "r"/"R" (no dot required); "\\.[rR]$" would be
    ## stricter -- left unchanged to preserve the documented default.
    scripts <- list.files(path = path, pattern = file_extension,
                          full.names = TRUE)
    res <- purrr::map_df(scripts, function(script) {
        sandbox <- new.env(parent = .GlobalEnv)
        on.exit(rm(sandbox))
        sys.source(script, envir = sandbox)
        found <- ls(envir = sandbox)
        tibble::data_frame(file = rep(basename(script), length(found)),
                           functions = found)
    })
    ## Optionally keep only the names matching `pattern`, storing a
    ## highlighted copy in `with_marks` for the print method.
    if (!missing(pattern)) {
        keep <- grepl(pattern = pattern, x = res$functions, ...)
        res <- res[keep, ]
        res$with_marks <- mark_matches(res$functions, pattern)
    }
    structure(res, class = c("scanr", class(res)))
}
## Print method for "scanr" objects: lists, per scanned file, the matching
## function names (using the highlighted copy in `with_marks` when a search
## pattern was supplied).
##
## The signature now matches the print() generic, print(x, ...): the previous
## `function(r)` was inconsistent with the generic, which R CMD check flags.
## Returns its argument invisibly, as print methods conventionally do.
print.scanr <- function(x, ...) {
    by_file <- split(x, x$file)
    for (f in names(by_file)) {
        cat(f, ":\n", sep = "")
        fns <- if (exists("with_marks", by_file[[f]])) {
            by_file[[f]]$with_marks
        } else {
            by_file[[f]]$functions
        }
        for (fn in fns) {
            cat("    ", fn, "\n")
        }
    }
    invisible(x)
}
##' @importFrom crayon white bgGreen combine_styles
mark_matches <- function(text, pattern, marker = NULL) {
    ## Highlight every regex match of `pattern` in the character vector
    ## `text`. `marker` is a function applied to each matched substring;
    ## the default wraps matches in a crayon style (white on green).
    ## Returns an unnamed character vector the same length as `text`.
    ## NOTE(review): matching operates on tolower(text), so the returned
    ## strings are lower-cased copies of the input -- confirm that this
    ## case-folding of the displayed names is intentional.
    if (is.null(marker)) {
        marker <- crayon::combine_styles(crayon::white, crayon::bgGreen)
    }
    vapply(
        tolower(text),
        .mark_matches,
        character(1),
        pattern = pattern,
        marker = marker,
        USE.NAMES = FALSE
    )
}
## Worker for mark_matches(): wraps every regex match of `pattern` found in
## `text1` (a single string) with `marker`, returning the string unchanged
## when there is no match.
.mark_matches <- function(text1, pattern, marker) {
    word_pos <- gregexpr(pattern, text1)
    ## gregexpr() signals "no match" with a one-element vector equal to -1;
    ## test that element directly instead of the fragile `word_pos == -1`
    ## comparison against the list itself
    if (word_pos[[1]][1L] == -1) return(text1)
    start <- c(word_pos[[1]])
    end <- start + attr(word_pos[[1]], "match.length") - 1
    words <- substring(text1, start, end)
    ## wrap the replacements in list(): regmatches<- with a gregexpr match
    ## list expects one character vector of replacements per input string,
    ## so an unwrapped vector only applied its first element
    regmatches(text1, word_pos) <- list(marker(words))
    text1
}
|
cd359d3561169cc6830c01f1e4748b2fa3345485 | 68647fcc5546a5b2991584ecf8d49dcb5fa0d25e | /R/AOfamiliesv3.R | 24010028b7f08f9bb57beb98d9f5024b9a86735f | [] | no_license | cran/AOfamilies | 9c0cf710e0ac197996e1b7aeba38671a5a6860e4 | 14544457bfe8d20b60e276f45f3da91a7ed82e09 | refs/heads/master | 2021-01-13T01:49:28.019107 | 2014-01-20T00:00:00 | 2014-01-20T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 34,517 | r | AOfamiliesv3.R | ### GLM part
## define the AO transformations (symmetric and asymmetric)
## Aranda-Ordaz symmetric link family.
##
## Builds a "link-glm" object for the symmetric Aranda-Ordaz transformation
## with parameter phi,
##   g(mu) = (2/phi) * (mu^phi - (1 - mu)^phi) / (mu^phi + (1 - mu)^phi),
## which depends only on |phi| and reduces to the logit link as phi -> 0.
##
## Arguments:
##   phi     - transformation parameter (scalar); only |phi| is used.
##   verbose - if TRUE, warn when the sign of phi is discarded and when
##             etas are truncated in the inverse link.
ao.sym <- function(phi, verbose = FALSE)
{
  ## parameter processing: the family is even in phi, so work with |phi|.
  ## (The previous subset phi[phi >= 0] emptied phi for negative input and
  ## made the phi == 0 test fail with "argument is of length zero".)
  if(verbose && phi < 0)
    warning("sign of phi ignored as ao1(phi) = ao1(-phi)")
  phi <- abs(phi)
  if(phi == 0) {
    ## limiting case: logit link, relabelled so downstream code can still
    ## recognise the family by name
    rval <- make.link("logit")
    rval$name <- "ao.sym"
    return(rval)
  }
  linkfun <- function(mu){(2/phi) * (mu^phi - (1 - mu)^phi)/
    (mu^phi + (1 - mu)^phi)
  }
  linkinv <- function(eta){
    ## clamp eta into the open interval (-2/phi, 2/phi) where the inverse
    ## transformation is defined
    etastar <- pmin(2/phi - .Machine$double.eps,
                    pmax(-2/phi + .Machine$double.eps, eta))
    if(verbose && !isTRUE(all.equal(as.vector(eta),
                                    as.vector(etastar))))
      warning("truncation in inverse link function")
    ((1 + 0.5 * phi * etastar)^(1/phi))/
      ((1 + 0.5 * phi * etastar)^(1/phi) +
       (1 - 0.5 * phi * etastar)^(1/phi))
  }
  mu.eta <- function(eta){
    ## derivative of linkinv with respect to eta (quotient rule)
    phieta1 <- 1 + 0.5 * phi * eta
    phieta2 <- 1 - 0.5 * phi * eta
    phieta1^((1/phi) - 1) * 0.5/(phieta1^(1/phi) +
      phieta2^(1/phi)) - (phieta1^(1/phi)) * (phieta1^((1/phi) - 1) *
      0.5 - phieta2^((1/phi) - 1) * (0.5))/(phieta1^(1/phi) +
      phieta2^(1/phi))^2
  }
  valideta <- function(eta) {
    if(verbose && !all(abs(0.5 * phi * eta) < 1))
      warning("some of the current etas are out of range")
    TRUE
  }
  name <- "ao.sym" # "Aranda-Ordaz symmetric"
  ## return link-glm object
  structure(list(linkfun = linkfun, linkinv = linkinv, mu.eta = mu.eta,
    valideta = valideta, name = name), class = "link-glm")
}
## Aranda-Ordaz asymmetric link family.
##
## Builds a "link-glm" object for the asymmetric Aranda-Ordaz transformation
## with parameter phi,
##   g(mu) = log(((1 - mu)^(-phi) - 1) / phi),
## which equals the logit link at phi = 1 and the cloglog link as phi -> 0.
##
## Arguments:
##   phi     - transformation parameter (scalar).
##   verbose - if TRUE, warn when etas fall outside the valid range.
ao.asym <- function(phi, verbose = FALSE)
{
  ## exact special cases, relabelled so downstream code can still recognise
  ## the family by name
  if(phi == 1) {
    rval <- make.link("logit")
    rval$name <- "ao.asym"
    return(rval)
  }
  if(phi == 0) {
    rval <- make.link("cloglog")
    rval$name <- "ao.asym"
    return(rval)
  }
  linkfun <- function(mu) log(((1 - mu)^(-phi) - 1)/phi)
  linkinv <- function(eta){
    ## Elementwise inverse with truncation at mu = 1 wherever
    ## 1 + phi * exp(eta) <= 0 (possible only for phi < 0).
    ## The previous version decided the truncation with one scalar if()
    ## on phi * exp(sum(eta)), i.e. based on the SUM of all linear
    ## predictors, although glm.fit calls linkinv with the whole vector.
    out.of.range <- phi * exp(eta) <= -1
    base <- pmax(1 + phi * exp(eta), .Machine$double.eps)
    mu <- 1 - base^(-1/phi)
    mu[out.of.range] <- 1
    mu
  }
  mu.eta <- function(eta){
    exp(eta) * (1 + phi * exp(eta))^(-(1 + phi)/phi)
  }
  valideta <- function(eta){
    if(verbose && !all(phi * exp(eta) > -1))
      warning("some of the current etas are out of range")
    TRUE
  }
  name <- "ao.asym" # "Aranda-Ordaz asymmetric"
  ## return link-glm object
  structure(list(linkfun = linkfun, linkinv = linkinv, mu.eta = mu.eta,
    valideta = valideta, name = name), class = "link-glm")
}
## define the central fitting function of the package for GLM
## Core fitting routine for Aranda-Ordaz binomial GLMs.
##
## Profiles the transformation parameter: for every candidate value in `phi`
## a binomial GLM with link `link(phi = .)` is fitted and its log-likelihood
## recorded; the model is then refitted at the profile MLE of lambda, and a
## score test of asymmetry against the logistic fit is computed.
##
## Arguments:
##   x       - design matrix (including the intercept column).
##   y       - response, as accepted by glm.fit.
##   link    - link generator, i.e. ao.sym or ao.asym.
##   phi     - numeric grid of candidate transformation parameters.
##   weights - prior (binomial denominator) weights.
##   maxit   - maximum IWLS iterations, forwarded to glm.fit through
##             glm.control. (Previously accepted but silently ignored;
##             callers passing their own `control` via ... now conflict.)
##   ...     - further arguments passed on to glm.fit.
##
## Value: a list holding the profile (valid.lambda / valid.logLik), the MLE
## of lambda with its refit and coefficients, the standardised asymmetry
## statistic, and a numeric link code (link.bis: 1 = ao.sym, 2 = ao.asym).
ao.glm.fit <- function (x, y, link, phi, weights, maxit = 500, ...){
  ## profile log-likelihood over the candidate grid; start from -Inf so
  ## that non-converging fits are excluded from the profile
  logLikvector <- rep (-1/0, length(phi))
  for (i in seq_along(phi)) {
    fit <- try (glm.fit (
      x, y, weights = weights, start = rep (0, ncol(x)),
      family = binomial (link = link (phi = phi[i])),
      control = glm.control (maxit = maxit),
      ...), silent = TRUE)
    ## keep -Inf as log-lik if the glm procedure failed, otherwise recover
    ## the log-likelihood from the AIC (aic = 2p - 2*logLik)
    if (inherits (fit, "try-error")) {
      logLikvector[i] <- -1/0
    } else {
      logLikvector[i] <- (fit$aic - 2 * ncol(x))/(-2)
    }
  }
  ## position of the profile MLE among the usable (finite) values
  ok_logLik <- logLikvector > -1/0
  MLE.pos <- which.max (logLikvector [ok_logLik])
  ## range of usable lambdas and the matching log-likelihoods
  valid.lambda <- phi [which (ok_logLik)]
  valid.logLik <- logLikvector [ok_logLik]
  ## refit the model at the MLE of the transformation parameter
  fit.MLE <- glm.fit(x, y, weights = weights,
                     start = rep (0, ncol(x)),
                     family = binomial
                     (link = link (phi = valid.lambda[MLE.pos])),
                     control = glm.control (maxit = maxit),
                     ...)
  ## score test of asymmetry, evaluated at the logistic fit
  fit.logit <- glm.fit(x = x, y = y,
                       weights = weights,
                       start = rep (0, ncol(x)),
                       family = binomial (link = "logit"),
                       control = glm.control (maxit = maxit),
                       ...)
  lin.pred <- drop (x %*% fit.logit$coef)
  theta <- exp (lin.pred)/(1 + exp(lin.pred))
  r <- y * weights
  ## score statistic and its information decomposition
  u.stat <- sum (((r - weights * theta)/theta) *
                 (theta + log (1 - theta)))
  i.ll <- sum (weights * ((theta + log (1 - theta))^2)/
               (exp (lin.pred)))
  i.bl <- vapply (seq_len (ncol(x)), function (k)
    sum ((theta + log (1 - theta)) * weights * x[,k] * (1 - theta)),
    numeric (1))
  i.bb <- crossprod (x, weights * theta * (1 - theta) * x)
  var.u.stat <- i.ll - t (i.bl) %*% solve (i.bb) %*% i.bl
  ## NOTE(review): the statistic is u / var(u), not u / sd(u); kept exactly
  ## as in the original implementation -- confirm against the source paper.
  stand.u.stat <- u.stat/var.u.stat
  ## numeric code for the link family (1 = symmetric, 2 = asymmetric)
  if (fit.MLE$family[2] == "ao.sym") {
    link.bis <- 1
  } else {
    link.bis <- 2
  }
  ## return list as output
  list (link.bis = link.bis,
        MLE = valid.lambda[MLE.pos],
        MLE.pos = MLE.pos,
        fit.MLE = fit.MLE,
        fit.MLE.coef = fit.MLE$coefficients,
        logLik = valid.logLik[MLE.pos],
        valid.lambda = valid.lambda,
        valid.logLik = valid.logLik,
        stand.u.stat = stand.u.stat,
        family = fit.MLE$family,
        fitted.values = fit.MLE$fitted.values
        )
}
## define the generic function of the package for GLM
## S3 generic for Aranda-Ordaz GLM fitting; dispatches on the class of `x`
## (formula input -> ao.glm.formula, otherwise ao.glm.default).
ao.glm <- function (x, ...) UseMethod ("ao.glm")
## define the default method of the package for GLM
## Default method for ao.glm(): coerces the inputs to the types the fitting
## routine expects, delegates to ao.glm.fit(), and tags the result with the
## "ao.glm" class.
ao.glm.default <- function (x, y, link, phi, weights, ...){
  design <- as.matrix (x)
  response <- as.numeric (y)
  lambda.grid <- as.numeric (phi)
  w <- as.numeric (weights)
  result <- ao.glm.fit (design, response, link, lambda.grid, w, ...)
  class (result) <- c ("ao.glm")
  result
}
## define the print method of the package for GLM
## Print method for "ao.glm" objects. Shows, in order: the original call,
## the maximum-likelihood estimate of the transformation parameter lambda,
## the log-likelihood attained at that estimate, and the coefficients of the
## model refitted at the MLE. Used purely for its console output.
print.ao.glm <- function(x, ...){
  cat("Call:\n")
  print(x$call)
  cat("\nMLE of lambda:\n")
  print(x$MLE)
  cat("\nLog-likelihood associated with MLE of lambda:\n")
  print(x$logLik)
  cat("\nMLE coefficients for Aranda-Ordaz regression:\n")
  print(x$fit.MLE.coef)
}
## define the formula method of the package for GLM
## Formula method for ao.glm(): builds the model frame (honouring a
## `weights` argument), extracts the response and design matrix, and
## delegates the fitting to ao.glm.default(). The plotting options
## (plotit / plot.spline) and the model-frame pieces are stored on the
## returned object for later use by summary.ao.glm() and predict.ao.glm().
ao.glm.formula <- function (formula, data = list(), link,
                            phi = seq (-2, 2, 0.01), weights,
                            plotit = "TRUE",
                            plot.spline = "TRUE", ...){
  ## keep the arguments which should go into the model frame
  mf <- match.call (expand.dots = TRUE)
  m <- match (c ("formula", "data", "weights"), names (mf), 0)
  mf <- mf[c (1, m)]
  mf$drop.unused.levels <- TRUE
  mf[[1]] <- as.name ("model.frame")
  mf <- eval.parent (mf)
  ## allow model.frame to update the terms object before saving it
  mt <- attr (mf, "terms")
  ## define response variable
  y <- model.response (mf, "numeric")
  ## define design matrix
  ## NOTE(review): `contrasts` here resolves to the stats::contrasts
  ## function, not a user-supplied contrasts list -- model.matrix() ignores
  ## (or warns about) a non-list contrasts.arg; confirm this is intended.
  x <- model.matrix (mt, mf, contrasts)
  ## retrieve the weights (or define them if not provided)
  if (is.null (model.weights (mf))) {
    weights <- rep (1, length (y))
  } else {
    weights <- model.weights (mf)
  }
  fit <- ao.glm.default (x, y, link, phi, weights, ...)
  fit$call <- match.call()
  fit$formula <- formula
  fit$plotit <- plotit
  fit$plot.spline <- plot.spline
  fit$model <- mf
  fit$design.matrix <- x
  fit
}
## define the summary method of the package for GLM
## summary() method for "ao.glm" objects.
##
## Steps:
##   1. locate a 95% profile-likelihood confidence interval for lambda on
##      the stored profile (valid.lambda / valid.logLik) using the usual
##      qchisq(0.95, 1)/2 drop from the maximum; when the profile is too
##      flat or too coarse for that, the whole scanned range is reported;
##   2. list the standard links whose lambda value falls inside the CI
##      (asymmetric family: 0 = cloglog, 1 = logistic; symmetric family:
##      0 = logistic, 1 = linear, and apparently 0.3955 / 0.6755 act as the
##      probit / arcsine approximations -- see the thresholds below);
##   3. optionally (object$plotit == "TRUE") draw the profile with the MLE
##      and CI bounds marked (optionally spline-smoothed first);
##   4. assemble a "summary.ao.glm" list for print.summary.ao.glm().
summary.ao.glm <- function (object, ...){
  if ( # check if we have enough data with reasonable precision
       # to calculate a CI
    -(qchisq(0.95, 1)/2) + max (object$valid.logLik) >
    min (object$valid.logLik)
    &&
    min (abs (object$valid.logLik[1:object$MLE.pos] -
              (-(qchisq(0.95, 1)/2) +
               max (object$valid.logLik)))) < 1
    &&
    min (abs (
      object$valid.logLik[object$MLE.pos:length(object$valid.logLik)] -
      (-(qchisq(0.95, 1)/2) +
       max (object$valid.logLik)))) < 1){
    ## CI bounds: on each side of the MLE, the lambda whose profile
    ## log-likelihood is closest to the chi-square(1) cut-off
    Lower.bound <- round(object$valid.lambda[
      which.min (abs (object$valid.logLik[1:object$MLE.pos] -
                      (-(qchisq(0.95, 1)/2) +
                       max (object$valid.logLik))))], 4)
    Upper.bound <- round(object$valid.lambda[
      object$MLE.pos +
      which.min (abs (
        object$valid.logLik[object$MLE.pos:
                            length(object$valid.logLik)] -
        (-(qchisq(0.95, 1)/2) +
         max (object$valid.logLik))))], 4)
    CI.table <- data.frame(Lower.bound, Upper.bound)
    row.names(CI.table) <- c("")
  }
  else{
    ## fallback: report the full scanned range of lambda
    Lower.bound <- round(min (object$valid.lambda), 4)
    Upper.bound <- round(max (object$valid.lambda), 4)
    CI.table <- data.frame(Lower.bound, Upper.bound)
    row.names(CI.table) <- c("")
  }
  ## get which transformations is included in CI
  epsilon <- .Machine$double.eps
  if (object$link.bis == 2) {
    possible.transfo <- c ("none")
    if (epsilon >= Lower.bound && epsilon <= Upper.bound
        &&
        1 >= Upper.bound){
      possible.transfo <- c("cloglog")
    }
    if (epsilon >= Lower.bound && 1 <= Upper.bound){
      possible.transfo <- c("cloglog", "logistic")
    }
    if (1 >= Lower.bound && 1 <= Upper.bound
        &&
        epsilon <= Lower.bound){
      possible.transfo <- c("logistic")
    }
  }
  if (object$link.bis == 1){
    possible.transfo <- c("none")
    if (epsilon >= Lower.bound && epsilon <= Upper.bound
        &&
        0.3955 >= Upper.bound){
      possible.transfo <- c("logistic")
    }
    if (epsilon >= Lower.bound && 0.3955 <= Upper.bound
        &&
        0.6755 >= Upper.bound){
      possible.transfo <- c("logistic", "probit")
    }
    if (epsilon >= Lower.bound && 0.6755 <= Upper.bound
        &&
        1 >= Upper.bound){
      possible.transfo <- c("logistic", "probit", "arcsine")
    }
    if (epsilon >= Lower.bound && 1 <= Upper.bound){
      possible.transfo <- c("logistic", "probit", "arcsine",
                            "linear")
    }
    if (epsilon <= Lower.bound && 0.3955 <= Upper.bound
        &&
        0.3955 >= Lower.bound && 0.6755 >= Upper.bound){
      possible.transfo <- c("probit")
    }
    if (0.3955 <= Lower.bound && 0.6755 <= Upper.bound
        &&
        0.6755 >= Lower.bound && 1 >= Upper.bound){
      possible.transfo <- c("arcsine")
    }
    if (0.6755 <= Lower.bound && 1 <= Upper.bound
        &&
        1 >= Lower.bound){
      possible.transfo <- c("linear")
    }
  }
  ## plot the profile log likelihood
  if (object$plotit == "TRUE" ){
    if (object$link.bis == 1){ # create title depending on type of transformation
      title <- "Aranda-Ordaz symmetric transformation
Profile log-likelihood plot"
    }
    else{
      title <- "Aranda-Ordaz asymmetric transformation
Profile log-likelihood plot"
    }
    if (object$plot.spline == "TRUE")
    {
      spl <- smooth.spline (object$valid.lambda, object$valid.logLik)
      object$valid.lambda <- spl$x
      object$valid.logLik <- spl$y
    }
    plot(
      x = object$valid.lambda, y = object$valid.logLik,
      xlab = expression (lambda), ylab = "Log-likelihood",
      type = "l"
      , main = title
    )
    ## vertical line for MLE
    lines (x = rep (object$valid.lambda[object$MLE.pos], 2),
           y = c (min (object$valid.logLik), max (object$valid.logLik)),
           type = "l", lty = "dashed", col = "red")
    if ( # check if we have enough data with reasonable precision to calculate a CI
      -(qchisq(0.95, 1)/2) + max (object$valid.logLik) >
      min (object$valid.logLik)
      &&
      min (abs (object$valid.logLik[1:object$MLE.pos] -
                (-(qchisq(0.95, 1)/2) +
                 max (object$valid.logLik)))) < 1
      &&
      min (abs (
        object$valid.logLik[object$MLE.pos:length(object$valid.logLik)] -
        (-(qchisq(0.95, 1)/2) +
         max (object$valid.logLik)))) < 1){
      ## horizontal line for CI
      lines (y = rep (-(qchisq(0.95, 1)/2) + max (object$valid.logLik), 2),
             x = c (min (object$valid.lambda), max (object$valid.lambda)),
             lty = "dashed", col = "blue")
      ## vertical line for lower bound of CI
      lines (x =
             rep (object$valid.lambda[
               which.min (abs (object$valid.logLik[1:object$MLE.pos] -
                               (-(qchisq(0.95, 1)/2) +
                                max (object$valid.logLik))))], 2),
             y = c (min (object$valid.logLik),
                    -(qchisq(0.95, 1)/2) + max(object$valid.logLik)),
             lty = "dashed",
             col = "blue")
      ## vertical line for upper bound of CI
      lines (x =
             rep (object$valid.lambda[object$MLE.pos +
               which.min (abs
                          (object$valid.logLik[object$MLE.pos:
                                               length(object$valid.logLik)] -
                           (-(qchisq(0.95, 1)/2) +
                            max (object$valid.logLik))))], 2),
             y = c (min (object$valid.logLik),
                    -(qchisq(0.95, 1)/2) + max (object$valid.logLik)),
             lty = "dashed",
             col = "blue")
    }
    else{
      ## fallback markers at the edges of the scanned range
      lines (x = rep (min(object$valid.lambda), 2),
             y = c (min (object$valid.logLik), max (object$valid.logLik)),
             type = "l", lty = "dashed", col = "blue")
      lines (x = rep (max(object$valid.lambda), 2),
             y = c (min (object$valid.logLik), max (object$valid.logLik)),
             type = "l", lty = "dashed", col = "blue")
    }
    par (xpd = NA, oma = c (4, 0, 0, 0))
    legend (par ("usr")[1], par ("usr")[3],
            c ("MLE estimate", "95% Confidence Interval"),
            col = c ("red","blue"),
            lty = c ("dashed", "dashed"),
            xjust = 0, yjust = 2.75
            )
  }
  ## assemble the summary object consumed by print.summary.ao.glm()
  call <- object$call
  stand.u.stat <- object$stand.u.stat
  MLE <- object$MLE
  logLik <- object$logLik
  plotit <- object$plotit
  fit.MLE.table.coef <- summary.glm(object$fit.MLE)$coefficients
  res <- list (CI.table = CI.table, call = call,
               stand.u.stat = stand.u.stat, plotit = plotit,
               MLE = MLE, possible.transfo = possible.transfo,
               logLik = logLik,
               fit.MLE.table.coef = fit.MLE.table.coef)
  class (res) <- "summary.ao.glm"
  res
}
## define the print method for the summary method of the package
## for GLM
## Print method for "summary.ao.glm" objects: shows the call, the asymmetry
## score test (statistic and normal p-value), the MLE of lambda with its
## log-likelihood, the 95% CI for lambda and the standard links it contains
## (when available), and the coefficient table.
## NOTE(review): the p-value is pnorm(stat), i.e. the one-sided lower tail
## -- confirm this matches the intended direction of the asymmetry test.
print.summary.ao.glm <- function (x, ...){
  cat("Call:\n")
  print(x$call)
  cat ("\n")
  cat("Test of asymmetry:\n")
  cat (paste ("Test statistic =", round (x$stand.u.stat, 3),"\n"))
  cat (paste ("P-value =", round (pnorm (x$stand.u.stat), 3),"\n"))
  cat("\nMLE of lambda and log-likelihood:\n")
  print(data.frame(MLE = x$MLE, logLik = x$logLik,
                   row.names = c("")))
  if (!is.null(x$CI.table)){
    cat("\n95% confidence interval for lambda:\n")
    print (x$CI.table)
    cat("\nTransformations included in confidence interval:\n")
    print(x$possible.transfo)
  }
  cat("\nMLE coefficients for Aranda-Ordaz regression:\n")
  printCoefmat (x$fit.MLE.table.coef)
  cat ("\n")
}
## define the dose.p method of the package for GLM
## Effective-dose estimation for a fitted ao.glm model: for each response
## probability in `p`, inverts the fitted link at the coefficients selected
## by `cf` (intercept, slope) and attaches delta-method standard errors and
## 95% Wald confidence limits.
ao.dose.p <- function (object, cf = 1:2, p = 0.5,...) {
  ## linear-predictor value corresponding to each requested probability
  eta <- object$family$linkfun(p)
  coefs <- object$fit.MLE.coef[cf]
  x.p <- (eta - coefs[1L])/coefs[2L]
  names(x.p) <- paste("p = ", format(p), ":", sep = "")
  ## delta method: gradient of x.p with respect to (intercept, slope)
  grad <- -cbind(1, x.p)/coefs[2L]
  ## approximate vcov of the coefficients from the stored IWLS weights
  info <- crossprod(object$fit.MLE$weights * object$design.matrix,
                    object$design.matrix)
  vc <- solve(info)
  SE <- sqrt(((grad %*% vc[cf, cf]) * grad) %*% c(1, 1))
  halfwidth <- qnorm(0.975) * SE
  structure(list(x.p = x.p, SE = SE, p = p,
                 CI.low = x.p - halfwidth,
                 CI.high = x.p + halfwidth),
            class = "ao.glm.dose")
}
## define the print method for the dose.p method of the package
## for GLM
## Print method for "ao.glm.dose" objects: displays the estimated dose(s)
## with standard error and 95% confidence limits as a table.
## Returns `x` invisibly, as print methods conventionally do. (A dead
## `object <- M` assignment from the previous version was removed.)
print.ao.glm.dose <- function (x,...){
  M <- data.frame(x$x.p, x$SE, x$CI.low, x$CI.high)
  colnames(M) <- c("Dose", "SE", "CI.low", "CI.high")
  print(M)
  invisible(x)
}
## define the fitted method of the package for GLM
## fitted() method for "ao.glm": wraps the fitted probabilities in a small
## classed list (displayed by print.fitted.ao.glm) and returns it invisibly.
fitted.ao.glm <- function (object,...){
  out <- structure(list(fitted.values = as.numeric(object$fitted.values)),
                   class = "fitted.ao.glm")
  invisible(out)
}
## define the print method for the fitted method of the package
## for GLM
## Print method for "fitted.ao.glm" objects: a header line followed by the
## vector of fitted probabilities. (The paste() around a single literal was
## a no-op and has been dropped; output is unchanged.)
print.fitted.ao.glm <- function (x, ...){
  cat("Estimate(s):\n")
  print(x$fitted.values)
}
## define the predict method of the package for GLM
## predict() method for "ao.glm" objects.
##
## With newdata = NULL the in-sample fitted values are returned. Otherwise
## the linear predictor for `newdata` is built (from the stored formula when
## present, else `newdata` must already be a design matrix) and pushed
## through the inverse Aranda-Ordaz transformation at the estimated lambda
## (object$MLE):
##   link.bis == 1 -- symmetric family, with hard truncation to 0 / 1 when
##                    eta leaves the admissible range;
##   link.bis == 2 -- asymmetric family, with truncation to 1 and the
##                    cloglog limit when MLE == 0.
## Returns a list of class "predict.ao.glm", invisibly.
predict.ao.glm <- function (object, newdata = NULL, ...) {
  if(is.null(newdata)){
    predicted.values <- object$fitted.values
  }
  else {
    ## rebuild the design matrix for the new observations
    if (!is.null(object$formula)){
      tt <- terms(object$formula)
      covariates <- delete.response(tt)
      m <- model.frame(covariates, newdata, ...)
      x <- model.matrix(covariates, m)
    } else {
      x <- newdata
    }
    lp <- object$fit.MLE.coef %*% t(x)
    predicted.values <- rep (0, length(lp))
    ## invert the transformation observation by observation
    for (i in 1:length(lp)){
      if (object$link.bis == 1) {
        if (object$MLE != 0){
          if (abs(0.5*lp[i]*object$MLE) < 1){
            predicted.values[i] <- ((1 + (object$MLE/2)*lp[i])^(1/object$MLE))/
              ((1 + (object$MLE/2)*lp[i])^(1/object$MLE) +
               (1 - (object$MLE/2)*lp[i])^(1/object$MLE))
          } else {
            ## outside the admissible eta range: clamp to 0 or 1
            if (0.5*lp[i]*object$MLE <= -1){
              predicted.values[i] <- 0
            } else {
              predicted.values[i] <- 1
            }
          }
        }
        else {
          ## lambda = 0 limit of the symmetric family: logistic
          if (object$MLE == 0){
            predicted.values[i] <- exp(lp[i])/(1+exp(lp[i]))
          }
        }
      } else {
        if (object$link.bis == 2) {
          if (object$MLE != 0){
            if (exp(lp[i])*object$MLE > -1){
              predicted.values[i] <- 1 - ((1 + exp(lp[i]) *
                                           object$MLE)^(-1/object$MLE))
            } else {
              predicted.values[i] <- 1
            }
          }
          ## lambda = 0 limit of the asymmetric family: cloglog
          if (object$MLE == 0){
            predicted.values[i] <- 1 - exp(-exp(lp[i]))
          }
        }
      }
    }
  }
  res <- list (predicted.values = predicted.values)
  class (res) <- "predict.ao.glm"
  invisible(res)
}
## define the print method for the predict method of the package
## for GLM
## Print method for "predict.ao.glm" objects: a header line followed by the
## vector of predicted probabilities. (The paste() around a single literal
## was a no-op and has been dropped; output is unchanged.)
print.predict.ao.glm <- function (x, ...){
  cat("Estimate(s):\n")
  print(x$predicted.values)
}
## define the plot method of the package for GLM
## Plot method for "ao.glm": draws the standard glm/lm diagnostic plots
## (see ?plot.lm) for the model refitted at the MLE of lambda.
##
## `which` selects the diagnostics exactly as in plot.lm(); the panel layout
## is derived from the number of requested plots (same mapping as before:
## 1 -> 1x1, 2 -> 1x2, 3 or 4 -> 2x2, 5 -> 3x2).
plot.ao.glm <- function (x, which = 1:4, ...){
  layouts <- list(c(1, 1), c(1, 2), c(2, 2), c(2, 2), c(3, 2))
  n <- length(which)
  if (n >= 1 && n <= length(layouts)) {
    ## restore the caller's graphical parameters on exit: the previous
    ## version changed mfrow permanently on the user's device
    old.par <- par(mfrow = layouts[[n]])
    on.exit(par(old.par), add = TRUE)
  }
  ## give the stored glm.fit() result the classes plot.lm dispatches on
  fit <- x$fit.MLE
  class(fit) <- c(fit$class, c("glm", "lm"))
  plot(fit, which = which)
}
## define the AIC method of the package for GLM
## AIC method for "ao.glm": restores the "glm"/"lm" classes on the stored
## fit (a bare glm.fit() list) so that stats::AIC can dispatch on it.
AIC.ao.glm <- function (object, ...){
  fit <- object$fit.MLE
  class(fit) <- c(fit$class, c("glm", "lm"))
  AIC(fit)
}
## define the logLik method of the package for GLM
## logLik method for "ao.glm": restores the "glm"/"lm" classes on the
## stored fit so that stats::logLik.glm can extract the log-likelihood.
logLik.ao.glm <- function (object, ...){
  fit <- object$fit.MLE
  class(fit) <- c(fit$class, c("glm", "lm"))
  logLik(fit)
}
### QR part
## central fitting function of the package for QR
## Core fitting routine for Aranda-Ordaz transformed quantile regression.
##
## The response y is first rescaled to theta in (0, 1) using its sample
## range widened by `epsilon`. For every candidate phi the chosen AO
## transformation of theta ("ao.sym" symmetric or "ao.asym" asymmetric) is
## fitted by quantile regression at quantile `kappa`, either with lqm()
## (estimation = "laplace") or rq() (estimation = "linprog", using
## `method`). A pseudo log-likelihood based on the asymmetric Laplace
## density dal() of the back-transformed residuals is profiled over phi,
## and the model is refitted at the best candidate.
## NOTE(review): lqm/dal and rq presumably come from packages lqmm and
## quantreg, which are not visible in this chunk -- confirm the namespaces.
ao.qr.fit <- function (x, y, weights, kappa,
                       phi, se, estimation, method,
                       epsilon, transfo, R,
                       ...){
  ## define the ao transformation
  ## theta bounded between 0 and 1
  theta <- (y - (min(y)-epsilon))/
    ((max(y)+epsilon) - (min(y)-epsilon))
  ## fit model repetitively for sequence of transformation parameters
  ## and store log-likelihoods
  logLikvector <- rep (-1/0, length(phi))
  for (i in 1:length(phi)) {
    ## transformation of theta
    if (transfo == "ao.sym"){ # symmetric transfo
      if (phi[i] == 0){
        ao <- log (theta/(1 - theta)) # define the logit transfo
      } else {
        ao <- (2/phi[i]) * (theta^phi[i] - (1 - theta)^phi[i])/
          (theta^phi[i] + (1 - theta)^phi[i])
      }
    }
    else {
      if (transfo == "ao.asym"){ # asymmetric transfo
        if (phi[i] == 1){
          ao <- log (theta/(1 - theta)) # define the logit transfo
        } else {
          if (phi[i] == 0){
            ao <- log (-log(1 - theta)) # define the cloglog transfo
          } else {
            ao <- log((1/phi[i])*((1 - theta)^(-phi[i]) - 1))
          }
        }
      }
    }
    ## quantile regression of the transformed response on x (no extra
    ## intercept: x is assumed to already contain one)
    if (estimation == "laplace"){
      fit.seq <- try (lqm (ao ~ x - 1,
                           #tau = kappa,
                           weights = weights,
                           ...), silent = TRUE)
    } else {
      if (estimation == "linprog"){
        fit.seq <- try (rq (ao ~ x - 1, tau = kappa, method = method,
                            weights = weights, # ~ x[,2:ncol(x)]
                            ...), silent = TRUE)
      }
    }
    ## keep -Infinity as log-lik if qr procedure did not converge,
    ## otherwise extract log-lik from fitted model
    ## (NOTE(review): inherits(fit.seq, "try-error") would be safer)
    if (class (fit.seq) == "try-error") {
      logLikvector[i] <- -1/0
    } else {
      ## extract predictions
      if (estimation == "laplace"){
        predict.seq <- predict(fit.seq, interval = TRUE, level = 0.95
                               )
      } else {
        if (estimation == "linprog"){
          predict.seq <- predict(fit.seq, method = method,
                                 type = c("none"),
                                 interval = c("confidence"),
                                 level = 0.95,
                                 newdata = list(x = x),
                                 se = se)
        }
      }
      min.y <- min(y)
      max.y <- max(y)
      eps <- epsilon
      ## back-transform the predictions to the original y scale,
      ## clamping at min.y / max.y outside the admissible eta range
      fitted <- rep (0, length(predict.seq))
      for (j in 1:(length(predict.seq))){
        if (transfo == "ao.sym") {
          if (phi[i] != 0){
            if (abs(0.5*predict.seq[j]*phi[i]) < 1){
              fitted[j] <- min.y + (((max.y + eps) - (min.y - eps)) *
                ((1 + (phi[i]/2)*predict.seq[j])^(1/phi[i]))/
                ((1 + (phi[i]/2)*predict.seq[j])^(1/phi[i]) +
                 (1 - (phi[i]/2)*predict.seq[j])^(1/phi[i])))
            } else {
              if (0.5*predict.seq[j]*phi[i] <= -1){
                fitted[j] <- min.y
              } else {
                fitted[j] <- max.y
              }
            }
          } else {
            if (phi[i] == 0){
              if (exp(predict.seq[j])> -1){
                fitted[j] <- min.y + ((max.y + eps) - (min.y - eps)) *
                  (1 - ((1 + exp(predict.seq[j]))^(-1)))
              } else {
                fitted[j] <- max.y
              }
            }
          }
        } else {
          if (transfo == "ao.asym"){
            if (phi[i] != 0){
              if (exp(predict.seq[j])*phi[i] > -1){
                fitted[j] <- min.y + ((max.y + eps) - (min.y - eps)) *
                  (1 - ((1 + exp(predict.seq[j])*phi[i])^(-1/phi[i])))
              } else {
                fitted[j] <- max.y
              }
            }
            else {
              if (phi[i] == 0){
                fitted[j] <- min.y + ((max.y + eps) - (min.y - eps)) *
                  (1 - exp(-exp(predict.seq[j])))
              }
            }
          }
        }
      }
      ## predictions come back as (fit, lower, upper): column 1 is the
      ## point prediction used for the residuals
      fitted.data <- matrix (fitted, ncol = 3)
      res <- y - fitted.data[,1]
      ## ALD scale estimated as the mean check loss of the residuals;
      ## NOTE(review): dal() is called without tau (the commented-out
      ## tau = kappa), i.e. with its default quantile -- confirm intended
      estimated.sigma <- mean(res * (kappa - ifelse(res <= 0 , 1, 0))) #### !!!!!!!!!
      logLikvector[i] <- mean (
        dal(res, mu = 0, sigma = estimated.sigma,# tau = kappa,
            log = T))
    }
  }
  ## find the position of the MLE of the transformation parameter
  ok.logLik <- logLikvector > -1/0 # record positions of
                                   # log-lik > -Infinity
  MLE.pos <- which.max (logLikvector [ok.logLik])
  ## store the range of valid values for lambda and
  ## corresponding log-lik
  valid.lambda <- phi [which (ok.logLik)]
  valid.logLik <- logLikvector [ok.logLik]
  ## fit model with MLE of transformation parameter
  ## transformation of theta
  ## (NOTE(review): when transfo is neither "ao.sym" nor "ao.asym", `ao`,
  ## phi.new and transfo.new are never defined and the code below fails)
  if (transfo == "ao.sym"){ # symmetric transfo
    if (valid.lambda[MLE.pos] == 0){
      ao <- log (theta/(1 - theta)) # define the logit transfo
      phi.new <- 1
      transfo.new <- "ao.asym"
    } else {
      ao <- (2/valid.lambda[MLE.pos]) * (theta^valid.lambda[MLE.pos]
            - (1 - theta)^valid.lambda[MLE.pos])/
        (theta^valid.lambda[MLE.pos] +
         (1 - theta)^valid.lambda[MLE.pos])
      phi.new <- valid.lambda[MLE.pos]
      transfo.new <- "ao.sym"
    }
  }
  else {
    if (transfo == "ao.asym"){ # asymmetric transfo
      if (valid.lambda[MLE.pos] == 1){
        ao <- log (theta/(1 - theta)) # define the logit transfo
      } else {
        if (valid.lambda[MLE.pos] == 0){
          ao <- log (-log(1 - theta)) # define the cloglog transfo
        } else {
          ao <- log((1/valid.lambda[MLE.pos])*((1 - theta)^(-valid.lambda[MLE.pos]) - 1))
        }
      }
      phi.new <- valid.lambda[MLE.pos]
      transfo.new <- "ao.asym"
    }
  }
  ## refit at the selected transformation parameter
  if (estimation == "laplace"){
    fit <- try (lqm (ao ~ x - 1, #tau = kappa,
                     weights = weights,
                     ...), silent = TRUE)
  } else {
    if (estimation == "linprog"){
      fit <- try (rq (ao ~ x - 1, tau = kappa, method = method, weights = weights,
                      ...), silent = TRUE)
    }
  }
  ## extract coefficients
  if (estimation == "laplace"){
    fit.s <- summary (fit, R = R)
    fit.coef.table <- fit.s$tTable
    fit.coef <- fit.coef.table[,1]
  } else {
    if (estimation == "linprog"){
      fit.coef.table <- summary (fit, se = se, R = R)$coef
      fit.coef <- fit$coef
    }
  }
  ## extract predictions
  if (estimation == "laplace"){
    predict.all <- predict(fit
                           , interval = TRUE, level = 0.95
                           )
  } else {
    if (estimation == "linprog"){
      predict.all <- predict(fit,
                             type = c("none"),
                             method = method,
                             interval = c("confidence"),
                             level = 0.95,
                             newdata = list(x = x),
                             se = se)
    }
  }
  ## return list as output
  list (fit.coef.table = fit.coef.table,
        fit.coef = fit.coef,
        valid.lambda = valid.lambda,
        valid.logLik = valid.logLik,
        MLE = valid.lambda[MLE.pos], MLE.pos = MLE.pos,
        logLik.MLE = max (logLikvector [ok.logLik]),
        estimation = estimation, kappa = kappa,
        resp = y, design.matrix = x,
        transfo.new = transfo.new, phi.new = phi.new,
        predict.all = predict.all,
        transfo = transfo,
        epsilon = epsilon,
        ao = ao,
        fit = fit
        )
}
## Generic entry point for Aranda-Ordaz quantile regression; dispatches on
## the class of `x` (see ao.qr.default for matrix input and ao.qr.formula
## for a model formula).
ao.qr <- function (x, ...) UseMethod ("ao.qr")
## Default method of the package for QR: takes a design matrix `x` and a
## response `y`, delegates the estimation to ao.qr.fit and tags the result
## with class "ao.qr" so the print/summary/predict methods dispatch on it.
ao.qr.default <- function (x, y, weights, method,
                           kappa, phi, estimation,
                           epsilon, transfo, se, R,
                           ...){
  ## Coerce inputs to the shapes the workhorse fitter expects.
  design <- as.matrix (x)
  response <- as.numeric (y)
  result <- ao.qr.fit (design, response, weights = weights, kappa = kappa,
                       phi = phi, estimation = estimation, method = method,
                       epsilon = epsilon, se = se, R = R,
                       transfo = transfo, ...)
  structure (result, class = "ao.qr")
}
## Formula method of the package for QR.
##
## Builds the model frame/design matrix from `formula` and `data`, then
## calls ao.qr.default. Fixes two defects of the previous version:
##  * when `weights` was not supplied, model.weights() returned NULL and a
##    NULL weight vector was forwarded to the fitter (the formal default
##    was dead code because it was unconditionally overwritten); unit
##    weights are now used as a fallback;
##  * the base *function* `contrasts` was passed as `contrasts.arg` to
##    model.matrix(), which expects a list and ignores a non-list with a
##    warning in modern R; the stray argument is dropped.
ao.qr.formula <- function (formula, data = list(),
                           weights = rep (1, length (y)),
                           kappa = 0.5, phi = seq(0,1.5,0.005),
                           estimation = "laplace",
                           epsilon = 0.001, transfo = "ao.sym",
                           plotit = "TRUE", method = "br",
                           se = "boot", R = 100,
                           ...){
  ## keep the arguments which should go into the model frame
  mf <- match.call (expand.dots = TRUE)
  m <- match (c ("formula", "data", "weights"), names (mf), 0)
  mf <- mf[c (1, m)]
  mf$drop.unused.levels <- TRUE
  mf[[1]] <- as.name ("model.frame")
  mf <- eval.parent (mf)
  ## allow model.frame to update the terms object before saving it
  mt <- attr (mf, "terms")
  ## define response variable
  y <- model.response (mf, "numeric")
  ## define design matrix
  x <- model.matrix (mt, mf)
  ## retrieve the weights; model.weights() returns NULL when the caller
  ## did not supply any, in which case unit weights are used
  weights <- model.weights (mf)
  if (is.null (weights)) weights <- rep (1, length (y))
  fit <- ao.qr.default (x, y, weights = weights, kappa = kappa, phi = phi,
                        estimation = estimation, method = method,
                        epsilon = epsilon, transfo = transfo,
                        se = se, R = R,
                        ...)
  ## store call and inputs needed by the summary/predict methods
  fit$call <- match.call()
  fit$formula <- formula
  fit$model <- mf
  fit$plotit <- plotit
  fit$se <- se
  fit$R <- R
  fit$method <- method
  fit
}
## print method of the package for QR: shows the transformation, the call,
## the MLE of the transformation parameter and the coefficient vector.
print.ao.qr <- function(x, ...){
  ## Header: model type, transformation used and the original call.
  cat ("Quantile regression\n")
  cat ("Transformation used:", x$transfo, "\n")
  cat ("call:\n")
  print (x$call)
  cat ("\n")
  ## MLE of the transformation parameter and its log-likelihood.
  cat ("MLE and log-likelihood:\n")
  print (data.frame (MLE = x$MLE,
                     logLik = x$logLik.MLE,
                     row.names = c("")))
  cat ("\n")
  ## Coefficients as a one-row table named after the design-matrix columns.
  cat ("Coefficients:\n")
  coefs <- matrix (x$fit.coef, nrow = 1,
                   dimnames = list ("", colnames (x$design.matrix)))
  print (coefs)
}
## summary method of the package for QR: repackages the fitted object into
## a "summary.ao.qr" list, labelling the coefficient table rows with the
## design-matrix column names.
summary.ao.qr <- function (object, ...){
  tab <- object$fit.coef.table
  row.names (tab) <- colnames (object$design.matrix)
  res <- list (call = object$call, phi.new = object$phi.new,
               kappa = object$kappa, valid.lambda = object$valid.lambda,
               fit.coef.table = tab,
               valid.logLik = object$valid.logLik,
               logLik.MLE = object$logLik.MLE, plotit = object$plotit,
               transfo = object$transfo, MLE = object$MLE
               )
  structure (res, class = "summary.ao.qr")
}
## print method for the summary method of the package for QR.
## Prints call, quantile level, MLE/log-likelihood and the coefficient
## table; optionally draws the profile log-likelihood with the MLE marked.
print.summary.ao.qr <- function (x, ...){
  cat("Call:\n")
  print(x$call)
  cat ("\n")
  cat ("Quantile considered (tau):\n")
  print (x$kappa)
  cat ("\n")
  cat ("MLE and log-likelihood:\n")
  print(data.frame(MLE = x$MLE,
                   logLik = x$logLik.MLE,
                   row.names = c("")))
  cat ("\n")
  ## plotit is stored as the string "TRUE" by ao.qr.formula
  if (x$plotit == "TRUE") {
    best <- which.max(x$valid.logLik)
    plot (x$valid.lambda, x$valid.logLik, type = "l",
          xlab = expression (lambda),
          ylab = "Log-likelihood")
    ## vertical dashed line at the maximum-likelihood estimate
    lines (x = rep(x$valid.lambda[best], 2),
           y = range(x$valid.logLik),
           lty = "dashed", col = "red")
    par (xpd = NA, oma = c (4, 0, 0, 0))
    legend (par ("usr")[1], par ("usr")[3],
            c ("MLE estimate"), col = c ("red"),
            lty = c ("dashed"), xjust = 0, yjust = 3.5)
  }
  cat ("Coefficients:\n")
  printCoefmat (x$fit.coef.table)
}
## predict method of the package for QR.
##
## Predictions are computed on the Aranda-Ordaz (transformed) scale and
## mapped back to the bounded response scale. With `newdata = NULL`, the
## predictions stored at fit time are reused; otherwise a design matrix is
## built from `newdata` and the underlying lqm/rq fit is queried.
## The back-transformation (previously duplicated verbatim in both
## branches) is factored into one local helper.
predict.ao.qr <- function(object, newdata = NULL, ...){
  ## Observed response range; predictions map into [min.y, max.y],
  ## slightly widened by `epsilon`.
  min.y <- min(object$resp)
  max.y <- max(object$resp)
  eps <- object$epsilon
  phi <- object$phi.new
  ## Back-transform predictions element-wise, so the fit/lower/upper
  ## prediction matrix is handled transparently. Entries falling outside
  ## the transformation's support are clamped to min.y/max.y.
  back.transform <- function(pred){
    out <- rep (0, length(pred))
    for (i in seq_along(pred)){
      if (object$transfo.new == "ao.sym"){
        ## symmetric transformation (phi == 0 never reaches here: the
        ## fitting step remaps it to the asymmetric logit)
        if (phi != 0){
          if (abs(0.5*pred[i]*phi) < 1){
            out[i] <- min.y + (((max.y + eps) - (min.y - eps)) *
                                 ((1 + (phi/2)*pred[i])^(1/phi))/
                                 ((1 + (phi/2)*pred[i])^(1/phi) +
                                    (1 - (phi/2)*pred[i])^(1/phi)))
          } else if (0.5*pred[i]*phi <= -1){
            out[i] <- min.y
          } else {
            out[i] <- max.y
          }
        }
      } else if (object$transfo.new == "ao.asym"){
        if (phi != 0){
          if (exp(pred[i])*phi > -1){
            out[i] <- min.y + ((max.y + eps) - (min.y - eps)) *
              (1 - ((1 + exp(pred[i])*phi)^(-1/phi)))
          } else {
            out[i] <- max.y
          }
        } else {
          ## phi == 0: inverse of the complementary log-log link
          out[i] <- min.y + ((max.y + eps) - (min.y - eps)) *
            (1 - exp(-exp(pred[i])))
        }
      }
    }
    out
  }
  if (is.null(newdata)){
    ## reuse the predictions stored on the fitted object
    predict.all <- object$predict.all
  } else {
    ## build the design matrix for `newdata`: via the stored formula when
    ## available, otherwise `newdata` is taken to be the design matrix
    if (!is.null(object$formula)){
      tt <- terms(object$formula)
      covariates <- delete.response(tt)
      m <- model.frame(covariates, newdata, ...)
      x <- model.matrix(covariates, m)
    } else {
      x <- newdata
    }
    if (object$estimation == "laplace"){
      predict.all <- predict(object$fit, newdata = x, interval = TRUE,
                             level = 0.95)
    } else {
      predict.all <- predict(object$fit, type = c("none"),
                             interval = c("confidence"),
                             level = 0.95,
                             newdata = list(x = x), se = object$se)
    }
  }
  ## three columns: point prediction plus confidence bounds
  fitted <- matrix (back.transform(predict.all), ncol = 3)
  colnames (fitted) <- colnames (predict.all)
  res <- list (fitted = fitted)
  class (res) <- "predict.ao.qr"
  invisible(res)
}
## print method for the predict method of the package
## for QR
print.predict.ao.qr <- function (x, ...){
  ## Show the back-transformed predictions (point estimate and bounds).
  cat ("Estimates:\n")
  print (x$fitted)
}
## fitted method of the package.
##
## Maps the in-sample predictions stored on the fitted object from the
## Aranda-Ordaz (transformed) scale back to the bounded response scale.
## The back-transformation (previously a verbatim copy of the loop in
## predict.ao.qr) is factored into a local helper.
fitted.ao.qr <- function(object, ...) {
  ## Observed response range; fitted values map into [min.y, max.y],
  ## slightly widened by `epsilon`.
  min.y <- min(object$resp)
  max.y <- max(object$resp)
  eps <- object$epsilon
  phi <- object$phi.new
  ## Element-wise back-transformation, so the fit/lower/upper matrix is
  ## handled transparently; out-of-support values clamp to min.y/max.y.
  back.transform <- function(pred) {
    out <- rep (0, length(pred))
    for (i in seq_along(pred)) {
      if (object$transfo.new == "ao.sym") {
        ## symmetric transformation (phi == 0 is remapped to the
        ## asymmetric logit at fitting time, so it never reaches here)
        if (phi != 0) {
          if (abs(0.5*pred[i]*phi) < 1) {
            out[i] <- min.y + (((max.y + eps) - (min.y - eps)) *
                                 ((1 + (phi/2)*pred[i])^(1/phi))/
                                 ((1 + (phi/2)*pred[i])^(1/phi) +
                                    (1 - (phi/2)*pred[i])^(1/phi)))
          } else if (0.5*pred[i]*phi <= -1) {
            out[i] <- min.y
          } else {
            out[i] <- max.y
          }
        }
      } else if (object$transfo.new == "ao.asym") {
        if (phi != 0) {
          if (exp(pred[i])*phi > -1) {
            out[i] <- min.y + ((max.y + eps) - (min.y - eps)) *
              (1 - ((1 + exp(pred[i])*phi)^(-1/phi)))
          } else {
            out[i] <- max.y
          }
        } else {
          ## phi == 0: inverse of the complementary log-log link
          out[i] <- min.y + ((max.y + eps) - (min.y - eps)) *
            (1 - exp(-exp(pred[i])))
        }
      }
    }
    out
  }
  ## three columns: point estimate plus confidence bounds
  fitted <- matrix(back.transform(object$predict.all), ncol = 3)
  colnames (fitted) <- colnames (object$predict.all)
  res <- list(fitted = fitted)
  class (res) <- "fitted.ao.qr"
  invisible(res)
}
## print method for the fitted method of the package
## for QR
print.fitted.ao.qr <- function (x, ...){
  ## Show the back-transformed fitted values.
  cat ("Estimate(s):\n")
  print (x$fitted)
}
|
afc5ffac99923f74f535c132abfe3a612af4e2bd | 82256676fd6b857b7a9fd54ab339d390c1364ab0 | /R/h2o-package/man/toupper.Rd | 052b3ba1d9c26f14da095c406ef7cfe5cbc2fde1 | [
"Apache-2.0"
] | permissive | ivanliu1989/h2o | 8d0def46c070e78718ba13761f20ef2187545543 | e00b367df0a33c400ae33bc869a236f254f625ed | refs/heads/master | 2023-04-27T20:40:16.666618 | 2014-10-23T02:12:36 | 2014-10-23T02:12:36 | 25,618,199 | 0 | 0 | Apache-2.0 | 2023-04-15T23:24:35 | 2014-10-23T03:49:40 | null | UTF-8 | R | false | false | 360 | rd | toupper.Rd | \name{toupper}
\alias{toupper}
\title{Change the elements of a character vector to upper case}
\description{ \code{toupper}, a method for the \code{\link{toupper}} base method. }
\usage{ toupper(x) }
\arguments{
\item{x}{An \code{\linkS4class{H2OParsedData}} object with a single factor column or an R data frame.}
}
\details{
Changes the case to upper.
}
|
dacefd25556e11c25a034b60fd44cf85014781b9 | 38c16978738ffac95bfcf1e78fcb243fc4195305 | /man/create_P_matrix.Rd | e94782f26974b8d4a1feb8832db924d3fd99d57c | [] | no_license | ebenmichael/balancer | ca3e2f733c52450d8e7b5b1a4ebd0d182713d4eb | 55173367e2c91f1a3ce47070f8430c6686a049bd | refs/heads/master | 2023-07-10T20:52:54.547666 | 2023-06-20T14:40:01 | 2023-06-20T14:40:01 | 129,783,286 | 7 | 3 | null | 2023-05-16T19:21:44 | 2018-04-16T17:47:11 | R | UTF-8 | R | false | true | 439 | rd | create_P_matrix.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardize.R
\name{create_P_matrix}
\alias{create_P_matrix}
\title{Create the P matrix for an QP that solves min_x 0.5 * x'Px + q'x}
\usage{
create_P_matrix(n, aux_dim)
}
\arguments{
\item{n}{Dimension of the primary (weight) block of the quadratic form}

\item{aux_dim}{Dimension of the auxiliary block appended to the weights}
}
\value{
P matrix
}
\description{
Create the P matrix for an QP that solves min_x 0.5 * x'Px + q'x
}
|
3376eccafd1c4d8c39bc726456d9436d014216df | 3c74005b8e24b3621274f18fe2c45da56a7ebc89 | /Prediction of Graduate Admission Acceptance.R | ad16381f3df442c0e9402f6a7c408ef64582574e | [] | no_license | purplepatch-everyday/Projects_in_R | 36355bb07a0c79fefc9497f699c4699568b65a52 | 2554e70e7a4712cba809fc9066c3d334c47db17b | refs/heads/main | 2023-07-26T00:40:57.097070 | 2021-09-08T04:06:30 | 2021-09-08T04:06:30 | 390,327,400 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,252 | r | Prediction of Graduate Admission Acceptance.R | # 대학원 입시 데이터를 활용한 합격류 예측
# Predicting graduate-admission chances from application data
data <- read.csv("university.csv", header = TRUE)
head(data,3)
str(data)
# Check for missing values
sum(is.na(data))
# Inspect the unique values of each predictor
# (gives a rough idea of each variable's distribution)
unique(data$GRE.Score)
unique(data$TOEFL.Score)
unique(data$University.Rating)
unique(data$SOP)
unique(data$LOR)
unique(data$Research)
# Most importantly, inspect the target variable
unique(data$Chance.of.Admit)
max(data$Chance.of.Admit)
min(data$Chance.of.Admit)
table(data$University.Rating)
table(data$Research)
# Scatterplot matrix of the variables
plot(data)
# Predict the admission chance via regression
set.seed(2021)
newdata <- data
# View() is interactive-only (RStudio); harmless when run there
View(newdata)
# 80/20 train/test split
train_ratio <- 0.8
datatotal <- sort(sample(nrow(newdata),nrow(newdata)*train_ratio))
train <- newdata[datatotal,]
test <- newdata[-datatotal,]
# Generalized linear model (regression)
library(caret)
ctrl <- trainControl(method = "repeatedcv",repeats = 5)
logistic <- train(Chance.of.Admit~.,
                  data = train,
                  method = "glm",
                  trControl=ctrl,
                  preProcess=c("center","scale"),
                  metric="RMSE")
# glm = generalized linear model
# Data preprocessing (centering/scaling) is required!
# Evaluation metric: RMSE
# For classification problems: accuracy
# For regression problems: RMSE
logistic
logistic_pred <- predict(logistic,newdata=test)
logistic_pred
# Elastic net
ctrl <- trainControl(method = "repeatedcv",repeats = 5)
elastic <- train(Chance.of.Admit~.,
                 data = train,
                 method = "glmnet",
                 trControl=ctrl,
                 preProcess=c("center","scale"),
                 metric="RMSE")
elastic
# Evaluation
# Tuned hyperparameter values:
# alpha = 1 -> L1 penalty (lasso regression)
# alpha = 0 -> L2 penalty (ridge regression)
# lambda controls the overall strength of the penalty
elasticnet_pred <- predict(elastic, newdata=test)
postResample(pred=elasticnet_pred,obs = test$Chance.of.Admit)
# Lower RMSE is better
# Random forest
rf <- train(Chance.of.Admit~.,
            data = train,
            method = "rf",
            trControl=ctrl,
            preProcess=c("center","scale"),
            metric="RMSE")
rf
plot(rf)
# Evaluation
rf_pred <- predict(rf, newdata=test)
postResample(pred=rf_pred,obs = test$Chance.of.Admit)
# Linear support vector machine
svmlinear <- train(Chance.of.Admit~.,
                   data = train,
                   method = "svmLinear",
                   trControl=ctrl,
                   preProcess=c("center","scale"),
                   metric="RMSE")
svmlinear
# Prediction
svmlinear_pred <- predict(svmlinear,newdata=test)
postResample(pred = svmlinear_pred, obs = test$Chance.of.Admit)
# Kernel (polynomial) support vector machine
svmkernel <- train(Chance.of.Admit~.,
                   data = train,
                   method = "svmPoly",
                   trControl=ctrl,
                   preProcess=c("center","scale"),
                   metric="RMSE")
svmkernel
# degree = polynomial degree
# scale = scaling of the inner product
# C = cost
plot(svmkernel)
# Prediction
svmkernel_pred <- predict(svmkernel, newdata = test)
postResample(pred = svmkernel_pred,obs = test$Chance.of.Admit)
# Comparing the model results, the elastic net is ultimately judged the best model.
|
bb936b8645673757d341d574eec904e0045fc3d8 | aeb58f5f6f35bfe2eae165cd6400b512510e9859 | /Plot 5.R | 06a625b1869a30b177a630588efc0217bc8b00eb | [] | no_license | jrmilks74/Exploratory_Data_Analysis_project_2 | 99bd5cfda9211357dbae14189771bedcd5936133 | 063686cf4f6783ec10412e5868ffb02c4c9b33e0 | refs/heads/main | 2023-02-03T20:08:20.116143 | 2020-12-21T00:21:21 | 2020-12-21T00:21:21 | 323,128,681 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 702 | r | Plot 5.R | setwd("~/Desktop/Data_science/Exploratory Data Analysis Week 4 project")
library(ggplot2)
# NEI: PM2.5 emissions records; SCC: source classification code lookup table
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
names(NEI)
names(SCC)
# Join emissions to their source classifications on the SCC key
PM2.5 <- merge(NEI, SCC, by = "SCC")
# Motor-vehicle (ON-ROAD) sources in Baltimore City (fips 24510)
vehicles <- subset(PM2.5, PM2.5$type =="ON-ROAD" & fips == "24510")
# Total emissions per year and vehicle source category
vehicles.sum <- aggregate(Emissions~year + SCC.Level.Two, vehicles, FUN = sum)
# One line per vehicle category across years
ggplot(vehicles.sum, aes(x = year, y = Emissions, col = SCC.Level.Two, group = SCC.Level.Two)) +
        theme_bw() +
        geom_point() +
        geom_line() +
        labs(title = "2.5 PM Emissions in Baltimore City",
             subtitle = "Vehicles",
             x = "Year",
             y = "Emissions (tons)")
7be50dc99e5568a10deb64a4e6ef19b7d01ee88e | c924b68bf4d6af6d570558af1c5925795a3ae814 | /engraver/man/set_names.Rd | 8cb9752c97770262643d724f97325cc27fec1f42 | [] | no_license | igrave/engraver | d6cf83cc697fb8d983e89b9858208637d1285d33 | 0f4ba27f29bf9a806b7a516abedf51dc85c04a0a | refs/heads/master | 2020-03-20T01:31:48.705603 | 2018-12-14T11:45:45 | 2018-12-14T11:45:45 | 137,079,083 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 272 | rd | set_names.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmlFunctions.R
\name{set_names}
\alias{set_names}
\title{Set names on an object}
\usage{
set_names(xml, names)
}
\arguments{
\item{xml}{object}
\item{names}{new names}
}
\value{
Nothing
}
\description{
Assign the supplied names to the given object.
}
|
21ae20e0b7588df238211b8c33af4b6dbca0eb9d | abf3967d9b20c9ca3719911e1558920f670e0688 | /cachematrix.R | 1630b2822716f553096d663ac66f08f328c1900b | [] | no_license | ariadniCH/ProgrammingAssignment2 | 52f784d407c460b8f8495de7ec4c6f56029aa0bf | 1f48f173792224653418e4d4e7afc10b3aedba3e | refs/heads/master | 2021-01-19T20:33:40.927506 | 2017-04-17T15:19:22 | 2017-04-17T15:19:22 | 88,519,379 | 0 | 0 | null | 2017-04-17T15:01:39 | 2017-04-17T15:01:39 | null | UTF-8 | R | false | false | 976 | r | cachematrix.R | ## The first function makeCacheMatrix essentially caches both the original and the inverted matrix
# whereas the second function cacheSolve checks if the matrix's inverse has been cached and if not it calculates its inverse
## makeCacheMatrix builds a special "matrix": a list of accessors closing
## over the matrix `x` and a cache slot `inv` for its inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    ## Replacing the matrix invalidates any cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set,
       get = get,
       setinverse = setinverse ,
       getinverse = getinverse)
}
## cacheSolve checks if the inverted matrix has been cached and if yes it returns it but if not it calculates it
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data)
x$setinverse(m)
m
## Return a matrix that is the inverse of 'x'
}
|
5a4c1522506362c2fe6affdd1c57bb3f5c67da42 | aca8ce5a83444bc99420584246371f247059a2ed | /man/slice.tree.Rd | 888e8de359275c4de68bf9c6d0f3fd38a2aebb7a | [] | no_license | dwbapst/dispRity | c4bddd0c95350781a5b996d9748d0385d6584035 | e7c317cb043ac5e9704fb9bf35bc09d57355b4dd | refs/heads/master | 2021-07-14T06:51:14.944904 | 2017-10-20T14:39:09 | 2017-10-20T14:39:09 | 107,689,056 | 0 | 0 | null | 2017-10-20T14:37:10 | 2017-10-20T14:37:10 | null | UTF-8 | R | false | true | 1,093 | rd | slice.tree.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slice.tree.R
\name{slice.tree}
\alias{slice.tree}
\title{Time slicing a tree.}
\usage{
slice.tree(tree, age, model, FAD, LAD)
}
\arguments{
\item{tree}{A \code{phylo} object with a \code{root.time} element.}
\item{age}{A single \code{numeric} value indicating where to perform the slice.}
\item{model}{One of the following models: \code{"acctran"}, \code{"deltran"}, \code{"punctuated"} or \code{"gradual"}. Is ignored if \code{method = "discrete"}.}
\item{FAD, LAD}{The first and last occurrence data.}
}
\description{
Time slicing through a phylogenetic tree (function modified from paleotree::timeSliceTree).
}
\examples{
set.seed(1)
## Generate a random ultrametric tree
tree <- rcoal(20)
## Add some node labels
tree$node.label <- letters[1:19]
## Add its root time
tree$root.time <- max(tree.age(tree)$ages)
## Slice the tree at age 0.75
tree_75 <- slice.tree(tree, age = 0.75, "deltran")
}
\seealso{
\code{\link[paleotree]{timeSliceTree}}, \code{\link{time.subsamples}}.
}
\author{
Thomas Guillerme
}
|
2ec78ec6354ff5520d9f2b0f028a550635e60656 | b56dea76b16f100e3f6a34f704e80a749f909aed | /make_plot.R | 594d8decb6dadc0e6d90e8a843e28beab70b30df | [] | no_license | xnie/adr | cdfd057a6d7ab05953d4b5952afde5d94e39bba5 | e848a4327f35dab78b4abc4e9c385f39f556850e | refs/heads/master | 2022-10-25T03:41:55.248082 | 2020-06-13T20:18:17 | 2020-06-13T20:18:17 | 260,052,856 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,954 | r | make_plot.R | rm(list=ls())
library(tidyverse)
library(RColorBrewer)
library(latex2exp)
library(grid)
library(tidyr)
library(dplyr)
## Color palette shared by all plotting helpers below.
pal <- brewer.pal(n = 8, name = "Dark2")
## Set the notebook plot size (repr options) in inches.
plotsize <- function(x, y) options(repr.plot.width = x, repr.plot.height = y)
plotsize(8, 15)
## Plot log-regret vs. sample size for the ADR/IPW/Q-Opt learners, with a
## shaded +/- SE band per method, and save the figure to `fnm`.
## Fixes: the ribbon previously mapped the constant string "grey" as its
## fill aesthetic, unlike the sibling helpers which map fill = method; the
## plot is also passed explicitly to ggsave() instead of relying on the
## last_plot() side effect.
make_regret_plot = function(data, fnm) {
    ## Reshape to one row per (n, method) with regret/regretupper/
    ## regretlower columns.
    df = data %>%
        select(n, wadr_regret, wipw_regret, fq_regret,
               wadr_regretupper, wipw_regretupper, fq_regretupper,
               wadr_regretlower, wipw_regretlower, fq_regretlower) %>%
        gather(v, value, wadr_regret:fq_regretlower) %>%
        separate(v, into = c("method", "line")) %>%
        arrange(n) %>%
        spread(line, value)

    p = ggplot(df, aes(x = n,
                       y = log(regret),
                       colour = method)) +
        geom_point(size = 3) +
        geom_line(size = 2) +
        scale_x_log10() +
        theme_bw(base_size = 11) +
        ## band per method (was fill = "grey"), consistent with the other
        ## plotting helpers
        geom_ribbon(aes(ymin = log(regretlower), ymax = log(regretupper),
                        fill = method),
                    alpha = 0.2) +
        guides(fill = FALSE) +
        scale_color_manual(name = "method",
                           breaks = c("wadr", "wipw", "fq"),
                           labels = c("ADR",
                                      "IPW",
                                      "Q-Opt"),
                           values = c(pal[3], pal[1], pal[2])) +
        scale_fill_manual(values = c(pal[3], pal[1], pal[2]))

    ggsave(fnm, plot = p)
}
## Plot log-MSE vs. sample size for the ADR/IPW learners, with a shaded
## +/- SE band per method, and save the figure to `fnm`. The plot is
## passed explicitly to ggsave() instead of relying on last_plot().
make_mse_plot = function(data, fnm) {
    ## Reshape to one row per (n, method) with mse/mseupper/mselower.
    df = data %>%
        select(n, wadr_mse, wipw_mse, wadr_mseupper, wipw_mseupper,
               wadr_mselower, wipw_mselower) %>%
        gather(v, value, wadr_mse:wipw_mselower) %>%
        separate(v, into = c("method", "line")) %>%
        arrange(n) %>%
        spread(line, value)

    p = ggplot(df, aes(x = n,
                       y = log(mse),
                       colour = method)) +
        geom_point(size = 3) +
        geom_line(size = 2) +
        scale_x_log10() +
        theme_bw(base_size = 11) +
        geom_ribbon(aes(ymin = log(mselower), ymax = log(mseupper),
                        fill = method),
                    alpha = 0.2) +
        guides(fill = FALSE) +
        scale_color_manual(name = "method",
                           breaks = c("wadr", "wipw"),
                           labels = c("ADR",
                                      "IPW"),
                           values = c(pal[1], pal[2])) +
        scale_fill_manual(values = c(pal[1], pal[2]))

    ggsave(fnm, plot = p)
}
## Plot log evaluation-MSE vs. sample size for the ADR/IPW/AIPW/Q-Eval
## estimators, with a shaded +/- SE band per method, and save to `fnm`.
## The plot is passed explicitly to ggsave() instead of relying on
## last_plot().
make_eval_mse_plot = function(data, fnm) {
    ## Reshape to one row per (n, method) with evalmse and its bounds.
    df = data %>%
        select(n, wadr_evalmse, wipw_evalmse, waipw_evalmse, q_evalmse,
               wadr_evalmseupper, wipw_evalmseupper,
               wadr_evalmselower, wipw_evalmselower,
               waipw_evalmseupper, waipw_evalmselower,
               q_evalmseupper, q_evalmselower) %>%
        gather(v, value, wadr_evalmse:q_evalmselower) %>%
        separate(v, into = c("method", "line")) %>%
        arrange(n) %>%
        spread(line, value)

    p = ggplot(df, aes(x = n,
                       y = log(evalmse),
                       colour = method)) +
        geom_point(size = 3) +
        geom_line(size = 2) +
        ylab("log(mse)") +
        scale_x_log10() +
        theme_bw(base_size = 11) +
        geom_ribbon(aes(ymin = log(evalmselower), ymax = log(evalmseupper),
                        fill = method),
                    alpha = 0.2) +
        guides(fill = FALSE) +
        scale_color_manual(name = "method",
                           breaks = c("wadr", "wipw", "waipw", "q"),
                           labels = c("ADR",
                                      "IPW",
                                      "AIPW",
                                      "Q-Eval"),
                           values = c(pal[3], pal[1], pal[4], pal[2])) +
        scale_fill_manual(values = c(pal[3], pal[1], pal[4], pal[2]))

    ggsave(fnm, plot = p)
}
## Plot estimated policy value vs. sample size for the ADR/IPW/AIPW/Q-Eval
## estimators against the oracle value (horizontal black line), with a
## shaded +/- SE band per method, and save to `fnm`. The plot is passed
## explicitly to ggsave() instead of relying on last_plot().
make_policy_eval_plot = function(data, fnm) {
    ## Average oracle value across replications (reference line).
    oracle_value = mean(data$ora_v)
    ## Reshape to one row per (n, method) with value and its bounds.
    df = data %>%
        select(n, wadr_value, wipw_value, waipw_value, q_value,
               wadr_valueupper, wipw_valueupper, waipw_valueupper,
               q_valueupper, wadr_valuelower, wipw_valuelower,
               waipw_valuelower, q_valuelower) %>%
        gather(v, value, wadr_value:q_valuelower) %>%
        separate(v, into = c("method", "line")) %>%
        arrange(n) %>%
        spread(line, value)

    p = ggplot(df, aes(x = n,
                       y = value,
                       colour = method)) +
        geom_point(size = 3) +
        geom_hline(yintercept = oracle_value,
                   color = "black", size = 2) +
        geom_line(size = 2) +
        scale_x_log10() +
        theme_bw(base_size = 11) +
        geom_ribbon(aes(ymin = valuelower, ymax = valueupper,
                        fill = method),
                    alpha = 0.2) +
        guides(fill = FALSE) +
        scale_color_manual(name = "method",
                           breaks = c("wadr", "wipw", "waipw", "q"),
                           labels = c("ADR",
                                      "IPW",
                                      "AIPW",
                                      "Q-Eval"),
                           values = c(pal[3], pal[1], pal[4], pal[2])) +
        scale_fill_manual(values = c(pal[3], pal[1], pal[4], pal[2]))

    ggsave(fnm, plot = p)
}
# Number of standard errors used for the confidence bands.
alpha = 1
# For each simulation setup, read the aggregated results, derive the
# upper/lower band columns (estimate +/- alpha * SE) consumed by the
# plotting helpers above, then write one figure set per parameter combo.
for (setup in c(1:2)) {
    out = read.csv(paste0("results-simu", setup, "-all.csv"))
    # Regret bands per learner.
    out$wadr_regretupper = out$wadr_regret + alpha*out$wadr_regret_se
    out$wadr_regretlower = out$wadr_regret - alpha*out$wadr_regret_se
    out$wipw_regretupper = out$wipw_regret + alpha*out$wipw_regret_se
    out$wipw_regretlower = out$wipw_regret - alpha*out$wipw_regret_se
    out$fq_regretupper = out$fq_regret + alpha*out$fq_regret_se
    out$fq_regretlower = out$fq_regret - alpha*out$fq_regret_se
    # MSE bands.
    out$wadr_mseupper = out$wadr_mse + alpha*out$wadr_mse_se
    out$wadr_mselower = out$wadr_mse - alpha*out$wadr_mse_se
    out$wipw_mseupper = out$wipw_mse + alpha*out$wipw_mse_se
    out$wipw_mselower = out$wipw_mse - alpha*out$wipw_mse_se
    # Policy-value bands per evaluation estimator.
    out$wadr_valueupper = out$wadr_value + alpha*out$wadr_value_se
    out$wadr_valuelower= out$wadr_value - alpha*out$wadr_value_se
    out$wipw_valueupper = out$wipw_value + alpha*out$wipw_value_se
    out$wipw_valuelower= out$wipw_value - alpha*out$wipw_value_se
    out$waipw_valueupper = out$waipw_value + alpha*out$waipw_value_se
    out$waipw_valuelower= out$waipw_value - alpha*out$waipw_value_se
    out$q_valueupper = out$q_value + alpha*out$q_value_se
    out$q_valuelower= out$q_value - alpha*out$q_value_se
    # Evaluation-MSE bands.
    out$wadr_evalmseupper = out$wadr_evalmse + alpha*out$wadr_evalmse_se
    out$wadr_evalmselower= out$wadr_evalmse - alpha*out$wadr_evalmse_se
    out$wipw_evalmseupper = out$wipw_evalmse + alpha*out$wipw_evalmse_se
    out$wipw_evalmselower= out$wipw_evalmse - alpha*out$wipw_evalmse_se
    out$waipw_evalmseupper = out$waipw_evalmse + alpha*out$waipw_evalmse_se
    out$waipw_evalmselower= out$waipw_evalmse - alpha*out$waipw_evalmse_se
    out$q_evalmseupper = out$q_evalmse + alpha*out$q_evalmse_se
    out$q_evalmselower= out$q_evalmse - alpha*out$q_evalmse_se
    # Setup 2 varies noise, beta and sigma: one figure set per combination.
    if (setup == 2) {
        for (noise in unique(out$noise)){
            for (beta in unique(out$beta)) {
                for (sigma in unique(out$sigma)) {
                    data = out[out$beta==beta & out$sigma==sigma & out$noise ==noise,]
                    fnm = paste("setup", setup, "noise", noise, "beta", beta, "sigma", sigma, sep="-")
                    # Decimal points would break the file name; replace with "_".
                    fnm = str_replace(fnm, "\\.", "_")
                    make_regret_plot(data, paste0("plots/regret-", fnm, ".pdf"))
                    make_mse_plot(data, paste0("plots/mse-", fnm, ".pdf"))
                    make_policy_eval_plot(data, paste0("plots/eval-", fnm, ".pdf"))
                    make_eval_mse_plot(data, paste0("plots/evalmse-", fnm, ".pdf"))
                }
            }
        }
    }
    # Setup 1 varies only the noise level.
    if (setup == 1) {
        for (noise in unique(out$noise)) {
            data = out[out$noise==noise,]
            fnm = paste("setup", setup, "noise", noise, sep="-")
            fnm = str_replace(fnm, "\\.", "_")
            make_regret_plot(data, paste0("plots/regret-", fnm, ".pdf"))
            make_mse_plot(data, paste0("plots/mse-", fnm, ".pdf"))
            make_policy_eval_plot(data, paste0("plots/eval-", fnm, ".pdf"))
            make_eval_mse_plot(data, paste0("plots/evalmse-", fnm, ".pdf"))
        }
    }
}
|
8558c0e7bde0f8c848cd537626b404df6f4e246f | cd1090c8c6ee59b5f2ef6a197e775a5a50d52fdd | /sa_pipeline.R | b62489c71e2836464c64060e7083a735ee0bb9b6 | [
"MIT"
] | permissive | adrianclo/phenotyper | d659f38e538501abfd2e26b7fbba5f8972dffb06 | 0292a943177c1df7ef3a9920b379fe51f24d39bf | refs/heads/master | 2023-04-16T02:53:15.185435 | 2023-01-22T10:47:17 | 2023-01-22T10:47:17 | 195,074,465 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 289 | r | sa_pipeline.R | source("sa_functions.R")
# INITIAL PIPELINE----------------------------------------------------
# FILL IN DATA DIRECTORY (path to the raw data exports)
data_dir <- "ADD/HERE/YOUR/DIRECTORY"
# ml <- readRDS(paste0(data_dir, ".RDS")) # if ml already saved in .RDS format
# Import the raw data. Use FALSE rather than the reassignable alias F.
ml <- import_raw_cw(data_dir = data_dir, zip = FALSE)
9de2c5bcbd14eb1297c7d5b5a5617c969d6f41fe | f4268a1c8be6a274cbc8810800b848ce4538816a | /data_kiwi/main_clust.r | 08d3cdaf536fca92b4753e86a9b4b23d0c709a06 | [] | no_license | Chloe-bmnt/Stage_M2 | 1e56c9247069ecff3a1c6c1e836ec5d9665d7223 | 39a55d1a5a7f12c62da00fb39f529902cf1f7008 | refs/heads/master | 2023-05-13T18:04:06.562263 | 2021-06-03T11:39:38 | 2021-06-03T11:39:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,082 | r | main_clust.r | library(getopt,quietly=TRUE, warn.conflicts=FALSE)
# Parse the command-line options: main data file, fruit-weight file,
# output file and number of CPU cores.
args <- commandArgs(trailingOnly=TRUE)
spec <- matrix(c(
  "mainFile","f",1, "character",
  "weightFile","w",1, "character",
  "finalFile","a",1,"character",
  "nCPU","n",1,"numeric"),
  byrow=TRUE, ncol=4)
opt <- getopt(spec)
print(opt$workDir)
setwd("./")
# Model helper functions (fitPoids, mu, normaMean, solgss_Borne, ...).
source("../model/global.r")
# test_data<-lista[[30]]
# Load the kiwi fruit-weight series and the transcript/protein data.
poids_kiwi<-read_csv(opt$weightFile,col_names=c("t","y"))
test_data<-loadData(data = opt$mainFile,trans_sheet = "Transcripts",prot_sheet = "Proteins",F)
test_list<-test_data$parse
days_kiwi<-test_list[[1]][["DPA"]]
# Fit a double-sigmoid growth model to the weight data and derive the
# growth rate (mu) and relative growth rate over time.
fitWe<<-"double_sig"
coef_poids<-fitPoids(poids_kiwi$t,poids_kiwi[,2],fitWe)
poids_coef<<-coef_poids$coefs
formula_poids<<-coef_poids$formula
val_mu<-mu(c(poids_kiwi$t),fitWe,poids_coef,formula_poids,dpa_analyse = NULL)
data_mu<-data.frame("DPA"=c(poids_kiwi$t),"Mu"=val_mu)
g_mu<-ggplot(data_mu,aes(x=DPA,y=Mu))+geom_line()+theme+xlab("DPA")+ylab(bquote("Growth rate "~(days^-1)))
data_rel_mu<-data.frame("DPA"=c(poids_kiwi$t),"RGR"=val_mu/fitted(coef_poids[["formula"]]))
g_rel_mu<-ggplot(data_rel_mu,aes(x=DPA,y=RGR))+geom_line()+theme+xlab("DPA")+ylab(bquote("Relative growth rate "~(days^-1)))
ggarrange(g_mu,g_rel_mu,ncol = 2)
# Lower bound constant for the synthesis rate ks.
ksmin=3*4*3*3.6*24
score=0
cont<-0
dir.create("solK")
numCores <- opt$nCPU
print(numCores)
# cl <- makeCluster(detectCores()-1, type='PSOCK')
# registerDoParallel(numCores)
# Fit each transcript/protein pair in parallel: fit the mRNA curve, then
# solve for the synthesis (ks) and degradation (kd) rates.
res_list<-mclapply(test_list,function(el){
  tryCatch({
    cont<-cont+1
    res<-list()
    el[["Protein_val"]]<-na.omit(el[["Protein_val"]])
    # Biologically plausible bounds for ks, rescaled by the mean
    # transcript/protein levels of this gene.
    bound_ks<-c(4.5e-3*mean(el$Transcrit_val,na.rm = T)/mean(el$Protein_val,na.rm = T),1440*mean(el$Transcrit_val,na.rm = T)/mean(el$Protein_val,na.rm = T))
    norm_data<-normaMean(el$Protein_val,el$Transcrit_val,ksmin)
    fitR<<-"3_deg_log"
    fittedmrna<<-fit_testRNA(el$DPA,norm_data$mrna,fitR)
    el$plot_mrna<-plotFitmRNA(el$DPA,norm_data$mrna,solmRNA(el$DPA,fittedmrna$coefs,fitR),el[["Transcrit_ID"]])
    par_k<-solgss_Borne(el$DPA,as.vector(norm_data$prot),as.numeric(norm_data$ks),bound_ks,"LM")
    # Only keep genes for which the optimization returned a solution.
    if (!is.null(par_k)){
      par_k[["plot_fit_prot"]]<-plotFitProt(el$DPA,as.vector(norm_data$prot),par_k$prot_fit,el[["Transcrit_ID"]])
      el$SOL<-par_k
      # One result row per gene for the final summary table.
      res[["TranscritID"]]<-el[["Transcrit_ID"]]
      res[["Weight formula"]]<-"Double sigmoid"
      res[["Weight error"]]<-coef_poids[["error"]]
      res[["mRNA formula"]]<-fitR
      res[["mRNA error"]]<-fittedmrna[["error"]]
      res[["Mean mRNA concentration"]]<-mean(el[["Transcrit_val"]],na.rm = T)
      res[["Mean protein concentration"]]<-mean(el[["Protein_val"]],na.rm = T)
      res[["Starting protein concentration value"]]<-unname(el[["SOL"]][["solK"]][1,1])
      res[["ks"]]<-unname(el[["SOL"]][["solK"]][2,1])*res[["Mean protein concentration"]]/res[["Mean mRNA concentration"]]
      res[["Normalized ks"]]<-unname(el[["SOL"]][["solK"]][2,1])
      res[["kd"]]<-unname(el[["SOL"]][["solK"]][3,1])
      res[["Fitting error value"]]<-el[["SOL"]][["error"]][["errg"]][1]
      res[["Fitting error score"]]<-el[["SOL"]][["error"]][["score"]]
      res[["Fitting error message"]]<-el[["SOL"]][["error"]][["message"]]
      res[["Optimization error score"]]<-el[["SOL"]][["opt_eval"]][["score"]]
      res[["Optimization error message"]]<-el[["SOL"]][["opt_eval"]][["message"]]
      res[["Valid confidence ellipse"]]<-el[["SOL"]][["validEllipse"]]
    }
    return(list("Table results"=res,"Data"=el))
  },error=function(e){cat("ERROR :",conditionMessage(e)," for ",el[["Transcrit_ID"]], "\n")})
},mc.cores = numCores,mc.preschedule=TRUE)
# Separate fitted data from summary rows; drop genes with no result.
all_data<-do.call("list",lapply(res_list,function(x) x[["Data"]]))
res_list<-do.call("list",lapply(res_list,function(x) x[["Table results"]]))
valid_res<-Filter(function(x) {length(x) > 0}, res_list)
del_results<-Filter(function(x) {length(x) == 0}, res_list)
final_table<-rbindlist(valid_res)
# Touch the stored plots (drawn when run interactively).
for (el in all_data){
  el[["plot_mrna"]]
  el[["SOL"]][["plot_fit_prot"]]
  el[["SOL"]][["confEllipsePlot"]]
}
# Write the per-gene kinetic-parameter table.
write.csv(final_table,opt$finalFile)
# save(test_list,valid_res,del_results,file=path.expand("./resultsv1.RData"))
|
8f1c151312bbc4423081d1c49f24a1597ebddc34 | cb61b810460b061c78de4c00e5d0e6457cecc025 | /R/computePOD.R | 5171dc79390bdbebd6b030df381c1fc6339529b3 | [] | no_license | cran/POD | 81305ccfe8e0bfbd2795054968418b41f5bcec01 | 967173c315395ab69853ff4f87a880f880ec6f8a | refs/heads/master | 2021-07-15T00:22:24.966094 | 2020-06-30T07:40:07 | 2020-06-30T07:40:07 | 169,495,453 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,012 | r | computePOD.R | #' @title Compute the Probability Of Detection (POD)
#'
#' @description Compute the Probability Of Detection (POD) in qualitative PCR experiments carried out by a single laboratory.
#'
#' @param x Nominal DNA concentrations (numeric vector)
#' @param lambda The fraction of detected DNA fragments (numeric scalar)
#' @param b correction parameter (numeric scalar)
#' @return The POD function as described in Uhlig et al., 2015
#'
#' @references
#' Uhlig et al. Accred Qual Assur (2015) 20: 75. https://doi.org/10.1007/s00769-015-1112-9
#'
#' @export
#' @name computePOD
#'
#' @examples
#' # the optimal POD
#' computePOD(exp(seq(1, 10, 1)), 1, 1)
#' # some other POD
#' computePOD(exp(seq(1, 10, 1)), 0.5, 1.29)
computePOD <- function(x, lambda=1, b=1){
    ## Guard against vector-valued parameters: keep only the first entry
    ## and warn, as documented.
    if (length(lambda) > 1) {
        warning("Taking only first value of 'lambda'")
        lambda <- lambda[[1]]
    }
    if (length(b) > 1) {
        warning("Taking only first value of 'b'")
        b <- b[[1]]
    }
    ## POD(x) = 1 - exp(-lambda * x^b), vectorized over x (Uhlig et al., 2015).
    1 - exp(-lambda * x^b)
}
|
f299820e758e2184079276b8e23fbbb9494a9454 | cc28e068e96b5d85079bae9afa2f445d3fd65b22 | /src/_evaluation/evaluate.R | 3180f936d55b16737c7f3b90e71e835c7df1d722 | [] | no_license | mayrop/thesis | 652867f0493ffa52a52f835bd6907767642575bc | a609b4e71e3ae7a8e20ae133f805095c4c55727b | refs/heads/master | 2020-05-25T17:21:51.642376 | 2019-11-02T01:19:06 | 2019-11-02T01:19:06 | 187,906,763 | 2 | 0 | null | 2019-11-02T01:19:09 | 2019-05-21T20:13:23 | R | UTF-8 | R | false | false | 1,982 | r | evaluate.R | # Get the metrics for the 3 algorithms
# Collect train and test metrics for each of the three fitted classifiers.
for (algorithm in c("glm", "svm", "rf")) {
  # train data
  my_results[["train"]][[algorithm]] <- c(
    name = config$algorithms[[algorithm]],
    my_models[[algorithm]]$results
  )
  # test data
  my_results[["test"]][[algorithm]] <- c(
    name = config$algorithms[[algorithm]],
    my_models[[algorithm]]$evaluation$metrics
  )
}
##################################################
#
# Metrics for all classifiers...
# Train metrics: floor to 3 decimals and format each metric as
# "estimate (SD)".
df1 <- rbindlist(my_results[["train"]], fill = TRUE) %>%
  dplyr::mutate_if(
    is.numeric, plyr::round_any, accuracy = .001, f = floor
  ) %>%
  select(
    -parameter, -sigma, -C, -mtry
  ) %>%
  mutate(
    Set = "Train",
    AccuracyText = paste(Accuracy, " (", AccuracySD, ")", sep=""),
    SensitivityText = paste(Sensitivity, " (", SensitivitySD, ")", sep=""),
    SpecificityText = paste(Specificity, " (", SpecificitySD, ")", sep=""),
    PPVText = paste(PPV, " (", PPVSD, ")", sep=""),
    NPVText = paste(NPV, " (", NPVSD, ")", sep=""),
    AUCText = paste(AUC, " (", AUCSD, ")", sep="")
  ) %>%
  select(
    name,
    Set,
    AccuracyText,
    SensitivityText,
    SpecificityText,
    PPVText,
    NPVText,
    AUCText
  )
# Test metrics: point estimates only (no resampling SD available).
df2 <- rbindlist(my_results[["test"]], fill = TRUE) %>%
  dplyr::mutate_if(
    is.numeric, plyr::round_any, accuracy = .001, f = floor
  ) %>%
  mutate(
    Set = "Test",
    AccuracyText = as.character(Accuracy),
    SensitivityText = as.character(Sensitivity),
    SpecificityText = as.character(Specificity),
    PPVText = as.character(PosPredValue),
    NPVText = as.character(NegPredValue),
    AUCText = as.character(AUC)
  ) %>%
  select(
    name,
    Set,
    AccuracyText,
    SensitivityText,
    SpecificityText,
    PPVText,
    NPVText,
    AUCText
  )
# Stack train/test rows, transpose so metrics become rows, then interleave
# the train/test columns per classifier.
as.data.frame(
  t(
    union(df1, df2) %>%
      # Putting 2 cols into 1
      tidyr::unite("name", c(name, Set))
  )
) %>%
  dplyr::select(
    V1,
    V4,
    V2,
    V5,
    V3,
    V6
  )
|
a71412a2d0539461ea6d36959c1f2774be709999 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/DQBF-TRACK-2018/E1+A1/Experiments/tentrup_pec_multiplier_3_2/tentrup_pec_multiplier_3_2.R | 69545584bca4df255b392b10e0a5ea218474775a | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 700 | r | tentrup_pec_multiplier_3_2.R | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1324
c
c Performing A1-Autarky iteration.
c Running Lingeling ...
c
c Remaining clauses count after A-Reduction: 1324
c
c Input Parameter (command line, file):
c input filename dqbf18//tentrup_pec_multiplier_3_2.dqdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 526
c no.of clauses 1324
c no.of taut cls 2
c
c Output Parameters:
c remaining no.of clauses 1324
c
c dqbf18//tentrup_pec_multiplier_3_2.dqdimacs 526 1324 E1+A1 [] 2 99 427 NONE
|
007a78f3cd4fed968877188aafbf789db2b9e909 | b1e3070bb70ab9f831ee05259f36066a553af095 | /cachematrix.R | 6122848629c0bf7ea08747c13d4a0a41b62f99db | [] | no_license | mloxton/ProgrammingAssignment2 | 8a092ce785ed012e32d06d8bbed8c499f65b80d7 | 80554cf813dbdca104964c89ef6eeb365280b991 | refs/heads/master | 2020-12-24T22:30:11.052879 | 2015-03-03T14:12:21 | 2015-03-03T14:12:21 | 31,554,806 | 0 | 0 | null | 2015-03-02T18:06:46 | 2015-03-02T18:06:45 | null | UTF-8 | R | false | false | 979 | r | cachematrix.R | ## The two functions together receive a matrix and perform a matrix inversion
## If the matrix cannot be inverted, the function stops with a warning.
## The <<- operator is used in case the inversion has already been done
## This function builds the recipient matrix structure, and tests to see
## if it has been done before
## NOTE(review): despite the comments above, nothing here actually checks a
## cache before recomputing -- `my.matrix` is overwritten in the global
## environment on every call and cacheSolve() always re-runs solve().
makeCacheMatrix <- function(x = matrix()) {
  my.matrix<<-x #have we seen you before
  cacheSolve(my.matrix)
}
## This function tests if the matrix can be inverted, and then
## uses solve() to perform an inversion.
## Bug fix: the guard previously read `if (round(test, digit=5))`, which is
## truthy exactly when the determinant is NON-zero -- so every invertible
## matrix was rejected with "Determinant is zero" and singular matrices fell
## through to solve(). The check now stops only when the determinant rounds
## to zero, and the `digits` argument is spelled out (was partial-matched as
## `digit`).
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  test <- det(x)  # determinant near zero => numerically singular
  if (round(test, digits = 5) == 0) {  # very small values are effectively zero
    stop("Determinant is zero, matrix cannot be inverted")
  }
  inv.x <<- solve(x)  # keep the original's global-assignment side effect
  inv.x  # last value is returned
}
|
d1406ba7e8f25b475f0fc3e4735f682101702a75 | bd52b68e4cf28e04d499b186cf66ea56e7e1a012 | /HW11/Q6.R | 528c95e5c7af68dc82873d5eecec3c4cfd341c78 | [] | no_license | arakhsha/DA_Spring_2018_HWs | ac96ea0b434f0fcb686b46e5edfe8f35688b5db0 | 551f511671bc80ebd76a532076d121b7ff92f24d | refs/heads/master | 2020-03-23T10:59:07.836458 | 2018-07-20T15:07:00 | 2018-07-20T15:07:00 | 141,474,942 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 221 | r | Q6.R | # library(h2o)
# h2o.init()
# Fit a Poisson GLM (count outcome) predicting total disaster deaths from
# location and earthquake severity, with 5-fold cross-validation on h2o.
# NOTE(review): assumes `disaster` is a data frame already loaded in the
# session and that h2o is attached and initialised (the commented lines
# above) -- confirm before running standalone.
model = h2o.glm(
  x = c('LONGITUDE', 'LATITUDE', 'FOCAL_DEPTH', 'EQ_PRIMARY'),
  y = 'TOTAL_DEATHS',
  family = 'poisson',
  training_frame = as.h2o(disaster),
  nfolds = 5)
summary(model)
d06e03fa46e294af443e41cd51012597b9ad0c1d | 5319f346e19fc12c1bc76076f542d4c1c1088eff | /Plot2.R | 6be1e8676272154839b3e196d15268de0f97ac87 | [] | no_license | SuhasKamath1991/Exploratory-Data-Analysis-Week-1-Project | dc1885e1dce437ad89e3b74551b4e5300ca07747 | 738a3966d7114e6a8450fc9bc75aad30edfc8b62 | refs/heads/master | 2021-08-10T12:59:15.259791 | 2017-11-12T15:47:14 | 2017-11-12T15:47:14 | 110,436,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 624 | r | Plot2.R | data <- "./household_power_consumption.txt"
# Plot 2: line plot of Global Active Power over 1-2 Feb 2007 from the UCI
# household power data (path held in `data`, defined just above), saved as
# plot2.png. "?" marks missing readings in the raw file, hence na.strings.
fulldata <- read.csv(data, header=T, sep=';', na.strings="?", stringsAsFactors=F)
# Combine the separate Date and Time columns into one POSIXlt timestamp.
datetime <- strptime(paste(fulldata$Date, fulldata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
fulldata <- cbind(fulldata, datetime)
fulldata$Date <- as.Date(fulldata$Date, format = "%d/%m/%Y")
fulldata$Time <- format(fulldata$Time, format = "%H:%M:%S")
# Keep only the two days of interest; %in% against explicit Dates replaces
# the chained `== "..." | == "..."` test and selects the same rows.
reqdata <- subset(fulldata, Date %in% as.Date(c("2007-02-01", "2007-02-02")))
png("plot2.png", height = 480, width = 480)
plot(reqdata$datetime, reqdata$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = " ")
dev.off()
d3ce4d0520f45ace47d28287fd87247abb86b88d | 50fa7861b6ced4e0b0ddadda35abb94d09342866 | /EDA_Project2/Q5 Baltimore Motor Vehicle Emission Trends.R | 034386d47d4e9463d8b47b15a69971219083ecb4 | [] | no_license | tribetect/ExData_Plotting2 | 1ba55ae7654e6f8ee169de5df5ab115861b0d81f | 09cc5b15263004f54f9234e44e4d4f32d687312c | refs/heads/master | 2016-09-06T12:58:08.434116 | 2015-06-14T21:43:13 | 2015-06-14T21:43:13 | 37,047,433 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,708 | r | Q5 Baltimore Motor Vehicle Emission Trends.R | #EDA Project 2 Question 5
#----------------------------------------------------------------------------
#
# How have emissions from motor vehicle sources changed
# from 1999-2008 in Baltimore City?
# fips == "24510"
#----------------------------------------------------------------------------
#Pre-flight check: if both data files are available in the working directory:
foundData <- (("summarySCC_PM25.rds" %in% dir()) && #AND
              ("Source_Classification_Code.rds" %in% dir())) #End of file checks
#Terminate script, if datafiles not found in working directory
error_message = paste0("Terminating Script: Data Files Not Found in Current Working Directory: ", getwd())
if(!foundData) { stop(error_message) }
require(ggplot2)
require(plyr) #fast subsetting
#create a PNG device
png(file = "plotQ5.png", bg = "transparent", width = 480, height = 480, units = "px")
#readRDS() the two files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Subset coal sources
# NOTE(review): the comment above says "coal" but the pattern selects
# on-road (motor vehicle) sources -- the comment, not the code, looks stale.
motorIndex <- grep("Onroad",
                   SCC$Data.Category,
                   ignore.case = TRUE)
#Motor vehicle source search index, 1709 of 11,717 records in SCC
SCC_motor <- SCC[motorIndex,]$SCC #SCC codes (factor vector) for motor vehicles
SCC_motor <- trimws(as.character(SCC_motor)) #cleanup for match-and-subset NEI next
#subset NEI with plyr/mutate for Baltimore motor vehicles
NEI_motor <- mutate(NEI, motorYN = (NEI$SCC %in% SCC_motor)) #tag motor vehicles obervations TRUE/FALSE:
#FALSE TRUE
#1020590 5477061
NEI_motor_Baltimore <- subset(NEI_motor, fips == 24510 & motorYN == TRUE)
#down to 1515 observations; drop 1.02 million FALSE, keep 5.5mil TRUEs, then subset Baltimore
#Use TApply to calculate Emission sums by year
# NOTE(review): `$Emission` presumably resolves to an "Emissions" column only
# through data.frame partial matching -- confirm the column name in the RDS
# and spell it out.
annualPM25 <- tapply(NEI_motor_Baltimore$Emission,
                     NEI_motor_Baltimore$year, sum, simplify = TRUE)
annualPM25 <- as.data.frame(annualPM25)#Y for plot
years <- as.integer(dimnames(annualPM25)[[1]]) #X for plot
#GGPlot object prep
result <- ggplot(data = annualPM25, mapping = aes(factor(years), annualPM25, group = 1))
#visualize quantities through point sizes
sizes <- as.integer(annualPM25[,1]/20)
#"Paint" the visualization
result <- result + geom_point(color = sizes, size = sizes)+geom_line()
result <- result + labs(x = "Year", y = "Emissions (Tons of PM25)", title = "Baltimore Motor Vehicle Emissions (1999 - 2008)")
result <- result + geom_text(aes(label=as.integer(annualPM25)),hjust=-1, vjust=1)
print(result)
#Finishup
dev.off()
#Pending items:
## add a watermark:
#mtext(text = paste("BY github/tribetect", Sys.time()),
#      side = 4, line = 0.5, col = "green")
|
cb7e0c887077f19406aeeb471da9e84d8839876f | 321ee9cd23aa55d54a656f6926fd4a489ded1ffa | /09_Database.R | 03f85fc8cbd2632de50c44b976732bed43dce3ac | [] | no_license | shayan-taheri/statistics | 4b9f93404eff6721290d2e37a869fe96c4a205a5 | 1c669250d99a9ebcfa3293d62476bc9d7e43a7ba | refs/heads/master | 2020-05-16T17:56:13.481613 | 2013-12-16T00:36:16 | 2013-12-16T00:36:16 | 183,210,432 | 0 | 1 | null | 2019-04-24T10:57:18 | 2019-04-24T10:57:18 | null | UTF-8 | R | false | false | 198 | r | 09_Database.R | library(RODBC)
# Connect over ODBC to the local SQL Server `mip_portal` database using
# Windows integrated authentication (trusted_connection), then pull the full
# table catalogue from INFORMATION_SCHEMA into a data frame.
dbhandle <- odbcDriverConnect('driver={SQL Server};server=localhost;database=mip_portal;trusted_connection=true')
res <- sqlQuery(dbhandle, 'select * from information_schema.tables')
# NOTE(review): the handle is never closed; call odbcClose(dbhandle) when done.
|
c93f00f21221fba60bb049752a5b8015ab8e282c | 1165412544f687e4eb94abd5a02b6cc636f2b738 | /man/Zplot.Rd | 852b21db2d48e100ba116e661395c5e4eb0850f9 | [] | no_license | mpierrejean/c3co | e70258299a8df79d648cb3e9f021a808b11dacc7 | 08bd5d936327753c30eebcc3cb9616bb0747e6db | refs/heads/master | 2020-05-24T09:56:25.917358 | 2017-03-13T15:42:43 | 2017-03-13T15:42:43 | 84,845,185 | 0 | 0 | null | 2017-03-13T15:49:22 | 2017-03-13T15:49:22 | null | UTF-8 | R | false | true | 429 | rd | Zplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotFunctions.R
\name{Zplot}
\alias{Zplot}
\title{Function to plot Latent profiles}
\usage{
Zplot(df, ylab, ylim = c(0, 4))
}
\arguments{
\item{df}{data.frame object output from \code{createZdf}}
\item{ylab}{Label of y-axis}
\item{ylim}{define limits for y-axis}
}
\value{
plot of Latent profiles
}
\description{
Function to plot Latent profiles
}
|
788e9fb327e98d8773292a3948ccb2cba8b01785 | 780badc25c7cf0825bbe92259437c3a95b4730e4 | /run_analysis.R | 94b899c9115463ab96b433c78a268d919fdf279d | [] | no_license | LSedman/Getting-and-cleaning-data | c820d0d3972012419172e86138a51a02d7bfd152 | f52b05479f1e8db07a341c79f19bbd34f75f627a | refs/heads/master | 2020-03-29T17:28:48.685001 | 2014-05-23T21:48:37 | 2014-05-23T21:48:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,892 | r | run_analysis.R | url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# check if folder or zip file for dataset exists and download and unzip zip file if necessary
# (`url` is defined just above this block)
if (!file.exists("UCI HAR Dataset") & !file.exists("getdata_projectfiles_UCI HAR Dataset.zip")) {
  download.file(url, destfile="getdata_projectfiles_UCI HAR Dataset.zip", method="curl")
}
if (!file.exists("UCI HAR Dataset")){
  unzip("getdata_projectfiles_UCI HAR Dataset.zip")
}
# read data into R: feature names, activity labels, and the train/test splits
f <- read.table("./UCI HAR Dataset/features.txt", head=F)
a <- read.table("./UCI HAR Dataset/activity_labels.txt")
s_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
s_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
# merge measured data with subjects and activity labels
X_test <- cbind(s_test, y_test, X_test)
X_train <- cbind(s_train, y_train, X_train)
# merge training and test data
X <- rbind(X_train, X_test)
colnames(X) <- c("subject", "activity", as.character(f[,2]))
# rename activities: one vectorized lookup of the 1-6 activity codes against
# activity_labels.txt replaces the previous six hand-written substitutions
X$activity <- as.character(a$V2[match(X$activity, a$V1)])
# normalise the label text: lower-case, drop underscores, re-capitalise Up/Down
X$activity <- tolower(X$activity)
X$activity <- gsub(pattern="_", x=X$activity, replacement="")
X$activity <- gsub(pattern="down", x=X$activity, replacement="Down")
X$activity <- gsub(pattern="up", x=X$activity, replacement="Up")
# extract data corresponding to the mean and standard deviation of each feature
n <- c(grep("std\\(\\)", names(X)), grep("mean\\(\\)", names(X)))
n <- sort(n)
Y <- X[, n]
Y <- cbind(X[,1:2], Y)
# make variable names more readable: remove "()" and "-" from column names,
# change "BodyBody" to "Body", "mean" to "Mean" and "std" to "Std"
names(Y) <- gsub(pattern="\\(|\\)", x=names(Y), replacement="")
names(Y) <- gsub(pattern="-", x=names(Y), replacement="")
names(Y) <- gsub(pattern="BodyBody", x=names(Y), replacement="Body")
names(Y) <- gsub(pattern="mean", x=names(Y), replacement="Mean")
names(Y) <- gsub(pattern="std", x=names(Y), replacement="Std")
# create new data set with the average of each variable for each activity
# and each subject.
aggY <- aggregate(Y[, 3:68], by=list(Y$subject, Y$activity), FUN=mean)
colnames(aggY) <- colnames(Y)
write.table(aggY, file="tidyData.txt", quote=F, sep="\t", col.names=T,
            row.names=F)
|
f069eb0e881e58844248fe894150db29a795daeb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ergm/examples/g4.Rd.R | 7cf6fecea77f3d6af1f3f27143c6021a3a606e11 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 271 | r | g4.Rd.R | library(ergm)
### Name: g4
### Title: Goodreau's four node network as a "network" object
### Aliases: g4
### Keywords: datasets
### ** Examples
# Extracted help-page example: fit an out-degree(3) ERGM to the g4 network
# via maximum pseudo-likelihood, then again with the coefficient initialised
# at 0; both calls print a model summary.
data(g4)
summary(ergm(g4 ~ odegree(3), estimate="MPLE"))
summary(ergm(g4 ~ odegree(3), control=control.ergm(init=0)))
|
4050f7c0a042ae882c5bfce618186d5135abda0f | 57c81c8f14cfec0de37480972b62781d09bb917d | /plot4.R | 68ff94a0a483f36d7c5405d52cf52d1ece6161db | [] | no_license | DeathTruck/ExData_Plotting1 | 0245dd4d28079242ee76dba5172fdcd958a34236 | 7c99278f43d72f16893109e6af0a646180e53590 | refs/heads/master | 2021-01-11T03:33:07.837623 | 2016-10-16T05:51:50 | 2016-10-16T05:51:50 | 71,020,368 | 0 | 0 | null | 2016-10-15T23:54:58 | 2016-10-15T23:54:58 | null | UTF-8 | R | false | false | 2,409 | r | plot4.R | # This R code reads in data from UCI data archive on power consumption
# The code then subsets the data, slightly cleans it, and plots a
# time series, Global active Power, 3 sub_meterings, Voltage, and global
# reactive power in 4 panels
# Code written by Nik Buenning 10/15/2016
# Set working directory (this will need to be changed to reproduce the results)
# change. This directory should contain the data file.
setwd("/Users/buenning/data_science/exploratory_data_analysis")
# needed libraries
library(lubridate)
# The raw file flags missing readings with "?". Rewriting those as the string
# "NA" before as.numeric() avoids "NAs introduced by coercion" warnings.
# One helper replaces three identical copies of this conversion.
to_numeric <- function(v) {
  as.numeric(gsub("\\?", "NA", as.character(v)))
}
# Read in data, and subset the data over the desired time interval
all_data <- read.table("household_power_consumption.txt", sep=";", header = TRUE)
date_var <- as.Date(all_data$Date, format = "%d/%m/%Y")
new_data = subset(all_data, year(date_var) == 2007 & month(date_var) == 2 & day(date_var) <= 2)
data_names <- names(all_data)
# Make date and time variable
new_dates <- as.character(new_data$Date)
new_time <- as.character(new_data$Time)
comb_times <- strptime(paste(new_dates, new_time, sep=" "), format = "%d/%m/%Y %H:%M:%S")
# Numeric series with "?" treated as missing
gap2 <- to_numeric(new_data$Global_active_power)    # Global Active Power
volt2 <- to_numeric(new_data$Voltage)               # Voltage
grp2 <- to_numeric(new_data$Global_reactive_power)  # Global Reactive Power
# Get sub_metering data
sub1 <- as.numeric(as.character(new_data$Sub_metering_1))
sub2 <- as.numeric(as.character(new_data$Sub_metering_2))
sub3 <- as.numeric(as.character(new_data$Sub_metering_3))
# Plotting Code, Output to a png file: 2x2 grid, panels filled column-wise
png(filename="plot4.png", width = 480, height = 480)
par(mfcol = c(2, 2))
# top left
plot(comb_times, gap2, type ="l", xlab="",
     ylab="Global Active Power (kilowatts)")
# bottom left
plot(comb_times, sub1, type ="n", xlab="",
     ylab="Energy sub metering")
lines(comb_times, sub1, col = "black")
lines(comb_times, sub2, col = "red")
lines(comb_times, sub3, col = "blue")
legend("topright", col = c("black", "red", "blue"), bty="n",
       legend=data_names[7:9], lty=c(1, 1, 1))
# top right
plot(comb_times, volt2, type ="l", xlab="date/time",
     ylab="Voltage")
# bottom right
plot(comb_times, grp2, type ="l", xlab="date/time",
     ylab="Global_reactive_power")
dev.off()
|
54a70720cb8e1bf8391799a156bfe809d4ce20ea | 28b7aadb62e9c3dc45cfa6cec6a30bfc20baf5d1 | /man/maPermMerge.Rd | d0967caa747d8a8bbe957ad4b93fe0fb8c3e507d | [
"MIT"
] | permissive | jeffbhasin/methylaction | 7aa2d6835fbcab9851654e9664bea09e0442c8dd | d98ce2757b85e1fc8ec8ec891bb93b35feff519d | refs/heads/master | 2021-01-18T23:06:44.970958 | 2016-07-20T19:31:19 | 2016-07-20T19:31:19 | 32,554,873 | 5 | 5 | MIT | 2020-05-31T20:01:36 | 2015-03-20T01:04:40 | R | UTF-8 | R | false | false | 788 | rd | maPermMerge.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/perms.r
\name{maPermMerge}
\alias{maPermMerge}
\title{Merge permutations generated and saved by maPerm()}
\usage{
maPermMerge(dir = ".")
}
\arguments{
\item{dir}{Directory containing RData files saved by maPerm() where save was equal to TRUE}
}
\value{
A list of DMRs arising from each permutation or bootstrap, merged across all the detected RData files.
}
\description{
The function maPerm() with save=TRUE will save permutations into RData files. Move all these RData files into the same directory, and indicate this directory using "dir". The output from maPermMerge() will be a list of DMRs detected in all permutations, and can be given to maPermFdr() to compute false discovery rates (FDRs).
}
|
684e11e6cbfa4b2d945b86f24fe14c76c92b7435 | 828f885e708e688126547cedbf41644a00662579 | /_TEMPLATE BLANK R SCRIPT.R | 631a80c19305bb18f3bca3c22e0398abbc54a06a | [] | no_license | ElrondHubbard/squibbs | 5b41d62d2ca1586dedac2247d67dd93c8f56d881 | 063a8fef5bc14d96f880ed17285b4218714e51a7 | refs/heads/master | 2021-01-19T19:21:39.847693 | 2018-12-12T04:12:19 | 2018-12-12T04:12:19 | 83,724,465 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 891 | r | _TEMPLATE BLANK R SCRIPT.R | # TEMPLATE add brief notes here
# TEMPLATE add brief notes here
# Load libraries
# NOTE(review): install.packages() re-installs on every run of this template;
# consider guarding with requireNamespace() checks instead.
install.packages(c("dplyr" , "reshape2", "tidyr", "tidyverse", "knitr", "purrr", "stringdist", "RODBC", "tools", "editData"))
library(dplyr)
library(reshape2)
library(tidyr)
library(tidyverse)
library(knitr)
library(purrr)
library(stringdist)
library(RODBC)
library(tools)
library(editData)
# SECURITY: a real username and password are hardcoded on the next line and
# committed to source control. Rotate this credential and load it from an
# environment variable (Sys.getenv) or a credential store instead.
squibbs <-odbcConnect("squibbs_PRD",uid="ykaslow",pwd="Gungus69")
# See R file below for template queries of ADV data to run through R
# N:/AIS/Yerik/_Template Scripts/_SQL_QUERIES_IN_R.R
# Remember can only query, not write or update, but is PRD aka real-time
# See R cheats file for basic commands etc at
# N:/AIS/Yerik/R_Cheats.xlsx
# FOR ANY SQL QUERIES RUN - add date to name of R file, so know data are
# Current in ADV as of date...easy to leave one address file sitting for a week
f4c32a4f2542e9a3f2a9720fbefb9de50e162604 | 56b87244b365a26819e7cabcbd39406148e87db6 | /tests/testthat/test-bin.R | 074b6b28163b505992ad3f51b9fe50f2375fd9ad | [] | no_license | d-callan/plot.data | fd5c6a54a5eb6f186f29d6a910e095508f3f8533 | eed0ab2dcde88a4a399cc51ebe8135be5fff811d | refs/heads/master | 2023-02-21T00:08:19.135764 | 2021-01-26T15:16:39 | 2021-01-26T15:16:39 | 288,548,043 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,365 | r | test-bin.R | context('bin')
# Shape contract: binProportion() returns a data.table with one row per
# bin/group/panel combination and the expected id columns prepended.
test_that("binProportion() returns an appropriately sized data.table", {
  # no grouping: a single bin row (binWidth .1 over the fixture's data range)
  dt <- binProportion(data.xy,'y', binWidth=.1)
  expect_is(dt, 'data.table')
  expect_equal(nrow(dt),1)
  expect_equal(names(dt), c('binLabel', 'binStart', 'value'))
  # grouped: one row per group level in the data.xy fixture
  dt <- binProportion(data.xy, 'y','group', binWidth=.1)
  expect_is(dt, 'data.table')
  expect_equal(nrow(dt),4)
  expect_equal(names(dt), c('group', 'binLabel', 'binStart', 'value'))
  # panelled only: one row per panel
  dt <- binProportion(data.xy, 'y', NULL, 'panel', binWidth=.1)
  expect_is(dt, 'data.table')
  expect_equal(nrow(dt),4)
  expect_equal(names(dt), c('panel', 'binLabel', 'binStart', 'value'))
  # grouped and panelled: the full 4 x 4 cross
  dt <- binProportion(data.xy, 'y', 'group', 'panel', binWidth=.1)
  expect_is(dt, 'data.table')
  expect_equal(nrow(dt),16)
  expect_equal(names(dt), c('group', 'panel', 'binLabel', 'binStart', 'value'))
})
test_that("binProportion() returns consistent results", {
dt <- binProportion(data.xy,'y', binWidth=.1)
expect_equal_to_reference(dt,"proportion.rds")
dt <- binProportion(data.xy, 'y','group', binWidth=.1)
expect_equal_to_reference(dt,"proportion.group.rds")
dt <- binProportion(data.xy, 'y', NULL, 'panel', binWidth=.1)
expect_equal_to_reference(dt,"proportion.panel.rds")
dt <- binProportion(data.xy, 'y', 'group', 'panel', binWidth=.1)
expect_equal_to_reference(dt,"proportion.group.panel.rds")
})
|
90a584965697e0aeba6ee770e6cd85b104aae156 | 088f3ce1026a0b7066bf3cc5bd0ea644bcca7ae9 | /jhu-scraper.R | a632594d138c5538dc83bf01748864aa1ab3e5b5 | [] | no_license | dlm1223/covid-model | 7474d99eded7ce135324e3ae7be9c95c8de98d17 | 765e4376990c2533393559da8e9500b9848ac993 | refs/heads/master | 2022-04-24T00:31:10.961030 | 2020-04-25T03:45:55 | 2020-04-25T03:45:55 | 258,650,717 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,850 | r | jhu-scraper.R | #run this to get updated data. then run 1-organize-data.R, then 2-model-fitting.
library(data.table)
library(plyr)
library(dplyr)
library(reshape2)

wd<-""

# Base URL of the JHU CSSE COVID-19 time-series directory on GitHub.
jhu_base <- "https://raw.github.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"

## READING IN DATA FROM JHU GITHUB AND RESHAPE WIDE TO LONG
# Download one JHU wide-format series (one column per date) and reshape it to
# long format: one row per location and date. Date columns are recognised by
# containing "20" (the two-digit year in headers like X1.22.20); every other
# column is kept as an id. Underscores in names are normalised to dots
# (Province_State -> Province.State) and the date headers parsed to Date.
# This helper replaces five near-identical copies of the same sequence.
read_jhu_series <- function(file, value_name) {
  wide <- read.csv(paste0(jhu_base, file))
  long <- melt(wide,
               id.vars = colnames(wide)[!grepl("20", colnames(wide))],
               variable.name = "Date", value.name = value_name)
  colnames(long) <- gsub("_", ".", colnames(long))
  long$Date <- as.Date(long$Date, format = "X%m.%d.%y")
  long
}

## Global recoveries (coordinates dropped before saving)
recoveries_time_series <- read_jhu_series("time_series_covid19_recovered_global.csv", "Recoveries")
recoveries_time_series <- recoveries_time_series[, !colnames(recoveries_time_series) %in% c("Lat", "Long")]
tail(recoveries_time_series)
str(recoveries_time_series)
write.csv(recoveries_time_series, file=paste0(wd, "externaljhu/recoveries.csv"), row.names = F)
recoveries_time_series[recoveries_time_series$Country.Region=='US',]

## US confirmed cases: county-level extract, then aggregated to state level
confirmed.US <- read_jhu_series("time_series_covid19_confirmed_US.csv", "ConfirmedCases")
write.csv(confirmed.US, file=paste0(wd, "externaljhu/cases_us_county.csv"), row.names = F)
confirmed.US <- data.table(confirmed.US)[, list(ConfirmedCases=sum(ConfirmedCases)), by=c("Province.State", "Country.Region", "Date")]
tail(confirmed.US)
write.csv(confirmed.US, file=paste0(wd, "externaljhu/cases_us.csv"), row.names = F)

## US deaths: county-level extract, then aggregated to state level
deaths.US <- read_jhu_series("time_series_covid19_deaths_US.csv", "Fatalities")
write.csv(deaths.US, file=paste0(wd, "externaljhu/deaths_us_county.csv"), row.names = F)
head(deaths.US)
deaths.US <- data.table(deaths.US)[, list(Fatalities=sum(Fatalities)), by=c("Province.State", "Country.Region", "Date")]
write.csv(deaths.US, file=paste0(wd, "externaljhu/deaths_us.csv"), row.names = F)
#deaths.US[deaths.US$Province.State=="New York",]

## Global confirmed cases (coordinates dropped)
confirmed.global <- read_jhu_series("time_series_covid19_confirmed_global.csv", "ConfirmedCases")
confirmed.global <- confirmed.global[, !colnames(confirmed.global) %in% c("Lat", "Long")]
tail(confirmed.global)
write.csv(confirmed.global, file=paste0(wd, "externaljhu/cases.csv"), row.names = F)

## Global deaths (coordinates dropped)
deaths.global <- read_jhu_series("time_series_covid19_deaths_global.csv", "Fatalities")
deaths.global <- deaths.global[, !colnames(deaths.global) %in% c("Lat", "Long")]
tail(deaths.global)
write.csv(deaths.global, file=paste0(wd, "externaljhu/deaths.csv"), row.names = F)
|
268d062f8fcbafbdc06bcc4f807ef1e2b7381194 | f09a810505fe2ffde1fa2b8831ab12f7a32ff563 | /R/on.off.R | 23a563fc94955f694f1ce8360e1a6c14d8780897 | [] | no_license | cran/activpalProcessing | 6e33fe1d1da88448b559f223e5420934f7d75bd8 | e644678422294d27a74bb062deb07d266919605d | refs/heads/master | 2021-01-10T13:17:33.190479 | 2016-12-14T07:13:15 | 2016-12-14T07:13:15 | 50,388,342 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,772 | r | on.off.R | on.off <-
on.off <- function(directory=directory, id, visit, name.of.log.on.off, data)
{
  # Read the monitor on/off (wear-time) log and keep the rows for this
  # subject and visit only.
  on.off.log <- read.csv(paste(directory, name.of.log.on.off, ".csv", sep=""))
  on.off.log$id <- as.character(on.off.log$id)
  on.off.log <- on.off.log[on.off.log$id==id & on.off.log$visit==visit,]
  # Assemble on/off timestamps from their component month/day/year and
  # hour/minute/second columns.
  on.off.log$date.on <- paste(on.off.log$date.on.month, on.off.log$date.on.day, on.off.log$date.on.year, sep="/")
  on.off.log$time.on <- paste(on.off.log$time.on.hour, on.off.log$time.on.minute, on.off.log$time.on.seconds, sep=":")
  on.off.log$date.off <- paste(on.off.log$date.off.month, on.off.log$date.off.day, on.off.log$date.off.year, sep="/")
  on.off.log$time.off <- paste(on.off.log$time.off.hour, on.off.log$time.off.minute, on.off.log$time.off.seconds, sep=":")
  on.off.log$date.time.on <- strptime(paste(on.off.log$date.on, on.off.log$time.on, sep=" "), "%m/%d/%Y %H:%M:%S")
  on.off.log$date.time.off <- strptime(paste(on.off.log$date.off, on.off.log$time.off, sep=" "), "%m/%d/%Y %H:%M:%S")
  on.off.log$hours.on <- as.vector(difftime(strptime(on.off.log$date.time.off, format="%Y-%m-%d %H:%M:%S"),
                                            strptime(on.off.log$date.time.on, format="%Y-%m-%d %H:%M:%S"), units="hours"))
  if (dim(on.off.log)[1] > 0) {
    # Log rows present: default every sample to off == 1, then set off == 0
    # for samples falling inside any recorded on->off window (per the
    # original comment, off flags time the monitor was not worn).
    data$off <- 1
    for (t in seq_len(dim(on.off.log)[1])) {
      on <- strptime(on.off.log$date.time.on[t], "%Y-%m-%d %H:%M:%S")
      off <- strptime(on.off.log$date.time.off[t], "%Y-%m-%d %H:%M:%S")
      # which() drops NAs, so a missing data$time can no longer produce an
      # NA subscript in the assignment (the original (1:n)[logical] could).
      inds <- which(data$time >= on & data$time <= off)
      if (length(inds) > 0)
        data$off[inds] <- 0
    }
  } else {
    # BUG FIX: this branch was previously nested INSIDE the nrow > 0 block
    # and therefore unreachable; with no log rows, data$off was never set.
    # Flag the column so downstream code can tell wear time is unknown.
    data$off <- "No.On.Off.Log"
  }
  return(data)
}
|
5265439644309a42220ba518d4cc3d1868fd320f | 1d5e1e4d326eeb42542f15cce34239598368c21f | /ui.R | 6b11ce5cc56966dbb63563b3f6bd28ac0bfadcd3 | [
"MIT"
] | permissive | hly89/PatientNet | 52002606576d1a0d904ec5c5a626911b3eb67d1a | b937535a17bbeba571b4a93b376ae03a27490607 | refs/heads/master | 2021-06-04T10:53:24.178662 | 2021-04-05T19:59:17 | 2021-04-05T19:59:17 | 95,745,639 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,839 | r | ui.R | .libPaths("/home/shiny/libs")
# PatientNet shiny UI: a shinydashboard page with an upload sidebar (either a
# single combined xlsx file or four separate csv inputs, toggled by a custom
# switch button) and a body that shows the uploaded data, a guided tour
# splash, and the constructed patient-specific network.
library(openxlsx)
library(visNetwork)
library(igraph)
library(shiny)
library(shinydashboard)
library(shinyBS)
library(shinyjs)
#load("Zaman.27.12.rda")
# source("NetworkPipeline.R")
source("./Rsource/SwitchButton.R")
# Header: app title plus a dropdown exposing the author's contact address.
shinyUI(dashboardPage(
  dashboardHeader(title = "PatientNet",
                  tags$li(class = "dropdown notifications-menu",
                          HTML('<a href="#" class="dropdown-toggle" data-toggle="dropdown">
                               <i class="fa fa-user"></i>
                               <span class="label label-primary">1</span></a>
                               <ul class="dropdown-menu">
                               <li> <ul class="menu">
                               <li> <a href="mailto:liye.he@helsinki.fi">
                               <i class="fa fa-envelope-o" aria-hidden="true"></i>
                               Author: Liye He <liye.he@helsinki.fi>
                               </a>'))),
  # Sidebar: static assets, example-data download, input mode toggle, file
  # uploads, network construction/simplification controls, and a save box.
  dashboardSidebar(
    # custom css for buttons and sliderinput
    tags$head(
      tags$link(rel = "stylesheet", type = "text/css", href = "allcss.css"),
      tags$link(rel = "stylesheet", type = "text/css", href = "tooltip-curved.css"),
      tags$link(rel = "stylesheet", type = "text/css", href = "drop.css"),
      tags$script(src = "feedback_source.js"),
      tags$script(src = "feedback.js"), #also to top button
      tags$script(src = "tour.js")
    ),
    downloadButton(outputId = "loadExData_small", label = "example data", class = "butEx"),
    # input.separate switches between single-file and separate-file upload UIs
    div(
      id = "filetype",
      switchButton(inputId = "separate", label = "Separate input files", value = FALSE)
    ),
    #,
    div(id = "singleinput",
        tags$form(id = "testing",
                  conditionalPanel(
                    condition = " input.separate != true",
                    fileInput('dataAll', 'Upload a single file in xlsx format', accept = c(".xlsx"))
                  )
        )
    ),
    conditionalPanel(
      condition = " input.separate == true",
      fileInput('drugTarget', 'Upload drug target data', accept = c(".csv")),
      fileInput('dss', 'Upload drug response data', accept = '.csv'),
      fileInput('mut', 'Upload mutation data', accept = '.csv'),
      fileInput('exp', 'Upload gene expression data', accept = '.csv')
    ),
    div(id = "net", actionButton("Nets", "Construct Patient Network")),
    div(id='simplify', selectInput("Simplify", "Simplify Network:", choices=c("Select max path length"))),
    hr(),
    div(id = "save", valueBox(downloadButton("Save", "Save"),
                              "Save results", icon = icon("floppy-save", lib = "glyphicon"),
                              width = 13, color = "red"))
  ),
  # Body: welcome/tour splash (hidden/shown via shinyjs), then server-rendered
  # input-data tables and the network view.
  dashboardBody(
    useShinyjs(),
    div(
      br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),
      fluidRow(column(offset = 3, width = 6,
                      div(
                        HTML('<div class="primary_header"><h1>Welcome to PatientNet</h1>
                             <h2>a shiny web application for visualizing patient-specific cancer vulnerability networks</h2></div>'
                        ), br(),
                        HTML('<button type="button" id="buttonTour" class="btn btn-primary btn-lg">
                             <span class="glyphicon glyphicon-play"></span>
                             Start the tour
                             </button>'), id = "startour")
      )), id = "wraptour"),
    uiOutput("showInputData"),
    div(id = "networks", uiOutput("showNetwork"))
  )
))
# shinyUI(fluidPage(
# # titlePanel("Visualize patient-specific network"),
# # sidebarLayout(
# # sidebarPanel(
# # fileInput(inputId = "data", "Upload data", accept = "xlsx"),
# # checkboxInput("visualiz", "Visualize", value = FALSE)
# # ),
# # mainPanel(
# # uiOutput('tabs'),
# # conditionalPanel(condition = "input.visualiz == true",
# # visNetworkOutput('zaman.sub'))
# #
# # )
# # )
# titlePanel("Visualize patient-specific network"),
# fluidRow(
# column(3,
# fileInput(inputId = "data", "Upload data", accept = "xlsx"),
# checkboxInput("visualiz", "Visualize", value = FALSE)
# ),
# column(9,
# uiOutput('tabs'),
# fluidRow(column(9,
# conditionalPanel(condition = "input.visualiz == true",
# visNetworkOutput('zaman.sub', height = "900"))),
# column(3, conditionalPanel(
# condition = "input.visualiz == true", imageOutput("extraLegend")
# ))
# )
#
# )
# )
# )
# ) |
b9e10b81c76a3c26836dbdf0e89c8dd754ba7006 | df37f20429d902b39b6aa812f7c1ac4fefbd7eb3 | /R/03_baseline_characteristics.R | b6f599775231e26eb944d17c4bdd1c9c2e6ca890 | [
"MIT"
] | permissive | yoffeash/asthmacardiac | fbe342a64963f130d0f6b2d1356e560c1ffe20c1 | 99ba0076b105dcec6ff5b4024776be63226e4c5c | refs/heads/master | 2020-03-26T09:54:59.652892 | 2018-12-19T19:21:24 | 2018-12-19T19:21:24 | 144,771,567 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 609 | r | 03_baseline_characteristics.R | ### baseline characteristics ###
#############Updated Table 1 #######################################################################################
# Descriptive statistics for Table 1; length(na.omit(...)) reports the number
# of non-missing observations for each variable.
# NOTE(review): stat.desc presumably comes from pastecs, and `cardiac_asthma`
# is assumed to be loaded by an earlier numbered script in this project --
# confirm before running standalone.
##BSA based cardiac measurement##
stat.desc(cardiac_asthma$vol_epi_RV_BSAI)
stat.desc(cardiac_asthma$vol_epi_LV_BSAI)
stat.desc(cardiac_asthma$RV_LV_epi_vol_BSAI)
##heart rate##
stat.desc(cardiac_asthma$heart_rate)
length(na.omit(cardiac_asthma$heart_rate))
##age at diagnosis##
stat.desc(cardiac_asthma$age_dx)
length(na.omit(cardiac_asthma$age_dx))
##duration of disease##
stat.desc(cardiac_asthma$duration_dx)
length(na.omit(cardiac_asthma$duration_dx))
6ea9a1cae95d824445ea4531797f3d9d6e836565 | b72a579eddbd2e20a0d154a4704fa28dc89adf5f | /code/Jingning/UKBB_ancestry_prediction/ancestry_prediction/*backup/5_1000G_PCA.R | bd336d6b88a5de1db1255383f860eab5edcf39dd | [] | no_license | andrewhaoyu/multi_ethnic | cf94c2b02c719e5e0cbd212b1e09fdd7c0b54b1f | d1fd7d41ac6b91ba1da8bb8cd1b2b0768f28b984 | refs/heads/master | 2023-06-24T20:47:18.268972 | 2023-06-13T15:30:14 | 2023-06-13T15:30:14 | 214,069,397 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,450 | r | 5_1000G_PCA.R |
suppressMessages(library('plink2R'))
genos = read_plink(paste("/dcs04/nilanjan/data/jzhang2/UKBB/ancestry_prediction/1000G_final/chr2122"), impute="avg")
pc1000G <- prcomp(genos$bed, center=TRUE, scale=TRUE, rank=10)
saveRDS(pc1000G, "/dcs04/nilanjan/data/jzhang2/UKBB/ancestry_prediction/models/pc1000G_5.rds")
writeLines(rownames(pc1000G$rotation), "/dcs04/nilanjan/data/jzhang2/UKBB/ancestry_prediction/models/pc1000G_5.snps")
saveRDS(pc1000G$rotation, "/dcs04/nilanjan/data/jzhang2/UKBB/ancestry_prediction/models/pc1000G_5.loadings")
saveRDS(pc1000G$x, "/dcs04/nilanjan/data/jzhang2/UKBB/ancestry_prediction/models/pc1000G_5.pcscores")
scov <- cov(t(genos$bed))
saveRDS(scov,"/dcs04/nilanjan/data/jzhang2/UKBB/ancestry_prediction/models/hdpca/scov.rds")
ev <- eigen(scov)
saveRDS(ev,"/dcs04/nilanjan/data/jzhang2/UKBB/ancestry_prediction/models/hdpca/ev.rds")
genomes_sd <- apply(genos$bed, MARGIN=2, sd)
genomes_mean <- apply(genos$bed, MARGIN=2, mean)
save(genomes_sd, genomes_mean, file="/dcs04/nilanjan/data/jzhang2/UKBB/ancestry_prediction/models/hdpca/geno1000G.sdmean.RData")
library(hdpca)
#First estimate the number of spikes and then adjust test scores based on that
train.eval<-ev$values
n<-nrow(genos$fam)
p<-nrow(genos$bim)
m<-select.nspike(train.eval,p,n,n.spikes.max=50,evals.out=FALSE)$n.spikes ## 33
save(train.eval, n, p, m, file="/dcs04/nilanjan/data/jzhang2/UKBB/ancestry_prediction/models/hdpca/parameters.RData")
|
1d1de528fd0fbff2add9d72d4a472889c2817b23 | 9d90fb5e801dcccbb2d7a4f4c1ff18cfe0c0406f | /R/CGU.R | 4b19374588cac4dfce3b72636244c91d96868b0d | [] | no_license | WangJJ-xrk/COM | fac171f7a348f258c523b2e5ddc7c5139b0cecc1 | 62982566d61272f1045bff05f62d040bc80e56d3 | refs/heads/master | 2022-12-12T13:29:20.491621 | 2020-08-17T07:59:39 | 2020-08-17T07:59:39 | 288,110,950 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,468 | r | CGU.R | #' Significance Calculation of A Set of Generalized U Statistics.
#'
#'\code{zhangjtMat} calculates a matrix of p values for a set of generalized U
#'statistics, with each element corresponding to a generalized U statistic.
#'
#' @param E the similarity matrix of one of the variates or multivariates.
#' @param G the similarity matrix of the other one of the variates or
#' multivariates.
#' @param seq0 a set of powers for the similarity matrixes.
#'
#' @return a matrix of p values.
#' @export
#'
#' @examples
#' library(MASS)
#' n = 50
#' p = 100
#' k = 15
#' sigmax = diag(rep(0.5,k)) + matrix(0.5,k,k)
#' sigmay = diag(rep(1,p))
#' for(i in 1:p){
#' for(j in 1:p){
#' sigmay[i,j] = 0.5^abs(i-j)
#' }
#' }
#' r1 = 0.05
#' beta0 = r1*matrix(rbinom(k*p,1,0.9), k, p)
#' x = mvrnorm(n, rep(0,k), sigmax)
#' y = x%*%beta0 + mvrnorm(n, rep(0,p), sigmay)
#' Kx = calcKerMat_Cen(x)
#' Ky = calcKerMat_Cen(y)
#' seq1 = c(0.5,1,2)
#' zhangjtMat(Kx,Ky,seq1)
zhangjtMat <- function(E, G, seq0) {
    # Computes a p-value matrix for generalized U statistics: each entry
    # (i, j) uses the similarity kernels E^seq0[i] and G^seq0[j], where the
    # fractional matrix powers are taken through the eigen decomposition.
    #
    # Fixes over the original implementation:
    #  - removed the unused local 'n'
    #  - 'drop = FALSE' so a single retained eigenvector stays a matrix
    #  - diag(x, nrow = k) avoids diag()'s scalar ambiguity (diag(2.5) would
    #    otherwise build a 2x2/3x3 identity when only one eigenvalue survives)
    #  - seq_len() instead of 1:n (safe when a count is zero)
    #  - E1 is hoisted out of the inner loop (it does not depend on g11)
    # Keep only spectral components with eigenvalues > 1e-4 so fractional
    # powers are well defined.
    eigE <- eigen(E)
    kqe <- sum(eigE$values > 0.0001)
    EV <- eigE$vectors[, seq_len(kqe), drop = FALSE]
    EVa <- eigE$values[seq_len(kqe)]
    eigG <- eigen(G)
    kq <- sum(eigG$values > 0.0001)
    GV <- eigG$vectors[, seq_len(kq), drop = FALSE]
    GVa <- eigG$values[seq_len(kq)]
    numKM <- length(seq0)
    mat0 <- matrix(NA_real_, numKM, numKM)
    for (e11 in seq_len(numKM)) {
        # kernel E raised to the power seq0[e11]
        E1 <- EV %*% diag(EVa^seq0[e11], nrow = kqe) %*% t(EV)
        for (g11 in seq_len(numKM)) {
            # kernel G raised to the power seq0[g11]
            G1 <- GV %*% diag(GVa^seq0[g11], nrow = kq) %*% t(GV)
            # zhangjtAppro() (defined elsewhere in this package) returns the
            # p value for this pair of kernel powers
            mat0[e11, g11] <- zhangjtAppro(E1, G1)
        }
    }
    mat0
}
#' Significance calculation for the Cauchy-combined Omnibus Test (COT) statistic.
#'
#' @param mat0 a matrix of p values to be combined.
#'
#' @return the p value of the Cauchy-combined Omnibus Test (COT) statistic.
#' @export
#'
#' @examples
#' library(MASS)
#' n = 50
#' p = 100
#' k = 15
#' sigmax = diag(rep(0.5,k)) + matrix(0.5,k,k)
#' sigmay = diag(rep(1,p))
#' for(i in 1:p){
#' for(j in 1:p){
#' sigmay[i,j] = 0.5^abs(i-j)
#' }
#' }
#' r1 = 0.05
#' beta0 = r1*matrix(rbinom(k*p,1,0.9), k, p)
#' x = mvrnorm(n, rep(0,k), sigmax)
#' y = x%*%beta0 + mvrnorm(n, rep(0,p), sigmay)
#' Kx = calcKerMat_Cen(x)
#' Ky = calcKerMat_Cen(y)
#' seq1 = c(0.5,1,2)
#' mat1 = zhangjtMat(Kx,Ky,seq1)
#' COT(mat1)
COT <- function(mat0) {
    # Cauchy combination test: map every p value to a standard Cauchy
    # quantile, average the quantiles, and transform the mean statistic
    # back to a single combined p value.
    # NOTE(review): the divisor nrow(mat0)^2 assumes mat0 is square
    # (one entry per kernel-power pair) -- confirm with callers.
    n_per_side <- nrow(mat0)
    cauchy_quantiles <- tan((0.5 - mat0) * pi)
    combined_stat <- sum(cauchy_quantiles) / n_per_side^2
    0.5 - atan(combined_stat) / pi
}
|
0439156b9ed6a2395a3efe2c8dd017ef22a2db5e | 3d908ac9dc1e201d7c869f09e6a09cb209386152 | /plot2.R | ed71bc5409af448cc6c695433c6de5191ba5e03b | [] | no_license | helcb33/ExData_Plotting1 | 9439b794a6554f385db9435efdd593d8a57a913b | 87265ee6d76b7b5b9b2eafa624150b62030331dd | refs/heads/master | 2021-01-21T02:24:08.858315 | 2015-05-10T19:37:11 | 2015-05-10T19:37:11 | 35,254,435 | 0 | 0 | null | 2015-05-08T02:32:37 | 2015-05-08T02:32:37 | null | UTF-8 | R | false | false | 569 | r | plot2.R | ##Reading the data
hpc <- read.table("household_power_consumption.txt", sep=";",
header=TRUE, na.string="?")
##"Date" class
hpc$Date <- as.Date(hpc$Date, format="%d/%m/%Y")
##Subsetting
hpc2 <- hpc[(hpc$Date=="2007-02-01" | hpc$Date=="2007-02-02"), ]
##"Time" class
hpc2$Time <- strptime(paste(hpc2$Date, hpc2$Time), "%Y-%m-%d %H:%M:%S")
##Plot2
plot(hpc2$Time,hpc2$Global_active_power, type="l",
xlab="", ylab="Global Active Power (kilowatts)")
##Generating PNG file
dev.copy(png, file="plot2.png", height=480, width=480, units="px")
dev.off() |
6c76e2c4e59617e47e736fada1e94e88da77688c | 3d99f2ed3b2f0ce30e5ef81c45e721036eae38a8 | /R/MainLassoLDATraining.R | 28ccde5a5dc56aebce830feefd211fcca14fd900 | [] | no_license | IMB-Computational-Genomics-Lab/scGPS | 296244465064fd9dc2f9fcfb80af6b43fa9ae797 | 0c3746eaad27b804b61d310f57c24a71935cfcfa | refs/heads/master | 2021-06-01T13:48:53.819039 | 2020-12-03T00:43:35 | 2020-12-03T00:43:35 | 111,049,242 | 4 | 5 | null | 2018-10-26T05:59:04 | 2017-11-17T02:47:39 | R | UTF-8 | R | false | false | 37,546 | r | MainLassoLDATraining.R | #' Main model training function for finding the best model
#' that characterises a subpopulation
#'
#' @description Training a half of all cells to find optimal ElasticNet and LDA
#' models to predict a subpopulation
#' @param mixedpop1 is a \linkS4class{SingleCellExperiment} object from the
#' train mixed population
#' @param mixedpop2 is a \linkS4class{SingleCellExperiment} object from the
#' target mixed population
#' @param genes a vector of gene names (for ElasticNet shrinkage); gene symbols
#' must be in the same format with gene names in subpop2. Note that genes are
#' listed by the order of importance, e.g. differentially expressed genes that
#' are most significant, so that if the gene list contains too many genes, only
#' the top 500 genes are used.
#' @param cluster_mixedpop1 a vector of cluster assignment in mixedpop1
#' @param c_selectID a selected number to specify which subpopulation to be used
#' for training
#' @param out_idx a number to specify index to write results into the list
#' output. This is needed for running bootstrap.
#' @param standardize a logical value specifying whether or not to standardize
#' the train matrix
#' @param listData list to store output in
#' @param trainset_ratio a number specifying the proportion of cells to be part
#' of the training subpopulation
#' @param LDA_run logical, if the LDA run is added to compare to ElasticNet
#' @param log_transform boolean whether log transform should be computed
#' @return a \code{list} with prediction results written in to the indexed
#' \code{out_idx}
#' @export
#' @author Quan Nguyen, 2017-11-25
#' @examples
#'
#' c_selectID<-1
#' out_idx<-1
#' day2 <- day_2_cardio_cell_sample
#' mixedpop1 <-new_scGPS_object(ExpressionMatrix = day2$dat2_counts,
#' GeneMetadata = day2$dat2geneInfo, CellMetadata = day2$dat2_clusters)
#' day5 <- day_5_cardio_cell_sample
#' mixedpop2 <-new_scGPS_object(ExpressionMatrix = day5$dat5_counts,
#' GeneMetadata = day5$dat5geneInfo, CellMetadata = day5$dat5_clusters)
#' genes <-training_gene_sample
#' genes <-genes$Merged_unique
#' listData <- training(genes,
#' cluster_mixedpop1 = colData(mixedpop1)[, 1],
#' mixedpop1 = mixedpop1, mixedpop2 = mixedpop2, c_selectID,
#' listData =list(), out_idx=out_idx, trainset_ratio = 0.5)
#' names(listData)
#' listData$Accuracy
training <- function(genes = NULL, cluster_mixedpop1 = NULL,
    mixedpop1 = NULL, mixedpop2 = NULL, c_selectID = NULL, listData = list(),
    out_idx = 1, standardize = TRUE, trainset_ratio = 0.5, LDA_run = FALSE,
    log_transform = FALSE) {
    # Trains an ElasticNet (and optionally LDA) classifier separating the
    # selected subpopulation (c_selectID) from all remaining cells of
    # mixedpop1, then estimates accuracy on the held-out cells. Results are
    # written into listData[[...]][[out_idx]] so bootstrap replicates can
    # accumulate in one list.
    #
    # FIXES over the original:
    #  - the deviance summary used group_by(dat_DE$Dfd) and
    #    max(dat_DE$Deviance): '$'-referencing the full column defeats dplyr
    #    grouping, so every group received the GLOBAL maximum; Deviance was
    #    also character, making max() lexicographic. Now grouped properly on
    #    numeric values.
    #  - local 'log' masking base::log renamed; local 'trainControl'
    #    shadowing caret::trainControl renamed; '=' assignments -> '<-';
    #    log-message typos corrected.
    # subsampling---------------------------------------------------------------
    # take trainset_ratio of the selected cluster's cells for training
    subpop1cluster_indx <- which(cluster_mixedpop1 == c_selectID) #class 1
    message(paste0("Total ", length(subpop1cluster_indx),
        " cells as source subpop"))
    subremaining1_indx <- which(cluster_mixedpop1 != c_selectID)#remaining class
    message(paste0("Total ", length(subremaining1_indx),
        " cells in remaining subpops"))
    subsampling <- round(length(subpop1cluster_indx) * trainset_ratio)
    message(paste0("subsampling ", subsampling,
        " cells for training source subpop"))
    subpop1_train_indx <- sample(subpop1cluster_indx, subsampling,
        replace = FALSE)
    # balance classes when a very big cluster is present: cap the
    # remaining-class training cells at the same size as the selected sample
    if (length(subremaining1_indx) > subsampling) {
        message(paste0("subsampling ", subsampling,
            " cells in remaining subpops for training"))
        subremaining1_train_indx <- sample(subremaining1_indx, subsampling,
            replace = FALSE)
    } else {
        message(paste0("no subsampling, using ", length(subremaining1_indx),
            " cells in remaining subpops for training"))
        subremaining1_train_indx <- subremaining1_indx
    }
    # done subsampling----------------------------------------------------------
    # select top 500 genes for the gene list used in the model------------------
    # 'genes' is assumed sorted by importance (e.g. DE p-value)
    if (length(genes) > 500) {
        genes <- genes[seq_len(500)]
    }
    # keep only genes present in both mixedpop1 and mixedpop2
    names1 <- elementMetadata(mixedpop1)[, 1]
    subpop1_selected_genes <- names1[which(names1 %in% genes)]
    names2 <- elementMetadata(mixedpop2)[, 1]
    genes_in_both <- names2[which(names2 %in% subpop1_selected_genes)]
    genes_in_both_idx1 <- which(names1 %in% genes_in_both)
    message(paste0("use ", length(genes_in_both_idx1),
        " genes for training model"))
    # done selecting genes------------------------------------------------------
    # prepare ElasticNet training matrices--------------------------------------
    # predictor matrix contains cells of both classes (selected + remaining)
    predictor_S1 <- mixedpop1[genes_in_both_idx1, c(subpop1_train_indx,
        subremaining1_train_indx)]
    if (log_transform) {
        predictor_S1 <- assays(predictor_S1)$logcounts
    } else {
        predictor_S1 <- assays(predictor_S1)$counts
    }
    message(paste0("use ", dim(predictor_S1)[1], " genes ",
        dim(predictor_S1)[2], " cells for training model"))
    # categorical response: collapse all non-selected clusters into one label
    c_compareID <- unique(cluster_mixedpop1)
    c_compareID <- paste0(c_compareID[-which(c_compareID == c_selectID)],
        collapse = "_")
    message(paste0("rename remaining subpops to ", c_compareID))
    # change cluster number to character
    c_selectID <- as.character(c_selectID)
    # start with every cell labelled as the selected class (length = #cells)
    y_cat <- rep(c_selectID, ncol(predictor_S1))
    # then relabel the cells that came from the remaining clusters
    remainingClass_Indx_in_y <- which(colnames(predictor_S1) %in%
        colnames(mixedpop1[, subremaining1_train_indx]))
    y_cat[remainingClass_Indx_in_y] <- rep(c_compareID,
        length(remainingClass_Indx_in_y))
    message(paste0("there are ", length(remainingClass_Indx_in_y),
        " cells in class ", c_compareID, " and ",
        length(y_cat) - length(remainingClass_Indx_in_y), " cells in class ",
        c_selectID))
    # Done prepare ElasticNet training matrices---------------------------------
    # prepare training matrices------------------------------------------------
    dataset <- t(predictor_S1) #(note predictor_S1 =t(gene_S1))
    # remove genes with no variation across cells and duplicated genes
    Zero_col <- which(colSums(dataset) == 0)
    duplicated_col <- which(duplicated(colnames(dataset)) == TRUE)
    if (length(c(Zero_col, duplicated_col)) != 0) {
        message(paste0("removing ", length(c(Zero_col, duplicated_col)),
            " genes with no variance"))
        dataset <- dataset[, -c(Zero_col, duplicated_col)]
    }
    # center and scale per cell before training; glmnet could standardize
    # internally, but doing it here keeps the leave-out evaluation consistent
    standardizing <- function(X) {
        X <- X - mean(X)
        X <- X/sd(X)
        return(X)
    }
    if (standardize) {
        message("standardizing prediction/target dataset")
        dataset <- t(apply(dataset, 1, standardizing))
        #dataset is transposed after standardised and scaled
    }
    dataset <- as.data.frame(dataset)
    dataset$Cluster_class <- as.character(as.vector(y_cat))
    # zero-variance genes become NA after standardization; drop before fitting
    dataset <- na.omit(dataset)
    # cross-validated ElasticNet (binomial family, misclassification loss)
    message("performing elasticnet model training...")
    cvfit <- cv.glmnet(as.matrix(dataset[, -which(colnames(dataset) ==
        "Cluster_class")]), as.vector(dataset$Cluster_class),
        family = "binomial", type.measure = "class")
    # optionally fit LDA (10-fold CV repeated 3 times) for comparison
    if (LDA_run) {
        message("performing LDA model training...")
        train_ctrl <- trainControl(method = "repeatedcv", number = 10,
            repeats = 3)
        fit.lda <- train(Cluster_class ~ ., preProcess = NULL, data = dataset,
            method = "lda", metric = "Accuracy", trControl = train_ctrl,
            na.action = na.omit, standardize = FALSE)
    }
    # Done fitting the ElasticNet and LDA models--------------------------------
    # Extract coefficient Beta for a gene for an optimized lambda value---------
    message("extracting deviance and best gene features...")
    cvfit_out <- as.matrix(coef(cvfit, s = cvfit$lambda.min))
    cvfit_out <- as.data.frame(cvfit_out)
    # genes with a non-zero coefficient at lambda.min
    cvfit_out$name <- row.names(cvfit_out)
    sub_cvfit_out <- cvfit_out[cvfit_out$`1` != 0, ]
    # capture the deviance-explained table printed by glmnet
    glmnet_print_log <- utils::capture.output({
        t_DE <- as.matrix(print(cvfit$glmnet.fit))
    })
    dat_DE <- as.data.frame(t_DE)
    colnames(dat_DE) <- c("Dfd", "Deviance", "lambda")
    # locate the printed row matching lambda.min (string comparison against
    # the rounded value -- assumes print()'s precision; TODO confirm)
    dat_DE_Lambda_idx <- c()
    for (i in seq_len(nrow(dat_DE))) {
        if (dat_DE$lambda[i] == round(cvfit$lambda.min,
            nchar(dat_DE$lambda[i]) - 2)) {
            dat_DE_Lambda_idx <- c(dat_DE_Lambda_idx, i)
        }
    }
    if (length(dat_DE_Lambda_idx) > 0) {
        dat_DE <- dat_DE[seq_len(dat_DE_Lambda_idx[1]), ]
        message(paste0("lambda min is at location ", dat_DE_Lambda_idx[1]))
    } else {
        message("no lambda min found, please check output ...")
    }
    # FIX: convert Deviance to numeric (it is character here, so max() was
    # lexicographic) and group by the Dfd column itself rather than the
    # full-length '$' reference, which defeated dplyr grouping
    dat_DE$Deviance <- as.numeric(as.character(dat_DE$Deviance))
    dat_DE_fm_DE <- dat_DE %>% group_by(Dfd) %>%
        summarise(Deviance = max(Deviance))
    dat_DE_fm_DE <- as.data.frame(dat_DE_fm_DE)
    dat_DE_fm_DE$DEgenes <- paste0("genes_cluster", c_selectID)
    remaining <- c("remaining DEgenes")
    dat_DE_fm_DE <- rbind(dat_DE_fm_DE, remaining)
    # Done extracting the coefficients------------------------------------------
    # fit the model on the leave-out data to estimate accuracy------------------
    # cells of the selected cluster NOT used for training:
    # subpop1cluster_indx = all cells of the source subpop,
    # subpop1_train_indx = the subsampled training cells
    cluster_select_indx_Round2 <- subpop1cluster_indx[-which(subpop1cluster_indx
        %in% subpop1_train_indx)]
    message(paste0("the leave-out cells in the source subpop is ",
        length(cluster_select_indx_Round2)))
    # positions (within subremaining1_indx) of the remaining-class cells that
    # were already used for training; exclude them from the test set
    subremaining1_train_order <- which(subremaining1_indx %in%
        subremaining1_train_indx)
    # choose remaining-class test cells, capped at 'subsampling' for balance
    if ((length(subremaining1_indx) - length(subremaining1_train_indx)) >
        subsampling) {
        cluster_compare_indx_Round2 <- sample(subremaining1_indx[
            -subremaining1_train_order], subsampling, replace = FALSE)
        message(paste0("use ", subsampling,
            " target subpops cells for leave-out test set"))
    } else {
        cluster_compare_indx_Round2 <-
            subremaining1_indx[-subremaining1_train_order]
        #use all of the class 2 cells after taken those for training
        message(paste0("use ", length(cluster_compare_indx_Round2),
            " target subpop cells for leave-out test set"))
    }
    # expression matrix of the leave-out cells (both classes)
    temp_S2 <- mixedpop1[, c(cluster_select_indx_Round2,
        cluster_compare_indx_Round2)] #genes_in_trainset_idx_ordered
    if (log_transform) {
        temp_S2 <- assays(temp_S2)$logcounts
    } else {
        temp_S2 <- assays(temp_S2)$counts
    }
    predictor_S2 <- t(temp_S2)
    # align leave-out genes to the genes used by the fitted model
    gene_cvfit <- cvfit$glmnet.fit$beta@Dimnames[[1]]
    names <- colnames(predictor_S2)
    cvfitGenes_idx <- match(gene_cvfit, as.character(names))
    #names are genes in target subpop
    if (length(cvfitGenes_idx) == 0) {
        message("No genes in the model present in the data...check your input")
    }
    # model genes absent from the leave-out matrix: temporarily borrow random
    # column indexes for dimension conformity, then blank them out below
    to_add <- which(is.na(cvfitGenes_idx) == TRUE)
    if (length(to_add) > 0) {
        message(paste0("add ", length(to_add),
            " random indexes for genes in model but not in target subpop, ",
            "later to be replaced by 0"))
        to_add_idx <- sample(cvfitGenes_idx[-to_add], length(to_add),
            replace = FALSE)
        cvfitGenes_idx[to_add] <- to_add_idx #replace NA indexes by random
    }
    if (length(cvfitGenes_idx) > 0) {
        predictor_S2 <- predictor_S2[, cvfitGenes_idx]
    }
    # blank out the borrowed columns (genes in model but not in the data)
    if (length(to_add) > 0) {
        message("Replacing missing genes by NA...")
        predictor_S2[, to_add] <- NA
    }
    # standardize the leave-out cells the same way as the training data
    if (standardize) {
        message("standardizing the leave-out target and source subpops...")
        predictor_S2 <- t(apply(predictor_S2, 1, standardizing))
    }
    # drop rows with NA (zero-variance cells) before prediction
    predictor_S2 <- na.omit(predictor_S2)
    message("start ElasticNet prediction for estimating accuracy...")
    predict_clusters <- predict(cvfit, newx = predictor_S2, type = "class",
        s = cvfit$lambda.min)
    if (LDA_run) {
        message(paste0("start LDA prediction for estimating accuracy for ",
            nrow(predictor_S2), " cells and ", ncol(predictor_S2),
            " genes..."))
        lda_predict <- predict(fit.lda, predictor_S2)
    }
    # Done validation test to estimate accuracy---------------------------------
    # Estimate accuracy---------------------------------------------------------
    # reference cell-name -> cluster table for computing accuracy
    cellNames_cluster <- cbind(colnames(mixedpop1), cluster_mixedpop1)
    predictor_S2_name <- row.names(predict_clusters)
    predict_label <- predict_clusters[, 1]
    predict_index <- which(cellNames_cluster[, 1] %in% predictor_S2_name)
    original_cluster <- cellNames_cluster[predict_index, ]
    original_cluster <- original_cluster[order(original_cluster[, 2],
        decreasing = TRUE), ]
    original_cluster <- as.data.frame(original_cluster)
    # for ElasticNet
    predict_cluster_dat <- as.data.frame(predict_label)
    predict_cluster_dat$cellnames <- row.names(predict_cluster_dat)
    # merge original and predicted datasets on cell name
    compare <- merge(original_cluster, predict_cluster_dat, by.x = "V1",
        by.y = "cellnames")
    # a prediction is accurate when it agrees with the original class
    # (selected vs remaining)
    cluster_select_predict <- subset(compare,
        (as.numeric(compare[, 2]) == c_selectID & compare$predict_label ==
            c_selectID) | (as.numeric(compare[, 2]) != c_selectID &
            compare$predict_label != c_selectID))
    accurate <- dim(cluster_select_predict)[1]
    inaccurate <- dim(compare)[1] - dim(cluster_select_predict)[1]
    list_acc_inacc <- list(accurate, inaccurate)
    message(paste0("evaluation accuracy ElasticNet ",
        accurate/(accurate + inaccurate)))
    # for LDA
    # NOTE(review): when LDA_run is TRUE, list_acc_inacc is overwritten below,
    # so listData$Accuracy stores the LDA accuracy rather than ElasticNet's --
    # confirm this is intended
    if (LDA_run) {
        predict_cluster_dat_LDA <- as.data.frame(lda_predict)
        predict_cluster_dat_LDA$cellnames <- row.names(predictor_S2)
        # merge original and predicted datasets
        compare <- merge(original_cluster, predict_cluster_dat_LDA,
            by.x = "V1", by.y = "cellnames")
        # find how many predictions are accurate
        cluster_select_predict_LDA <- subset(compare,
            (as.numeric(compare[, 2]) == c_selectID & compare$lda_predict ==
                c_selectID) | (as.numeric(compare[, 2]) != c_selectID &
                compare$lda_predict != c_selectID))
        accurate_LDA <- dim(cluster_select_predict_LDA)[1]
        inaccurate_LDA <- dim(compare)[1] -
            dim(cluster_select_predict_LDA)[1]
        list_acc_inacc <- list(accurate_LDA, inaccurate_LDA)
        message(paste0("evaluation accuracy LDA ",
            accurate_LDA/(accurate_LDA + inaccurate_LDA)))
    }
    # done estimate accuracy----------------------------------------------------
    # write the results into the output list at position out_idx
    listData$Accuracy[[out_idx]] <- list(list_acc_inacc)
    listData$ElasticNetGenes[[out_idx]] <- list(sub_cvfit_out)
    listData$Deviance[[out_idx]] <- list(dat_DE_fm_DE)
    listData$ElasticNetFit[[out_idx]] <- list(cvfit)
    if (LDA_run) {
        listData$LDAFit[[out_idx]] <- list(fit.lda)
    } else {
        listData$LDAFit[[out_idx]] <- list(NA)
    }
    listData$predictor_S1[[out_idx]] <- list(t(dataset))
    return(listData)
}
#' Main prediction function applying the optimal ElasticNet and LDA models
#'
#' @description Predict a new mixed population after training the model for a
#' subpopulation in the first mixed population.
#' All subpopulations in the new target mixed population will be predicted,
#' where each targeted subpopulation will have a transition score from the
#' orginal subpopulation to the new subpopulation.
#' @param listData a \code{list} object containing trained results for the
#' selected subpopulation in the first mixed population
#' @param mixedpop2 a \linkS4class{SingleCellExperiment} object from the target
#' mixed population whose subpopulations are to be predicted
#' @param out_idx a number to specify index to write results into the list
#' output. This is needed for running bootstrap.
#' @param cluster_mixedpop2 a vector of cluster assignment for mixedpop2
#' @param standardize a logical of whether to standardize the data
#' @param LDA_run logical, if the LDA prediction is added to compare to
#' ElasticNet, the LDA model needs to be trained from the training before
#' inputting to this prediction step
#' @param c_selectID a number to specify the trained cluster used for prediction
#' @param log_transform boolean whether log transform should be computed
#' @return a \code{list} with prediction results written in to the index
#' \code{out_idx}
#' @export
#' @author Quan Nguyen, 2017-11-25
#' @examples
#' c_selectID<-1
#' out_idx<-1
#' day2 <- day_2_cardio_cell_sample
#' mixedpop1 <-new_scGPS_object(ExpressionMatrix = day2$dat2_counts,
#' GeneMetadata = day2$dat2geneInfo, CellMetadata = day2$dat2_clusters)
#' day5 <- day_5_cardio_cell_sample
#' mixedpop2 <-new_scGPS_object(ExpressionMatrix = day5$dat5_counts,
#' GeneMetadata = day5$dat5geneInfo, CellMetadata = day5$dat5_clusters)
#' genes <-training_gene_sample
#' genes <-genes$Merged_unique
#' listData <- training(genes,
#' cluster_mixedpop1 = colData(mixedpop1)[, 1], mixedpop1 = mixedpop1,
#' mixedpop2 = mixedpop2, c_selectID, listData =list(), out_idx=out_idx)
#' listData <- predicting(listData =listData, mixedpop2 = mixedpop2,
#' out_idx=out_idx, cluster_mixedpop2 = colData(mixedpop2)[, 1],
#' c_selectID = c_selectID)
#'
predicting <- function(listData = NULL, cluster_mixedpop2 = NULL,
mixedpop2 = NULL, out_idx = NULL, standardize = TRUE, LDA_run = FALSE,
c_selectID = NULL, log_transform = FALSE) {
# predictor_S1 is the dataset used for the training phase
# (already transposed)
predictor_S1 <- listData$predictor_S1[[out_idx]][[1]] #1 for extract matrix
cvfit_best <- listData$ElasticNetFit[[out_idx]][[1]]
fit.lda <- listData$LDAFit[[out_idx]][[1]]
my.clusters <- cluster_mixedpop2
if (log_transform) {
ori_dat_2 <- assays(mixedpop2)$logcounts
} else {
ori_dat_2 <- assays(mixedpop2)$counts
}
# standardizing data (centered and scaled)
standardizing <- function(X) {
X <- X - mean(X)
X <- X/sd(X)
return(X)
}
if (standardize) {
message("standardizing target subpops before prediction...")
ori_dat_2 <- t(apply(ori_dat_2, 1, standardizing))
}
list_predict_clusters_ElasticNet <- vector(mode = "list",
length = length(unique(my.clusters)))
list_predict_clusters_LDA <- vector(mode = "list",
length = length(unique(my.clusters)))
list_cell_results <- vector(mode = "list")
for (clust in unique(my.clusters)) {
message(paste0("predicting from source to target subpop ", clust,
"..."))
c_selectID_2 <- clust
cluster_select <- which(my.clusters == c_selectID_2) #select cells
message(paste0("number of cells in the target subpop ", clust, " is ",
length(cluster_select)))
# Get gene names from the trained model
target_genes <- elementMetadata(mixedpop2)[, 1]
# A function to match target dataset
matching_genes <- function(target_dataset, model_genes,
cluster_idx = cluster_select) {
target_genes <- row.names(target_dataset)
targetGenes_remove <- which(!(as.character(target_genes) %in%
model_genes)) #genes in target but not in modelGenes
message(paste0("Number of genes in the target data, ",
"but not in model genes is ", length(targetGenes_remove)))
target_dataset <- target_dataset[-targetGenes_remove, ]
# update target genes
target_genes <- row.names(target_dataset)
# assumming the same gename formats between target and model
modelGenes_idx <- match(model_genes, as.character(target_genes))
#which indexes of the target genes that match model genes
message(paste0("Number of genes in the model present ",
"in the target data is ",
length(na.omit(modelGenes_idx))))
if (length(na.omit(modelGenes_idx)) == 0) {
message(paste0("No genes in the model present in the data...",
"check your input"))
}
# check how many genes in model_genes but not in mixedpop2 and add
# those genes into the target
to_add <- which(is.na(modelGenes_idx) == TRUE)
message(paste0("There are ", length(to_add),
" genes that are in the model, ",
"but not in target subpopulations"))
# update target_dataset
predictor_S2_temp <- target_dataset[na.omit(modelGenes_idx),
cluster_select]
if (length(to_add) > 0) {
message(paste0("for dim conformation, add ", length(to_add),
" random indexes for genes in model",
" but not in target subpop, later to be replaced by 0"))
# if the genes not in the target dataset,
# set the expression values to 0
to_add_df <- as.data.frame(matrix(0, nrow = length(to_add),
ncol = length(cluster_select))) #columns = number of cells
row.names(to_add_df) <- gsub("`", "", model_genes[to_add])
#format special names with '`MCM3AP-AS1`'
message(paste0("genes to be added to target subpop are ",
as.vector(row.names(to_add_df))))
colnames(to_add_df) <- colnames(predictor_S2_temp)
predictor_S2_temp <- rbind(predictor_S2_temp, to_add_df)
}
message(paste0("the prediction (target) subop has ",
dim(predictor_S2_temp)[1], " genes and ",
dim(predictor_S2_temp)[2], " cells. The trained model has ",
length(model_genes), " genes"))
message("first 10 genes in model ")
message(head(model_genes, n = 10))
message("first 10 genes in target ")
message(head(row.names(predictor_S2_temp), n = 10))
return(predictor_S2_temp)
}
# update predictor dataset
predictor_S2_temp <- matching_genes(target_dataset = ori_dat_2,
model_genes = cvfit_best$glmnet.fit$beta@Dimnames[[1]],
cluster_idx = cluster_select)
# predict ElasticNet:
message("running elasticNet classification...")
predict_clusters_ElasticNet <- predict(cvfit_best,
newx = t(predictor_S2_temp), type = "class",
s = cvfit_best$lambda.min)
list_cell_results[[clust]] <- predict_clusters_ElasticNet
ElasticNet_result <- as.data.frame(table(predict_clusters_ElasticNet))
#convert table() to 2x2 dataframe,
# it will always have 2 variable names: $name, Freq
report_ElasticNet <- paste0("ElasticNet for subpop", c_selectID_2,
" in target mixedpop2")
# report class probability
if (ncol(ElasticNet_result) == 2) {
ElasticNet_cluster_idx <- which(ElasticNet_result[, 1] ==
c_selectID)
if (length(ElasticNet_cluster_idx) > 0) {
predict_clusters_ElasticNet_proportion <- as.numeric(
ElasticNet_result[ElasticNet_cluster_idx, 2])/
sum(as.numeric(ElasticNet_result[, 2])) * 100
message(paste0("class probability prediction ElasticNet",
" for target subpop ", clust, " is ",
predict_clusters_ElasticNet_proportion))
} else {
predict_clusters_ElasticNet_proportion = 0
message(paste0("class probability prediction ElasticNet ",
"for target subpop ", clust, " is 0"))
}
predict_clusters_ElasticNet <- list(report_ElasticNet,
predict_clusters_ElasticNet_proportion)
} else {
message(paste0("ElasticNet for target subpop ", clust,
" has no solution"))
predict_clusters_ElasticNet <- list(report_ElasticNet, "NA")
}
# write to the next list level in the list if bootstrap is used
list_predict_clusters_ElasticNet <- c(list_predict_clusters_ElasticNet,
predict_clusters_ElasticNet)
# done checking---------------------------------------------------------
# predict
# LDA:
if (LDA_run)
{
# newdataset <- as.data.frame(t(ori_dat_2[,cluster_select]))
# for better LDA conversion, the target data should not be
# standardised
if (log_transform) {
ori_dat_2 <- assays(mixedpop2)$logcounts
} else {
ori_dat_2 <- assays(mixedpop2)$counts
}
newdataset <- matching_genes(target_dataset = ori_dat_2,
model_genes = fit.lda$finalModel$xNames,
cluster_idx = cluster_select)
message("running LDA classification...")
predict_clusters_LDA <- predict(fit.lda, t(newdataset),
na.action = na.omit)
LDA_result <- as.data.frame(table(predict_clusters_LDA))
# convert table() to 2x2 dataframe, it will always have 2
# variable names: $name,
report_LDA <- paste0("LDA for subpop ", c_selectID_2,
" in target mixedpop2")
if (ncol(LDA_result) == 2) {
LDA_cluster_idx <- which(LDA_result[, 1] == c_selectID)
predict_clusters_LDA <- as.numeric(LDA_result[
LDA_cluster_idx, 2])/
sum(as.numeric(LDA_result[, 2])) * 100
message(paste0("class probability prediction LDA ",
"for target subpop ",
clust, " is ", predict_clusters_LDA))
# write prediction output
predict_clusters_LDA <- list(report_LDA,
predict_clusters_LDA)
} else {
message(paste0("prediction LDA has no solution for ",
"target subpop ", clust))
# write prediction output
predict_clusters_LDA <- list(report_LDA, "NA")
}
list_predict_clusters_LDA <- c(list_predict_clusters_LDA,
predict_clusters_LDA)
} # end LDA run
} # end the loop through all subpops
# write to the next list level in the list if bootstrap is used to write
# prediction result
listData$ElasticNetPredict[out_idx] <- list(
list_predict_clusters_ElasticNet)
if (LDA_run) {
listData$LDAPredict[out_idx] <- list(list_predict_clusters_LDA)
} else {
listData$LDAPredict[out_idx] <- list(NA)
}
listData$cell_results <- list_cell_results
return(listData)
}
#' BootStrap runs for both scGPS training and prediction
#'
#' @description ElasticNet and LDA prediction for each of all the
#' subpopulations in the new mixed population after training the model for a
#' subpopulation in the first mixed population. The number of bootstraps to be
#' run can be specified.
#' @seealso \code{\link{bootstrap_parallel}} for parallel options
#' @param listData a \code{list} object, which contains trained results for the
#' first mixed population
#' @param mixedpop1 a \linkS4class{SingleCellExperiment} object from a mixed
#' population for training
#' @param mixedpop2 a \linkS4class{SingleCellExperiment} object from a target
#' mixed population for prediction
#' @param cluster_mixedpop1 a vector of cluster assignment for mixedpop1
#' @param cluster_mixedpop2 a vector of cluster assignment for mixedpop2
#' @param c_selectID the root cluster in mixedpop1 to be compared to clusters
#' in mixedpop2
#' @param genes a gene list to build the model
#' @param verbose a logical whether to display additional messages
#' @param nboots a number specifying how many bootstraps to be run
#' @param trainset_ratio a number specifying the proportion of cells to be part
#' of the training subpopulation
#' @param LDA_run logical, if the LDA prediction is added to compare to
#' ElasticNet
#' @param log_transform boolean whether log transform should be computed
#' @return a \code{list} with prediction results written in to the index
#' \code{out_idx}
#' @export
#' @author Quan Nguyen, 2017-11-25
#' @examples
#' day2 <- day_2_cardio_cell_sample
#' mixedpop1 <-new_scGPS_object(ExpressionMatrix = day2$dat2_counts,
#' GeneMetadata = day2$dat2geneInfo, CellMetadata = day2$dat2_clusters)
#' day5 <- day_5_cardio_cell_sample
#' mixedpop2 <-new_scGPS_object(ExpressionMatrix = day5$dat5_counts,
#' GeneMetadata = day5$dat5geneInfo, CellMetadata = day5$dat5_clusters)
#' genes <-training_gene_sample
#' genes <-genes$Merged_unique
#' cluster_mixedpop1 <- colData(mixedpop1)[,1]
#' cluster_mixedpop2 <- colData(mixedpop2)[,1]
#' c_selectID <- 2
#' test <- bootstrap_prediction(nboots = 1, mixedpop1 = mixedpop1,
#' mixedpop2 = mixedpop2, genes=genes, listData =list(),
#' cluster_mixedpop1 = cluster_mixedpop1,
#' cluster_mixedpop2 = cluster_mixedpop2, c_selectID = c_selectID)
#' names(test)
#' test$ElasticNetPredict
#' test$LDAPredict
bootstrap_prediction <- function(nboots = 1, genes = genes,
    mixedpop1 = mixedpop1, mixedpop2 = mixedpop2, c_selectID = NULL,
    listData = list(), cluster_mixedpop1 = NULL, cluster_mixedpop2 = NULL,
    trainset_ratio = 0.5, LDA_run = TRUE, verbose = FALSE,
    log_transform = FALSE) {
    # Run `nboots` train-then-predict cycles: train a model for subpopulation
    # c_selectID in mixedpop1, then predict onto every subpopulation of
    # mixedpop2, writing each bootstrap's results into listData[[out_idx]].
    #
    # The verbose and non-verbose paths previously duplicated the entire
    # loop body, differing only in suppressMessages(); a single loop is used
    # here and the detailed messages from training()/predicting() are
    # silenced unless verbose = TRUE.  The per-bootstrap progress message
    # below is always emitted, as before.
    maybe_quiet <- if (verbose) identity else suppressMessages
    for (out_idx in seq_len(nboots)) {
        # c_selectID is passed by name (it was positional before), which is
        # safer should training()'s argument order ever change.
        listData <- maybe_quiet(training(genes = genes,
            mixedpop1 = mixedpop1, mixedpop2 = mixedpop2,
            trainset_ratio = trainset_ratio, c_selectID = c_selectID,
            listData = listData, out_idx = out_idx,
            cluster_mixedpop1 = cluster_mixedpop1, standardize = TRUE,
            LDA_run = LDA_run, log_transform = log_transform))
        message(paste0("done training for bootstrap ", out_idx,
            ", moving to prediction..."))
        listData <- maybe_quiet(predicting(listData = listData,
            mixedpop2 = mixedpop2, out_idx = out_idx, standardize = TRUE,
            cluster_mixedpop2 = cluster_mixedpop2,
            LDA_run = LDA_run, c_selectID = c_selectID,
            log_transform = log_transform))
    }
    return(listData)
}
#' BootStrap runs for both scGPS training and prediction
#' with parallel option
#'
#' @description same as bootstrap_prediction, but with an multicore option
#' @param listData a \code{list} object, which contains trained results for the
#' first mixed population
#' @param mixedpop1 a \linkS4class{SingleCellExperiment} object from a mixed
#' population for training
#' @param mixedpop2 a \linkS4class{SingleCellExperiment} object from a target
#' mixed population for prediction
#' @param cluster_mixedpop1 a vector of cluster assignment for mixedpop1
#' @param cluster_mixedpop2 a vector of cluster assignment for mixedpop2
#' @param genes a gene list to build the model
#' @param nboots a number specifying how many bootstraps to be run
#' @param ncores a number specifying how many cpus to be used for running
#' @param c_selectID the root cluster in mixedpop1 to be compared to clusters
#' in mixedpop2
#' @return a \code{list} with prediction results written in to the index
#' \code{out_idx}
#' @export
#' @author Quan Nguyen, 2017-11-25
#' @examples
#' day2 <- day_2_cardio_cell_sample
#' mixedpop1 <-new_scGPS_object(ExpressionMatrix = day2$dat2_counts,
#' GeneMetadata = day2$dat2geneInfo, CellMetadata = day2$dat2_clusters)
#' day5 <- day_5_cardio_cell_sample
#' mixedpop2 <-new_scGPS_object(ExpressionMatrix = day5$dat5_counts,
#' GeneMetadata = day5$dat5geneInfo, CellMetadata = day5$dat5_clusters)
#' genes <-training_gene_sample
#' genes <-genes$Merged_unique
#' #prl_boots <- bootstrap_parallel(ncores = 4, nboots = 1, genes=genes,
#' # mixedpop1 = mixedpop2, mixedpop2 = mixedpop2, c_selectID=1,
#' # listData =list())
#' #prl_boots[[1]]$ElasticNetPredict
#' #prl_boots[[1]]$LDAPredict
#'
bootstrap_parallel <- function(ncores = 4, nboots = 1, genes = genes,
    mixedpop1 = mixedpop1, mixedpop2 = mixedpop2, c_selectID, listData = list(),
    cluster_mixedpop1 = NULL, cluster_mixedpop2 = NULL) {
    # One complete train-then-predict cycle.  `boot_idx` receives the
    # bootstrap number from bplapply(); each worker starts from its own
    # empty listData, so results are always written at slot 1 of that list
    # (matching the original out_idx = 1 behaviour).
    bootstrap_single <- function(boot_idx, genes, mixedpop1, mixedpop2,
        c_selectID, listData = list(), cluster_mixedpop1 = NULL,
        cluster_mixedpop2 = NULL) {
        listData <- training(genes = genes, mixedpop1 = mixedpop1,
            mixedpop2 = mixedpop2, c_selectID = c_selectID,
            listData = listData, out_idx = 1,
            cluster_mixedpop1 = cluster_mixedpop1)
        listData <- predicting(listData = listData, mixedpop2 = mixedpop2,
            out_idx = 1, standardize = TRUE,
            cluster_mixedpop2 = cluster_mixedpop2)
        return(listData)
    }
    BiocParallel::register(BiocParallel::MulticoreParam(workers = ncores,
        progressbar = TRUE))
    # Bug fix: cluster_mixedpop1/cluster_mixedpop2 were accepted (and
    # documented above) but never forwarded, so every worker ran with NULL
    # cluster assignments.  They are now passed through to each bootstrap.
    # NOTE(review): as in the original, the caller-supplied listData argument
    # is not forwarded -- each worker begins from an empty list().
    listData <- BiocParallel::bplapply(seq_len(nboots), bootstrap_single,
        genes = genes, mixedpop1 = mixedpop1, mixedpop2 = mixedpop2,
        c_selectID = c_selectID, listData = list(),
        cluster_mixedpop1 = cluster_mixedpop1,
        cluster_mixedpop2 = cluster_mixedpop2)
    return(listData)
}
|
d895443ec3d544adbfab66fffc587c9d3801bfe5 | 44e97a4f153fede9ad36444426382599a0b5ad34 | /man/pathfindr.Rd | a8564f195f1f639f50fa746f2f9f31f829e1c50a | [
"MIT"
] | permissive | egeulgen/pathfindR | 3f58046ad48ecb7698c604bc9959e7a70d49b53a | 857f4b70566ea3a0413c015fd9580c15137a1615 | refs/heads/master | 2023-08-22T15:13:58.185804 | 2023-08-22T12:57:44 | 2023-08-22T12:57:44 | 115,432,311 | 149 | 28 | NOASSERTION | 2023-08-27T15:21:31 | 2017-12-26T15:11:22 | R | UTF-8 | R | false | true | 2,365 | rd | pathfindr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pathfindr.R
\docType{package}
\name{pathfindR}
\alias{pathfindR}
\alias{pathfindR-package}
\title{pathfindR: A package for Enrichment Analysis Utilizing Active Subnetworks}
\description{
pathfindR is a tool for active-subnetwork-oriented gene set enrichment analysis.
The main aim of the package is to identify active subnetworks in a
protein-protein interaction network using a user-provided list of genes
and associated p values then performing enrichment analyses on the identified
subnetworks, discovering enriched terms (i.e. pathways, gene ontology, TF target
gene sets etc.) that possibly underlie the phenotype of interest.
}
\details{
For analysis on non-Homo sapiens organisms, pathfindR offers utility functions
for obtaining organism-specific PIN data and organism-specific gene sets data.
pathfindR also offers functionalities to cluster the enriched terms and
identify representative terms in each cluster, to score the enriched terms
per sample and to visualize analysis results.
}
\seealso{
See \code{\link{run_pathfindR}} for details on the pathfindR
active-subnetwork-oriented enrichment analysis.
See \code{\link{cluster_enriched_terms}} for details on methods of enriched
terms clustering to define clusters of biologically-related terms.
See \code{\link{score_terms}} for details on agglomerated score calculation
for enriched terms to investigate how a gene set is altered in a given sample
(or in cases vs. controls).
See \code{\link{term_gene_heatmap}} for details on visualization of the heatmap
of enriched terms by involved genes.
See \code{\link{term_gene_graph}} for details on visualizing terms and
term-related genes as a graph to determine the degree of overlap between the
enriched terms by identifying shared and/or distinct significant genes.
See \code{\link{UpSet_plot}} for details on creating an UpSet plot of the
enriched terms.
See \code{\link{get_pin_file}} for obtaining organism-specific PIN data and
\code{\link{get_gene_sets_list}} for obtaining organism-specific gene sets data.
}
\author{
\strong{Maintainer}: Ege Ulgen \email{egeulgen@gmail.com} (\href{https://orcid.org/0000-0003-2090-3621}{ORCID}) [copyright holder]
Authors:
\itemize{
\item Ozan Ozisik \email{ozanytu@gmail.com} (\href{https://orcid.org/0000-0001-5980-8002}{ORCID})
}
}
|
527fa0367ec470cec2f154ba404c68bb444933c6 | 933fb24d384e1474ec2d3dadb6ef13435984390f | /src/estatistica_descritiva_bd_zarc.R | 6aaa69bb6851e6950b0de7c624e40cc9ad59e5f8 | [] | no_license | dvictori/ptf_zarc | 286f3ae83d2c7d0f96a638c50d28e72484c7cc45 | c00dcf53465602ddedaf4a17ac1fa8d592978953 | refs/heads/master | 2023-06-14T20:41:12.972871 | 2021-07-06T13:46:09 | 2021-07-06T13:46:09 | 290,854,237 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,030 | r | estatistica_descritiva_bd_zarc.R | # Estatísticas básicas do conjunto de dados principal
library(tidyverse)
library(Ternary)
source('src/grupo_subgrupo_textural.R')
# Load data from the PTF (pedotransfer function) database
dados <- read.csv2("dados/bd_zarc.csv")
# Rescale available water (AD); the plots below label AD in mm cm⁻¹
dados$AD <- dados$AD * 10
# Check that the particle-size fractions (sand + silt + clay) sum to 100 (100%)
soma <- dados$AT + dados$SIL + dados$ARG
max(soma) # maximum of the series of sums
min(soma) # minimum of the series of sums
# Classify each sample into textural group / subgroup (helpers sourced above)
dados$grupo <- grupo_textural(dados$AT, dados$SIL, dados$ARG)
dados$subgrupo <- subgrupo_textural(dados$AT, dados$SIL, dados$ARG)
summary(dados)
# Kernel density plots of each particle-size fraction, saved under figs/
ggplot(dados) +
  geom_density(aes(x = AT)) +
  labs(x = 'Areia Total [%]', y = 'FDP Kernel')
ggsave('figs/distribuicao_at.png')
ggplot(dados) +
  geom_density(aes(x = SIL)) +
  labs(x = 'Silte [%]', y = 'FDP Kernel')
ggsave('figs/distribuicao_sil.png')
ggplot(dados) +
  geom_density(aes(x = ARG)) +
  labs(x = 'Argila [%]', y = 'FDP Kernel')
ggsave('figs/distribuicao_arg.png')
# Density of available water (AD) with median (solid) and quartile (dashed)
# reference lines
ggplot(dados) +
  geom_density(aes(x = AD)) +
  labs(x = 'Água disponível - AD - [mm cm⁻¹]', y = 'FDP Kernel') +
  geom_vline(aes(xintercept = median(AD))) +
  geom_vline(aes(xintercept = quantile(AD, 0.25)), lty = 'dashed') +
  geom_vline(aes(xintercept = quantile(AD, 0.75)), lty = 'dashed')
ggsave('figs/distribuicao_AD.png')
# Long format: one row per (sample, fraction) for overlaid / faceted plots
dados_long <- dados %>%
  pivot_longer(c(AT, SIL, ARG),
               names_to = 'fração') %>%
  mutate(fração = factor(fração, levels = c('AT', 'SIL', 'ARG'),
                         labels = c('Areia Total', 'Silte', 'Argila')))
ggplot(dados_long) +
  geom_density(aes(x = value, fill = fração), alpha = 0.4) +
  labs(x = 'porcento', y = 'FDP Kernel', fill = 'Granulometria')
ggsave('figs/distribuicao_fracoes_over.png')
ggplot(dados_long) +
  geom_density(aes(x = value)) +
  facet_wrap(~fração) +
  labs(x = 'Porcento', y = 'FDP Kernel')
ggsave('figs/distribuicao_fracoes_grade.png')
ggplot(dados_long) +
  geom_boxplot(aes(x = fração, y = value)) +
  labs(x = 'Frações texturais', y = 'Porcento')
ggsave('figs/boxplot_fracoes.png')
# Boxplot of AD per textural subgroup, annotated with the median and (n)
resumo <- dados %>%
  group_by(subgrupo) %>%
  summarise(mediana = round(median(AD),2),
            obs = n())
ggplot(dados) +
  geom_boxplot(aes(x = subgrupo, y = AD)) +
  geom_text(data = resumo,
            aes(x = subgrupo, y = mediana + 0.08, label = mediana)) +
  geom_text(data = resumo,
            aes(x = subgrupo, y = 2.6, label = paste0('(', obs, ')'))) +
  labs(x = 'Subgrupamento textural', y = 'AD [mm cm⁻¹]')
ggsave('figs/boxplot_AD_subgrupo_textural.png')
#### Ternary diagram with the samples ####
# Save the default graphics settings
# so they can be restored afterwards
orig.par <- par(no.readonly = TRUE)
# Map the AD values onto a 5-class color palette
paleta <- c('#d7191c','#fdae61','#ffffbf','#abdda4','#2b83ba')
quebras_amostra <- cut(dados$AD,
                       breaks = seq(min(dados$AD),
                                    max(dados$AD),
                                    len = 6),
                       include.lowest = TRUE)
cores <- paleta[quebras_amostra]
# Save at high resolution
png('figs/amostras_ternario.png', width = 4,
    height = 4, units = 'in', res = 300)
# Shrink the plot margins
par(mar = c(1,1,2,1))
TernaryPlot(
  alab = 'Argila - [%]', blab = 'Silte - [%]', clab = 'Areia Total - [%]')
AddToTernary(points, dados[c('ARG', 'SIL', 'AT')],
             cex = 0.5, pch = 21, bg = cores, col = 'grey35')
legend('topright',
       title = 'AD [mm cm⁻¹]',
       legend = levels(quebras_amostra),
       cex=0.6, bty='n', pch=21, pt.cex=1,
       pt.bg = paleta, col = 'grey25')
#dev.copy(png, 'figs/amostras_ternario.png')
dev.off()
# Restore the saved graphics settings
par(orig.par)
#### Descriptive statistics of the M2 model results ####
dados_modelo <- read.csv2('resultados/Preditos_triangulo_AD_m2.csv')
dados_modelo$grupo <- grupo_textural(dados_modelo$AT, dados_modelo$SIL, dados_modelo$ARG)
dados_modelo$subgrupo <- subgrupo_textural(dados_modelo$AT, dados_modelo$SIL, dados_modelo$ARG)
# Convert unit to mm/cm (columns 4:6 are assumed to hold the AD predictions
# -- TODO confirm against the CSV header)
dados_modelo[4:6] <- dados_modelo[4:6] * 10
ggplot(dados_modelo) +
  geom_density(aes(x = ADm2)) +
  labs(x = 'Água disponível calculada- ADcal - [mm cm⁻¹]', y = 'FDP Kernel') +
  geom_vline(aes(xintercept = median(ADm2))) +
  geom_vline(aes(xintercept = quantile(ADm2, 0.25)), lty = 'dashed') +
  geom_vline(aes(xintercept = quantile(ADm2, 0.75)), lty = 'dashed')
ggsave('figs/distribuicao_AD_modelo2.png')
# Boxplot of predicted AD per textural subgroup, annotated with median and (n)
resumo_modelo <- dados_modelo %>%
  group_by(subgrupo) %>%
  summarise(mediana = round(median(ADm2),2),
            obs = n())
ggplot(dados_modelo) +
  geom_boxplot(aes(x = subgrupo, y = ADm2)) +
  geom_text(data = resumo_modelo,
            aes(x = subgrupo, y = mediana + 0.08, label = mediana)) +
  geom_text(data = resumo_modelo,
            aes(x = subgrupo, y = 3, label = paste0('(', obs, ')'))) +
  labs(x = 'Subgrupamento textural', y = 'ADcal [mm cm⁻¹]')
ggsave('figs/boxplot_AD_subgrupo_textural_modelo2.png')
|
979af21e8745cf0a644a2f242546da3cc2db4233 | 4d95e5ceddbfaedd8c984fd5dc04036d2f6e88e6 | /maplet/man/mt_load_olink.Rd | 3f07cef151c574aca0e20c107f52caa7bbc054db | [] | no_license | ZWCharlie/maplet | 864603059867f3dae25f856832056dd780dde4a1 | 21a331bbb939b940b19ceb94105198c4213f2ed8 | refs/heads/main | 2023-04-12T00:44:49.776031 | 2021-04-27T00:04:44 | 2021-04-27T00:04:44 | 348,540,703 | 0 | 0 | null | 2021-03-17T01:15:52 | 2021-03-17T01:15:50 | null | UTF-8 | R | false | true | 1,244 | rd | mt_load_olink.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mt_load_olink.R
\name{mt_load_olink}
\alias{mt_load_olink}
\title{Load Olink-format data}
\usage{
mt_load_olink(D, file)
}
\arguments{
\item{D}{\code{SummarizedExperiment} input. Missing if first step in pipeline.}
\item{file}{Name of NPX file exported from NPX manger.}
}
\value{
If first step in pipeline, creates \code{SummarizedExperiment} object. Populates empty assay (note: 2^NPX), colData, and rowData data frames.
}
\description{
Loads data from an Olink-format Excel file.
Uses Olink's R code read_NPX taken from
\href{https://github.com/Olink-Proteomics/OlinkRPackage/tree/master/OlinkAnalyze}{https://github.com/Olink-Proteomics/Olink
RPackage/tree/master/OlinkAnalyze}.
In case the Olink file is not in XLSX format, but CSV or TSV text format:
\itemize{
\item you may need to remove all Ctrl data columns
\item save file as xlsx (make sure to rename SEPT9 back from Excel date)
\item don't keep the Excel file open while running R - this throws a file access denied error
}
}
\examples{
\dontrun{D <-
# load data
mt_load_olink(file=system.file("extdata", "example_data/sampledata.xlsx", package = "maplet")) \%>\%
...}
}
\author{
KS
}
|
a9e79308f6df737ed15728a40c18e7649f3230ba | 6e76530d40d0784c76ddea68fc879d73c55fe39e | /targetGenerator.R | 45f595e24245f19b584e9866b2a23bf314f1f4f8 | [] | no_license | DanColumbia/Futures | 5057ae2fdc7f2f3f5d17677e5d4f826020a4e0a5 | b82877916b5b2e2900a52f1f1720433ea0515aa8 | refs/heads/master | 2020-05-01T00:52:54.496410 | 2015-10-21T17:51:46 | 2015-10-21T17:51:46 | 34,427,373 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,550 | r | targetGenerator.R | library(data.table)
library(TTR)
library(dplyr)
library(zoo)
targetGenerator <- function(priceSeries, executionWindow = 1, fixedHoldingPeriod = TRUE, holdingPeriod = 5)
{
  # Build a percentage-profit target for each bar of an intraday price series.
  # fixedHoldingPeriod = FALSE to exit trade on next reverse signal (not yet
  # implemented; placeholder value), = TRUE to exit after `holdingPeriod`
  # days.  Assumes 16 bars per trading day (see the offset below).
  ps <- priceSeries
  ps[,date:=as.Date(dateTime)]
  # get time point ranking within each day (1 = first bar of the day)
  ps[,timeIndex:=time(dateTime)]
  ps <- cbind(ps, timeInDay=ave(ps$timeIndex, ps$date, FUN=rank))
  ps[,timeIndex:=NULL]
  ps[,timeInDay:=as.numeric(timeInDay)]
  # Mean price over the execution window.  fill = NA keeps the result the
  # same length as ps$index; without it, rollapply() returns a shorter
  # vector and the column assignment fails whenever executionWindow > 1.
  ps$executionPrice <- rollapply(ps$index, executionWindow, mean, fill = NA)
  if (fixedHoldingPeriod) {
    # Sell `holdingPeriod` days later, in the morning only: with 16 bars per
    # day, the exit bar sits holdingPeriod*16 - (timeInDay - 1) rows ahead.
    # This replaces the former 16-branch nested ifelse(), which encoded
    # exactly this formula.  timeInDay is clamped at 16 so days with extra
    # bars match the old fall-through branch; indexing past the end of the
    # series yields NA, exactly as dplyr::lead() did.
    rowOffset <- holdingPeriod * 16 - (pmin(ps$timeInDay, 16) - 1)
    exitPrice <- ps$executionPrice[seq_len(nrow(ps)) + rowOffset]
    ps$profit <- 100 * (exitPrice - ps$executionPrice) / ps$executionPrice
  } else {
    # Exit-on-reverse-signal mode is not implemented; keep placeholder.
    ps$profit <- 1
  }
  # NOTE(review): as in the original, the function never returns `ps`;
  # callers receive the value of the last assignment (the profit vector, or
  # 1) invisibly.  Consider ending with an explicit `ps`.
}
# NOTE(review): everything below is leftover scratch code sitting OUTSIDE
# targetGenerator().  It references ps$test, which is never created above,
# and calls which.min() with no argument, so sourcing this file top-level
# would error here.  The commented block sketches a per-bar exit test that
# was superseded by the profit computation inside the function.  Consider
# deleting this tail once confirmed unused.
  sapply(ps$test, function(i) which.min())
  a <- which(ps$test>=0)
  b <- which(ps$test<0)
# ps$exitTest<-ifelse(ps$timeInDay==1,ps$profit <- lead(ps$executionPrice, holdingPeriod*16),
#                 ifelse(ps$timeInDay==2,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-1),
#                 ifelse(ps$timeInDay==3,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-2),
#                 ifelse(ps$timeInDay==4,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-3),
#                 ifelse(ps$timeInDay==5,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-4),
#                 ifelse(ps$timeInDay==6,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-5),
#                 ifelse(ps$timeInDay==7,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-6),
#                 ifelse(ps$timeInDay==8,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-7),
#                 ifelse(ps$timeInDay==9,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-8),
#                 ifelse(ps$timeInDay==10,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-9),
#                 ifelse(ps$timeInDay==11,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-10),
#                 ifelse(ps$timeInDay==12,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-11),
#                 ifelse(ps$timeInDay==13,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-12),
#                 ifelse(ps$timeInDay==14,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-13),
#                 ifelse(ps$timeInDay==15,ps$profit <- lead(ps$executionPrice, holdingPeriod*16-14),
#                 ps$profit <- lead(ps$executionPrice, holdingPeriod*16-15)
#                 )))))))))))))))
|
2821603eee632032b6ba2fafdb290fb42c72a5e7 | 50e15a8ba4b9458be45fe579392fc6785667f4c9 | /fund_arules.R | 384c3aea891ce92d20198472083cf2fe2e62398a | [] | no_license | jhgil92/fund_arules | 63045e848305d11a845390f83ad052736926c4ed | 6a0e26a6e2c988c01a8ba29350d283f852e2b776 | refs/heads/master | 2020-04-14T07:40:08.736690 | 2019-01-01T06:54:35 | 2019-01-01T06:54:35 | 163,718,798 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,447 | r | fund_arules.R | ## 공개 포트폴리오 연관성 분석
# Public portfolio data: https://github.com/jhgil92/fund/blob/master/%EA%B3%B5%EA%B0%9C%20%ED%8F%AC%ED%8A%B8%ED%8F%B4%EB%A6%AC%EC%98%A4.ipynb
# Fund info data: https://github.com/jhgil92/fund_info_crawling/blob/master/fund_info_crawling_v2.R
# setwd(".//r//arules")
# data load
port <- read.csv("./input/portfolio.csv", row.names =NULL)
fund_info <- read.csv("./input/fund_info.csv", row.names = NULL)
# NOTE(review): %>% is used on the next two lines before library(dplyr) is
# attached below -- this errors in a fresh session; move the library calls up.
port %>% head
fund_info %>% head
# library load
library(arules)
library(dplyr)
library(stringr)
# data preprocessing
# drop columns 1 and 7; the remaining columns hold the funds of each
# portfolio -- TODO confirm the column layout against portfolio.csv
port %>%
  select(c(-1,-7)) -> port
# Build one character vector of fund names per portfolio row (NAs removed);
# the list is preallocated to nrow(port) before the loop fills it.
tr_list <- list()
length(tr_list) <- nrow(port)
for(i in 1:length(tr_list)){
  port[i,] %>%
    t %>%
    as.vector %>%
    lapply(function(x){return(x[!is.na(x)])}) %>%
    unlist -> tr_list[[i]]
  cat("\n", i)
}
# Coerce the list of item vectors into an arules transactions object
tr <- as(tr_list, 'transactions')
summary(tr)
inspect(tr[1:5])
# Add type columns to itemInfo: first flatten the item labels to a vector
tr@itemInfo %>%
  t %>%
  as.vector -> fund_name
# Look up the type name (TYPE_NAME) of the i-th item label in `fund_name`
# within the global `fund_info` table.  Returns "NOT SET" when the fund name
# is absent from fund_info; otherwise returns the matching TYPE_NAME
# value(s) coerced to character.
return_type <- function(i) {
  item_name <- fund_name[i]
  if (!(item_name %in% fund_info$FP_KRN_NAME)) {
    return("NOT SET")
  }
  hits <- which(fund_info$FP_KRN_NAME == item_name)
  as.character(fund_info$TYPE_NAME[hits])
}
# level2: fund-type label for every item (order matches tr@itemInfo rows)
lapply(1:length(fund_name), return_type) %>%
  unlist -> level2
tr@itemInfo <- cbind(tr@itemInfo,level2)
# Same lookup as return_type(), but returns the coarser large-type label
# (LRG_TYPE_NAME) for the i-th item, or "NOT SET" when the fund name is not
# present in the global `fund_info` table.
return_type_2 <- function(i) {
  item_name <- fund_name[i]
  if (!(item_name %in% fund_info$FP_KRN_NAME)) {
    return("NOT SET")
  }
  hits <- which(fund_info$FP_KRN_NAME == item_name)
  as.character(fund_info$LRG_TYPE_NAME[hits])
}
# level3: coarser fund-type label for every item
lapply(1:length(fund_name), return_type_2) %>%
  unlist -> level3
tr@itemInfo <- cbind(tr@itemInfo,level3)
# Aggregate the transactions at the coarser itemInfo hierarchy levels
tr2 <- aggregate(tr, 'level2')
tr3 <- aggregate(tr, 'level3')
## Run association analysis with the apriori algorithm
# apriori(data, parameter=list(support=0.1, confidence=0.8, minlen=1, maxlen=10, smax=1))
# support = minimum support, confidence = minimum confidence,
# minlen/maxlen = min/max number of items (lhs+rhs), smax = maximum support
# The values above are the defaults; when `parameter` is omitted,
# apriori() mines with these defaults.
tr %>%
  apriori(parameter = list(support=0.05)) %>%
  sort(by='lift') %>%
  inspect
tr2 %>%
  apriori(parameter = list(support=0.05)) %>%
  sort(by='lift') %>%
  inspect
tr3 %>%
  apriori(parameter = list(support=0.001, confidence=0.3)) %>%
  sort(by='lift') %>%
  inspect
|
b7c94c56904efd62ed96e0fa5baa7bd0dd814caf | 4315fd829fcd7da3ea48ef2b0143e50046b83cab | /table_2.R | c1a88c86b39e07f64942e5846d95a045f4c5e133 | [] | no_license | aurielfournier/Fournier_Mengel_Krementz_Sora_Autumn_Habitat_Use | b0ce341524492c25abc01aa2a7a9847302afd791 | c12199e6afd434e3d2554d0615608af82b458549 | refs/heads/master | 2020-12-25T15:17:33.222293 | 2016-08-06T01:56:51 | 2016-08-06T01:56:51 | 62,965,126 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,563 | r | table_2.R | library(ResourceSelection)
library(ggplot2)
library(gridExtra)
library(tidyr)
library(dplyr)
library(raildata)
library(auriel)
library(reshape)
library(grid)
# Vegetation survey data -- presumably shipped by one of the packages loaded
# above (raildata/auriel); confirm the data source.
data("allveg") # or allveg.csv
allveg <- allveg[allveg$bv!="day",] # removing the veg points where the bird was detected during the day
allveg <- allveg[allveg$averagewater<=900,] # keep only measurable water depths (<= 900); values entered over 900 indicated that the water was too deep to be measured
allveg <- allveg[!is.na(allveg$bv),] # removing points where the status was not recorded
# Per-year median/min/max of the averagewater and short columns
table2a <- allveg[,c("year","averagewater","short")] %>%
  gather("variable","value",-year) %>%
  group_by(year, variable) %>%
  summarise(median=median(value, na.rm=TRUE),
            min=min(value, na.rm=TRUE),
            max=max(value, na.rm=TRUE)) %>%
  select(variable, everything()) %>%
  ungroup() %>%
  mutate(year = as.character(year))
# All-years summary (year = "all") stacked with the per-year rows, then
# written out for the manuscript's Table 2
table2 <- allveg[,c("year","averagewater","short")] %>%
  gather("variable","value",-year) %>%
  group_by(variable) %>%
  summarise(year="all",
            median=median(value, na.rm=TRUE),
            min=min(value, na.rm=TRUE),
            max=as.integer(max(value, na.rm=TRUE))) %>%
  bind_rows(table2a) %>%
  arrange(variable, year)
write.csv(table2, file="~/Dissertation_Chapter_2_Sora_Habitat/table2.csv", row.names = FALSE)
|
d4398e9d412403bd62fe8161e4bb2cf76cb1020c | 8a601a85ea2dee26595f1bd1456963341d296c3a | /figure-ireg-exact-kstar-cost.R | c0ba054917b704234f3c08597e9a69d435ac088d | [] | no_license | tdhock/breakpointError-orig | 02324fbc9316ae42530b49ac622f43b0d9a77344 | 5d273a22eeffc0fb0750498b4192cb86c9fa2891 | refs/heads/master | 2016-08-05T03:48:18.071316 | 2015-11-24T17:01:06 | 2015-11-24T17:01:06 | 19,539,405 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 801 | r | figure-ireg-exact-kstar-cost.R | load("data/exact.cost.RData")
ann.set <- "detailed.low.density"
source("scripts/ireg.signals.R")
library(reshape2)
int.df <- do.call(rbind,lapply(ireg.signals[1],function(pid.chr){
intervals <- exact.cost[[ann.set]][[pid.chr]]
intervals[["optimal number of segments"]] <- intervals$segments
intervals.limited <-
intervals[,c("optimal number of segments","cost","min.L","max.L")]
molt <- melt(intervals.limited,id=c("min.L","max.L"))
data.frame(molt,pid.chr)
}))
library(ggplot2)
p <- ggplot()+
geom_segment(aes(min.L,value,xend=max.L,yend=value),data=int.df,lwd=0.8)+
facet_grid(variable~.,scales="free",space="free")+
xlab("log(gamma) degree of smoothness")+
scale_y_continuous("",breaks=0:20,minor_breaks=NULL)
pdf("figure-ireg-exact-kstar-cost.pdf",h=3.5)
print(p)
dev.off()
|
4eff8c39ef3600f18daeca5f17178c7255325205 | 87c73fd0fb4225ac3b6a7fff9f943abc80ca8be0 | /MoistureAnalysis R files/TestMoistureLogv2CleanData.R | 16a435cce25f333c0981eef9e33a9d1131c29d0e | [] | no_license | DJ-Prince/OPC | 1f562e31f30b4fa6f0f27bc5f1696aa86dcf4857 | 9283b685b644f3904619e465b21b6c6aaed4ac6c | refs/heads/master | 2020-04-20T08:04:57.360232 | 2019-02-15T13:49:39 | 2019-02-15T13:49:39 | 168,728,982 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 165 | r | TestMoistureLogv2CleanData.R | library(knitr)
# Convert the R Markdown report to Markdown with knitr (loaded at the top of
# this file), then render the generated Markdown to its final output format.
library(rmarkdown)
knit("TestMoistureLogv2CleanData.Rmd")
# render the generated markdown file.
render("TestMoistureLogv2CleanData.md")
# Short pause -- presumably so the rendered output is fully written before
# anything downstream reads it; TODO confirm why this is needed.
Sys.sleep(3)
69d82027b5dea8c1577192dc16d4d2530f654478 | 4dd05c5789fbf09aeb6fdc1229ccf9b8b3caf511 | /man/wikipediaTraffic.Rd | 6967ba7ae3728280d57768f2466adac4a41e8f29 | [
"MIT"
] | permissive | dschmeh/seoR | 2d553d3a06aea53c572d30cb9cd2c3a91e2eb8b1 | 9908f41b3f026930bde83028b8669f70d3a1030d | refs/heads/master | 2023-01-04T10:07:53.267464 | 2022-12-21T12:33:59 | 2022-12-21T12:33:59 | 114,384,766 | 39 | 7 | MIT | 2018-06-13T12:59:18 | 2017-12-15T15:29:25 | R | UTF-8 | R | false | true | 1,318 | rd | wikipediaTraffic.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wikipediaTraffic.R
\name{wikipediaTraffic}
\alias{wikipediaTraffic}
\title{Function to get Pageview Data of a given Wikipedia Article}
\usage{
wikipediaTraffic(page, start, end, project = "en.wikipedia.org",
platform = "all-access", agent = "user", granularity = "daily")
}
\arguments{
\item{page}{The page you want to get the traffic for, for example "cat".}
\item{start}{The start date. Format = YYYY-MM-DD}
\item{end}{The end date. Format = YYYY-MM-DD}
\item{project}{The Wikipedia Project you want to retrieve the data from. Default is en.wikipedia.org. A list of possible projects can be found here <https://meta.wikimedia.org/w/api.php?action=sitematrix&formatversion=2>}
\item{platform}{One of: all-access (default), desktop, mobile-app, mobile-web}
\item{agent}{One of 'user' (human reader, standard), 'spider' (search engine crawler), 'bot' (WMF bots) or 'all-agents'(user, spider and bot)}
\item{granularity}{The time unit for the response data. As of today, the only
supported granularities for this endpoint are daily and monthly.}
}
\description{
This function allows you to get the pageviews for a given Wikipedia article.
}
\examples{
wikipediaTraffic("R (programming language)",'2018-01-01','2018-01-10')
}
|
7a85c2f7372c5599b6673043412a552d3cb770ef | 2e0b18721959cf04addbc1b9f07188b5ce352ebc | /tests/testthat/lib/vendor/foo/bar.R | 0b2e3457619abf25f554905690a92f71655cc790 | [
"MIT"
] | permissive | aclemen1/modulr | cde8ed13e708d8207362006c0dc38f4cc81edb65 | 0162dde8a7281380b82d2446841520f3299f87df | refs/heads/master | 2023-05-25T19:41:37.659195 | 2023-05-18T10:42:45 | 2023-05-18T10:43:37 | 41,587,794 | 9 | 0 | null | null | null | null | UTF-8 | R | false | false | 51 | r | bar.R | "3rd_party_modules/foo/bar" %provides% {
"bar"
}
|
9b7e9b842ff99139dc99721e50bf9f4e06d6f8d4 | bbe5f055bbd73d4e1391956f8324df9c209321fa | /example_analysis/paperSpecific/analysisPaper1.R | 204bade561545e2238d9c0d9adc488495e1ccb68 | [] | no_license | schmiedc/SynActJ_Shiny | c96e2eaab9ea90966f62c5b391c9625187563dda | 1228d03f910e7d6fae1f958779e2778d61e4447d | refs/heads/main | 2023-08-27T18:10:34.080065 | 2021-11-08T17:59:43 | 2021-11-08T17:59:43 | 387,575,249 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,107 | r | analysisPaper1.R | setwd("/data1/FMP_Docs/Repositories/plugins_FMP/SynActJ_Shiny/")
source("dataProcessing.R")
source("saveData.R")
source("plotData.R")
library(gridExtra)
library(tidyverse)
library(ggplot2)
# ==============================================================================
#
# DESCRIPTION: Plot Ctrl vs treatment from output data
#
# AUTHOR: Christopher Schmied,
# CONTACT: schmied@dzne.de
# INSTITUTE:    Leibniz-Forschungsinstitut für Molekulare Pharmakologie (FMP)
# Cellular Imaging - Core facility
# Campus Berlin-Buch
# Robert-Roessle-Str. 10
# 13125 Berlin, Germany
#
# BUGS:
# NOTES:
# DEPENDENCIES: ggplot2: install.packages("ggplot2")
# plyr: install.packages("plyr")
# gridExtra: install.packages("gridExtra")
# tidyverse: install.packages("tidyverse")
#
# VERSION: 2.0.0
# CREATED: 2018-05-24
# REVISION: 2021-10-21
#
# ==============================================================================
# where to get the files
indir = "/data1/FMP_Docs/Projects/Publication_SynActJ/DataAnalysis/pHluorin_data/output/"
# where to save the data
outdir = "/data1/FMP_Docs/Projects/Publication_SynActJ/DataAnalysis/pHluorin_data/Routput/"
resultname = "Test"
# Time resolution in seconds
timeResolution = 2
# when stimulation happens
# these frames are used for calcuating the mean intensity
# then this value is used for the surface normalization
frameStimulation = 5
# further settings
labelSignal = "Spot"
labelBackground = "background"
# ------------------------------------------------------------------------------
# Load data
# ------------------------------------------------------------------------------
# get raw data
table.signal <- collectList(indir, labelSignal, timeResolution)
table.background <- collectList(indir, labelBackground, timeResolution)
# extracting experimental information from file name
table.signal <- table.signal %>% separate(name,
sep ="_", c("day", "treatment", "number"),
remove=FALSE)
table.background <- table.background %>% separate(name,
sep ="_", c("day", "treatment", "number"),
remove=FALSE)
# ------------------------------------------------------------------------------
# Extract and plot number and area of ROIs
# ------------------------------------------------------------------------------
# reduce data for number of ROIs and area
singleData_area <- subset(table.signal, variable == "area")
singleData_area <- subset(singleData_area, time == 0)
# compute & plot number of ROIs
roiNumber <- singleData_area %>% group_by(treatment) %>% dplyr::summarize(count = n())
ggplot(data=roiNumber, aes(x=treatment, y=count)) +
geom_bar(stat="identity") +
ylab("Count") +
scale_y_continuous(limits = c(0, 6000), breaks = seq(0, 6000, by = 500)) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"))
# Plot the distribution of per-ROI areas for each treatment as a boxplot
# (the previous comment said "average", but the full distribution is shown).
ggplot(data=singleData_area, aes(x=treatment, y=value)) +
  # fixed: the argument was misspelled "outlitreatmenter.shape" and was
  # silently ignored by ggplot2 ("Ignoring unknown parameters" warning);
  # "outlier.shape" is the intended parameter name
  geom_boxplot(outlier.colour="black", outlier.shape=16, outlier.size=2, notch=FALSE) +
  expand_limits(y = 0) +
  scale_y_continuous(limits = c(0, 15), breaks = seq(0, 15, by = 1)) +
  ylab("Area (Micron)") +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"))
# ------------------------------------------------------------------------------
# Filter extracted traces
# ------------------------------------------------------------------------------
table.signal_mean_filter <- subset(table.signal, variable == "mean")
peaks <- table.signal_mean_filter %>% group_by(name, roi) %>% dplyr::summarise(value = max(value))
peaks_frame <- left_join(peaks, table.signal_mean_filter, by = c("name", "roi", "value"))
# filter traces where peak is in the stimulation range ( < 20s)
filtered_peaks <- peaks_frame %>% filter(time < 20)
filtered_peaks_2 <- filtered_peaks %>% select(c(-value, -frame, -time, -variable, -value) )
filtered_signal <- left_join(filtered_peaks_2, table.signal_mean_filter, by = c("name", "roi", "day", "treatment", "number"))
# ------------------------------------------------------------------------------
# Averaging and normalization
# ------------------------------------------------------------------------------
# calculates average mean intensity per frame per experiment
table.signal_mean <- subset(filtered_signal, variable == "mean")
table.signal_avg <- table.signal_mean %>% group_by(day, treatment, frame, time) %>% dplyr::summarize(mean=mean(value), N = length(value), sd = sd(value), se = sd / sqrt(N))
table.background_mean <- subset(table.background, variable == "mean")
table.background_avg <- table.background_mean %>% group_by(day, treatment, frame, time) %>% dplyr::summarize(mean=mean(value), N = length(value), sd = sd(value), se = sd / sqrt(N))
# generate final table
forBackgroundSubtraction <- merge(table.signal_avg, table.background_avg, by=c("day", "treatment", "frame", "time"), suffixes=c(".sig",".back"))
# normalize mean signal with mean background intensity
forBackgroundSubtraction$mean.corr <- forBackgroundSubtraction$mean.sig - forBackgroundSubtraction$mean.back
forBackgroundSubtraction$name <- paste0(forBackgroundSubtraction$day, "_", forBackgroundSubtraction$treatment)
# surface normalization
surfaceNormalized <- surfaceNormalisation(forBackgroundSubtraction, frameStimulation)
# peak normalization
peakNormalized <- peakNormalisation(surfaceNormalized)
finalTable <- sortFrames(peakNormalized)
# ------------------------------------------------------------------------------
# Plot per individual movie surface and peak normalized data
# ------------------------------------------------------------------------------
ggplot(data=finalTable, aes(x=time, y=surf_norm, group = name, color = name)) +
geom_line() +
theme_light() +
xlab("Time (s)") +
ylab("Norm. fluorescence intensity (A.U.)") +
ggtitle("Surf Norm ") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"))
ggplot(finalTable, aes(x=time, y=peak_norm, group = name, color = name)) +
geom_line() +
theme_light() +
xlab("Time (s)") +
ylab("Norm. fluorescence intensity (A.U.)") +
ggtitle("Peak Norm") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"))
# ------------------------------------------------------------------------------
# Plot per treatment surface and peak normalized data
# ------------------------------------------------------------------------------
finalTable_avg_surf <- finalTable %>% group_by(treatment, frame, time) %>% dplyr::summarize(mean=mean(surf_norm), N = length(surf_norm), sd = sd(surf_norm), se = sd / sqrt(N))
ggplot(finalTable_avg_surf, aes(x=time, y=mean, group = treatment, color = treatment)) +
geom_line() +
theme_light() +
xlab("Time (s)") +
ylab("Norm. fluorescence intensity (A.U.)") +
ggtitle("Avg. Surf Norm") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"))
finalTable_avg_peak <- finalTable %>% group_by(treatment, frame, time) %>% dplyr::summarize(mean=mean(peak_norm), N = length(peak_norm), sd = sd(peak_norm), se = sd / sqrt(N))
ggplot(finalTable_avg_peak, aes(x=time, y=mean, group = treatment, color = treatment)) +
geom_line() +
theme_light() +
xlab("Time (s)") +
ylab("Norm. fluorescence intensity (A.U.)") +
ggtitle("Avg. Peak Norm") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black"))
# ------------------------------------------------------------------------------
# Compute and plot peaks based on surface normalization
# ------------------------------------------------------------------------------
# compute peaks
peaks <- finalTable %>% group_by(name) %>% dplyr::summarise(max = max(surf_norm))
peaks$deltaMax <- peaks$max - 1
peaks <- peaks %>% separate(name, sep ="_", c("day", "treatment"), remove=FALSE)
# plot peak difference
ggplot(data=peaks, aes(x=treatment, y=deltaMax)) +
geom_boxplot(outlier.colour="black") +
ylab("delta F (exocytosis)") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black")) |
7bdf2e9616a022f9f754df111e9733b7974fe130 | 29585dff702209dd446c0ab52ceea046c58e384e | /corcounts/R/berechne.partial.corr.R | 4efa0e7c47d9077bf3606595695908bf09834c64 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,197 | r | berechne.partial.corr.R | `berechne.partial.corr` <-
function(number,Cin,ntemp, upper.triangle.index, upper.triangle.values) {
  # Computes the partial correlation for row `number` of the upper-triangle
  # index table, using the recursive partial-correlation formula: start from
  # the marginal correlation Cin[i, j] and condition on each further index k
  # in turn, looking up the required lower-order partial correlations in
  # `upper.triangle.values` (rows for all lower-order tuples must precede
  # row `number` in `upper.triangle.index`).
  #
  # Args:
  #   number:                row of upper.triangle.index to evaluate.
  #   Cin:                   matrix of marginal correlations.
  #   ntemp:                 maximum tuple length; unused slots are coded 0.
  #   upper.triangle.index:  matrix of index tuples.
  #   upper.triangle.values: partial correlations for the preceding rows.
  #
  # Returns: the partial correlation (scalar).
  index.in <- upper.triangle.index[number,]
  # compute pc with reduced and full index (corresponds to highest k)
  pc.vi <- Cin[index.in[1],index.in[2]]
  # only condition further if the tuple has more than two non-zero entries
  if (ntemp-sum(index.in==0)>2) {
    for (k in 3:(ntemp-sum(index.in==0))) {
      # build the index tuple for the first required lower-order partial
      # correlation: (index k, index 1, indices 3..k-1, terminated by 0)
      index.benoetigt.1 <- c(index.in[k],index.in[1])
      if (k>3) {
        for (m in 3:(k-1)) {
          index.benoetigt.1 <- c(index.benoetigt.1, index.in[m])
        }
      }
      index.benoetigt.1 <- c(index.benoetigt.1,0)
      # scan backwards from the current row for the matching tuple
      l <- number-1
      while (sum(upper.triangle.index[l,1:k] == index.benoetigt.1)<k) { l <- l-1 }
      pc.1 <- upper.triangle.values[l]
      # same lookup for the second lower-order partial correlation:
      # (index k, index 2, indices 3..k-1, terminated by 0)
      index.benoetigt.2 <- c(index.in[k],index.in[2])
      if (k>3) {
        for (m in 3:(k-1)) {
          index.benoetigt.2 <- c(index.benoetigt.2, index.in[m])
        }
      }
      index.benoetigt.2 <- c(index.benoetigt.2,0)
      l <- number-1
      while (sum(upper.triangle.index[l,1:k] == index.benoetigt.2)<k) { l <- l-1 }
      pc.2 <- upper.triangle.values[l]
      # recursion: condition the running partial correlation on index k
      pc.vi <- (pc.vi-pc.1*pc.2)/(sqrt((1-pc.1^2)*(1-pc.2^2)))
    }
  }
  pc <- pc.vi
  return (pc)
}
|
d427191388b6c1e5956415488109139c36938aa0 | ede7e471270ec4a694bf375a7634481c6db7c014 | /config.R | 38a2c005b6d12aad5a82784b607357790bb9bdce | [
"MIT"
] | permissive | wikimedia-research/OneRing | f1b7bb2f0ab8c87f391e3fb10d69fb993a2c7df4 | 177a239e33ee2bf0670aa178ae08d42721ff42db | refs/heads/master | 2016-08-08T03:49:10.629510 | 2014-11-14T17:50:36 | 2014-11-14T17:50:36 | 26,187,625 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 84 | r | config.R | #Libraries
library(WMUtils)
#Variables
SAVE_DB = "staging"
SAVE_TABLE = "pageviews" |
3500eab25f1ee1a9d2f8dd2771acd272fba1e9af | 4e76af0d6f21266a58f8352a2e63bf539937aad1 | /plot1.R | 0a924dda24a8fa45d860226687427b8cb1dc5378 | [] | no_license | dechang227/Exploratory-Data-Analysis-Project1 | 7a213344ca2f08d14f9221884e4f8297d2464f27 | e7723b98c73b0fcefbe50c486e4fa0167e10aa50 | refs/heads/master | 2021-01-10T20:46:51.285541 | 2014-05-15T05:57:53 | 2014-05-15T05:57:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 331 | r | plot1.R | hpc<-read.csv.sql("household_power_consumption.txt",
sql = 'select * from file where Date == "1/2/2007" or Date == "2/2/2007"',
header = TRUE, sep = ";")
png("plot1.png")
hist(hpc$Global_active_power, xlab = "Global Active Power(kilowatts)", col ="red", main="Global Active Power")
dev.off()
|
0b9f7e94aa221130654452200c4de946db6cb4e4 | 29f8f3ee59c366ea408633d183614bc39b49b26d | /TEMPLATE/template_gis.R | 4ad1c5a0df757d2ad369168e9e6f74ff4f13cb47 | [] | no_license | souzajvp/analytical_codes | 92db345dc75f128c2f25fb7b28f0891139ffea98 | dcc49662253ba1dbd4f54b8c4caea40232632783 | refs/heads/master | 2023-05-23T06:06:12.058469 | 2021-06-07T18:11:00 | 2021-06-07T18:11:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,691 | r | template_gis.R | ####
#Spatial Autocorrelation
####
http://www.bias-project.org.uk/ASDARcourse/unit6_slides.pdf
http://www.people.fas.harvard.edu/~zhukov/Spatial6.pdf
https://cran.r-project.org/doc/contrib/intro-spatial-rl.pdf
http://spatial.ly/r/
http://spatial.ly/2013/04/analysis-visualisation-spatial-data/
https://pakillo.github.io/R-GIS-tutorial/
http://www.springer.com/us/book/9781461476177
http://gis.humboldt.edu/OLM/r/Spatial%20Analysis%20With%20R.pdf
http://gis.stackexchange.com/questions/45327/tutorials-to-handle-spatial-data-in-r
####
#An Introduction to Spatial Regression Analysis in R
###
library(spdep)
data(columbus)
summary(columbus)
#tranform a nb document (neighbourhood document) into a listw objetct which specifies the weights for each nb
col.listw <- nb2listw(col.gal.nb)
#Apply the Moran I test for residuals spatial autocorrelation
#arguments are a lm object with the regression predictors and the litsw object
col.moran <- lm.morantest(columbus.lm,col.listw)
#Lagrange Multiplier Test Statistics for Spatial Autocorrelation
columbus.lagrange <- lm.LMtests(columbus.lm,col.listw,test=c("LMerr","RLMerr","LMlag","RLMlag","SARMA"))
#Maximum Likelihood Estimation of the Spatial Lag Model
columbus.lag <- lagsarlm(CRIME ~ INC + HOVAL,data=columbus,col.listw)
#OSL estimator
lagCRIME <- lag.listw(col.listw,CRIME)
wrong.lag <- lm(CRIME ~ lagCRIME + INC + HOVAL)
summary(wrong.lag)
#Maximum Likelihood Estimation of the Spatial Error Model
columbus.err <- errorsarlm(CRIME ~ INC + HOVAL,data=columbus,col.listw)
#Spatial Durbin Model
columbus.durbin <- lagsarlm(CRIME ~ INC+HOVAL,data=columbus,col.listw,type="mixed")
durbin.test1 <- LR.sarlm(columbus.durbin,columbus.err) |
58ebb56c4bf862d2c6b5bbde074616dd266aceab | 3fc7df9cdbf1a4e0ad8030fb5c09e4bfb0e24f7b | /Options_Intro.R | a720706d30e8b7c9696cf1e51cf109396723d155 | [] | no_license | YifeiLuo96/FM5990_excercise | 9c5db124fc7eebaafe3f21d7c4286b5dd711d3fc | f0fd6c667ea16d2aba8adf3f549966a86529f678 | refs/heads/master | 2020-03-30T10:27:28.627945 | 2018-10-01T16:35:01 | 2018-10-01T16:35:01 | 151,120,914 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,888 | r | Options_Intro.R | # install packages
install.packages("dplyr")
install.packages("readr")
install.packages("ggplot2")
library(ggplot2)
library(dplyr)
library(readr)
# 1. read the data set and load it into dataframe
df_options_intro <- read_csv("data_options_intro.csv")
# 3. grab the columns and print
k <- df_options_intro$strike
implied_vol <- df_options_intro$implied_vol
ask_price <- df_options_intro$ask
# 4. to determine the number of rows
nrow(df_options_intro)
# 5.take a look at the options data and confirm the value
View(df_options_intro)
# 6. expiration date of all the options
as.Date(df_options_intro$expiration, "%m/%d/%Y")
distinct(df_options_intro, expiration)
as.Date(df_options_intro$trade_date, "%m/%d/%Y")
distinct(df_options_intro, trade_date)
# 7. to determine how many underlyings
distinct(df_options_intro, underlying)
# 8. creat subdataframes
df_SPY <- filter(df_options_intro, underlying == "SPY")
df_IWM <- filter(df_options_intro, underlying == "IWM")
df_QQQ <- filter(df_options_intro, underlying == "QQQ")
df_DIA <- filter(df_options_intro, underlying == "DIA")
# 9/10. draw scatter plots of strike price vs implied_vol
qplot(x = strike, y = implied_vol, geom = "point", data = df_SPY, main = "SPY_plot")
qplot(x = strike, y = implied_vol, geom = "point", data = df_IWM, main = "IWM_plot")
qplot(x = strike, y = implied_vol, geom = "point", data = df_QQQ, main = "QQQ_plot")
qplot(x = strike, y = implied_vol, geom = "point", data = df_DIA, main = "DIA_plot")
# 11. break down SPY into Call and Put
df_SPY_puts <- filter(df_SPY, type == "put")
df_SPY_calls <- filter(df_SPY, type == "call")
# 12. the numbers of puts and calls respectively
nrow(df_SPY_puts)
nrow(df_SPY_calls)
# 13. create a graph with strike and bid_price
qplot(x = strike, y = bid, data = df_SPY_calls, main = "Call Options")
qplot(x = strike, y = bid, data = df_SPY_puts, main = "Put Options")
|
f250bdf43e7e600df4f15d414bda01b23e185019 | fbb6782ed5e47af3dc4e3fb7ecb192cff8ef74c1 | /r/simulation_settings/7_implementation_vs_structure_comparison.R | f0a51d7793846047cb2c3b60e7fbc9049bb4dcf4 | [] | no_license | mdsteiner/efacomp_OSF | 581a54f454e7223470abd48432453437050b5bbc | 80e22d42aed6a4fd346023f2c0d7450826ad0501 | refs/heads/master | 2023-03-14T10:08:42.776433 | 2021-03-03T15:32:33 | 2021-03-03T15:32:33 | 292,044,522 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,103 | r | 7_implementation_vs_structure_comparison.R | if(!require(tidyverse)) install.packages("tidyverse"); library(tidyverse)
if(!require(EFAtools)) install.packages("EFAtools"); library(EFAtools)
### helper function ============================================================
describe <- function(x, d = 2) {
mn <- mean(x, na.rm = TRUE)
mdn <- median(x, na.rm = TRUE)
low <- min(x, na.rm = TRUE)
up <- max(x, na.rm = TRUE)
st <- paste0("M = ", EFAtools:::.numformat(mn, d), "; Mdn = ",
EFAtools:::.numformat(mdn, d), "; [",
EFAtools:::.numformat(low, d), ", ",
EFAtools:::.numformat(up, d), "]")
cat(st)
}
### data preparation ===========================================================
model_control <- expand.grid(
case = names(population_models$loadings)[1:27],
cors = names(population_models$phis_3),
N = c(450),
stringsAsFactors = FALSE)
model_control$case_ids <- 1:nrow(model_control)
current_case_ids <- 1:nrow(model_control)
settings <- expand.grid(
comm_meth = c("unity", "mac", "smc"), # initial communality estimation methods
criterion_type = c("max_individual", "sums"), # citerion types
abs_eigen = c(TRUE, FALSE), # absolute eigenvalues yes or no
conv_crit = c(1e-3, 1e-6), # convergence criterion
var_type = c("svd", "kaiser"), # varimax type
p_type = c("unnorm", "norm"),
k = c(3, 4),
stringsAsFactors = FALSE)
# define k according to earlier simulation studies
settings$k[settings$k == 3 & settings$p_type == "norm"] <- 2
settings$setting_id <- 1:nrow(settings)
# read in data
recovery <- readRDS("output/simulation_settings/recovery_450.RDS")
### fit values between settings vs across cases ================================
dat <- recovery %>%
group_by(case_ids, setting_id) %>%
summarise(
p_fc = mean(diff_factor_corres > 0, na.rm = TRUE),
m_g = mean(g, na.rm = TRUE),
p_hey = mean(heywood, na.rm = TRUE)
) %>%
ungroup()
### RMSE
# across implementations
dat %>%
group_by(case_ids) %>%
arrange(m_g) %>% slice(1, n()) %>%
mutate(diff_g = m_g - lag(m_g)) %>%
drop_na() %>%
pull(diff_g) %>%
describe()
# across structures
dat %>%
group_by(setting_id) %>%
arrange(m_g) %>% slice(1, n()) %>%
mutate(diff_g = m_g - lag(m_g)) %>%
drop_na() %>%
pull(diff_g) %>%
describe()
### Heywood cases
# across implementations
dat %>%
group_by(case_ids) %>%
arrange(p_hey) %>% slice(1, n()) %>%
mutate(diff_hey = p_hey - lag(p_hey)) %>%
drop_na() %>%
pull(diff_hey) %>%
describe()
# across structures
dat %>%
group_by(setting_id) %>%
arrange(p_hey) %>% slice(1, n()) %>%
mutate(diff_hey = p_hey - lag(p_hey)) %>%
drop_na() %>%
pull(diff_hey) %>%
describe()
### factor correspondences
# across implementations
dat %>%
group_by(case_ids) %>%
arrange(p_fc) %>% slice(1, n()) %>%
mutate(diff_p = p_fc - lag(p_fc)) %>%
drop_na() %>%
pull(diff_p) %>%
describe()
# across structures
dat %>%
group_by(setting_id) %>%
arrange(p_fc) %>% slice(1, n()) %>%
mutate(diff_p = p_fc - lag(p_fc)) %>%
drop_na() %>%
pull(diff_p) %>%
describe()
|
a842913448b8c9fde60a5f6e6d44613ca490ba99 | 4f04e55d2fb8910f8e13150e037bfd987a11b431 | /session_4_mod_1/regression.R | 4a6dbdcf21bbaac111dc436905c4e6f9489dfbfd | [
"MIT"
] | permissive | PakistanAI/DS_Weekends | e542feaa96676368369318d67b89ce4d13ca1b75 | 87ebf5b984fb86a23cfbd24dcef2c1950b545be6 | refs/heads/master | 2020-03-19T10:14:55.993257 | 2018-09-27T17:26:47 | 2018-09-27T17:26:47 | 136,354,756 | 24 | 9 | null | null | null | null | UTF-8 | R | false | false | 879 | r | regression.R | #Execute the command if you dont have libraries installed
install.packages("MASS")
install.packages("ISLR")
library(MASS)
library(ISLR)
# Simple Linear Regression
fix(Boston)
names(Boston)
lm.fit=lm(medv~lstat)
lm.fit=lm(medv~lstat,data=Boston)
attach(Boston)
lm.fit=lm(medv~lstat)
lm.fit
summary(lm.fit)
names(lm.fit)
coef(lm.fit)
confint(lm.fit)
predict(lm.fit,data.frame(lstat=(c(5,10,15))), interval="confidence")
predict(lm.fit,data.frame(lstat=(c(5,10,15))), interval="prediction")
plot(lstat,medv)
abline(lm.fit)
abline(lm.fit,lwd=3)
abline(lm.fit,lwd=3,col="red")
plot(lstat,medv,col="red")
plot(lstat,medv,pch=20)
plot(lstat,medv,pch="+")
plot(1:20,1:20,pch=1:20)
par(mfrow=c(2,2))
plot(lm.fit)
plot(predict(lm.fit), residuals(lm.fit))
plot(predict(lm.fit), rstudent(lm.fit))
plot(hatvalues(lm.fit))
which.max(hatvalues(lm.fit))
|
c97b19792bd34f136c385c4a1e9a49184900c71b | 70e8f7e34e8e653ed16917c0e00289ab865f7e51 | /CommonR/R/optimization.R | cd210640e1f1bf7ac3496066e2ee3981fd919784 | [] | no_license | shubhomoydas/aad | 424466af25c0dc24f7c58ef0db5fd969ebb91e98 | ea5e6712da7759d31db13017bda5125e97d442fd | refs/heads/master | 2021-01-13T10:15:48.482572 | 2017-12-26T04:12:19 | 2017-12-26T04:12:19 | 69,590,183 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,401 | r | optimization.R | #BTLS Summary of this function goes here
# Gradient descent with backtracking line search (BTLS).
#
# Minimises `fn` starting from X0 by steepest descent; the step size is
# chosen each iteration by backtracking: shrink by `beta` until the
# sufficient-decrease condition with slope factor `gamma` holds.
#
# Args:
#   X0:       starting point.
#   fn:       objective function, called as fn(x, ...).
#   dfn:      gradient of fn, called as dfn(x, ...).
#   ...:      extra arguments forwarded to fn and dfn.
#   beta:     backtracking shrink factor (0 < beta < 1).
#   gamma:    sufficient-decrease slope factor.
#   maxiters: maximum number of descent iterations.
#
# Returns:
#   list(X1 = final iterate, k = iterations performed, Fx = objective history)
fnBtls <- function( X0, fn, dfn, ..., beta=0.5, gamma=0.2, maxiters=20 ) {
  tol <- 10e-4
  x_cur <- X0
  iter <- 0
  f_hist <- c()
  repeat {
    if (iter >= maxiters) break
    iter <- iter + 1
    f_val <- fn(x_cur, ...)
    grad <- dfn(x_cur, ...)
    grad_sq <- sum(grad^2)
    f_hist[iter] <- f_val
    # stop when the gradient is numerically zero
    if (grad_sq <= tol) break
    # stop when the objective has plateaued over the last 10 iterations
    if (length(f_hist) > 10 &&
        var(f_hist[(length(f_hist) - 9):length(f_hist)]) < tol) {
      break
    }
    # backtracking: shrink the step until sufficient decrease is achieved
    step <- 1
    while (fn(x_cur - step * grad, ...) > f_val - gamma * step * grad_sq) {
      step <- step * beta
    }
    x_cur <- x_cur - step * grad
  }
  list(X1 = x_cur, k = iter, Fx = f_hist)
}
# Newton-Raphson optimization for functions with first and second derivatives.
#
# Minimises `fn` using damped Newton steps: the search direction is
# -H^{-1} g (inverse Hessian times gradient) and the step length is found
# by backtracking line search.
#
# Args:
#   X0:       starting point.
#   fn:       objective function, called as fn(x, ...).
#   dfn:      gradient of fn, called as dfn(x, ...).
#   d2fn:     Hessian of fn (square matrix), called as d2fn(x, ...).
#   ...:      extra arguments forwarded to fn, dfn and d2fn.
#   beta:     backtracking shrink factor (0 < beta < 1).
#   gamma:    sufficient-decrease slope factor.
#   maxiters: maximum number of Newton iterations (previously hard-coded
#             to 1000; named-only argument, so existing calls are unchanged).
#
# Returns:
#   list(X1 = final iterate, k = iterations performed, Fx = objective history)
fnNewtonRaphson <- function( X0, fn, dfn, d2fn, ..., beta=0.5, gamma=0.2, maxiters=1000 ) {
  tol <- 10e-4
  X1 <- X0
  k <- 0
  Fx <- c()
  prev_fx <- 0
  while (k < maxiters) {
    k <- k + 1
    fx <- fn(X1, ...)
    dx <- dfn(X1, ...)           # gradient
    d2x <- d2fn(X1, ...)         # Hessian
    dxnt <- -solve(d2x) %*% dx   # Newton direction
    dxnt2 <- t(dxnt) %*% dxnt
    Fx <- c(Fx, fx)
    l2x <- -t(dx) %*% dxnt       # squared Newton decrement (lambda^2)
    # stop on a small Newton decrement, or when the objective has stalled
    if (abs(l2x / 2) <= tol || (k > 1 && abs(fx - prev_fx) <= 1e-6)) {
      break
    }
    # backtracking line search along the Newton direction.
    # NOTE(review): the sufficient-decrease bound uses +gamma*alpha*||dxnt||^2,
    # which is laxer than the usual Armijo condition (fx - gamma*alpha*l2x);
    # kept as-is to preserve existing convergence behaviour -- worth confirming.
    alpha <- 1
    while (fn(X1 + alpha * dxnt, ...) > fx + gamma * alpha * (dxnt2)) {
      alpha <- alpha * beta
    }
    X1 <- X1 + alpha * dxnt
    prev_fx <- fx
  }
  list(X1 = X1, k = k, Fx = Fx)
}
|
b0befd27a0face15b4c9e85baa21c0254149a468 | 290dcf2dcab08672cd8e7e6910bfd36cc024824d | /inst/ACORN-app/www/R/output/across_hospital_logo.R | fa4bef7bb536642edddf625508576e9b22dab359 | [] | no_license | ocelhay/ACORN | ceda5a0edbd455507261c30043ff84f94c88e86f | 755384ba96364fb223a1928a814417c5b8b17079 | refs/heads/master | 2020-12-07T07:54:54.420477 | 2020-12-03T22:25:06 | 2020-12-03T22:25:06 | 232,677,929 | 0 | 1 | null | 2020-09-11T21:46:16 | 2020-01-08T23:07:29 | R | UTF-8 | R | false | false | 854 | r | across_hospital_logo.R | output$hospital_image <- renderUI({
req(patient())
if(patient()$site_id[1] == "0") {
return(
tags$a(href='http://acornamr.net', tags$img(src = 'img_ACORN_logo.png', class = 'logo'))
)
}
if(patient()$site_id[1] == "1") {
return(
# HTML("Placeholder for 'site 1' logo")
img(src = 'img_Partners_OUCRU.jpg', alt = "OUCRU", width = "95%", id = "hospital-image")
)
}
if(patient()$site_id[1] == "2") {
return(
# HTML("Placeholder for 'site 2' logo")
img(src = 'img_Partners_LOMWRU.jpg', alt = "LOMWRU", width = "95%", id = "hospital-image")
)
}
if(patient()$site_id[1] == "3") {
return(
img(src = 'img_Partners_COMRU.jpg', alt = "COMRU", width = "100%", id = "hospital-image")
)
}
})
output$data_info <- renderUI({
req(data_details())
HTML(data_details())
}) |
e1a593b9ebdb3110a74b658c412d9909cc90e439 | f0a0c9f78bc1375280cf9ffa8d7946dfdaab65f3 | /MyProject.R | ca73ddd72591d729c200b210d9c0fae0b476be79 | [] | no_license | bakshivishal/Data-Science-Projects | dd79b1c25da644b26c814983812847b1429de10d | 486190490b236ad8a442d2bf76c3c0a403c672ff | refs/heads/master | 2022-02-11T10:37:10.587632 | 2019-08-16T16:27:30 | 2019-08-16T16:27:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,811 | r | MyProject.R | ##############################################################
# Author: Vishal Bakshi
# Project: Predict chances of release for marijuana possession
# Date: August 16th 2019
##############################################################
##install packages(to run this code) if required
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(e1071)) install.packages("e1071", repos = "http://cran.us.r-project.org")
##load libraries
library(tidyverse)
library(caret)
##Download and read "arrest records" dataset
arrest_data <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/carData/Arrests.csv")
##Analyze dataset attributes
str(arrest_data)
##Remove Column X from dataset - this is simply the sequence no. of records
##and not useful for prediction/analysis
arrest_data <- arrest_data[,-1]
##Check for NAs in the dataset
summary(arrest_data)
##Partition dataset in to training and test sets
##Training set is 80% of dataset and test set is 20% of dataset
#set the seed to 1 - if using R 3.5 or earlier, use `set.seed(1)` instead
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(y = arrest_data$released, times = 1, p = 0.2, list = FALSE)
training_set <- arrest_data[-test_index,]
test_set <- arrest_data[test_index,]
#Let's run some simple visualizations by age, year and "previous checks" for
#quick insights
hist(training_set$age)
hist(training_set$year)
hist(training_set$checks)
############################
#Data Analysis
############################
##Now we will analyze data for some insights and store it in "insights" table
##For each of the insight calculation below, we calculate probability of a person
##not getting released if he/she met certain criteria
##For example: person is white, black, male, female etc.
#Check the probability of not getting released if person's colour is black
x1 <- sum(training_set$colour == "Black")
x2 <- sum(training_set$colour == "Black" & training_set$released == "No")
per <- x2/x1
insights <- data_frame(Criteria = "Person is Black", Probability_Not_Released = per)
#Check the probability of not getting released if person's colour is white
x1 <- sum(training_set$colour == "White")
x2 <- sum(training_set$colour == "White" & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
data_frame(Criteria = "Person is White",
Probability_Not_Released = per))
#Check the probability of not getting released if person is male
x1 <- sum(training_set$sex == "Male")
x2 <- sum(training_set$sex == "Male" & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
data_frame(Criteria = "Person is Male",
Probability_Not_Released = per))
#Check the probability of not getting released if person is female
x1 <- sum(training_set$sex == "Female")
x2 <- sum(training_set$sex == "Female" & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
data_frame(Criteria = "Person is Female",
Probability_Not_Released = per))
#Check the probability of not getting released if person is employed
x1 <- sum(training_set$employed == "Yes")
x2 <- sum(training_set$employed == "Yes" & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
data_frame(Criteria = "Person is Employed",
Probability_Not_Released = per))
#Check the probability of not getting released if person is unemployed
x1 <- sum(training_set$employed == "No")
x2 <- sum(training_set$employed == "No" & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
data_frame(Criteria = "Person is Not Employed",
Probability_Not_Released = per))
#Check the probability of not getting released if person is canadian citizen
x1 <- sum(training_set$citizen == "Yes")
x2 <- sum(training_set$citizen == "Yes" & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
data_frame(Criteria = "Person is Citizen",
Probability_Not_Released = per))
#Check the probability of not getting released if person is non-citizen
x1 <- sum(training_set$citizen == "No")
x2 <- sum(training_set$citizen == "No" & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
data_frame(Criteria = "Person is Non-Citizen",
Probability_Not_Released = per))
#Check the probability of not getting released if person has 0 checks
x1 <- sum(training_set$checks == 0)
x2 <- sum(training_set$checks == 0 & training_set$released == "No")
per <- x2/x1
#Probability of not getting released for a person with exactly 0 checks
#(the supporting counts and `per` were computed just above this section)
insights <- bind_rows(insights,
                      data_frame(Criteria = "Person has 0 checks",
                                 Probability_Not_Released = per))
#Probability of not getting released for persons with at least k checks,
#k = 1..5; one loop instead of five near-identical copy-pasted stanzas.
#`checks > (k - 1)` is exactly the original thresholds > 0, > 1, ..., > 4.
for (k in 1:5) {
  x1 <- sum(training_set$checks > (k - 1))
  x2 <- sum(training_set$checks > (k - 1) & training_set$released == "No")
  per <- x2/x1
  criteria_label <- paste0("Person has atleast ", k,
                           if (k == 1) " check" else " checks")
  insights <- bind_rows(insights,
                        data_frame(Criteria = criteria_label,
                                   Probability_Not_Released = per))
}
#Probability of not getting released if person is less than 18
#years old i.e. he/she is not an adult
x1 <- sum(training_set$age < 18)
x2 <- sum(training_set$age < 18 & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
                      data_frame(Criteria = "Person's age is less than 18 years",
                                 Probability_Not_Released = per))
#Probability of not getting released if person is between 18 and 30 years old
x1 <- sum(training_set$age >= 18 & training_set$age <= 30)
x2 <- sum(training_set$age >= 18 & training_set$age <= 30 & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
                      data_frame(Criteria = "Person's age is between 18 and 30 years",
                                 Probability_Not_Released = per))
#Probability of not getting released if person's age is more than 30 years
x1 <- sum(training_set$age > 30)
x2 <- sum(training_set$age > 30 & training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
                      data_frame(Criteria = "Person's age is more than 30 years",
                                 Probability_Not_Released = per))
#Baseline: probability of not getting released across the entire training set
x1 <- length(training_set$released)
x2 <- sum(training_set$released == "No")
per <- x2/x1
insights <- bind_rows(insights,
                      data_frame(Criteria = "All persons in entire training set",
                                 Probability_Not_Released = per))
#####################
#End of Data Analysis
#####################
####################
#Modeling Methods
####################
##Now we will run some models and store the "accuracy" of each model
##in "accuracy_results" table
# NOTE(review): caret::train() resamples via the RNG, so the accuracies below
# vary between runs unless a seed was set earlier in the script -- confirm.
#Method 1 - Baseline model - since majority of people were released, predict
#all as released against test dataset
# The scalar "Yes" is recycled across the whole test_set$released vector.
pred <- "Yes"
accur <- sum(pred == test_set$released)/length(test_set$released)
accuracy_results <- data_frame(method = "Baseline - Predict all persons as released", Accuracy = accur)
#Method 2 - Predict using glm model across all predictors
train_glm <- train(released ~ ., method = "glm", data = training_set)
y_hat_glm <- predict(train_glm, test_set, type = "raw")
accur <- confusionMatrix(y_hat_glm, test_set$released)$overall[["Accuracy"]]
accuracy_results <- bind_rows(accuracy_results,
                          data_frame(method="glm - all predictors",
                                     Accuracy = accur))
#Method 3 - Predict using glm model across predictors - colour and checks
train_glm <- train(released ~ colour + checks, method = "glm", data = training_set)
y_hat_glm <- predict(train_glm, test_set, type = "raw")
accur <- confusionMatrix(y_hat_glm, test_set$released)$overall[["Accuracy"]]
accuracy_results <- bind_rows(accuracy_results,
                          data_frame(method="glm - colour + checks",
                                     Accuracy = accur))
#Method 4 - Predict using glm model across predictors - colour,
#citizen, sex, age, checks
train_glm <- train(released ~ colour + citizen + checks + sex + age, method = "glm", data = training_set)
y_hat_glm <- predict(train_glm, test_set, type = "raw")
accur <- confusionMatrix(y_hat_glm, test_set$released)$overall[["Accuracy"]]
accuracy_results <- bind_rows(accuracy_results,
                          data_frame(method="glm - color + citizen + sex + age + checks",
                                     Accuracy = accur))
#Method 5 - Predict using knn model across all predictors
train_knn <- train(released ~ ., method = "knn", data = training_set)
y_hat_knn <- predict(train_knn, test_set, type = "raw")
accur <- confusionMatrix(y_hat_knn, test_set$released)$overall[["Accuracy"]]
accuracy_results <- bind_rows(accuracy_results,
                          data_frame(method="knn - all predictors",
                                     Accuracy = accur))
########################################################
#Print the Data Analysis and Model Performance Results
########################################################
#Output the results for insights table
insights %>% knitr::kable()
#Output the accuracy results for all models and show the best model
#with highest accuracy value. The best model is determined programmatically
#(instead of hard-coding "knn" and reusing whatever `accur` last held), so
#the printed claim stays correct even if a different model wins.
accuracy_results %>% knitr::kable()
best_row <- accuracy_results[which.max(accuracy_results$Accuracy), ]
print(c(paste0("The highest accuracy is for the '", best_row$method,
               "' model. The accuracy value is:"), best_row$Accuracy))
|
67a0dce1063ecf5f27341afa19a18d34ef37aedb | 2af395f8db24489698ae33a07c3638b2e3462503 | /Neural_Network.R | 0ed7f66067bcbfcf09090c25953c9d92fd9b1a60 | [] | no_license | rajkstats/Data_Mining_Masters_Coursework | dfe14719152bd1486e3d90fbbc41cbfb527def36 | 10828bcb68f3f56e5ea898380c159348fe17faf2 | refs/heads/master | 2021-05-02T13:18:56.049466 | 2018-02-08T12:20:25 | 2018-02-08T12:20:25 | 120,756,907 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,035 | r | Neural_Network.R | if(!require(neuralnet))
install.packages("neuralnet")
# Neural-network credit-scoring script (neuralnet package).
german <- read.csv(file.choose()) #locate the data file here.
# Count missing values per column (the rest of the script assumes none).
apply(german,2,function(x) sum(is.na(x)))
class(german)
str(german)
# Seed fixes both the train/test split and the network initialisation.
set.seed(123)
# Min-max scale every column to [0, 1] before training.
maxs <- apply(german, 2, max)
mins <- apply(german, 2, min)
scaled <- as.data.frame(scale(german, center = mins, scale = maxs - mins))
# 80/20 train/test split on row indices.
n <- dim(german)[1]
samp <- sample(1:n, 0.8*n)
train <- scaled[samp, ]
test <- scaled[-samp, ]
library(neuralnet)
# Formula: Creditability (first column) against all remaining columns.
name <- names(train)
f <- as.formula(paste("Creditability ~", paste(name[-1], collapse = " + ")))
# One hidden layer with 6 units; non-linear output (linear.output = FALSE)
# so the net behaves as a classifier.
creditnet <- neuralnet(f , data=train, hidden = 6, linear.output = FALSE)
plot(creditnet)
# Training Data:
res1 <- compute(creditnet, train[, -1])
result1 <- data.frame(actual=train[, 1], prediction=round(res1$net.result))
x <- table(result1)
x
# Misclassification rate = off-diagonal counts / total.
# NOTE(review): assumes a 2x2 table (both classes present) -- confirm.
error1<- sum(x[1,2], x[2,1])/sum(x)
error1
# Test Data:
res2 <- compute(creditnet, test[, -1])
result2 <- data.frame(actual=test[, 1], prediction=round(res2$net.result))
y <- table(result2)
y
error2<- sum(y[1,2], y[2,1])/sum(y)
error2
2ca601b5c644068a536edf3d9c5d0f962849a690 | ca9fd5f85316595a3b6cd992d7f1c2cdb2d9ef91 | /man/dPosteriorPredictive.CatHDP.Rd | 8f4a62fc32c576735584a4dd028bfc558323a030 | [
"MIT"
] | permissive | seanahmad/Bayesian-Bricks | 58c15a81e19191863c5b44ba1b8a5a3ccbf110e0 | 4876e9bacf9561354220a18835829f5274598622 | refs/heads/master | 2023-02-17T04:01:58.141713 | 2021-01-16T05:43:16 | 2021-01-16T05:43:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,028 | rd | dPosteriorPredictive.CatHDP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Dirichlet_Process.r
\name{dPosteriorPredictive.CatHDP}
\alias{dPosteriorPredictive.CatHDP}
\title{Posterior predictive density function of a "CatHDP" object}
\usage{
\method{dPosteriorPredictive}{CatHDP}(obj, z, k, j, LOG = TRUE, ...)
}
\arguments{
\item{obj}{A "CatHDP" object.}
\item{z}{integer, the elements of the vector must all greater than 0, the samples of a Categorical distribution.}
\item{k}{integer, the elements of the vector must all greater than 0, the samples of a Categorical distribution.}
\item{j}{integer, group label.}
\item{LOG}{Return the log density if set to "TRUE".}
\item{...}{Additional arguments to be passed to other inherited types.}
}
\value{
A numeric vector, the posterior predictive density.
}
\description{
Generate the density value of the posterior predictive distribution of the following structure:
\deqn{G|gamma \sim DP(gamma,U)}
\deqn{pi_j|G,alpha \sim DP(alpha,G), j = 1:J}
\deqn{z|pi_j \sim Categorical(pi_j)}
\deqn{k|z,G \sim Categorical(G), \textrm{ if z is a sample from the base measure G}}
where DP(gamma,U) is a Dirichlet Process on positive integers, gamma is the "concentration parameter", U is the "base measure" of this Dirichlet process, U is an uniform distribution on all positive integers. DP(alpha,G) is a Dirichlet Process on integers with concentration parameter alpha and base measure G. Categorical() is the Categorical distribution. See \code{dCategorical} for the definition of the Categorical distribution. \cr
In the case of CatHDP, z and k can only be positive integers. \cr
The model structure and prior parameters are stored in a "CatHDP" object. \cr
Posterior predictive density = p(z,k|alpha,gamma,U,j)
}
\references{
Teh, Yee W., et al. "Sharing clusters among related groups: Hierarchical Dirichlet processes." Advances in neural information processing systems. 2005.
}
\seealso{
\code{\link{CatHDP}}, \code{\link{dPosteriorPredictive.CatHDP}}
}
|
c0cba20b69413d155605a24ab1319e3bb8b38730 | 21824b003bbc2e3c2423ca05333b37b0712caf69 | /Working Folder/integrateIt/man/Trapezoid.Rd | 2350ba6cd79acfed0cd9dd14e29cdb180841bfa3 | [] | no_license | GangyiSun/PS5 | 22a2b2bb113f0689b93ee71cc5b1ed5d2bc1747d | 088cdf951b6de4ac21e9de20bb6c1c6374294f67 | refs/heads/master | 2021-01-25T12:36:21.507579 | 2018-03-06T18:16:20 | 2018-03-06T18:16:20 | 123,482,013 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 761 | rd | Trapezoid.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Trapezoid.R
\docType{class}
\name{Trapezoid-class}
\alias{Trapezoid-class}
\alias{initialize,Trapezoid-method}
\alias{getArea,Trapezoid-method}
\alias{print,Trapezoid-method}
\title{A Trapezoid object}
\usage{
\S4method{getArea}{Trapezoid}(object)
\S4method{print}{Trapezoid}(x, ...)
}
\description{
Object of class \code{Trapezoid} is created by the \code{integrateIt} function
}
\details{
An object of the class `Trapezoid' has the following slots:
\itemize{
\item \code{x} a vector of x values
\item \code{y} a vector of y values
\item \code{area} area under curve defined by x and y, calculated using the trapezoid method
}
}
\author{
Gangyi Sun: \email{gangyi.sun@wustl.edu}
}
|
af1093268e609aa489871186c10ef02ccd4be51a | 04782055bc8062ea939cbd996e8be074402a67a2 | /assignments/assignment_3/homework_3_jerrison_li.r | 26bf8bd7648470b6a512d9493b6b24a87300f810 | [] | no_license | jerrison/Wharton_STAT705 | 21435bcf7c094f71074402a2257ca0c470cf3907 | 1529e31e8856a4b7a74ff63d720b1aee7c13ae4d | refs/heads/master | 2021-09-26T08:31:27.006836 | 2018-10-28T00:08:01 | 2018-10-28T00:08:01 | 146,604,827 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,788 | r | homework_3_jerrison_li.r |
#### STAT 705 FALL 2018 ASSIGNMENT 3 (Functions)
#### NAME:
#### If in a question I refer to a function that we have not seen in class,
#### then use the help facility to find out about it.
#### Insert your answers under each question.
#### Submit your solutions to Canvas as a plain text file.
#### OBJECTIVE: in this homework you will build a set of functions that can be used
#### to automatically identify outlier prone columns in a financial dataset.
#### In this assignment you do NOT have to worry about missing values (NAs).
#### There is no need to accommodate NAs in your functions.
#### The data frame, stocks.df, in the .RData file called "hw3_2018.Rdata" contains the date (first column) and then the
#### daily prices for 11 different stocks.
#### The first task is to create a new data frame that has just the daily returns for these stocks.
#### Recall that the daily return is defined as (price today - price yesterday)/price yesterday.
#### Q1. (20pts.)
#a. Write a function that takes a numeric vector and returns a vector of "returns".
# The returns will have a length of one less than the input vector. You do not need to pad the vector with a leading NA.
# At this point you do not have to check the argument passed to the function.
# Call the function "daily.ret" and paste the code that defines the function below.
# Code:
# daily.ret <- function(num_vector) {
# return(diff(x = num_vector, lag = 1) / head(x = num_vector, n = -1))
daily.ret <- function(num_vector) {
  # Period-over-period returns: (x[t] - x[t-1]) / x[t-1].
  # Output has one fewer element than the input (no leading NA padding).
  previous <- num_vector[-length(num_vector)]
  current <- num_vector[-1]
  (current - previous) / previous
}
#b. Use your daily.ret function on the vector seq(10) and paste the output below.
# Call: daily.ret(num_vector = seq(10))
# Output: 1 0.5 0.333333333333333 0.25 0.2 0.166666666666667 0.142857142857143 0.125 0.111111111111111
daily.ret(num_vector = seq(10))
#c. Refine your function so that if it is called with anything but a numeric argument
# it stops and produces an informative error message.
# Use the "if" and "is.numeric" functions to achieve this goal. Paste the refined function below.
# Code:
# daily.ret <- function(num_vector) {
# if(!is.numeric(num_vector)) { # check if num_vector is numeric
# stop("JL: input vector is not numeric")
# }
# return(diff(x = num_vector, lag = 1) / head(x = num_vector, n = -1))
# }
daily.ret <- function(num_vector) {
  # Daily return series: (price[t] - price[t-1]) / price[t-1].
  # Rejects non-numeric input with an informative error (Q1c).
  if (!is.numeric(num_vector)) {
    stop("JL: input vector is not numeric")
  }
  previous_prices <- head(x = num_vector, n = -1)
  (num_vector[-1] - previous_prices) / previous_prices
}
#d. Call the daily.ret function on the vector c(1,2,"hello", "world") and paste
# the output below.
# Call: daily.ret(c(1, 2, "hello", "world"))
# Output:
# Error in daily.ret(c(1, 2, "hello", "world")): JL: input vector is not numeric
# Traceback:
# 1. daily.ret(c(1, 2, "hello", "world"))
# 2. stop("JL: input vector is not numeric") # at line 13 of file <text>
# daily.ret(c(1, 2, "hello", "world"))
#e. Read in the .RData file called "hw3_2018.Rdata". It contains a data frame
# called "stock.df".
# Using the "sapply" function (it will return a matrix here rather than a list),
# sapply your daily.ret function to all the columns of the stock.df data frame
# (excluding the date column).
# The output will be a matrix, but you can store it as a data frame by applying
# the "as.data.frame" function to it.
# Store the daily returns in a new data frame called "returns.df"
# Call:
# load("/Users/jerrison/Wharton_MBA/Wharton_STAT705/data/hw3_2018.rdata")
# returns.df <- as.data.frame(x = sapply(X = stock.df[, 2:length(stock.df)],
# FUN = daily.ret))
# Load the saved workspace containing `stock.df` (date + 11 price columns),
# then compute each stock's daily returns; sapply() over the non-date columns
# returns a matrix, which as.data.frame() converts back to a data frame.
load("/Users/jerrison/Wharton_MBA/Wharton_STAT705/data/hw3_2018.rdata")
returns.df <- as.data.frame(x = sapply(X = stock.df[, 2:length(stock.df)],
                                       FUN = daily.ret))
#f Add a column named "DATE" at the *start* of the returns.df data frame that has the date of the daily returns.
# Print the column names of this data frame below.
# Call: returns.df <- cbind(DATE = stock.df$Date[2:length(stock.df$Date)],
# returns.df)
# print(colnames(returns.df))
# Output: [1] "DATE" "AMZN" "BP" "CALA" "CVX" "GBTC" "GERN" "GPRO" "JPM" "MNKD"
# [11] "TSLA" "XOM"
# Prepend the matching dates; the first date is dropped because returns
# only exist from the second trading day onward.
returns.df <- cbind(DATE = stock.df$Date[2:length(stock.df$Date)],
                    returns.df)
print(colnames(returns.df))
#g Using either the apply (or sapply) and mean function, write code to find the
# mean daily return for each stock and
# print these means (you need to show your code, and cut and paste the results
# of the print statement).
# Call: print(sapply(X = returns.df[2:length(returns.df)], FUN = mean))
# Output:
# AMZN BP CALA CVX GBTC
# 0.0028983734 0.0009700299 -0.0035623699 0.0003260614 0.0033998966
# GERN GPRO JPM MNKD TSLA
# 0.0048968531 -0.0019336444 0.0010160846 0.0020154994 -0.0005588027
# XOM
# 0.0003619135
print(sapply(X = returns.df[2:length(returns.df)], FUN = mean))
#### Q2. (15pts.) Financial returns are well known to often be heavy tailed
# ("fat tailed") compared to a normal distribution.
#### One numerical measure of just how heavy tailed a distribution is, is
# called the "excess kurtosis".
#a. Write a function in R that takes a single numeric vector argument, call it
# y, and finds the excess kurtosis of the vector.
# The definition of excess kurtosis we will use is in the document
# "kurtosis.pdf" in the Assignments folder on Canvas.
# You R code should *not* use a for loop in the calculation.
# Call your function "kurtosis" and cut and paste your function here.
# If you have your function defined correctly calling kurtosis(y = seq(10))
# should return -1.2.
# Call: kurtosis
# Output:
# function (y)
# {
# n <- length(y)
# sample_mean <- mean(x = y)
# sample_sd <- sd(x = y)
# summation <- sum(sapply(X = y, FUN = function(y) {
# ((y - sample_mean)/sample_sd)^4
# }))
# return(((n * (n + 1))/((n - 1) * (n - 2) * (n - 3))) * summation -
# ((3 * (n - 1)^2)/((n - 2) * (n - 3))))
# }
kurtosis <- function(y) {
  # Sample excess kurtosis (bias-corrected estimator):
  #   n(n+1)/((n-1)(n-2)(n-3)) * sum(z^4) - 3(n-1)^2/((n-2)(n-3)),
  # where z standardizes y by its sample mean and sd.
  # Returns -1.2 for seq(10) (see Q2a).
  n <- length(y)
  sample_mean <- mean(x = y)
  sample_sd <- sd(x = y)
  # Vectorized replacement for the per-element sapply() loop: identical sum,
  # faster, and avoids shadowing `y` inside an anonymous function.
  summation <- sum(((y - sample_mean) / sample_sd)^4)
  return( ((n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))) * summation -
            ((3 * (n - 1)^2) / ((n - 2) * (n - 3))) )
}
kurtosis
#b. Use your kurtosis function with the following vector and print the result:
# Call: kurtosis(y = my.vec)
# Output: 1.17475978639633
my.vec <- c(2.10921657, -0.89218616, -0.23085193, 0.39297494, 1.09767915,
            -0.13936090, -0.36960242, -0.02307942)
# Excess kurtosis of the sample vector (expected 1.17475978639633, see above).
kurtosis(y = my.vec)
#c. Refine your kurtosis function, so that if it is called with anything other
# than a numeric argument
# it stops and produces an informative error message. Use the "if" and
# "is.numeric" functions to achieve this goal.
# Cut and paste your refined function here and show the results of calling it
# with the argument kurtosis(y = letters).
# Call:
# kurtosis <- function(y) {
# if(!is.numeric(x = y)) {
# stop("JL: input vector is not numeric")
# }
# n <- length(y)
# sample_mean <- mean(x = y)
# sample_sd <- sd(x = y)
# summation <- sum(sapply(X = y, FUN = function(y) {
# ((y - sample_mean)/sample_sd)^4
# }))
# return( ((n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))) * summation -
# ((3 * (n - 1)^2) / ((n - 2) * (n - 3))) )
# }
# kurtosis(y = letters)
# Output:
# Error in kurtosis(y = letters): JL: input vector is not numeric
# Traceback:
# 1. kurtosis(y = letters)
# 2. stop("JL: input vector is not numeric") # at line 9 of file <text>
kurtosis <- function(y) {
  # Sample excess kurtosis (bias-corrected estimator); see Q2a.
  # Stops with an informative error on non-numeric input (Q2c).
  if(!is.numeric(x = y)) {
    stop("JL: input vector is not numeric")
  }
  n <- length(y)
  sample_mean <- mean(x = y)
  sample_sd <- sd(x = y)
  # Vectorized replacement for the per-element sapply() loop: identical sum,
  # faster, and avoids shadowing `y` inside an anonymous function.
  summation <- sum(((y - sample_mean) / sample_sd)^4)
  return( ((n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))) * summation -
            ((3 * (n - 1)^2) / ((n - 2) * (n - 3))) )
}
# kurtosis(y = letters)
#d. Given the definition of excess kurtosis, it does not make sense to use
# this function unless the length of the
# input vector is strictly greater than three. Add another argument check to
# your kurtosis function,
# that stops and outputs an error message if the length of the input vector y,
# is less than four.
# Cut and paste your improved kurtosis function here and show the results of
# calling it with the argument, kurtosis(y = seq(3)).
# New function:
# kurtosis <- function(y) {
# if(!is.numeric(x = y)) {
# stop("JL: input vector is not numeric")
# }
# if(length(x = y) < 4) {
# stop("JL: input vector length is less than 4")
# }
# n <- length(y)
# sample_mean <- mean(x = y)
# sample_sd <- sd(x = y)
# summation <- sum(sapply(X = y, FUN = function(y) {
# ((y - sample_mean)/sample_sd)^4
# }))
# return( ((n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))) * summation -
# ((3 * (n - 1)^2) / ((n - 2) * (n - 3))) )
# }
# Call:
# kurtosis(y = seq(3))
# Output:
# Error in kurtosis(y = seq(3)): JL: input vector length is less than 4
# Traceback:
# 1. kurtosis(y = seq(3))
# 2. stop("JL: input vector length is less than 4") # at line 16 of file <text>
kurtosis <- function(y) {
  # Sample excess kurtosis (bias-corrected estimator); see Q2a.
  # Validates that the input is numeric and has at least 4 observations,
  # since the correction terms divide by (n-1)(n-2)(n-3) (Q2d).
  if(!is.numeric(x = y)) {
    stop("JL: input vector is not numeric")
  }
  if(length(x = y) < 4) {
    stop("JL: input vector length is less than 4")
  }
  n <- length(y)
  sample_mean <- mean(x = y)
  sample_sd <- sd(x = y)
  # Vectorized replacement for the per-element sapply() loop (same result).
  summation <- sum(((y - sample_mean) / sample_sd)^4)
  return( ((n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))) * summation -
            ((3 * (n - 1)^2) / ((n - 2) * (n - 3))) )
}
# kurtosis(y = seq(3))
#e. Add a logical argument called "excess" to your kurtosis function, that
# defaults to TRUE.
# When excess = TRUE the function should return the excess kurtosis as before,
# but when excess = FALSE is should return the excess kurtosis + 3.
# You can review Class 6, slide 23 to see how I returned a value that depended
# on a logical condition.
# Cut and paste your refined function here and show the results of calling it
# with the arguments
# kurtosis(y = seq(10), excess = FALSE)
# Refined function:
# kurtosis <- function(y, excess = TRUE) {
# if(!is.numeric(x = y)) {
# stop("JL: input vector is not numeric")
# }
# if(length(x = y) < 4) {
# stop("JL: input vector length is less than 4")
# }
# n <- length(y)
# sample_mean <- mean(x = y)
# sample_sd <- sd(x = y)
# summation <- sum(sapply(X = y, FUN = function(y) {
# ((y - sample_mean)/sample_sd)^4
# }))
# excess_kurtosis = ( ((n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))) *
# summation -
# ((3 * (n - 1)^2) / ((n - 2) * (n - 3))) )
# if(excess == TRUE) {
# return(excess_kurtosis)
# }
# return(excess_kurtosis + 3)
# }
# Call:
# kurtosis(y = seq(10), excess = FALSE)
# Output:
# 1.8
kurtosis <- function(y, excess = TRUE) {
  # Sample kurtosis of a numeric vector (bias-corrected estimator).
  #   y      : numeric vector, length >= 4.
  #   excess : if TRUE (default) return excess kurtosis;
  #            if FALSE return "raw" kurtosis = excess kurtosis + 3 (Q2e).
  if(!is.numeric(x = y)) {
    stop("JL: input vector is not numeric")
  }
  if(length(x = y) < 4) {
    stop("JL: input vector length is less than 4")
  }
  n <- length(y)
  sample_mean <- mean(x = y)
  sample_sd <- sd(x = y)
  # Vectorized replacement for the per-element sapply() loop (same result).
  summation <- sum(((y - sample_mean) / sample_sd)^4)
  excess_kurtosis <- ((n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))) * summation -
    ((3 * (n - 1)^2) / ((n - 2) * (n - 3)))
  if (excess) {
    return(excess_kurtosis)
  }
  excess_kurtosis + 3
}
kurtosis(y = seq(10), excess = FALSE)
#### Q3. (15pts.) Applying the "kurtosis" function you wrote in Q2 to the
# numeric columns of "returns.df" you created in Q1.
#### Below you will apply the "kurtosis" function you just wrote, to the
# numeric columns of "returns.df".
#### You can identify the numeric columns with the built in R command
# "is.numeric".
#### It plays the role here of the "is.dichotomous" function we created in
# class.
#### You can use the "sapply" command (a special version of lapply) on the data
# frame to identify the numeric columns.
#### It returns a vector rather than a list which can save time having to
# "unlist" the result.
#### Note: if you try to use "apply" on the data frame, apply will first coerce
# the data frame to a matrix, which can
#### only contain one variable type, which will be character, so is.numeric
# would report
#### all FALSE which is not what you want.
#a. Write code to extract just the numeric columns of returns.df into their own
# new data frame, called justnum.df
# Your code needs to identify the numeric columns programatically.
# Paste the code that you used for the extraction and programatically report
# how many columns you extracted.
# Call:
# numeric_cols <- sapply(X = returns.df, FUN = is.numeric)
# justnum.df <- returns.df[, numeric_cols]
# print(paste("Extracted", sum(numeric_cols), "numeric columns from returns.df"))
# Output:
# "Extracted 11 numeric columns from returns.df"
# Identify the numeric columns programmatically. vapply() is used instead of
# sapply() because it guarantees a logical vector of length ncol(returns.df)
# (sapply() can silently change its return shape on unusual input).
numeric_cols <- vapply(returns.df, is.numeric, logical(1))
justnum.df <- returns.df[, numeric_cols]
print(paste("Extracted", sum(numeric_cols), "numeric columns from returns.df"))
#b. Paste the code needed to apply the kurtosis function to the justnum.df data
# frame (use the default value for the excess argument).
# Print the output from the kurtosis command when applied to the numeric
# columns. Use either sapply or the apply command, no looping allowed.
# Call:
# print(sapply(X = justnum.df, FUN = kurtosis))
# Output:
# AMZN BP CALA CVX GBTC GERN GPRO JPM
# 13.839662 1.729705 7.565025 2.978323 2.044382 8.270739 4.528455 2.300904
# MNKD TSLA XOM
# 42.082549 4.292236 4.286294
print(sapply(X = justnum.df, FUN = kurtosis))
#c. Write code to programatically identify which stock has the highest kurtosis
# in the justnum.df data frame. That is, your code needs to find
# the stock with the maximum kurtosis and print both the stock name and its
# excess kurtosis value.
# Call:
# print(sort(x = sapply(X = justnum.df, FUN = kurtosis), decreasing = TRUE)[1])
# Output:
# MNKD
# 42.08255
print(sort(x = sapply(X = justnum.df, FUN = kurtosis), decreasing = TRUE)[1])
|
2afbc73c7159fa33a1cbae10761216f40c30dffd | d45f38e0f2265d9fb52cb4ac1c2843eabf296c3a | /R-scripts/mir_pathway-bubble-20150309.r | 03ce2c9574382684acdb858ff4d8b440b829c378 | [] | no_license | dyusuf/sRNA-toolkit | cc944b330a0785b5d29a2086d63b5b8a7d7621f0 | 5ed7889954432dd4ee8c8320885ca11bb48037ff | refs/heads/master | 2020-06-11T08:17:06.040517 | 2016-12-06T12:04:30 | 2016-12-06T12:04:46 | 75,726,293 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,420 | r | mir_pathway-bubble-20150309.r | setwd("/home/dilmurat/Work/combined-experiments/")
library(plyr)
library(ggplot2)
# DIANA miRPath enrichment results (pathway, miRNA, p_value, gene_number);
# keep only strongly enriched pathway/miRNA pairs.
mirpath <- read.csv("miRPath_pathway_miRNA.csv", header=T, sep=",")
mirpath <- subset(mirpath, p_value<=0.001)
# Drop factor levels that disappeared after the p-value filter.
# NOTE(review): `mirpath <- droplevels(mirpath)` would do the same in one call.
for (i in names(mirpath)) {mirpath[i] <- droplevels(mirpath[i])}
mirpath$pathway <- gsub("_", " ", mirpath$pathway)
library(RColorBrewer)
getPalette = colorRampPalette(brewer.pal(8, "Accent"))
colourCount = length(unique(mirpath$miRNA))
# Order pathway levels by total target-gene count (ascending) and miRNA
# levels by how many enriched pathways each miRNA appears in (descending).
sum_pathway <- ddply(mirpath, c("pathway"), summarise,
                     total = sum(gene_number))
mirpath$pathway <- factor(mirpath$pathway, levels = sum_pathway[order(sum_pathway$total, decreasing=F), "pathway"])
mirpath$miRNA <- factor(mirpath$miRNA, levels=as.vector(arrange(count(as.vector(mirpath$miRNA)), desc(freq))$x))
# mirpath$miRNA <- factor(mirpath$miRNA, levels=
# c('hsa-miR-192-5p',
# 'hsa-miR-25-3p',
# 'hsa-miR-142-5p',
# 'hsa-miR-30e-5p',
# 'hsa-miR-22-3p',
# 'hsa-miR-423-5p',
# 'hsa-miR-92a-3p',
# 'hsa-miR-103a-3p',
# 'hsa-miR-486-5p',
# 'hsa-miR-182-5p',
# 'hsa-miR-16-5p',
# 'hsa-miR-15a-5p',
# 'hsa-miR-451a',
# 'hsa-miR-27b-3p',
# 'hsa-miR-126-5p',
# 'hsa-miR-191-5p',
# 'hsa-miR-10a-5p',
# 'hsa-miR-143-3p',
# 'hsa-miR-21-5p',
# 'hsa-miR-10b-5p'))
# Attach each miRNA's plasma abundance: match the third "-"-separated token
# of the miRNA name against `cor_exo_plasma` (expected to exist in the
# workspace; not created in this file) and take the most abundant hit.
# NOTE(review): plasma_abundance starts as the string "NA" and is converted
# with as.numeric() later; a true NA would be cleaner -- confirm intended.
mirpath$plasma_abundance <- "NA"
for (i in levels(mirpath$miRNA)){mirpath[mirpath["miRNA"] == i, ]$plasma_abundance <-
  sort(subset(cor_exo_plasma, grepl(strsplit(i, "-")[[1]][3], rna))$plasma_abundance, decreasing=T)[1]}
# assuming there would not be miRNAs in mirpath$miRNA which are derived from the same locus
# Map each pathway to its functional class from the annotation table.
mirpath_class <- read.csv("mirpath_annotation-20141127.csv", header=T, sep="\t")
mirpath_class$pathway <- gsub("_"," ", mirpath_class$pathway)
mirpath$class <- "NA"
for (i in levels(mirpath$pathway)){mirpath[mirpath["pathway"] == i, ]$class <-
  as.character(subset(mirpath_class, pathway == i)$class)}
# Pathways to highlight individually in the plots; everything else is
# collapsed into an "others" category below.
notable_pathway <- c(
  "PI3K-Akt_signaling_pathway",
  "MAPK_signaling_pathway",
  "Ubiquitin_mediated_proteolysis",
  "Wnt_signaling_pathway",
  "mTOR_signaling_pathway",
  "ErbB_signaling_pathway",
  "Focal_adhesion",
  "Regulation_of_actin_cytoskeleton",
  "T_cell_receptor_signaling_pathway",
  "Neurotrophin_signaling_pathway",
  "Long-term_potentiation",
  "Cholinergic_synapse",
  "Glutamatergic_synapse",
  "Endocytosis",
  "Insulin_signaling_pathway"
)
# Match the earlier underscore-to-space conversion of mirpath$pathway.
notable_pathway <- gsub("_", " ", notable_pathway)
mirpath$notable_pathway <- "NA"
for (i in levels(mirpath$pathway)){if (i %in% notable_pathway)
  {mirpath[mirpath["pathway"] == i, ]$notable_pathway <- i}
  else
  {mirpath[mirpath["pathway"] == i, ]$notable_pathway <- "others"}
}
# Strip the species prefix, then assign each miRNA to one of the three
# hand-curated clusters (anything unlisted becomes "others").
mirpath$miRNA <- factor(gsub("hsa-", "", mirpath$miRNA))
cluster1 <- c("miR-22-3p", "miR-25-3p",
              "miR-142-5p", "miR-192-5p",
              "miR-30e-5p")
cluster2 <- c("miR-486-5p", "miR-92a-3p",
              "miR-16-5p", "miR-451a",
              "miR-182-5p", "miR-103a-3p",
              "miR-15a-5p")
cluster3 <- c("miR-191-5p", "miR-126-5p",
              "miR-10a-5p", "miR-27b-3p",
              "miR-21-5p")
mirpath$cluster <- "NA"
for (i in levels(mirpath$miRNA)){
  if (i %in% cluster1)
  {mirpath[mirpath["miRNA"] == i, ]$cluster <- "cluster 1"}
  else if (i %in% cluster2)
  {mirpath[mirpath["miRNA"] == i, ]$cluster <- "cluster 2"}
  else if (i %in% cluster3)
  {mirpath[mirpath["miRNA"] == i, ]$cluster <- "cluster 3"}
  else
  {mirpath[mirpath["miRNA"] == i, ]$cluster <- "others"}
}
# Aggregate target-gene counts per class/pathway/cluster for the bar plot.
mirpath_summery <- ddply(mirpath, c("class", "notable_pathway", "cluster"), summarise,
                         gene_number = sum(gene_number)
)
class_summery <- ddply(mirpath, c("class", "cluster"), summarise,
                       gene_number = sum(gene_number)
)
mirpath_summery$class <- factor(mirpath_summery$class, levels = class_summery[order(class_summery$gene_number, decreasing=F), "class"])
mirpath_summery$notable_pathway <- factor(mirpath_summery$notable_pathway, levels=c(notable_pathway,"others"))
mirpath_summery_2_3 <- subset(mirpath_summery, cluster %in% c("cluster 2", "cluster 3"))
mirpath_4 <- subset(mirpath,
                    miRNA %in% c("miR-486-5p", "miR-92a-3p", "miR-126-5p", "miR-27b-3p")
)
mirpath_summery_4 <- ddply(mirpath_4, c("class", "notable_pathway", "cluster"), summarise,
                           gene_number = sum(gene_number)
)
# Stacked horizontal bar chart: target genes per class, coloured by pathway,
# one panel per cluster.
pdf("mirpathway_class.pdf", width=9, height=10)
ggplot(mirpath_summery_2_3, aes(x=class, y=gene_number, fill=notable_pathway)) +
  geom_bar(stat="identity") +
  scale_fill_manual(values=getPalette(colourCount), name="Pathway") +
  theme(axis.text.y = element_text(size=7, colour="#000000")) +
  theme(axis.text.x = element_text(size=7, colour="#000000")) +
  theme(legend.text=element_text(size=7), legend.title = element_text(size=8)) +
  coord_flip() +
  ylab("Number of target genes") +
  xlab("Pathway") +
  facet_wrap(~cluster, ncol=3)
dev.off()
# Re-order miRNA levels by plasma abundance (most abundant first).
mirpath$plasma_abundance <- as.numeric(mirpath$plasma_abundance)
mirpath$miRNA <- factor(mirpath$miRNA, levels=unique(as.vector(arrange(mirpath, desc(plasma_abundance))$miRNA)))
ordered_RNA <- gsub("hsa-", "", levels(mirpath$miRNA))
mirpath$miRNA <- factor(mirpath$miRNA, levels=ordered_RNA)
mirpath_2_3 <- subset(mirpath, cluster %in% c("cluster 2", "cluster 3"))
miRNA_levels <- levels(mirpath_2_3$miRNA)
# NOTE(review): the csv below was exported (commented write.table), manually
# re-ordered in LibreOffice, and re-imported -- the file must exist on disk.
# write.table(mirpath_2_3, file = "mirpath_2_3.csv", quote=F, sep="\t")
#order data with libraoffice
mirpath_2_3 <- read.csv("mirpath_2_3.csv", header=T, sep="\t")
mirpath_2_3$pathway <- factor(mirpath_2_3$pathway, levels = rev(unique(as.vector(mirpath_2_3$pathway))))
mirpath_2_3$miRNA <- factor(mirpath_2_3$miRNA, levels = miRNA_levels)
# Bubble plot: miRNA vs pathway; point size = target-gene count, colour =
# log10 plasma abundance; faceted by cluster. Saved as fig5_pathway.pdf.
p_pathway <- ggplot(mirpath_2_3, aes(x=miRNA, y=pathway)) +
  geom_point(aes(color=log10(plasma_abundance), size=gene_number)) +
  scale_size_area(max_size = 3.5, name="Number of\ntarget genes") +
  # scale_colour_gradientn(colours=c("red","violet","blue")) +
  # geom_text(aes(y=as.numeric(pathway), label= round(gene_number,0)), hjust=1.3,
  #           size=2) +
  scale_color_gradient( low = "#132B43",
                        high = "red", name="Relative\nnormalized\nread count\n(log10)") +
  theme_minimal() +
  theme(axis.text.y = element_text(size=6, colour="#000000")) +
  theme(axis.text.x = element_text(size=6, colour="#000000", angle=90, vjust=0.7, hjust=0.7)) +
  theme(legend.text = element_text(size=6), legend.title=element_text(size=6.5)) +
  xlab("miRNA") +
  ylab("Pathway") +
  theme(axis.title = element_text(size=8, colour="#000000")) +
  # ggtitle("A") +
  theme(plot.title = element_text(hjust = 0)) +
  facet_wrap(~cluster, scale="free_x")
pdf("fig5_pathway.pdf")
p_pathway
dev.off()
# NOTE(review): pathway level indices [54:58] are hard-coded -- confirm they
# still address the intended top-5 pathways if the input data changes.
top5_pathway <- levels(mirpath$pathway)[54:58]
# NOTE(review): mirpath$pathway had underscores replaced by spaces earlier in
# this script, so the underscore-containing comparisons below likely match
# zero rows -- verify.
Neurotrophin_signaling_pathway_mirs <- as.vector(subset(mirpath, pathway=="Neurotrophin_signaling_pathway")$miRNA)
MAPK_signaling_pathway_mirs <- as.vector(subset(mirpath, pathway=="MAPK_signaling_pathway")$miRNA)
Focal_adhesion_mirs <- as.vector(subset(mirpath, pathway=="Focal_adhesion")$miRNA)
Pathways_in_cancer_mirs <- as.vector(subset(mirpath, pathway=="Pathways_in_cancer")$miRNA)
PI3K_Akt_signaling_pathway_mirs <- as.vector(subset(mirpath, pathway=="PI3K-Akt_signaling_pathway")$miRNA)
intersect(c(Neurotrophin_signaling_pathway_mirs, MAPK_signaling_pathway_mirs),
          c(Focal_adhesion_mirs, Pathways_in_cancer_mirs,
            PI3K_Akt_signaling_pathway_mirs))
top5_mirpath <- subset(mirpath, pathway %in% top5_pathway)
top5_mirs <- unique(top5_mirpath$miRNA)
# Prepare the time-series table (`key_seqs`, expected to exist in the
# workspace): normalise individual IDs and rename the "exosome145" condition.
key_seqs$individual_id <- as.factor(toupper(key_seqs$individual_id))
key_seqs$condition <- as.factor(sapply(key_seqs$condition, gsub, pattern="exosome145", replacement="exosome"))
# Subset of sequences selected for plotting (commented entries excluded).
key_series_plot <- subset(key_seqs,
                          sequence %in% c(
                            "GGCTGGTCCGATGGTAGTGGGTTATCAGAACT",
                            "GTTTCCGTAGTGTAGTGGTTATCACGTTCGCCT",
                            "CGCGACCTCAGATCAGACGTGGCGACCCGCTGAAT",
                            "TATTGCACTTGTCCCGGCCTGT",
                            "TCCTGTACTGAGCTGCCCCGAG",
                            # "TGTAAACATCCTTGACTGGAAGCT",
                            "TTCACAGTGGCTAAGTTCTG",
                            # "CATTGCACTTGTCTCGGTCTGA",
                            # "AAGCTGCCAGTTGAAGAACTGT",
                            "TAGCTTATCAGACTGATGTTGA",
                            "TTTGGCAATGGTAGAACTCACA",
                            "TAGCAGCACGTAAATATTGGCG",
                            "TAGCAGCACATAATGGTTTG",
                            "CATTATTACTTTTGGTACGCG",
                            "AAGCTGCCAGTTGAAGAACTGT"
                          ))
# NOTE(review): the name says "9 seqs" but ten RNAs are listed -- confirm.
key_series_9_seqs <- subset(key_seqs,
                            rna %in% c("miR-486-5p", "yRF-Y4-5p",
                                       "miR-92a-3p", "miR-16-5p",
                                       "miR-21-5p", "miR-30e-5p-3'R2",
                                       "miR-126-5p", "yRF-Y4-3p",
                                       "tRF-tRNA-Val(CAC/AAC)", "miR-22-3p"
                            ))
key_series_9_seqs$rna <- droplevels(key_series_9_seqs$rna)
library(scales)
# Diurnal abundance profile for a single RNA.
#
# Draws log10(read count + 1) over the sampled time points, one line per
# sample type (`condition`), faceted by individual.
#
# Args:
#   name: RNA identifier to plot (matched against the `rna` column).
#   data: data frame with columns rna, time_point, abundance, condition and
#         individual_id; defaults to the global `key_series_9_seqs`, so all
#         existing calls keep working unchanged.
#
# NOTE(review): the x-axis breaks still read the global `key_seqs`
# (unique(key_seqs$time_point)) -- confirm that is intended for all inputs.
time_dynamics <- function(name, data = key_series_9_seqs) {
  ggplot(subset(data, rna == name), aes(x=time_point, y=log(abundance+1,10), group=condition, color=condition)) +
    theme_minimal() +
    geom_line(size=0.5) +
    geom_point(aes(x=time_point, y=log(abundance+1,10)), size=1) +
    scale_x_datetime(breaks = unique(key_seqs$time_point),
                     labels = date_format("%H:%M")) +
    theme(axis.text.x = element_text(size=6, colour="#000000", angle=45, vjust=0.4)) +
    theme(axis.text.y = element_text(size=6, colour="#000000")) +
    theme(legend.text=element_text(size=6), legend.title = element_text(size=6.5)) +
    scale_color_brewer(palette="Paired", name="Sample") +
    xlab("Time point") + ylab("Normalized relative read count (log10)") +
    theme(axis.title = element_text(size=8, colour="#000000")) +
    facet_wrap(~individual_id, nrow=4, scale = "free_y") +
    ggtitle(name) +
    theme(plot.title = element_text(hjust = 0))
}
# One multi-panel page per RNA, all written to a single PDF.
pdf("dynamics_time_9seqs.pdf")
time_dynamics("yRF-Y4-5p")
time_dynamics("miR-21-5p")
time_dynamics("miR-126-5p")
time_dynamics("yRF-Y4-3p")
time_dynamics("tRF-tRNA-Val(CAC/AAC)")
time_dynamics("miR-486-5p")
time_dynamics("miR-92a-3p")
time_dynamics("miR-16-5p")
time_dynamics("miR-30e-5p-3'R2")
time_dynamics("miR-22-3p")
dev.off()
# Figure panels B (yRF-Y4-5p) and A (miR-486-5p) for the manuscript.
# NOTE(review): these two plots largely duplicate time_dynamics() with small
# tweaks (titles, legend suppression) -- candidates for consolidation.
key_series_y4_5p <- subset(key_series_plot,
                           rna == "yRF-Y4-5p")
# wide_form <- reshape(key_series_y4_5p, idvar = c("sequence", "rna", "sample_id"), timevar = "condition", direction = "wide")
# cor(wide_form$abundance.plasma, wide_form$abundance.exosome145)
# plot(wide_form$abundance.plasma, wide_form$abundance.exosome145)
p_y4_5p <- ggplot(key_series_y4_5p, aes(x=time_point, y=log(abundance+1,10), group=condition, color=condition)) +
  theme_minimal() +
  geom_line(size=0.5) +
  geom_point(aes(x=time_point, y=log(abundance+1,10)), size=1) +
  # scale_color_manual(values=c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF"), name= "RNA") +
  scale_x_datetime(breaks = unique(key_seqs$time_point),
                   labels = date_format("%H:%M")) +
  theme(axis.text.x = element_text(size=6, colour="#000000", angle=45, vjust=0.4)) +
  theme(axis.text.y = element_text(size=6, colour="#000000")) +
  theme(legend.text=element_text(size=6), legend.title = element_text(size=6.5)) +
  scale_color_brewer(palette="Paired", name="Sample") +
  # scale_color_brewer(palette="Set1", name="RNA") +
  xlab("Time point") + ylab("Normalized relative read count (log10)") +
  theme(axis.title = element_text(size=8, colour="#000000")) +
  facet_wrap(~individual_id, nrow=4, scale = "free_y") +
  ggtitle("B") +
  theme(plot.title = element_text(hjust = 0))
key_series_mir_486_5p <- subset(key_series_plot,
                                rna == "miR-486-5p")
p_486_5p <- ggplot(key_series_mir_486_5p, aes(x=time_point, y=log(abundance+1,10), group=condition, color=condition)) +
  theme_minimal() +
  geom_line(size=0.5) +
  geom_point(aes(x=time_point, y=log(abundance+1,10)), size=1) +
  scale_x_datetime(breaks = unique(key_seqs$time_point),
                   labels = date_format("%H:%M")) +
  theme(axis.text.x = element_text(size=6, colour="#000000", angle=45, vjust=0.4)) +
  theme(axis.text.y = element_text(size=6, colour="#000000")) +
  theme(legend.text=element_text(size=6), legend.title = element_text(size=6.5)) +
  scale_color_brewer(palette="Paired", name="Sample", guide=F) +
  xlab("Time point") + ylab("Normalized relative read count (log10)") +
  theme(axis.title = element_text(size=8, colour="#000000")) +
  facet_wrap(~individual_id, nrow=4, scale = "free_y") +
  ggtitle("A") +
  theme(plot.title = element_text(hjust = 0))
#load 150319_qPCR-Anna-plots.Rdata for the experimental results
# write.table(all, file = "diurnal-dynamics-qPCR-Anna-20150323.csv", quote=F, sep="\t", row.names=F)
# qPCR validation figures: per-individual diurnal curves for yRF-Y4-5p
# (panel D) and miR-486 (panel C), colored by sample type.
# Fixes: header = TRUE (not the reassignable shorthand T) and
# facet_wrap(scales =) instead of the partially matched `scale =`.
diurnal_qPCR <- read.csv("diurnal-dynamics-qPCR-Anna-20150323.csv", header = TRUE, sep = "\t")
diurnal_qPCR$timeF <- as.POSIXct(diurnal_qPCR$timeF)  # NOTE(review): parsed in the session time zone
diurnal_qPCR$sampleType <- factor(diurnal_qPCR$sampleType, levels = c("plasma", "exosome fraction", "protease treated exosome fraction"))
yRF5p <- subset(diurnal_qPCR, RNA == "yRF5p")
yRF5p_qPCR_fig <- ggplot(yRF5p, aes(x = timeF, y = log(normalized.abundance, 10), group = sampleType, color = sampleType)) +
  theme_minimal() +
  geom_line(size = 0.5) +
  geom_point(aes(x = timeF, y = log(normalized.abundance, 10)), size = 1) +
  scale_x_datetime(breaks = unique(yRF5p$timeF),
                   labels = date_format("%H:%M")) +
  theme(axis.text.x = element_text(size = 6, colour = "#000000", angle = 45, vjust = 0.4)) +
  theme(axis.text.y = element_text(size = 6, colour = "#000000")) +
  theme(legend.text = element_text(size = 6), legend.title = element_text(size = 6.5)) +
  scale_color_manual(values = c("#1F78B4", "#A6CEE3", "#B2DF8A"), name = "Sample type") +
  xlab("Time point") + ylab("Normalized relative RNA abundance (log10)") +
  theme(axis.title = element_text(size = 8, colour = "#000000")) +
  facet_wrap(~Individual, nrow = 4, scales = "free_y") +
  ggtitle("D") +
  theme(plot.title = element_text(hjust = 0))
pdf("yRF5p-qPCR-Anna-20150323.pdf")
yRF5p_qPCR_fig
dev.off()
miR486_qPCR <- subset(diurnal_qPCR, RNA == "miR486")
miR486_qPCR_fig <- ggplot(miR486_qPCR, aes(x = timeF, y = log(normalized.abundance, 10), group = sampleType, color = sampleType)) +
  theme_minimal() +
  geom_line(size = 0.5) +
  geom_point(aes(x = timeF, y = log(normalized.abundance, 10)), size = 1) +
  # NOTE(review): x breaks reuse the yRF5p time grid; confirm both assays
  # share the same sampling times.
  scale_x_datetime(breaks = unique(yRF5p$timeF),
                   labels = date_format("%H:%M")) +
  theme(axis.text.x = element_text(size = 6, colour = "#000000", angle = 45, vjust = 0.4)) +
  theme(axis.text.y = element_text(size = 6, colour = "#000000")) +
  theme(legend.text = element_text(size = 6), legend.title = element_text(size = 6.5)) +
  scale_color_manual(values = c("#1F78B4", "#A6CEE3", "#B2DF8A"),
                     name = "Sample type") +
  xlab("Time point") + ylab("Normalized relative RNA abundance (log10)") +
  theme(axis.title = element_text(size = 8, colour = "#000000")) +
  facet_wrap(~Individual, nrow = 4, scales = "free_y") +
  ggtitle("C") +
  theme(plot.title = element_text(hjust = 0))
pdf("miR486-qPCR-Anna-20150323.pdf")
miR486_qPCR_fig
dev.off()
library(reshape)
# Absolute copy-number qPCR results; the file is tab-separated despite the
# .csv extension.  Fixed: header = TRUE instead of the shorthand T.
absolute_number_qPCR <- read.csv("absoluteNumbersExosomePlasma_anna.csv", header = TRUE, sep = "\t")
# Keep columns 1-3 and 7 and melt to long format.
# NOTE(review): positional column selection is fragile -- confirm the column
# order of the input file before re-running.
absolute_number_qPCR_long <- melt(absolute_number_qPCR[, c(1, 2, 3, 7)])
# Derive individual (chars 1-3 of the sample id) and the one-character
# time-point code (char 5), then recode the codes to clock times.
absolute_number_qPCR_long$individual <- absolute_number_qPCR_long$sample
absolute_number_qPCR_long$individual <- substr(absolute_number_qPCR_long$individual, 1, 3)
absolute_number_qPCR_long$time_point <- absolute_number_qPCR_long$sample
absolute_number_qPCR_long$time_point <- substr(absolute_number_qPCR_long$time_point, 5, 5)
absolute_number_qPCR_long$time_point <- gsub("4", "10:30", absolute_number_qPCR_long$time_point)
absolute_number_qPCR_long$time_point <- gsub("5", "12:00", absolute_number_qPCR_long$time_point)
# absolute_number_qPCR_long$time_point <- as.POSIXct(absolute_number_qPCR_long$time_point)
# Harmonize compartment labels with the figure legends, then fix level order.
absolute_number_qPCR_long$compartment <- gsub("crude exosomes", "exosome fraction", absolute_number_qPCR_long$compartment)
absolute_number_qPCR_long$compartment <- gsub("protease treated plasma exosomes", "protease treated exosome fraction", absolute_number_qPCR_long$compartment)
absolute_number_qPCR_long$compartment <- factor(absolute_number_qPCR_long$compartment, levels = c("plasma", "exosome fraction",
                                                                                                  "protease treated exosome fraction"))
# Absolute-abundance boxplots (panels A and B).  Fixed: gsub() is already
# vectorized, so the sapply() wrappers were redundant; calling gsub()
# directly is equivalent.
yRF5p_abs <- subset(absolute_number_qPCR_long, target == "yRF-Y4-5p")
yRF5p_abs$time_point <- as.factor(gsub("10:30", "10:30 AM", yRF5p_abs$time_point))
yRF5p_abs$time_point <- as.factor(gsub("12:00", "12:00 PM", yRF5p_abs$time_point))
# Panel B: molecules per mL plasma (log10) by compartment for yRF-Y4-5p.
yRF5p_abs_fig <- ggplot(yRF5p_abs, aes(x = time_point, y = log(value, 10), fill = compartment)) +
  theme_minimal() +
  geom_boxplot() +
  theme(axis.text.x = element_text(size = 6, colour = "#000000", angle = 45, vjust = 0.4)) +
  theme(axis.text.y = element_text(size = 6, colour = "#000000")) +
  theme(legend.text = element_text(size = 6), legend.title = element_text(size = 6.5)) +
  scale_fill_manual(values = c("#1F78B4", "#A6CEE3", "#B2DF8A"), name = "Sample type") +
  xlab("Time point") + ylab("Number of molecules per milliliter plasma (log10)") +
  theme(axis.title = element_text(size = 8, colour = "#000000")) +
  ggtitle("B") +
  theme(plot.title = element_text(hjust = 0))
# Panel A: the same for miR-486-5p.
miR486_abs <- subset(absolute_number_qPCR_long, target == "miR-486-5p")
miR486_abs$time_point <- as.factor(gsub("10:30", "10:30 AM", miR486_abs$time_point))
miR486_abs$time_point <- as.factor(gsub("12:00", "12:00 PM", miR486_abs$time_point))
miR486_abs_fig <- ggplot(miR486_abs, aes(x = time_point, y = log(value, 10), fill = compartment)) +
  theme_minimal() +
  # geom_bar(position="dodge") +
  geom_boxplot() +
  # scale_x_datetime(breaks = unique(absolute_number_qPCR_long$time_point),
  #                  labels = date_format("%H:%M")) +
  theme(axis.text.x = element_text(size = 6, colour = "#000000", angle = 45, vjust = 0.4)) +
  theme(axis.text.y = element_text(size = 6, colour = "#000000")) +
  theme(legend.text = element_text(size = 6), legend.title = element_text(size = 6.5)) +
  scale_fill_manual(values = c("#1F78B4", "#A6CEE3", "#B2DF8A"), name = "Sample type") +
  xlab("Time point") + ylab("Number of molecules per milliliter plasma (log10)") +
  theme(axis.title = element_text(size = 8, colour = "#000000")) +
  # facet_wrap(~individual, nrow=4, scale = "free_y") +
  ggtitle("A") +
  theme(plot.title = element_text(hjust = 0))
library(grid)
# Define layout for the plots (2 rows, 3 columns)
layt <- grid.layout(nrow = 2, ncol = 3, heights = c(4/8, 4/8), widths = c(3/9,
3/9, 3/9), default.units = c("null", "null"))
# View the layout of plots
# grid.show.layout(layt)
# Extract the legend ("guide-box") grob and the y-axis label grob from one
# panel so they could be placed manually.
# NOTE(review): `legend` and `ylab` are modified below but never drawn in this
# figure -- this looks like leftover code; confirm before removing.
tmp <- ggplotGrob(p_y4_5p)
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
legend$vp$x <- unit(0.5, 'npc')
legend$vp$y <- unit(0.2, 'npc')
# NOTE(review): grobs[[19]] is a magic index that can shift between ggplot2
# versions; prefer a name-based lookup if this is ever reused.
ylab <- tmp$grobs[[19]]
ylab$vjust <- 8
pdf("fig3_Y4_mir486_dynamics.pdf", width=10, height=7)
# Draw plots one by one in their positions
grid.newpage()
pushViewport(viewport(layout = layt))
# NOTE(review): guides(fill=FALSE) targets the fill scale, but these plots map
# color; the call is likely a no-op -- confirm.
print(p_486_5p + guides(fill=FALSE), vp = viewport(layout.pos.row = 1, layout.pos.col = 1))
print(p_y4_5p + theme(legend.position="none"), vp = viewport(layout.pos.row = 2, layout.pos.col = 1))
print(miR486_qPCR_fig + theme(legend.position="none"), vp = viewport(layout.pos.row = 1, layout.pos.col = 2))
print(yRF5p_qPCR_fig + theme(legend.position="none"), vp = viewport(layout.pos.row = 2, layout.pos.col = 2))
print(miR486_abs_fig + theme(legend.position="none"), vp = viewport(layout.pos.row = 1, layout.pos.col = 3))
print(yRF5p_abs_fig + theme(legend.position="none"), vp = viewport(layout.pos.row = 2, layout.pos.col = 3))
dev.off()
#!!!---20150702
#data from qPCR to describe dynamics
#data from RNA-Seq to describe dynamics
#data from qPCR to describe the abundance distribution in the three compartments
#combine the results into one figure
# Sequence groupings from the abundance-dynamics clustering.  Entries marked
# "qPCR" were also measured by qPCR.  Values and order are identical to the
# original definitions; quoting is normalized to double quotes.
cluster1 <- c(
  "miR-22-3p",
  "miR-25-3p",
  "miR-423-5p",
  "miR-142-5p-5'L2-3'L3",
  "miR-192-5p",
  "miR-30e-5p-3'R2"           # qPCR
)
cluster3 <- c(
  "yRF-Y4-5p",                # qPCR
  "miR-191-5p",
  "miR-27b-3p-3'L1",
  "miR-126-5p",
  "miR-21-5p",                # qPCR
  "miR-143-3p-3'L1",
  "miR-10a-5p-3'L1",
  "tRF-tRNA-Val(CAC/AAC)",    # qPCR
  "tRF-tRNA-Gly(GCC/CCC)"
)
cluster2 <- c(
  "miR-486-5p",               # qPCR
  "miR-486-5p-3'L1",
  "miR-486-5p-3'~A",
  "miR-486-5p-3'~U",
  "miR-486-5p-3'L2",
  "miR-486-5p-3'L1~A",
  "miR-486-5p-3'~AA",
  "miR-486-5p-3'R1",
  "miR-92a-3p",               # qPCR
  "miR-92a-3p-3'L1",
  "miR-92a-3p-3'L1~A",
  "miR-16-5p",                # qPCR
  "miR-451a-3'L1",
  "miR-451a",
  "miR-451a-3'R1",
  "miR-451a-3'L2",
  "miR-22-3p-3'L1",
  "miR-25-3p-3'L2",
  "miR-423-5p-3'L2",
  "miR-423-5p-3'L1",
  "miR-103a/107-3p-3'L4",
  "miR-15a-5p-3'L2",
  "miR-182-5p-3'L2"
)
# Sequences that did not fall into a named cluster.
clusterNA <- c(
  "yRF-Y4-5p-3'L1",
  "yRF-Y4-5p-3'R1",
  "yRF-Y4-3p",                # qPCR
  "yRF-Y4-3p-3'L4",
  "miR-10b-5p-3'L1",
  "rRF-RNA28S5"
)
# Plasma-only qPCR data with assay names mapped to the sequencing
# nomenclature.  Fixes: gsub() is vectorized, so the sapply() wrappers were
# redundant; header = TRUE replaces the shorthand T; and a duplicated
# "miR126" substitution (a no-op after the first pass) was dropped.
# Substitution order is preserved.
diurnal_qPCR <- read.csv("diurnal-dynamics-qPCR-Anna-20150323.csv", header = TRUE, sep = "\t")
diurnal_qPCR$timeF <- as.POSIXct(diurnal_qPCR$timeF)  # NOTE(review): session time zone
diurnal_qPCR$sampleType <- factor(diurnal_qPCR$sampleType, levels = c("plasma", "exosome fraction", "protease treated exosome fraction"))
diurnal_qPCR_plasma <- subset(diurnal_qPCR, sampleType == "plasma")
diurnal_qPCR_plasma$sampleType <- droplevels(diurnal_qPCR_plasma$sampleType)
diurnal_qPCR_plasma$RNA <- as.factor(gsub("miR126", "miR-126-5p", diurnal_qPCR_plasma$RNA))
diurnal_qPCR_plasma$RNA <- as.factor(gsub("miR16", "miR-16-5p", diurnal_qPCR_plasma$RNA))
diurnal_qPCR_plasma$RNA <- as.factor(gsub("miR30e", "miR-30e-5p-3'R2", diurnal_qPCR_plasma$RNA))
diurnal_qPCR_plasma$RNA <- as.factor(gsub("miR486", "miR-486-5p", diurnal_qPCR_plasma$RNA))
diurnal_qPCR_plasma$RNA <- as.factor(gsub("miR92", "miR-92a-3p", diurnal_qPCR_plasma$RNA))
diurnal_qPCR_plasma$RNA <- as.factor(gsub("tRFval", "tRF-tRNA-Val(CAC/AAC)", diurnal_qPCR_plasma$RNA))
diurnal_qPCR_plasma$RNA <- as.factor(gsub("yRF3p", "yRF-Y4-3p", diurnal_qPCR_plasma$RNA))
diurnal_qPCR_plasma$RNA <- as.factor(gsub("yRF5p", "yRF-Y4-5p", diurnal_qPCR_plasma$RNA))
# --- Cluster 3 (plasma): mean dynamics from the qPCR assays (panel F) -------
# ddply()/summarise come from plyr (loaded elsewhere in the script).
# min/max hold log10(mean -/+ se).
# NOTE(review): log10(mean - se) is NaN when se >= mean.
cluster3_plasma <- subset(diurnal_qPCR_plasma, RNA %in% cluster3)
cluster3_summary <- ddply(cluster3_plasma, c("RNA", "Time.Point", "timeF", "sampleType"), summarise,
                          N = sum(!is.na(normalized.abundance)),
                          mean = mean(normalized.abundance, na.rm = TRUE),
                          sd = sd(normalized.abundance, na.rm = TRUE),
                          se = sd / sqrt(N),
                          min = log10(mean - se),
                          max = log10(mean + se),
                          log10_mean = log(mean, 10))
cluster3_summary$RNA <- factor(cluster3_summary$RNA, levels = c("yRF-Y4-5p", "miR-126-5p", "tRF-tRNA-Val(CAC/AAC)"))
# Fixes: the error bars show the standard error, so the axis label now says
# "s.e." (it was mislabelled "s.d."); facet_wrap(scales =) replaces the
# partially matched `scale =`.
cluster3_dynamics <- ggplot(cluster3_summary, aes(x = timeF, y = log10_mean, group = sampleType, color = sampleType)) +
  theme_minimal() +
  geom_line(size = 0.5) +
  geom_point(aes(x = timeF, y = log10_mean), size = 1.5) + theme(legend.position = "none") +
  geom_errorbar(aes(ymax = max, ymin = min), position = "dodge", width = 0.5) +
  scale_x_datetime(breaks = unique(cluster3_summary$timeF),
                   labels = date_format("%I:%M %p")) +
  theme(axis.text.x = element_text(size = 6, colour = "#000000", angle = 75, hjust = 0.9)) +
  theme(axis.text.y = element_text(size = 6, colour = "#000000")) +
  theme(legend.text = element_text(size = 6), legend.title = element_text(size = 6.5)) +
  scale_color_manual(values = c("#1F78B4", "#A6CEE3", "#B2DF8A"), name = "Sample type", guide = FALSE) +
  xlab("Time point") + ylab("Normalized relative RNA abundance \n (log10(mean) \u00B1 s.e.)") +
  theme(axis.title = element_text(size = 8, colour = "#000000")) +
  facet_wrap(~RNA, nrow = 4, scales = "free_y") +
  theme(strip.text = element_text(size = 8)) +
  ggtitle("F") +
  theme(plot.title = element_text(hjust = 0))
# --- Cluster 2 (plasma): same summary and panel (E) -------------------------
cluster2_plasma <- subset(diurnal_qPCR_plasma, RNA %in% cluster2)
cluster2_summary <- ddply(cluster2_plasma, c("RNA", "Time.Point", "timeF", "sampleType"), summarise,
                          N = sum(!is.na(normalized.abundance)),
                          mean = mean(normalized.abundance, na.rm = TRUE),
                          sd = sd(normalized.abundance, na.rm = TRUE),
                          se = sd / sqrt(N),
                          min = log10(mean - se),
                          max = log10(mean + se),
                          log10_mean = log(mean, 10))
cluster2_summary$RNA <- factor(cluster2_summary$RNA, levels = c("miR-486-5p", "miR-92a-3p", "miR-16-5p"))
cluster2_dynamics <- ggplot(cluster2_summary, aes(x = timeF, y = log10_mean, group = sampleType, color = sampleType)) +
  theme_minimal() +
  geom_line(size = 0.5) +
  geom_point(aes(x = timeF, y = log10_mean), size = 1.5) + theme(legend.position = "none") +
  geom_errorbar(aes(ymax = max, ymin = min), position = "dodge", width = 0.5) +
  scale_x_datetime(breaks = unique(cluster2_summary$timeF),
                   labels = date_format("%I:%M %p")) +
  theme(axis.text.x = element_text(size = 6, colour = "#000000", angle = 75, hjust = 0.9)) +
  theme(axis.text.y = element_text(size = 6, colour = "#000000")) +
  theme(legend.text = element_text(size = 6), legend.title = element_text(size = 6.5)) +
  scale_color_manual(values = c("#1F78B4", "#A6CEE3", "#B2DF8A"), name = "Sample type", guide = FALSE) +
  xlab("Time point") + ylab("Normalized relative RNA abundance \n (log10(mean) \u00B1 s.e.)") +
  theme(axis.title = element_text(size = 8, colour = "#000000")) +
  facet_wrap(~RNA, nrow = 4, scales = "free_y") +
  theme(strip.text = element_text(size = 8)) +
  ggtitle("E") +
  theme(plot.title = element_text(hjust = 0))
# RNA-seq series for the key sequences, plasma samples only.
# NOTE(review): ten RNAs are listed although the name says "9_seqs".
# BUG FIX: droplevels() was applied to key_series_9_seqs$rna -- a different
# object -- instead of the column of the data frame created just above.
key_series_9_seqs_plasma <- subset(key_seqs,
                                   rna %in% c("miR-486-5p", "yRF-Y4-5p",
                                              "miR-92a-3p", "miR-16-5p",
                                              "miR-21-5p", "miR-30e-5p-3'R2",
                                              "miR-126-5p", "yRF-Y4-3p",
                                              "tRF-tRNA-Val(CAC/AAC)", "miR-22-3p"
                                   ) &
                                     condition == "plasma")
key_series_9_seqs_plasma$rna <- droplevels(key_series_9_seqs_plasma$rna)
key_series_9_seqs_plasma$condition <- droplevels(key_series_9_seqs_plasma$condition)
# Per-(RNA, time point, condition) summary; min/max hold log10(mean -/+ se).
key_series_9_seqs_plasma_summary <- ddply(key_series_9_seqs_plasma, c("rna", "time_point", "condition"), summarise,
                                          N = sum(!is.na(abundance)),
                                          mean = mean(abundance, na.rm = TRUE),
                                          sd = sd(abundance, na.rm = TRUE),
                                          se = sd / sqrt(N),
                                          min = log10(mean - se),
                                          max = log10(mean + se),
                                          log10_mean = log(mean, 10))
# RNA-seq counterparts (panels C and D): per-cluster mean dynamics from the
# sequencing data.  Fixes: facet_wrap(scales =) instead of the partially
# matched `scale =`, and the y-axis label now says "s.e." to match the
# plotted error bars (log10(mean -/+ se)).
cluster2_RNAseq_summary <- subset(key_series_9_seqs_plasma_summary, rna %in% cluster2)
cluster2_RNAseq_summary$rna <- factor(cluster2_RNAseq_summary$rna, levels = c("miR-486-5p", "miR-92a-3p", "miR-16-5p"))
cluster2_dynamics_RNAseq <- ggplot(cluster2_RNAseq_summary, aes(x = time_point, y = log10_mean, group = condition, color = condition)) +
  theme_minimal() +
  geom_line(size = 0.5) +
  geom_point(aes(x = time_point, y = log10_mean), size = 1.5) + theme(legend.position = "none") +
  geom_errorbar(aes(ymax = max, ymin = min), position = "dodge", width = 0.5) +
  scale_x_datetime(breaks = unique(cluster2_RNAseq_summary$time_point),
                   labels = date_format("%I:%M %p")) +
  theme(axis.text.x = element_text(size = 6, colour = "#000000", angle = 75, hjust = 0.9)) +
  theme(axis.text.y = element_text(size = 6, colour = "#000000")) +
  theme(legend.text = element_text(size = 6), legend.title = element_text(size = 6.5)) +
  scale_color_manual(values = c("#1F78B4", "#A6CEE3", "#B2DF8A"), guide = FALSE) +
  xlab("Time point") + ylab("Normalized relative RNA abundance \n (log10(mean) \u00B1 s.e.)") +
  theme(axis.title = element_text(size = 8, colour = "#000000")) +
  facet_wrap(~rna, nrow = 4, scales = "free_y") +
  theme(strip.text = element_text(size = 8)) +
  ggtitle("C") +
  theme(plot.title = element_text(hjust = 0))
cluster3_RNAseq_summary <- subset(key_series_9_seqs_plasma_summary, rna %in% c("yRF-Y4-5p", "miR-126-5p", "tRF-tRNA-Val(CAC/AAC)"))
cluster3_RNAseq_summary$rna <- factor(cluster3_RNAseq_summary$rna, levels = c("yRF-Y4-5p", "miR-126-5p", "tRF-tRNA-Val(CAC/AAC)"))
cluster3_dynamics_RNAseq <- ggplot(cluster3_RNAseq_summary, aes(x = time_point, y = log10_mean, group = condition, color = condition)) +
  theme_minimal() +
  geom_line(size = 0.5) +
  geom_point(aes(x = time_point, y = log10_mean), size = 1.5) + theme(legend.position = "none") +
  geom_errorbar(aes(ymax = max, ymin = min), position = "dodge", width = 0.5) +
  scale_x_datetime(breaks = unique(cluster3_RNAseq_summary$time_point),
                   labels = date_format("%I:%M %p")) +
  theme(axis.text.x = element_text(size = 6, colour = "#000000", angle = 75, hjust = 0.9)) +
  theme(axis.text.y = element_text(size = 6, colour = "#000000")) +
  theme(legend.text = element_text(size = 6), legend.title = element_text(size = 6.5)) +
  scale_color_manual(values = c("#1F78B4", "#A6CEE3", "#B2DF8A"), guide = FALSE) +
  xlab("Time point") + ylab("Normalized relative RNA abundance \n (log10(mean) \u00B1 s.e.)") +
  theme(axis.title = element_text(size = 8, colour = "#000000")) +
  facet_wrap(~rna, nrow = 4, scales = "free_y") +
  theme(strip.text = element_text(size = 8)) +
  ggtitle("D") +
  theme(plot.title = element_text(hjust = 0))
library(grid)
# Define layout for the plots (2 rows, 3 columns)
# NOTE(review): this whole layout/legend section duplicates the earlier
# fig3_Y4_mir486 block; consider factoring into a helper.
layt <- grid.layout(nrow = 2, ncol = 3, heights = c(4/8, 4/8), widths = c(3/9,
3/9, 3/9), default.units = c("null", "null"))
# View the layout of plots
# grid.show.layout(layt)
# NOTE(review): `legend` and `ylab` are extracted/modified but never drawn in
# this figure -- looks like leftover code; grobs[[19]] is a fragile magic
# index that can shift between ggplot2 versions.
tmp <- ggplotGrob(p_y4_5p)
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
legend$vp$x <- unit(0.5, 'npc')
legend$vp$y <- unit(0.2, 'npc')
ylab <- tmp$grobs[[19]]
ylab$vjust <- 8
pdf("fig3_cluster2-3_dynamics.pdf", width=10, height=7)
# Draw plots one by one in their positions
grid.newpage()
pushViewport(viewport(layout = layt))
print(miR486_abs_fig + theme(legend.position="none"), vp = viewport(layout.pos.row = 1, layout.pos.col = 1))
print(yRF5p_abs_fig + theme(legend.position="none"), vp = viewport(layout.pos.row = 2, layout.pos.col = 1))
# NOTE(review): guides(fill=FALSE) targets the fill scale, but this plot maps
# color; likely a no-op -- confirm.
print(cluster2_dynamics_RNAseq + guides(fill=FALSE), vp = viewport(layout.pos.row = 1, layout.pos.col = 2))
print(cluster3_dynamics_RNAseq + theme(legend.position="none"), vp = viewport(layout.pos.row = 2, layout.pos.col = 2))
print(cluster2_dynamics + theme(legend.position="none"), vp = viewport(layout.pos.row = 1, layout.pos.col = 3))
print(cluster3_dynamics + theme(legend.position="none"), vp = viewport(layout.pos.row = 2, layout.pos.col = 3))
dev.off()
|
84d18df36ad137e6fec7d25f3c0357868392626d | 47f1132132c8c0cae04a74c7c12658f834829e73 | /initial_filter.R | 4c0152735f2ed6aecb470a423558fe8d96a50c6d | [] | no_license | L-Dogg/pdrpy_ztm | fc3e0129f2e539d1417eeb36e089b85a8e8dff9e | 7eec121ce5f7ff89543d41f4bb6a87f785305e6e | refs/heads/master | 2021-03-19T18:12:07.061822 | 2017-05-31T18:52:12 | 2017-05-31T18:52:12 | 90,857,770 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,766 | r | initial_filter.R | # Zadaniem tego skryptu jest odfiltrowanie błędnych danych,
# które mogły się zapisać w analizowanych plikach json na skutek błędu GPS
library(jsonlite)
library(dplyr)
library(lubridate)
source('tools.R')
# Tram-network bounding box (degrees lat/lon), padded by 0.05 degrees --
# presumably a tolerance margin for GPS noise; corners are named after the
# outermost depots/termini.
TRAMS_SOUTH <- 52.118013 - 0.05 # Kabaty depot ("Zajezdnia Kabaty")
TRAMS_NORTH <- 52.309925 + 0.05 # Zoliborz depot ("Zajezdnia Zoliborz")
TRAMS_WEST <- 20.893515 - 0.05 # Gorczewska estate ("Os. Gorczewska")
TRAMS_EAST <- 21.018733 + 0.05 # Zeran Wschodni terminus
#Filters and sorts trams and saves to ($filename)-filtered.json
filter_trams <- function(filename, day_of_month, month)
{
  # Read tram GPS records from a JSON dump, keep fixes inside the tram
  # bounding box and on/after the requested date, drop exact duplicates,
  # and sort chronologically before writing "<name>-filtered.json".
  # NOTE(review): the `month` argument shadows lubridate::month(); the call
  # month(Time) still resolves to the function because R looks up names in
  # call position among functions only.
  kept <- fromJSON(filename)$results %>%
    filter(Lon >= TRAMS_WEST,
           Lon <= TRAMS_EAST,
           Lat >= TRAMS_SOUTH,
           Lat <= TRAMS_NORTH,
           day(Time) >= day_of_month,
           month(Time) >= month) %>%
    distinct() %>%
    arrange(Time)
  # Output path: input name with the trailing ".json" swapped for the suffix.
  out_path <- paste0(substring(filename, 1, nchar(filename) - 5), "-filtered.json")
  write(toJSON(kept, pretty = TRUE), file = out_path)
}
filter_lowfloor_trams <- function(filename, day_of_month, month)
{
  # Same cleaning as filter_trams(), plus derived Hour/Minute columns taken
  # from the timestamp (the "T" separator is swapped for a space first so the
  # lubridate accessors can parse the string).
  kept <- fromJSON(filename)$results %>%
    filter(Lon >= TRAMS_WEST,
           Lon <= TRAMS_EAST,
           Lat >= TRAMS_SOUTH,
           Lat <= TRAMS_NORTH,
           day(Time) >= day_of_month,
           month(Time) >= month) %>%
    distinct() %>%
    arrange(Time)
  parsable_time <- sub("T", " ", kept$Time)
  kept$Hour <- hour(parsable_time)
  kept$Minute <- minute(parsable_time)
  out_path <- paste0(substring(filename, 1, nchar(filename) - 5), "-filtered.json")
  write(toJSON(kept, pretty = TRUE), file = out_path)
}
filter_lowfloor_trams_csv <- function(filename, day_of_month, month)
{
  # CSV flavour of filter_lowfloor_trams(): reads a comma-separated dump,
  # renames the columns, applies the same bounding-box/date cleaning, derives
  # Hour/Minute, and writes JSON.
  # NOTE(review): the output name strips the last 5 characters, which for a
  # ".csv" input also drops the final character of the file stem (kept as-is
  # to match existing output files).
  raw <- read.csv(filename)
  colnames(raw) <- c("Time", "Lat", "Lon", "FirstLine", "Lines", "Brigade",
                     "LineBrigade", "Status", "LowFloor")
  kept <- raw %>%
    filter(Lon >= TRAMS_WEST,
           Lon <= TRAMS_EAST,
           Lat >= TRAMS_SOUTH,
           Lat <= TRAMS_NORTH,
           day(Time) >= day_of_month,
           month(Time) >= month) %>%
    distinct() %>%
    arrange(Time)
  parsable_time <- sub("T", " ", kept$Time)
  kept$Hour <- hour(parsable_time)
  kept$Minute <- minute(parsable_time)
  out_path <- paste0(substring(filename, 1, nchar(filename) - 5), "-filtered.json")
  write(toJSON(kept, pretty = TRUE), file = out_path)
}
# Process the three daily tram CSV dumps (Windows-style paths; arguments are
# the cut-off day of month and month).
filter_lowfloor_trams_csv('data\\2016-03-21\\20160321_tramwaje.csv', 21, 3)
filter_lowfloor_trams_csv('data\\2016-03-22\\20160322_tramwaje.csv', 22, 3)
filter_lowfloor_trams_csv('data\\2016-03-23\\20160323_tramwaje.csv', 23, 3)
# Bus bounding box (degrees lat/lon); corners named after surrounding towns.
# Unlike the tram box, no extra padding is applied.
BUSES_SOUTH <- 52.080793 # Piaseczno (southern edge)
BUSES_NORTH <- 52.408296 # Legionowo (northern edge)
BUSES_WEST <- 20.833262 # Piastow (western edge)
BUSES_EAST <- 21.354953 # Halinow (eastern edge)
filter_buses <- function(filename, day_of_month, month)
{
  # Clean one bus GPS dump and write one filtered JSON file per line
  # category.  Records are clipped to the bus bounding box, restricted to
  # on/after the requested date, de-duplicated and sorted by time.
  # The *BusLines vectors come from tools.R (sourced at the top of the file).
  initial <- fromJSON(filename)$results %>%
    filter(Lon >= BUSES_WEST, Lat <= BUSES_NORTH) %>%
    filter(Lon <= BUSES_EAST, Lat >= BUSES_SOUTH) %>%
    filter(day(Time) >= day_of_month) %>%
    filter(month(Time) >= month) %>%
    distinct %>%
    arrange(Time)
  # Split the cleaned records into one data frame per line category.
  normalLines <- initial %>% filter(Lines %in% NormalBusLines)
  fastPeriodicLines <- initial %>% filter(Lines %in% FastPeriodicBusLines)
  fastLines <- initial %>% filter(Lines %in% FastBusLines)
  zoneLines <- initial %>% filter(Lines %in% ZoneBusLines)
  zoneSupplementaryLines <- initial %>% filter(Lines %in% ZoneSupplementaryBusLines)
  zonePeriodicLines <- initial %>% filter(Lines %in% ZonePeriodicBusLines)
  specialLines <- initial %>% filter(Lines %in% SpecialBusLines)
  expressLines <- initial %>% filter(Lines %in% ExpressBusLines)
  nightLines <- initial %>% filter(Lines %in% NightBusLines)
  filteredLines <- list(initial, normalLines, fastPeriodicLines, fastLines, zoneLines, zoneSupplementaryLines, zonePeriodicLines,
                        specialLines, expressLines, nightLines)
  fileSuffixes <- c("initial", "normal", "fast-periodic", "fast", "zone", "zone-supplementary", "zone-periodic", "special",
                    "express", "night")
  # Write "<input>-<suffix>-filtered.json" for each category.
  # Fixed: the sapply(1:length(...), ...) index loop (unsafe for empty input
  # and misused for side effects) is replaced by mapply() over the paired
  # data/suffix vectors; arguments are named to avoid positional mismatches.
  result <- mapply(saveToFile, data = filteredLines, fileSuffix = fileSuffixes,
                   MoreArgs = list(filepath = filename))
}
saveToFile <- function (data, filepath, fileSuffix)
{
  # Serialize `data` as pretty-printed JSON next to `filepath`, naming the
  # output "<input-minus-last-5-chars>-<suffix>-filtered.json" (i.e. the
  # ".json" extension of the source file is stripped first).
  stem <- substring(filepath, 1, nchar(filepath) - 5)
  target <- paste0(stem, "-", fileSuffix, "-filtered.json")
  write(toJSON(data, pretty = TRUE), file = target)
}
# Run the bus and tram filters over the collected JSON dumps
# (day-of-month and month cut-offs match the collection dates).
filter_buses("data\\14-05\\buses.json", 14, 5)
filter_buses("data\\14-05\\buses2.json", 14, 5)
filter_buses("data\\14-05\\buses3.json", 14, 5)
filter_buses("data\\15-05\\buses3.json", 15, 5)
filter_trams("data\\14-05\\trams.json", 14, 5)
filter_trams("data\\14-05\\trams2.json", 14, 5)
filter_trams("data\\14-05\\trams3.json", 14, 5)
filter_trams("data\\15-05\\trams3.json", 15, 5) |
80fa36fd931cb929309de9881cf7ae143fa9c16b | 76dbce75d2127a9304e2bce5f929898e11bedf54 | /code/plan/03_processing.R | 7d17d69ad0d576fe129d794e2b406cf16f3ea3b9 | [] | no_license | milescsmith/rnaseq_drake | dcabc8f702e9d9faf1095412c49f22178a253e71 | 982591fa14979dee72b22aa24304a30c8b25f8df | refs/heads/main | 2023-04-02T10:08:49.527920 | 2021-03-26T15:00:16 | 2021-03-26T15:00:16 | 344,952,270 | 0 | 0 | null | 2021-03-26T14:50:56 | 2021-03-05T22:40:53 | R | UTF-8 | R | false | false | 49 | r | 03_processing.R | vsd = vst(dds_processed)
vsd_exprs = assay(vsd)
|
fb890a680fb08e56ab74b8290e32a5b45c7e3d39 | 3a1002a87bd2f41bcb0e814e12102d9de711581a | /model 23-06-21 with typhoon and drought as independent scenarios.R | 6bb84e56c2d8d53ec995874e1ee71e18134bf889 | [] | no_license | JMVogelsang/coping_up_with_climate-change_a_look_into_farmers_decision_strategy | 2bd820fa4631e1855745593d6cc4156f463d6ec5 | 9f7457505d94ff09d6b62609936ededec73bf07d | refs/heads/main | 2023-07-13T17:36:03.777818 | 2021-08-17T08:33:35 | 2021-08-17T08:33:35 | 376,070,251 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,013 | r | model 23-06-21 with typhoon and drought as independent scenarios.R | #### Input table for values in time period (decade) ####
input_estimates_random_decade <- data.frame(variable = c("Precipitation", "Wind", "Temperature", "Soil_quality",
"Pests", "Weeds", "Pathogenes", "Rice_price", "Labour_cost",
"Irrigation_cost", "Fertilizer_cost", "Pesticide_cost", "Machinery_cost",
"Rice_yield_attained", "Rice_yield_potential", "var_CV"),
lower = c(0.05, 0.05, 0.05, 0.05, 0.1, 0.05, 0.05, 0.6, 50, 30, 20, 10, 40, 2000, 4000, 25),
median = NA,
upper = c(0.55, 0.75, 0.2, 0.15, 0.2, 0.1, 0.15, 0.8, 70, 40, 30, 20, 80, 4000, 8000, 25),
distribution = c("posnorm", "posnorm", "posnorm", "posnorm", "posnorm", "posnorm", "posnorm", "posnorm",
"posnorm", "posnorm", "posnorm", "posnorm", "posnorm", "posnorm", "posnorm", "const"),
label = c("% yield loss", "% yield loss", "% yield loss", "% yield loss", "% yield loss", "% yield loss",
"% yield loss", "USD/kg", "USD/ha", "USD/ha", "USD/ha", "USD/ha", "USD/ha", "kg/ha", "kg/ha",
"Coefficient of variation"),
Description = c("Yield loss due to too much rain (flood) or due to too little rain (drought)",
"Yield loss due to heavy wind events (Typhoon)", "Yield loss due to heat stress",
"Yield loss due to variation in soil quality", "Yield loss due to pest infestation",
"Yield loss due to weed infestation", "Yield los due to pathogene infestation",
"Rice market price", "Labour market cost", "Price of irrigation",
"Price of fertilizer", "Price of pesticide", "Price of machinery", "Rice yield attained",
"Rice yield potential", "Coefficient of variation (measure of relative variability)"))
input_estimates_random_decade
#### Model function ####
library(decisionSupport)
# Model function evaluated by decisionSupport::mcSimulation().  All bare
# variable names (Rice_yield_potential, Wind, var_CV, ...) are injected from
# the input-estimate table because functionSyntax = "plainNames" is used in
# the simulation call below.  Returns 10-year NPVs for four scenarios
# (typhoon / no typhoon, drought / no drought) plus a combined decision value.
# NOTE: the vv()/chance_event() calls draw random numbers, so the statement
# order must not be changed if reproducibility under a fixed seed matters.
rice_function <- function(){
  # adding variation in time series to variable rice yield and rice price
  # vv() draws a 10-element yearly series around the mean with CV = var_CV.
  yields <- vv(var_mean = Rice_yield_potential,
               var_CV = var_CV,
               n = 10)
  prices <- vv(var_mean = Rice_price,
               var_CV = var_CV,
               n = 10)
  # assuming that Typhoon event would destroy rice harvest
  # (chance = Wind: in typhoon years the yield drops to 0, otherwise it is
  # drawn around the potential yield)
  typhoon_adjusted_yield <- chance_event(chance = Wind,
                                         value_if = 0,
                                         value_if_not = Rice_yield_potential,
                                         n = 10,
                                         CV_if = 50,
                                         CV_if_not = 5)
  # assuming that drought event would destroy rice harvest
  drought_adjusted_yield <- chance_event(chance = Precipitation,
                                         value_if = 0,
                                         value_if_not = Rice_yield_potential,
                                         n =10,
                                         CV_if = 50,
                                         CV_if_not = 5)
  # yield losses dependent on % yield loss due to temperature risk, soil quality, pests, weeds and pathogens.
  yield_loss <- Temperature + Soil_quality + Pests + Weeds + Pathogenes
  # overall cost as sum of labour, irrigation, fertilizer, pesticide and machinery cost.
  overall_costs <- Labour_cost + Irrigation_cost + Fertilizer_cost + Pesticide_cost + Machinery_cost
  # calculate profit when there is a typhoon
  # (the other hazard's mean loss share -- Precipitation -- is still
  # subtracted; the "/1" divisor is a no-op)
  profit_typhoon <- ((typhoon_adjusted_yield * ((1 - yield_loss - Precipitation)/1)) * prices) - overall_costs
  # calculate profit when there is no typhoon
  profit_no_typhoon <- ((yields * ((1 - yield_loss - Precipitation)/1)) * prices) - overall_costs
  # calculate profit when there is a drought (symmetrically subtracts Wind)
  profit_drought <- ((drought_adjusted_yield * ((1 - yield_loss - Wind)/1)) * prices) - overall_costs
  # calculate profit when there is no drought
  profit_no_drought <- ((yields * ((1 - yield_loss - Wind)/1)) * prices) - overall_costs
  # Calculate net present value (NPV) and discount for typhoon/ drought
  # NOTE(review): the hazard scenarios are discounted at 25% but the
  # no-hazard scenarios at 5% -- confirm this asymmetry is intentional.
  # typhoon
  NPV_typhoon <- discount(profit_typhoon, discount_rate = 25, calculate_NPV = TRUE)
  NPV_no_typhoon <- discount(profit_no_typhoon, discount_rate = 5, calculate_NPV = TRUE)
  # drought
  NPV_drought <- discount(profit_drought, discount_rate = 25, calculate_NPV = TRUE)
  NPV_no_drought <- discount(profit_no_drought, discount_rate = 5, calculate_NPV = TRUE)
  # calculate the overall NPV of the decision
  NPV_decision <- NPV_no_typhoon + NPV_no_drought - NPV_typhoon - NPV_drought
  return(list(NPV_typhoon = NPV_typhoon,
              NPV_no_typhoon = NPV_no_typhoon,
              NPV_drought = NPV_drought,
              NPV_no_drought = NPV_no_drought,
              NPV_decision = NPV_decision))
}
#### Monte Carlo Simulation ####
# Run the Monte Carlo simulation using the model function and data from input_estimates.
# 10,000 draws; "plainNames" makes the estimate variables visible as bare
# names inside rice_function().
rice_mc_simulation <- mcSimulation(estimate = as.estimate(input_estimates_random_decade),
                                   model_function = rice_function,
                                   numberOfModelRuns = 10000,
                                   functionSyntax = "plainNames")
rice_mc_simulation
##### Visualize model output graphically ####
# graphic output with typhoon/ no typhoon and drought/ no drought
plot_distributions(mcSimulation_object = rice_mc_simulation,
                   vars = c("NPV_typhoon", "NPV_no_typhoon", "NPV_drought", "NPV_no_drought"),
                   method = 'smooth_simple_overlay',
                   base_size = 12,
                   x_axis_name = "Financial outcome in $ per ha")
# graphic output with typhoon
plot_distributions(mcSimulation_object = rice_mc_simulation,
                   vars = "NPV_typhoon",
                   method = "boxplot_density",
                   old_names = "NPV_typhoon",
                   new_names = "Outcome distribution for profits in $/ha in potential decade for scenario with Typhoon.")
# graphic output without typhoon
plot_distributions(mcSimulation_object = rice_mc_simulation,
                   vars = "NPV_no_typhoon",
                   method = "boxplot_density",
                   old_names = "NPV_no_typhoon",
                   new_names = "Outcome distribution for profits in $/ha in potential decade for sceanrio without Typhoon.")
# graphic output with and without typhoon - smooth
plot_distributions(mcSimulation_object = rice_mc_simulation,
                   vars = c("NPV_typhoon", "NPV_no_typhoon"),
                   method = 'smooth_simple_overlay',
                   base_size = 10,
                   x_axis_name = "Outcome distribution for profits in $/ha in potential decade for Typhoon/ No Typhoon scenario.")
# graphic output with and without drought - smooth
# (comment corrected: this plot shows the drought scenarios, not typhoon)
plot_distributions(mcSimulation_object = rice_mc_simulation,
                   vars = c("NPV_drought", "NPV_no_drought"),
                   method = 'smooth_simple_overlay',
                   base_size = 10,
                   x_axis_name = "Outcome distribution for profits in $/ha in potential decade for drought/ no drought scenario.")
|
dcbd54b04ce999778395f476cb4e136f32bd5107 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/divDyn/examples/collapse.Rd.R | 9f6e15e95d8a284ef7e5cdf310f2ae705c41188f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 609 | r | collapse.Rd.R | library(divDyn)
### Name: seqduplicated
### Title: Determination and omission of consecutive duplicates in a
### vector.
### Aliases: seqduplicated collapse
### ** Examples
# example vector
examp <- c(4,3,3,3,2,2,1,NA,3,3,1,NA,NA,5, NA, 5)
# seqduplicated()
seqduplicated(examp)
# contrast with
duplicated(examp)
# with NA removal
seqduplicated(examp, na.rm=TRUE)
# the same with collapse()
collapse(examp)
# contrast with
unique(examp)
# with NA removal
collapse(examp, na.rm=TRUE)
# with NA removal, no breaking
collapse(examp, na.rm=TRUE, na.breaks=FALSE)
|
73e28a2eb45b1bd0aa31046381b3c8e9d54a7bec | b3a6bcd5f0a47572af2435b8aabe25a45d5b43e1 | /RTI_Exercise_JustinFowler.R | 1cd0b15b1ef9f5f77ad6261e2db5d54e0484dfbd | [] | no_license | justinmfowler/exercises | e7acbdc32fbb9c4c9f05440d0a3ecd7cec2a483b | 8c36b9355dc5912ec188b9f6966c74efff0196d7 | refs/heads/master | 2021-01-16T08:54:50.801227 | 2015-01-29T15:29:35 | 2015-01-29T15:29:35 | 30,026,027 | 0 | 0 | null | 2015-01-29T15:18:22 | 2015-01-29T15:18:22 | null | UTF-8 | R | false | false | 3,353 | r | RTI_Exercise_JustinFowler.R | # RTI Exercise for Data Scientist 3 application
# Justin Fowler 01/29/2015
# Possible libraries to use
library(aod)
# library(foreign)
library(gmodels)
# library(vcd)
# library(epiR)
# library(NSM3)
# library(MASS)
library(rms)
library(ggplot2)
# library(lsmeans)
# library(contrast)
library(pROC)
# library(caTools)
# library(lmtest)
# Reading in data
# records is the original file called 'records' that was provided (not used)
# records_flat is the flattened file containing all tables from the database
# csvs were retrieved from SQLite
# NOTE(review): absolute Windows paths and View() calls make this
# non-portable/interactive-only; consider relative paths for re-runs.
records <- read.csv("C:/MSA/Spring/Job Apps/RTI/records.csv")
View(records)
records_flat <- read.csv("C:/MSA/Spring/Job Apps/RTI/records_flat.csv")
View(records_flat)
# Create factors for categorical variables.
# BUG FIX: the original used bare column names (factor(over_50k), ...) before
# attach(records_flat) was called, so a clean script run failed with
# "object not found"; the columns are now qualified explicitly.
records_flat$over_50k <- factor(records_flat$over_50k)
records_flat$workclass <- factor(records_flat$workclass)
records_flat$education_level <- factor(records_flat$education_level)
records_flat$marital_status <- factor(records_flat$marital_status)
records_flat$occupation <- factor(records_flat$occupation)
records_flat$relationship <- factor(records_flat$relationship)
records_flat$race <- factor(records_flat$race)
records_flat$sex <- factor(records_flat$sex)
records_flat$country <- factor(records_flat$country)
# Attach file for easier use (attach() is generally discouraged, but the rest
# of the script references the columns by bare name, so it is kept).
attach(records_flat)
names(records_flat)
# Checking various descriptive statistics
# (all bare column names below rely on attach(records_flat) above)
table(over_50k)
hist(age)
hist(hours_week)
table(workclass)
table(education_level, education_num) # these 2 variables provide the same information, don't use both
table(marital_status, relationship)
table(occupation)
table(sex)
table(race)
table(country)
table(over_50k, capital_gain)
table(over_50k, capital_loss) # the capital variables seem to create separation issues
# I'm not as familiar with R as I'd like
# I did what I could in the time I had, and will comment on things I missed
# Ideally I would create training, validation, and testing sets here
# I should also decide what to do with missing values (?)
# I should also check the logistic regression assumption for the continuous variables
# More time should be spent checking for interactions or collinearity too, I don't use any interactions
# Here I'm attempting to create a logistic regression
# I leave out education_num because it is redundant with education_level (perfect multicollinearity)
# I also leave out capital_gain and capital_loss due to separation issues
# Some of the variables could also possibly be combined or recoded for better results
# Use the summary function to see the parameter estimates
# (family="binomial" uses the default logit link; the formula reads the
# attached records_flat columns)
Logit.Model <- glm(over_50k ~ age + workclass + education_level + marital_status + occupation + relationship + race + sex + hours_week, family="binomial")
summary(Logit.Model)
# Many of the variables have significance at alpha=0.05
# However, a tighter significance level should be used
# To evaluate the categorical variables, I would want to obtain the type III analysis of effects (I tried, no luck)
# I attempted variable selection techniques, but it didn't seem to help
# Logit.ModelI <- glm(over_50k ~ 1, family=binomial(logit))
# step(Logit.ModelI, ~ age + workclass + education_level + marital_status + occupation + race + sex + hours_week + country, direction="forward")
# Obtaining and plotting ROC curve
# 0.88 area under the curve
# (pROC::roc takes response first, then the fitted probabilities)
Model.ROC <- roc(Logit.Model$y, Logit.Model$fitted)
print(Model.ROC)
plot(Model.ROC)
# With more time I would like to dive into further diagnostics as well
7567e016d700cca6500b2df48dbd376f4706cf77 | 3a650ae97b7d0c7eac5f1bbf0a9291218e9538e3 | /add_col_hpg.R | 6edb9fe6012a002d3f6460b717b4aea96449c98f | [] | no_license | liulihe954/Meth-Alternative-Splicing | 14eb721754be1d0000dbda9f359d3808573c6d41 | c22bb857e128285a4fa6be1014f241f573ae6a03 | refs/heads/master | 2023-03-28T10:09:18.223120 | 2021-03-29T19:11:31 | 2021-03-29T19:11:31 | 299,501,677 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,068 | r | add_col_hpg.R | load('/blue/mateescu/lihe.liu/AltSplicing/AltSplicing-R/meth_prop/out/DEXSeq_final.rda')
# Load DEXSeq results and the per-gene/per-bin methylation proportions.
load('/blue/mateescu/lihe.liu/AltSplicing/AltSplicing-R/meth_prop/out/DEXSeq_final.rda')
load('/blue/mateescu/lihe.liu/AltSplicing/AltSplicing-R/meth_prop/out/2b_assemble/assembled/integrate_by_gene_withCproportion.rda')
###
library(tidyverse)
# Per-gene proportion of significant counts over all counts, computed
# separately for exon bins (featureID starting "E") and intron bins ("I").
# NOTE: the result stays grouped by groupID (no ungroup()); `=` is used for
# assignment here, against the usual `<-` convention.
aggrate_by_gene = integrate_by_gene_out %>%
  #dplyr::slice(1:20) %>%
  group_by(groupID) %>%
  mutate(all_exon_prop = sum(count_sig[startsWith(featureID,'E')])/sum(count_all[startsWith(featureID,'E')])) %>%
  mutate(all_intron_prop = sum(count_sig[startsWith(featureID,'I')])/sum(count_all[startsWith(featureID,'I')]))
# One row per gene with just the two proportions (groupID is retained
# automatically because the tibble is grouped).
# NOTE(review): aggrate_by_gene_to_join is not used later in this script.
aggrate_by_gene_to_join = aggrate_by_gene %>%
  group_by(groupID) %>%
  dplyr::select(all_exon_prop,all_intron_prop) %>%
  slice(1)
# here we have prop information
head(aggrate_by_gene)
# here we have significance information
# Keep only groupID/featureID/pvalue-like columns by dropping positions
# 3:5 and 7:35.  NOTE(review): positional drops are fragile if the DEXSeq
# output layout changes.
out = DEXSeq_final
out_reduce = out %>%
  as_tibble() %>%
  group_by(groupID) %>%
  dplyr::select(-c(3:5,7:35))
head(out_reduce)
load('all_pval1.rda')
load('all_pval2.rda')
load('all_pval3.rda')
load('all_pval4.rda')
load('all_pval5.rda')
all_pval = c(all_pval1,all_pval2,all_pval3,all_pval4,all_pval5)
aggrate_by_gene_withp = aggrate_by_gene %>%
data.frame() %>%
mutate(pvalue = all_pval) %>%
as_tibble() %>%
relocate(pvalue,.after = featureID)
save(aggrate_by_gene_withp,file = 'aggrate_by_gene_withp.rda')
## Build per-row p-value vectors in five chunks and cache them as .rda files.
## These chunks generate the all_pval{1..5}.rda files that are load()ed
## earlier in this script, so this section must be run first.
##
## Refactored: the five original copy-pasted loops differed only in their row
## ranges, so the shared logic now lives in one helper; the result vector is
## preallocated instead of being grown one element at a time.

# Look up the DEXSeq p-value for each row of `aggrate_by_gene` selected by
# `idx`. Intron features (featureID starting with "I") carry no DEXSeq test,
# so they are assigned p = 1; exon features are matched against `out_reduce`
# on (groupID, featureID).
lookup_pvals <- function(idx) {
  gene_ids    <- (aggrate_by_gene %>% pull(groupID))[idx]
  feature_ids <- (aggrate_by_gene %>% pull(featureID))[idx]
  pvals <- numeric(length(idx))  # preallocate; avoids O(n^2) vector growth
  for (i in seq_along(idx)) {
    if (i %% 100 == 0) { print(i) }  # progress marker
    if (startsWith(feature_ids[i], 'I')) {
      pvals[i] <- 1
    } else {
      pvals[i] <- out_reduce %>%
        dplyr::filter(groupID == gene_ids[i] & featureID == feature_ids[i]) %>%
        pull(pvalue)
    }
  }
  pvals
}

out_dir <- '/blue/mateescu/lihe.liu/AltSplicing/AltSplicing-R/meth_prop/combine_tmp'

# 1
all_pval1 <- lookup_pvals(1:50000)
save(all_pval1, file = file.path(out_dir, 'all_pval1.rda'))
# 2
all_pval2 <- lookup_pvals(50001:100000)
save(all_pval2, file = file.path(out_dir, 'all_pval2.rda'))
# 3
all_pval3 <- lookup_pvals(100001:150000)
save(all_pval3, file = file.path(out_dir, 'all_pval3.rda'))
# 4
all_pval4 <- lookup_pvals(150001:200000)
save(all_pval4, file = file.path(out_dir, 'all_pval4.rda'))
# 5
all_pval5 <- lookup_pvals(200001:271710)
save(all_pval5, file = file.path(out_dir, 'all_pval5.rda'))
|
6c9b68ef93ab1ae7c9daa76722cd367d8c3947eb | 6f2cccf7c9aaa8f473ca1749ebd4e0f62b3452e8 | /FlowingData/00 Visualization in R/Week_2/02 Separate Chart Components/04 How to Display Text in R/labels-tutorial.R | d1f463f1e3027fc493cb39fd53f6dbb96e556ce7 | [] | no_license | abudish/Course_Materials_and_Certificates | 01138ef2203800f6c279cfd68d8ad5eea4a07c5d | fee27859569624f1e8df143001e1ab995b10088c | refs/heads/master | 2021-06-04T09:15:20.516215 | 2020-12-17T18:16:19 | 2020-12-17T18:16:19 | 109,249,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,709 | r | labels-tutorial.R | # Hello, world.
# Tutorial: placing and styling text labels with base-graphics text().
# Each section draws an empty plot and adds labels on top of it.

# Hello, world. -- a single centered label
plot(0, 0, type="n", xlim=c(0, 2), ylim=c(0, 2), xlab="", ylab="")
text(1, 1, 'Hello, world.')

# Hello, world. x3 -- one text() call per label
plot(0, 0, type="n", xlim=c(0, 2), ylim=c(0, 2), xlab="", ylab="")
text(1, 1, 'Hello, world.') # Middle
text(1, 2, 'Hello, top of world.') # Top
text(1, 0, 'Hello, bottom of world.') # Bottom

# Hello, world. x3 in one call -- text() is vectorized over x, y, labels
plot(0, 0, type="n", xlim=c(0, 2), ylim=c(0, 2), xlab="", ylab="")
x <- c(1, 1, 1)
y <- c(1, 2, 0)
labels <- c('Hello, world.', 'Hello, top of world.', 'Hello, bottom of world.')
text(x, y, labels)

# Available font families for the PDF device
names(pdfFonts())
plot(0, 0, type="n", xlim=c(0, 2), ylim=c(0, 2), xlab="", ylab="")
text(x, y, labels, family='Courier')
text(1, 1.5, 'Helvetica', family='Helvetica')
# fix: the label previously read 'Bookman' while the rendered family was Palatino
text(1, 0.5, 'Palatino', family='Palatino')

# Font size via cex (character expansion, relative to the default of 1.0)
plot(0, 0, type="n", xlim=c(0, 2), ylim=c(0, 2), xlab="", ylab="")
text(x, y, labels, family='Courier', cex=1.0)
text(1, 1.5, 'Helvetica', family='Helvetica', cex=3.0)
# fix: label now names the font actually used (Palatino, not Bookman)
text(1, 0.5, 'Palatino', family='Palatino', cex=0.5)

# Color -- print the first 40 built-in color names in their own color,
# each at a random size, on a black background
plot(0, 0, type="n", xlim=c(0, 2), ylim=c(0,20), xlab="", ylab="", xaxt="n", yaxt="n", bty="n")
rect(0, 0, 2, 30, col="black")
for (i in 1:40) {
  text(1, i/2, colors()[i], col=colors()[i], cex=runif(1, 0.2, 1.3))
}

# Put it into practice with real data: scale each name's label by its
# total count (square-root scaling tempers the size differences)
load('unisexCnts.RData')
nameTotals <- rowSums(unisexCnts)
plot(0, 0, type="n", xlim=c(-5, 105), ylim=c(-5,105), xlab="", ylab="", xaxt="n", yaxt="n", bty="n")
x <- runif(length(nameTotals), 0, 100)
y <- runif(length(nameTotals), 0, 100)
text(x, y, names(nameTotals), cex=2*sqrt(nameTotals/max(nameTotals)))

# Supplement the area graph (http://datafl.ws/21a) with labels
source('areagraph.R')
areaGraph(unisexCnts)
|
575aced85ac09e8176b1929f0d7b1151ad2926ef | 89513b41bd021137c72fa7cf3e57bcef4bf6c933 | /scRNAseq_R_scripts/Figure 4.R | 55490ac48397ebbfa7b3870a49dd78524e421c92 | [] | no_license | lifan36/Zhan-Fan-et-al-2019-scRNAseq | 7166156ca6702a41990236e9da386c44440b6609 | c008b3ff49d567a1abe9d9eed42afed4e9a27b42 | refs/heads/master | 2020-10-27T10:29:10.176374 | 2020-10-24T22:09:53 | 2020-10-24T22:09:53 | 260,483,347 | 0 | 0 | null | 2020-10-24T22:16:42 | 2020-05-01T14:47:22 | R | UTF-8 | R | false | false | 3,353 | r | Figure 4.R | # Figure 4a
library(ggplot2)
library(Seurat)
library(ggrepel)
library(RColorBrewer)
library(dplyr)
library(cowplot)
library(reshape2)
library(MAST)
library(patchwork)
# Load in Seurat object
# NOTE(review): setwd("") is an empty path and will error -- the intended
# working directory was presumably stripped; fill in before running.
setwd("")
data<-readRDS("elife_microglial_cells_only.rds")
Idents(data) <- "Condition"
# Find differentially expressed markers for D0 vs Ctrl using the MAST test,
# keeping all genes (logfc.threshold = 0) so the volcano plot is complete:
marker <- FindMarkers(data, ident.1 = "D0", ident.2 = "Ctrl", logfc.threshold = 0, test.use = "MAST")
write.csv(marker, "D0vsCtrl_markers.csv")
# Volcano plot of marker genes =====
# Classify each gene as UP / DN / NC by |log2FC| >= 0.5 and adjusted p <= 0.05
marker$colours <- c("NC")
marker$colours[marker$avg_logFC >= 0.5 & marker$p_val_adj <= 0.05] <- c("UP")
marker$colours[marker$avg_logFC <= -0.5 & marker$p_val_adj <= 0.05] <- c("DN")
# Selected genes to highlight (mature vs immature microglia signatures)
genes_select_mature <- c("Cx3cr1", "Csf1r", "Mafb", "Tmem119","P2ry12","ApoE")
genes_to_plot_mature <- marker[row.names(marker) %in% genes_select_mature, ]
genes_to_plot_mature$Cluster <- "Mature"
genes_select_immature <- c("Lgals3", "Il1b","Cd52","Cd74","Lyz2")
genes_to_plot_immature <- marker[row.names(marker) %in% genes_select_immature, ]
genes_to_plot_immature$Cluster <- c("Immature")
genes_to_plot <- rbind(genes_to_plot_mature, genes_to_plot_immature)
# Set color palette
# NOTE(review): my_color is defined but never used in this chunk; only
# my_color_1 (DN = blue, NC = grey, UP = red) feeds the plot below.
my_color <- c("#2B8CBE", "#D7301F", "skyblue","seashell3", "plum1")
my_color_1 <- c("#2B8CBE","Grey", "#D7301F")
# Plot volcano plot
# NOTE(review): dev.off() here closes the active graphics device and errors
# if none is open -- confirm this is intentional before the ggplot call.
dev.off()
ggplot() +
  geom_point(data=marker, aes(x=avg_logFC, y=-log10(p_val_adj), colour=colours),
             shape=19, alpha=1, size=1) +
  scale_color_manual(values = my_color_1,
                     name="DEGs",
                     breaks=rev(names(table(marker$colours))),
                     labels=rev(names(table(marker$colours)))) +
  geom_point(data=genes_to_plot,
             aes(x=avg_logFC, y=-log10(p_val_adj)),
             shape=19, alpha=1, size=3) +
  geom_text_repel(data=genes_to_plot,
                  aes(x=avg_logFC, y=-log10(p_val_adj), label = row.names(genes_to_plot)),
                  color="black", fontface = 'bold',size = 5, box.padding = 0.5,
                  point.padding = 0.5, segment.size=0.25, segment.colour="black") +
  ylab("-Log10[FDR]") + xlab("Log2FC") +
  ggtitle("D0 vs Ctrl")+
  theme_bw()+
  theme(panel.grid.major.x = element_blank(),
        panel.grid.major.y = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_rect(colour = "black", size = 1),
        axis.text.x = element_text(colour = "black", size=15),
        axis.text.y = element_text(colour = "black", size=15),
        axis.title.x = element_text(colour = "black", size=15),
        axis.title.y = element_text(colour = "black", size=15),
        plot.title = element_text(size = 15, face = "bold")) +
  theme(aspect.ratio = 1) +
  scale_x_continuous(breaks=seq(-2, 2, 1), limits=c(-2, 2))+
  NoLegend()
# NOTE(review): path = "" -- destination directory was presumably stripped.
ggsave("Volcano_D0vsCtrl.pdf", plot = last_plot(), device = "pdf", path = "",
       scale = 0.8, width = 7, height = 7, units = c("in"),
       dpi = 600, limitsize = FALSE)
# #Figure 4b
#change the color code
# NOTE(review): LG180_integrated is not defined anywhere in this script -- the
# loaded object is `data`; confirm which Seurat object this should plot.
FeaturePlot(LG180_integrated, features = c("Lgals3")) &
  theme( plot.title = element_text( face = "italic") )
#Figure 4c
# NOTE(review): Idents(data) was set to "Condition" above, so subsetting on
# idents = "4" assumes the identities were reset to cluster numbers -- verify.
Cluster_4 <- subset(data, idents = "4")
VlnPlot(Cluster_4, features = "Lgals3", group.by = "Condition", pt.size = 0.1) &
  theme( plot.title = element_text( face = "italic") )
|
14d3f09b32a42810b65252544ddc9b18ebdc3367 | 9ba135184226f0f7583860aa35a29dfd11a0e4e5 | /data/early_work_tidybugComps/Rwork_fromTopOrder_csvOutput_toPlot.R | 3e4832eebe2d7c7bc18790de1e114f1184fc1efa | [] | no_license | devonorourke/COIdatabases | 8dab7fbb55b00b6c3109f5240c49d16a7655f9a1 | 3a4738921551d199504439557a0e61b6ed76d1e3 | refs/heads/master | 2023-01-23T05:17:17.167932 | 2020-12-09T16:12:53 | 2020-12-09T16:12:53 | 286,090,194 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,995 | r | Rwork_fromTopOrder_csvOutput_toPlot.R | -modify format of file for plot
##### Assemble per-order cross-validation metrics into one long table ----
# Reads the data.tsv produced for each taxonomic order listed in
# topOrder.list and writes a tidy CSV (Dataset / Level / Metric / Value).
library(stringr)
library(ggplot2)
library(tidyr)
setwd("/scratch/dro49/qiimetmp/tidybug_crossval")
orderlist <- read.csv("topOrder.list", header=FALSE, stringsAsFactors=FALSE)
taxalist <- str_replace(orderlist$V1, "o__", "")   # strip the "o__" rank prefix
# Build one path per order with file.path/paste0 instead of a raw paste()
filenames <- file.path(getwd(), paste0("tidybug_tmpdir_", taxalist, "_evalClassifications"), "data.tsv")
# Iterate over (path, taxon) pairs so the Dataset label comes straight from
# taxalist -- the original re-derived it by stripping a hard-coded absolute
# path prefix from each filename, which broke if the directory ever moved.
dat_list <- Map(function(path, taxon) {
  tmp <- read.delim(path, header=TRUE, skip=2)
  tmp <- tmp[,-1]                     # drop the leading row-index column
  colnames(tmp) <- c("Level", "Precision", "Recall", "F.Measure", "Dataset")
  tmp$Dataset <- taxon
  tmp
}, filenames, taxalist)
# Stack all orders and pivot the three metric columns into long format
dat_df <- do.call("rbind", dat_list) %>%
  pivot_longer(c(-Dataset, -Level), names_to="Metric", values_to="Value")
rm(dat_list, taxalist, orderlist, filenames)
write.csv(dat_df, file="topOrder_data.csv", quote=FALSE, row.names = FALSE)
########## to plot:
# Faceted F-measure-by-rank plot, one panel per order, colored by log10
# sequence count. Run locally on the CSV written by the assembly step above.
library(tidyverse)
library(scales)
library(scico)
dat_df <- read.csv(file="~/Desktop/coi_qiime_tmp/topOrder_data.csv")
count_df <- read.csv(file="~/Desktop/coi_qiime_tmp/top16order_seqCounts.csv")
dat_df <- merge(dat_df, count_df)
dat_df$logVals <- log10(dat_df$nSeqs)
# NOTE(review): PlotLabel is built but never used in the plot below; it also
# lacks a separator/newline between the name and "n = ".
dat_df$PlotLabel <- paste0(dat_df$Dataset, "n = ", dat_df$logVals)
ggplot(dat_df %>% filter(Metric == 'F.Measure'),
       aes(x=Level, y=Value, group=Dataset, color=logVals)) +
  facet_wrap(~Dataset) +
  geom_point() +
  geom_line() +
  # breaks are log10 of 10,000 / 40,000 / 160,000 / 640,000, matching the labels
  scale_colour_scico(palette = 'batlow', begin = 0.2,
                     breaks=c(4,4.60206,5.20412,5.80618),
                     labels=c('10,000','40,000','160,000','640,000')) +
  # Level is numeric 1..6; relabel the axis with the corresponding rank names
  scale_x_continuous(labels=c("Phylum", "Class", "Order", "Family", "Genus", "Species")) +
  theme_bw() +
  theme(axis.text.x=element_text(angle=45, hjust=1, size=7),
        panel.grid.minor = element_blank()) +
  labs(x="\nRank level", y="F-measure\n", color="num Seqs")
|
0086afdc320ad86895390a0136939f08eb9d9468 | 493583c405b9e6267b25b7db400ee32f18ae092f | /man/Data_get_workflow.Rd | 2ff9cf37b8ea09321c007bc6c7174fabd51b7b08 | [
"MIT"
] | permissive | dbescond/iloData | 69a3e2b78b3d868799384c1dd085b1e1e87c44cd | c4060433fd0b7025e82ca3b0a213bf00c62b2325 | refs/heads/master | 2021-01-21T19:54:33.877674 | 2018-07-05T11:30:47 | 2018-07-05T11:30:47 | 92,175,594 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 568 | rd | Data_get_workflow.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Data_backup_cmd.r, R/Data_get_workflow.r
\name{Data_backup_cmd}
\alias{Data_backup_cmd}
\alias{Data_get_workflow}
\title{backup do for Master files}
\usage{
Data_backup_cmd(wd)
Data_get_workflow()
}
\description{
no longer used
helper to work with bulk download processing workflow
}
\examples{
## Not run:
## End(**Not run**)
## Not run:
## End(**Not run**)
}
\author{
ILO / bescond
ILO / bescond
}
\keyword{ILO,}
\keyword{microdataset,}
\keyword{preprocessing}
\keyword{processing}
|
5128af1070b6ac600a215e2841128ea26fed18c1 | 5ba797d66deda5cdb2e34469090dec56356631d8 | /script.r | cfb87c9535aa3fe9cddf13c31927cdb44fc7c1fb | [] | no_license | JonMinton/mort_ternary | 23ed980bd75a408d535d27cb07fd70e2d7f7c725 | c011f94e4b077dc6558a1d8d9a17bfb63db3096d | refs/heads/master | 2020-06-11T19:48:00.845697 | 2015-02-25T14:53:54 | 2015-02-25T14:53:54 | 31,318,080 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 436 | r | script.r | rm(list=ls())
require(plyr)
require(tidyr)
require(dplyr)
require(ggplot2)
require(ggtern)
counts <- read.csv("data/counts.csv")
counts <- counts %>%
tbl_df %>%
mutate(death_rate=death_count/population_count)
eng <- counts %>%
filter(country=="GBRTENW") %>%
filter(sex=="total") %>%
filter(age < 80)
ggtern(aes(x=age, y=year, z=log(death_rate)), data=eng) + geom_point()
# Rejecting this approach - doesn't seem useful |
4bd11c1918ff2bcbb70363d622e076f2667773b8 | 8da89ff84396fb73d5b139398e351adf0cf1a080 | /man/tdp1718_check_6.Rd | 1511a0f716aae5cac710fa74d95ae22a2a3a5295 | [] | no_license | UBESP-DCTV/rexams | f4ce0347213a2eac678414910082c3daf8d9f977 | f2353d7db89b8386be0feeeda365e82489881db5 | refs/heads/master | 2020-03-20T10:50:10.725246 | 2018-09-10T11:32:37 | 2018-09-10T11:32:37 | 137,386,189 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 354 | rd | tdp1718_check_6.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checks_tdp1718.R
\name{tdp1718_check_6}
\alias{tdp1718_check_6}
\title{tdp check 6}
\usage{
tdp1718_check_6(text_var = "causa_del_decesso")
}
\arguments{
\item{text_var}{\link{chr} name of the variable to transform}
}
\value{
logical (invisibly)
}
\description{
tdp check 6
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.