blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f62c788844806ab2bf127f6500c1e993461ca9c
|
3a153e714918af3684d9341d250089576e727b21
|
/workflow/Code/resamp_analysis.R
|
f357a0fd5678922da89a57e56d94407992b65352
|
[] |
no_license
|
SarahVal/geb_text_class_2020
|
434a6ded390805a70682616667111b08d5fafa1c
|
64206d7759df7af7b1bc37ff65e56e78f8391438
|
refs/heads/master
| 2023-03-23T23:51:33.990376
| 2020-10-09T12:39:43
| 2020-10-09T12:39:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,068
|
r
|
resamp_analysis.R
|
####
# R script for analysing the AUC scores etc of the resampled models
####
# Clear environment
# NOTE(review): rm(list = ls()) in a script wipes the user's workspace and is
# generally discouraged; kept to preserve the original workflow.
rm(list = ls())
graphics.off()
# Load packages with library(), which errors immediately if a package is
# missing (require() only returns FALSE and lets the script continue until
# a later, more confusing failure).
# cvAUC: cross-validated AUC and its confidence interval
library(cvAUC)
# H measure
library(hmeasure)
# ROCR: prediction()/performance() for ROC curves
library(ROCR)
# ggplot
library(ggplot2)
# plyr: ddply() is used in the main code below but was never loaded before
library(plyr)
# Functions
# Function to convert string of numbers to vector
# Convert a stringified list of numbers (e.g. "[0.1, 0.5, 0.9]", as exported
# by the Python modelling pipeline) into a numeric vector.
#
# input_str: a single character string with numbers separated by spaces
#            and/or commas, optionally wrapped in square brackets.
# Returns a numeric vector of the parsed values (length 0 for "[]").
#
# Fixes vs the original:
# * the 1:length(str) loop iterated once even for empty input, returning a
#   spurious length-1 NA for "[]"; as.numeric() is vectorised, so no loop
#   is needed at all;
# * consecutive separators (e.g. ", ") produced empty tokens that became
#   NA; empty tokens are now dropped before conversion.
str_to_vector <- function(input_str){
  # Strip the list delimiters "[", "]" and ","
  cleaned <- gsub("\\[|\\]|,", "", input_str)
  # Split on spaces and drop empty tokens left by repeated separators
  parts <- strsplit(cleaned, ' ')[[1]]
  parts <- parts[parts != ""]
  # as.numeric() converts the whole vector at once
  return(as.numeric(parts))
}
# Compute cross-validated AUC metrics for every resampled model in input_df.
#
# input_df: data frame with one row per CV fold per model. Expected columns
#   (from the resampling pipeline): Attribute, Seed_number, Stop_words
#   (together identify a model), CV_Predicted_Probs / CV_True_Class
#   (stringified per-fold prediction/label vectors), and
#   Test_Predicted_Probs / Test_True_Class (held-out test predictions,
#   repeated on every row of a model).
# Returns a named list with one element per model holding: per-fold
#   predictions and labels, Test_AUC, cvAUC/ci.cvAUC summaries and an
#   averaged ROC curve (ROCR performance object).
cv_metrics_calc_resamp <- function(input_df){
  # Convert prob/class cols to character (they may be factors from read.csv)
  input_df$CV_Predicted_Probs <- as.character(input_df$CV_Predicted_Probs)
  input_df$CV_True_Class <- as.character(input_df$CV_True_Class)
  # One ID per model: attribute x seed x stop-word setting
  input_df['UniqueID'] <- paste(as.character(input_df$Attribute), as.character(input_df$Seed_number), as.character(input_df$Stop_words), sep = '_')
  # List to store model metrics, keyed by UniqueID
  model_metrics_list <- list()
  for (i in unique(input_df$UniqueID)){
    sub_df <- subset(input_df, input_df$UniqueID == i)
    model_metrics_list[[i]] <- list()
    # Per-fold containers: lists for cvAUC(), flat vectors for ci.cvAUC()
    y_pred_list <- list()
    y_true_list <- list()
    fold_id_list <- list()
    y_pred_vect <- c()
    y_true_vect <- c()
    fold_id_vect <- c()
    # Loop through each cv fold (one row of sub_df per fold)
    for (j in seq_len(nrow(sub_df))){
      fold_name <- paste('Fold', j, sep = '_')
      model_metrics_list[[i]][[fold_name]] <- list()
      # Parse the stringified prediction/label vectors for this fold
      tmp_pred <- str_to_vector(sub_df$CV_Predicted_Probs[j])
      tmp_true <- str_to_vector(sub_df$CV_True_Class[j])
      tmp_fold <- rep(j, length(tmp_true))
      # Accumulate fold values for the post-loop AUC calculations
      y_pred_list[[j]] <- tmp_pred
      y_true_list[[j]] <- tmp_true
      fold_id_list[[j]] <- tmp_fold
      y_pred_vect <- c(y_pred_vect, tmp_pred)
      y_true_vect <- c(y_true_vect, tmp_true)
      fold_id_vect <- c(fold_id_vect, tmp_fold)
      model_metrics_list[[i]][[fold_name]][['Predictions']] <- tmp_pred
      model_metrics_list[[i]][[fold_name]][['True_Labels']] <- tmp_true
      # Held-out test predictions are the same on every row of the model,
      # so read them from the first fold only
      if (j == 1){
        tmp_pred_test <- str_to_vector(sub_df$Test_Predicted_Probs[j])
        tmp_true_test <- str_to_vector(sub_df$Test_True_Class[j])
        # BUG FIX: the original computed AUC(tmp_pred, tmp_true) -- the AUC
        # of the first CV fold -- while labelling it Test_AUC (and built an
        # unused ROCR prediction object from the test values). Use the
        # test-set predictions that were just extracted instead.
        model_metrics_list[[i]][['Test_AUC']] <- AUC(tmp_pred_test, tmp_true_test)
      }
    }
    # Cross-validated AUC (per-fold lists) and its 95% CI (pooled vectors)
    model_metrics_list[[i]][['cvAUC']] <- cvAUC(y_pred_list, y_true_list, folds = fold_id_list)
    model_metrics_list[[i]][['cicvAUC']] <- ci.cvAUC(y_pred_vect, y_true_vect, folds = fold_id_vect)
    # Average ROC across folds (ROCR averages list-valued predictions)
    cv_pred <- prediction(y_pred_list, y_true_list)
    model_metrics_list[[i]][['cv_ROC']] <- performance(cv_pred, 'tpr', 'fpr')
  }
  return(model_metrics_list)
}
# Build a one-row-per-model data frame that carries over the descriptor
# columns of input_df and adds the averaged cross-validated AUC.
#
# input_list: named list of per-model metric lists (output of
#             cv_metrics_calc_resamp); each element must contain
#             cicvAUC$cvAUC.
# input_df:   the raw metrics data frame; assumes 10 rows (folds) per model,
#             in the same model order as input_list -- TODO confirm if the
#             fold count ever changes.
# Returns a data frame with the retained descriptor columns plus avAUC.
create_resamp_AUC_H_df <- function(input_list, input_df){
  # Fold/prediction-level columns that do not describe the model itself.
  # (The original comment also listed "Classifier", but it was never removed;
  # behaviour kept.)
  drop_cols <- c('N_training_docs', 'Cost_function', 'CV_folds', 'Fold',
                 'CV_Predicted_Probs', 'CV_True_Class', 'N_test_docs',
                 'Test_Predicted_Probs', 'Test_True_Class')
  # BUG FIX: the original used cols[-which(...)], which returns an EMPTY
  # vector when none of the drop columns are present (x[-integer(0)] selects
  # nothing); setdiff() is safe in that case.
  cols <- c(setdiff(colnames(input_df), drop_cols), "avAUC")
  # Pre-sized output: one row per model
  AUC_H_df <- data.frame(matrix(nrow = length(input_list), ncol = length(cols)))
  names(AUC_H_df) <- cols
  # First row of each 10-row model block in input_df
  df_indices <- seq(1, nrow(input_df), 10)
  for (i in seq_along(input_list)){
    for (j in seq_along(cols)){
      if (cols[j] == 'avAUC'){
        # Averaged cross-validated AUC from ci.cvAUC
        AUC_H_df[i, j] <- input_list[[i]]$cicvAUC$cvAUC
      } else {
        # Descriptor columns are copied as character from the model's first row
        AUC_H_df[i, j] <- as.character(input_df[df_indices[i], which(colnames(input_df) == cols[j])])
      }
    }
  }
  return(AUC_H_df)
}
# Summary statistics of the avAUC column, shaped for use as a ddply helper:
# min/max, mean +/- one standard error, the 95% normal-approximation CI of
# the mean, and the 2.5% / 50% / 97.5% quantiles.
#
# x: a data frame (one Dataset x Model group) containing an avAUC column.
# Returns a one-row data frame with columns ymin, q2_5, lower, lo_CI,
# middle, q50, up_CI, upper, q97_5, ymax.
min_mean_se_max_resamp <- function(x) {
  vals <- x$avAUC
  avg <- mean(vals)
  std_err <- sd(vals) / sqrt(length(vals))
  quants <- as.numeric(quantile(vals, c(0.025, 0.5, 0.975)))
  data.frame('ymin' = min(vals),
             'q2_5' = quants[1],
             'lower' = avg - std_err,
             'lo_CI' = avg - 1.96 * std_err,
             'middle' = avg,
             'q50' = quants[2],
             'up_CI' = avg + 1.96 * std_err,
             'upper' = avg + std_err,
             'q97_5' = quants[3],
             'ymax' = max(vals))
}
####
# Main code
####
# Load resampled data (one CSV per model family x indicator dataset;
# paths are relative to the workflow/Code directory)
lr_lpi_resamp_df <- read.csv('../Results/Model_metrics/LR/lpi_resample_metrics.csv')
lr_predicts_resamp_df <- read.csv('../Results/Model_metrics/LR/predicts_resample_metrics.csv')
nn_lpi_resamp_df <- read.csv('../Results/Model_metrics/NN/lpi_resample_metrics.csv')
nn_predicts_resamp_df <- read.csv('../Results/Model_metrics/NN/predicts_resample_metrics.csv')
# Calculate metrics
lr_lpi_resamp_metr_list <- cv_metrics_calc_resamp(input_df = lr_lpi_resamp_df)
lr_predicts_resamp_metr_list <- cv_metrics_calc_resamp(input_df = lr_predicts_resamp_df)
nn_lpi_resamp_metr_list <- cv_metrics_calc_resamp(input_df = nn_lpi_resamp_df)
nn_predicts_resamp_metr_list <- cv_metrics_calc_resamp(input_df = nn_predicts_resamp_df)
# Export to df (one row per resampled model, with its averaged cv AUC)
lr_lpi_resamp_auc_df <- create_resamp_AUC_H_df(input_list = lr_lpi_resamp_metr_list, input_df = lr_lpi_resamp_df)
lr_predicts_resamp_auc_df <- create_resamp_AUC_H_df(input_list = lr_predicts_resamp_metr_list, input_df = lr_predicts_resamp_df)
nn_lpi_resamp_auc_df <- create_resamp_AUC_H_df(input_list = nn_lpi_resamp_metr_list, input_df = nn_lpi_resamp_df)
nn_predicts_resamp_auc_df <- create_resamp_AUC_H_df(input_list = nn_predicts_resamp_metr_list, input_df = nn_predicts_resamp_df)
# Add dataset col
lr_lpi_resamp_auc_df['Dataset'] <- 'LPD'
lr_predicts_resamp_auc_df['Dataset'] <- 'PREDICTS'
nn_lpi_resamp_auc_df['Dataset'] <- 'LPD'
nn_predicts_resamp_auc_df['Dataset'] <- 'PREDICTS'
# Add model label col (lr_* data frames -> 'LR A', nn_* -> 'CNN A')
lr_lpi_resamp_auc_df['Model'] <- 'LR A'
lr_predicts_resamp_auc_df['Model'] <- 'LR A'
nn_lpi_resamp_auc_df['Model'] <- 'CNN A'
nn_predicts_resamp_auc_df['Model'] <- 'CNN A'
# Rbind
resamp_auc_df <- rbind(lr_lpi_resamp_auc_df[c('Dataset', 'Model', 'avAUC')],
lr_predicts_resamp_auc_df[c('Dataset', 'Model', 'avAUC')],
nn_lpi_resamp_auc_df[c('Dataset', 'Model', 'avAUC')],
nn_predicts_resamp_auc_df[c('Dataset', 'Model', 'avAUC')])
# Summarise avAUC per Dataset x Model.
# NOTE(review): ddply() comes from the plyr package -- make sure plyr is
# attached before this line (only cvAUC/hmeasure/ROCR/ggplot2 are loaded at
# the top of this script).
resamp_bp_df <- ddply(resamp_auc_df, .(Dataset, Model), min_mean_se_max_resamp)
# resamp_bp_df$ymin
# 0.9696655 0.9815266 0.9734725 0.9880587
# Load original model scores ([1,] keeps only the first row of each file,
# i.e. the selected model)
lr_lpi_orig_mod_df <- read.csv('../Results/Model_metrics/LR/lpi_models_to_use.csv')[1,]
lr_predicts_orig_mod_df <- read.csv('../Results/Model_metrics/LR/predicts_models_to_use.csv')[1,]
nn_lpi_orig_mod_df <- read.csv('../Results/Model_metrics/NN/lpi_models_to_use.csv')[1,]
nn_predicts_orig_mod_df <- read.csv('../Results/Model_metrics/NN/predicts_models_to_use.csv')[1,]
# Label the original models with the same Dataset/Model keys as above
lr_lpi_orig_mod_df['Dataset'] <- 'LPD'
lr_predicts_orig_mod_df['Dataset'] <- 'PREDICTS'
nn_lpi_orig_mod_df['Dataset'] <- 'LPD'
nn_predicts_orig_mod_df['Dataset'] <- 'PREDICTS'
lr_lpi_orig_mod_df['Model'] <- 'LR A'
lr_predicts_orig_mod_df['Model'] <- 'LR A'
nn_lpi_orig_mod_df['Model'] <- 'CNN A'
nn_predicts_orig_mod_df['Model'] <- 'CNN A'
orig_mod_df <- rbind(lr_lpi_orig_mod_df[c('Dataset', 'Model', 'avAUC')],
lr_predicts_orig_mod_df[c('Dataset', 'Model', 'avAUC')],
nn_lpi_orig_mod_df[c('Dataset', 'Model', 'avAUC')],
nn_predicts_orig_mod_df[c('Dataset', 'Model', 'avAUC')])
# Merge original and resampled dfs (adds the original model's avAUC as a
# column alongside the resampling summary statistics)
resamp_bp_df <- merge(x = resamp_bp_df, y = orig_mod_df[c('Model', 'Dataset', 'avAUC')], by = c('Model', 'Dataset'))
# Fix the x-axis ordering of the models in the plot
resamp_bp_df$Model <- factor(resamp_bp_df$Model,
levels = c("LR A", "CNN A"))
# Plot variation in avAUC: coloured bars = 2.5-97.5% quantile range of the
# resampled models, coloured dots = resampled median, black diamonds = the
# original (non-resampled) model's avAUC
resamp_plt <- ggplot(data = resamp_bp_df) +
geom_errorbar(aes(x = Model, ymin = q2_5, ymax = q97_5, width = 0, colour = Dataset),
size = 1,
position = position_dodge(width = 0.3),
show.legend = F) +
geom_point(aes(x = Model, y = q50, colour = Dataset),
pch = 16, size = 3.5, position = position_dodge(width = 0.3)) +
geom_point(aes(x = Model, y = avAUC, group = Dataset),
pch = 18, colour = 'black', size = 3.5, alpha = 0.7,
position = position_dodge(width = 0.3)) +
ylab('Average AUC') +
xlab('Model') +
geom_vline(xintercept = 1.5, lty = 'dashed', colour = 'grey50') +
scale_y_continuous(breaks = c(0.95, 1.00),
limits = c(0.95, 1.00)) +
scale_color_manual(name = 'Indicator dataset',
values = c("#E69F00", "#009E73"),
breaks = c('LPD', 'PREDICTS')) +
theme_bw() +
theme(axis.text = element_text(size = 16),
axis.title = element_text(size = 20),
legend.text = element_text(size = 16),
legend.title = element_text(size = 18),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
resamp_plt
# ggsave(plot = resamp_plt, filename = 'resamp_plt.pdf',
# path = '../Results/Figs',
# width = 8, height = 5, dpi = 300, device = "pdf")
####
# Standard stats
####
# Range/mean/sd of avAUC for each model x dataset combination
range(resamp_auc_df[which(resamp_auc_df$Model == 'LR A' & resamp_auc_df$Dataset == 'LPD'),]$avAUC)
mean(resamp_auc_df[which(resamp_auc_df$Model == 'LR A' & resamp_auc_df$Dataset == 'LPD'),]$avAUC)
sd(resamp_auc_df[which(resamp_auc_df$Model == 'LR A' & resamp_auc_df$Dataset == 'LPD'),]$avAUC)
range(resamp_auc_df[which(resamp_auc_df$Model == 'LR A' & resamp_auc_df$Dataset == 'PREDICTS'),]$avAUC)
mean(resamp_auc_df[which(resamp_auc_df$Model == 'LR A' & resamp_auc_df$Dataset == 'PREDICTS'),]$avAUC)
sd(resamp_auc_df[which(resamp_auc_df$Model == 'LR A' & resamp_auc_df$Dataset == 'PREDICTS'),]$avAUC)
range(resamp_auc_df[which(resamp_auc_df$Model == 'CNN A' & resamp_auc_df$Dataset == 'LPD'),]$avAUC)
mean(resamp_auc_df[which(resamp_auc_df$Model == 'CNN A' & resamp_auc_df$Dataset == 'LPD'),]$avAUC)
sd(resamp_auc_df[which(resamp_auc_df$Model == 'CNN A' & resamp_auc_df$Dataset == 'LPD'),]$avAUC)
range(resamp_auc_df[which(resamp_auc_df$Model == 'CNN A' & resamp_auc_df$Dataset == 'PREDICTS'),]$avAUC)
mean(resamp_auc_df[which(resamp_auc_df$Model == 'CNN A' & resamp_auc_df$Dataset == 'PREDICTS'),]$avAUC)
sd(resamp_auc_df[which(resamp_auc_df$Model == 'CNN A' & resamp_auc_df$Dataset == 'PREDICTS'),]$avAUC)
|
20c7677981f121ec8649f65816289b6c799064f1
|
63d65462159caec758526256b242350653bc6c95
|
/7_1_21_voltage.R
|
0c70fc5d8de1ce14b3d798a5f8c7de1928f6f8e2
|
[] |
no_license
|
foliva3/buckthorn_sap_flux
|
a2db03cbf939a990ea1d417a9b2d12ab23f0a763
|
ee734385708b818cebc891277af85d2b521da83f
|
refs/heads/main
| 2023-06-28T07:41:18.480466
| 2021-07-20T13:55:13
| 2021-07-20T13:55:13
| 381,122,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 808
|
r
|
7_1_21_voltage.R
|
library(lubridate)
library(ggplot2)
# Read the Campbell datalogger output: the first 4 lines are header metadata
# and "NAN" encodes missing values. With header = FALSE the columns are
# auto-named V1, V2, ...
datav <- read.csv("K:\\Environmental_Studies\\hkropp\\Data\\campus\\buckthorn\\sapflux\\campbell\\07_01_2021\\Sapflow_TableTC.dat",
skip = 4, header = FALSE, na.strings = "NAN")
# Keep the timestamp column plus the two heater voltage columns (165:166)
tablev <- datav[,c(1,165:166)]
colnames(tablev) <- c("date","Htr1", "Htr2")
# Parse timestamps to POSIXct.
# BUG FIX: the original ran ymd_hms(datav$date), but datav has no 'date'
# column (it was read with header = FALSE), so the parsed result was never
# produced or used and the plots fell back to a separate as.POSIXct() call.
# Parse the renamed column in tablev instead and use it for both plots.
Datev <- ymd_hms(tablev$date)
#Heater 1
ggplot(data = tablev, aes(Datev, Htr1, group = 1))+
geom_line()+
scale_x_datetime(date_breaks = "1 day",
date_labels = "%m/%d")+
ggtitle("Heater 1")
#Heater 2
ggplot(data = tablev, aes(Datev, Htr2, group = 1))+
geom_line()+
scale_x_datetime(date_breaks = "1 day",
date_labels = "%m/%d")+
ggtitle("Heater 2")
|
028452f4846650245c578305367c123753f4fd01
|
679ad602f16cfb52ff7ca24264c51c19a063eb3c
|
/man/setTest.Rd
|
5418cac1846e204961d2b0bc38d3a2bee2d84fbf
|
[] |
no_license
|
mitra-ep/rSEA
|
cd7cde7f0691507a8a3e8b30eb78f3120a2faf36
|
20c9a545781daef94c119fccb1db66bdb37dbbbc
|
refs/heads/master
| 2021-10-25T05:27:09.743107
| 2021-10-17T19:59:20
| 2021-10-17T19:59:20
| 213,402,082
| 0
| 1
| null | 2021-10-17T19:59:20
| 2019-10-07T14:14:43
|
R
|
UTF-8
|
R
| false
| true
| 2,661
|
rd
|
setTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/settest.R
\name{setTest}
\alias{setTest}
\title{setTest}
\usage{
setTest(pvalue, featureIDs, data, set, testype, testvalue)
}
\arguments{
\item{pvalue}{The vector of p-values. It can be the name of the covariate representing the Vector of
raw p-values in the \code{data} or a single vector but in the latter case it should match the
\code{featureIDs} vector}
\item{featureIDs}{The vector of feature IDs. It can be the name of the covariate representing the IDs in the
\code{data} or a single vector but in the latter case it should match the \code{pvalue} vector}
\item{data}{Optional data frame or matrix containing the variables in \code{pvalue} and \code{featureIDs}}
\item{set}{The selection of features defining the feature-set based on the \code{featureIDs}.
If missing, the set of all features is selected}
\item{testype}{Character, type of the test: "selfcontained" or "competitive". Choosing the self-contained
option will automatically set the threshold to zero and the \code{testvalue} is ignored. Choosing the
competitive option without a \code{testvalue} will set the threshold to the overall estimated proportion
of true hypotheses}
\item{testvalue}{Optional value to test against. Setting this value to c along with
\code{testype=="competitive"} will lead to testing the null hypothesis against a threshold c.
Note: this value needs to be a proportion}
}
\value{
The adjusted p-value of the specified test for the feature-set is returned.
}
\description{
Calculates the adjusted p-value for the local hypothesis as defined by \code{testype}
and \code{testvalue}.
}
\examples{
\dontrun{
#Generate a vector of pvalues
set.seed(159)
m<- 100
pvalues <- runif(m,0,1)^5
featureIDs <- as.character(1:m)
# perform a self-contained test for all features
setTest(pvalues, featureIDs, testype = "selfcontained")
# create a random pathway of size 60
randset=as.character(c(sample(1:m, 60)))
# perform a competitive test for the random pathway
setTest(pvalues, featureIDs, set=randset, testype = "competitive")
# perform a unified null hypothesis test against 0.2 for a set of size 60
setTest(pvalues, featureIDs, set=randset, testype = "competitive", testvalue = 0.2 )
}
}
\references{
Mitra Ebrahimpoor, Pietro Spitali, Kristina Hettne, Roula Tsonaka, Jelle Goeman,
Simultaneous Enrichment Analysis of all Possible Gene-sets: Unifying Self-Contained
and Competitive Methods, Briefings in Bioinformatics, bbz074, https://doi.org/10.1093/bib/bbz074
}
\seealso{
\code{\link{setTDP}} \code{\link{SEA}}
}
\author{
Mitra Ebrahimpoor
\email{m.ebrahimpoor@lumc.nl}
}
|
71ae15663b6a6f00506e94b503f7e8b06d550dfe
|
8dbe9cebc5603e7a05de12dc997986468c431563
|
/create_human.R
|
0c96b3c3a3f512530d3e8663694afcab18e36627
|
[] |
no_license
|
ottoy91/IODS-project
|
ac1bbb68abcd7aee6ef1246180632c3cb83bd9d4
|
4994bc33d7fc2851cfc86ac26bb879b736098774
|
refs/heads/master
| 2020-04-05T15:46:10.330083
| 2018-12-09T16:58:19
| 2018-12-09T16:58:19
| 156,983,096
| 0
| 0
| null | 2018-11-10T13:19:54
| 2018-11-10T13:19:54
| null |
UTF-8
|
R
| false
| false
| 2,065
|
r
|
create_human.R
|
### 25.11.2018/Otto Ylöstalo/IODS-project Week 4 ###
library(dplyr)
#1 Read the human development and gender inequality datasets
hd <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv", stringsAsFactors = FALSE)
gii <- read.csv("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv", stringsAsFactors = FALSE, na.strings = "..")
#2
#structure, dimensions and summary of hd
str(hd)
dim(hd)
summary(hd)
#structure, dimensions and summary of gii
str(gii)
dim(gii)
summary(gii)
#3 Rename columns to shorter technical names
names(hd) <- c("hd_rank","country","HDI","LE_at_birth","exp_edu_years","mean_edu_years","GNI","GNI_rank")
names(gii) <- c("gii_rank","country","gii","mmr","abr","prp","eduF","eduM","labF","labM")
#4 Female/male ratios of education and labour-force participation
gii <- mutate(gii, edu_ratio = eduF/eduM)
gii <- mutate(gii, lab_ratio = labF/labM)
#5 Join on country; inner join keeps only countries present in both sets
human <- inner_join(hd, gii, by = "country", suffix = c(".hd", ".gii"))
#save file
write.csv(human, "human.csv", row.names = FALSE)
##2.12.2018/Otto Ylöstalo/IODS-project Week 5
##Data contains different variables that are related to human development reports from different countries
##and how these and not the economic growth alone are a criteria for a countrys development.
##Soure:
##http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human_development.csv
##http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/gender_inequality.csv
str(human)
dim(human)
#dataset combines 19 indicators from 195 countries in the world related to HDI
library(stringr)
str(human$GNI)
#1 Convert GNI from character to numeric.
# BUG FIX: the original piped str_replace() into as.numeric but never
# assigned the result, so human$GNI silently stayed character; assign it
# back. The partially-matched argument 'replace' is also spelled out as
# 'replacement'.
human$GNI <- str_replace(human$GNI, pattern = ",", replacement = ".") %>% as.numeric()
#2 Keep only the columns needed for the analysis
keep <- c("country", "edu_ratio", "lab_ratio", "LE_at_birth", "exp_edu_years", "GNI", "mmr", "abr", "prp")
human <- select(human, one_of(keep))
#3 Keep only complete cases (rows with no missing values)
complete.cases(human)
data.frame(human[-1], comp = complete.cases(human))
human <- filter(human, complete.cases(human))
complete.cases(human)
#4 Drop the last 7 rows (presumably regional aggregates rather than
# countries -- confirm against print(human$country) above)
print(human$country)
last <- nrow(human) - 7
human <- human[1:last, ]
#5 Use countries as row names and drop the country column
rownames(human) <- human$country
human <- human[,2:9]
# NOTE(review): row.names = FALSE discards the country row names that were
# just set, so the saved file has no country identifier; row.names = TRUE
# would preserve them -- confirm intended behaviour. Original kept as-is.
write.csv(human, "human.csv", row.names = FALSE)
|
944ee9133e4bc4503c704ff22fcadaa70778fe7e
|
f0c9e167c8ceae9388986d22e89eb7293c664343
|
/data/FGClimatology/R/LV_windrose.R
|
7c0c9add832bf6f2e761878681a2870469105ed9
|
[] |
no_license
|
gavin971/r_dev_messkonzept
|
1a2c91e51d45f18df65476bf04b7918c809f0503
|
af6073c2b18f1f036a528bf8df6143bf1b51a561
|
refs/heads/master
| 2020-03-19T03:13:34.235435
| 2015-05-22T08:07:06
| 2015-05-22T08:07:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,027
|
r
|
LV_windrose.R
|
windrose<-function(windspeed, winddir, r=5,p=10){
# Last revised: 03.12.2013
#
# windrose plots wind data from Campbell data loggers as wind roses.
# Prerequisites: suitable pre-processing (no NaN values) and an installed
# climatol package. Since the climatol package was removed from the CRAN
# repository (for whatever reason), it has to be downloaded once from the
# archive: <http://cran.r-project.org/src/contrib/Archive/climatol/> --
# fetch the newest version there and install it with
#! install.packages(file, repos = NULL, type = "source") (file: character vector of directory/download path)
#
# Script author: Carsten Vick
# Code in the script: Britta Jaenicke
#
# windspeed: a numeric vector containing windspeed data
# winddir: a numeric vector containing winddirection data
# r, p: passed straight through to climatol's rosavent() -- see the
#       climatol documentation for their meaning
#
# Author's note: the code below was taken almost verbatim from Britta and
# simply wrapped into a function. Some variable names are ambiguously
# chosen and are often helper variables. For questions about the code,
# please contact <britta.jaenicke@yahoo.de>.
library(climatol) # load the package
data(windfr) # load the example data.frame (already conveniently predefined)
windv_class<- windfr*0 # zero out the example data, keeping its structure
dirup <- c(11.25,33.75,56.25,78.75,101.25,123.75,146.25,168.75,191.25,213.75,236.25,258.75,281.25,303.75,326.25,348.75) # upper bounds of the 16 direction classes (degrees)
dirlow <-c(348.75,11.25,33.75,56.25,78.75,101.25,123.75,146.25,168.75,191.25,213.75,236.25,258.75,281.25,303.75,326.25) # lower bounds of the direction classes
speedup <- c(0.5,1.0,1.5,2.0) # upper bounds of the speed classes
speedlow <- (c(0.0,0.5,1.0,1.5)) # lower bounds of the speed classes
rownames(windv_class)<- c(paste(toString(speedlow[1]),"-",toString(speedup[1])), paste(toString(speedlow[2]),"-",toString(speedup[2])), paste(toString(speedlow[3]),"-",toString(speedup[3])), paste(">",toString(speedup[3]))) # relabel the example's rownames with the speed classes
# the following loop assigns the wind speeds to their speed class
for (a in seq_along(speedup)){
idx <- which(windspeed >=speedlow[a] & windspeed < speedup[a])
# the nested loop assigns the wind direction to a direction class
for (b in seq_along(dirlow)){
# class 1 wraps around north: >= 348.75 degrees OR < 11.25 degrees,
# so its two half-ranges are counted separately and summed
if (b == 1) cnt1 <- length(which(winddir[idx] >=dirlow[b]))
if (b == 1) cnt2 <- length(which(winddir[idx] < dirup[b]))
if (b == 1) cnt <- cnt1+cnt2
if (b > 1) cnt <- length(which(winddir[idx] >=dirlow[b] & winddir[idx] < dirup[b]))
windv_class[a,b]<-cnt
}
}
# windv_class now has exactly the dimensions required by rosavent()
rosavent(windv_class,r,p,ang=-3*pi/16,main="Windrose der Station")
}
|
4371e6e834eb711da1ca59d1f0097936c5cf7260
|
26021ab16e74ecfdca657c2ba3fbad6ef3f92ed5
|
/ShipTracker/tests/testthat/test-map_server.R
|
65d94b85cd4378d279bf31cdc80a49e7604a1f8b
|
[] |
no_license
|
radbasa/shiptracker
|
ed034b33b7d0326db599e3841ab5e93039d5c6a8
|
5a4dfbe7bbccb113d1d00b2cfd6bc88d7c2fc281
|
refs/heads/master
| 2023-07-28T20:47:45.363191
| 2021-09-30T10:53:19
| 2021-09-30T10:53:19
| 411,481,093
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
test-map_server.R
|
# Step up from tests/testthat/ to the app root so global.R can be sourced.
# NOTE(review): setwd() in test code is fragile -- it assumes tests are
# launched from tests/testthat and permanently changes the working directory.
setwd("../..")
source("global.R")
# Shared ShipData model backed by the data file configured in global.R
sdm <- ShipData$new(global$data_file_path)
test_that("Map Server outputs a leaflet map", {
testthat::local_edition(3)
# Mimic the reactive inputs the map module receives for ship id 316100
selected_ship <- list(
ship_legs = reactive({sdm$get_ship_legs(316100)}),
ship_info = reactive({sdm$get_ship_info(316100)})
)
testServer(mapServer, args = list(selected_ship = selected_ship), {
# Rendered htmlwidget output is serialised as JSON in shiny's test server
expect_s3_class(output$ship_map, "json")
# This saves the Mapbox access token in the snapshot. Gitignore this.
expect_snapshot(output$ship_map)
})
})
|
ab06f351354ffea2fb307cffc314d96bf44d372d
|
6589453f2dfec010434963966841d0de183d3a15
|
/project_scripts/orthogroups.R
|
a077db740a6363b67bf96e75377e92a1688a27d3
|
[] |
no_license
|
Werner0/anomura
|
c3256da9630e4db54dac3386dd5e4734adc3b91b
|
6fc9f5eb205bac7ec55ad4c503f9cb34b0b5496b
|
refs/heads/main
| 2023-03-09T09:58:05.142920
| 2021-02-28T05:38:23
| 2021-02-28T05:38:23
| 333,056,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,250
|
r
|
orthogroups.R
|
#SNIPPETS (USE BEFORE SCRIPT)
# Orthogroup analysis: filters OrthoFinder gene-count tables down to
# single-copy orthogroups and draws an annotated species tree with
# per-node gene-duplication counts (ggtree). Many exploratory snippets
# are kept commented out for reference.
library(data.table)
library(tibble)
library(plyr)
library(gridExtra)
# NOTE(review): hard-coded absolute working directory -- this only runs on
# the original author's machine; parameterise the path before reuse.
if (!getwd()=="/Users/wernerveldsman/Desktop/Rsources/Orthogroups/") {
setwd("/Users/wernerveldsman/Desktop/Rsources/Orthogroups/")}
#perspecies <- fread("Statistics_PerSpecies.tsv") #Cut file manually before loading
#colSums(perspecies[,2:10])
# Per-orthogroup gene counts, one column per species (columns 2:10)
orthonumbers <- fread("Orthogroups.GeneCount.tsv")
#orthonumbers <- orthonumbers[apply(orthonumbers!=0, 1, all),]
#orthonumbers[, "sum"] <- orthonumbers$kingcrab.aa+orthonumbers$bluekingcrab-(orthonumbers$lobster.aa)-(orthonumbers$coconutcrab.aa)
#orthonumbers <- orthonumbers[apply(orthonumbers[,c(2:10)],1,function(x) all(x>0)),]
# Keep single-copy orthogroups only: exactly one gene in every species
orthonumbers <- orthonumbers[apply(orthonumbers[,c(2:10)],1,function(x) all(x==1)),]
#orthonumbers <- orthonumbers[orthonumbers$amphipod.aa<3&orthonumbers$isopod.aa<3,]
orthonumbers <- orthonumbers[order(-orthonumbers$coconutcrab.aa),]
orthos <- fread("Orthogroups.tsv", sep="\t")
#singleorthos <- orthonumbers$Orthogroup
alltables_reduced <- fread("alltables_reduced.txt")
# The commented-out loop below collected eggNOG annotations per orthogroup
#singletable <- data.table()
#for (i in singleorthos) {
#query <- strsplit(gsub(" ","",orthos$bluekingcrab[orthos$Orthogroup=="OG000001"]),",")
#query <- query[[1]]
#subject <- alltables_reduced[alltables_reduced$query_name %in% query,]
#OGdata <- count(subject[subject$organism=="bluekingcrab",c("Preferred_name","eggNOG free text desc.")])
# OGplaceholder <- data.table(Preferred_name = NA,eggNOG.free.text.desc.=NA)
# if (nrow(OGdata)<1) {singletable <- rbind(singletable, OGplaceholder, fill = T)}
# singletable <- rbind(singletable, OGdata[,1:2], fill = T) }
#highcopygenes <- count(alltables_reduced[,c("Preferred_name","organism")])
#highcopygenes <- as.data.table(highcopygenes[order(-highcopygenes$freq),]) #Choose a gene from here for use in next line
#highcopyidentifiers <- alltables_reduced[alltables_reduced$Preferred_name %in% "KIF22"&alltables_reduced$organism=="coconutcrab",c("query_name")]
#orthogroupsofinterest <- orthos[orthos$coconutcrab.aa %in% highcopyidentifiers$query_name, c("Orthogroup")]
# Species-tree plotting
library("ape")
library("Biostrings")
library("ggplot2")
library("ggtree")
#library("flextable")
tree <- read.tree("SpeciesTree_rooted_node_labels.txt")
nodedata <- as.data.frame(tree$node.label)
# Per-node duplication counts; the values match the N0..N7 labels of the
# newick string quoted in the comment at the bottom of this file
nodedata$duplications <- c(758,846,740,1449,227,581,2935,11042)
nodedata$duplicationsl <- c("","","","","","","","")
colnames(nodedata) <- c("newick_label","duplications","duplicationsl")
# Group tips by the prefix before the first underscore, then prettify names
groupInfo <- split(tree$tip.label, gsub("_\\w+", "", tree$tip.label))
names(groupInfo) <- gsub("\\..*","",names(groupInfo))
names(groupInfo) <- gsub("\\-.*","",names(groupInfo))
# NOTE(review): the two vectors below assume the order of groups/tips stays
# fixed -- verify whenever the tree file changes
names(groupInfo) <- c("A. vulgare", "P. hawaiensis", "Achelata", "P. virginalis", "P. trituberculatus", "Anomura", "Anomura", "Anomura", "L. vannamei")
tree$tip.label <- c("Armadillidium vulgare", "Parhyale hawaiensis", "Panulirus ornatus", "Procambarus virginalis", "Portunus trituberculatus", "Birgus latro", "Paralithodes camtschaticus", "Paralithodes platypus", "Litopenaeus vannamei")
tree <- groupOTU(tree, groupInfo)
#tree <- root(tree, node = 011, edgelabel = TRUE)
# Base tree with scale bar; %<+% attaches nodedata to the plot data
g <- ggtree(tree, size=1.5) %<+% nodedata + geom_treescale() + xlim(NA, 1.1)
g <- rotate(g,14) #Lobster
g <- rotate(g,16) #Anomura
# Final plot: node labels coloured by duplication count (labels themselves
# are blank strings, so only the fill shading would show)
g2 <- g + geom_label(aes(label = duplicationsl, fill = duplications), show.legend = FALSE) +
theme(legend.position = NULL) + scale_fill_gradientn(colors = RColorBrewer::brewer.pal(3, "YlGnBu")) +
#geom_cladelabel(node=12, label="Pleocyemata", color="red2", offset=0.6, align=TRUE, angle = 90, offset.text = 0.05, hjust = 0.5, barsize = 1) +
#geom_cladelabel(node=17, label="Anomura", color="red2", offset=0.8, align=TRUE, angle = 90, offset.text = 0.05, hjust = 0.5, barsize = 1) +
theme_tree2()
#geom_text(aes(subset=(node==508), label = italic('Acetobacter spp.')), parse=TRUE, colour="blue", hjust=-.02)
#(isopod.aa_6304:0.253617,(amphipod.aa_8566:0.638071,(((lobster.aa_49883:0.202489,marbled.aa_11462:0.19829)N4_227:0.0536186,(swimmingcrab.aa_5873:0.346618,(coconutcrab.aa_85938:0.181323,(kingcrab.aa_78264:0.00540666,bluekingcrab_84676:0.0272019)N7_11042:0.215072)N6_2935:0.188058)N5_581:0.0669401)N3_1449:0.0654401,whiteshrimp.aa_4397:0.293433)N2_740:0.234702)N1_846:0.253617)N0_758;
|
f6e008d9db02fdf46e7eab70fb78de1a54575dd1
|
21051e5f5923f2f88fe0f869201e55a16c848434
|
/man/ev_surveillance.Rd
|
79d28eb3e023922487854ddcbe67f84c02c96b96
|
[] |
no_license
|
XiangdongGu/hkdata
|
4bb4011f2c3f22f7ae9f20ece7e354d1ba70c70f
|
453261388c8e8b30d8e7912c0d20ba719afeb185
|
refs/heads/master
| 2020-03-23T02:06:55.540330
| 2019-08-02T08:55:08
| 2019-08-02T08:55:08
| 140,956,671
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,611
|
rd
|
ev_surveillance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hk-health.R
\name{ev_surveillance}
\alias{ev_surveillance}
\title{Get Data for "Department of Health: EV Scan's Figures Data"}
\format{A data frame with 11 variables.\cr
* `year`: Year\cr
* `week`: Week\cr
* `from`: From (Date)\cr
* `to`: To (Date)\cr
* `n_ev71`: Number of EV71 cases by week\cr
* `n_hfmd_inst`: Number of HFMD institutional outbreaks by week\cr
* `n_hfmd_hosp_adm`: Number of hospital admission episodes of HFMD by week\cr
* `rate_hfmd_aed`: Accident & Emergency Department surveillance of HFMD syndrome group (per 1000 coded cases)\cr
* `prop_hfmd_ccc_kg`: Proportion of child care centres/kindergartens (CCC/KG) with HFMD cases based on HFMD sentinel surveillance at CCC/KG by week\cr
* `rate_hfmd_pvt_med`: Consultation rate for HFMD based on HFMD sentinel surveillance among private medical practitioner clinics by week (per 1000 consultations) \cr
* `rate_hfmd_gopc`: Consultation rate for HFMD based on HFMD sentinel surveillance among General Out-patient Clinics by week (per 1000 consultations)}
\source{
<https://data.gov.hk/en-data/dataset/hk-dh-chpsebcdde-ev-scan>
}
\usage{
ev_surveillance(path = ".", keep = FALSE)
}
\arguments{
\item{path}{path to save the file}
\item{keep}{whether to keep the file after read}
}
\description{
Hand, foot and mouth disease surveillance data including number of EV71 cases,
institutional outbreaks, hospital surveillance and sentinel surveillance. \cr
\cr
UPDATE FREQUENCY: WEEKLY
}
\details{
* Recent data are provisional figures and subject to further revision.
}
|
c429b7985b557ba1196feefd4eba78e3c3983905
|
15d1e3d8f2f3dc5dda60e264be1153a202934100
|
/man/iterate_umap.Rd
|
6e80e7ef53522097c14292860e38fe1c13772f22
|
[
"MIT"
] |
permissive
|
Ryan-Laird/PhenoClustR
|
547bcad7c327e4e9567b54eead77a354734ed94b
|
c294c3b04449503e726a89c205d8800805c029af
|
refs/heads/master
| 2022-04-25T09:16:41.941153
| 2020-04-28T22:39:33
| 2020-04-28T22:39:33
| 254,729,594
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 693
|
rd
|
iterate_umap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iterate_umap.R
\name{iterate_umap}
\alias{iterate_umap}
\title{Run UMAP with many combinations of hyperparameters.}
\usage{
iterate_umap(dat, dat.labels, params)
}
\arguments{
\item{dat}{Unlabeled dataframe for UMAP input}
\item{dat.labels}{Labels for dat}
\item{params}{Square dataframe of UMAP hyperparameter values (all required):
\itemize{
\item n_neighbors
\item min_dist
\item n_components
\item metric
\item method
\item seed
}}
}
\value{
Nested tibble::tibble() containing UMAP objs, output layout, labeled output, and 2D/3D plot obj.
}
\description{
Run UMAP with many combinations of hyperparameters.
}
|
d65e2ff124f576de743ab2cdece81f8ce6b82c3d
|
43ee56af5973642c4ee4a8044ea5aef8ef4b6477
|
/lib/XGBoost_model_fitting_WHOLE_DATA.R
|
e330f19b0cfb7a73abf8614188d5d99bc8af7dc9
|
[] |
no_license
|
TZstatsADS/Fall2016-proj5-grp3
|
865651c1ca1afd6a2306c5bdefa273d29cb111b8
|
a0b4a4c8e55e9cf8e2aa87af2de378f5c5a4bbd9
|
refs/heads/master
| 2021-05-01T02:41:01.497237
| 2016-12-13T21:03:28
| 2016-12-13T21:03:28
| 74,604,825
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,295
|
r
|
XGBoost_model_fitting_WHOLE_DATA.R
|
# Five-fold cross-validation of 24 per-label XGBoost classifiers.
# Expects df(final).RData to provide `df`; columns 25-48 hold the 24 binary
# labels, columns c(3:6, 8:15, 17:24) are the features.
library(dplyr)
library(xgboost)
setwd('C:\\Users\\LENOVO\\Desktop\\Academic\\ADS\\project_5')
load('df(final).RData')
train_df <- df
# Coerce the categorical feature columns to factors before matrix encoding.
for (i in c(3, 4, 5, 12, 13, 14, 15, 17, 18, 21, 24)){
train_df[, i] <- as.factor(train_df[ , i])
}
###Train the model
train_df_X <- data.matrix(train_df[ , c(3:6, 8:15, 17:24)])
cv_number <- nrow(train_df_X)
# Contiguous fold assignment.
# NOTE(review): assumes rows are already in random order — confirm, otherwise
# folds are systematically ordered.
k_folds <- cut(1:cv_number, breaks = 5, labels = FALSE)
cv_errors <- matrix(0, nrow = 24, ncol = 5)
for (k in 1:5){
models <- list()
cv_indices <- which(k_folds == k, arr.ind = TRUE)
# NOTE(review): fold k (1/5 of the rows) is used for TRAINING and the other
# 4/5 for testing — this inverts the usual CV split; confirm it is intended.
cv_train <- train_df_X[cv_indices, ]
cv_test <- train_df_X[-cv_indices, ]
# Fit one booster per label column (25..48).
for (i in 25:48){
cv_y <- train_df[cv_indices , i]
temp_model <- xgboost(data = cv_train,
label = cv_y,
nrounds = 50,
missing = NaN
)
models[[i - 24]] <- temp_model
print(i)
}
# Predict held-out rows and record the misclassification rate per label.
pred_matrix <- matrix(0, ncol = 24, nrow = nrow(cv_test))
error_rate <- rep(0, 24)
for (j in 1:24){
pred <- round(predict(models[[j]], cv_test, missing = NaN))
pred_matrix[ , j] <- pred
error_rate[j] <- sum(pred != train_df[-cv_indices , j + 24]) / length(pred)
print(j)
}
cv_errors[, k] <- error_rate
}
|
adcabf5313e44d783089b27e3aa529473fc0965e
|
d5a14ba66821cab667def0c8730dbbef1551b762
|
/man/IDW.Rd
|
6181b1ea527228ba23ddd7ef73a658cc418a4130
|
[] |
no_license
|
overeem11/RAINLINK
|
5f964a6bcfae67e3f32c42a55c5dcd6fcca20f6d
|
5e38a76f8b99ccbb444d6486b96c7ce4c3cb9954
|
refs/heads/master
| 2023-07-11T00:15:34.423165
| 2023-06-20T13:41:26
| 2023-06-20T13:41:26
| 48,035,739
| 12
| 20
| null | 2018-06-07T13:40:54
| 2015-12-15T10:22:57
|
R
|
UTF-8
|
R
| false
| true
| 1,050
|
rd
|
IDW.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IDW.R
\name{IDW}
\alias{IDW}
\title{Subfunction for inverse distance weighted interpolation on point data.}
\usage{
IDW(idp, rain.grid, Rainlink)
}
\arguments{
\item{idp}{The inverse distance weighting power.}
\item{rain.grid}{Interpolation grid in a Cartesian coordinate system.}
\item{Rainlink}{Coordinates of links in a Cartesian coordinate system
and rainfall intensity (latitude in km, longitude in km, intensity in mm h\eqn{^{-1}}).}
}
\value{
Interpolated field of rainfall intensities.
}
\description{
Subfunction for inverse distance weighted interpolation on point data.
}
\examples{
IDW(idp=idp,rain.grid=rain.grid,Rainlink=Rainlink)
}
\references{
''ManualRAINLINK.pdf''
Overeem, A., Leijnse, H., and Uijlenhoet, R., 2016: Retrieval algorithm for rainfall mapping from microwave links in a
cellular communication network, Atmospheric Measurement Techniques, 9, 2425-2444, https://doi.org/10.5194/amt-9-2425-2016.
}
\author{
Aart Overeem & Hidde Leijnse
}
|
955ecbada9a0b4ef8f29e1232259911b65081a14
|
e525513e27156b29a12a0aa585327faa6241ed53
|
/R/cea_policy_tree.R
|
46df2f8bc906909cc38452d3c8a2f2416d2633ef
|
[] |
no_license
|
bonander/CEAforests
|
b9c340305a5004d06c94802fed90bebee08d0643
|
111db3c0518a1379d2635aaafe524270244e7d40
|
refs/heads/master
| 2023-03-26T03:57:52.073071
| 2021-03-25T10:40:52
| 2021-03-25T10:41:31
| 228,402,995
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,944
|
r
|
cea_policy_tree.R
|
#' @title Train a policy tree after a CEA forest.
#' @description \code{cea_policy_tree} Trains an efficient policy decision tree given a CEA forest (a wrapper for policytree::policy_tree).
#'
#' @param forest A trained CEA forest.
#' @param X A covariate matrix containing variables that are to be used in the policy tree.
#' @param WTP Willingness to pay for a one unit increase in the outcome. If NULL, the WTP supplied to the CEA forest is used.
#' @param depth The desired depth for the decision tree.
#' @param ci.level Desired significance level (for confidence intervals).
#' @param robust.se Whether or not robust (sandwich) standard errors are desired. Defaults to FALSE.
#'
#'
#' @references Athey, S., & Wager, S. (2017). Efficient policy learning. arXiv preprint arXiv:1702.02896.
#'
#' @return Returns a trained policy tree.
#' @examples
#' \dontrun{
#' To be added...
#' }
#' @importFrom utils installed.packages
#' @import stats
#' @export
cea_policy_tree = function(forest, X, WTP=NULL, depth=2, ci.level=0.95, robust.se=FALSE) {
  # Train a depth-limited policy tree on doubly robust net-monetary-benefit
  # (NMB) scores derived from a CEAforests object.
  #
  # Args:
  #   forest:    a trained CEAforests object.
  #   X:         covariate matrix used to split the policy tree.
  #   WTP:       willingness to pay per outcome unit; defaults to the value
  #              stored in the forest (with a warning).
  #   depth:     tree depth passed to policytree::policy_tree().
  #   ci.level, robust.se: accepted for backward compatibility; not used here.
  #
  # Returns: a list of class c("cea_policy_tree", "CEAforests") holding the
  #   fitted tree (with n.sample attached) and the covariate matrix X.
  #
  # requireNamespace() both checks installation and that the package loads.
  if (!requireNamespace("policytree", quietly = TRUE)) {
    stop("Package \"policytree\" must be installed to estimate policy trees.")
  }
  if (!inherits(forest, "CEAforests")) {
    stop("Unrecognized or unsupported forest object. Please supply a CEAforests object.")
  }
  if (is.null(WTP)) {
    WTP = forest[["WTP"]]
    warning(paste("WTP not specified, assuming WTP = ", WTP, ", as supplied to the CEAforests object.", sep=""))
  }
  # Doubly robust NMB scores: WTP-scaled outcome effect minus cost effect.
  gamma1 = cate.prepare(forest[["outcome.forest"]])*WTP
  gamma2 = cate.prepare(forest[["cost.forest"]])
  gamma = gamma1-gamma2
  # Reward matrix: treating an individual earns +gamma, withholding earns -gamma.
  Gamma = cbind(control=-gamma, treated=gamma)
  treefit = policytree::policy_tree(X, Gamma, depth=depth)
  treefit$n.sample = nrow(Gamma)
  results = list()
  results[["tree"]] = treefit
  results[["X"]] = X
  class(results) = c("cea_policy_tree", "CEAforests")
  return(results)
}
#' @title Conduct inference for a personalized treatment policy.
#' @description Conduct inference for a personalized treatment policy, either using a manually specified policy or a learned policy.
#'
#' @param forest A trained CEA forest.
#' @param treat.policy A logical vector or cea policy tree defining the subset covered by the policy.
#' @param WTP Willingness to pay for a one unit increase in the outcome. If NULL, the WTP supplied to the CEA forest is used.
#' @param ci.level Desired significance level (for confidence intervals).
#' @param robust.se Whether or not robust (sandwich) standard errors are desired. Defaults to FALSE. Ignored when boot.ci=TRUE.
#' @param boot.ci Whether or not bootstrapped confidence intervals are desired. Defaults to FALSE.
#' @param R The number of bootstrap replications. Defaults to 999. Ignored when boot.ci=FALSE.
#'
#' @return Returns a matrix containing estimates for the average welfare gain per population member under various treatment policies (treat everyone vs. treat no one; treat suggested subset vs. treat no one; treat suggested subset vs. treat everyone). Also outputs the share of the popuation covered by the policy.
#' @examples
#' \dontrun{
#' To be added...
#' }
#' @import stats
#' @import boot
#' @export
infer_policy = function(forest, treat.policy, WTP=NULL, ci.level=0.95, robust.se=FALSE, boot.ci=FALSE, R=999) {
  # Estimate the average welfare (net monetary benefit, NMB) gain per
  # population member under: treat-everyone, the supplied/learned policy,
  # and their difference; plus the share of the population the policy treats.
  #
  # Args:
  #   forest:       a trained CEAforests object.
  #   treat.policy: index vector, logical vector of length n, or a trained
  #                 cea_policy_tree defining who gets the new treatment.
  #   WTP:          willingness to pay; defaults to the forest's stored value.
  #   ci.level:     confidence level for the intervals.
  #   robust.se:    use sandwich (HC) standard errors (ignored if boot.ci).
  #   boot.ci:      use BCa bootstrap intervals instead of t intervals.
  #   R:            number of bootstrap replicates (boot.ci only).
  #
  # Returns: a data.frame with Estimate/Std.Err/Lower.CI/Upper.CI rows for the
  #   three contrasts plus the treated share.
  if (!inherits(forest, "CEAforests")) {
    stop("Unrecognized forest object.")
  }
  subset = treat.policy
  if (is.null(WTP)) {
    WTP = forest[["WTP"]]
    warning(paste("WTP not specified, assuming WTP = ", WTP, ", as supplied to the CEAforests object.", sep=""))
  }
  # Doubly robust NMB scores: WTP-scaled outcome effect minus cost effect.
  gamma1 = cate.prepare(forest[["outcome.forest"]])*WTP
  gamma2 = cate.prepare(forest[["cost.forest"]])
  gamma = gamma1-gamma2
  if (inherits(subset, "cea_policy_tree")) {
    # Learned policy: predict the recommended action (2 = treat) for each row.
    X = subset[["X"]]
    predicted.action = predict(subset[["tree"]], newdata=X)
    P = as.numeric(predicted.action==2)
  } else {
    n = length(forest[["outcome.forest"]]$Y.hat)
    # is.logical() replaces the fragile class(subset) == "logical" comparison.
    if (is.logical(subset) && length(subset) == n) {
      subset <- which(subset)
    }
    if (!all(subset %in% seq_len(n))) {
      stop(paste("treat.policy must be a vector contained in 1:n,",
                 "a boolean vector of length n or a trained CEA policy tree."))
    }
    P = rep(0, length(gamma)); P[subset] = 1
  }
  tau_tr = gamma                            # scores: treat everyone
  policy_suggested = tau_tr*P               # scores: suggested policy
  policy_diff = policy_suggested-tau_tr     # scores: suggested vs. treat-all
  # Intercept-only regressions give each mean score and its variance.
  all.m = lm(tau_tr~1)
  sugg.m = lm(policy_suggested~1)
  vs.m = lm(policy_diff~1)
  est.tr.all = as.vector(coef(all.m))
  est.suggested = as.vector(coef(sugg.m))
  est.diff = as.vector(coef(vs.m))
  ests = c(est.tr.all, est.suggested, est.diff)
  if (!isTRUE(boot.ci)) {
    # Asymptotic t intervals, optionally with HC (sandwich) standard errors.
    if (isTRUE(robust.se)) {
      se.tr.all = as.vector(sqrt(diag(sandwich::vcovHC(all.m))))
      se.suggested = as.vector(sqrt(diag(sandwich::vcovHC(sugg.m))))
      se.diff = as.vector(sqrt(diag(sandwich::vcovHC(vs.m))))
    } else {
      se.tr.all = as.vector(sqrt(diag(vcov(all.m))))
      se.suggested = as.vector(sqrt(diag(vcov(sugg.m))))
      se.diff = as.vector(sqrt(diag(vcov(vs.m))))
    }
    ses = c(se.tr.all, se.suggested, se.diff)
    crit = qt(1-(1-ci.level)/2, df=length(gamma)-1)
    lowers = ests-ses*crit
    uppers = ests+ses*crit
  } else {
    # BCa bootstrap of the same three contrasts.
    bootres = boot.policy_scores(tau_tr, policy_suggested, R, 1-ci.level)[[1]]
    ses = bootres[,1]
    lowers = bootres[,2]
    uppers = bootres[,3]
  }
  # Share of the population treated under the suggested policy.
  tr.sugg.share = mean(P)
  res = as.data.frame(cbind(ests,ses,lowers,uppers))
  res = rbind(res, c(tr.sugg.share,NA,NA,NA))
  colnames(res) = c("Estimate", "Std.Err", "Lower.CI", "Upper.CI")
  rownames(res) = c("Average NMB, new-for-all vs control-for-all",
                    "Average NMB, suggested policy vs control-for-all",
                    "Difference in NMB, suggested vs. new-for-all",
                    "Prop. who gets new treatment, suggested policy")
  return(res)
}
#' Writes each node information
#' If it is a leaf node: show it in different color, show number of samples, show leaf id
#' If it is a non-leaf node: show its splitting variable and splitting value
#' @param tree the tree to convert
#' @param index the index of the current node
#' @param group.names names of the treatment and control states (defaults to c("Do not reimburse", "Reimburse"))
#' @keywords internal
cea_create_dot_body <- function(tree, index = 1, group.names = c("Do not reimburse", "Reimburse")) {
  # Recursively render one node (and its subtree) of a policy tree as DOT text.
  # Leaf nodes become filled boxes labelled with the chosen action; internal
  # nodes show "variable <= split_value" and edges to both children.
  nam1 <- group.names[1]
  nam2 <- group.names[2]
  node <- tree$nodes[[index]]
  # Leaf case: print label only.
  if (node$is_leaf) {
    action <- if (node$action == 1) nam1 else nam2
    line_label <- paste(index - 1, ' [shape=box,style=filled,color="White", height=0.2, label="', action, "\n", '"];', sep="")
    return(line_label)
  }
  # Non-leaf case: print label, child edges. The root's edges carry
  # "Yes"/"No" head labels; deeper edges are unlabelled.
  if (!is.null(node$left_child)) {
    edge <- paste(index - 1, "->", node$left_child - 1)
    if (index == 1) {
      edge_info_left <- paste(edge, '[labeldistance=2.5, labelangle=45, headlabel="Yes"];')
    } else {
      edge_info_left <- paste(edge, " ;")
    }
  } else {
    # BUG FIX: the original assigned edge_info_right here, which left
    # edge_info_left undefined whenever left_child was missing.
    edge_info_left <- NULL
  }
  if (!is.null(node$right_child)) {
    edge <- paste(index - 1, "->", node$right_child - 1)
    if (index == 1) {
      edge_info_right <- paste(edge, '[labeldistance=2.5, labelangle=-45, headlabel="No"]')
    } else {
      edge_info_right <- paste(edge, " ;")
    }
  } else {
    edge_info_right <- NULL
  }
  variable_name <- tree$columns[node$split_variable]
  node_info <- paste(index - 1, '[label="', variable_name, "<=", round(node$split_value, 2), '"] ;')
  this_lines <- paste(node_info,
                      edge_info_left,
                      edge_info_right,
                      sep = "\n"
  )
  # Recurse with plain if/else (the original used ifelse(), which mishandles
  # NULL branches) and propagate group.names so that leaf labels deeper in the
  # tree honour the caller's names instead of silently reverting to defaults.
  left_child_lines <- if (!is.null(node$left_child)) {
    cea_create_dot_body(tree, index = node$left_child, group.names = group.names)
  } else {
    NULL
  }
  right_child_lines <- if (!is.null(node$right_child)) {
    cea_create_dot_body(tree, index = node$right_child, group.names = group.names)
  } else {
    NULL
  }
  lines <- paste(this_lines, left_child_lines, right_child_lines, sep = "\n")
  return(lines)
}
#' Export a tree in DOT format.
#' This function generates a GraphViz representation of the tree,
#' which is then written into `dot_string`.
#' @param tree the tree to convert
#' @param group.names names of the treatment and control states (defaults to c("Do not reimburse", "Reimburse"))
#' @keywords internal
cea_export_graphviz <- function(tree, group.names = c("Do not reimburse", "Reimburse")) {
  # Wrap the recursively generated node/edge description in the fixed
  # GraphViz digraph header and closing brace, returning the full DOT string.
  graph_body <- cea_create_dot_body(tree, group.names = group.names)
  paste(
    "digraph nodes { \n node [shape=box] ;",
    graph_body,
    "}",
    sep = "\n"
  )
}
#' Plot a cea_policy_tree tree object.
#' @param x The tree to plot
#' @param group.names names of the treatment and control states (defaults to "Control treatment", "New treatment")
#' @param ... Additional options (currently ignored).
#'
#' @method plot cea_policy_tree
#' @export
plot.cea_policy_tree <- function(x, group.names = c("Do not reimburse", "Reimburse"), ...) {
  # Render the fitted policy tree via DiagrammeR (an optional dependency,
  # checked without attaching it).
  if (!requireNamespace("DiagrammeR", quietly = TRUE)) {
    stop("Package \"DiagrammeR\" must be installed to plot trees.")
  }
  graph_spec <- cea_export_graphviz(x[["tree"]], group.names = group.names)
  DiagrammeR::grViz(graph_spec)
}
#' @title Bootstrap average effects
#' @description \code{boot.dr_scores} Bootstraps doubly robust scores and obtains accelerated bootstrap confidence intervals (BCa).
#'
#' @param Gamma_all Scores for treating everyone vs treating no-one.
#' @param Gamma_policy Scores for suggested policy vs treating no-one.
#' @param R Number of bootstrap replicates.
#' @param alpha Significance level, i.e. 1 minus the desired confidence level.
#' @keywords internal
#' @return Returns a matrix with estimated standard errors and BCa confidence intervals.
#' @export
#'
boot.policy_scores <- function(Gamma_all, Gamma_policy, R, alpha) {
  # Bootstrap the mean treat-all score, the mean suggested-policy score and
  # their difference, returning bootstrap SEs and BCa confidence intervals.
  #
  # Returns a list:
  #   [[1]] 3x3 matrix (se, lower, upper) per contrast; all NA when R <= n,
  #         since BCa intervals require more replicates than observations.
  #   [[2]] the raw bootstrap replicates (b$t).
  df = as.data.frame(cbind(Gamma_all, Gamma_policy))
  n = nrow(df)
  # Statistic: the three contrast means on each resample.
  bfun = function(data, indices, Gamma_all, Gamma_policy) {
    d = data[indices,]
    tr_all = mean(d[,Gamma_all])
    tr_policy = mean(d[,Gamma_policy])
    tr_diff = tr_policy - tr_all
    c(tr_all, tr_policy, tr_diff)
  }
  b = boot::boot(data = df, bfun, R = R, Gamma_all = "Gamma_all", Gamma_policy = "Gamma_policy")
  all_se = sd(b$t[,1])
  policy_se = sd(b$t[,2])
  diff_se = sd(b$t[,3])
  res = list()
  if (R <= n) {
    # Typo fixed in the message ("cannot not" -> "cannot").
    warning("Number of bootstrap replicates R is smaller than the number of rows in the data. BCa confidence intervals cannot be computed. Please increase R.")
    res[[1]] = cbind(c(NA, NA, NA), c(NA, NA, NA), c(NA, NA, NA))
  } else {
    bci_all = boot::boot.ci(b, index = 1, conf = 1 - alpha, type = "bca")
    bci_policy = boot::boot.ci(b, index = 2, conf = 1 - alpha, type = "bca")
    bci_diff = boot::boot.ci(b, index = 3, conf = 1 - alpha, type = "bca")
    ses = c(all_se, policy_se, diff_se)
    lowers = c(bci_all$bca[,4], bci_policy$bca[,4], bci_diff$bca[,4])
    uppers = c(bci_all$bca[,5], bci_policy$bca[,5], bci_diff$bca[,5])
    res[[1]] = cbind(ses, lowers, uppers)
  }
  res[[2]] = b$t
  return(res)
}
|
ad5261aa816f0510f566509388a75e16cb9a5031
|
7b74f00cd80694634e6925067aaeb6572b09aef8
|
/2020/notes-2020/session_files/session_6_fe8828_r_blockchain-master/fe8828_r_blockchain-master/node_server/blockchain-node-server.R
|
2c825347df94ee8f21d5f9f7ef7c6a894f6e6487
|
[] |
no_license
|
leafyoung/fe8828
|
64c3c52f1587a8e55ef404e8cedacbb28dd10f3f
|
ccd569c1caed8baae8680731d4ff89699405b0f9
|
refs/heads/master
| 2023-01-13T00:08:13.213027
| 2020-11-08T14:08:10
| 2020-11-08T14:08:10
| 107,782,106
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,462
|
r
|
blockchain-node-server.R
|
# Node bootstrap: ensure uuid is installed, load the blockchain implementation,
# mint this node's address, and create the chain with its genesis block.
list.of.packages <- c("uuid")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
require(uuid)
# make sure you put the path of your blockchain.R file
source("blockchain.R")
# Generate a globally unique address for this node
node_identifier = gsub('-','',UUIDgenerate())
# Instantiate the Blockchain
blockchain = Blockchain()
# genesis block
blockchain$nextBlock(previousHash=1, nonce=100)
#* @get /chain/show
#* @html
function(req)
{
# Render a human-readable HTML dump of the node state: registered peer nodes,
# pending transactions, and every block in the chain.
render.html <- ""
render.html <- paste0(render.html, '<div>')
render.html <- paste0(render.html, '<h1>Current nodes:</h1>')
if (length(blockchain$nodes) > 0) {
for (i in 1:length(blockchain$nodes))
{
render.html <- paste0(render.html, '<b>Node:</b>' , i ,'<br>')
render.html <- paste0(render.html, 'name:', blockchain$nodes[i][1])
render.html <- paste0(render.html, '<br>')
}
}
render.html <- paste0(render.html, '<br>')
render.html <- paste0(render.html, '</div>')
# Pending (not yet mined) transactions.
render.html <- paste0(render.html, '<div>')
render.html <- paste0(render.html, '<h1>Current transactions:</h1>')
if (length(blockchain$currentTransactions) > 0) {
for (i in 1:length(blockchain$currentTransactions))
{
render.html <- paste0(render.html, '<b>Transaction:</b>', i ,'<br>')
render.html <- paste0(render.html, 'sender:', blockchain$currentTransactions[i]$transaction$sender)
render.html <- paste0(render.html, '<br>')
render.html <- paste0(render.html, 'recipient:', blockchain$currentTransactions[i]$transaction$recipient)
render.html <- paste0(render.html, '<br>')
render.html <- paste0(render.html, 'amount:', blockchain$currentTransactions[i]$transaction$amount)
render.html <- paste0(render.html, '<br>')
}
}
render.html <- paste0(render.html, '<br>')
render.html <- paste0(render.html, '</div>')
# Every mined block, with its transactions, nonce and (from the second block
# onward) the proof-of-work guess linking it to the previous block's nonce.
render.html <- paste0(render.html, '<div>')
render.html <- paste0(render.html, '<h1>Current block:</h1>')
for (i in 1:blockchain$lastBlock()$block$index)
{
render.html <- paste0(render.html, '<br>')
render.html <- paste0(render.html, '<b>Block nr:</b>', blockchain$chain[i]$block$index)
render.html <- paste0(render.html, '<br>')
render.html <- paste0(render.html, '<b>Transactions:</b>')
render.html <- paste0(render.html, '<br>')
if (length(blockchain$chain[i]$block$transactions) > 0 ) {
for (j in 1:length(blockchain$chain[i]$block$transactions)) {
render.html <- paste0(render.html, blockchain$chain[i]$block$transactions[j])
render.html <- paste0(render.html, '<br>')
}
}
render.html <- paste0(render.html, '<b>Nonce:</b>')
render.html <- paste0(render.html, '<br>')
render.html <- paste0(render.html,blockchain$chain[i]$block$nonce)
render.html <- paste0(render.html, '<br>')
if (i > 1) {
render.html <- paste0(render.html, "<b>Proof guess:</b>")
render.html <- paste0(render.html, '<br>')
render.html <- paste0(render.html, blockchain$guessProof(blockchain$chain[i-1]$block$nonce, blockchain$chain[i]$block$nonce))
render.html <- paste0(render.html, '<br>')
}
render.html <- paste0(render.html, '<hr>')
}
render.html <- paste0(render.html, '<br>')
render.html <- paste0(render.html, '</div>')
# Return the accumulated HTML string (served via the @html serializer).
render.html
}
#* @serializer custom_json
#* @get /chain
function(req)
{
  # Return the node's full chain together with its length (used by peers
  # during consensus resolution).
  current_chain <- blockchain$chain
  list('length' = length(current_chain),
       'chain' = current_chain)
}
#* @serializer custom_json
#* @get /transactions/new
#* @post /transactions/new
function(req, sender, recipient, amount)
{
# Queue a new transaction for inclusion in the next mined block.
# GET supplies the fields as query parameters; POST as a JSON body.
# eg req_json <- '{"sender": "my address", "recipient": "someone else address", "amount": 5}'
# values <- jsonlite::fromJSON(req_json)
if (req$REQUEST_METHOD == "GET") {
values <- list(sender = sender,
recipient = recipient,
amount = amount)
} else if (req$REQUEST_METHOD == "POST") {
values <- jsonlite::fromJSON(req$postBody)
}
# Check that the required fields are in the POST'ed data
required = c('sender','recipient', 'amount')
if (!all(required %in% names(values))) {
return ('Missing Values - sender, recipient and amount are required')
}
# addTransaction returns the index of the block that will hold it.
index <- blockchain$addTransaction(values$sender, values$recipient, values$amount)
list('message' = paste('Transaction will be added to Block', index))
}
#* @serializer custom_json
#* @get /mine
function(req)
{
# Mine a new block: solve proof-of-work, credit this node with the mining
# reward, then append the forged block to the chain.
# We run the proof of work algorithm to get the next nonce
lastBlock <- blockchain$lastBlock()
lastNonce <- lastBlock$block$nonce
nonce <- blockchain$proofOfWork(lastNonce)
# We must receive a reward for finding the Nonce.
# The sender is "0" to signify that this node has mined a new coin.
blockchain$addTransaction(sender="0", recipient = node_identifier, amount=1)
# Forge the new block by adding it to the chain
previousHash <- blockchain$hashBlock(lastBlock)
block <- blockchain$nextBlock(nonce, previousHash)
list('message'='New block forged',
'index'= block$block$index,
'transactions'= block$block$transactions,
'nonce'=block$block$nonce,
'previousHash'=block$block$previousHash)
# list('message'='New block forged', c('index'= block$block$index, 'transactions'= block$block$transactions, 'nonce'=block$block$nonce,'previousHash'=block$block$previousHash))
}
#* @serializer custom_json
#* @post /nodes/register
#* @get /nodes/register
function(req, nodes)
{
# Register one or more peer nodes. GET takes `nodes` as a query parameter;
# POST takes a JSON body with a "nodes" field.
# req_json <- '{"sender": "my address", "recipient": "someone else address", "amount": 5}'
if (req$REQUEST_METHOD == "GET") {
} else if (req$REQUEST_METHOD == "POST") {
values <- jsonlite::fromJSON(req$postBody)
nodes <- values$nodes
}
cat(paste0(nodes, "\n"))
if (is.null(nodes)) {
return("Error: the list of nodes is not valid")
}
blockchain$registerNode(nodes)
}
#* @serializer custom_json
#* @get /nodes/resolve
function (req)
{
  # Consensus endpoint: ask the blockchain to reconcile with registered peers
  # and report whether our local chain was replaced.
  if (blockchain$handleConflicts()) {
    list('message'='Replaced', 'chain' = blockchain$chain)
  } else {
    list('message'='Authoritative block chain - not replaceable ', 'chain'=blockchain$chain)
  }
}
#* Log some information about the incoming request
#* @filter logger
function(req){
# Write a single access-log line (timestamp, method, path, user agent, remote
# address) to stdout, then hand the request on to the matching route.
cat(as.character(Sys.time()), "-",
req$REQUEST_METHOD, req$PATH_INFO, "-",
req$HTTP_USER_AGENT, "@", req$REMOTE_ADDR, "\n")
plumber::forward()
}
|
149d0405865f49b98f9e558ee0b27bab3efd2b60
|
94aed2117dfdb2227eea8b019fed82d5b6193e4e
|
/TextMine.R
|
061168a7ed7e0a19ea513d100e952e0fe985cb56
|
[] |
no_license
|
elliott828/Working
|
c2199454360508eac7f7a21c302abf71908a7530
|
e5caeba56871c7c1c4a890e2b08737452a5cb013
|
refs/heads/master
| 2020-12-07T00:46:05.930959
| 2015-06-04T08:30:30
| 2015-06-04T08:30:30
| 36,857,779
| 0
| 0
| null | 2015-06-04T08:24:17
| 2015-06-04T08:24:16
|
R
|
UTF-8
|
R
| false
| false
| 5,430
|
r
|
TextMine.R
|
#--------------------------------
req.pcg <- function(pcg){
  # Install any requested packages that are missing or outdated, then load
  # them all. Chinese text-mining packages come from R-Forge and Rgraphviz
  # from Bioconductor; everything else from CRAN.
  #
  # BUG FIX: old.packages() returns NULL when nothing is outdated, and
  # NULL[, "Package"] errors — guard before subsetting.
  old <- old.packages()
  outdated <- if (is.null(old)) character(0) else old[, "Package"]
  # packages to be installed
  tbinst <- pcg[(!(pcg %in% installed.packages()[, "Package"]))|
                  (pcg %in% outdated)]
  if (sum(tbinst %in% c("tmcn", "Rwordseg", "Rweibo"))>0){
    cntm <- tbinst[tbinst %in% c("tmcn", "Rwordseg", "Rweibo")]
    install.packages(cntm,
                     repos = "http://R-Forge.R-project.org",
                     type = "source")
  }else if(sum(tbinst == "Rgraphviz")>0){
    # Rgraphviz is distributed via Bioconductor, not CRAN.
    source("http://bioconductor.org/biocLite.R")
    biocLite("Rgraphviz")
  }else if (length(tbinst)){
    install.packages(tbinst, dependencies = TRUE)
  }
  # NOTE(review): the else-if chain installs at most one source group per
  # call — confirm a single call never needs packages from several sources.
  # Attach every requested package; returns a logical success vector.
  sapply(pcg, require, warn.conflicts = FALSE, character.only = TRUE, quietly = TRUE)
}
# Packages needed for the text-mining pipeline below.
all.pcg <- c("tm", "SnowballC", "qdap", "qdapDictionaries", "dplyr",
"RColorBrewer", "ggplot2", "scales", "wordcloud", "igraph",
"Rweibo", "Rwordseg", "RWeka", "ggdendro")
req.pcg(all.pcg)
# ERROR: compilation failed for package 'tmcn'
# Warning in install.packages : package 'tmcn' is not available (for R version 3.2.0)
#--------------------------------
# Load one survey CSV into df; each assignment overwrites the previous one,
# so only the LAST uncommented line takes effect — pick the dataset by
# commenting out the others.
df <- read.csv("FO_Increased.csv")
df <- read.csv("FO_Dropped.csv")
df <- read.csv("FO_Same.csv")
df <- read.csv("FO_Total.csv")
df <- read.csv("Australia.csv")
# Column index to analyse; again, the last assignment wins.
i <- 1
i <- 2
# Build a corpus from the non-missing entries of the chosen column.
sub_cont <- Corpus(VectorSource(df[complete.cases(df[, i]), i]))
# Lower-case and drop English stopwords, EXCEPT negations (kept for later
# normalisation to "not") and a few others excluded by negative indexing.
sub_cont <- sub_cont %>%
tm_map(content_transformer(tolower)) %>%
tm_map(removeWords, stopwords("english")[c(-81:-98, -160, -165:-167)])
# [160] "more"
# others: not
# Generic gsub transformer used for all substitutions below.
change <- content_transformer(function(x, from, to) gsub(from, to, x))
# Collapse every negation stopword into the single token "not".
for(j in c(81:98, 166)) {
sub_cont <- tm_map(sub_cont, change, stopwords("english")[j], "not")
}
# Glue domain-specific two-word phrases into single tokens.
sub_cont <- sub_cont %>%
tm_map(change, "blue oval", "blueoval") %>%
tm_map(change, "loyal followers", "loyalfollower")
# Standard cleanup: punctuation, whitespace, stemming, numbers.
sub_cont <- sub_cont %>%
tm_map(removePunctuation) %>%
tm_map(stripWhitespace) %>%
tm_map(stemDocument) %>%
tm_map(removeNumbers)
# change words into original form
mat <- matrix(c(c("releas", "purchas", "websit", "territori", "specif", "peopl", "futur", "decid", "brochur", "pictur"),
c("release", "purchase", "website", "territory", "specify", "people", "future", "decide", "brochure", "picture")),
nrow = 2, byrow = TRUE)
for(k in 1:ncol(mat)){
sub_cont <- tm_map(sub_cont, change, mat[1, k], mat[2, k])
}
# Drop high-frequency but uninformative tokens.
sub_cont <- tm_map(sub_cont, removeWords,
c("even", "still", "just", "will", "yet", "can", "much", "car", "ford", "also", "one", "vehicl"))
# Document-term and term-document matrices plus sorted term frequencies.
dtm <- DocumentTermMatrix(sub_cont)
tdm <- TermDocumentMatrix(sub_cont)
dim(dtm)
# inspect(dtm[1:5, 1:5])
freq <- colSums(as.matrix(dtm))
# length(freq)
ord <- order(freq, decreasing = TRUE)
# table(freq)
freq <- freq[ord]
wf <- data.frame(word=names(freq), freq=freq)
head(wf)
# Histogram of Frequency
subset(wf, freq > 2) %>%
ggplot(aes(word, freq)) +
geom_bar(stat="identity") +
theme(axis.text.x=element_text(angle=45, hjust=1))
comp <- function(words, mat){
  # Map each word back to its original (unstemmed) form: row 1 of `mat` holds
  # the stemmed keys, row 2 the replacements. Unmatched words pass through.
  #
  # BUG FIX: the original `for (i in 1:length(words))` loop errored on empty
  # input (1:0 iterates over c(1, 0)) and failed when a key appeared more
  # than once in mat[1, ] (multi-element `which` assignment). match() picks
  # the first hit and handles length zero cleanly.
  hit <- match(words, mat[1, ])
  found <- !is.na(hit)
  words[found] <- mat[2, ][hit[found]]
  return(words)
}
# Restore stemmed words and order the factor levels by frequency for plotting.
wf$word <- comp(wf$word, mat)
wf$word <- factor(wf$word, levels = wf[order(wf[,2], decreasing = FALSE), 1], ordered=T)
# Horizontal bar chart of the most frequent words.
ggplot(subset(wf, freq > 25),aes(x= word, freq)) +
geom_bar(stat = "identity") +
coord_flip() +
ggtitle("Word Frequency > 25") +
ylab("Frequency") +
xlab("Word")
# png("Dendrogram_db.png", width=12, height=8, units="in", res=300)
# Word cloud (seeded for reproducible layout).
set.seed(123)
wordcloud(names(freq), freq, min.freq = 4, scale = c(5, .8),
random.order = FALSE, colors=brewer.pal(6, "Dark2"))
# Association plot
Attrs <- list(node = list(shape = "ellipse", fixedsize = FALSE,
style = "invis", fontcolor = "white",
fillcolor = "red"),
edge = list(dir = "both", color = "darkblue", weight = 1.2))
plot(dtm,
terms = findFreqTerms(dtm, lowfreq = 4),
corThreshold = 0.2,
attrs = Attrs,
weighting = TRUE)
# Cluster Dendrogram:
# DistMat <- dist(scale(as.matrix(tdm)))
# Distance matrix restricted to the 35 most frequent terms.
DistMat <- dist(scale(as.matrix(tdm)[order(rowSums(as.matrix(tdm)), decreasing = TRUE), ][1:35, ]))
fit <- hclust(DistMat)
# method = "ward.D", "ward.D2", "single", "complete", "average"...
plot(fit)
ggdendrogram(fit)
# cut tree into k clusters
rect.hclust(fit, k = 6)
# rect.hclust(tree, k = NULL, which = NULL, x = NULL, h = NULL,
# border = 2, cluster = NULL)
# kmeans
# findAssocs(dtm, "not", corlimit = 0.3)
#------------------------------------
# Comparison/commonality clouds across respondent groups; again, the last
# read.csv assignment is the dataset actually used.
df <- read.csv("FO_Compare.csv", head = FALSE)
df <- read.csv("FO_Compare2.csv", head = FALSE)
df <- sapply(df, as.character)
df <- df[, -1]
sub_cont <- Corpus(DataframeSource(df))
tdm6 <- as.matrix(tdm)
tdm6 <- tdm6[!rownames(tdm6) %in% c("new", "vehicl"), ]
# Column labels: the second assignment (3 columns) overrides the first —
# keep whichever matches the number of documents in the corpus.
colnames(tdm6) <- c("Drop1", "Drop2", "Increase1", "Increase2", "Same1", "Same2")
colnames(tdm6) <- c("Drop2", "Increase2", "Same2")
comparison.cloud(tdm6, random.order = F, max.words = Inf, title.size = 1.5)
commonality.cloud(tdm6, random.order=FALSE,
colors = brewer.pal(8, "Dark2"),
title.size=1.5)
|
5d2b30df4efb30376a04def5834f064b0187c8bb
|
6e7af9b27cf18bb4633ad9d0b63a7e8ed9a887fb
|
/man/ranges_df-pHSensor-method.Rd
|
8b049209ed639a484ebec5d26df2b9c9b25d13bd
|
[
"MIT"
] |
permissive
|
ApfeldLab/SensorOverlord
|
0fc62dd3c11b702cd477d0692085ea7be46911a7
|
2fbe7e0d0963561241d5c1e78dd131211e1b31a0
|
refs/heads/master
| 2022-12-27T15:20:27.343783
| 2020-10-13T23:28:48
| 2020-10-13T23:28:48
| 176,821,341
| 2
| 0
| null | 2020-06-14T15:37:09
| 2019-03-20T21:40:17
|
R
|
UTF-8
|
R
| false
| true
| 1,588
|
rd
|
ranges_df-pHSensor-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Sensor_Methods.R
\name{ranges_df,pHSensor-method}
\alias{ranges_df,pHSensor-method}
\title{Finds the ranges df of this pH sensor at given inaccuracies}
\usage{
\S4method{ranges_df}{pHSensor}(
object,
inaccuracies = c(0.02),
pHmin = 1,
pHmax = 14,
by = 0.001,
name = "Sensor",
thresholds = c(0.01, 0.05, 0.1, 0.15, 0.2)
)
}
\arguments{
\item{object}{A pHSensor object}
\item{inaccuracies}{(optional, default: c(0.02)) A vector of inaccuracies
(e.g. 0.02 for 2\% error), always relative}
\item{pHmin}{(optional, default: 1) The minimum pH
for which to record error}
\item{pHmax}{(optional, default: 14) The maximum pH
for which to record error}
\item{by}{(optional, default: 0.001) The granularity of the error table--e.g.,
by = 0.01 would record 7 and 7.01, etc.}
\item{name}{(optional, default: "Sensor") A name for this sensor}
\item{thresholds}{A vector of error thresholds (e.g. c(0.5, 1) for 0.5 and 1)}
}
\value{
A dataframe of suited ranges with these columns:
'Sensor_Name': the name of the sensor
'Minimum': the minimum pH measurable at the given inaccuracy
'Maximum': the maximum pH measurable at the given inaccuracy
'Inaccuracy': the inaccuracy associated with this row (relative)
'error_thresh': the error threshold associated with this row
}
\description{
Adding this method on 31 May 2020, hoping this style will deprecate
getErrorTable in the future.
}
\examples{
my_sensor <- new("pHSensor", new("Sensor", Rmin = 1, Rmax = 5, delta = 0.2), pKa = 7)
ranges_df(my_sensor)
}
|
0689eefd9f38ef5b43a36025c1440dabbd1d0f29
|
2d1f4a315a1b6fda16341144a95e62da2898c9d9
|
/workflow/daily_update.R
|
b6476aa746615e8adb5020a64c3d57a451329585
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
sonar98/covid-19
|
913893c15a5ff1f765b7c55bcd16b9ce185849a3
|
cd56342803266f83850be4170d54ad520f0966bf
|
refs/heads/master
| 2023-01-10T00:10:00.817896
| 2020-11-02T13:59:58
| 2020-11-02T13:59:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,458
|
r
|
daily_update.R
|
# Daily COVID-19 workflow: refresh the repo, regenerate the banner image,
# parse all data sources, merge them into one daily table, render plots, and
# post the daily summary tweet.
pull(repo)
# Generate Banner
source("workflow/generate_banner.R")
# Parse RIVM, NICE and corrections data
source("workflow/parse_nice-data.R")
source("workflow/parse_rivm-data.R")
source("workflow/parse_lcps-data.R")
source("workflow/parse_municipalities.R")
source("workflow/parse_corrections.R")
# Dutch locale so formatted dates render in Dutch.
Sys.setlocale("LC_TIME", "nl_NL")
## Merge RIVM, NICE and corrections data
rivm.by_day <- read.csv("data/rivm_by_day.csv")
nice.by_day <- read.csv("data-nice/nice-today.csv")
lcps.by_day <- read.csv("data/lcps_by_day.csv")
corr.by_day <- read.csv("corrections/corrections_perday.csv")
daily_datalist <- list(rivm.by_day,nice.by_day,lcps.by_day,corr.by_day)
# Left-join every source onto the RIVM table by date.
all.data <- Reduce(
function(x, y, ...) merge(x, y, by="date",all.x = TRUE, ...),
daily_datalist
)
all.data$date <- as.Date(all.data$date)
all.data <- all.data[order(all.data$date),]
write.csv(all.data, file = "data/all_data.csv",row.names = F)
source("plot_scripts/daily_plots.R")
#source("plot_scripts/daily_maps_plots.R")
# Re-read the merged table (and hospital admissions) for tweet composition.
all.data <- read.csv("data/all_data.csv")
nice_by_day <- read.csv("data/nice_by_day.csv")
## Corrections or not?
text.deaths.corrections <- paste0(ifelse(last(all.data$net.deaths)>=0," (+"," (-"),abs(last(all.data$net.deaths))," ivm ",last(all.data$corrections.deaths)," corr.)")
# get tokens
source("workflow/twitter/token_mzelst.R")
source("workflow/twitter/token_edwinveldhuizen.R")
## Build tweets
tweet.main <- paste0("#COVID19NL statistieken t.o.v. gisteren:
Positief getest: ",last(all.data$new.infection),"
Totaal: ",last(all.data$cases)," (+",last(all.data$net.infection)," ivm ",last(all.data$corrections.cases)," corr.)
Opgenomen*: ",last(all.data$Kliniek_Nieuwe_Opnames_COVID),"
Huidig*: ",last(all.data$Kliniek_Bedden),"
Opgenomen op IC*: ",last(all.data$IC_Nieuwe_Opnames_COVID),"
Huidig*: ",last(all.data$IC_Bedden_COVID),"
* LCPS cijfers - www.lcps.nu
Overleden: ",last(all.data$new.deaths),"
Totaal: ",last(all.data$deaths),"")
# Post the main tweet with the banner image and capture its id for threading.
posted_tweet <- post_tweet (
tweet.main,
token = token.mzelst,
media = (paste0("banners/",Sys.Date(),".png"))
) ## Post tweet
posted_tweet <- fromJSON(rawToChar(posted_tweet$content))
tweet.main.id <- posted_tweet$id_str
tweet.last_id <- tweet.main.id
# Retweet for @edwinveldhuizen
post_tweet (token = token.edwinveldhuizen,
retweet_id = tweet.main.id)
# Tweet for hospital numbers - Data NICE ####
# Compare the two most recent NICE JSON-derived CSV snapshots to get the
# day-over-day change in (suspected/proven) hospital and ICU intakes.
temp = tail(list.files(path = "data-nice/data-nice-json/",pattern="*.csv", full.names = T),2)
myfiles = lapply(temp, read.csv)
dat.today <- as.data.frame(myfiles[2])
dat.yesterday <- as.data.frame(myfiles[1])
Verpleeg_Opname_Bevestigd <- sum(dat.today$Hospital_Intake_Proven) - sum(dat.yesterday$Hospital_Intake_Proven)
Verpleeg_Opname_Verdacht <- sum(dat.today$Hospital_Intake_Suspected) - sum(dat.yesterday$Hospital_Intake_Suspected)
IC_Opname_Bevestigd <- sum(dat.today$IC_Intake_Proven) - sum(dat.yesterday$IC_Intake_Proven)
IC_Opname_Verdacht <- sum(dat.today$IC_Intake_Suspected) - sum(dat.yesterday$IC_Intake_Suspected)
Verpleeg_Huidig_Toename <- last(dat.today$Hospital_Currently) - last(dat.yesterday$Hospital_Currently)
IC_Huidig_Toename <- last(dat.today$IC_Current) - last(dat.yesterday$IC_Current)
hospital.cumulative <- rjson::fromJSON(file = "https://www.stichting-nice.nl/covid-19/public/zkh/intake-cumulative/",simplify = TRUE) %>%
map(as.data.table) %>%
rbindlist(fill = TRUE)
sign.hosp.nice <- paste0(ifelse(Verpleeg_Huidig_Toename>=0," (+"," ("))
sign.ic.nice <- paste0(ifelse(IC_Huidig_Toename>=0," (+"," ("))
tweet.nice <- paste0("#COVID19NL statistieken t.o.v. gisteren (data NICE):
Patiënten verpleegafdeling
Bevestigd: ",Verpleeg_Opname_Bevestigd,"
Verdacht: ",Verpleeg_Opname_Verdacht,"
Huidig: ",last(dat.today$Hospital_Currently),sign.hosp.nice,Verpleeg_Huidig_Toename,")
Totaal: ",last(hospital.cumulative$value),"
Patiënten IC
Bevestigd: ",IC_Opname_Bevestigd,"
Verdacht: ",IC_Opname_Verdacht,"
Huidig: ",last(dat.today$IC_Current),sign.ic.nice,IC_Huidig_Toename,")
Totaal: ",last(dat.today$IC_Cumulative))
# Tweet for report ####
posted_tweet <- post_tweet (
tweet.nice,
token = token.mzelst,
media = c("plots/positieve_tests_per_dag.png",
"plots/overview_aanwezig_zkh.png",
"plots/overview_opnames_zkh.png"
),
in_reply_to_status_id = tweet.last_id,
auto_populate_reply_metadata = TRUE
)
posted_tweet <- fromJSON(rawToChar(posted_tweet$content))
tweet.last_id <- posted_tweet$id_str
########
# report
########
tweet.report = "Ik heb een start gemaakt met een dagelijks epidemiologisch rapport (work in progress). Hierin vindt u kaarten en tabellen met gegevens per leeftijdsgroep, provincie, en GGD: https://github.com/mzelst/covid-19/raw/master/reports/daily_report.pdf"
posted_tweet <- post_tweet (
tweet.report,
token = token.mzelst,
in_reply_to_status_id = tweet.last_id,
auto_populate_reply_metadata = TRUE
)
posted_tweet <- fromJSON(rawToChar(posted_tweet$content))
tweet.last_id <- posted_tweet$id_str
##### Generate municipality images
source("workflow/generate_municipality_images.R")
#####
########
# Municipality tweet - cases
########
tweet.municipality.date <- Sys.Date() %>%
format('%d %b') %>%
str_to_title() %>%
str_replace( '^0', '')
tweet.municipality.colors <- read.csv("data/municipality-totals-color.csv", fileEncoding = "UTF-8")
tweet.municipality.cases <- "Geconstateerde besmettingen per gemeente %s
%s %d / 355 gemeentes
%s %d / 355 gemeentes
%s %d / 355 gemeentes
Zie de eerste afbeelding voor een uitgebreide legenda
[%s]
%s"
tweet.municipality.cases <- sprintf(tweet.municipality.cases,
intToUtf8(0x1F447),
intToUtf8(0x1F6D1),
tweet.municipality.colors$d0[[4]],
intToUtf8(0x1F7E3),
tweet.municipality.colors$d0[[5]],
intToUtf8(0x26A1),
tweet.municipality.colors$d0[[6]],
tweet.municipality.date,
'https://raw.githack.com/mzelst/covid-19/master/workflow/daily_municipality.html'
)
Encoding(tweet.municipality.cases) <- "UTF-8"
posted_tweet <- post_tweet (
tweet.municipality.cases,
token = token.edwinveldhuizen,
media = c("plots/list-cases-head.png", "plots/list-cases-all-part1.png", "plots/list-cases-all-part2.png", "plots/list-cases-all-part3.png"),
in_reply_to_status_id = tweet.main.id,
auto_populate_reply_metadata = TRUE
)
posted_tweet <- fromJSON(rawToChar(posted_tweet$content))
tweet.last_id <- posted_tweet$id_str
rm(tweet.municipality.cases, tweet.municipality.colors, posted_tweet)
post_tweet (
token.mzelst,
retweet_id = tweet.last_id)
########
# Municipality tweet - hospital admissions
########
tweet.municipality.hosp <- "Positief geteste patiënten per gemeente die zijn opgenomen met specifiek COVID-19 als reden v. opname
[%s]"
tweet.municipality.hosp <- sprintf(tweet.municipality.hosp,
tweet.municipality.date
)
posted_tweet <- post_tweet (
tweet.municipality.hosp,
token = token.edwinveldhuizen,
media = c("plots/list-hosp-head.png", "plots/list-hosp-all-part1.png", "plots/list-hosp-all-part2.png", "plots/list-hosp-all-part3.png"),
in_reply_to_status_id = tweet.last_id,
auto_populate_reply_metadata = TRUE
)
posted_tweet <- fromJSON(rawToChar(posted_tweet$content))
tweet.last_id <- posted_tweet$id_str
rm(tweet.municipality.hosp, posted_tweet)
########
# Municipality tweet - deaths
########
tweet.municipality.deaths <- "Patiënten per gemeente die positief getest zijn op COVID-19 en helaas zijn overleden
[%s]
Onze condoleance en veel sterkte aan alle nabestaanden. %s"
tweet.municipality.deaths <- sprintf(tweet.municipality.deaths,
tweet.municipality.date,
intToUtf8(0x1F339)
)
Encoding(tweet.municipality.deaths) <- "UTF-8"
posted_tweet <- post_tweet (
tweet.municipality.deaths,
token = token.edwinveldhuizen,
media = c("plots/list-deaths-head.png", "plots/list-deaths-all-part1.png", "plots/list-deaths-all-part2.png", "plots/list-deaths-all-part3.png"),
in_reply_to_status_id = tweet.last_id,
auto_populate_reply_metadata = TRUE
)
posted_tweet <- fromJSON(rawToChar(posted_tweet$content))
tweet.last_id <- posted_tweet$id_str
rm(tweet.municipality.deaths, tweet.municipality.date, posted_tweet)
##### Download case file
rivm.data <- utils::read.csv("https://data.rivm.nl/covid-19/COVID-19_casus_landelijk.csv", sep =";") ## Read in data with all cases until today
filename <- paste0("data-rivm/casus-datasets/COVID-19_casus_landelijk_",Sys.Date(),".csv")
write.csv(rivm.data, file=filename,row.names = F) ## Write file with all cases until today
#####
Sys.setenv(RSTUDIO_PANDOC="C:/Program Files/RStudio/bin/pandoc"); rmarkdown::render('reports/daily_report.Rmd') ## Render daily report
file.copy(from = list.files('reports', pattern="*.pdf",full.names = TRUE),
to = paste0("reports/daily_reports/Epidemiologische situatie COVID-19 in Nederland - ",
format((Sys.Date()),'%d')," ",format((Sys.Date()),'%B'),".pdf")) ## Save daily file in archive
git.credentials <- read_lines("git_auth.txt")
git.auth <- cred_user_pass(git.credentials[1],git.credentials[2])
## Push to git
repo <- init()
add(repo, path = "*")
commit(repo, all = T, paste0("Daily (automated) update RIVM and NICE data ",Sys.Date()))
push(repo, credentials = git.auth)
## Workflows for databases
source("workflow/dashboards/cases_ggd_agegroups.R")
source("workflow/dashboards/date_statistics_mutations.R")
source("workflow/parse_age-data.R")
source("workflow/dashboards/heatmap-age-week.R")
source("workflow/dashboards/rivm-date-corrections.R")
|
b56b6cc6d76b6ec989ada4e3dbde3c44e0f2cb89
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/RPresto/R/utility_functions.R
|
8ec4fe75864e09691d51c27e57e53c942f06b0ee
|
[
"BSD-3-Clause"
] |
permissive
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,266
|
r
|
utility_functions.R
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
check.status.code <- function(response) {
status <- httr::status_code(response)
if (status >= 400 && status < 500) {
text.content <- httr::content(response, as = "text", encoding='UTF-8')
if (is.null(text.content) || !nzchar(text.content)) {
httr::stop_for_status(status)
}
stop('Received error response (HTTP ', status, '): ', text.content)
}
}
response.to.content <- function (response) {
text.content <- httr::content(response, as = "text", encoding='UTF-8')
return(jsonlite::fromJSON(text.content, simplifyVector = FALSE))
}
wait <- function () {
# sleep 50 - 100 ms
Sys.sleep(stats::runif(n = 1, min = 50, max = 100) / 1000)
}
get.state <- function (content) {
if (is.null(content[['stats']])
|| is.null(content[['stats']][['state']])
) {
stop('No state information in content')
}
return(content[['stats']][['state']])
}
stop.with.error.message <- function (content) {
query.id <- content[['id']]
message <- content[['error']][['message']]
stop("Query ", query.id, " failed: ", message)
}
|
4125bacb354cf965e09500193d7b47218a7e8024
|
76a4c4703c9c23f43e5f9ffcf6e800c36f5c92df
|
/IR_DS_filtering_workflow.R
|
9f6735461bc8bbe2280e6a7208c9eb7e0810711f
|
[] |
no_license
|
danjst/PDAC_2020
|
f00bd18dbef619e1f4a08f9a9b771a3d3b7efd20
|
4ebaf0a1b2231c12b34913ec8cbf113a9e4381f6
|
refs/heads/master
| 2023-04-27T10:44:18.702881
| 2020-09-23T11:11:49
| 2020-09-23T11:11:49
| 295,741,952
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,237
|
r
|
IR_DS_filtering_workflow.R
|
# first filter out events with more than 10% NA
pdac_less_10_percent_NA <- all_pdac_events[rowSums(is.na(all_pdac_events)) < 0.1 * ncol(all_pdac_events), ]
###Next filtering for less than 0.05 abs NA difference between 2 clusters
#make pdac_cluster1 and pdac_cluster2 first
pdac_10na_2<-pdac_less_10_percent_NA[,pdac_cluster2]
pdac_10na_1<-pdac_less_10_percent_NA[,pdac_cluster1]
pdac_1_NAs <- apply(pdac_10na_1, 1, function(x) sum(is.na(x)))
pdac_1_NAs_percent<-pdac_2_NAs/40
pdac_2_NAs <- apply(pdac_10na_2, 1, function(x) sum(is.na(x)))
pdac_2_NAs_percent<-pdac_2_NAs/36
pdac_1_minus_2_percent<-pdac_1_NAs_percent-pdac_2_NAs_percent
pdac_1_minus_2_percent<-abs(pdac_1_minus_2_percent)
pdac_over_0.05<-c()
for (i in 1:nrow(pdac_less_10_percent_NA)){
if (pdac_1_minus_2_percent[[i]]>0.05){pdac_over_0.05<-c(pdac_over_0.05,names(pdac_1_minus_2_percent[i]))}
}
pdac_less_10_percent_NA_also_0.05 <- pdac_less_10_percent_NA[!row.names(pdac_less_10_percent_NA)%in%pdac_over_0.05,]
#join with DS list
#re-adjust p-values here
#check for significance here, filter padj<0.05
###doing the 0.1 mean psi difference
pdac_10na_0.05_1<-pdac_less_10_percent_NA_also_0.05[,pdac_cluster1]
pdac_10na_0.05_2<-pdac_less_10_percent_NA_also_0.05[,pdac_cluster2]
pdac_rowmeans_1<-rowMeans(pdac_10na_0.05_1,na.rm = T)
pdac_rowmeans_2<-rowMeans(pdac_10na_0.05_2,na.rm = T)
pdac_rowmeans_difference<-pdac_rowmeans_1-pdac_rowmeans_2
pdac_rowmeans_difference_greater_0.1<-pdac_rowmeans_difference[abs(pdac_rowmeans_difference)>0.1]
pdac_final_event_rownames<-names(pdac_rowmeans_difference_greater_0.1)
pdac_final_ds_events <- pdac_less_10_percent_NA_also_0.05[row.names(pdac_less_10_percent_NA_also_0.05)%in%pdac_final_event_rownames,]
pdac_final_event_meanpsi_values<-as.numeric(paste(unlist(pdac_rowmeans_difference_greater_0.1)))
pdac_final_ds_events<-cbind(pdac_final_ds_events,pdac_final_event_meanpsi_values)
pdac_final_ds_events_x<-cbind(pdac_final_ds_events[1:6],pdac_final_event_meanpsi_values)
pdac_final_ds_events_x$gene_no_version<-substr(pdac_final_ds_events_x$gene_old_ensembl,start=1,stop=15)
pdac_final_ds_events_x<-join_all(list(pdac_final_ds_events_x,gencodev22_bed_2020), by = c('gene_no_version'), type = "left", match = "all")
|
eff61bacdf25705d855f0d8080b4cfac3e5f7a3f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/syuzhet/examples/get_sentences.Rd.R
|
9f9c80ec3869e03a2b5ebfda657c357b540a322d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 500
|
r
|
get_sentences.Rd.R
|
library(syuzhet)
### Name: get_sentences
### Title: Sentence Tokenization
### Aliases: get_sentences
### ** Examples
(x <- c(paste0(
"Mr. Brown comes! He says hello. i give him coffee. i will ",
"go at 5 p. m. eastern time. Or somewhere in between!go there"
),
paste0(
"Marvin K. Mooney Will You Please Go Now!", "The time has come.",
"The time has come. The time is now. Just go. Go. GO!",
"I don't care how."
)))
get_sentences(x)
get_sentences(x, as_vector = FALSE)
|
9e0c85ef630f54dbc7065fb389a81c43d9929aaf
|
1811c5e994ab0d62884a02639b425f1da7b7bde4
|
/R/graphics/ggplot2/ggplot2_tut.R
|
5207a7101449897f1bd46e8b934aee8da806cea1
|
[] |
no_license
|
aufrank/tutorials
|
62aa2fb8d3e5eb061fb6693e58597252fc970dc7
|
12b06fdfffee7bde6127dc437cdaa0b58a49bcdd
|
refs/heads/master
| 2021-01-19T03:13:56.736438
| 2009-02-25T20:17:43
| 2009-02-25T20:17:43
| 137,469
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,224
|
r
|
ggplot2_tut.R
|
library(ggplot2)
library(plyr)
## get an interactive window
quartz()
## x11()
## lexical decision data
data(english, package="languageR")
## default scatterplot
qplot(x = WrittenFrequency, y = RTlexdec, data = english,
main = "Frequency and Reaction Time")
## transform the dependent variable
qplot(x = WrittenFrequency, y = exp(RTlexdec), data = english)
## color for groups
qplot(x = WrittenFrequency, y = RTlexdec, data = english,
colour=AgeSubject)
## color for continuous variable
qplot(WrittenFrequency, RTlexdec, data=english,
colour=WrittenSpokenFrequencyRatio)
## shape for groups
qplot(x = WrittenFrequency, y = RTlexdec, data = english,
shape=AgeSubject)
## colour and shape for groups
qplot(x = WrittenFrequency, y = RTlexdec, data = english,
colour=AgeSubject,
shape=WordCategory)
## colour, shape, and size
qplot(x = WrittenFrequency, y = RTlexdec, data = english,
colour=AgeSubject,
shape=WordCategory,
size=FamilySize)
## panels
## one conditioning variable
qplot(x = WrittenFrequency, y = RTlexdec, data = english,
colour=AgeSubject,
shape=Frication,
facets = . ~ WordCategory)
## panels with totals shown
qplot(x = WrittenFrequency, y = RTlexdec, data = english,
colour=AgeSubject,
shape=AgeSubject,
facets = . ~ WordCategory,
margins=TRUE)
## two conditioning variables
qplot(x = WrittenFrequency, y = RTlexdec, data = english,
colour=AgeSubject,
shape=Frication,
facets = CV ~ WordCategory)
## panels with totals shown
qplot(x = WrittenFrequency, y = RTlexdec, data = english,
colour=AgeSubject,
shape=Frication,
facets = CV ~ WordCategory,
margins=TRUE)
## changing type of plot requires changing the "geom"
qplot(x = WrittenFrequency, data = english,
geom = "density", fill=rev(AgeSubject))
## histograms don't have a sensible default for bin size
qplot(x = RTlexdec, data = english,
geom = "histogram",
fill=AgeSubject)
## stacked bar charts based on counts are the default
qplot(x=WordCategory, data = english,
geom = "bar",
fill=Voice)
## use "fill" positioning to convert them to proportions
qplot(x=WordCategory, data = english,
geom = "bar",
fill=Voice,
position="fill")
## use "dodge" positioning to unstack them
qplot(x=WordCategory, data = english,
geom = "bar",
fill=Voice,
position="dodge")
## use stat="identity" if you've already aggregated your data and want
## to plot the results
d <- with(english, aggregate(RTlexdec, by=list(Age=AgeSubject, Category=WordCategory), FUN=mean))
qplot(x=Age, y=x, data=d,
fill=Category,
geom="bar",
stat="identity",
position="dodge")
## plots from scratch, without qplot
## boxplot
bxp <- ggplot(data = english, aes(x=AgeSubject, y=RTlexdec))
bxp <- bxp + stat_boxplot(aes(fill=WordCategory))
(bxp)
## barplot of means
bpm <- ggplot(data=english, aes(x=AgeSubject, y=RTlexdec, fill=WordCategory))
bpm <- bpm +
stat_summary(fun.y=mean, geom="bar", pos="dodge")
(bpm)
## change the scale on the y axis to zoom in on the relevant region of
## the data
## NB: If you don't want to show the entire range starting
## from 0, you should consider using a dot plot instead of a bar plot
bpm <- bpm + scale_y_continuous(limits=c(5,7))
(bpm)
## bar plot with automatically calculated normal error bars
bpe <- ggplot(data=english, aes(x=AgeSubject, y=RTlexdec, fill=WordCategory))
bpe <- bpe + stat_summary(fun.y="mean", geom="bar", pos="dodge")
bpe <- bpe + stat_summary(fun.data="mean_cl_normal", geom="errorbar", width=0.2)
(bpe)
## dot plot of means
dpm <- ggplot(data=english, aes(x=AgeSubject, y=RTlexdec, colour=WordCategory))
dpm <- dpm + stat_summary(fun.y="mean", geom="point")
(dpm)
## dot plot of means with error bars of 2*sd
dpe <- ggplot(data=english,
aes(x=AgeSubject, y=RTlexdec, colour=WordCategory))
dpe <- dpe + stat_summary(fun.data="mean_sdl", geom="pointrange")
(dpe)
## horizontal dotplot, often used to display regression parameters
dpe <- dpe + coord_flip()
(dpe)
## xyplot with grouping by color
xy.tr <- ggplot(data=english, aes(x=WrittenFrequency, y=RTlexdec, colour=AgeSubject))
xy.tr <- xy.tr + geom_point()
## now add a specific color scale where we define an alpha level
xy.tr <- xy.tr + scale_colour_hue(alpha=1/3)
(xy.tr)
## add a smoother to our scatterplot
xy.tr <- xy.tr + stat_smooth()
(xy.tr)
## or we can specify that the smooth be based on a linear model
qplot(WrittenFrequency, RTlexdec, data=english, colour=AgeSubject) +
scale_colour_hue(alpha=1/3) +
stat_smooth(method=lm)
## and we can add a robust linear smooth if we want
library(MASS)
last_plot() + stat_smooth(method=rlm)
## apply the black and white theme to our plot
last_plot() + theme_bw()
## and change font sizes. See page 123 and 124 of the ggplot2 book.
last_plot() + opts(title = "Frequency Effects",
plot.title=theme_text(size=24))
## finally, let's make a hexbinplot
hbp <- ggplot(english, aes(x=WrittenFrequency, y=RTlexdec)) +
stat_binhex() +
stat_smooth(method=rlm, colour=I("orange"), size=1.5) +
opts(panel.background=theme_blank())
hbp
|
1b961282611fcf9061b095bfa0dfc3402805e1bf
|
a176626eb55b6525d5a41e2079537f2ef51d4dc7
|
/Uni/Projects/code/P050.Haifa.Mon/cn001_indiv_V4.r
|
9999ebb42fd519d4ed42f2e77b4ccbe600a4797f
|
[] |
no_license
|
zeltak/org
|
82d696b30c7013e95262ad55f839998d0280b72b
|
d279a80198a1dbf7758c9dd56339e8a5b5555ff2
|
refs/heads/master
| 2021-01-21T04:27:34.752197
| 2016-04-16T04:27:57
| 2016-04-16T04:27:57
| 18,008,592
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,431
|
r
|
cn001_indiv_V4.r
|
###############
#LIBS
###############
library(lme4)
library(reshape)
library(foreign)
library(plyr)
library(dplyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(readr)
library(stargazer)
library(splines)
library(sjPlot)
#ind <-read.csv("/home/zeltak/ZH_tmp/dat/indiv1415av.csv")
ind <-read.dbf("/home/zeltak/ZH_tmp/dat/tipot_all_SPSS_6.02.16.dbf")
names(ind)
ind$month = as.numeric(format(ind$BIRTH_DATE,"%m"))
#rename
setnames(ind,old=c("WEIGHT1_VA", "HEAD1_VALU","PREGNANCYW"),new=c("birthw", "headc","ges"))
# #subset data
#ind<-ind[,c("Head1_Valu","X","Y","Gender","Weight1_Va","Mother_Nat","PregnancyW","month","TotalSibli","Education_","ApgarOneMi","ApgarFiveM","POPULATION","HOUSEHOLDS","AVERAGE_HH","DENSITY","OWNERSHIP","RENTALS","BAGRUT","BA","INCOME","N_AIRPORT","N_BAZAN","N_POWERSTA","N_OIL_L","N_OIL_S","N_ROAD","nox","day","Postal","Mother_Bir","pm25","so2","nox2014","Elevation","People_est","Pop_arnona"),with=FALSE]
ind<-select (ind,birthw,headc,ges,X,Y,SEX,POPULATION,HOUSEHOLDS,AVERAGE_HH)
#save for GIS
# #clean data
# #delete bad data
# ind<-filter(ind,ges >= 20 )
# ind<-filter(ind,ges <= 44 )
# #clean all crap data
# ind[ind == -999] <- NA
# ind[ind == -9.99] <- NA
# ind<-filter(ind,OWNERSHIP >= 0 )
# ind<-filter(ind,AVERAGE_HH >= 0 )
# ind<-filter(ind,BA >= 0 )
# ind<-filter(ind,birthw >= 2 )
# summary(ind$Mother_Nat)
# ###recoding
# #finer race
# ind$mrn.n<-0 #for jews
# ind<- ind[Mother_Nat == "יהודי" , mrn.n := 1] #jewish
# #gender 01
# ind$sex<-1 #for male
# ind<- ind[Gender == "נקבה" , sex := 0] #female
# #only nox liner, dist linear, #nox was most significance
# ind$N_BAZAN<-ind$N_BAZAN/1000
# ind$N_AIRPORT<-ind$N_AIRPORT/1000
# ind$N_POWERSTA<-ind$N_POWERSTA/1000
# ind$N_OIL_L<-ind$N_OIL_L/1000
# ind$N_OIL_S<-ind$N_OIL_S/1000
# ind$N_ROAD<-ind$N_ROAD/1000
# ind$headcWT<-ind$headc/ind$birthw
# ind$birthwHC<-ind$birthw/ind$headc
ind.pre<-filter(ind, ges < 37)
ind.full<-filter(ind, ges >= 37)
#write.dbf(ind,"/home/zeltak/ZH_tmp/dat/indiv1415_clean.dbf")
#normal
##Headc regression nox
m1.formula <- as.formula(headc.bc ~birthw+ges+as.factor(SEX)+as.factor(month)+MOTHER_COU+LNDIST_ROA+LNDIST_OIL)
h1 <- lm(m1.formula,data=ind)
summary(h1)
##Headc regression nox pre
h1.pre <- lm(m1.formula,data=ind.pre)
summary(h1.pre)
##Headc regression nox full
h1.full <- lm(m1.formula,data=ind.full)
summary(h1.full)
#html
stargazer(h1,h1.full,h1.pre,
type="html",
dep.var.labels=c("Head Circumference"),
column.labels=c("Full model","pre term","Full term"),
title="Factors affecting head circumference in the Haifa Bay area (dependent variables – head circumference (centimeters), Box-Cox transformed values (λ=1.752); method – OLS regression)",
intercept.bottom = TRUE,
omit.stat = c("ser"),
report=('vct*'),
model.numbers = FALSE,
single.row = TRUE,
#remove DF
df = FALSE,
#which variables to keep
keep = c("birthw","ges","SEX","MOTHER_COU","LNDIST_ROA","LNDIST_OIL"),
covariate.labels = c("Birth Weight (kg)", "pregnancy (weeks)","Gender (1=boy, 0=girl)",
"2nd quarter", "3rd quarter", "Fourth quarter"),
out="~/ZH_tmp/models_HC.htm")
birthw+ges+as.factor(SEX)+as.factor(month)+MOTHER_COU+LNDIST_ROA+LNDIST_OIL
|
2e6797095be833550c203b81909d8b7d1e53230f
|
55cb4f0c690a409b41b5b6e8cd0eb9d322c115d6
|
/R/Bossa_Simi.R
|
a0423831d7f002ace16de5650671a8970f6d847b
|
[] |
no_license
|
TinyOpen/OnGoing
|
d2b711233c08b59a7ef8abfe10395912092ebbab
|
776a2297267c7ed57aac77c41bd7380a917a4157
|
refs/heads/master
| 2021-01-23T15:28:19.375929
| 2017-11-12T07:30:45
| 2017-11-12T07:30:45
| 102,712,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,867
|
r
|
Bossa_Simi.R
|
#' Bind two factors
#'
#' Create a new factor from two existing factors, where the new factor's levels
#' are the union of the levels of the input factors.
#'
#' @param a factor
#' @param b factor
#'
#' @return factor
#' @export
#' @examples
#'#' fbind(iris$Species[c(1, 51, 101)], PlantGrowth$group[c(1, 11, 21)])
BossaClust <- function(data, alpha = 1,
p = c(0.9, 0.8, 0.7, 0.5), lin = 0.2,
is.pca = TRUE, pca.sum.prop = 0.95, fix.pca.comp = FALSE, n.comp = 50,
cri = 1, lintype = "ward.D2")
{
require("psych")
# Check input data --------------------------
data.pre <- BossaSimi(data, is.pca = is.pca, pca.sum.prop = pca.sum.prop,
fix.pca.comp = fix.pca.comp, n.comp = n.comp, alpha = alpha)
data.simi <- data.pre$bossa.simi
n <- dim(data)[1]
# Do overlap cluster with "SC" method -----------------------------
overlap.pre <- OverlapClust(data.simi, p = p, lin = lin)
overlap.clu <- overlap.pre$overlap.clu
clust.center <- overlap.pre$clust.center
# Merge clusters-----------------------------
sum.clu <- dim(overlap.clu)[2] - 2
colnames(overlap.clu) <- c("first.clu", "belong.layer",
paste("clust.", 1:sum.clu, sep = ""))
ori.clu <- overlap.clu[,-c(1,2)]
shmat <- clush(overlap.clu[, -c(1, 2)])
sig.lev <- ifelse(cri < 1, cri,
ifelse(cri == 1, 0.05/sum.clu, 0.05/sum.clu/(sum.clu-1)))
if(sum.clu < 2) return(list(clust.center = clust.center,
overlap.clu = overlap.clu, shmat = shmat, p = p))
clumatch<-keyfeat(ori.clu, sig.lev)
scrit0<-clumatch$scrit0
scrit1<-clumatch$scrit1
clu.dis<-as.dist(clumatch$stat)
merclu<-clumatch$kfp
sepclu<-clumatch$kfn
# Take charge of the left cells
non.core.ind <- (1:n)[apply(overlap.clu[, -c(1,2)], 1, sum) == 0]
k1<-dim(clust.center)[1]
for (i in non.core.ind){
maxci<-rep(0,k1)
ij<-0
for (j in 1:k1){
ij<-ij+1
maxci[ij]<-quantile(data.simi[i,overlap.clu[,1]==j], 0.5)
}
max.ind<-which.max(maxci)
overlap.clu[i,1]<-max.ind
overlap.clu[i,(max.ind+2)]<-1
}
clu.hc <- hclust(clu.dis,lintype)
tree.max <-max(cutree(clu.hc, h = scrit0))
tree.min <-max(cutree(clu.hc, h = scrit1))
clu.merge <- sapply(tree.min:tree.max, ClustMerge)
cell.hc.clust <- sapply(tree.min:tree.max, function(x){
hc.clust <- hclust(as.dist(data.pre$bossa.disimi), lintype)
hc.tree <- cutree(hc.clust, k = 13)
})
return(list(overlap.clu = clu.merge, non.overlap.clu = cell.hc.clust,
ori.overlap = overlap.clu, clust.center = clust.center,
clu.dis = clu.dis, tree.max = tree.max, tree.min = tree.min,
cell.simi = data.simi))
}
plot.interactive <- function(object){
}
|
9e5cd4b877d4b28e7f0a75539626be3b3bed0530
|
c3f1366b81357f78e9d30988ef3770d4a253ec6e
|
/man/summary.stdf.Rd
|
79dd889f0c6ca608e8b449034ef2efb49586b5fe
|
[] |
no_license
|
guiludwig/stdf
|
ae6716d48eb4c398d73b15c199f799d0d5f04cd1
|
76359cf39a96e94965a584e659789f669e1dbd12
|
refs/heads/master
| 2021-01-21T23:41:11.641890
| 2019-03-19T19:28:44
| 2019-03-19T19:28:44
| 24,806,915
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 275
|
rd
|
summary.stdf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{summary.stdf}
\alias{summary.stdf}
\title{Summarizing STDF Model Fits}
\usage{
\method{summary}{stdf}(object, ...)
}
\description{
\code{summary} method for class \code{"stdf"}.
}
|
f84fadb9ec9d6a13dedc5acf016f238523211ea8
|
d76b09f9edb1d7ce0b5f34573b57bf42c1e9d2e6
|
/intro/example_2.r
|
2e54e80f5ed3c88cd7f0e1999e1b7fc202a6de64
|
[] |
no_license
|
rmporsch/automate-science
|
f32175f7862d902d92a53eecb1f03f54d026e85b
|
73d318e77a81bdb72769f8e6fcb06c92deab73a7
|
refs/heads/master
| 2020-07-15T08:56:32.183289
| 2017-07-06T02:51:00
| 2017-07-06T02:51:00
| 94,306,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 269
|
r
|
example_2.r
|
# Import some data
message("importing data")
dat <- read.csv("data.csv")
# convert Fahrenheit to Celsius
message("convert Fahrenheit to Celsius")
dat$Temp.C <- (dat$Temp - 32)/1.8
# write table back to disk
message("writing to disk")
write.csv(dat, "data_withC.csv")
|
5f708fafe3125772c7f9e5c65fa7d1a1b7dc06da
|
0f2405028a6d5a919445f405b449e50d176e3b92
|
/Code/Make Tidy.R
|
60d1e89d2310a4a27f74bdecaa125716df35b271
|
[] |
no_license
|
Flashlightis/Capstone-Project
|
8451375aef4ccdf0f022c76db14cf7bab46c3d2d
|
f1dbe9cd0d985138a591cebd96971a7fd5c4dea7
|
refs/heads/master
| 2021-10-04T03:31:32.830578
| 2018-12-03T04:42:51
| 2018-12-03T04:42:51
| 125,099,535
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,853
|
r
|
Make Tidy.R
|
# Make Tidy - All Data
df3_clean <- gather(df3, Web_Metric, Number, c("Sessions", "Users", "New_Users", "Pageviews",
"Number_Session", "Pages_Session", "Avg_S_Duration",
"Bounce"))
df4_m2 <- mutate(df3_2014,
month = as.POSIXlt(Date)$mon + 1)
df4_m2 <- group_by(df4_m2, month)
df4_2014_month <- summarize(df4_m2, Sessions = sum(Sessions, na.rm = TRUE),
Pageviews = sum(Pageviews, na.rm = TRUE),
Users = sum(Users, na.rm = TRUE),
New_Users = sum(New_Users, na.rm = TRUE),
Number_Session = sum(Number_Session, na.rm = TRUE),
Pages_Session = sum(Pages_Session, na.rm = TRUE),
Avg_S_Duration = sum(Avg_S_Duration, na.rm = TRUE),
Bounce = mean(Bounce, na.rm = TRUE))
df4_2014_month <- gather(df4_2014_month, Web_Metric, Number, c("Sessions", "Users", "New_Users")) # Make Tidy
# 2015
df5_m2 <- mutate(df3_2015,
month = as.POSIXlt(Date)$mon + 1)
df5_m2 <- group_by(df5_m2, month)
df5_2015_month <- summarize(df5_m2, Sessions = sum(Sessions, na.rm = TRUE),
Pageviews = sum(Pageviews, na.rm = TRUE),
Users = sum(Users, na.rm = TRUE),
New_Users = sum(New_Users, na.rm = TRUE),
Number_Session = sum(Number_Session, na.rm = TRUE),
Pages_Session = sum(Pages_Session, na.rm = TRUE),
Avg_S_Duration = sum(Avg_S_Duration, na.rm = TRUE),
Bounce = mean(Bounce, na.rm = TRUE))
df5_2015_month <- gather(df5_2015_month, Web_Metric, Number, c("Sessions", "Users", "New_Users")) # Make Tidy
# 2016
df6_m2 <- mutate(df3_2016,
month = as.POSIXlt(Date)$mon + 1)
df6_m2 <- group_by(df6_m2, month)
df6_2016_month <- summarize(df6_m2, Sessions = sum(Sessions, na.rm = TRUE),
Pageviews = sum(Pageviews, na.rm = TRUE),
Users = sum(Users, na.rm = TRUE),
New_Users = sum(New_Users, na.rm = TRUE),
Number_Session = sum(Number_Session, na.rm = TRUE),
Pages_Session = sum(Pages_Session, na.rm = TRUE),
Avg_S_Duration = sum(Avg_S_Duration, na.rm = TRUE),
Bounce = mean(Bounce, na.rm = TRUE))
df6_2016_month <- gather(df6_2016_month, Web_Metric, Number, c("Sessions", "Users", "New_Users")) # Make Tidy
# 2017
df7_m2 <- mutate(df3_2017,
month = as.POSIXlt(Date)$mon + 1)
df7_m2 <- group_by(df7_m2, month)
df7_2017_month <- summarize(df7_m2, Sessions = sum(Sessions, na.rm = TRUE),
Pageviews = sum(Pageviews, na.rm = TRUE),
Users = sum(Users, na.rm = TRUE),
New_Users = sum(New_Users, na.rm = TRUE),
Number_Session = sum(Number_Session, na.rm = TRUE),
Pages_Session = sum(Pages_Session, na.rm = TRUE),
Avg_S_Duration = sum(Avg_S_Duration, na.rm = TRUE),
Bounce = mean(Bounce, na.rm = TRUE))
df7_2017_month <- gather(df7_2017_month, Web_Metric, Number, c("Sessions", "Users", "New_Users")) # Make Tidy
# Convert to Tidy Dataset ---
df3_ty2 <- gather(df3_Total_year, Web_Metric, Number, Sessions:Users)
df3_ty3 <- gather(df3_Total_year, Web_Metric, Number, c("Sessions", "Users", "New_Users"))
View(df3_ty)
View(df3_ty2)
View(df3_ty3)
|
a97ccdf5686c9ff1929e318c7504d536b32acdc7
|
c5f708e71aae6e56605e9f30a57e349ca1ced4a4
|
/server.R
|
51a392a928bb3b5f6f18a209747568c1c21b78e9
|
[] |
no_license
|
xjlc/climit
|
977f6b3aa6644fa63fd4c344ad98a3e8e43275fb
|
07656c44691e7c6697f90c45c1218320e481210e
|
refs/heads/master
| 2021-01-10T21:21:07.621348
| 2015-01-22T06:33:22
| 2015-01-22T06:33:22
| 29,587,583
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,184
|
r
|
server.R
|
# illustration of the CLT
# based on a github gist by https://github.com/tgouhier/climit
library(shiny)
shinyServer(function(input, output) {
# one simulation run: generate random numbers
simdata <- function(input, n) {
if (input$dist=="rpois") {
vals <- do.call(input$dist, list(n=input$n, lambda=1))
} else if (input$dist=="rbinom") {
vals <- do.call(input$dist, list(n=input$n, size=30, p=.25))
} else {
vals <- do.call(input$dist, list(n=input$n))
}
}
data <- reactive({
vals <- simdata(input, n)
return (list(fun=input$dist, vals=vals))
})
output$plot <- renderPlot({
# generate plot title based on user-chosen distribution
distname <- switch(input$dist,
runif = "Uniform distribution", # (n = ",
rnorm = "Normal distribution", # (n = ",
rlnorm = "Log-normal distribution", # (n = ",
rexp = "Exponential distribution", # (n = ",
rbinom = "Binomial distribution (n=30, p=.25)",
rpois = "Poisson distribution",
rcauchy = "Cauchy distribution") # (n = ")
# extract parameters from user input
n <- input$n
N <- input$N
pdist <- data()$vals
# generate N samples
x <- replicate(N, simdata(input, n))
# extract means of samples
# note: this was rowMeans in the original code, but I think that was a mistake
ndist <- colMeans(x)
# expected values from the literature/formulary
expect <- switch(input$dist,
rexp = c(1^-1, 1^-2),
rnorm = c(0, 1),
rlnorm = c(exp(0+(1/2)*1^2), exp(0 + 1^2)*(exp(1^2)-1)),
runif = c(0.5, (1/12)*1),
rbinom = c(30*.25, 30*.25*.75),
rpois =c(1, 1),
rcauchy = rep(NA, 2))
obs <- data.frame(pdist=c(mean(pdist), var(pdist)), ndist=c(mean(ndist), var(ndist)))
# TODO: better visualization, ggplot?, add means, samples, etc.
nbreaks <- 10
par(mfrow=c(2,2))
# first panel: a single simulation
pdens <- density(pdist)
phist <- hist(pdist, plot=FALSE)
hist(pdist, main=paste("A single sample of", n, "observations\nfrom the", distname),
xlab="Values (X)", freq=FALSE, ylim=c(0, max(pdens$y, phist$density)), breaks=nbreaks)
lines(pdens, col="black", lwd=2)
abline(v=obs$pdist[1], col="blue", lwd=2, lty=2)
abline(v=expect[1], col="red", lwd=2, lty=2)
legend(x="topright", col=c("black", "red"), lwd=2, lty=2,
legend=c("Observed", "Expected"))
box()
# second panel: add a plot showing the individual distributions
# densities <- apply(x, 2, density, bw="SJ", adjust=2)
if (input$dist=="rexp" | input$dist=="rlnorm" | input$dist=="rpois") {
xl <- c(0, max(as.vector(x)))
densities <- apply(x, 2, density, from=0.05)
} else if (input$dist=="runif") {
xl <- c(0, 1)
densities <- apply(x, 2, density, n=512, from=0.02, to=.98)
} else {
xl <- range(as.vector(x))
densities <- apply(x, 2, density)
}
plot(densities[[1]], type="l", lwd=.5, xlim=xl, ylim=c(0, max(sapply(densities, "[[", "y"))), main="Individual samples (smoothed)\nwith sample means in red", xlab="Value")
sapply(densities, lines, lwd=.5)
abline(v=ndist, col="red", lty=1, lwd=.25)
# third panel: histogram of sample means
ndens <- density(ndist)
nhist <- hist(ndist, plot=FALSE)
hist(ndist, main=paste("Distribution of mean values from ", N,
" random samples each\nconsisting of ", n,
" observations from the ", distname, sep=""), col="red",
xlab=expression(paste("Sample means (", bar(X), ")")),
freq=FALSE, ylim=c(0, max(ndens$y, nhist$density)), breaks=nbreaks, xlim=range(phist$breaks))
lines(ndens, col="black", lwd=3)
abline(v=obs$ndist[1], col="blue", lwd=2, lty=2)
abline(v=expect[1], col="red", lwd=2, lty=2)
legend(x="topright", col=c("blue", "red"), lwd=2, lty=2,
legend=c("Observed", "Expected"))
box()
# fourth panel: compare sample means to normal distribution
qqnorm(ndist, main=paste("Distribution of sample means\n from the", distname, "against Normal"))
qqline(ndist)
})
})
|
cffeeacdd4c0573e36ba456b331d949c86b249c1
|
f95b3720c2ff266261cfbbd2e06fc987b34db08a
|
/R/CopyToClipboard.R
|
fc21a7e2f8db3af6278ff0f63877ecaec3d88f6e
|
[] |
no_license
|
selinaZitrone/Lessons_Alessio
|
674422516ff193c5834bca6a2abc46b2007448a5
|
1f816c104c7bc760e967669eb52cb1d56680d3d1
|
refs/heads/master
| 2023-01-19T09:28:07.413472
| 2020-10-05T17:48:51
| 2020-10-05T17:48:51
| 300,846,108
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 349
|
r
|
CopyToClipboard.R
|
# Copy the contents of a text file to the system clipboard.
#
# file     (string): name of the file to copy (including file extension)
# filepath (string): directory containing `file`; defaults to the
#                    project's HTML/ folder resolved via here::here()
#
# Returns (invisibly) the value of clipr::write_clip().
CopyToClipboard <- function(file, filepath = here::here("HTML")) {
  # Call clipr directly instead of piping with `%>%`: the pipe operator
  # lives in magrittr, which this file never attaches, so the original
  # code only worked when a caller happened to have magrittr loaded.
  # file.path() joins the components with "/" just like the old
  # paste0(filepath, "/", file) did.
  lines <- readLines(file.path(filepath, file))
  invisible(clipr::write_clip(lines))
}
|
69525749ec125d28e88c8998609e47b64e5f2e05
|
bac8d44dff959258dc7a706a35e1ebd1944c7641
|
/plot1.R
|
9a476505549687d818f137929ab8154ad02460b1
|
[] |
no_license
|
subhashish7/ExData_Plotting1
|
889fefa4ca6d0511fd0f22e6335f4bc48e91286a
|
35b93a5192b2e5c79cca88c9b21c4bbcc69018b3
|
refs/heads/master
| 2020-04-10T14:00:01.145357
| 2018-12-28T15:00:45
| 2018-12-28T15:00:45
| 161,064,479
| 0
| 0
| null | 2018-12-09T17:33:57
| 2018-12-09T17:33:56
| null |
UTF-8
|
R
| false
| false
| 494
|
r
|
plot1.R
|
# Exploratory Data Analysis - Plot 1
# Histogram of Global Active Power for 2007-02-01 and 2007-02-02.

# Work from the directory that holds the raw data file
setwd("C:/Users/Desktop/Coursera/EDA")

# Load the full household power consumption data set
# (semicolon-separated, "." as the decimal mark, strings kept as character)
power_data <- read.csv("household_power_consumption.txt",
                       header = TRUE, sep = ";", dec = ".",
                       stringsAsFactors = FALSE)

# Keep only the two days of interest; dates are stored as d/m/yyyy strings
two_days <- subset(power_data, Date %in% c("1/2/2007", "2/2/2007"))

# Render the histogram to a 480x480 PNG file
png("plot1.png", width = 480, height = 480)
hist(as.numeric(two_days$Global_active_power),
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
d8eda88c55e5496d893150f3c5569fce02edac66
|
0d85667371d11e998a8526cbfea00eb919ef3e30
|
/man/convSiteData.Rd
|
db701e396efed1f9da9141581076ab481457ea5f
|
[
"MIT"
] |
permissive
|
dinaIssakova/rgenesconverged
|
5ef5230af0cde0c61545cfed73f0bb5c2f0f1da3
|
e1b5bb82bfa8fe03d232f1aa19bb3ed785252d07
|
refs/heads/master
| 2020-07-28T06:07:59.193155
| 2020-01-06T20:48:17
| 2020-01-06T20:48:17
| 209,332,677
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 937
|
rd
|
convSiteData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getResults.R
\name{convSiteData}
\alias{convSiteData}
\title{Get number of convergent sites}
\usage{
convSiteData(
tree,
phydat,
spe1,
spe2,
t,
type = c("abs", "score"),
m = getm(tree, phydat, spe1, spe2)
)
}
\arguments{
\item{tree}{A phylogenetic tree}
\item{phydat}{An object of class phydat}
\item{spe1}{The name of species 1}
\item{spe2}{The name of species 2}
\item{t}{threshold}
\item{type}{Type of analysis: 'abs' for basic model or 'score' for by convergence score model}
\item{m}{The length of each gene (Up to what is desired to be evaluated). Default is entire gene}
}
\value{
The number of potentially convergent sites
}
\description{
Get the number of sites that potentially evolved convergently and print the probability that this occurred by chance.
}
\examples{
\dontrun{
convSiteData(smallTree, primates, "Human", "Chimp", 5)
}
}
|
e3fde9575f98117e86373d41ba582037061a5358
|
3ab08891487e23f0a6bcf184649ba331a938070b
|
/NYC_Paking_Graded_Case_Study_V6.R
|
718c7d8bd2fc757a581cf6617ce281b2b309d30d
|
[] |
no_license
|
Abhijit-Barik01/SPARK-R-SQL-NYC-PArking-Ticket-Analysis
|
b7a00f820ae3f7003ffaaa56c8a9e4db7e899c80
|
85f11875b4e8eda5a0faae56fdc9c9664d3d1d99
|
refs/heads/master
| 2023-03-18T08:17:05.755512
| 2019-01-01T07:03:43
| 2019-01-01T07:03:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 77,588
|
r
|
NYC_Paking_Graded_Case_Study_V6.R
|
#######################################################################################################################################################
#1.Problem Statement/Business Understanding
#2.Broad Assumptions
#3.Initialise Environment,load libraries & data
#4.Understanding
#5.Assignment Tasks - Examining Data
#6.Assignment Tasks - Aggregation tasks
#7.Closure
#######################################################################################################################################################
#######################################################################################################################################################
#1.Problem Statement/Business Understanding
#######################################################################################################################################################
#NYC Police Department has collected data for parking tickets.We have been provided with data files for 2015,2016 and 2017
#The purpose of this case study is to conduct an exploratory data analysis that helps to understand the data.
#The scope of this analysis, we wish to compare the phenomenon related to parking tickets over three different years - 2015, 2016, 2017
#It's reccomended to do analysis over fiscal year however it's fine to use calendar year approach as well.
#
#######################################################################################################################################################
#2.Broad Assumptions
#-We will be using calendar year instead of fiscal year- as permitted in the problem statement.
#-We'll load all files together for analysis and perform required analysis.This will cause all data pertaining to calendar years 2015,2016, 2017
#-to be considered valid for case study.Some of this data would have been invalid in other approach/es.
#-Since the purpose of this case study is EDA itself, we have performed EDA only as needed and cleanup is performed only in case it affects the analysis.
#-We are assuming that all required libraries are installed in the environment prior to execution
#-It was observed that 2017 file contained 8 less columns however names of the columns were same in all 3 years hence it was decided
# to use combined data load instead of individual data frames per year to avoid repetative code.
#######################################################################################################################################################
#######################################################################################################################################################
#3.Initialise Environment, load libraries & data
#######################################################################################################################################################
#File Location
#'/common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_201x.csv'
# Load SparkR
spark_path <- '/usr/local/spark'
if (nchar(Sys.getenv("SPARK_HOME")) < 1) {
Sys.setenv(SPARK_HOME = spark_path)
}
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
# Initialise the sparkR session
sparkR.session(master = "yarn-client", sparkConfig = list(spark.driver.memory = "1g"))
#Ensure that all libraries below are installed before loading
#Sample install command
#install.packages("sparklyr")
library(sparklyr)
library(dplyr)
library(ggplot2)
library(ggrepel)
library(tidyr)
#connect to spark session
#sc <- spark_connect(master = "yarn-client")
# Before executing any hive-sql query from RStudio, you need to add a jar file in RStudio
sql("ADD JAR /opt/cloudera/parcels/CDH/lib/hive/lib/hive-hcatalog-core-1.1.0-cdh5.11.2.jar")
#filePath <- "hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_201*.csv"
#Load all data files at once - combined data
NYC_Ticket_Base<-SparkR::read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_201*.csv", "CSV", header="true", inferSchema = "true")
#Load individual year files
NYC_Ticket_Base_2015<-SparkR::read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_2015.csv", "CSV", header="true", inferSchema = "true")
NYC_Ticket_Base_2016<-SparkR::read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_2016.csv", "CSV", header="true", inferSchema = "true")
NYC_Ticket_Base_2017<-SparkR::read.df("hdfs:///common_folder/nyc_parking/Parking_Violations_Issued_-_Fiscal_Year_2017.csv", "CSV", header="true", inferSchema = "true")
#######################################################################################################################################################
#4.Understanding/Examining Data
#######################################################################################################################################################
ncol(NYC_Ticket_Base_2015) #51 columns
nrow(NYC_Ticket_Base_2015) #11809233 records/rows
ncol(NYC_Ticket_Base_2016) #51 columns
nrow(NYC_Ticket_Base_2016) #10626899 records/rows
ncol(NYC_Ticket_Base_2017) #43 columns
nrow(NYC_Ticket_Base_2017) #10803028 records/rows
ncol(NYC_Ticket_Base)#51 columns
nrow(NYC_Ticket_Base)#33239160 records/rows
colnames(NYC_Ticket_Base_2015)
#[1] "Summons Number" "Plate ID" "Registration State"
#[4] "Plate Type" "Issue Date" "Violation Code"
#[7] "Vehicle Body Type" "Vehicle Make" "Issuing Agency"
#[10] "Street Code1" "Street Code2" "Street Code3"
#[13] "Vehicle Expiration Date" "Violation Location" "Violation Precinct"
#[16] "Issuer Precinct" "Issuer Code" "Issuer Command"
#[19] "Issuer Squad" "Violation Time" "Time First Observed"
#[22] "Violation County" "Violation In Front Of Or Opposite" "House Number"
#[25] "Street Name" "Intersecting Street" "Date First Observed"
#[28] "Law Section" "Sub Division" "Violation Legal Code"
#[31] "Days Parking In Effect " "From Hours In Effect" "To Hours In Effect"
#[34] "Vehicle Color" "Unregistered Vehicle?" "Vehicle Year"
#[37] "Meter Number" "Feet From Curb" "Violation Post Code"
#[40] "Violation Description" "No Standing or Stopping Violation" "Hydrant Violation"
#[43] "Double Parking Violation" "Latitude" "Longitude"
#[46] "Community Board" "Community Council " "Census Tract"
#[49] "BIN" "BBL" "NTA"
str(NYC_Ticket_Base_2015)
# 'SparkDataFrame': 51 variables:
# $ Summons Number : num 8002531292 8015318440 7611181981 7445908067 7037692864 7704791394
# $ Plate ID : chr "EPC5238" "5298MD" "FYW2775" "GWE1987" "T671196C" "JJF6834"
# $ Registration State : chr "NY" "NY" "NY" "NY" "NY" "PA"
# $ Plate Type : chr "PAS" "COM" "PAS" "PAS" "PAS" "PAS"
# $ Issue Date : chr "10/01/2014" "03/06/2015" "07/28/2014" "04/13/2015" "05/19/2015" "11/20/2014"
# $ Violation Code : int 21 14 46 19 19 21
# $ Vehicle Body Type : chr "SUBN" "VAN" "SUBN" "4DSD" "4DSD" "4DSD"
# $ Vehicle Make : chr "CHEVR" "FRUEH" "SUBAR" "LEXUS" "CHRYS" "NISSA"
# $ Issuing Agency : chr "T" "T" "T" "T" "T" "T"
# $ Street Code1 : int 20390 27790 8130 59990 36090 74230
# $ Street Code2 : int 29890 19550 5430 16540 10410 37980
# $ Street Code3 : int 31490 19570 5580 16790 24690 38030
# $ Vehicle Expiration Date : chr "01/01/20150111 12:00:00 PM" "01/01/88888888 12:00:00 PM" "01/01/20160524 12:0
# $ Violation Location : int 7 25 72 102 28 67
# $ Violation Precinct : int 7 25 72 102 28 67
# $ Issuer Precinct : int 7 25 72 102 28 67
# $ Issuer Code : int 345454 333386 331845 355669 341248 357104
# $ Issuer Command : chr "T800" "T103" "T302" "T402" "T103" "T302"
# $ Issuer Squad : chr "A2" "B" "L" "D" "X" "A"
# $ Violation Time : chr "0011A" "0942A" "1020A" "0318P" "0410P" "0839A"
# $ Time First Observed : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Violation County : chr "NY" "NY" "K" "Q" "NY" "K"
# $ Violation In Front Of Or Opposite: chr "F" "F" "F" "F" "F" "F"
# $ House Number : chr "133" "1916" "184" "120-20" "66" "1013"
# $ Street Name : chr "Essex St" "Park Ave" "31st St" "Queens Blvd" "W 116th St" "Rutland Rd"
# $ Intersecting Street : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Date First Observed : chr "01/05/0001 12:00:00 PM" "01/05/0001 12:00:00 PM" "01/05/0001 12:00:00 PM" "01
# $ Law Section : int 408 408 408 408 408 408
# $ Sub Division : chr "d1" "c" "f1" "c3" "c3" "d1"
# $ Violation Legal Code : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Days Parking In Effect : chr "Y Y Y" "YYYYY" "NA" "YYYYY" "YYYYYYY" "Y"
# $ From Hours In Effect : chr "1200A" "0700A" "NA" "0300P" "NA" "0830A"
# $ To Hours In Effect : chr "0300A" "1000A" "NA" "1000P" "NA" "0900A"
# $ Vehicle Color : chr "BL" "BROWN" "BLACK" "GY" "BLACK" "WHITE"
# $ Unregistered Vehicle? : int NA NA NA NA NA NA
# $ Vehicle Year : int 2005 0 2010 2015 0 0
# $ Meter Number : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Feet From Curb : int 0 0 0 0 0 0
# $ Violation Post Code : chr "A 77" "CC3" "J 32" "01 4" "19 7" "C 32"
# $ Violation Description : chr "21-No Parking (street clean)" "14-No Standing" "46A-Double Parking (Non-COM)"
# $ No Standing or Stopping Violation: chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Hydrant Violation : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Double Parking Violation : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Latitude : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Longitude : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Community Board : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Community Council : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ Census Tract : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ BIN : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ BBL : chr "NA" "NA" "NA" "NA" "NA" "NA"
# $ NTA : chr "NA" "NA" "NA" "NA" "NA" "NA"
setdiff(colnames(NYC_Ticket_Base_2015),colnames(NYC_Ticket_Base_2016))
#Both data sets have same columns
setdiff(colnames(NYC_Ticket_Base_2015),colnames(NYC_Ticket_Base_2017))
#NYC_Ticket_Base_2017 does not have below columns
# [1] "Latitude" "Longitude" "Community Board" "Community Council " "Census Tract" "BIN"
# [7] "BBL" "NTA"
#This shows that all data files contain same columns- 2017 has 8 less columns however those are not requrired for analysis
#Hence we will use the combined data set that contains data for all years for further analysis.
# Finding number of null values in each column
nullcount <- SparkR::select(NYC_Ticket_Base, lapply(columns(NYC_Ticket_Base), function(c)
alias(sum(cast(isNull(NYC_Ticket_Base[[c]]), "integer")), c)))
nullcount %>% as.data.frame %>% gather(NYC_Ticket_Base, sum_null)
# NYC_Ticket_Base sum_null
# 1 Summons Number 0
# 2 Plate ID 4
# 3 Registration State 0
# 4 Plate Type 0
# 5 Issue Date 0
# 6 Violation Code 0
# 7 Vehicle Body Type 127700
# 8 Vehicle Make 212135
# 9 Issuing Agency 0
# 10 Street Code1 0
# 11 Street Code2 0
# 12 Street Code3 0
# 13 Vehicle Expiration Date 1
# 14 Violation Location 5740226
# 15 Violation Precinct 1
# 16 Issuer Precinct 1
# 17 Issuer Code 1
# 18 Issuer Command 5704460
# 19 Issuer Squad 5706033
# 20 Violation Time 6058
# 21 Time First Observed 30042504
# 22 Violation County 3594319
# 23 Violation In Front Of Or Opposite 5985339
# 24 House Number 6312797
# 25 Street Name 18338
# 26 Intersecting Street 23509505
# 27 Date First Observed 2
# 28 Law Section 2
# 29 Sub Division 5008
# 30 Violation Legal Code 27532215
# 31 Days Parking In Effect 8418387
# 32 From Hours In Effect 15613695
# 33 To Hours In Effect 15613692
# 34 Vehicle Color 415377
# 35 Unregistered Vehicle? 29595048
# 36 Vehicle Year 4
# 37 Meter Number 27290553
# 38 Feet From Curb 4
# 39 Violation Post Code 9349625
# 40 Violation Description 3647162
# 41 No Standing or Stopping Violation 33239159
# 42 Hydrant Violation 33239159
# 43 Double Parking Violation 33239159
# 44 Latitude 33239160
# 45 Longitude 33239160
# 46 Community Board 33239160
# 47 Community Council 33239160
# 48 Census Tract 33239160
# 49 BIN 33239160
# 50 BBL 33239160
# 51 NTA 33239160
# A lot of columns towards end of data frame are completely empty
# Based on questions, we will not need these column for analysis hence we can remove these.
# There are some columns with high number of nulls.These are not needed for analysis hence we decided to leave those as it is.
# `violation location` and `Violation Time` are needed for analysis and have nulls to be checked as part of analysis
# Hence we'll treat those nulls later during the analysis as needed for this case study
# Drop any row missing one of the four columns needed by the later
# aggregation questions (vehicle body type / make, violation and issuer
# precinct); how="any" removes a row if even one of these is NA.
NYC_Ticket_Base<-SparkR::dropna(NYC_Ticket_Base,how="any", cols=c("Vehicle Body Type","Vehicle Make","Violation Precinct","Issuer Precinct"))
# Remove the columns that the null-count check above showed to be empty
# (33,239,159+ nulls out of 33,239,160 rows); none of them are used by
# any question in this case study.  Assigning NULL to a SparkDataFrame
# column drops it.
NYC_Ticket_Base$`No Standing or Stopping Violation`<-NULL
NYC_Ticket_Base$`Hydrant Violation`<-NULL
NYC_Ticket_Base$`Double Parking Violation`<-NULL
NYC_Ticket_Base$`Latitude`<-NULL
NYC_Ticket_Base$`Longitude`<-NULL
NYC_Ticket_Base$`Community Board`<-NULL
NYC_Ticket_Base$`Community Council `<-NULL
NYC_Ticket_Base$`Census Tract`<-NULL
NYC_Ticket_Base$`BIN`<-NULL
NYC_Ticket_Base$`BBL`<-NULL
NYC_Ticket_Base$`NTA`<-NULL
# Count distinct values for every remaining column in one aggregation:
# build a list of countDistinct() expressions, each aliased with its
# column name.
# NOTE(review): SparkR:::lapply is an *internal* (unexported) function;
# it works here but is not part of the public SparkR API and may break
# across Spark versions -- confirm before upgrading.
unique_NYC_Ticket<-SparkR:::lapply(names(NYC_Ticket_Base),function(x) alias(countDistinct(NYC_Ticket_Base[[x]]), x))
# One-row result: the distinct count of each column, keyed by column name.
head(do.call(agg, c(x = NYC_Ticket_Base, unique_NYC_Ticket)))
#
# Summons Number Plate ID Registration State Plate Type Issue Date Violation Code Vehicle Body Type
# 1 31864311 6021216 69 89 3379 100 3824
# Vehicle Make Issuing Agency Street Code1 Street Code2 Street Code3 Vehicle Expiration Date
# 1 12679 20 7024 7319 7072 9555
# Violation Location Violation Precinct Issuer Precinct Issuer Code Issuer Command Issuer Squad
# 1 579 580 847 60900 5977 50
# Violation Time Time First Observed Violation County Violation In Front Of Or Opposite House Number
# 1 2169 2547 19 6 78608
# Street Name Intersecting Street Date First Observed Law Section Sub Division Violation Legal Code
# 1 189521 407674 1517 9 144 5
# Days Parking In Effect From Hours In Effect To Hours In Effect Vehicle Color Unregistered Vehicle?
# 1 190 784 905 4418 4
# Vehicle Year Meter Number Feet From Curb Violation Post Code Violation Description
# 1 100 54500 17 1234 110
#It is observed that there are nearly 1.1M(32932223 vs 31864311 unique) records with same Summons Numbers.
#Summon Number is expected to be unique hence based on https://learn.upgrad.com/v/course/126/question/99083
#We will remove these records
# Register the cleaned data as a SQL view so it can be queried by name.
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
# De-duplicate on Summons Number.  NOTE: the `having count(*) = 1`
# subquery keeps only summons numbers that appear exactly once, i.e. it
# removes *every* row of a duplicated summons number rather than keeping
# one representative copy (per the course forum guidance cited above).
NYC_Ticket_Base<-SparkR::sql("select * from NYC_Ticket_Base_tab where `Summons Number` in ( select `Summons Number` from NYC_Ticket_Base_tab group by `Summons Number` having count(*) = 1 )")
ncol(NYC_Ticket_Base)#40
nrow(NYC_Ticket_Base)#30842504
# Parse the MM/dd/yyyy issue-date string into a Date column and derive
# the calendar year used throughout the year-wise analysis.
NYC_Ticket_Base$`Issue Date`<-SparkR::to_date(NYC_Ticket_Base$`Issue Date`, "MM/dd/yyyy")
NYC_Ticket_Base$Issue_Date_Year<-SparkR::year(NYC_Ticket_Base$`Issue Date`)
# Re-register the view so the new columns are visible to later queries.
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
#######################################################################################################################################################
#5.Assignment Tasks - Examining Data
#######################################################################################################################################################
#------------------------------------------------------------------------------------------------------------------------------------------------------
#A1. Find the total number of tickets for each year.
#We'll check if the data is clean first before getting answers
Year_Wise_Record <- SparkR::sql("SELECT Issue_Date_Year, count(*) REC_COUNT FROM NYC_Ticket_Base_tab group by Issue_Date_Year")
head(Year_Wise_Record)
# Issue_Date_Year REC_COUNT
# 1 1990 3
# 2 2025 40
# 3 1975 1
# 4 1977 1
# 5 2027 48
# 6 2003 5
# looks like the year spred is more than the expected 3 years
#For the scope of this analysis, we wish to compare the phenomenon related to parking tickets over three different years - 2015, 2016, 2017
nrow(Year_Wise_Record)
#71 seems lie a lot of years data is present, lets filter out unwanted years
NYC_Ticket_Base <- SparkR::sql("SELECT * FROM NYC_Ticket_Base_tab where Issue_Date_Year in (2015, 2016, 2017)")
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
#let's check yearwise number of tickets in the data
Year_Wise_Record <- SparkR::sql("SELECT Issue_Date_Year, count(*) REC_COUNT FROM NYC_Ticket_Base_tab group by Issue_Date_Year")
head(Year_Wise_Record)
# Issue_Date_Year REC_COUNT
# 1 2015 10008087
# 2 2016 10146977
# 3 2017 5377542
#The ticket counts have decreased over the years, specially 2017 seems to have very low numbers
#Alternate -Let's check this on a plot
c <- SparkR::count(groupBy(NYC_Ticket_Base, "Issue_Date_Year"))
c.r <- SparkR::collect(c)
year_count <- c.r[c.r$Issue_Date_Year %in% c(2015,2016,2017), 1:2]
# plot showing yearly number of tickets
g <- ggplot(year_count, aes(x=year_count$Issue_Date_Year, y=year_count$count))
g + geom_bar(stat = "identity") + geom_text(label = year_count$count, position = position_stack(vjust = 0.5))
#------------------------------------------------------------------------------------------------------------------------------------------------------
#A2. Find out the number of unique states from where the cars that got parking tickets came from.
#(Hint: Use the column 'Registration State')
# There is a numeric entry in the column which should be corrected.
#Replace it with the state having maximum entries. Give the number of unique states for each year again.
#First check the data for cleanup needs- as mentioned, there is numeric entry in the data that needs to be fixed.
State_Wise<-SparkR::sql("select `Registration State`,Count(*) Record_count from NYC_Ticket_Base_tab group by `Registration State` order by Count(*) desc")
nrow(State_Wise)
#Number of states in overall data - 69.This contains 99 which is invalid.
head(State_Wise,69)
#Maximum entries NY 20024087
#Numeric entries 99 73154
#Fix the numeric state records with NY
NYC_Ticket_Base <- SparkR::sql("SELECT NYC_Ticket_Base_tab.*, case when `Registration State`=99 then 'NY' else `Registration State` end Registration_State FROM NYC_Ticket_Base_tab ")
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
Year_State<- SparkR::sql("select Issue_Date_Year, count(distinct Registration_State) REC_COUNT FROM NYC_Ticket_Base_tab group by Issue_Date_Year")
head(Year_State,nrow(Year_State))
#Number of unique states in parking tickets data
# Issue_Date_Year REC_COUNT
# 1 2015 68
# 2 2016 67
# 3 2017 64
#Plot - Number of tickets by Registration State
rs <- SparkR::count(groupBy(NYC_Ticket_Base, "Registration State"))
rs <- SparkR::collect(rs)
rs <- (arrange(rs, desc(rs$count)))
# plot showing number of tickets based on Registration State
g <- ggplot(rs, aes(x=reorder(rs$`Registration State`,-rs$count), y=rs$count))
g + geom_bar(stat = "identity") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
head(rs)
# Registration State count
# 1 NY 20024087
# 2 NJ 2266677
# 3 PA 638387
# 4 CT 339772
# 5 FL 330976
# 6 MA 214649
# Number of tickets by Plate Type
pt <- SparkR::count(groupBy(NYC_Ticket_Base, "Plate Type"))
pt <- SparkR::collect(pt)
pt <- (arrange(pt, desc(pt$count)))
# plot indicating number of tickets based on Plate Type
g <- ggplot(pt, aes(x=reorder(pt$`Plate Type`,-pt$count), y=pt$count))
g + geom_bar(stat = "identity") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
head(pt)
# Plate Type count
# 1 PAS 18673869
# 2 COM 4702049
# 3 OMT 939756
# 4 OMS 230251
# 5 SRF 218486
# 6 IRP 138984
#Passenger vehicles in NY have highest violations
#------------------------------------------------------------------------------------------------------------------------------------------------------
#A3. Some parking tickets don't have the address for violation location on them,
#which is a cause for concern. Write a query to check the number of such tickets.
#The values should not be deleted or imputed here. This is just a check.
violation_location_missing<-SparkR::sql("select count(*) REC_COUNT from NYC_Ticket_Base_tab where `violation location` is null")
head(violation_location_missing)
# REC_COUNT
# 4448923
(4448923*100)/nrow(NYC_Ticket_Base)
#17.4% records have violation location missing from the records
violation_location_missing_yearwise<-SparkR::sql("select Issue_Date_Year,count(*) REC_COUNT from NYC_Ticket_Base_tab where `violation location` is null
group by Issue_Date_Year")
head(violation_location_missing_yearwise)
#Yearwise records - Violation location missing
# Issue_Date_Year REC_COUNT
# 1 2015 1555016
# 2 2016 1970527
# 3 2017 923380
#######################################################################################################################################################
#6.Assignment Tasks - Aggregation tasks
#######################################################################################################################################################
#------------------------------------------------------------------------------------------------------------------------------------------------------
#A1.How often does each violation code occur? Display the frequency of the top five violation codes.
violation_code_freq<-SparkR::sql("select `violation code`, count(*) REC_COUNT from NYC_Ticket_Base_tab group by `violation code` order by count(*) desc")
head(violation_code_freq,5)
# violation code REC_COUNT
# 1 21 3595056
# 2 36 2964148
# 3 38 2749510
# 4 14 2138952
# 5 37 1594604
#Year wise top 5
violation_code_freq_yearwise<-SparkR::sql("select Issue_Date_Year,`violation code`, count(*) REC_COUNT
from NYC_Ticket_Base_tab
group by `violation code`,Issue_Date_Year
order by count(*) desc")
violation_code_freq_2015<-SparkR::collect(SparkR::filter(violation_code_freq_yearwise, violation_code_freq_yearwise$Issue_Date_Year == 2015))
head(violation_code_freq_2015,5)
#Top 5 violation code in 2015
# Issue_Date_Year violation code REC_COUNT
# 1 2015 21 1425779
# 2 2015 38 1143343
# 3 2015 36 951024
# 4 2015 14 851119
# 5 2015 37 668394
violation_code_freq_2016<-SparkR::collect(SparkR::filter(violation_code_freq_yearwise, violation_code_freq_yearwise$Issue_Date_Year == 2016))
head(violation_code_freq_2016,5)
#Top 5 violation code in 2016
# Issue_Date_Year violation code REC_COUNT
# 1 2016 21 1409997
# 2 2016 36 1351297
# 3 2016 38 1066339
# 4 2016 14 815800
# 5 2016 37 633584
violation_code_freq_2017<-SparkR::collect(SparkR::filter(violation_code_freq_yearwise, violation_code_freq_yearwise$Issue_Date_Year == 2017))
head(violation_code_freq_2017,5)
#Top 5 violation code in 2017
# Issue_Date_Year violation code REC_COUNT
# 1 2017 21 759280
# 2 2017 36 661827
# 3 2017 38 539828
# 4 2017 14 472033
# 5 2017 20 317551
#Plot the violation codes for visual analysis
violation_code_freq_yearwise<-SparkR::collect(SparkR::filter(violation_code_freq_yearwise, violation_code_freq_yearwise$REC_COUNT>50000))
plot <- ggplot(violation_code_freq_yearwise,aes(x = factor(Issue_Date_Year), y = REC_COUNT,col=`violation code`,label=`violation code`)) +
geom_point() +
geom_label_repel(aes(label = `violation code`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("violation code") + ylab("REC_COUNT")
plot
#21,38,36,14 and 37 all are top 5 violation codes
#Let's check number of tickets by violation code through another visualization
vc <- SparkR::count(groupBy(NYC_Ticket_Base, "Violation Code"))
vc <- SparkR::collect(vc)
vc <- (arrange(vc, desc(vc$count)))
# plot indicating number of tickets based on violation code
g <- ggplot(vc, aes(x=reorder(vc$`Violation Code`,-vc$count), y=vc$count))
g + geom_bar(stat = "identity") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
head(vc)
# Violation Code count
# 1 21 3595056
# 2 36 2964148
# 3 38 2749510
# 4 14 2138952
# 5 37 1594604
# 6 20 1475446
#21,38,36,14 and 37 all are overall top 5 violation codes
#------------------------------------------------------------------------------------------------------------------------------------------------------
#A2.How often does each 'vehicle body type' get a parking ticket?
#How about the 'vehicle make'? (Hint: find the top 5 for both)
#Overall
#Body Type
vehicle_body_type_freq<-SparkR::sql("select `vehicle body type`, count(*) REC_COUNT from NYC_Ticket_Base_tab group by `vehicle body type` order by count(*) desc")
head(vehicle_body_type_freq,5)
# vehicle body type REC_COUNT
# 1 SUBN 8551193
# 2 4DSD 7296843
# 3 VAN 3571103
# 4 DELV 1754513
# 5 SDN 994615
#Make
vehicle_make_freq<-SparkR::sql("select `vehicle make`, count(*) REC_COUNT from NYC_Ticket_Base_tab group by `vehicle make` order by count(*) desc")
head(vehicle_make_freq,5)
# vehicle make REC_COUNT
# 1 FORD 3155810
# 2 TOYOT 2801819
# 3 HONDA 2486750
# 4 NISSA 2077591
# 5 CHEVR 1799535
## Year wise analysis
#Body Type
vehicle_body_type_year_freq<-SparkR::sql("select `vehicle body type`,Issue_Date_Year, count(*) REC_COUNT
from NYC_Ticket_Base_tab
group by `vehicle body type`,Issue_Date_Year order by count(*) desc")
#2015
vehicle_body_type_year_freq_2015<-SparkR::collect(SparkR::filter(vehicle_body_type_year_freq, vehicle_body_type_year_freq$Issue_Date_Year == 2015))
head(vehicle_body_type_year_freq_2015,5)
#Top 5 vehicle body type in 2015
# vehicle body type Issue_Date_Year REC_COUNT
# 1 SUBN 2015 3245663
# 2 4DSD 2015 2862109
# 3 VAN 2015 1448572
# 4 DELV 2015 734937
# 5 SDN 2015 390372
#2016
vehicle_body_type_year_freq_2016<-SparkR::collect(SparkR::filter(vehicle_body_type_year_freq, vehicle_body_type_year_freq$Issue_Date_Year == 2016))
head(vehicle_body_type_year_freq_2016,5)
# vehicle body type Issue_Date_Year REC_COUNT
# 1 SUBN 2016 3425471
# 2 4DSD 2016 2888670
# 3 VAN 2016 1403728
# 4 DELV 2016 667754
# 5 SDN 2016 413483
#2017
vehicle_body_type_year_freq_2017<-SparkR::collect(SparkR::filter(vehicle_body_type_year_freq, vehicle_body_type_year_freq$Issue_Date_Year == 2017))
head(vehicle_body_type_year_freq_2017,5)
# vehicle body type Issue_Date_Year REC_COUNT
# 1 SUBN 2017 1880059
# 2 4DSD 2017 1546064
# 3 VAN 2017 718803
# 4 DELV 2017 351822
# 5 SDN 2017 190760
#Let's visualize these inferences
vehicle_body_type_year_freq<-SparkR::collect(SparkR::filter(vehicle_body_type_year_freq, vehicle_body_type_year_freq$REC_COUNT>50000))
plot <- ggplot(vehicle_body_type_year_freq,aes(x = factor(Issue_Date_Year), y = REC_COUNT,col=`vehicle body type`,label=`vehicle body type`)) +
geom_point() +
geom_label_repel(aes(label = `vehicle body type`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("violation code") + ylab("REC_COUNT")
plot
#Year wise SUBN,4DSD,VAN,DELV,SDN are consistant top 5 violating body types
#Make
vehicle_make_year_freq<-SparkR::sql("select `vehicle make`,Issue_Date_Year, count(*) REC_COUNT
from NYC_Ticket_Base_tab
group by `vehicle make`,Issue_Date_Year order by count(*) desc")
#Overall Analysis
head(vehicle_make_year_freq,5)
# vehicle make Issue_Date_Year REC_COUNT
# 1 FORD 2015 1267693
# 2 FORD 2016 1252931
# 3 TOYOT 2016 1132494
# 4 TOYOT 2015 1065459
# 5 HONDA 2016 997047
#Yearwise Analysis
#2015
vehicle_make_year_freq_2015<-SparkR::collect(SparkR::filter(vehicle_make_year_freq, vehicle_make_year_freq$Issue_Date_Year == 2015))
head(vehicle_make_year_freq_2015,5)
# vehicle make Issue_Date_Year REC_COUNT
# 1 FORD 2015 1267693
# 2 TOYOT 2015 1065459
# 3 HONDA 2015 952177
# 4 NISSA 2015 780454
# 5 CHEVR 2015 748049
#2016
vehicle_make_year_freq_2016<-SparkR::collect(SparkR::filter(vehicle_make_year_freq, vehicle_make_year_freq$Issue_Date_Year == 2016))
head(vehicle_make_year_freq_2016,5)
# vehicle make Issue_Date_Year REC_COUNT
# 1 FORD 2016 1252931
# 2 TOYOT 2016 1132494
# 3 HONDA 2016 997047
# 4 NISSA 2016 836343
# 5 CHEVR 2016 696322
#2017
vehicle_make_year_freq_2017<-SparkR::collect(SparkR::filter(vehicle_make_year_freq, vehicle_make_year_freq$Issue_Date_Year == 2017))
head(vehicle_make_year_freq_2017,5)
# vehicle make Issue_Date_Year REC_COUNT
# 1 FORD 2017 635186
# 2 TOYOT 2017 603866
# 3 HONDA 2017 537526
# 4 NISSA 2017 460794
# 5 CHEVR 2017 355164
#Let's visualize these inferences
vehicle_make_year_freq<-SparkR::collect(SparkR::filter(vehicle_make_year_freq, vehicle_make_year_freq$REC_COUNT>50000))
plot <- ggplot(vehicle_make_year_freq,aes(x = factor(Issue_Date_Year), y = REC_COUNT,col=`vehicle make`,label=`vehicle make`)) +
geom_point() +
geom_label_repel(aes(label = `vehicle make`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("violation code") + ylab("REC_COUNT")
plot
#Year wise FORD,TOYOT(A),HONDA,NISSA,CHEVR are top 5 brands with violation tickets
#Let's perform overall analysis another way
# Number of tickets by Vehicle Body Type
vbt <- SparkR::count(groupBy(NYC_Ticket_Base, "Vehicle Body Type"))
vbt <- SparkR::collect(vbt)
vbt <- (arrange(vbt, desc(vbt$count)))
head(vbt)
# Vehicle Body Type count
# 1 SUBN 8551193
# 2 4DSD 7296843
# 3 VAN 3571103
# 4 DELV 1754513
# 5 SDN 994615
# 6 2DSD 667143
# Number of tickets by Vehicle Make
vm <- SparkR::count(groupBy(NYC_Ticket_Base, "Vehicle Make"))
vm <- SparkR::collect(vm)
vm <- (arrange(vm, desc(vm$count)))
head(vm)
# Vehicle Make count
# 1 FORD 3155810
# 2 TOYOT 2801819
# 3 HONDA 2486750
# 4 NISSA 2077591
# 5 CHEVR 1799535
# 6 FRUEH 1022941
#This analysis confirms our earlier findings as well
#------------------------------------------------------------------------------------------------------------------------------------------------------
#3.A precinct is a police station that has a certain zone of the city under its command. Find the (5 highest) frequency of tickets for each of the following:
#3.1 'Violation Precinct' (this is the precinct of the zone where the violation occurred).
#Using this, can you make any insights for parking violations in any specific areas of the city?
#Overall Analysis
Violation_Precinct_freq<-SparkR::sql("select `Violation Precinct`, count(*) REC_COUNT from NYC_Ticket_Base_tab group by `Violation Precinct` order by count(*) desc")
head(Violation_Precinct_freq,6)
# Violation Precinct REC_COUNT
# 1 0 4448923
# 2 19 1320716
# 3 14 830512
# 4 18 790596
# 5 1 750644
# 6 114 712183
#Let's try alternate way
# Number of tickets by Violation Precinct
vp <- SparkR::count(groupBy(NYC_Ticket_Base, "Violation Precinct"))
vp <- SparkR::collect(vp)
vp <- (arrange(vp, desc(vp$count)))
head(vp)
# Violation Precinct count
# 1 0 4448923
# 2 19 1320716
# 3 14 830512
# 4 18 790596
# 5 1 750644
# 6 114 712183
#Year-wise analysis
Violation_Precinct_year_freq<-SparkR::sql("select `Violation Precinct`,Issue_Date_Year, count(*) REC_COUNT
from NYC_Ticket_Base_tab where `Violation Precinct` !=0
group by `Violation Precinct`,Issue_Date_Year order by count(*) desc")
#2015
Violation_Precinct_year_freq_2015<-SparkR::collect(SparkR::filter(Violation_Precinct_year_freq, Violation_Precinct_year_freq$Issue_Date_Year == 2015))
head(Violation_Precinct_year_freq_2015,5)
# Violation Precinct Issue_Date_Year REC_COUNT
# 1 19 2015 526252
# 2 18 2015 340438
# 3 14 2015 334275
# 4 114 2015 286258
# 5 1 2015 273800
#2016
Violation_Precinct_year_freq_2016<-SparkR::collect(SparkR::filter(Violation_Precinct_year_freq, Violation_Precinct_year_freq$Issue_Date_Year == 2016))
head(Violation_Precinct_year_freq_2016,5)
# Violation Precinct Issue_Date_Year REC_COUNT
# Violation Precinct Issue_Date_Year REC_COUNT
# 1 19 2016 522311
# 2 1 2016 304737
# 3 14 2016 295558
# 4 18 2016 284146
# 5 114 2016 279142
#2017
Violation_Precinct_year_freq_2017<-SparkR::collect(SparkR::filter(Violation_Precinct_year_freq, Violation_Precinct_year_freq$Issue_Date_Year == 2017))
head(Violation_Precinct_year_freq_2017,5)
# Violation Precinct Issue_Date_Year REC_COUNT
# 1 19 2017 272153
# 2 14 2017 200679
# 3 1 2017 172107
# 4 18 2017 166012
# 5 114 2017 146783
#Let's visualize these inferences
##Reducing counts of smaller numbers for plotting
Violation_Precinct_year_freq<-SparkR::collect(SparkR::filter(Violation_Precinct_year_freq, Violation_Precinct_year_freq$REC_COUNT>100000))
plot <- ggplot(Violation_Precinct_year_freq,aes(x = factor(Issue_Date_Year), y = REC_COUNT,col=`Violation Precinct`,label=`Violation Precinct`)) +
geom_point() +
geom_label_repel(aes(label = `Violation Precinct`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("Violation Precinct") + ylab("REC_COUNT")
plot
# 19,14,18,1,114 are top 5 Violation precincts where violations occur the most with 19 being consistently highest
#3.2 'Issuer Precinct' (this is the precinct that issued the ticket)
#Here you would have noticed that the dataframe has 'Violating Precinct' or 'Issuing Precinct' as '0'. These are the erroneous entries.
#Hence, provide the record for five correct precincts. (Hint: print top six entries after sorting)
#Overall Analysis
Issuer_Precinct_freq<-SparkR::sql("select `Issuer Precinct`, count(*) REC_COUNT from NYC_Ticket_Base_tab group by `Issuer Precinct` order by count(*) desc")
head(Issuer_Precinct_freq,6)
# Issuer Precinct REC_COUNT
# 1 0 5061166
# 2 19 1288533
# 3 14 811162
# 4 18 771257
# 5 1 731608
# 6 114 699980
#Let's try alternate way
# Number of tickets by Issuer Precinct
ip <- SparkR::count(groupBy(NYC_Ticket_Base, "Issuer Precinct"))
ip <- SparkR::collect(ip)
ip <- (arrange(ip, desc(ip$count)))
head(ip)
# Issuer Precinct count
# 1 0 5061166
# 2 19 1288533
# 3 14 811162
# 4 18 771257
# 5 1 731608
# 6 114 699980
#Year-wise analysis
Issuer_Precinct_year_freq<-SparkR::sql("select `Issuer Precinct`,Issue_Date_Year, count(*) REC_COUNT
from NYC_Ticket_Base_tab where `Issuer Precinct` !=0
group by `Issuer Precinct`,Issue_Date_Year order by count(*) desc")
#2015
Issuer_Precinct_year_freq_2015<-SparkR::collect(SparkR::filter(Issuer_Precinct_year_freq, Issuer_Precinct_year_freq$Issue_Date_Year == 2015))
head(Issuer_Precinct_year_freq_2015,5)
# Issuer Precinct Issue_Date_Year REC_COUNT
# 1 19 2015 513583
# 2 18 2015 334355
# 3 14 2015 325592
# 4 114 2015 282353
# 5 1 2015 268316
#2016
Issuer_Precinct_year_freq_2016<-SparkR::collect(SparkR::filter(Issuer_Precinct_year_freq, Issuer_Precinct_year_freq$Issue_Date_Year == 2016))
head(Issuer_Precinct_year_freq_2016,5)
# Issuer Precinct Issue_Date_Year REC_COUNT
# Issuer Precinct Issue_Date_Year REC_COUNT
# 1 19 2016 510002
# 2 1 2016 296457
# 3 14 2016 287561
# 4 18 2016 276548
# 5 114 2016 274101
#2017
Issuer_Precinct_year_freq_2017<-SparkR::collect(SparkR::filter(Issuer_Precinct_year_freq, Issuer_Precinct_year_freq$Issue_Date_Year == 2017))
head(Issuer_Precinct_year_freq_2017,5)
# Issuer Precinct Issue_Date_Year REC_COUNT
# 1 19 2017 264948
# 2 14 2017 198009
# 3 1 2017 166835
# 4 18 2017 160354
# 5 114 2017 143526
#Let's visualize these inferences
##Reducing counts of smaller numbers for plotting
Issuer_Precinct_year_freq<-SparkR::collect(SparkR::filter(Issuer_Precinct_year_freq, Issuer_Precinct_year_freq$REC_COUNT>100000))
plot <- ggplot(Issuer_Precinct_year_freq,aes(x = factor(Issue_Date_Year), y = REC_COUNT,col=`Issuer Precinct`,label=`Issuer Precinct`)) +
geom_point() +
geom_label_repel(aes(label = `Issuer Precinct`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("Issuer Precinct") + ylab("REC_COUNT")
plot
# 19,14,18,1,114 are top 5 issuer precincts where violations tickets are issued.The most with 19 being consistently highest
# From the counts it appears that not all tickets are issued in the same precinct where violation occured
#------------------------------------------------------------------------------------------------------------------------------------------------------
#A4. Find the violation code frequency across three precincts which have issued the most number of tickets -
#do these precinct zones have an exceptionally high frequency of certain violation codes?
#Are these codes common across precincts?
#Hint: You can analyse the three precincts together using the 'union all' attribute in SQL view.
#In the SQL view,use the 'where' attribute to filter among three precincts and combine them using 'union all'.
#From data analyzed above,
#Three prestine with most issued tickets
# 19 1372464
# 14 870724
# 18 831708
#Overall analysis
Issuer_Precinct_freq<-SparkR::sql("select `Issuer Precinct`,`violation code`, count(*) REC_COUNT
from NYC_Ticket_Base_tab where `Issuer Precinct` in (19,14,18)
group by `Issuer Precinct`,`violation code`
order by `Issuer Precinct`,count(*) desc")
Issuer_Precinct_freq_19<-SparkR::collect(SparkR::filter(Issuer_Precinct_freq, Issuer_Precinct_freq$`Issuer Precinct` == 19))
head(Issuer_Precinct_freq_19,5)
# Issuer Precinct violation code REC_COUNT
# 1 19 38 186785
# 2 19 37 182166
# 3 19 46 177442
# 4 19 14 145387
# 5 19 21 133729
Issuer_Precinct_freq_14<-SparkR::collect(SparkR::filter(Issuer_Precinct_freq, Issuer_Precinct_freq$`Issuer Precinct` == 14))
head(Issuer_Precinct_freq_14,5)
# Issuer Precinct violation code REC_COUNT
# 1 14 14 166912
# 2 14 69 160205
# 3 14 31 92628
# 4 14 47 66760
# 5 14 42 56359
Issuer_Precinct_freq_18<-SparkR::collect(SparkR::filter(Issuer_Precinct_freq, Issuer_Precinct_freq$`Issuer Precinct` == 18))
head(Issuer_Precinct_freq_18,5)
# Issuer Precinct violation code REC_COUNT
# 1 18 14 239395
# 2 18 69 107960
# 3 18 47 59707
# 4 18 31 56661
# 5 18 42 38060
#Let's visualize this inferences
Issuer_Precinct_freq<-SparkR::collect(SparkR::filter(Issuer_Precinct_freq, Issuer_Precinct_freq$REC_COUNT>1000))
plot <- ggplot(Issuer_Precinct_freq,aes(x = `violation code`, y = REC_COUNT,col=`Issuer Precinct`)) +
geom_point() +
xlab("violation code") + ylab("REC_COUNT")
plot
#violation code 14 is consistently high for all three issue prectine location
#38,37,46,21 are high for 19 precinct
#69,31,47,42 are high for 14 as well as 19 precinct
#Year wise analysis
Issuer_Precinct_freq_yrly<-SparkR::sql("select Issue_Date_Year,`Issuer Precinct`,`violation code`, count(*) REC_COUNT
from NYC_Ticket_Base_tab where `Issuer Precinct` in (19,14,18)
group by Issue_Date_Year,`Issuer Precinct`,`violation code`
order by Issue_Date_Year,count(*) desc")
#2015
Issuer_Precinct_freq_yrly_2015<-SparkR::collect(SparkR::filter(Issuer_Precinct_freq_yrly, Issuer_Precinct_freq_yrly$Issue_Date_Year == 2015))
head(Issuer_Precinct_freq_yrly_2015,15)
# Issue_Date_Year Issuer Precinct violation code REC_COUNT
# 1 2015 18 14 104232
# 2 2015 19 38 76862
# 3 2015 19 37 71766
# 4 2015 14 69 71005
# 5 2015 14 14 67082
# 6 2015 19 14 59317
# 7 2015 19 46 57692
# 8 2015 19 21 53514
# 9 2015 19 16 52939
# 10 2015 18 69 50557
# 11 2015 14 31 35147
# 12 2015 19 20 28217
# 13 2015 18 47 26188
# 14 2015 18 31 24848
# 15 2015 14 47 24683
#2016
Issuer_Precinct_freq_yrly_2016<-SparkR::collect(SparkR::filter(Issuer_Precinct_freq_yrly, Issuer_Precinct_freq_yrly$Issue_Date_Year == 2016))
head(Issuer_Precinct_freq_yrly_2016,15)
# Issue_Date_Year Issuer Precinct violation code REC_COUNT
# 1 2016 18 14 85542
# 2 2016 19 37 74379
# 3 2016 19 38 73734
# 4 2016 19 46 72431
# 5 2016 14 69 58999
# 6 2016 19 14 56463
# 7 2016 14 14 55300
# 8 2016 19 21 51844
# 9 2016 19 16 46045
# 10 2016 18 69 37369
# 11 2016 14 31 34980
# 12 2016 19 20 26752
# 13 2016 14 47 23770
# 14 2016 14 42 22608
# 15 2016 18 31 19959
#2017
Issuer_Precinct_freq_yrly_2017<-SparkR::collect(SparkR::filter(Issuer_Precinct_freq_yrly, Issuer_Precinct_freq_yrly$Issue_Date_Year == 2017))
head(Issuer_Precinct_freq_yrly_2017,15)
# Issue_Date_Year Issuer Precinct violation code REC_COUNT
# 1 2017 18 14 49621
# 2 2017 19 46 47319
# 3 2017 14 14 44530
# 4 2017 19 38 36189
# 5 2017 19 37 36021
# 6 2017 14 69 30201
# 7 2017 19 14 29607
# 8 2017 19 21 28371
# 9 2017 14 31 22501
# 10 2017 18 69 20034
# 11 2017 14 47 18307
# 12 2017 19 20 14601
# 13 2017 18 47 14050
# 14 2017 18 31 11854
# 15 2017 19 40 11380
#14,37,38,46,69,21,31,47,42 are top violation codes occuring across 2015,2016 and 2017 across precincts
#------------------------------------------------------------------------------------------------------------------------------------------------------
#5. You'd want to find out the properties of parking violations across different times of the day:
#Find a way to deal with missing values, if any.
#Hint: Check for the null values using 'isNull' under the SQL.
#Also, to remove the null values, check the 'dropna' command in the API documentation.
#The Violation Time field is specified in a strange format.
#Find a way to make this into a time attribute that you can use to divide into groups.
#Divide 24 hours into six equal discrete bins of time.
#The intervals you choose are at your discretion. For each of these groups,
#find the three most commonly occurring violations.
#Hint: Use the CASE-WHEN in SQL view to segregate into bins.
#For finding the most commonly occurring violations,
#a similar approach can be used as mention in the hint for question 4.
#Now, try another direction. For the 3 most commonly occurring violation codes,
#find the most common time of the day (in terms of the bins from the previous part)
Violation_Time_cnt<-SparkR::sql("select count(*) REC_COUNT
from NYC_Ticket_Base_tab where `Violation Time` is null")
head(Violation_Time_cnt)
#1314 violation time fields are null , lets remove them with dropna
NYC_Ticket_Base<-SparkR::dropna(NYC_Ticket_Base,how="all", cols="Violation Time")
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
Violation_Time_cnt<-SparkR::sql("select count(*) REC_COUNT
from NYC_Ticket_Base_tab where `Violation Time` is null")
head(Violation_Time_cnt)
#0 Violation Time records are null
head(NYC_Ticket_Base[,"Violation Time"])
# Violation Time
# 1 1002A
# 2 0820P
# 3 0240P
# 4 0749A
# 5 0848A
# 6 1010P
# Converting these values into 24-hour ("army") hours.
# NOTE: 12-hour clock times need special handling -- 12:xxA is hour 0 and
# 12:xxP is hour 12. Simply adding 12 for 'P' (as originally done) maps
# 12:xxAM to 12 and 12:xxPM to 24; the latter was then discarded as
# "garbage" (>23) further below, silently dropping valid midday tickets.
NYC_Ticket_Base<-SparkR::sql("select NYC_Ticket_Base_tab.*,
                             case
                               when substr(`Violation Time`,-1)=='A' and substr(`Violation Time`,1,2)=='12' then 0
                               when substr(`Violation Time`,-1)=='A' then int(substr(`Violation Time`,1,2))
                               when substr(`Violation Time`,1,2)=='12' then 12
                               else int(substr(`Violation Time`,1,2)) + 12
                             end Violation_Time_Hour
                             from NYC_Ticket_Base_tab")
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
head(NYC_Ticket_Base[,"Violation_Time_Hour"])
# Violation_Time_Hour
# 1 10
# 2 20
# 3 14
# 4 7
# 5 8
# 6 22
# (Counts for 12 AM / 12 PM records will differ from the original run,
#  which mis-bucketed them; all other hours are unchanged.)
min_max_hour<-SparkR::sql("select max(Violation_Time_Hour) max_hour, min(Violation_Time_Hour) min_hour from NYC_Ticket_Base_tab")
head(min_max_hour)
#max_hour min_hour
# 99 0
#There seems to be invalid values in the data
garbage_hour<-SparkR::sql("select count(*) from NYC_Ticket_Base_tab where Violation_Time_Hour>23")
head(garbage_hour)
#2343248 records
2343248/nrow(NYC_Ticket_Base)
##9.2% records contain invalid data, lets remove them
NYC_Ticket_Base<-SparkR::sql("select * from NYC_Ticket_Base_tab where Violation_Time_Hour<=23")
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
#validate the data
min_max_hour<-SparkR::sql("select max(Violation_Time_Hour) max_hour, min(Violation_Time_Hour) min_hour from NYC_Ticket_Base_tab")
head(min_max_hour)
#max_hour min_hour
# 23 0
#hours ranging from 00-23 - army time
#Creating 6 buckets 4 hours each
NYC_Ticket_Base<-SparkR::sql("select NYC_Ticket_Base_tab.*,
case
when Violation_Time_Hour between 0 and 3 then 1
when Violation_Time_Hour between 4 and 7 then 2
when Violation_Time_Hour between 8 and 11 then 3
when Violation_Time_Hour between 12 and 15 then 4
when Violation_Time_Hour between 16 and 19 then 5
when Violation_Time_Hour between 20 and 23 then 6 end Violation_Time_bucket
from NYC_Ticket_Base_tab")
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
Violation_Time_bucket_Codes<-SparkR::sql("select Violation_Time_bucket,`violation code`, count(*) REC_COUNT
from NYC_Ticket_Base_tab group by Violation_Time_bucket,`violation code` ")
head(Violation_Time_bucket_Codes)
# Violation_Time_bucket violation code REC_COUNT
# 1 6 19 42520
# 2 4 54 5
# 3 2 46 74526
# 4 1 12 14
# 5 5 93 8
# 6 4 89 2222
#Time of the day for most 3 commonly occuring codes
commoncode_violation_time_buckets <- SparkR::collect(Violation_Time_bucket_Codes)
commoncode_violation_time_buckets<- arrange(commoncode_violation_time_buckets, desc(commoncode_violation_time_buckets$REC_COUNT))
head(commoncode_violation_time_buckets,3)
#Time bucket for 3 top codes
# Violation_Time_bucket violation code REC_COUNT
# 1 3 21 2825191
# 2 3 36 1483548
# 3 3 38 928803
#It appears that the top violation codes occur in time bucket 3
#filtering for higher values for plotting
Violation_Time_bucket_Codes<-SparkR::collect(SparkR::filter(Violation_Time_bucket_Codes, Violation_Time_bucket_Codes$REC_COUNT>50000))
plot <- ggplot(Violation_Time_bucket_Codes,aes(x = Violation_Time_bucket, y = REC_COUNT,col=`violation code`,label=`violation code`)) +
geom_point() +
geom_label_repel(aes(label = `violation code`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("Time Bucket") + ylab("REC_COUNT")
plot
# for 0-3 21,40,78 are the top violation code
# for 4-7 14,21,40
# for 8-11 21, 36,38
# for 12-15 38,36,37
# for 15-19 38, 37,14
# for 19-23 14, 38, 7
#36-38 , 40 , 14, 21 are mostly common across time buckets
#Year wise analysis
#Query based
Violation_Time_bucket_Year_Codes<-SparkR::sql(" select * from (select Year_time_bucket,
`violation code`,REC_COUNT ,
dense_rank() OVER (PARTITION BY Year_time_bucket ORDER BY REC_COUNT DESC) as rank
from (
select concat(Violation_Time_bucket,'_',Issue_Date_Year) Year_time_bucket,
`violation code`, count(*) REC_COUNT
from NYC_Ticket_Base_tab
group by concat(Violation_Time_bucket,'_',Issue_Date_Year),
`violation code`)T)V where rank<=3 ")
head(Violation_Time_bucket_Year_Codes,nrow(Violation_Time_bucket_Year_Codes))
# Year_time_bucket violation code REC_COUNT rank
# 1 3_2016 21 1099915 1
# 2 3_2016 36 686587 2
# 3 3_2016 38 355058 3
# 4 5_2016 38 205800 1
# 5 5_2016 37 155279 2
# 6 5_2016 14 131121 3
# 7 4_2017 38 184105 1
# 8 4_2017 36 184050 2
# 9 4_2017 37 130466 3
# 10 4_2015 38 367090 1
# 11 4_2015 37 289704 2
# 12 4_2015 36 282136 3
# 13 6_2015 7 59612 1
# 14 6_2015 38 55596 2
# 15 6_2015 40 42432 3
# 16 1_2015 21 59280 1
# 17 1_2015 40 33684 2
# 18 1_2015 78 27319 3
# 19 2_2017 14 73567 1
# 20 2_2017 40 60397 2
# 21 2_2017 21 56737 3
# 22 3_2017 21 592259 1
# 23 3_2017 36 347650 2
# 24 3_2017 38 175693 3
# 25 1_2017 21 33956 1
# 26 1_2017 40 23216 2
# 27 1_2017 14 13866 3
# 28 2_2015 14 131702 1
# 29 2_2015 21 101552 2
# 30 2_2015 40 86878 3
# 31 2_2016 14 131765 1
# 32 2_2016 21 107374 2
# 33 2_2016 40 93228 3
# 34 5_2017 38 102533 1
# 35 5_2017 14 75000 2
# 36 5_2017 37 70223 3
# 37 4_2016 36 382783 1
# 38 4_2016 38 348430 2
# 39 4_2016 37 278588 3
# 40 6_2017 7 26238 1
# 41 6_2017 40 22011 2
# 42 6_2017 14 20778 3
# 43 3_2015 21 1133017 1
# 44 3_2015 36 449311 2
# 45 3_2015 38 398052 3
# 46 1_2016 21 66975 1
# 47 1_2016 40 38369 2
# 48 1_2016 78 27160 3
# 49 5_2015 38 196455 1
# 50 5_2015 37 151789 2
# 51 5_2015 14 130146 3
# 52 6_2016 7 59420 1
# 53 6_2016 38 47491 2
# 54 6_2016 14 42775 3
#Plot based
plot <- ggplot(SparkR::collect(Violation_Time_bucket_Year_Codes),
aes(x = Year_time_bucket , y = REC_COUNT,col=`violation code`,label=`violation code`)) +
geom_point() +
geom_label_repel(aes(label = `violation code`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("Year_time_bucket") + ylab("REC_COUNT")
plot
#Please zoom the plot for better viewing
# We can see that 3rd bucket that is 8-11 morning hrs the counts are really high for all three years
# 21,36,38 being the key violation codes, atlthough 2017 has seen improment overall and these codes are repeated here as well
# For the first window 21, 40 are common one and two, and 78 is replaced by 14 in 2017 post 2015 & 2016
# For 2nd window 14,21,40 are first three across years
# For 4th window 36,37,38 are interchanging across three years
# 38 is common top for 5th window
# 37 is 2nd for first two year and 3rd for the last one
# 7 for 1st and 14 other two yearas is filling up the remaing positions
# 7 is common 1st in the 6th bucket
# 38,14 40 are filling up the other postions across the years
#So in a day violations start with codes 21,36,38 as time passed 14, 40 appears then afterwords again 36,37,38 dominate
#follwed by introduction of 7 and finishing with 38,14,40
# All 3 top violations happened in 3rd time window in 2016
# Maximum violation in code 36,38 happened in 4th time window in 2015 and 2017
# Maximum violation in code 21 happened in 1st time window in 2015
# Maximum violation in code 21 happened in 2nd time window in 2017
#------------------------------------------------------------------------------------------------------------------------------------------------------
#6.Let's try and find some seasonality in this data
#First, divide the year into some number of seasons,
#and find frequencies of tickets for each season.
#(Hint: Use Issue Date to segregate into seasons)
#Then, find the three most common violations for each of these seasons.
#(Hint: A similar approach can be used as mention in the hint for question 4.)
#lets divide the year into 4 set of month , 1-3,4-6,7-9 & 1-12
NYC_Ticket_Base$Issue_Date_Month<-SparkR::month(NYC_Ticket_Base$"Issue Date")
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
#Overall analysis
NYC_Ticket_Base<-SparkR::sql("select NYC_Ticket_Base_tab.*,
case
when Issue_Date_Month between 1 and 3 then 1
when Issue_Date_Month between 4 and 6 then 2
when Issue_Date_Month between 7 and 9 then 3
when Issue_Date_Month between 10 and 12 then 4 end Month_bucket
from NYC_Ticket_Base_tab")
createOrReplaceTempView(NYC_Ticket_Base, "NYC_Ticket_Base_tab")
# Season wise spread across the three years.
# (The result rows below were pasted without '#' in the original file,
#  which made the script un-parseable; they are comments now.)
Month_Wise<-SparkR::sql("select Month_bucket,count(*) as REC_COUNT from NYC_Ticket_Base_tab group by Month_bucket")
Month_Wise <- SparkR::arrange(Month_Wise, Month_Wise$Month_bucket)
head(Month_Wise,4)
# Month_bucket REC_COUNT
#            1   6489169
#            2   7108573
#            3   4692555
#            4   4897743
# Season wise violation code analysis across all the three years.
# NOTE: the aggregation must be built BEFORE it is filtered -- in the
# original script this SQL ran only after the four per-season filters
# below, so each filter() referenced Month_Wise_Violation before it
# existed. Definition moved to the top of the section.
Month_Wise_Violation<-SparkR::sql("select Month_bucket,`violation code` ,count(*) REC_COUNT
                             from NYC_Ticket_Base_tab group by Month_bucket,`violation code` ")
# Season 1 (Jan-Mar): top violation codes
Month_Wise_Violation_1 <- filter(Month_Wise_Violation, Month_Wise_Violation$Month_bucket == 1)
head(arrange(Month_Wise_Violation_1, desc(Month_Wise_Violation_1$REC_COUNT)))
# Month_bucket violation code REC_COUNT
#            1             21    824866
#            1             38    759550
#            1             36    688665
#            1             14    571222
#            1             37    404433
#            1             20    397134
# Season 2 (Apr-Jun): top violation codes
Month_Wise_Violation_2 <- filter(Month_Wise_Violation, Month_Wise_Violation$Month_bucket == 2)
head(arrange(Month_Wise_Violation_2, desc(Month_Wise_Violation_2$REC_COUNT)))
# Month_bucket violation code REC_COUNT
#            2             21   1033621
#            2             36    740325
#            2             38    727504
#            2             14    632942
#            2             37    427841
#            2             20    418218
# Season 3 (Jul-Sep): top violation codes
Month_Wise_Violation_3 <- filter(Month_Wise_Violation, Month_Wise_Violation$Month_bucket == 3)
head(arrange(Month_Wise_Violation_3, desc(Month_Wise_Violation_3$REC_COUNT)))
# Month_bucket violation code REC_COUNT
#            3             21    713757
#            3             38    493050
#            3             14    405934
#            3             36    364038
#            3             37    293070
#            3             20    277801
# Season 4 (Oct-Dec): top violation codes
Month_Wise_Violation_4 <- filter(Month_Wise_Violation, Month_Wise_Violation$Month_bucket == 4)
head(arrange(Month_Wise_Violation_4, desc(Month_Wise_Violation_4$REC_COUNT)))
# Month_bucket violation code REC_COUNT
#            4             36    751179
#            4             21    699299
#            4             38    482932
#            4             14    392281
#            4             20    277341
#            4             37    270932
Month_Wise_Violation<-SparkR::collect(SparkR::filter(Month_Wise_Violation, Month_Wise_Violation$REC_COUNT>50000))
plot <- ggplot(Month_Wise_Violation,aes(x = Month_bucket, y = REC_COUNT,col=`violation code`,label=`violation code`)) +
geom_point() +
geom_label_repel(aes(label = `violation code`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("Season") + ylab("REC_COUNT")
plot
#all seasons/months have 21,38,36, 14 as most common in differnt order
#Year wise analysis
Month_year_Wise_Violation<-SparkR::sql("select * from (
select Month_year_Bucket,`violation code`,REC_COUNT,
dense_rank() OVER (PARTITION BY Month_year_Bucket ORDER BY REC_COUNT DESC) as Rank
from (
select concat(Month_bucket,'_',Issue_Date_Year) Month_year_Bucket,
`violation code` ,count(*) REC_COUNT
from NYC_Ticket_Base_tab
group by concat(Month_bucket,'_',Issue_Date_Year),`violation code` ) T) V
where Rank<=3
")
head(Month_year_Wise_Violation,nrow(Month_year_Wise_Violation))
#Yearly Month/Seasonwise violation codes and records
# Month_year_Bucket violation code REC_COUNT Rank
# 1 3_2016 21 345564 1
# 2 3_2016 38 220875 2
# 3 3_2016 36 204171 3
# 4 4_2017 46 209 1
# 5 4_2017 40 132 2
# 6 4_2017 21 116 3
# 7 4_2015 21 388242 1
# 8 4_2015 36 375921 2
# 9 4_2015 38 245213 3
# 10 1_2015 38 226022 1
# 11 1_2015 21 175707 2
# 12 1_2015 14 159197 3
# 13 2_2017 21 354076 1
# 14 2_2017 36 266183 2
# 15 2_2017 14 232710 3
# 16 3_2017 21 243 1
# 17 3_2017 46 202 2
# 18 3_2017 40 112 3
# 19 1_2017 21 333263 1
# 20 1_2017 36 293799 2
# 21 1_2017 38 256831 3
# 22 2_2015 21 369613 1
# 23 2_2015 38 276722 2
# 24 2_2015 14 214645 3
# 25 2_2016 21 309932 1
# 26 2_2016 36 285092 2
# 27 2_2016 38 223546 3
# 28 4_2016 36 375258 1
# 29 4_2016 21 310941 2
# 30 4_2016 38 237713 3
# 31 3_2015 21 367950 1
# 32 3_2015 38 272166 2
# 33 3_2015 14 217383 3
# 34 1_2016 21 315896 1
# 35 1_2016 36 294616 2
# 36 1_2016 38 276697 3
#Let's visualize this inference
plot <- ggplot(SparkR::collect(Month_year_Wise_Violation),aes(x = Month_year_Bucket, y = REC_COUNT,col=`violation code`,label=`violation code`)) +
geom_point() +
geom_label_repel(aes(label = `violation code`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("Month_year_Bucket") + ylab("REC_COUNT")
plot
#
#Violation codes analysis
#21 38 are common for first 3 months across years
#14 for first year followed by 36 for remaining months are the norm
#
#21 is the category for 2nd set of month in all the three years
#with differnce being remarkably high in 2015 & 2017
# 38,36,14 share 2 slot each across the years
#
#Year 2017 have very low counts for this month
# 21,38 again feature as dominating the for years 2015 and 2016
# 14, 36 are in 3rd ranking for these two years respectively
#
# 46 and 40 are the odd one for 2017
#2017 observation for 4th set is same as for 3rd month
#21,36,38 are present interchangeably for 2015 and 2016
#------------------------------------------------------------------------------------------------------------------------------------------------------
#7.The fines collected from all the parking violation constitute a revenue source for the NYC police department. Let’s take an example of estimating that for the three most commonly occurring codes.
#Find total occurrences of the three most common violation codes
#Then, visit the website:
# http://www1.nyc.gov/site/finance/vehicles/services-violation-codes.page
#It lists the fines associated with different violation codes.
#They're divided into two categories, one for the highest-density locations of the city,
#the other for the rest of the city. For simplicity, take an average of the two.
#Using this information, find the total amount collected for the three violation codes with maximum tickets.
#State the code which has the highest total collection.
#What can you intuitively infer from these findings?
TOP_3_violation_code<-SparkR::sql("select `violation code`,count(*) REC_COUNT
from NYC_Ticket_Base_tab
group by `violation code` order by count(*) desc limit 3")
head(TOP_3_violation_code,3)
#violation code REC_COUNT Manhattan_fine all_others avg_fine Definition
#21 3271543 65 45 55 Street Cleaning: No parking where parking is not allowed by sign, street marking or traffic control device.
#36 2544207 50 50 50 Exceeding the posted speed limit in or near a designated school zone.
#38 2463036 65 35 50 (38) Failing to show a receipt or tag in the windshield.Drivers get a 5-minute grace period past the expired time on parking meter receipts.
#Total amount collected for top 3 violations - calculation
#Violation code 21
3271543*55
#179934865
#Violation code 36
2544207*50
#127210350
#Violation code 38
2463036*50
#123151800
#Parking related violation are most common type and are a good source of revenue
#Year wise analysis
TOP_3_violation_code_year<-SparkR::sql("select Issue_Date_Year,`violation code`,count(*) REC_COUNT
from NYC_Ticket_Base_tab
group by Issue_Date_Year,`violation code` order by count(*) desc")
#2015
TOP_3_violation_code_year_2015<-SparkR::collect(SparkR::filter(TOP_3_violation_code_year, TOP_3_violation_code_year$Issue_Date_Year == 2015))
head(TOP_3_violation_code_year_2015,3)
#Top 3 violation codes in 2015
# Issue_Date_Year violation code REC_COUNT
# 1 2015 21 1301512
# 2 2015 38 1020123
# 3 2015 36 825088
#2016
TOP_3_violation_code_year_2016<-SparkR::collect(SparkR::filter(TOP_3_violation_code_year, TOP_3_violation_code_year$Issue_Date_Year == 2016))
head(TOP_3_violation_code_year_2016,3)
#Top 3 violation codes in 2016
# Issue_Date_Year violation code REC_COUNT
# 1 2016 21 1282333
# 2 2016 36 1159137
# 3 2016 38 958831
#2017
TOP_3_violation_code_year_2017<-SparkR::collect(SparkR::filter(TOP_3_violation_code_year, TOP_3_violation_code_year$Issue_Date_Year == 2017))
head(TOP_3_violation_code_year_2017,3)
#Top 3 violation codes in 2017
# Issue_Date_Year violation code REC_COUNT
# 1 2017 21 687698
# 2 2017 36 559982
# 3 2017 38 484082
#Violation codes 21,36,38 are common across all 3 years
#Let's visualize these inferernces
##Reducing counts of smaller numbers for plotting
TOP_3_violation_code_year<-SparkR::collect(SparkR::filter(TOP_3_violation_code_year, TOP_3_violation_code_year$REC_COUNT>100000))
plot <- ggplot(TOP_3_violation_code_year,aes(x = Issue_Date_Year, y = REC_COUNT,col=`violation code`,label=`violation code`)) +
geom_point() +
geom_label_repel(aes(label = `violation code`),
box.padding = 0.35,
point.padding = 0.5,
segment.color = 'grey50') +
theme_classic()+ xlab("Issue_Date_Year") + ylab("REC_COUNT")
plot
#21,36,38 are top violation codes across the years
#Yearly amounts
#2015-21
1301512*55
#71583160
#2016-21
1282333*55
#70528315
#2017-21
687698*55
#37823390
#2015-36
825088*50
#41254400
#2016-36
1159137*50
#57956850
#2016-36
559982*50
#27999100
#2015-38
1020123*50
#51006150
#2016-38
958831*50
#47941550
#2017-38
484082*50
#24204100
#2015+2016+2017-total amount collected for 3 top violation codes
#Violation code 21
(1301512+1282333+687698)*55
#179934865
#Violation code 36
(825088+1159137+559982)*50
#127210350
#Violation code 38
(1020123+958831+484082)*50
#123151800
#Parking related violation are most common violation type and are a good source of revenue
#######################################################################################################################################################
#7.Closure
#######################################################################################################################################################
sparkR.stop()
|
990551a74656fb8d792fca617d1deb36cd1acaff
|
478dff15dbb67b960d4386bf393ec289ddd82b6f
|
/plot2.R
|
84c9978cb54e54402e24916d137fb6777685ddd9
|
[] |
no_license
|
panzerfauster/ExData_Plotting1
|
1ac21596542a21fc6e4a710121df573be9634fb4
|
a4dcef416d5c77ff28ba72ef84197461d9447c12
|
refs/heads/master
| 2021-01-17T23:40:06.477457
| 2015-09-13T18:55:23
| 2015-09-13T18:55:23
| 42,335,710
| 0
| 0
| null | 2015-09-11T22:47:45
| 2015-09-11T22:47:45
| null |
ISO-8859-2
|
R
| false
| false
| 899
|
r
|
plot2.R
|
# Data Science Specialization
# Course 4: Exploratory Data Analysis
# Course Project 1: Consumption Plots
# Fausto Martín López
### This code generates the plots required for Course Project 1 in the current Working Directory.

## Read and process the source file
# Date/Time come in as character; the seven measurement columns are numeric.
colClasses <- c("character", "character", rep("numeric", 7))
# 'power' instead of 'file' -- the original name shadowed base::file().
power <- read.table(file = "household_power_consumption.txt", header = TRUE, sep = ";",
                    quote = "", na.strings = "?", colClasses = colClasses)

# Filter the dates as instructed.
# Parse each date once instead of four separate as.Date() passes per row.
obs_date <- as.Date(power$Date, "%d/%m/%Y")
power <- power[obs_date %in% as.Date(c("2007-02-01", "2007-02-02")), ]

# Merge the Date and Time columns into a single timestamp column,
# then keep the timestamp plus the seven measurement columns.
power$DateTime <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %T")
power <- power[, c(10, 3:9)]

## Create the plot
# Plot 2: Global Active Power (kW) against time, written to plot2.png.
png("plot2.png")
plot(power$DateTime, power$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
8da034814cb5f5debe9d58bc4ba3277a87a6f322
|
277dafa77508edd5d298730aacae4866d8587d32
|
/ensm.R
|
c1f1df4a4b164db7a27a70c9da9286f866b2e55d
|
[] |
no_license
|
Allisterh/NaturalRate_ensm
|
ec1bac3bf9a7b41af5e2080ad362e1246aae6a2d
|
ce42313ceb6fe29f61c633c6f3ee5e40e2d1cb52
|
refs/heads/main
| 2023-08-26T10:02:44.443160
| 2021-11-05T10:05:55
| 2021-11-05T10:05:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,461
|
r
|
ensm.R
|
## extract file
# Bond universe as of the 1993-12-31 settlement date.
# Columns used below: [,1] maturity as yyyymmdd, [,2] coupon rate,
# [,3] quoted (clean) price -- TODO confirm against the CSV header.
df <- read.csv("treasurynotes12311993.csv")
## convert maturity to standard date format
# NOTE(review): this assumes the first column is literally named "Maturity"
# so transform() REPLACES it rather than appending a new column -- verify,
# since the ttm calculation below reads df[,1] as a Date.
df <- transform(df, "Maturity" = as.Date(as.character(df[,1]), "%Y%m%d"))
## calculate ttm = years to maturity
# Day count: actual days from settlement divided by 365 (no leap handling).
df$ttm <- (as.numeric(df[,1])-as.numeric(as.Date("1993-12-31")))/365
##determine coupon.no = number of coupon payments
# Semi-annual coupons, so roughly 2*ttm payments remain; the -0.01 nudge
# avoids counting a coupon that falls exactly on the settlement date.
# NOTE(review): df[,5] appears to be the ttm column just created (i.e. the
# raw CSV has four columns) -- confirm.
df$coupon.no <- floor((df[,5]-0.01)*2)+1
## Set parameters and the ensm function to calculate the discount factor Z(0,T).
# Fitted Svensson / extended Nelson-Siegel curve parameters:
# p[1]=beta0, p[2]=beta1, p[3]=beta2, p[4]=beta3, p[5]=tau1, p[6]=tau2.
p <- c(0.0687, -0.0422, -0.2399, 0.2116, 0.9652, 0.8825)
# Discount factor Z(0, ttm) implied by the fitted curve; vectorised over ttm.
# NOTE(review): ttm == 0 returns 0 here, although Z(0,0) = 1 by convention --
# the 0 return is kept to preserve the original behaviour; confirm intent.
z.ensm <- function(ttm) {
  s1 <- ttm / p[5]
  s2 <- ttm / p[6]
  yld <- p[1] +
    (p[2] + p[3]) * (1 - exp(-s1)) / s1 -
    p[3] * exp(-s1) +
    p[4] * ((1 - exp(-s2)) / s2 - exp(-s2))
  ifelse(ttm == 0, 0, exp(-yld * ttm))
}
# Present value of a note's coupon stream.
#
# Walks backwards through the semi-annual payment dates
# t_k = ttm1 - 0.5*(k-1) for each k in `n`, discounting every half-coupon
# c1/2 with z.ensm(). The loop stops after accumulating the payment that
# falls within the first half year (t_k < 0.5), mirroring the note's final
# remaining coupon.
#
# ttm1 : years to maturity of the note
# c1   : annual coupon rate (paid semi-annually as c1/2)
# n    : vector of coupon indices, typically 1:coupon.no
coupon.price <- function(ttm1, c1, n) {
  pv <- 0
  for (k in n) {
    pay_time <- ttm1 - 0.5 * (k - 1)
    pv <- pv + (c1 / 2) * z.ensm(pay_time)
    if (pay_time < 0.5) {
      break
    }
  }
  pv
}
#create output = price table
# Columns: PV of coupons, PV of par, model price (sum of the two), observed
# gross (dirty) price, and the pricing error (gross - model).
# NOTE(review): the row count 224 is hard-coded; it presumably equals
# nrow(df) for this particular data file -- confirm before reusing the
# script with another input file.
price.list <- matrix(0, 224,5, dimnames = list(1:224, c("coupon.pv", "par.pv", "price.model", "price.gross", "price.diff")))
for (i in 1:224) {
# Column roles assumed from the construction above: df[,2] coupon rate,
# df[,3] quoted (clean) price, df[,5] ttm, df[,6] remaining coupon count.
price.list[i,1] <- coupon.price(df[i,5], df[i,2], c(1:df[i,6]))
price.list[i,2] <- 100*z.ensm(df[i,5])
price.list[i,3] <- price.list[i,1]+price.list[i,2]
# Gross price = quoted price + accrued interest since the last coupon date.
price.list[i,4] <- df[i,3] + (1 - (df[i,5]*2 - floor(df[i,5]*2)))*df[i,2]/2
price.list[i,5] <- price.list[i,4] - price.list[i,3]
}
|
ac5646c0c9278891f328e9efdd931f50d9007449
|
67426b6f11131a696dfcd535183a722772a182d8
|
/Simulation/predict.R
|
6b71785b798cd2923f00e76311039f1ce8579e88
|
[
"MIT"
] |
permissive
|
jingeyu/CSSN_data_code
|
700c93eeeac4bba23287d7d0630bc89deb7364d3
|
e54c5820d3df3a1e46a631f41e83c945ebbffd12
|
refs/heads/main
| 2023-03-19T21:41:42.355100
| 2021-03-06T12:47:24
| 2021-03-06T12:47:24
| 331,542,553
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,858
|
r
|
predict.R
|
###############################################################
######################### Prediction ##########################
###############################################################
# Predicts gene co-expression networks for unobserved ("missing") cells by
# borrowing the sparse networks of spatially neighboring observed cells,
# then compares against networks simulated from an inverse-Wishart model.
rm(list = ls())
library(CholWishart)
library(MASS)
set.seed(20201231)
# only 20201205
# Loads (among others): X (genes x cells), Sigma.k (per-cell-type covariance
# array), cell.info (type + x/y coordinates), Corr.true, Sparse.Corr.
load(paste0("RData/", 20201205, "_Sigma.RData"))
load(paste0("RData/", 20201205, "_Sim_network.RData"))
load("RData/Result_20201205_0.1_70.RData")
# Binarize the true and estimated correlation structures (edge / no edge).
Corr.true[abs(Corr.true) != 0] <- 1
Sparse.Corr[Sparse.Corr != 0] <- 1
G <- nrow(X)
n <- ncol(X)
# set number of missing genes
miss.num <- 50
# generate coordinates of missing genes, uniform over the tissue area
miss.x <- runif(miss.num, 0, 750)
miss.y <- runif(miss.num, 0, 1000)
# sanity check: do any generated coordinates coincide with observed cells?
sum(miss.x %in% cell.info[,2])
sum(miss.y %in% cell.info[,3])
miss.indx <- cbind(miss.x, miss.y)
# Square neighborhood radius
r <- 80
# Find the observed cells lying in the square neighborhood of a location.
#
# miss.indx : length-2 vector c(x, y). A cell is a neighbor when both of its
#             absolute coordinate offsets are strictly below the global
#             radius `r`.
# Returns a two-column matrix of neighbor row indices and their cell types;
# the column names "nei.indx" / "cell.type.nei" are kept for downstream code.
# Reads the globals cell.info, cell.type and r.
NeiFind <- function(miss.indx){
  dx <- abs(cell.info[, 2] - miss.indx[1])
  dy <- abs(cell.info[, 3] - miss.indx[2])
  nei.indx <- which(dx < r & dy < r)
  cell.type.nei <- cell.type[nei.indx]
  cbind(nei.indx, cell.type.nei)
}
# First column of cell.info holds the cell-type label of each observed cell.
cell.type <- cell.info[,1]
# Expected inverse-Wishart scale matrix Lambda_i for a missing cell:
# a neighborhood-weighted mixture of the per-type covariances Sigma.k,
# scaled by (nu.i - G - 1) so that E[Sigma_i] equals the mixture.
#
# NOTE(review): despite the parameter name, this function is called below as
# ExpSigma(i, r, nu[i]) with the loop index i, and internally calls
# NeiFind(i) / cell.type[i] using the *global* i rather than coordinates.
# With a scalar argument, miss.indx[2] inside NeiFind is NA, so the
# neighborhood comes out empty and the ni == 0 branch is always taken --
# confirm whether this matches the authors' intent before changing it
# (the published error value at the bottom of the script depends on it).
# NOTE(review): the function ends on the assignment to Lambda.i; R returns
# the value of that last assignment, so there is no explicit return().
ExpSigma <- function(miss.indx, r, nu.i){
nei.mat <- data.frame(NeiFind(i))
colnames(nei.mat) <- c("cell.index", "cell.type")
ni <- nrow(nei.mat)
if(ni == 0){
# No neighbors: fall back to the covariance of this cell's own type.
Lambda.i <- (nu.i - G - 1) * Sigma.k[,,cell.type[i]]
}else{
# Weight each neighboring cell type by its share of the neighborhood.
cell.label <- as.integer(names(table(nei.mat$cell.type)))
nei.nk <- as.numeric(table(nei.mat$cell.type))
weight <- nei.nk / ni
tmp <- 0
for(j in 1:length(cell.label)){
tmp <- tmp + Sigma.k[,,cell.label[j]] * weight[j]
}
Lambda.i <- (nu.i - G - 1) * tmp
}
}
# Inverse-Wishart degrees of freedom for every missing cell.
nu <- rep(G + 50, miss.num)
Sigma.miss <- array(NA, dim = c(G, G, miss.num))
X.miss <- matrix(NA, G, miss.num)
Lambda.miss <- array(NA, dim = c(G, G, miss.num))
# Hard threshold below which covariances are zeroed out (sparsification).
c.thre <- 0.5
Corr.miss <- array(NA, dim = c(G, G, miss.num))
# Simulate a "ground truth" covariance, correlation and expression vector
# for each missing cell from the inverse-Wishart / Gaussian model.
for(i in 1:miss.num){
Lambda.miss[,, i] <- ExpSigma(i, r, nu[i])
Sigma.miss[,, i] <- rInvWishart(1, nu[i], Lambda.miss[,, i])[,,1]
Sigma.miss[,, i][abs(Sigma.miss[,, i]) < c.thre] <- 0
# ensure Sigma_i are positive definite
diag(Sigma.miss[,,i]) <- diag(Sigma.miss[,,i]) + 5
# Standardize to a correlation matrix: D^{-1/2} Sigma D^{-1/2}.
Corr.miss[,,i] <- diag(diag(Sigma.miss[,,i])^(-0.5)) %*% Sigma.miss[,,i] %*% diag(diag(Sigma.miss[,,i])^(-0.5))
X.miss[,i] <- mvrnorm(1, mu = rep(0, G), Sigma = Sigma.miss[,,i])
}
# Binarize the simulated networks (edge / no edge).
Corr.miss[Corr.miss != 0] <- 1
#-------- Predictions of missing cells--------
# Predict each missing cell's network by majority vote over the binarized
# networks of its spatial neighbors.
est.miss <- array(NA, dim = c(G, G, miss.num))
for(i in 1:miss.num){
miss.nei <- NeiFind(miss.indx[i,])
# NOTE(review): if a missing cell has exactly one neighbor, this indexing
# drops the third dimension and apply() below would fail -- consider
# drop = FALSE; confirm whether the seed guarantees >= 2 neighbors here.
tmp <- Sparse.Corr[,, miss.nei[,1]]
tmp1 <- apply(tmp, 1:2, mean)
tmp1[tmp1 < 0.5] <- 0
tmp1[tmp1 >= 0.5] <- 1
est.miss[,, i] <- tmp1
}
# Prediction error: number of disagreeing edges in the upper triangle,
# averaged over the missing cells.
pre.error <- rep(0, miss.num)
for(i in 1:miss.num){
pre.error[i] <- sum(abs(est.miss[,,i][upper.tri(est.miss[,,i])] - Corr.miss[,,i][upper.tri(Corr.miss[,,i])]))
}
sum(pre.error) / miss.num
# 347.84
|
0c422c8d9b383fbeafd5877d9add45d3944ec2fa
|
02b178b7ebb101940d6ede02b10c52dec501dcd6
|
/microarray/UseRMA.R
|
cce12db544bae178318068cbf7f0e0e5cbae22e4
|
[
"MIT"
] |
permissive
|
radio1988/bpipes
|
21ea7c124f1bd962afe32644c445da3bb7a7d177
|
0aceb97070210c2361adb45ee0040b6aa5be771b
|
refs/heads/master
| 2023-08-24T12:40:19.129216
| 2023-08-24T00:49:42
| 2023-08-24T00:49:42
| 140,731,030
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,111
|
r
|
UseRMA.R
|
# Generate a normalized expression table with ReadAffy and RMA.
# By Lihua Julie Zhu, December 7th 2007. Input: Affymetrix .CEL files.
#
# Fixes applied in review:
#  * the continuation line of a commented-out qc.affy() call was left
#    uncommented, leaving an unbalanced ")" that made the file unparseable;
#  * write.table() at the bottom referenced undefined `postPM` instead of
#    the `postnorm` object built on the previous line;
#  * removed a duplicate require(affy) (library(affy) is already loaded).
rm(list=ls())
library(affy)
setwd("~/Documents/ConsultingActivities/MicroarrayExp/MarianWalhout")
# Read all .CEL files from the "embryo" directory into an AffyBatch.
Data <- ReadAffy(celfile.path="embryo")
#Data<-ReadAffy(filenames=targets$FileName,celfile.path="CEL");
#par(mfrow=c(1,2));
# Shorten sample names: keep every other token of the dot-split file names.
temp <- unlist(strsplit(sampleNames(Data), "\\."))
sampleNames(Data) <- cbind(temp[1],temp[3], temp[5], temp[7], temp[9], temp[11])
boxplot(Data, col=c(2,3,4,5,6,7))
#boxplot(Data, col=c(1,2,3,4));
# RMA normalization; write the expression matrix and read it back as a table.
eset <- rma(Data)
write.exprs(eset, file="dataRMA.txt")
pcNorm <- read.table("dataRMA.txt", header=TRUE, sep="\t", dec=".")
colnames(pcNorm)[1] <- "Probe"
sampleNames(Data)
slotNames(Data)
# Quality-control metrics via simpleaffy.
library("simpleaffy")
Data.qc <- qc(Data)
avbg(Data.qc) #comparable
#Data.qc <- qc.affy(Data,normalised=NULL,tau=0.015,logged=TRUE,
#                   cdfn=cleancdfname(cdfName(Data)))
#Data.qc <- qc.affy(Data,normalised=NULL,tau=0.015,logged=TRUE, cdfn=cdfName(Data))
#scaling factor
sfs(Data.qc) #comparable
percent.present(Data.qc) # comparable
ratios(Data.qc)[,1:2] # <3
spikeInProbes(Data.qc)
## Normalization of the data using MAS5.0
eset.mas5 <- mas5(Data)
##################### Get the P/A call info ##############
APInfo <- mas5calls(Data)
#exprs2excel(APInfo, file="Results/dataMas5_PresentCall.csv")
#setwd('./Results')
#exprs2excel(eset.mas5, file="dataMas5.csv")
#write.exprs(eset.mas5, file="Results/dataMas5.csv")
slotNames(APInfo)
# Present/Absent calls per probe set, one column per sample.
present.call <- exprs(APInfo)
colnames(present.call) <- paste("PresentCall",colnames(present.call), sep=".")
colnames(present.call)
#boxplot(pcNorm[,2:dim(pcNorm)[2]], col=c(rep(2,4),rep(3,3),rep(4,4),rep(5,4)),range=0);
boxplot(pcNorm[,2:dim(pcNorm)[2]], col=c(2,3,4,5,6,7),range=0)
#par(mfrow=c(2,2));
#image(Data);
#geneIDS <- "need to put a list of ids in"
library(annaffy)
annotation(eset)
#Symbol = aafSymbol(geneIDs, "zebrafish")
##########################################The following is for exploring purpose#############################
# RNA degradation diagnostics.
deg <- AffyRNAdeg(Data)
plotAffyRNAdeg(deg,col=c(2,3,4,5,6,7))
#summaryAffyRNAdeg(deg)
deg$sample.names
legend(5, 10, deg$sample.names, pch = rep(16, 6), col=c(2,3,4,5,6,7))
#legend(0,46,c("LL_Drosophila_2_1","LL_Drosophila_2_2","LL_Drosophila_2_3","LL_Drosophila_2_4"),pch=rep(16,4),col=c(2,3,4,5))
# Probe-level exploration: perfect-match (PM) / mismatch (MM) intensities.
probeNames(Data)[1:10]
dim(pm(Data))
pm(Data)[1:10,]
dim(mm(Data))
dim(intensity(Data)) #intensity for a given probe of the same cdf type across all chips
geneNames(Data)[1:10]
# Export pre-normalization PM intensities keyed by probe set name.
prenorm <- cbind(as.character(probeNames(Data)),pm(Data))
write.table(prenorm, file="preNorm_PM.csv", sep=",")
# Quantile-normalize and compare PM distributions before/after via QQ plots.
normPM <- normalize(Data, method="quantiles")
dim(pm(normPM))
qqplot(pm(Data)[,1],pm(Data)[,2])
qqplot(pm(normPM)[,1], pm(normPM)[,2])
qqplot(pm(normPM)[,1],pm(normPM)[,3])
qqplot(pm(Data)[,1],pm(Data)[,3])
# Export post-normalization PM intensities (was: undefined `postPM`).
postnorm <- cbind(as.character(probeNames(Data)),pm(normPM))
write.table(postnorm, file="postNorm_PM.csv", sep=",")
#write.exprs(eset, file="StatRMA.csv");
|
4296390d3edf80ac0660324e31ffa054e64a544c
|
457c6af00135a67a0c7969dba0348214a26b4335
|
/plot2.R
|
0a8de962e0d4caec2e2b70025d9bf8e98ed90ced
|
[] |
no_license
|
hiicharles/ExData_Plotting1
|
912f46b2ac0bd984c5fe78a6cdb134f6bb279482
|
815ca53b5742c54f9da1ba3554608339e3b5d03b
|
refs/heads/master
| 2020-12-14T08:57:14.561822
| 2014-12-07T17:59:13
| 2014-12-07T17:59:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,227
|
r
|
plot2.R
|
## Exploratory Data Analysis (exdata-016)
## Course Project 1
## By hiicharles@gmail.com
## plot2.R
## If you want to test, change the filePath.
setwd("~/Development/data/exdata-016/")
filePath <- "~/Development/data/exdata-016/household_power_consumption.txt"

## Read the file; "?" marks missing values (2075259 obs in the full file).
raw <- read.table(file = filePath, header = TRUE, sep = ";", na.strings = "?")

## Keep only the dates "1/2/2007" and "2/2/2007" (1440 observations each).
sub_data <- raw[raw$Date %in% c("1/2/2007", "2/2/2007"), ]

## Drop the full table to free up memory.
rm(raw)

## Date1: the date as class Date.
## Time1: full timestamp (Date + Time) as class POSIXct.
sub_data$Date1 <- as.Date(x = sub_data$Date, format = "%d/%m/%Y")
sub_data$Time1 <- as.POSIXct(x = paste(sub_data$Date, sub_data$Time, sep = " "),
                             format = "%d/%m/%Y %H:%M:%S")

## Draw directly to a 480x480 PNG device.
png(filename = "plot2.png", width = 480, height = 480, units = "px")
plot(x = sub_data$Time1,
     y = sub_data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
df9db2dbb308f6b3471d20ca1aa5e4471897d8ed
|
a44a3f17ac568ae03e4a23121cda5e4ab9bfa275
|
/Assignemnt1.R
|
1a3feb29f852e666d03f1780b311bc8abbcf5f53
|
[] |
no_license
|
y437li/AI_Market_assignment
|
f1673ed9749cfe7705b49b6d74d65c37673042ed
|
7ecfb6790eff63f52870e8187cdea80617865e21
|
refs/heads/master
| 2022-11-17T07:14:44.228196
| 2020-07-19T14:21:18
| 2020-07-19T14:21:18
| 279,747,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,118
|
r
|
Assignemnt1.R
|
## Sales driver analysis: stepwise comparison of linear models of `sales`
## against price, store count and advertising channels, evaluated on a
## 75/25 train/test split with diagnostic tests (BP for heteroskedasticity,
## DW for serial correlation) at every step.
##Load data from local csv file
path = '/Users/yangli/OneDrive/MMAI/MMAI831/AIOS1_adv_sales.csv'
#path = 'C:/Users/y437l/OneDrive/MMAI/MMAI831/AIOS1_adv_sales.csv'
data <- read.csv(file =path)
#drop index column
data <- subset(data, select = -c(X))
##No missing data
#basic descriptive statistics
summary(data[,c(1:6)])
# NOTE(review): scaling is applied to the full data BEFORE the train/test
# split, so test-set information leaks into the scaling parameters --
# consider fitting the scaling on the training rows only.
data[,1:6]<- scale(data[,1:6])
#check data suitability
#generalized pair graphs to check for bivariate correlations
library(gpairs)
gpairs(data[,c(1:6)])
###seems like there is no correlation relationship between independent variables
library(corrplot)
corrplot(cor(data[,c(1:6)]), method = "color", type="full", addgrid.col = "red",
addshade = "positive", addCoef.col = "black")
##The goal of a sales driver analysis is to discover relationships between
##the sale volume with features of the price and number of stores for the
## product and different type of advertisements.
###split the data (first 75% of rows = train, remainder = test; no shuffle)
train_end <- floor(0.75*nrow(data))
test_star <- train_end+1
train_data <- data[0:train_end,]
test_data <- data[test_star:nrow(data),]
##Fitting the model
##Start with simple linear regression
model1<-lm(sales~price,data=train_data)
summary(model1)
#predict test data and compute out-of-sample R squared by hand
model1_test_result <- predict(model1, newdata=test_data)
TSS1 <- sum((test_data$sales-mean(test_data$sales))^2)
RSS1 <- sum((test_data$sales-model1_test_result)^2)
test_R_squared1 = 1 - (RSS1/TSS1)
###########################
#train R squared:0.06081
#test R squared:0.04407
###########################
##Multiple linear regression with two factors
library(lmtest)
model2<-lm(sales~price+store,data=train_data)
##Anova table,F-test
anova(model2)
#R squared
summary(model2)
# check heteroskedasticity
par(mfrow=c(1,1))
plot(model2)
plot(model2$residuals)
###Breusch-pagen test
bptest(model2)
###Durbin-Watson test serial correlation
dwtest(model2)
#predict test data
model2_test_result <- predict(model2, newdata=test_data)
TSS2 <- sum((test_data$sales-mean(test_data$sales))^2)
RSS2 <- sum((test_data$sales-model2_test_result)^2)
test_R_squared2 = 1 - (RSS2/TSS2)
###########################
#train R squared:0.3237
#test R squared:0.3121
###########################
##Multiple linear regression with three factors
model3<-lm(sales~price+store+billboard,data=train_data)
##Anova table,F-test
anova(model3)
#R squared
summary(model3)
# check heteroskedasticity
par(mfrow=c(1,1))
plot(model3)
plot(model3$residuals)
###Breusch-pagen test
bptest(model3)
###Durbin-Watson test serial correlation
dwtest(model3)
#predict test data
model3_test_result <- predict(model3, newdata=test_data)
TSS3 <- sum((test_data$sales-mean(test_data$sales))^2)
RSS3 <- sum((test_data$sales-model3_test_result)^2)
test_R_squared3 = 1 - (RSS3/TSS3)
###########################
#train R squared:0.84
#test R squared:0.854
###########################
##Multiple linear regression with four factors
model4<-lm(sales~price+store+billboard+printout,data=train_data)
##Anova table,F-test
anova(model4)
#R squared
summary(model4)
# check heteroskedasticity
par(mfrow=c(1,1))
plot(model4)
###Breusch-pagen test
bptest(model4)
###Durbin-Watson test serial correlation
plot(model4$residuals)
dwtest(model4)
#predict test data
model4_test_result <- predict(model4, newdata=test_data)
TSS4 <- sum((test_data$sales-mean(test_data$sales))^2)
RSS4 <- sum((test_data$sales-model4_test_result)^2)
test_R_squared4 = 1 - (RSS4/TSS4)
###########################
#train R squared:0.84
#test R squared:0.854
# NOTE(review): these values are identical to model3's and look
# copy-pasted -- re-run the script and record model4's actual R squared.
###########################
##Multiple linear regression with five factors
model5<-lm(sales~price+store+billboard+printout+sat,data=train_data)
##Anova table,F-test
anova(model5)
#R squared
summary(model5)
# check heteroskedasticity
par(mfrow=c(1,1))
plot(model5)
plot(model5$residuals)
###Breusch-pagen test heteroskedasticity
bptest(model5)
###Durbin-Watson test serial correlation
dwtest(model5)
#predict test data
model5_test_result <- predict(model5, newdata=test_data)
TSS5 <- sum((test_data$sales-mean(test_data$sales))^2)
RSS5 <- sum((test_data$sales-model5_test_result)^2)
test_R_squared5 = 1 - (RSS5/TSS5)
###########################
#train R squared:0.9135
#test R squared:0.9158
###########################
##Multiple linear regression with six factors
model6<-lm(sales~price+store+billboard+printout+sat+comp,data=train_data)
##Anova table,F-test
anova(model6)
#R squared
summary(model6)
# check heteroskedasticity
par(mfrow=c(1,1))
plot(model6)
plot(model6$residuals)
###Breusch-pagen test heteroskedasticity
bptest(model6)
###Durbin-Watson test serial correlation
dwtest(model6)
#predict test data
model6_test_result <- predict(model6, newdata=test_data)
TSS6 <- sum((test_data$sales-mean(test_data$sales))^2)
RSS6 <- sum((test_data$sales-model6_test_result)^2)
test_R_squared6 = 1 - (RSS6/TSS6)
###########################
#train R squared:0.9201
#test R squared:0.92018
###########################
##Multiple linear regression with five factors (printout dropped)
model5_1<-lm(sales~price+store+billboard+sat+comp,data=train_data)
##Anova table,F-test
anova(model5_1)
#R squared
summary(model5_1)
# check heteroskedasticity
par(mfrow=c(1,1))
plot(model5_1)
plot(model5_1$residuals)
###Breusch-pagen test heteroskedasticity
bptest(model5_1)
###Durbin-Watson test serial correlation
dwtest(model5_1)
#predict test data
model5_1_test_result <- predict(model5_1, newdata=test_data)
TSS5_1 <- sum((test_data$sales-mean(test_data$sales))^2)
RSS5_1 <- sum((test_data$sales-model5_1_test_result)^2)
test_R_squared5_1 = 1 - (RSS5_1/TSS5_1)
###########################
#train R squared:0.9201
#test R squared:0.92026
###########################
##Multiple linear regression with nine factors (adds pairwise interactions)
model9<-lm(sales~price+store+billboard+printout+sat+comp
+store:billboard+store:printout+billboard:printout,data=train_data)
##Anova table,F-test
anova(model9)
#R squared
summary(model9)
# check heteroskedasticity
par(mfrow=c(1,1))
plot(model9)
plot(model9$residuals)
###Breusch-pagen test heteroskedasticity
bptest(model9)
###Durbin-Watson test serial correlation
dwtest(model9)
#predict test data
model9_test_result <- predict(model9, newdata=test_data)
TSS9 <- sum((test_data$sales-mean(test_data$sales))^2)
RSS9 <- sum((test_data$sales-model9_test_result)^2)
test_R_squared9 = 1 - (RSS9/TSS9)
###########################
#train R squared:0.9259
#test R squared:0.92839
###########################
##Multiple linear regression with six factors (one interaction kept)
model6_1<-lm(sales~price+store+billboard+sat+comp
+store:billboard,data=train_data)
##Anova table,F-test
anova(model6_1)
#R squared
summary(model6_1)
# check heteroskedasticity
par(mfrow=c(1,1))
plot(model6_1)
plot(model6_1$residuals)
###Breusch-pagen test heteroskedasticity
bptest(model6_1)
###Durbin-Watson test serial correlation
dwtest(model6_1)
#predict test data
model6_1_test_result <- predict(model6_1, newdata=test_data)
TSS6_1 <- sum((test_data$sales-mean(test_data$sales))^2)
RSS6_1 <- sum((test_data$sales-model6_1_test_result)^2)
test_R_squared6_1 = 1 - (RSS6_1/TSS6_1)
###########################
#train R squared:0.9257
#test R squared:0.9284961
###########################
|
d46c29cffbe95ce0e246b6ea90c5335aaf82632f
|
9a3b4965c85af90f870baba83c23a0103a986353
|
/assignments/a6/plot_null_alt.R
|
03fe14d274f729ca01258dc8a3f54e41b84ff271
|
[
"CC0-1.0",
"GPL-1.0-or-later",
"GPL-2.0-only",
"GPL-3.0-only",
"CC-BY-4.0"
] |
permissive
|
ly129/EPIB607
|
2ed99374d99c4e8f003191782af7a6fd97707756
|
ac2f917bc064f8028a875766af847114cd306396
|
refs/heads/master
| 2020-07-05T19:28:44.636344
| 2018-12-11T13:13:52
| 2018-12-11T13:13:52
| 202,746,856
| 0
| 1
|
CC0-1.0
| 2019-08-16T14:56:33
| 2019-08-16T14:56:33
| null |
UTF-8
|
R
| false
| false
| 10,765
|
r
|
plot_null_alt.R
|
#' Plot null and alternative distributions
#' @param n sample size
#' @param s population standard deviation (or estimated standard deviation)
#' @param mu0 mean under the null hypothesis
#' @param mha mean under the alternative hypothesis
#' @param alternative is alternative hypothesis greater than or less than or
#' equal to mu0. Defaults to 'less'. If alternative='equal' then two cutoff
#' points must be specified
#' @param cutoff critical value(s). if alternative='equal', then you must
#' provide two values, e.g., for alpha level 0.05, cutoffs = qnorm(c(0.025,
#' 0.975), mu0, s/sqrt(n))
#' @param legend show legend? Defaults to TRUE
#' @param ... other arguments passed to graphics::title
#' @details requires the latex2exp package to be installed
# Draws the sampling distribution under H0 (with alpha shaded red) stacked
# below the distribution under Ha (beta / power shaded), for one- or
# two-sided tests, using base graphics.
# NOTE(review): dh0/dh1 are computed in every branch but never used.
# NOTE(review): the sprintf strings "cutoff = %#.4f$" end in a lone "$";
# verify that latex2exp renders this as intended.
power_plot <- function(n, s, mu0, mha, cutoff,
alternative = c("less", "greater", "equal"),
legend = TRUE, ...) {
if (!requireNamespace("latex2exp"))
stop("you need to install the 'latex2exp' package for this function to work")
# Colour-blind-friendly palette (Okabe-Ito).
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7")
alternative <- match.arg(alternative)
# Standard error of the mean; both curves are N(mu, SEM).
SEM <- s / sqrt(n)
if (alternative == "greater") {
if (mha < mu0) stop("mean under Ha is less than the null. select alternative='less'")
x <- seq(mu0 - 4.25*SEM, mha + 3*SEM, length = 1000)
dh0 <- dnorm(x, mu0, SEM)
dh1 <- dnorm(x, mha, SEM)
# ht is the vertical offset between the stacked H0 and Ha curves.
ht <- 1.1 * dnorm(mu0, mu0, SEM)
plot.new()
plot.window(xlim = range(x), ylim = c(.01*ht, 3.2*ht))
axis(1)
title(...)
# null
green <- seq(mu0 - 3 * SEM, cutoff, length.out = 1000)
d <- c(dnorm(green,mu0,SEM),0)
I <- 1
polygon(c(green,cutoff),d + I * ht, col = cbPalette[4], border = NA)
# Upper tail of H0 beyond the cutoff = alpha region (red).
red <- seq(cutoff, mu0 + 3 * SEM, length.out = 1000)
d <- c(dnorm(red, mu0, SEM), 0)
polygon(c(red,cutoff), d + I * ht, col = "red", border = NA)
points(mu0, I * ht, cex = 0.7, pch = 19)
text(labels = latex2exp::TeX(sprintf("$\\mu_{H_0} = %#.4f$", mu0)),
x = mu0, y = I*ht*.90)
# alternative
green <- seq(mha - 3 * SEM, cutoff, length.out = 1000)
d <- c(dnorm(green, mha, SEM), 0)
polygon(c(green,cutoff),d + (I + 1) * ht, col = cbPalette[6], border = NA)
points(mha, (I + 1) * ht, cex = 0.7, pch = 19)
text(labels = latex2exp::TeX(sprintf("$\\mu_{H_A} = %#.4f$", mha)),
x = mha, y = (I + 1) * ht * .95)
red <- seq(cutoff, mha + 3 * SEM, length.out = 1000)
d <- c(dnorm(red, mha, SEM), 0)
polygon(c(red,cutoff), d + (I + 1) * ht, col = cbPalette[2], border = NA)
# Error rates for the legend.
alpha <- pnorm(cutoff, mu0, SEM, lower.tail = FALSE)
beta <- pnorm(cutoff, mha, SEM)
labs.h0 <- latex2exp::TeX(sprintf("$\\alpha$ = %#.3f", alpha))
labs.h1a <- latex2exp::TeX(sprintf("$\\beta$ = %#.3f", beta))
labs.h1b <- latex2exp::TeX(sprintf("$1 - \\beta$ = %#.3f",1 - beta))
if (legend) legend("topleft", legend = c(labs.h0, labs.h1a, labs.h1b), pch = 15,cex = 1.2,
col = c("red", cbPalette[c(6,2)]))
# Vertical cutoff line connecting the two stacked curves, plus arrows
# marking the mu0->cutoff and cutoff->mha distances.
segments(cutoff,(I)*ht*0.2,
cutoff,(I+1)*ht,lwd=0.5,col="red")
text(labels = latex2exp::TeX(sprintf("cutoff = %#.4f$", cutoff)),
x = cutoff, y = (I)*ht*0.15)
arrows(mu0,(I-0.25)*ht,
cutoff,(I-0.25)*ht,length=0.05,
code=3,angle=20,col=cbPalette[6],lwd=1.5)
arrows(cutoff,(I+1-0.25)*ht,
mha,(I+1-0.25)*ht,length=0.05,
code=3,angle=15,col="red",lwd=1.5)
segments(mha, I * ht, mha,
(I + 2) * ht * 1.2,lwd=0.5,col="grey60")
}
if (alternative == "less") {
# Mirror image of the "greater" branch: alpha region is the lower tail.
if (mha > mu0) stop("mean under Ha is greater than the null. select alternative='greater'")
# browser()
x <- seq(mha - 4.25*SEM, mu0 + 3*SEM, length = 1000)
dh0 <- dnorm(x, mu0, SEM)
dh1 <- dnorm(x, mha, SEM)
ht <- 1.1 * dnorm(mu0, mu0, SEM)
plot.new()
plot.window(xlim = range(x), ylim = c(.01*ht, 3.2*ht))
axis(1)
title(...)
# null
green <- seq(mu0 + 3 * SEM, cutoff, length.out = 1000)
d <- c(dnorm(green,mu0,SEM),0)
I <- 1
polygon(c(green,cutoff),d + I * ht, col = cbPalette[4], border = NA)
red <- seq(mu0 - 3 * SEM, cutoff, length.out = 1000)
d <- c(dnorm(red, mu0, SEM), 0)
polygon(c(red,cutoff), d + I * ht, col = "red", border = NA)
points(mu0, I * ht, cex = 0.7, pch = 19)
text(labels = latex2exp::TeX(sprintf("$\\mu_{H_0} = %#.4f$", mu0)),
x = mu0, y = I*ht*.90)
# alternative
green <- seq(mha + 3 * SEM, cutoff, length.out = 1000)
d <- c(dnorm(green, mha, SEM), 0)
polygon(c(green,cutoff),d + (I + 1) * ht, col = cbPalette[6], border = NA)
points(mha, (I + 1) * ht, cex = 0.7, pch = 19)
text(labels = latex2exp::TeX(sprintf("$\\mu_{H_A} = %#.4f$", mha)),
x = mha, y = (I + 1) * ht * .95)
red <- seq(mha - 3 * SEM, cutoff, length.out = 1000)
d <- c(dnorm(red, mha, SEM), 0)
polygon(c(red,cutoff), d + (I + 1) * ht, col = cbPalette[2], border = NA)
alpha <- pnorm(cutoff, mu0, SEM, lower.tail = TRUE)
beta <- pnorm(cutoff, mha, SEM, lower.tail = FALSE)
labs.h0 <- latex2exp::TeX(sprintf("$\\alpha$ = %#.3f", alpha))
labs.h1a <- latex2exp::TeX(sprintf("$\\beta$ = %#.3f", beta))
labs.h1b <- latex2exp::TeX(sprintf("$1 - \\beta$ = %#.3f",1 - beta))
if (legend) legend("topleft", legend = c(labs.h0, labs.h1a, labs.h1b), pch = 15,cex = 1.2,
col = c("red", cbPalette[c(6,2)]))
segments(cutoff,(I)*ht*0.2,
cutoff,(I+1)*ht,lwd=0.5,col="red")
text(labels = latex2exp::TeX(sprintf("cutoff = %#.4f$", cutoff)),
x = cutoff, y = (I)*ht*0.15)
arrows(mu0,(I-0.25)*ht,
cutoff,(I-0.25)*ht,length=0.05,
code=3,angle=20,col=cbPalette[6],lwd=1.5)
arrows(cutoff,(I+1-0.25)*ht,
mha,(I+1-0.25)*ht,length=0.05,
code=3,angle=15,col="red",lwd=1.5)
segments(mha, I * ht, mha,
(I + 2) * ht * 1.2,lwd=0.5,col="grey60")
}
if (alternative == "equal") {
# Two-sided test: `cutoff` must hold the lower and upper critical values;
# alpha and power sum both tails. text()/segments() below are vectorized
# over the two cutoff values.
# if (mha > mu0) stop("mean under Ha is greater than the null. select alternative='greater'")
if (length(cutoff) != 2) stop("cutoff should be a vector of length 2 when alternative='equal'")
# browser()
x <- seq(min(mha - 4.25*SEM, mu0 - 4.25*SEM),
max(mha + 4.25*SEM, mu0 + 4.25*SEM), length = 1000)
dh0 <- dnorm(x, mu0, SEM)
dh1 <- dnorm(x, mha, SEM)
ht <- 1.1 * dnorm(mu0, mu0, SEM)
plot.new()
plot.window(xlim = range(x), ylim = c(.01*ht, 3.2*ht))
axis(1)
# axis(2)
title(...)
# null ----
green <- seq(min(cutoff), max(cutoff), length.out = 1000)
d <- c(0,dnorm(green,mu0,SEM),0)
I <- 1
polygon(c(min(cutoff),green,max(cutoff)), d + I * ht, col = cbPalette[4], border = NA)
# lower tail for null
red <- seq(mu0 - 3 * SEM, min(cutoff), length.out = 1000)
d <- c(dnorm(red, mu0, SEM), 0)
polygon(c(red,min(cutoff)), d + I * ht, col = "red", border = NA)
# upper tail for null
red <- seq(max(cutoff), mu0 + 3 * SEM, length.out = 1000)
d <- c(0, dnorm(red, mu0, SEM))
polygon(c(max(cutoff), red), d + I * ht, col = "red", border = NA)
points(mu0, I * ht, cex = 0.7, pch = 19)
text(labels = latex2exp::TeX(sprintf("$\\mu_{H_0} = %#.4f$", mu0)),
x = mu0, y = I*ht*.90)
# alternative - upper ----
green <- seq(min(cutoff), max(cutoff), length.out = 1000)
d <- c(0,dnorm(green,mha,SEM),0)
polygon(c(min(cutoff),green,max(cutoff)), d + (I+1) * ht, col = cbPalette[6], border = NA)
points(mha, (I + 1) * ht, cex = 0.7, pch = 19)
text(labels = latex2exp::TeX(sprintf("$\\mu_{H_A} = %#.4f$", mha)),
x = mha, y = (I + 1) * ht * .95)
# lower tail for alternative
red <- seq(mha - 4.25 * SEM, min(cutoff), length.out = 1000)
d <- c(dnorm(red, mha, SEM), 0)
polygon(c(red,min(cutoff)), d + (I + 1) * ht, col = cbPalette[2], border = NA)
# upper tail for alternative
red <- seq(max(cutoff), mha + 4.25 * SEM, length.out = 1000)
d <- c(0, dnorm(red, mha, SEM))
polygon(c(max(cutoff), red), d + (I + 1) * ht, col = cbPalette[2], border = NA)
alpha <- pnorm(min(cutoff), mu0, SEM, lower.tail = TRUE) +
pnorm(max(cutoff), mu0, SEM, lower.tail = FALSE)
power <- pnorm(min(cutoff), mha, SEM, lower.tail = TRUE) +
pnorm(max(cutoff), mha, SEM, lower.tail = FALSE)
beta <- 1 - power
labs.h0 <- latex2exp::TeX(sprintf("$\\alpha$ = %#.3f", alpha))
labs.h1a <- latex2exp::TeX(sprintf("$\\beta$ = %#.3f", beta))
labs.h1b <- latex2exp::TeX(sprintf("$1 - \\beta$ = %#.3f",1 - beta))
if (legend) legend("topleft", legend = c(labs.h0, labs.h1a, labs.h1b), pch = 15,cex = 1.2,
col = c("red", cbPalette[c(6,2)]))
segments(cutoff,(I)*ht*0.2,
cutoff,(I+1)*ht,lwd=0.5,col="red")
text(labels = latex2exp::TeX(sprintf("cutoff = %#.4f$", cutoff)),
x = cutoff, y = (I)*ht*0.15)
arrows(mu0,(I-0.25)*ht,
cutoff,(I-0.25)*ht,length=0.05,
code=3,angle=20,col=cbPalette[6],lwd=1.5)
# arrows(cutoff,(I+1-0.25)*ht,
#        mha,(I+1-0.25)*ht,length=0.05,
#        code=3,angle=15,col="red",lwd=1.5)
segments(mha, I * ht, mha,
(I + 2) * ht * 1.2,lwd=0.5,col="grey60")
}
}
# examples
# # less than alternative ----
# n <- 5 # sample size
# s <- 0.0080 # standard deviation
# mu0 <- -0.540 # mean undder the null
# mha <- 1.01 * mu0 # mean under the alternative
# cutoff <- mu0 + qnorm(0.05) * s / sqrt(n)
# power_plot(n = n,
# s = s,
# mu0 = mu0,
# mha = mha,
# cutoff = cutoff,
# alternative = "less",
# xlab = "")
#
#
# # greater than alternative ----
# n <- 5 # sample size
# s <- 0.0080 # standard deviation
# mu0 <- -0.540 # mean undder the null
# mha <- 0.99 * mu0 # mean under the alternative
# cutoff <- mu0 + qnorm(0.95) * s / sqrt(n)
# power_plot(n = n,
# s = s,
# mu0 = mu0,
# mha = mha,
# cutoff = cutoff,
# alternative = "greater",
# xlab = "")
#
#
# # two-sided alternative ----
# n <- 3 # sample size
# s <- 0.088 # standard deviation
# mu0 <- .86 # mean undder the null
# mha <- .88 # mean under the alternative
# cutoff <- mu0 + qnorm(c(0.025, 0.975)) * s / sqrt(n)
# power_plot(n = n,
# s = s,
# mu0 = mu0,
# mha = mha,
# cutoff = cutoff,
# alternative = "equal",
# xlab = "")
|
8248479993e78b05536f9472d0199a288d5e1c0b
|
ffaf081897ef7781ec44634b757eae461e848ccb
|
/day03/day03.R
|
90501debf75661a0c8a92f9f58f19d048ad99931
|
[] |
no_license
|
marcmace/AdventofCode2020
|
f8e80842f16fd1c9f7fd181bbcc0054f1b7031c0
|
642ce0922052bf7853c1dccf4710b7682345f085
|
refs/heads/main
| 2023-01-29T20:53:42.362419
| 2020-12-09T13:49:27
| 2020-12-09T13:49:27
| 317,989,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 508
|
r
|
day03.R
|
# Advent of Code 2020, day 3: read the map and build a character grid with
# one row per input line and one column per map character.
# NOTE(review): transpose() is not exported by dplyr (it lives in purrr /
# data.table); this only works if one of those packages is also attached --
# confirm which one is intended.
library(dplyr)
input <- readLines(paste(getwd(),"/day03/day03_input.txt",sep=""))
geo <- transpose(as.data.frame(strsplit(x = input, split = "")))
# Count the trees ('#') hit while sledding across the global grid `geo`,
# starting at the top-left cell and moving `colmove` columns right and
# `rowmove` rows down per step; columns wrap around the grid width.
day3 <- function(colmove, rowmove) {
  tree_count <- 0
  row <- 1
  col <- 1
  while (row <= nrow(geo)) {
    if (geo[row, col] == "#") {
      tree_count <- tree_count + 1
      # geo[row, col] <- "X"
    }
    # else geo[row, col] <- "O"
    row <- row + rowmove
    # Wrap the column index around the map width (1-based modulo).
    col <- (col + colmove - 1) %% ncol(geo) + 1
  }
  return(tree_count)
}
|
207ef8d944b16119cd17e00d52cee2400274ad9f
|
9425481e2f3e6218b31870f121445400e2c47530
|
/Binomial/man/bin_distribution.Rd
|
12bf52f8c4a479c0669b690040df0003a625a981
|
[] |
no_license
|
stat133-sp19/hw-stat133-JingtongZhao
|
909c7e2cefeae1dd01e66842af132452b8170285
|
b60cab3396f9871239ff687707c36686d29fd7bb
|
refs/heads/master
| 2020-04-28T07:25:17.528497
| 2019-05-04T00:39:47
| 2019-05-04T00:39:47
| 175,091,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 598
|
rd
|
bin_distribution.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Binomial.R
\name{bin_distribution}
\alias{bin_distribution}
\title{Binomial Distribution}
\usage{
bin_distribution(trials, prob)
}
\arguments{
\item{trials}{number of fixed trials}
\item{prob}{probability of success on each trial}
}
\value{
an object of class \code{"bindis"}
}
\description{
Calculates the probability of each possible number of successes in a fixed number of random trials
performed under identical conditions
}
\examples{
#binomial probability distribution
bin_distribution(trials = 5, prob = 0.5)
}
|
59faa190a5796e6a10d0a2afc5286f30c36b802e
|
a47e15d8a4b9bd62db8a531dacbdaa7d8a797a3d
|
/man/layer.Rd
|
178a692cc1d99ca75b445ba0a6d3c7b76dc5f1ff
|
[] |
no_license
|
johannes-titz/leabRa
|
f5d4189793db830e5f5aed7af1df0305b3ad58f3
|
237bc5c67c81fcd24e7dac895af3106bbad9974a
|
refs/heads/master
| 2021-05-15T00:03:52.568144
| 2017-09-25T08:53:48
| 2017-09-25T08:53:48
| 103,940,125
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 7,197
|
rd
|
layer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layer.R
\docType{class}
\name{layer}
\alias{layer}
\title{Leabra layer class}
\format{\code{\link{R6Class}} object}
\usage{
layer
}
\value{
Object of \code{\link{R6Class}} with methods for calculating changes
of activation in a layer of neurons.
}
\description{
This class simulates a biologically realistic layer of neurons in the
Leabra framework. It consists of several \code{\link{unit}} objects
in the variable (field) \code{units} and some layer-specific
variables.
}
\section{Fields}{
\describe{
\item{\code{units}}{A list with all \code{\link{unit}} objects of the layer.}
\item{\code{avg_act}}{The average activation of all units in the layer
(this is an active binding).}
\item{\code{n}}{Number of units in layer.}
\item{\code{weights}}{A receiving x sending weight matrix, where the receiving units
(rows) has the current weight values for the sending units (columns). The
weights will be set by the \code{\link{network}} object, because they
depend on the connection to other layers.}
\item{\code{ce_weights}}{Sigmoidal contrast-enhanced version of the weight matrix
\code{weights}. These weights will be set by the \code{\link{network}}
object.}
\item{\code{layer_number}}{Layer number in network (this is 1 if you create
a layer on your own, without the network class).}
}}
\section{Methods}{
\describe{
\item{\code{new(dim, g_i_gain = 2)}}{Creates an object of this class with
default parameters.
\describe{
\item{\code{dim}}{A pair of numbers giving the dimensions (rows and
columns) of the layer.}
\item{\code{g_i_gain}}{Gain factor for inhibitory conductance, if you
want less activation in a layer, set this higher.}
}
}
\item{\code{get_unit_acts()}}{Returns a vector with the activations of all
units of a layer.
}
\item{\code{get_unit_scaled_acts()}}{Returns a vector with the scaled
activations of all units of a layer. Scaling is done with
\code{recip_avg_act_n}, a reciprocal function of the number of active
units.
}
\item{\code{cycle(intern_input, ext_input)}}{Iterates one time step with
layer object.
\describe{
\item{\code{intern_input}}{Vector with inputs from all other layers.
Each input has already been scaled by a reciprocal function of the
number of active units (\code{recip_avg_act_n}) of the sending layer
and by the connection strength between the receiving and sending
layer. The weight matrix \code{ce_weights} is multiplied with this
input vector to get the excitatory conductance for each unit in the
layer.
}
\item{\code{ext_input}}{Vector with inputs not coming from another
layer, with length equal to the number of units in this layer. If
empty (\code{NULL}), no external inputs are processed. If the external
inputs are not clamped, this is actually an excitatory conductance
value, which is added to the conductance produced by the internal
input and weight matrix.
}
}
}
\item{\code{clamp_cycle(activations)}}{Iterates one time step with layer
object with clamped activations, meaning that activations are
instantaneously set without time integration.
\describe{
\item{\code{activations}}{Activations you want to clamp to the units in
the layer.
}
}
}
\item{\code{get_unit_act_avgs()}}{Returns a list with the short, medium and
long term activation averages of all units in the layer as vectors. The
super short term average is not returned, and the long term average is not
updated before being returned (this is done in the function \code{chg_wt()}
with the method\code{updt_unit_avg_l}). These averages are used by the
network class to calculate weight changes.
}
\item{\code{updt_unit_avg_l()}}{Updates the long-term average
(\code{avg_l}) of all units in the layer, usually done after a plus phase.
}
\item{\code{updt_recip_avg_act_n()}}{Updates the \code{avg_act_inert} and
\code{recip_avg_act_n} variables, these variables update before the weights
are changed instead of cycle by cycle. This version of the function assumes
full connectivity between layers.
}
\item{\code{reset(random = FALSE)}}{Sets the activation and activation
averages of all units to 0. Used to begin trials from a stationary point.
\describe{
\item{\code{random}}{Logical variable, if TRUE the activations are set
randomly between .05 and .95 for every unit instead of 0.
}
}
}
\item{\code{set_ce_weights()}}{Sets contrast enhanced weight values.
}
\item{\code{get_unit_vars(show_dynamics = TRUE, show_constants =
FALSE)}}{Returns a data frame with the current state of all unit variables
in the layer. Every row is a unit. You can choose whether you want dynamic
values and / or constant values. This might be useful if you want to
analyze what happens in units of a layer, which would otherwise not be
possible, because most of the variables (fields) are private in the unit
class.
\describe{
\item{\code{show_dynamics}}{Should dynamic values be shown? Default is
TRUE.
}
\item{\code{show_constants}}{Should constant values be shown? Default
is FALSE.
}
}
}
\item{\code{get_layer_vars(show_dynamics = TRUE, show_constants =
FALSE)}}{Returns a data frame with 1 row with the current state of the
variables in the layer. You can choose whether you want dynamic values and
/ or constant values. This might be useful if you want to analyze what
happens in a layer, which would otherwise not be possible, because some of
the variables (fields) are private in the layer class.
\describe{
\item{\code{show_dynamics}}{Should dynamic values be shown? Default is
TRUE.
}
\item{\code{show_constants}}{Should constant values be shown? Default
is FALSE.
}
}
}
}
}
\examples{
l <- layer$new(c(5, 5)) # create a 5 x 5 layer with default leabra values
l$g_e_avg # private values cannot be accessed
# if you want to see all variables, you need to use the function
l$get_layer_vars(show_dynamics = TRUE, show_constants = TRUE)
# if you want to see a summary of all units without constant values
l$get_unit_vars(show_dynamics = TRUE, show_constants = FALSE)
# let us clamp the activation of the 25 units to some random values between
# 0.05 and 0.95
l <- layer$new(c(5, 5))
activations <- runif(25, 0.05, .95)
l$avg_act
l$clamp_cycle(activations)
l$avg_act
# what happened to the unit activations?
l$get_unit_acts()
# compare with activations
activations
# scaled activations are scaled by the average activation of the layer and
# should be smaller
l$get_unit_scaled_acts()
}
\references{
O'Reilly, R. C., Munakata, Y., Frank, M. J., Hazy, T. E., and
Contributors (2016). Computational Cognitive Neuroscience. Wiki Book, 3rd
(partial) Edition. URL: \url{http://ccnbook.colorado.edu}
Have also a look at
\url{https://grey.colorado.edu/emergent/index.php/Leabra} (especially the
link to the 'MATLAB' code) and \url{https://en.wikipedia.org/wiki/Leabra}
}
\keyword{data}
|
71a2e3db2ff8bece607d7bb02a72bb08e98a6a4a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/biolink/examples/urls.Rd.R
|
f9e2ad21c8b6b383ef381db07378c91fbf0bb51e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 302
|
r
|
urls.Rd.R
|
# Extracted examples for the biolink URL helpers: each call builds a URL
# string pointing at the corresponding online database record.
library(biolink)
### Name: urls
### Title: Construct urls to online resources
### Aliases: urls url_go url_kegg url_pubmed url_entrez url_cran url_bioc
### ** Examples
# gene ontology url
url_go("GO:0005539")
# KEGG pathway url
url_kegg("hsa04915")
# PubMed article url
url_pubmed("23193287")
|
4ca68847ee5bb01f22ee69c3ca3817dbb4270179
|
3b0be5721a5478b1bac4e6b08cdcd1b88e3a4046
|
/inst/snippets/Example9.18d.R
|
305ac60005a35771249b0972cb4a7f558fa74046
|
[] |
no_license
|
stacyderuiter/Lock5withR
|
b7d227e5687bc59164b9e14de1c8461cb7861b14
|
417db714078dc8eaf91c3c74001b88f56f09b562
|
refs/heads/master
| 2020-04-06T06:33:39.228231
| 2015-05-27T11:41:42
| 2015-05-27T11:41:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43
|
r
|
Example9.18d.R
|
Ink.Price(PPM=3.0, interval='confidence')
|
c64726ce65cc8ad5665eca698f05e658dcea35f5
|
e3847b953b7bb3e6464a7686801fc607fde3a0a1
|
/Plot2.R
|
711c11b6eee52fb193cf2ca603d04b7520ac8b2f
|
[] |
no_license
|
asrulnb/ExData_Plotting1
|
d2b00aaead4434d9ef656ba60a68a04776671c73
|
a77d628debf1305cbad893b0f20a7f7dda04e033
|
refs/heads/master
| 2021-01-16T21:08:59.127160
| 2015-08-09T15:53:03
| 2015-08-09T15:53:03
| 40,299,974
| 0
| 0
| null | 2015-08-06T10:52:21
| 2015-08-06T10:52:21
| null |
UTF-8
|
R
| false
| false
| 910
|
r
|
Plot2.R
|
# Plot 2: line plot of Global Active Power over two days (1-2 Feb 2007)
# from the UCI household power consumption data, saved as plot2.png.
###[ Initializing Library ]
# NOTE(review): rm(list = ls()) wipes the caller's workspace -- discouraged
# in scripts; kept here to preserve the original behaviour.
rm(list = ls())
library(dplyr)
library(data.table)
library(lubridate)
library(datasets)
library(graphics)
###[ Set working Directory to where the R source file is ]
# NOTE(review): parent.frame(2)$ofile only exists when this file is run via
# source(); it fails in an interactive session or Rscript -- verify usage.
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
###[ Part 1 : Read data from file ]
# '?' marks missing values in this dataset.
mainDT <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings="?")
###[ Part 2 : Filter Data ]
# Keep only the two target days (dates are in d/m/yyyy form).
mainDT <- filter(mainDT,(Date == "1/2/2007"|Date == "2/2/2007"))
###[ Convert Data ]
datetime <- paste(dmy(mainDT$Date), " ", mainDT$Time) ## put together with proper formating
mainDT <- mutate(mainDT, DateAndTime = ymd_hms(datetime)) ## to ensure the final Date and Time format
###[ Output to PNG File ]
# Draw on the screen device first, then copy the device to a PNG file.
plot(mainDT$Global_active_power~mainDT$DateAndTime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off()
|
7a917e2807b4c5bc1d669837d05bb7f75761f34d
|
71808098f4c7eb7ed63ea4806509feaf84fac756
|
/Regression/Guns_Regression.r
|
a59b799d7bd1a155b3b408cabc56eaf1d3baf7e5
|
[] |
no_license
|
MarcoBuratti/BusinessAnalytics-HACKATON
|
b1f691da9477d3b87cfb09099ee40e5d15567291
|
efb2e3ecbc0a0967ecd49dda494d6fe35183c52c
|
refs/heads/master
| 2023-01-23T22:07:30.483411
| 2020-11-12T15:57:23
| 2020-11-12T15:57:23
| 307,710,379
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,654
|
r
|
Guns_Regression.r
|
# Exploratory regression of log(violent crime rate) on demographic
# regressors (afam, population, income) from the Guns dataset, followed by
# a kNN (kknn) nonparametric fit on the afam regressor.
library("readxl")
library(rgl)
library(kknn)
# data <- read_excel('Guns.xls')
# NOTE(review): assumes a `Guns` object already exists in the workspace;
# the commented read_excel() line above is the file-based alternative.
data <- Guns
names(data)
sapply(data, class)
#response
violent <- log(data$violent)
#sapply(violent, class)
#regressor
afam <- data$afam
# Plot data with the log value on y axis
plot(afam, violent, xlim=c(0,15), ylim=c(3,8))
grid()
abline(0,1, lty=3)
# Fit a linear model
fm <- lm(violent ~ afam)
summary(fm)
# plot the regression line and the fitted values
abline(coefficients(fm), col='red')
points(afam, fitted(fm), col='red', pch=16)
# Plot the avg value of afam on x-axis and violent on y-axis
abline(h=mean(violent))
abline(v=mean(afam))
# Create a qq-plot of the residuals
qqnorm(residuals(fm))
# Perform a Normality test of the residuals
shapiro.test(residuals(fm))
# Print the estimates of the regression coefficients
coefficients(fm)
# Compute CI of the regression coefficients
confint(fm, level= 0.95)
# Print the estimate of the error variance
# (residual sum of squares divided by the residual degrees of freedom)
s2 <- sum(residuals(fm)^2)/fm$df
sqrt(s2)
# Compute a set of CIs
# Build a new data base
Z0 <- data.frame(cbind(afam=seq(0, 35, by=0.1)))
# Compute the CIs
CI <- predict(fm, Z0, interval='confidence')
# Plot the CIs
plot(afam, violent, xlim=c(0,35), ylim=c(4,8))
lines(Z0[,1], CI[,'fit'])
lines(Z0[,1], CI[,'lwr'], lty=4)
lines(Z0[,1], CI[,'upr'], lty=4)
# Compute a set of PIs (prediction intervals, wider than the CIs)
PI <- predict(fm, Z0, interval='prediction', level=0.95)
# Plot the PIs
lines(Z0[,1], PI[,'fit'])
lines(Z0[,1], PI[,'lwr'], lty=2)
lines(Z0[,1], PI[,'upr'], lty=2)
# Import the second regressor
population <- data$population
# Plot data with the log value on y axis
plot(population, violent, xlim=c(0,15), ylim=c(3,8))
grid()
abline(0,1, lty=3)
# Fit a linear model
fm2 <- lm(violent ~ population)
summary(fm2)
# plot the regression line and the fitted values
abline(coefficients(fm2), col='red')
points(population, fitted(fm2), col='red', pch=16)
# Add a third regressor
income <- data$income
# Plot data with the log value on y axis
plot(income, violent, xlim=c(9000,20000), ylim=c(3,8))
grid()
abline(0,1, lty=3)
# Fit a linear model
fm3 <- lm(violent ~ income)
summary(fm3)
# plot the regression line and the fitted values
abline(coefficients(fm3), col='red')
points(income, fitted(fm3), col='red', pch=16)
# NOTE(review): fm3 is reassigned here to the full three-regressor model,
# discarding the income-only fit above.
fm3 <- lm(violent ~ afam + population + income)
summary(fm3)
pairs(cbind(violent, afam, population, income))
#Plot in 3D pop, afam and violent
open3d()
plot3d(x=afam, y=population, z=violent, size=10, col='black')
# Fit the linear model (fm2 is likewise reassigned to the two-regressor fit)
fm2 <- lm(violent ~ afam + population)
summary(fm2)
# plot the regression surface and the fitted values
points3d(x=afam, y=population, z=fitted(fm2), size=10, col='red')
planes3d(coefficients(fm2)[2],coefficients(fm2)[3],-1,coefficients(fm2)[1], alpha=0.5, color='red')
# Plot data with the log value on y axis
plot(afam, violent, xlim=c(0,15), ylim=c(3,8))
grid()
abline(0,1, lty=3)
# Fit a linear model
fm <- lm(violent ~ afam)
summary(fm)
# plot the regression line and the fitted values
abline(coefficients(fm), col='red')
# Compute the fitted values using k=8 and the rectangular kernel
# NOTE(review): the comment above says k=8 but the call passes k=18 --
# confirm which was intended.
Z0 <- data.frame(cbind(afam=seq(0, 35, by=0.1)))
predicted.response <- kknn(violent ~ afam,
train = data.frame(afam, violent),
test = Z0,
k = 18, kernel='rectangular')
# Plot the knn prediction line
lines(Z0[,'afam'], predicted.response$fitted.values, col='blue')
# Select the value of k (leave-one-out crossvalidation method)
# NOTE(review): prefer scale = FALSE over the reassignable shorthand F.
train.cv <- train.kknn(violent ~ afam, data = data.frame(afam, violent),
kmax = 40, scale = F, kernel = 'rectangular')
plot(train.cv)
grid()
|
3f3466431535a58007eb4f3e0058a0a972d1a2aa
|
fadd039259d32ffee1b8387659295cd3c94067b2
|
/02 R Programming/Week 3/Programming Assignment/ProgrammingAssignment3.R
|
14f8bbe06c2079a3c57841aa610a248a96414d3c
|
[] |
no_license
|
castner-jon/datasciencecoursera
|
e4198c7668ab98ae746fdc21531fbb0dab2e09b3
|
2b602b37d278aaf11547c9d8fd162ca587fef0a5
|
refs/heads/master
| 2021-09-09T16:29:55.751823
| 2018-03-18T01:00:53
| 2018-03-18T01:00:53
| 119,195,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 346
|
r
|
ProgrammingAssignment3.R
|
## read in the data
## Everything is read as character; numeric columns are coerced below.
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Quick inspection of the data frame.
head(outcome)
class(outcome)
str(outcome)
dim(outcome)
colnames(outcome)
## histogram of mortality rates from heart attacks
## Column 11 holds the 30-day heart-attack mortality rate; coercion turns
## non-numeric entries (e.g. "Not Available") into NA with a warning.
outcome[, 11] <- as.numeric(outcome[, 11])
hist(outcome$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack)
|
80c242129acdba02b573b11b6dc45a819da6c6a7
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/uwot/tests/testthat/helper_data.R
|
e49ad62eb3c1bedcd002a288e8679f429596d23b
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,665
|
r
|
helper_data.R
|
# Squared Euclidean distance matrix via the Gram-matrix identity
# ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 <x_i, x_j>.
# Floating-point cancellation can leave tiny negative entries.
dist2 <- function(X) {
  sq_norms <- rowSums(X * X)
  gram <- X %*% t(X)
  sweep(-2 * gram, 2, sq_norms, `+`) + sq_norms
}
# Squared Euclidean distances with negative round-off clamped to zero,
# so taking sqrt() of the result is always safe.
safe_dist2 <- function(X) {
  d2 <- dist2(X)
  d2[d2 < 0] <- 0
  d2
}
# Convert a data frame to a (dense) Euclidean distance matrix.
# Relies on x2m(), defined elsewhere in these test helpers, to coerce the
# data frame to a numeric matrix before computing safe squared distances.
x2d <- function(X) {
  sqrt(safe_dist2(x2m(X)))
}
# Convert one or more vectors into a two-column matrix, used to build
# expected 2-D embedding (Y) outputs in the tests.
c2y <- function(...) {
  flattened <- unlist(list(...))
  matrix(flattened, ncol = 2)
}
# Standard tiny fixture: the first 10 rows of iris as a numeric matrix
# (x2m and pca_scores are package-internal helpers).
iris10 <- x2m(iris[1:10, ])
# 2-component PCA scores used as initial/reference embedding coordinates.
iris10_Y <- pca_scores(iris10, ncol = 2)
diris10 <- dist(iris10)
# Sparse iris10 dist: zero out distances above 0.71 and drop explicit zeros.
dmiris10 <- as.matrix(diris10)
dmiris10z <- dmiris10
dmiris10z[dmiris10z > 0.71] <- 0
dmiris10z <- Matrix::drop0(dmiris10z)
# some Y data: categorical and numeric targets with NAs for supervised tests.
ycat <- as.factor(c(levels(iris$Species)[rep(1:3, each = 3)], NA))
ycat2 <- as.factor(c(NA, levels(iris$Species)[rep(1:3, times = 3)]))
ynum <- (1:10) / 10
ynum2 <- seq(from = 10, to = -10, length.out = 10) / 100
# Precomputed exact 4-nearest-neighbour graph for iris10 (find_nn is a
# package-internal helper).
nn <- find_nn(iris10,
k = 4, method = "fnn", metric = "euclidean",
n_threads = 0, verbose = FALSE
)
# Just test that res is a matrix of the expected size with valid (finite or
# NA, but not infinite) numbers. Defaults match the iris10 fixture above.
expect_ok_matrix <- function(res, nr = nrow(iris10), nc = 2) {
  expect_is(res, "matrix")
  expect_equal(nrow(res), nr)
  expect_equal(ncol(res), nc)
  expect_false(any(is.infinite(res)))
}
# Assert that res is a nearest-neighbour result: a list with nr x k
# `dist` and `idx` matrices (checked via expect_is_nn_matrix below).
expect_is_nn <- function(res, nr = 10, k = 4) {
  expect_is(res, "list")
  expect_is_nn_matrix(res$dist, nr, k)
  expect_is_nn_matrix(res$idx, nr, k)
}
# Assert that res is a matrix with nr rows (observations) and k columns
# (neighbours), as produced by the nearest-neighbour search.
expect_is_nn_matrix <- function(res, nr = 10, k = 4) {
  expect_is(res, "matrix")
  expect_equal(nrow(res), nr)
  expect_equal(ncol(res), k)
}
|
5062eb4fa0786f275049d0f8eb4660398441a086
|
f8f3d53abf579dfbf6d49cfb59295b1c3ddc3fb2
|
/R/add_flextable.R
|
fac9a391aa6d2161eeaa3602f0d181e0f74f598f
|
[] |
no_license
|
cardiomoon/rrtable
|
9010574549a6fc41015f89638a708c691c7975cf
|
8346fca2bb0dc86df949fb31738e1af90eeb5a70
|
refs/heads/master
| 2023-03-15T20:43:07.685721
| 2023-03-12T11:36:34
| 2023-03-12T11:36:34
| 127,721,282
| 3
| 2
| null | 2021-11-17T01:08:31
| 2018-04-02T07:32:08
|
R
|
UTF-8
|
R
| false
| false
| 1,455
|
r
|
add_flextable.R
|
#' Add a flextable or mytable object into a document object
#' @param mydoc A document object (officer `rdocx` or `rpptx`)
#' @param ftable A flextable or mytable object
#' @param code R code string
#' @param echo whether or not display R code
#' @param landscape Logical. Whether or not make a landscape section.
#' @importFrom officer add_slide ph_with body_add_par body_end_section_landscape body_end_section_portrait
#' @importFrom flextable body_add_flextable
#' @return a document object
#' @export
#' @examples
#' \dontrun{
#' require(rrtable)
#' require(moonBook)
#' require(officer)
#' require(magrittr)
#' ftable=mytable(Dx~.,data=acs)
#' title="mytable Example"
#' ft=df2flextable(head(iris))
#' title2="df2flextable Example"
#' doc=read_docx()
#' doc %>% add_text(title=title) %>%
#'     add_flextable(ftable) %>%
#'     add_text(title=title2) %>%
#'     add_flextable(ft)
#'}
add_flextable <- function(mydoc, ftable, echo = FALSE, code = "", landscape = FALSE) {
    # Coerce mytable objects to flextable; anything else is assumed to
    # already be a flextable.
    if (inherits(ftable, "mytable")) {
        ft <- mytable2flextable(ftable)
    } else {
        ft <- ftable
    }
    # Place the table lower on the slide when R code is echoed above it.
    pos <- 1.5
    if (echo && (code != "")) pos <- 2
    if (inherits(mydoc, "rpptx")) {
        # PowerPoint document: place the table on the current slide.
        # NOTE(review): ph_location() is called but not listed in @importFrom
        # above -- confirm it is imported elsewhere in the package.
        mydoc <- ph_with(mydoc, value = ft, location = ph_location(left = 1, top = pos))
    } else {
        # Word document: optionally wrap the table in a landscape section.
        if (landscape) mydoc <- body_end_section_portrait(mydoc)
        mydoc <- body_add_flextable(mydoc, ft)
        if (landscape) mydoc <- body_end_section_landscape(mydoc)
    }
    mydoc
}
|
6cc02fa368753709b26ae50ebc78cfdb4a716be8
|
79f08f05d41ab55c37bbb5216a827d4f03507a3f
|
/module3/homework3-Q2/app.R
|
b6db45c80b05455a6b56884411d308435d91a716
|
[] |
no_license
|
pmalo46/CUNY_DATA_608
|
d353bcbbe186675f064d367f520babd12d17a185
|
5d15862117bdc7e7df926afd29b9ec43523b7b1a
|
refs/heads/master
| 2023-02-02T06:23:34.846447
| 2020-12-14T04:39:33
| 2020-12-14T04:39:33
| 292,177,538
| 0
| 0
| null | 2020-09-02T04:22:08
| 2020-09-02T04:22:07
| null |
UTF-8
|
R
| false
| false
| 2,896
|
r
|
app.R
|
# author: Pat Maloney
# Data for this project:
# https://github.com/charleyferrari/CUNY_DATA608/tree/master/module3/data
# Question 2:
# Often you are asked whether particular States are improving their mortality
# rates (per cause) faster than, or slower than, the national average. Create a
# visualization that lets your clients see this for themselves for one cause of
# death at the time. Keep in mind that the national average should be weighted by
# the national population.
library(ggplot2)
library(dplyr)
library(plotly)
library(shiny)
library(sqldf)
library(rsconnect)
# Load the CDC mortality data directly from GitHub.
df <- read.csv("https://raw.githubusercontent.com/charleyferrari/CUNY_DATA608/master/lecture3/data/cleaned-cdc-mortality-1999-2010-2.csv", header= TRUE)
# Normalize column names: dots to underscores, all lower case.
names(df) <- gsub('\\.', '_', names(df)) %>%
tolower()
# Population-weighted national crude rate per cause per year, labelled as a
# pseudo-state "National" so it can be unioned with the per-state rows.
national <-sqldf("select ICD_Chapter
, Year
, round(sum(Deaths)*100000.00 /sum(Population),2) as Crude_Rate
, 'National' as State
from df
Group by ICD_Chapter
, Year")
names(national) <-tolower(names(national) )
state <- sqldf("select icd_chapter
, year
, crude_rate
, state
from df ")
# Combined per-state and national rows for plotting.
national2 <- sqldf("select * from state
union all
select * from national")
ui <- fluidPage(
headerPanel('State Mortality Rates Explorer'),
sidebarPanel(
selectInput('state', 'State', unique(national2$state), selected='NY'),
selectInput('icd_chapter', 'Cause of Death', unique(national2$icd_chapter), selected='Certain infectious and parasitic diseases')
),
mainPanel(
plotlyOutput('plot1'),
verbatimTextOutput('stats'),
h6("Number of deaths per 100,000 people")
)
)
server <- function(input, output, session) {
# National rows for the selected cause of death.
nationalData <- reactive({
national %>%
filter(icd_chapter == input$icd_chapter)
})
# Rows for the selected state and cause (the assignment's value, dfSlice,
# is what the reactive returns).
statedata <- reactive({
dfSlice <- national2 %>%
filter(state == input$state, icd_chapter == input$icd_chapter)
})
# State and national series merged for the comparison plot.
combined <- reactive({
merge(x = nationalData(), y = statedata(), all = TRUE)
})
output$plot1 <- renderPlotly({
# NOTE(review): df2 is computed but never used below; the plot is driven
# by combined().
df2 <- national2 %>%
filter(state == input$state, icd_chapter == input$icd_chapter)
line_colors <- c("red", "blue")
plot_ly(combined(), x = ~year, y = ~crude_rate, color = ~state, colors = line_colors, type='scatter',
mode = 'lines')
})
# Summary statistics of the selected state's crude rate.
output$stats <- renderPrint({
df3 <- statedata() %>%
filter(state == input$state)
summary(df3$crude_rate)
})
}
shinyApp(ui = ui, server = server)
|
c842a98a8f3826da7f3466c2d3a5142ec0ee4458
|
efa4ba01ec27df73d1a40b09ee82a0c7af3cc850
|
/CODE/13_sleep_site_occupancy_randomizations.R
|
fe05b52940bfd5c3ce831b7540b1978adc10237b
|
[] |
no_license
|
CarterLoftus/intergroup_sleep
|
444e1c1ee4abce299865c216b027b326bb45d7a0
|
446448b8436f0408f87d13f7ec9359ef1ce860b7
|
refs/heads/main
| 2023-04-14T05:20:35.681620
| 2022-07-27T10:57:51
| 2022-07-27T10:57:51
| 518,377,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,034
|
r
|
13_sleep_site_occupancy_randomizations.R
|
#### sleep site randomizations ######
library( infotheo )
library( hms )
library( brms )
library( boot )
library( data.table )
## Z-score a numeric vector: subtract the mean and divide by the standard
## deviation, ignoring NAs (NAs stay NA in the output). Fixed the
## reassignable T shorthand (now TRUE) and dropped the redundant return().
normalize_func <- function(x) {
  (x - mean(x, na.rm = TRUE)) / sd(x, na.rm = TRUE)
}
# Select which species' cleaned GPS dataset to analyze.
# NOTE(review): if which_spec matches none of the three options, spec_df is
# never assigned and the script fails downstream.
which_spec <- 'baboon'
if( which_spec == "baboon" ){
spec_df <- read.csv( "DATA/bab_complete.csv" )
}else{
if( which_spec == "vervet" ){
spec_df <- read.csv( "DATA/verv_complete.csv" )
}else{
if( which_spec == "leopard" ){
spec_df <- read.csv( "DATA/leo_complete.csv" )
}
}
}
## just to confirm that each group has a maximum of one sleep site assigned each night
same_sleep_check <- aggregate( spec_df$sleep_clus, by = list( spec_df$group, spec_df$day ), FUN = function( x ) sum( !is.na( unique( x ) ) ) )
names( same_sleep_check ) <- c( 'group', 'day', 'num_sleep_sites')
# Rows printed here would indicate a group with 0 or >1 sites on one night.
same_sleep_check[ same_sleep_check$num_sleep_sites != 1, ]
# make a dataframe with one row per group per night, stating when they left the sleep site that morning and then they arrived at their sleep site in the evening (averaged across the individuals in the group when there is more than one)
# NOTE(review): na.rm = T uses the reassignable shorthand; prefer TRUE.
cosleep_dat <- aggregate( spec_df[ , c( 'arrive_sleep_site', 'leave_sleep_site' ) ], by = list( spec_df$group, spec_df$day, spec_df$sleep_clus ), FUN = function( x ) as_hms( mean( as.numeric( as_hms( as.character( x ) ) ), na.rm = T ) ) )
# rename the columns of the dataframe
names( cosleep_dat )[ 1:3 ] <- c( 'group', 'day', 'sleep_clus' )
# reorder the dataframe (by day, then by group)
cosleep_dat <- cosleep_dat[ order( cosleep_dat$day ), ]
cosleep_dat <- cosleep_dat[ order( cosleep_dat$group ), ]
# make a column that will declare whether the group coslept with another group that night. Instantiate the column with all 0s
cosleep_dat$cosleep <- 0
# fill in the cosleep columns with 1's on nights when the group slept at the same site as another group
# (duplicated() from both ends flags every row of a duplicated day/site pair)
cosleep_dat[ duplicated( cosleep_dat[ , c( 'day', 'sleep_clus' ) ] ) | duplicated( cosleep_dat[ , c( 'day', 'sleep_clus' ) ], fromLast = T ), 'cosleep' ] <- 1
#### how many nights do they cosleep on?
sum( duplicated( cosleep_dat[ , c( 'day', 'sleep_clus' ) ] ) )
#### rand parameters ####
# Rebuild cosleep_dat fresh for the randomization analysis (same
# aggregation as above, without the cosleep flag).
# make a dataframe with one row per group per night, stating when they left the sleep site that morning and then they arrived at their sleep site in the evening (averaged across the individuals in the group when there is more than one)
cosleep_dat <- aggregate( spec_df[ , c( 'arrive_sleep_site', 'leave_sleep_site' ) ], by = list( spec_df$group, spec_df$day, spec_df$sleep_clus ), FUN = function( x ) as_hms( mean( as.numeric( as_hms( as.character( x ) ) ), na.rm = T ) ) )
# rename the columns of the dataframe
names( cosleep_dat )[ 1:3 ] <- c( 'group', 'day', 'sleep_clus' )
cosleep_dat$id <- cosleep_dat$group
cosleep_dat$func_ind <- cosleep_dat$group
tag_names <- as.character( unique( cosleep_dat$id ) )
# Number of permutations, the day-window within which days may be shuffled,
# and the minimum fraction of that window both groups must have data for.
n <- 1000
rand_day_thresh <- 30
min_to_rand <- 1
# create empty vectors. These vectors will be filled with entries that eventually be put into the final dataframe that declares which functuals need to be compared
vec_a <- c()
vec_b <- c()
# Enumerate every unordered pair of groups (dyads).
for( a in 1:( length( tag_names ) - 1 ) ){
for( b in ( a + 1 ): length( tag_names ) ){
# create vectors that represent the unique combinations that can be made of functuals in this category
vec_a <- c( vec_a, tag_names[ a ] )
vec_b <- c( vec_b, tag_names[ b ] )
}
}
vec_cat_a <- rep( 'baboon', length( vec_a ) )
vec_cat_b <- vec_cat_a
# set up the final dataframe that will be filled out to give the empirical coefficient of associations between each dyad of functuals
real_final_cosleep <- data.frame( func_a = vec_a, func_b = vec_b, num = rep( NA, times = length( vec_a ) ), denom = rep( NA, times = length( vec_a ) ), MI = NA, cat_a = vec_cat_a, cat_b = vec_cat_b , stringsAsFactors = F)
# set up the final dataframe that will be filled out to give the coefficient of associations between each dyad of functuals produced by the randomizations
rand_final_cosleep <- data.frame( func_a = rep( vec_a, times = n ), func_b = rep( vec_b, times = n ), rand_n = rep( 1:n , each = length( vec_a ) ), num = rep( NA, times = length( rep( vec_a, times = n ) ) ), denom = rep( NA, times = length( rep( vec_a, times = n ) ) ), MI = NA, cat_a = rep( vec_cat_a, times = n ), cat_b = rep( vec_cat_b, times = n ), stringsAsFactors = F)
# create a dataframe that will contain the metadata for the randomizations
meta_cosleep_dat <- data.frame( func_a = vec_a, func_b = vec_b, start_rand_at = rep( NA, length( vec_a ) ), end_rand_at = rep( NA, length( vec_a ) ), cat_a = vec_cat_a, cat_b = vec_cat_b, stringsAsFactors = F )
cosleep_dat$day <- as.numeric( cosleep_dat$day )
# saves the first day of the study
start_date <- min( cosleep_dat$day )
# Accumulators for the identities of shared sleep sites in the empirical
# and the randomized data.
cosleep_sites_real <- data.frame( func_a = character(), func_b = character(), shared_site = integer() )
cosleep_sites_rand <- data.frame( func_a = character(), func_b = character(), rand_num = integer(), shared_site = integer() )
# Fixed seed so the permutations are reproducible.
set.seed( 111 )
for( row in 1:nrow( real_final_cosleep) ){
print( row / nrow(real_final_cosleep) )
# subset the full gps dataframe to just including the functual dyad's data during the correct period. This appropriate combination is determined by the set up of the dataframes above
pair_cosleep_dat <- cosleep_dat[ cosleep_dat$id %in% c( real_final_cosleep[ row, c('func_a', 'func_b') ]), ]
# just making sure 'func_ind' is a character and not a factor. It messes things up if it is a factor
pair_cosleep_dat$func_ind <- as.character( pair_cosleep_dat$func_ind )
# trims the dataframe so it starts on the first day that both members of the dyad have data. We don't want to analyze anything before this
pair_cosleep_dat <- pair_cosleep_dat[ pair_cosleep_dat$day >= max( aggregate( pair_cosleep_dat$day , by=list( pair_cosleep_dat$func_ind ), FUN = min )[ , 2 ] ) , ]
# makes a column with both of their day columns starting at 1 on the first day when they both have the data. Important for chunking up the data in the next line
pair_cosleep_dat$temp_day <- as.numeric( pair_cosleep_dat$day - min( pair_cosleep_dat$day ) + 1, units = 'days' )
# assign each row of data to a subset so that days of the data are only randomized within an range determined by rand_day_thresh
pair_cosleep_dat$chunk_num <- ceiling( pair_cosleep_dat$temp_day / rand_day_thresh )
# split up the data into the subsets created in the line above. Now we have a list of dataframes, which each dataframe corresponding to one time chunk within randomizaation is allowable
chunked_cosleep_dat <- split( pair_cosleep_dat, f = pair_cosleep_dat$chunk_num )
# create an empty dataframe that will represent a dyadic distance at every simultaneous fix during the current study period
total_real <- data.frame( day = integer(), sleep_site_a = integer(), sleep_site_b = integer() )
# perform n permutations of the dyad's dataset
for( i in 1:n ){
# create an empty dataframe that will represent a dyadic distance at every derived simultaneous fix of the study period that results after the randomization
total_rand <- data.frame( day = integer(), sleep_site_a = integer(), sleep_site_b = integer() )
# loop through each chunk and permute the data within the chunks
for( w in 1:length( chunked_cosleep_dat ) ){ # For each chunk of data
# save the chunk of data to the dataframe "chunk"
chunk <- chunked_cosleep_dat[[w]]
# save the number of days of data that individuals a and b have in this chunk
num_unique_days_a <- length( unique( chunk[ chunk$func_ind == real_final_cosleep$func_a[ row ], 'day' ] ) )
num_unique_days_b <- length( unique( chunk[ chunk$func_ind == real_final_cosleep$func_b[ row ], 'day' ] ) )
# if one of the functuals has no data for this chunk, skip the rest of the body of the loop and move to the next chunk
if( num_unique_days_a == 0 || num_unique_days_b == 0 ){
next
}
# if either individual a or b don't have the necessary number of days worth of data as determined by the min_to_rand parameter, skip the rest of the body of the loop
if( num_unique_days_a < (min_to_rand * rand_day_thresh) || num_unique_days_b < (min_to_rand * rand_day_thresh) ){
next
}
# The first time through, we will make the empirical dataframe of dyadic distances. We only need to do this once
if( i == 1 ){
# use the dyad_dist function to calculate the dyadic distance for this pair of functuals
real_sub <- as.data.frame( dcast( as.data.table( chunk ), day ~ id, value.var = 'sleep_clus', drop = F ) )
names( real_sub ) <- c( 'day', 'sleep_site_a', 'sleep_site_b' )
# add this to the running dataframe of dyadic distances over the whole study for this dyad
total_real <- rbind( total_real, real_sub )
# save the latest time that will be successfully randomized. For the first run through for each functual dyad, this will get updated every time the conditions above are surpassed such that a chunk of data is successfully randomized. Eventually this will serve to mark the end of successful randomizations
end_of_rand <- max(chunked_cosleep_dat[[w]]$day)
}
# determine how many days to shift ID b's data by for the randomization
ID_b_shifts <- sample( 1:( rand_day_thresh - 1 ), 1, replace = TRUE )
# shift ID b's data by the amount determined above
new_days_b <- chunk[ chunk$func_ind == real_final_cosleep$func_b[ row ], 'day'] + ID_b_shifts
# complicated line of code, but all it does is wrap the end of b's data back around to match the beginning of a's data. So if we shifted b's data by 3, a's data will still be 1, 2, 3, 4, 5, 6, 7; and b's data will be 5, 6, 7, 1, 2, 3, 4.
new_days_b[ new_days_b > max( chunk[ chunk$func_ind == real_final_cosleep$func_b[ row ], 'day' ] ) ] <- new_days_b[ new_days_b > max( chunk [ chunk$func_ind == real_final_cosleep$func_b[ row ], 'day' ] ) ] - max( chunk[ chunk$func_ind == real_final_cosleep$func_b[ row ], 'day' ] ) + min( chunk[ chunk$func_ind == real_final_cosleep$func_b[ row ], 'day'] ) - 1
# replace b's day data with these 'fake' shifted days
chunk[ chunk$func_ind == real_final_cosleep$func_b[ row ], 'day'] <- new_days_b
rand_sub <- as.data.frame( dcast( as.data.table( chunk ), day ~ id, value.var = 'sleep_clus', drop = F ) )
names( rand_sub ) <- c( 'day', 'sleep_site_a', 'sleep_site_b' )
# add this to the running dataframe of derived dyadic distances for the randomized data over the whole study
total_rand <- rbind( total_rand, rand_sub )
}
# if we are on the first run through the loop, save the empirical CA of the dyad
if( i == 1 ){
# adds the empirical coefficient of association for this dyad
real_final_cosleep[ row , 'num' ] <- sum( total_real$sleep_site_a == total_real$sleep_site_b, na.rm = T )
real_final_cosleep[ row , 'denom' ] <- sum( !is.na( total_real$sleep_site_a == total_real$sleep_site_b ) )
real_final_cosleep[ row , 'MI' ] <- mutinformation( as.character( total_real$sleep_site_a ) , as.character( total_real$sleep_site_b ) )
meta_cosleep_dat[ row, c( 'start_rand_at', 'end_rand_at' ) ] <- c( min( chunked_cosleep_dat [[ 1 ]]$day ), end_of_rand )
sites_real <- total_real$sleep_site_a[ total_real$sleep_site_a == total_real$sleep_site_b & !is.na( total_real$sleep_site_a == total_real$sleep_site_b ) ]
if( length( sites_real ) != 0 ){
sites_real_sub <- data.frame( func_a = real_final_cosleep$func_a[ row ], func_b = real_final_cosleep$func_b[ row ], shared_site = sites_real )
cosleep_sites_real <- rbind( cosleep_sites_real, sites_real_sub )
}
}
# save the randomized CA of the functual dyad for this randomization
rand_final_cosleep[ ( row + nrow( real_final_cosleep ) * ( i - 1 ) ), 'num' ] <- sum( total_rand$sleep_site_a == total_rand$sleep_site_b, na.rm = T )
rand_final_cosleep[ ( row + nrow( real_final_cosleep ) * ( i - 1 ) ), 'denom' ] <- sum( !is.na( total_rand$sleep_site_a == total_rand$sleep_site_b ) )
rand_final_cosleep[ ( row + nrow( real_final_cosleep ) * ( i - 1 ) ) , 'MI' ] <- mutinformation( as.character( total_rand$sleep_site_a ), as.character( total_rand$sleep_site_b ) )
sites_rand <- total_rand$sleep_site_a[ total_rand$sleep_site_a == total_rand$sleep_site_b & !is.na( total_rand$sleep_site_a == total_rand$sleep_site_b ) ]
if( length( sites_rand ) != 0 ){
sites_rand_sub <- data.frame( func_a = real_final_cosleep$func_a[ row ], func_b = real_final_cosleep$func_b[ row ], rand_num = i, shared_site = sites_rand )
cosleep_sites_rand <- rbind( cosleep_sites_rand, sites_rand_sub )
}
}
}
# ---- Aggregate co-sleeping counts per category dyad (empirical data) ----
# num   = nights the two groups slept at the same site
# denom = nights with data for both groups
real_agg <- aggregate( real_final_cosleep[ , c('num', 'denom') ], by = list( real_final_cosleep$cat_a, real_final_cosleep$cat_b ), FUN = sum, na.rm = T)
names( real_agg ) <- c( 'cat_a', 'cat_b', 'num', 'denom' )
real_agg$prop <- real_agg$num / real_agg$denom
# Same aggregation for the day-shifted (randomized) data, kept separate per randomization run (rand_n)
rand_agg <- aggregate( rand_final_cosleep[ , c('num', 'denom') ], by = list( rand_final_cosleep$cat_a, rand_final_cosleep$cat_b, rand_final_cosleep$rand_n ), FUN = sum, na.rm = T)
names( rand_agg ) <- c( 'cat_a', 'cat_b', 'rand_n', 'num', 'denom' )
rand_agg$prop <- rand_agg$num / rand_agg$denom
# Permutation p-value: share of randomizations whose co-sleeping probability is at least as large
# as the empirical value; flipped to the other tail when p > 0.5
p <- sum( real_agg$prop <= rand_agg$prop ) / max( rand_agg$rand_n )
final_p <- ifelse( p > 0.5, sum( real_agg$prop >= rand_agg$prop ) / max( rand_agg$rand_n ), p )
# ---- Null-distribution plot, white-background version ----
par( bg = 'white' )
dens_rand <- density( rand_agg$prop )
plot( dens_rand$x, dens_rand$y, col = 'black', col.axis = 'black', col.lab = 'black', col.main = 'black', xlab = 'Probability of two groups sleeping at same site', type = 'l', main = '', ylab = 'Probability density', bty = 'l' )
axis(1, col = 'black', tick = T, labels = F )
axis(2, col = 'black', tick = T, labels = F )
# Dotted lines: central 95% of the null distribution; solid red line: empirical value
abline( v = quantile( rand_agg$prop, c( 0.025, 0.975 ) ), col = 'black', lty = 3)
abline( v = real_agg$prop, col = 'red', lty = 1 )
# NOTE(review): print() has no 'side' argument -- this was presumably meant to be
# mtext(..., side = 4) to annotate the plot margin; as written the p-value only
# reaches the console. TODO confirm intent.
print( paste( 'p-value = ', round( final_p, 4) ), side = 4 )
# ---- Same plot, black-background (presentation) version ----
par( bg = 'black' )
dens_rand <- density( rand_agg$prop )
plot( dens_rand$x, dens_rand$y, col = 'white', col.axis = 'white', col.lab = 'white', col.main = 'white', xlab = 'Probability of two groups sleeping at same site', type = 'l', main = '', ylab = 'Probability density', bty = 'l' )
axis(1, col = 'white', tick = T, labels = F )
axis(2, col = 'white', tick = T, labels = F )
abline( v = quantile( rand_agg$prop, c( 0.025, 0.975 ) ), col = 'white', lty = 3)
abline( v = real_agg$prop, col = 'red', lty = 1 )
print( paste( 'p-value = ', round( final_p, 4) ), side = 4 )
|
1876b8f39dcd4d53ef6f1d0173e0cd651d33182f
|
cf2af9741bbf4ab0ccf83c108ac34a6300cdbeff
|
/plot4.R
|
0aaf098cd4856e46e4197f3017ec64760d781666
|
[] |
no_license
|
ejsheehan/ExData_Plotting1
|
59e896b67b4f3488cb5cbd7af006c69bf018d717
|
19d04ed06721b139dbbab8930d4a0fd8d02b23c3
|
refs/heads/master
| 2021-01-24T15:24:08.958774
| 2015-11-05T00:46:45
| 2015-11-05T00:46:45
| 45,576,730
| 0
| 0
| null | 2015-11-05T00:31:08
| 2015-11-05T00:31:08
| null |
UTF-8
|
R
| false
| false
| 1,453
|
r
|
plot4.R
|
# plot4.R -- four-panel household power consumption summary for
# 2007-02-01 and 2007-02-02, written to plot4.png.

# Read the full data set; missing values are coded as "?" in this file.
dat <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?", stringsAsFactors = FALSE)

# Keep only the two target days BEFORE parsing any dates: matching the raw
# d/m/Y strings avoids running strptime over the ~2 million discarded rows.
dat <- dat[dat$Date %in% c("1/2/2007", "2/2/2007"), ]

# Combined date-time column, converted to POSIXct (strptime returns POSIXlt,
# which is list-based and fragile as a data.frame column).
dat$Date_Time <- as.POSIXct(strptime(paste(dat$Date, dat$Time),
                                     "%e/%m/%Y %H:%M:%S"))

# Open the PNG device and lay out a 2x2 grid of plots.
png(file = "plot4.png")
par(mfrow = c(2, 2))

# Upper left: global active power over time.
plot(dat$Date_Time, dat$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")

# Upper right: voltage over time.
plot(dat$Date_Time, dat$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

# Lower left: the three sub-metering series overlaid, with a legend.
plot(dat$Date_Time, dat$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(dat$Date_Time, dat$Sub_metering_2, col = "red")
lines(dat$Date_Time, dat$Sub_metering_3, col = "blue")
legend(x = "topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1), col = c("black", "red", "blue"))

# Lower right: global reactive power over time.
plot(dat$Date_Time, dat$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

# Close the device, flushing the image to disk.
dev.off()
|
149583870d7cbae07937369ea90f6a8a6d853d71
|
3c3cf41378249e9f378b5a0c821e7d06dda22729
|
/man/LoadCleanRaw.Rd
|
6472ee2be4e9903018e6a6f074bed7396d84d50c
|
[] |
no_license
|
kaylafrisoli/iRland
|
e4d89628abd57cff5cb5fe2b4d91c04d192043ac
|
fbb47d445679fa4eb66d4ba1832f058384946f16
|
refs/heads/master
| 2021-06-04T11:04:20.115525
| 2020-09-15T04:25:26
| 2020-09-15T04:25:26
| 123,225,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 945
|
rd
|
LoadCleanRaw.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LoadClean.R
\name{LoadCleanRaw}
\alias{LoadCleanRaw}
\title{Consistently load raw Irish census data}
\usage{
LoadCleanRaw(path, file_delim = " ", assignID = NULL, preProcess = FALSE)
}
\arguments{
\item{path}{path to data}
\item{file_delim}{single character used to separate fields within a record; default is " " for .txt files}
\item{assignID}{string of variable names for creating a unique identifier}
\item{preProcess}{has the data been preprocessed?}
}
\value{
data in tibble format
}
\description{
Consistently load and standardize raw Irish census data
}
\examples{
LoadCleanRaw("~/GoogleDrive/irelandData/ticknock_kayla.csv", ",", preProcess = TRUE)
LoadCleanRaw("~/GoogleDrive/irelandData/census_ireland_1901/Carlow/Ticknock.txt")
LoadCleanRaw("~/GoogleDrive/irelandData/census_ireland_1901/Carlow/Ticknock.txt",
assignID = c("County", "DED", "Year"))
}
|
6181a5885902e061eac4896b073810d1039dabfb
|
0ad326a4515ac93236aecb01c7b2b49b9c6906c5
|
/getTags.R
|
23f11b1644929be749ad33307096be41a58f8084
|
[] |
no_license
|
stunglan/Rtraining
|
70a34b287b9d62f97dd928d15aa501cfa94b7c00
|
d3fdafe3a92c45ad49e8a766aab1a54cab7e057c
|
refs/heads/master
| 2021-01-22T13:58:01.618725
| 2014-09-18T19:42:04
| 2014-09-18T19:42:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 285
|
r
|
getTags.R
|
# getTags: load the artist/tag data set and drop duplicate rows.
# NOTE(review): the original script called setwd("~/GitHub/Rtraining") here,
# which mutates global session state; the default `path` below resolves to
# the same file without changing the working directory.
getTags_R <- TRUE

#' Read a whitespace-delimited artist-tag file and de-duplicate its rows.
#'
#' @param path Path to the input file. The default preserves the location
#'   the original script read from.
#' @return A data.frame with character columns `artist` and `tag` and an
#'   integer `count` column, with duplicate rows removed.
getTags <- function(path = "~/GitHub/Rtraining/data/artistTags.csv") {
  data <- read.table(path,
                     col.names = c("artist", "tag", "count"),
                     stringsAsFactors = FALSE,
                     na.strings = "NA")
  # Duplicate artist/tag/count rows carry no extra information; keep one each.
  unique(data)
}
|
9e4f42f833a89178e892a8c8bc4ad3b83818efba
|
ae0e9f9203adc6348a26d2195e5a09d80790ba24
|
/R/functions.r
|
5fa3e376844d3b6e29a019c1edd949ec3ef6852e
|
[
"MIT"
] |
permissive
|
Moritz-Kohls/taxaEstimator
|
b546b9fb544a8b0116205035f2a19c134211bac3
|
1b93eb55db3af905268917b064f39fa9dbfb6040
|
refs/heads/master
| 2023-02-28T00:54:25.406346
| 2021-02-02T14:49:51
| 2021-02-02T14:49:51
| 335,295,331
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,490
|
r
|
functions.r
|
#' Class specific positive predictive values (PPV)
#'
#' Step 1 of the taxa classification and estimation procedure.
#' This function creates artificial reads based on NCBI viral reference genomes FASTA file,
#' computes input features and artificial neural network (ANN) model
#' and finally stores the trained model and classification results in R-intern variables.
#'
#' @seealso \code{\link{ANN_new_sample}}, \code{\link{estimation_taxa_distribution}}
#' @import Biostrings
#' @import keras
#' @import stringi
#' @param fasta.file_path FASTA file path (e.g. FASTA file viral.genomic.fna downloaded from NCBI: ftp://ftp.ncbi.nih.gov/refseq/release/viral).
#' @param taxonomy.file_path Taxonomy file path (e.g. taxonomy file taxonomy_viruses_available.csv delivered within this package).
#' @param temp.directory Results directory containing accuracy results, generalised confusion matrix results and accuracy as well as loss graphics of the simulation runs.
#' @param count.reads_training Number of sampled viruses and artificially generated reads per virus taxonomy, e.g. order (training data).
#' @param read_length Read length of all artificially generated reads.
#' @param simulation_runs Number of independent simulation runs; each run re-samples the training/validation/test viruses and re-trains the ANN.
#' @examples
#' # Please specify your file paths and directories!
#' fasta.file_path = "~/ag_bioinf/genomes/viruses_na/refseq/viral.genomic.fna" # Download from NCBI!
#' taxonomy.file_path = "inst/extdata/taxonomy_viruses_available.csv" # Relative file path
#' temp.directory = "~/ag_bioinf/research/metagenomics/temp" # Results directory
#' count.reads_training = 100
#' read_length = 150
#' simulation_runs = 10
#' \dontrun{
#' class_specific_PPVs ( fasta.file_path, taxonomy.file_path, temp.directory, count.reads_training, read_length, simulation_runs )
#' }
class_specific_PPVs = function ( fasta.file_path, taxonomy.file_path, temp.directory, count.reads_training, read_length, simulation_runs ) {
# ---- Load inputs: taxonomy table and reference genomes ----
# Taxonomy file of available viruses:
taxonomy.file = read.csv(taxonomy.file_path, sep=";", stringsAsFactors=TRUE)
str(taxonomy.file)
taxonomy.file$NC_Id = as.character(taxonomy.file$NC_Id)
taxonomy.file$Species = as.character(taxonomy.file$Species)
str(taxonomy.file)
table(taxonomy.file$Order, useNA = "always")
# Taxonomy file of viruses with available order information:
df.taxonomy_order = taxonomy.file[is.na(taxonomy.file$Order) == F,]
# Fasta file:
fasta.file = readDNAStringSet(fasta.file_path, format = "fasta")
head(names(fasta.file))
genome_lengths = width(fasta.file)
head(genome_lengths)
# FASTA headers start with the NC accession followed by a space; extract it.
indices = regexpr(" ",names(fasta.file))
fasta.NC_Ids = substring(names(fasta.file),1,indices-1)
# ---- Derive per-order virus and read counts (70/15/15 train/validation/test split) ----
count.viruses_all = table(df.taxonomy_order$Order) # All viruses of the 9 different orders
count.viruses_order = length(count.viruses_all) # 9 orders
count.viruses_validation = count.viruses_test = round ( count.viruses_all * 0.15 ) # Number of viruses used for validation resp. test data
count.viruses_training = count.viruses_all - count.viruses_validation - count.viruses_test # Number of viruses used for training data
count.reads_validation = count.reads_test = round ( count.reads_training / 70 * 15 ) # Number of sampled viruses and artificially generated reads per virus taxonomy, e.g. order (validation resp. test data)
count.reads_all = count.reads_training + count.reads_validation + count.reads_test # Number of sampled viruses and artificially generated reads per virus taxonomy, e.g. order (training, validation and test data)
# Per-run result containers, persisted at the end of the function.
accuracy_results = rep(NA_real_,simulation_runs)
confusion_matrix_results = vector("list",simulation_runs)
acc_loss_graphics_results = vector("list",simulation_runs)
# ---- One full simulate / train / evaluate cycle per iteration ----
for ( iteration in 1:(simulation_runs) ) {
print(iteration)
# Seed per iteration for reproducible splits and read sampling.
set.seed(iteration)
# Split all viruses of all orders into training, validation and test data:
Order_NC_Id.all = vector("list", length = count.viruses_order)
for ( i in 1:count.viruses_order ) {
Order_NC_Id.all [[i]] = unname(unlist(subset(df.taxonomy_order, Order == names(count.viruses_all)[i], select = NC_Id)))
}
names(Order_NC_Id.all) = names(count.viruses_all)
Order_NC_Id.training = Order_NC_Id.validation = Order_NC_Id.test = vector("list", length = count.viruses_order)
for ( i in 1:count.viruses_order ) {
indices.training = sample(count.viruses_all[i], count.viruses_training[i])
indices.temp = setdiff(1:count.viruses_all[i], indices.training)
indices.validation = sample(indices.temp, count.viruses_validation[i])
indices.test = setdiff(indices.temp, indices.validation)
Order_NC_Id.training [[i]] = Order_NC_Id.all[[i]] [indices.training]
Order_NC_Id.validation [[i]] = Order_NC_Id.all[[i]] [indices.validation]
Order_NC_Id.test [[i]] = Order_NC_Id.all[[i]] [indices.test]
}
names(Order_NC_Id.training) = names(Order_NC_Id.validation) = names(Order_NC_Id.test) = names(count.viruses_all)
# NC Ids of viruses per order. From each order, count.reads_training, count.reads_validation and count.reads_test NC Ids are sampled.
order_NC_Id_sampled.training = matrix(nrow = count.viruses_order, ncol = count.reads_training)
order_NC_Id_sampled.validation = matrix(nrow = count.viruses_order, ncol = count.reads_validation)
order_NC_Id_sampled.test = matrix(nrow = count.viruses_order, ncol = count.reads_test)
rownames(order_NC_Id_sampled.training) = rownames(order_NC_Id_sampled.validation) = rownames(order_NC_Id_sampled.test) = names(count.viruses_all)
# When an order has fewer viruses than reads requested, its NC Ids are recycled
# (each repeated floor(reads/viruses) times, remainder sampled) and then shuffled.
for ( i in 1:count.viruses_order ) {
if ( count.viruses_training[i] >= count.reads_training ) {
order_NC_Id_sampled.training [i,] = sample(Order_NC_Id.training [[i]], size = count.reads_training)
}
if ( count.viruses_validation[i] >= count.reads_validation ) {
order_NC_Id_sampled.validation [i,] = sample(Order_NC_Id.validation [[i]], size = count.reads_validation)
}
if ( count.viruses_test[i] >= count.reads_test ) {
order_NC_Id_sampled.test [i,] = sample(Order_NC_Id.test [[i]], size = count.reads_test)
}
if ( count.viruses_training[i] < count.reads_training ) {
NC_Ids = rep(Order_NC_Id.training [[i]], floor(count.reads_training/count.viruses_training[i]))
{
if ( count.reads_training %% count.viruses_training[i] > 0 ) {
order_NC_Id_sampled.training [i,] = c(NC_Ids, sample(Order_NC_Id.training [[i]], size = count.reads_training %% count.viruses_training[i]))
}
else if ( count.reads_training %% count.viruses_training[i] == 0 ) {
order_NC_Id_sampled.training [i,] = NC_Ids
}
}
order_NC_Id_sampled.training [i,] = sample(order_NC_Id_sampled.training [i,])
}
if ( count.viruses_validation[i] < count.reads_validation ) {
NC_Ids = rep(Order_NC_Id.validation [[i]], floor(count.reads_validation/count.viruses_validation[i]))
{
if ( count.reads_validation %% count.viruses_validation[i] > 0 ) {
order_NC_Id_sampled.validation [i,] = c(NC_Ids, sample(Order_NC_Id.validation [[i]], size = count.reads_validation %% count.viruses_validation[i]))
}
else if ( count.reads_validation %% count.viruses_validation[i] == 0 ) {
order_NC_Id_sampled.validation [i,] = NC_Ids
}
}
order_NC_Id_sampled.validation [i,] = sample(order_NC_Id_sampled.validation [i,])
}
if ( count.viruses_test[i] < count.reads_test ) {
NC_Ids = rep(Order_NC_Id.test [[i]], floor(count.reads_test/count.viruses_test[i]))
{
if ( count.reads_test %% count.viruses_test[i] > 0 ) {
order_NC_Id_sampled.test [i,] = c(NC_Ids, sample(Order_NC_Id.test [[i]], size = count.reads_test %% count.viruses_test[i]))
}
else if ( count.reads_test %% count.viruses_test[i] == 0 ) {
order_NC_Id_sampled.test [i,] = NC_Ids
}
}
order_NC_Id_sampled.test [i,] = sample(order_NC_Id_sampled.test [i,])
}
}
# Indices (positions) of sampled NC Ids in fasta file:
indices_NC_Id.training = matrix(sapply(order_NC_Id_sampled.training, FUN = function(x) grep(x,fasta.NC_Ids)), nrow = count.viruses_order, ncol = count.reads_training)
indices_NC_Id.validation = matrix(sapply(order_NC_Id_sampled.validation, FUN = function(x) grep(x,fasta.NC_Ids)), nrow = count.viruses_order, ncol = count.reads_validation)
indices_NC_Id.test = matrix(sapply(order_NC_Id_sampled.test, FUN = function(x) grep(x,fasta.NC_Ids)), nrow = count.viruses_order, ncol = count.reads_test)
# Random start positions of reads:
art_reads.start_pos.training = matrix(nrow = count.viruses_order, ncol = count.reads_training)
art_reads.start_pos.validation = matrix(nrow = count.viruses_order, ncol = count.reads_validation)
art_reads.start_pos.test = matrix(nrow = count.viruses_order, ncol = count.reads_test)
for ( i in 1:count.viruses_order ) {
for ( j in 1:count.reads_training ) {
art_reads.start_pos.training [i,j] = sample.int(genome_lengths [ indices_NC_Id.training[i,j] ] - read_length + 1, size = 1)
}
}
for ( i in 1:count.viruses_order ) {
for ( j in 1:count.reads_validation ) {
art_reads.start_pos.validation [i,j] = sample.int(genome_lengths [ indices_NC_Id.validation[i,j] ] - read_length + 1, size = 1)
}
}
for ( i in 1:count.viruses_order ) {
for ( j in 1:count.reads_test ) {
art_reads.start_pos.test [i,j] = sample.int(genome_lengths [ indices_NC_Id.test[i,j] ] - read_length + 1, size = 1)
}
}
# Generate artificial reads by subsequencing original reads:
art_reads.training = vector("list", length = count.viruses_order * count.reads_training)
for ( i in 1:count.viruses_order ) {
for ( j in 1:count.reads_training ) {
art_reads.training[[count.reads_training*(i-1)+j]] = subseq(fasta.file[[indices_NC_Id.training[i,j]]], start = art_reads.start_pos.training[i,j], width = read_length)
}
}
art_reads.validation = vector("list", length = count.viruses_order * count.reads_validation)
for ( i in 1:count.viruses_order ) {
for ( j in 1:count.reads_validation ) {
art_reads.validation[[count.reads_validation*(i-1)+j]] = subseq(fasta.file[[indices_NC_Id.validation[i,j]]], start = art_reads.start_pos.validation[i,j], width = read_length)
}
}
art_reads.test = vector("list", length = count.viruses_order * count.reads_test)
for ( i in 1:count.viruses_order ) {
for ( j in 1:count.reads_test ) {
art_reads.test[[count.reads_test*(i-1)+j]] = subseq(fasta.file[[indices_NC_Id.test[i,j]]], start = art_reads.start_pos.test[i,j], width = read_length)
}
}
# ---- Input features per read ----
# One-mer, two-mer and three-mer distributions and inter-nucleotide distances (4 + 16 + 64 + 36 = 120 variables).
two_mer_permutations = expand.grid(c("A","C","G","T"),c("A","C","G","T"))
two_mer_permutations = as.matrix(two_mer_permutations)
two_mer_permutations = apply(two_mer_permutations, MARGIN = 1, FUN = function (x) paste(x,collapse=""))
two_mer_permutations = sort(two_mer_permutations)
three_mer_permutations = expand.grid(c("A","C","G","T"),c("A","C","G","T"),c("A","C","G","T"))
three_mer_permutations = as.matrix(three_mer_permutations)
three_mer_permutations = apply(three_mer_permutations, MARGIN = 1, FUN = function (x) paste(x,collapse=""))
three_mer_permutations = sort(three_mer_permutations)
count_input_features = 4 + 16 + 64 + 36
# Feature matrix for the training reads: cols 1-4 base counts, 5-20 two-mer
# counts, 21-84 three-mer counts, 85-120 inter-nucleotide distance histograms.
x_training = matrix(nrow = count.viruses_order * count.reads_training, ncol = count_input_features)
for ( i in 1:(count.viruses_order*count.reads_training)) {
x_training [i,1:4] = letterFrequency(art_reads.training[[i]], letters = c("A","C","G","T"), as.prob = F)
for ( j in 5:20 ) {
x_training [i,j] = stri_count_fixed(art_reads.training[[i]],pattern = two_mer_permutations[j-4])
}
for ( j in 21:84 ) {
x_training [i,j] = stri_count_fixed(art_reads.training[[i]],pattern = three_mer_permutations[j-20])
}
dist_A = diff(stri_locate_all(art_reads.training[[i]], regex = "A") [[1]] [,1])
dist_A = table(factor(dist_A, levels = 2:10))
dist_C = diff(stri_locate_all(art_reads.training[[i]], regex = "C") [[1]] [,1])
dist_C = table(factor(dist_C, levels = 2:10))
dist_G = diff(stri_locate_all(art_reads.training[[i]], regex = "G") [[1]] [,1])
dist_G = table(factor(dist_G, levels = 2:10))
dist_T = diff(stri_locate_all(art_reads.training[[i]], regex = "T") [[1]] [,1])
dist_T = table(factor(dist_T, levels = 2:10))
x_training [i,85:93] = dist_A
x_training [i,94:102] = dist_C
x_training [i,103:111] = dist_G
x_training [i,112:120] = dist_T
}
# Reads lacking a base yield NA distance histograms; treat those as zero counts.
for ( my.index in 85:count_input_features ) {
x_training[is.na(x_training[,my.index]),my.index] = 0
}
# Min-max scale every feature column to [0, 1].
x_training = apply(x_training, MARGIN = 2, FUN = function(x) (x-min(x)) / diff(range(x)))
colnames(x_training) = c("A","C","G","T",two_mer_permutations,three_mer_permutations,
paste0("d_A_",2:10),paste0("d_C_",2:10),paste0("d_G_",2:10),paste0("d_T_",2:10))
# Same 120 features for the validation reads.
x_validation = matrix(nrow = count.viruses_order * count.reads_validation, ncol = count_input_features)
for ( i in 1:(count.viruses_order*count.reads_validation)) {
x_validation [i,1:4] = letterFrequency(art_reads.validation[[i]], letters = c("A","C","G","T"), as.prob = F)
for ( j in 5:20 ) {
x_validation [i,j] = stri_count_fixed(art_reads.validation[[i]],pattern = two_mer_permutations[j-4])
}
for ( j in 21:84 ) {
x_validation [i,j] = stri_count_fixed(art_reads.validation[[i]],pattern = three_mer_permutations[j-20])
}
dist_A = diff(stri_locate_all(art_reads.validation[[i]], regex = "A") [[1]] [,1])
dist_A = table(factor(dist_A, levels = 2:10))
dist_C = diff(stri_locate_all(art_reads.validation[[i]], regex = "C") [[1]] [,1])
dist_C = table(factor(dist_C, levels = 2:10))
dist_G = diff(stri_locate_all(art_reads.validation[[i]], regex = "G") [[1]] [,1])
dist_G = table(factor(dist_G, levels = 2:10))
dist_T = diff(stri_locate_all(art_reads.validation[[i]], regex = "T") [[1]] [,1])
dist_T = table(factor(dist_T, levels = 2:10))
x_validation [i,85:93] = dist_A
x_validation [i,94:102] = dist_C
x_validation [i,103:111] = dist_G
x_validation [i,112:120] = dist_T
}
for ( my.index in 85:count_input_features ) {
x_validation[is.na(x_validation[,my.index]),my.index] = 0
}
x_validation = apply(x_validation, MARGIN = 2, FUN = function(x) (x-min(x)) / diff(range(x)))
colnames(x_validation) = c("A","C","G","T",two_mer_permutations,three_mer_permutations,
paste0("d_A_",2:10),paste0("d_C_",2:10),paste0("d_G_",2:10),paste0("d_T_",2:10))
# Same 120 features for the test reads.
x_test = matrix(nrow = count.viruses_order * count.reads_test, ncol = count_input_features)
for ( i in 1:(count.viruses_order*count.reads_test)) {
x_test [i,1:4] = letterFrequency(art_reads.test[[i]], letters = c("A","C","G","T"), as.prob = F)
for ( j in 5:20 ) {
x_test [i,j] = stri_count_fixed(art_reads.test[[i]],pattern = two_mer_permutations[j-4])
}
for ( j in 21:84 ) {
x_test [i,j] = stri_count_fixed(art_reads.test[[i]],pattern = three_mer_permutations[j-20])
}
dist_A = diff(stri_locate_all(art_reads.test[[i]], regex = "A") [[1]] [,1])
dist_A = table(factor(dist_A, levels = 2:10))
dist_C = diff(stri_locate_all(art_reads.test[[i]], regex = "C") [[1]] [,1])
dist_C = table(factor(dist_C, levels = 2:10))
dist_G = diff(stri_locate_all(art_reads.test[[i]], regex = "G") [[1]] [,1])
dist_G = table(factor(dist_G, levels = 2:10))
dist_T = diff(stri_locate_all(art_reads.test[[i]], regex = "T") [[1]] [,1])
dist_T = table(factor(dist_T, levels = 2:10))
x_test [i,85:93] = dist_A
x_test [i,94:102] = dist_C
x_test [i,103:111] = dist_G
x_test [i,112:120] = dist_T
}
for ( my.index in 85:count_input_features ) {
x_test[is.na(x_test[,my.index]),my.index] = 0
}
x_test = apply(x_test, MARGIN = 2, FUN = function(x) (x-min(x)) / diff(range(x)))
colnames(x_test) = c("A","C","G","T",two_mer_permutations,three_mer_permutations,
paste0("d_A_",2:10),paste0("d_C_",2:10),paste0("d_G_",2:10),paste0("d_T_",2:10))
# Categories to predict (Virus orders 1 to 9, here indices 0 to 8):
y_training = rep(0:(count.viruses_order-1),each = count.reads_training)
y_validation = rep(0:(count.viruses_order-1),each = count.reads_validation)
y_test = rep(0:(count.viruses_order-1),each = count.reads_test)
df.order_id = data.frame(Order = names(count.viruses_all), Id = 0:(count.viruses_order-1))
# ---- ANN: one hidden layer (64 ReLU units), softmax output over orders ----
# Build the model. At first, setup the layers:
model <- keras_model_sequential()
model %>%
layer_dense(units = 64, activation = 'relu', input_shape = c(count_input_features)) %>%
layer_dense(units = count.viruses_order, activation = 'softmax')
# Compile the model:
model %>% compile(
optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = c('accuracy')
)
# Train the model:
# Early stopping on validation loss (patience 10) restores the best weights.
history <- model %>% fit(
x_training, y_training, validation_data = list(x_validation, y_validation), verbose = 0,
shuffle = T,
epochs = 100,
batch_size = floor(sqrt(nrow(x_training)))^2,
callbacks = callback_early_stopping(monitor = "val_loss", patience = 10, verbose = 0, restore_best_weights = T)
)
# Evaluate accuracy:
# NOTE(review): newer keras-for-R versions name this metric $accuracy rather
# than $acc -- confirm against the installed keras version.
score <- model %>% evaluate(x_test, y_test)
cat('Test loss:', score$loss, "\n")
cat('Test accuracy:', score$acc, "\n")
# Predict classes:
predictions = model %>% predict_classes(x_test)
(predictions = factor(predictions, levels = 0:(count.viruses_order-1)))
# ---- Generalised confusion matrix plus per-order TPR/TNR/PPV/NPV (in %) ----
Confusion.Matrix = data.frame(matrix(nrow = count.viruses_order, ncol = count.viruses_order+6))
colnames(Confusion.Matrix) = c("TPR","TNR","PPV","NPV","Order","Order_ID",paste("Order",0:(count.viruses_order-1),sep = "_"))
Confusion.Matrix$Order = names(count.viruses_all)
Confusion.Matrix$Order_ID = 0:(count.viruses_order-1)
for ( i in 0:(count.viruses_order-1) ) {
my.indices = which(y_test == i )
my.row = table(predictions[my.indices])
Confusion.Matrix [i+1,-(1:6)] = my.row
}
total_sum = sum(Confusion.Matrix[,-(1:6)])
for ( i in 0:(count.viruses_order-1) ) {
my.matrix = Confusion.Matrix[,-(1:6)]
TP = my.matrix [i+1,i+1]
TN = sum(my.matrix [-(i+1),-(i+1)])
C_P = sum(my.matrix[i+1,])
C_N = sum(my.matrix[-(i+1),])
P_C_P = sum(my.matrix[,i+1])
P_C_N = sum(my.matrix[,-(i+1)])
TPR = TP / C_P
TNR = TN / C_N
PPV = TP / P_C_P
NPV = TN / P_C_N
Confusion.Matrix [i+1,1:4] = round(100*c(TPR,TNR,PPV,NPV),1)
}
Confusion.Matrix
# Store this iteration's results.
accuracy_results[iteration] = score$acc
confusion_matrix_results[[iteration]] = Confusion.Matrix
acc_loss_graphics_results[[iteration]] = history
}
# ---- Persist all per-run results for the later analysis steps ----
saveRDS(accuracy_results, paste0(temp.directory,"/accuracy_results.rds"))
saveRDS(confusion_matrix_results, paste0(temp.directory,"/confusion_matrix_results.rds"))
saveRDS(acc_loss_graphics_results, paste0(temp.directory,"/acc_loss_graphics_results.rds"))
}
#' Artificial neural network (ANN) classification of a new sample
#'
#' Step 2 of the taxa classification and estimation procedure.
#' This function loads an ANN model which was trained on artificial data,
#' computes input features of the new, adjusted sample file (FASTQ or SAM)
#' and stores the predicted classes of its read sequences.
#'
#' All three arguments default to variables of the same name looked up in the
#' caller's environment, which keeps the previous calling convention (implicit
#' global variables with an empty argument list) working while allowing the
#' paths to be passed explicitly.
#'
#' @seealso \code{\link{class_specific_PPVs}}, \code{\link{estimation_taxa_distribution}}
#' @import Biostrings
#' @import keras
#' @import stringi
#' @param read_sequences.file_path Only read sequences without identifier or species names, extracted from FASTQ or SAM file!
#' @param model.file_path File path of ANN model trained on artificially generated data.
#' @param predictions.file_path File path of the result file of predicted taxonomic orders.
#' @return Invisibly, the vector of predicted class indices (also saved to \code{predictions.file_path}).
#' @examples
#' # Please specify your file paths!
#' read_sequences.file_path = "~/ag_bioinf/research/metagenomics/Data/Seehund_Mapping/read_sequences.txt"
#' model.file_path = "inst/extdata/model_training_1_dataset.h5"
#' predictions.file_path = "~/ag_bioinf/research/metagenomics/temp/900000_training_samples_1_iteration/test/predictions_seal_sample.rds"
#' \dontrun{
#' ANN_new_sample ( read_sequences.file_path, model.file_path, predictions.file_path )
#' }
ANN_new_sample = function (
  read_sequences.file_path = get( "read_sequences.file_path", envir = parent.frame() ),
  model.file_path = get( "model.file_path", envir = parent.frame() ),
  predictions.file_path = get( "predictions.file_path", envir = parent.frame() ) ) {
  # One read sequence per line, no headers or identifiers.
  read_sequences = readLines( read_sequences.file_path )
  read_sequences.count = length( read_sequences )
  sequences = DNAStringSet( read_sequences )
  # One-mer, two-mer and three-mer distributions and inter-nucleotide distances
  # (4 + 16 + 64 + 36 = 120 input features), matching the training feature set.
  two_mer_permutations = expand.grid( c("A","C","G","T"), c("A","C","G","T") )
  two_mer_permutations = as.matrix( two_mer_permutations )
  two_mer_permutations = apply( two_mer_permutations, MARGIN = 1, FUN = function (x) paste( x, collapse = "" ) )
  two_mer_permutations = sort( two_mer_permutations )
  three_mer_permutations = expand.grid( c("A","C","G","T"), c("A","C","G","T"), c("A","C","G","T") )
  three_mer_permutations = as.matrix( three_mer_permutations )
  three_mer_permutations = apply( three_mer_permutations, MARGIN = 1, FUN = function (x) paste( x, collapse = "" ) )
  three_mer_permutations = sort( three_mer_permutations )
  count_input_features = 4 + 16 + 64 + 36
  x_test = matrix( nrow = read_sequences.count, ncol = count_input_features )
  for ( i in seq_len( read_sequences.count ) ) {
    # Progress indicator (percent done), printed every 100 reads.
    if ( i %% 100 == 0 ) print( round( i / read_sequences.count * 100, 2 ) )
    # Features 1-4: single-nucleotide counts.
    x_test[ i, 1:4 ] = letterFrequency( sequences[[i]], letters = c("A","C","G","T"), as.prob = F )
    # Features 5-20: two-mer counts.
    for ( j in 5:20 ) {
      x_test[ i, j ] = stri_count_fixed( sequences[[i]], pattern = two_mer_permutations[ j - 4 ] )
    }
    # Features 21-84: three-mer counts.
    for ( j in 21:84 ) {
      x_test[ i, j ] = stri_count_fixed( sequences[[i]], pattern = three_mer_permutations[ j - 20 ] )
    }
    # Features 85-120: inter-nucleotide distance histograms (distances 2-10) per base.
    dist_A = diff( stri_locate_all( sequences[[i]], regex = "A" )[[1]][ , 1 ] )
    dist_A = table( factor( dist_A, levels = 2:10 ) )
    dist_C = diff( stri_locate_all( sequences[[i]], regex = "C" )[[1]][ , 1 ] )
    dist_C = table( factor( dist_C, levels = 2:10 ) )
    dist_G = diff( stri_locate_all( sequences[[i]], regex = "G" )[[1]][ , 1 ] )
    dist_G = table( factor( dist_G, levels = 2:10 ) )
    dist_T = diff( stri_locate_all( sequences[[i]], regex = "T" )[[1]][ , 1 ] )
    dist_T = table( factor( dist_T, levels = 2:10 ) )
    x_test[ i, 85:93 ] = dist_A
    x_test[ i, 94:102 ] = dist_C
    x_test[ i, 103:111 ] = dist_G
    x_test[ i, 112:120 ] = dist_T
  }
  # Reads lacking a given base produce NA distance histograms; treat as zero counts.
  for ( my.index in 85:count_input_features ) {
    x_test[ is.na( x_test[ , my.index ] ), my.index ] = 0
  }
  # Min-max scale each feature column to [0, 1], as done for the training data.
  # NOTE(review): a feature that is constant across all reads yields 0/0 = NaN
  # here -- TODO confirm this cannot occur for real samples.
  x_test = apply( x_test, MARGIN = 2, FUN = function(x) ( x - min(x) ) / diff( range(x) ) )
  colnames( x_test ) = c( "A", "C", "G", "T", two_mer_permutations, three_mer_permutations,
                          paste0( "d_A_", 2:10 ), paste0( "d_C_", 2:10 ),
                          paste0( "d_G_", 2:10 ), paste0( "d_T_", 2:10 ) )
  # Load your previously on artificially generated reads trained ANN model
  # (e.g. "model_training_1_dataset.h5") and classify the sample reads.
  model = load_model_hdf5( model.file_path )
  predictions = model %>% predict_classes( x_test )
  # Save the classification results of your model for step 3.
  saveRDS( predictions, predictions.file_path )
  invisible( predictions )
}
#' Prior and posterior estimation of taxa distribution
#'
#' Step 3 of the taxa classification and estimation procedure.
#' This function loads the predicted classes of the ANN model which was trained on artificial data,
#' computes prior as well as posterior taxa distribution estimations
#' and saves a graphics result file containing the estimation of the predicted classes.
#'
#' @seealso \code{\link{class_specific_PPVs}}, \code{\link{ANN_new_sample}}
#' @import Biostrings
#' @import keras
#' @import stringi
#' @import ggplot2
#' @import gridExtra
#' @import ggpubr
#' @import ggplotify
#' @param temp.directory Results directory containing accuracy results, generalised confusion matrix results and accuracy as well as loss graphics of the simulation runs.
#' @param graphics.directory Directory of graphics result file.
#' @param a_priori_table.file_path Predictions of previously classified reads of new sample file.
#' @examples
#' # Please specify your file paths and directories!
#' temp.directory = "inst/extdata/class_specific_PPVs_results/" # Results directory
#' graphics.directory = "~/ag_bioinf/research/metagenomics/ManuscriptNeuralNet/Graphics/"
#' a_priori_table.file_path = "inst/extdata/predictions_3154562.rds"
#' \dontrun{
#' estimation_taxa_distribution ( temp.directory, graphics.directory, a_priori_table.file_path )
#' }
## BUG FIX: the function previously took no arguments and silently relied on
## same-named globals, contradicting its own @param docs and @examples.
estimation_taxa_distribution = function ( temp.directory, graphics.directory, a_priori_table.file_path ) {
  ## Predicted class counts of the new sample (the prior estimate).
  estimation.a_priori.table = readRDS(a_priori_table.file_path)
  ## NOTE(review): element 10 is dropped here, presumably a non-order
  ## category in the prediction table -- confirm against its layout.
  estimation.a_priori.table = estimation.a_priori.table[-10]
  confusion_matrix_results = readRDS(paste0(temp.directory,"confusion_matrix_results.rds"))
  ## NOTE(review): the following two results are loaded but not referenced
  ## below in this function.
  loss_acc_results = readRDS(paste0(temp.directory,"loss_acc_results.rds"))
  loss_acc_graphics_results = readRDS(paste0(temp.directory,"loss_acc_graphics_results.rds"))
  order_names = c("Bunyavirales","Caudovirales","Herpesvirales","Ligamenvirales","Mononegavirales",
                  "Nidovirales","Ortervirales","Picornavirales","Tymovirales")
  ## Per-order true-positive rate and positive predictive value, one column
  ## per simulation run (columns 1 and 3 of each confusion-matrix result).
  TPR = matrix(NA_real_, nrow = 9, ncol = 10)
  PPV = matrix(NA_real_, nrow = 9, ncol = 10)
  for ( i in 1:10 ) {
    TPR[,i] = confusion_matrix_results [[i]] [,1]
    PPV[,i] = confusion_matrix_results [[i]] [,3]
  }
  rownames(TPR) = rownames(PPV) = order_names
  df.TPR_and_PPV = data.frame(Order = rep(order_names, each = 10), TPR = as.vector(t(TPR)), PPV = as.vector(t(PPV)))
  ## Prior estimation table: one row per order, one column per iteration.
  estimation.a_priori = as.data.frame(matrix(nrow = 9, ncol = 12))
  ## BUG FIX: the second column name previously contained a stray comma
  ## ("Order_ID,").
  colnames(estimation.a_priori) = c("Order","Order_ID",paste0("Iteration_",1:10))
  estimation.a_priori[,1] = order_names
  estimation.a_priori[,2] = 0:8
  for ( j in 3:12 ) {
    estimation.a_priori[,j] = unlist(estimation.a_priori.table)
  }
  estimation.a_posteriori = estimation.a_priori
  ## Column-normalised confusion matrices (columns i+6 of each result),
  ## i.e. the class-specific probabilities per predicted order.
  PPV_probabilities = vector("list",10)
  for ( j in 1:10 ) {
    PPV_probabilities[[j]] = matrix(NA_real_,9,9)
    rownames(PPV_probabilities[[j]]) = paste0("Order_",0:8)
    colnames(PPV_probabilities[[j]]) = paste0("Order_",0:8)
  }
  for ( i in 1:9 ) {
    for ( j in 1:10 ) {
      PPV_probabilities[[j]] [,i] = confusion_matrix_results[[j]] [,i+6] / sum(confusion_matrix_results[[j]] [,i+6])
    }
  }
  ## Leave-one-out means of the probability matrices (mean over all runs
  ## except run j).
  PPV_probabilities.mean = vector("list",10)
  for ( j in 1:10 ) {
    PPV_probabilities.mean[[j]] = matrix(NA_real_,9,9)
    rownames(PPV_probabilities.mean[[j]]) = paste0("Order_",0:8)
    colnames(PPV_probabilities.mean[[j]]) = paste0("Order_",0:8)
    for ( i in 1:9 ) {
      temp = sapply(PPV_probabilities[-j], FUN = function(x) x[,i])
      PPV_probabilities.mean[[j]] [,i] = rowMeans(temp)
    }
  }
  for ( j in 1:10 ) {
    ## Posterior = probability matrix %*% prior counts.
    ## NOTE(review): a dead first assignment using PPV_probabilities.mean
    ## was removed here -- it was immediately overwritten by this line, so
    ## the leave-one-out matrices above are currently unused; confirm which
    ## estimator was intended.
    estimation.a_posteriori[,j+2] = PPV_probabilities[[j]] %*% matrix(estimation.a_priori[,j+2],ncol = 1)
  }
  ## Long-format data frame of prior vs posterior counts for plotting.
  my.data_frame = data.frame(Order = rep(rep(order_names, each = 10),2), Estimation = c(rep("Prior",9*10),rep("Posterior",9*10)),
                             Iteration = rep(1:10, 2*9), Count = c(as.vector(t(as.matrix(estimation.a_priori[,-(1:2)]))),
                                                                   as.vector(t(as.matrix(estimation.a_posteriori[,-(1:2)])))))
  my.data_frame$Order = as.factor(my.data_frame$Order)
  my.data_frame$Estimation = factor(my.data_frame$Estimation, levels = c("Prior","Posterior"))
  my.data_frame$Iteration = as.factor(my.data_frame$Iteration)
  ## Convert counts to relative frequencies (%) within each iteration and
  ## estimation type (rows 1-90 = prior, 91-180 = posterior).
  my.data_frame.relative = my.data_frame
  for ( i in 1:10 ) {
    my.data_frame.relative[seq(0+i,90,by=10),4] = my.data_frame.relative[seq(0+i,90,by=10),4] / sum(my.data_frame.relative[seq(0+i,90,by=10),4]) * 100
    my.data_frame.relative[seq(90+i,180,by=10),4] = my.data_frame.relative[seq(90+i,180,by=10),4] / sum(my.data_frame.relative[seq(90+i,180,by=10),4]) * 100
  }
  my.data_frame.relative$Order = factor(my.data_frame.relative$Order, levels = rev(c("Bunyavirales","Caudovirales","Herpesvirales","Ligamenvirales","Mononegavirales",
                                                                                    "Nidovirales","Ortervirales","Picornavirales","Tymovirales") ))
  ## Use library() rather than require(): failure to load should error, not
  ## return FALSE and fail later.
  library("ggplot2")
  .df <- data.frame(x = my.data_frame.relative$Order, y =
                      my.data_frame.relative$Count, z = my.data_frame.relative$Estimation)
  ## Dodged mean points with bootstrap error bars, flipped so orders are on
  ## the y axis.
  .plot <- ggplot(data = .df, aes(x = factor(x), y = y, colour = z)) +
    stat_summary(fun.y = "mean", geom = "point", position =
                   position_dodge(width = 0.6) ) +
    stat_summary(fun.data = "mean_cl_boot", geom = "errorbar", position =
                   position_dodge(width = 0.6), pch = 10, size = 1, width = 0.1, fun.args = list(conf.int =
                                                                                                  1.0)) +
    coord_flip() +
    xlab("Order") +
    ylab("Relative frequency (%)") +
    labs(colour = "Estimation") +
    theme_bw(base_size = 25, base_family = "sans")
  print(.plot)
  ggsave(filename =
           paste0(graphics.directory,"prior_posterior_estimation.pdf"),
         plot = .plot, width = 12, height = 8)
  rm(.df, .plot)
  dev.off()
}
|
097d2ac5c0fb984f516a96c4bdfd498f155619f4
|
1f5bc0ade472404258b43525269d15f5e1b543f7
|
/R/CreateBasis.R
|
d8e05574f66138f60cd736e3c71ff9607db72c35
|
[] |
no_license
|
mathchin/fdapace
|
d84d393d731a0fe64f7b3e15419d6637a524f490
|
67728c2d6be6f27bdfdb2eb97257b2c881847266
|
refs/heads/master
| 2021-01-13T13:32:24.601040
| 2016-07-15T10:19:26
| 2016-07-15T10:19:26
| 72,411,264
| 0
| 1
| null | 2016-10-31T07:02:22
| 2016-10-31T07:02:22
| null |
UTF-8
|
R
| false
| false
| 1,030
|
r
|
CreateBasis.R
|
# Create an orthogonal basis of K functions evaluated at the points `pts`.
#
# Args:
#   K    number of basis functions (positive scalar).
#   pts  evaluation grid (defaults to 50 equispaced points in [0, 1]).
#   type basis type, partially matched against 'cos', 'sin', 'fourier';
#        anything else raises an error.
#
# Returns an nGrid-by-K matrix (nGrid = length(pts)); each COLUMN is one
# basis function.  (The original header said "K by nGrid", which did not
# match the actual sapply() output orientation.)
CreateBasis <- function(K, pts = seq(0, 1, length.out = 50), type = 'sin') {
  nGrid <- length(pts)
  possibleTypes <- c('cos', 'sin', 'fourier', 'unknown')
  # Partial matching; unmatched input falls through to 'unknown'.
  type <- possibleTypes[pmatch(type, possibleTypes, nomatch = length(possibleTypes))]
  stopifnot(is.numeric(K) && length(K) == 1 && K > 0)
  # vapply (instead of sapply) guarantees each basis function is a numeric
  # vector of length nGrid, so the result shape is type-stable.
  switch(type,
    cos = vapply(seq_len(K), function(k) {
      if (k == 1) {
        rep(1, nGrid)                       # constant function
      } else {
        sqrt(2) * cos((k - 1) * pi * pts)   # sqrt(2) normalizes on [0, 1]
      }
    }, numeric(nGrid)),
    sin = vapply(seq_len(K), function(k) sqrt(2) * sin(k * pi * pts),
                 numeric(nGrid)),
    fourier = vapply(seq_len(K), function(k) {
      if (k == 1) {
        rep(1, nGrid)
      } else if (k %% 2 == 0) {
        sqrt(2) * sin(k * pi * pts)
      } else {
        sqrt(2) * cos((k - 1) * pi * pts)
      }
    }, numeric(nGrid)),
    unknown = stop('unknown basis type')
  )
}
|
9c63a940a75211ff633909a7fe8c6e122bf40577
|
b914a79b9cc2f66614fb4f6ab784c8649901229e
|
/Liger/spatial.RNA.plot.a.gene.R
|
4156cb22ad131e68802636792d92854a8a13092a
|
[] |
no_license
|
explorerwjy/ML_genomics
|
731111729e076038673851dc14170c2a03898295
|
91acbef43a49ee48205e088fac7c78a878a03752
|
refs/heads/main
| 2023-04-23T07:46:50.273596
| 2021-04-27T13:15:49
| 2021-04-27T13:15:49
| 361,255,171
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,750
|
r
|
spatial.RNA.plot.a.gene.R
|
## Plot per-spot expression of one gene from a spatial RNA-seq count table.
##
## Args:
##   spatial.RNA.filename: tab-separated table, genes in rows; column names
##     encode spot coordinates as "X<x>x<y>" (e.g. "X12x34").
##   genename: row name of the gene to plot; if absent, a message is
##     emitted and NULL is returned.
##   log.normalize: if TRUE, scale each spot (column) to counts-per-1e5 and
##     apply log10(x + 1) before plotting.
##   savefilename: output graphics file written with ggsave().
spatial.RNA.plot.a.gene <- function(spatial.RNA.filename, genename, log.normalize=TRUE, savefilename="tmp.pdf") {
  library(ggplot2)
  library(ggthemes)
  library(ggeasy)
  spatialRNA <- read.table(spatial.RNA.filename,
                           header=T, sep="\t", row.names = 1)
  ## Counts-per-`scale` column normalisation followed by log10(x + 1).
  LogNormalize <- function(M, scale = 1e5, normalize = TRUE){
    if(normalize){
      MM <- apply(M, 2, function(x){
        if(sum(x) > 0){
          x/sum(x)
        }else{
          x   # all-zero spots left unchanged to avoid division by zero
        }
      })
    } else {
      MM <- M
    }
    MM <- scale * MM
    MM <- log10(MM + 1)
  }
  if (!genename %in% row.names(spatialRNA)) {
    message(paste0(genename, " not in ", spatial.RNA.filename))
    return(NULL)
  } else {
    ## Parse "X<x>x<y>" column names into numeric spot coordinates.
    spatialRNA.coordinate <- matrix(NA, nrow = dim(spatialRNA)[2], ncol = 2)
    for (i in 1:dim(spatialRNA)[2]) {
      spatialRNA.coordinate[i, 1] = as.integer(strsplit(strsplit(colnames(spatialRNA)[i], split = "x")[[1]][1], split = "X")[[1]][2])
      spatialRNA.coordinate[i, 2] = as.integer(strsplit(colnames(spatialRNA)[i], split = "x")[[1]][2])
    }
    # first log normalize, if necessary
    if (log.normalize) {
      spatialRNA <- LogNormalize(spatialRNA)
    }
    to.plot <- data.frame(expression = spatialRNA[match(genename, rownames(spatialRNA)),],
                          X=spatialRNA.coordinate[,1],
                          Y=spatialRNA.coordinate[,2])
    ## Midpoint for the diverging colour gradient.
    mid<-1/2*(min(to.plot$expression)+max(to.plot$expression))
    p <- ggplot(to.plot, aes(x=X, y=Y, col=expression)) +
      geom_point(alpha=0.5) +
      xlab("X") + ylab("Y") +
      ggtitle(genename) + theme_classic() + ggeasy::easy_center_title() +
      ## BUG FIX: this scale was previously a free-standing statement (the
      ## `+` was missing after easy_center_title()), so the gradient was
      ## never applied to the plot.
      scale_color_gradient2(midpoint=mid, low="blue", mid="white", high="red")
    ## BUG FIX: pass the plot explicitly; `p` was never printed, so
    ## ggsave()'s default last_plot() was not guaranteed to be this plot.
    ggsave(savefilename, plot = p)
  }
}
|
77d944dd3649ee565a68c5b49309fdbde1fcee39
|
ce0db4deeb74d83cb4704a0ab95134ddd40a77f6
|
/gr.R
|
b17962dd5f8e090005fbc2ee95d892f41d02e980
|
[] |
no_license
|
lufanl/GrowthRespiration
|
ff313cd05ae1434889ae14fb57c4f773fa285a13
|
cdf51e50a6cc282c0fbc0e813f4445a7269e46c8
|
refs/heads/master
| 2020-04-06T04:57:21.108303
| 2014-05-31T01:53:03
| 2014-05-31T01:53:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,584
|
r
|
gr.R
|
## --- Setup: PEcAn trait data, meta-analysis posteriors, construction costs ---
require(PEcAn.all)
logger.setQuitOnSevere(FALSE)
settings <- read.settings("gr.settings.xml")
td <- get.trait.data(settings$pfts, settings$run$dbfiles, settings$database, TRUE)
## Rescale trait data from percent to proportion (divide by 100) and
## persist the rescaled copy back to the same file.
trait.file <- file.path(settings$pfts$pft$outdir, 'trait.data.Rdata')
load(trait.file)
for (i in seq_along(trait.data)) {
  trait.data[[i]]$mean <- trait.data[[i]]$mean / 100
  trait.data[[i]]$stat <- trait.data[[i]]$stat / 100
}
save(trait.data, file = trait.file)
## PEcAn - run the meta-analysis to obtain posterior parameter distributions.
run.meta.analysis(td, settings$meta.analysis$iter, settings$run$dbfiles, settings$database)
load(file.path(settings$pfts$pft$outdir, "trait.mcmc.Rdata"))
load(file.path(settings$pfts$pft$outdir, "post.distns.Rdata"))
#########################################
## Tissue construction costs, one value per biochemical component.
## (Renamed from `c`, which shadowed base::c.)
cost.table <- read.csv("cost.csv")
cost <- cost.table$CO2Produced
NC <- length(cost) # number of components
## Convert gCO2 to gC:
## gCO2 * (12 gC / 44 gCO2)
cost <- cost * (12 / 44)
## Trait names per tissue; `variables` is an NC x 3 matrix
## (columns = leaf, stem, root).
leafvariables <- c('l_carbohydrates','l_lignin','l_lipids','l_minerals','l_organicacids','l_protein')
stemvariables <- c('s_carbohydrates','s_lignin','s_lipids','s_minerals','s_organicacids','s_protein')
rootvariables <- c('r_carbohydrates','r_lignin','r_lipids','r_minerals','r_organicacids','r_protein')
variables <- matrix(c(leafvariables, stemvariables, rootvariables), NC, 3)
NV <- length(variables) # total number of trait variables
getdistribution <- function(trait.mcmc,post.distns,cost,variables) {
NC=length(cost)
NV=length(variables)
#R = Rl + Rs + Rr
#leaf
#Rl = kl*Gl
#kl = cost (g C produced) * pcompl (percent composition of leaf components)
## calc mean and sd from meta-analysis
mean = matrix(NA,NC,3)
var = matrix(NA,NC,3)
for(i in 1:NV){
if(variables[i] %in% names(trait.mcmc)){
y = as.matrix((trait.mcmc[[variables[i]]]))[,"beta.o"]
mean[i]= mean(y)
var[i]= var(y)
} else {
## use the prior
row = which(rownames(post.distns) == variables[i])
if(length(row)==1 & post.distns$distn[row] == 'beta'){
x = post.distns[row,]
mean[i] = x$parama/(x$parama+x$paramb)
var[i] = (x$parama*x$paramb)/((x$parama+x$paramb)^2*(x$parama+x$paramb+1)^2)
}
}
}
## moment matching to est. alpha
# USING DIRICHLET:
# mean[i]=a[i]/a0
# var[i]=a[i]*(a0-a[i])/((a0)^2*(a0+1))
# a = matrix(NA,NC,3)
# for(i in 1:length(variables)){
# a[i]=mean[i]*(((mean[i]-mean[i]^2)/var[i])-1)
# }
# USING BETA
# E[x]=M=a/(a+B)
# B=a(1-M)/M
# Var[X]=aB/[(a+B)^2(a+B+1)]
a=B=matrix(NA,NC,3)
for(i in 1:NV) {
a[i]=(1-mean[i])*mean[i]^2/var[i]-mean[i]
B[i]= a[i]*(1-mean[i])/mean[i]
}
########## functions to rescale percent composition to sum to 1 #############
NewP.oldDoesntWork <- function(k,p,a,b){
# calculate current quantile
q0 = pbeta(p,a,b)
qm = pbeta(a/(a+b),a,b)
# adjust by k
qnew = qm + k*(q0-qm)
qnew[qnew<0] = 0
qnew[qnew>1] = 1
# convert back to p
pnew = qbeta(qnew,a,b)
return(pnew)
}
NewP <- function(k,p,a,b){
# calculate current quantile
q0 = pbeta(p,a,b)
# calc SD equivalent of current quantile
sd0 = qnorm(q0)
# adjust by k
sd.new = sd0 + k
# calc new quantile
q.new = pnorm(sd.new)
# convert back to p
pnew = qbeta(q.new,a,b)
return(pnew)
}
SumToOneFactor <- function(k,p,a,b){
pnew = NewP(k,p,a,b)
# assess sum to 1
return((sum(pnew)-1)^2)
}
N = 5000 # Iterations
## l=leaf; s=stem; r=root; nd=assuming no parameter data
G=Gl=Gs=Gr=matrix(1,N,1)
Rl=Rs=Rr=Rnd=matrix(NA,N,1)
pcompl=pcomps=pcompr=pcompnd=matrix(NA,N,NC) #storage for % composition
kl=ks=kr=knd=matrix(NA,N,1) #cost*%composition
# get percent composition using alpha and beta
for(i in 1:N){
# rdirichlet(1,c(,1,1,1,1,1))
# pcompl[i,]=rdirichlet(1,c(a[,1]))
# pcomps[i,]=rdirichlet(1,c(a[,2]))
# pcompr[i,]=rdirichlet(1,c(a[,3]))
for (j in 1:NC) {
pcompnd[i,j]=rbeta(1,1,5)
pcompl[i,j]=rbeta(1,a[j,1],B[j,1])
pcomps[i,j]=rbeta(1,a[j,2],B[j,2])
pcompr[i,j]=rbeta(1,a[j,3],B[j,3])
}
## Rescale pcomp output so sums to 1
kopt = optimize(SumToOneFactor,c(-10,10),p=pcompnd[i,],a=1,b=6)
popt = NewP(kopt$minimum,p=pcompnd[i,],a=1,b=6)
koptl = optimize(SumToOneFactor,c(-10,10),p=pcompl[i,],a=a[,1],b=B[,1])
poptl = NewP(koptl$minimum,p=pcompl[i,],a=a[,1],b=B[,1])
kopts = optimize(SumToOneFactor,c(-10,10),p=pcomps[i,],a=a[,2],b=B[,2])
popts = NewP(kopts$minimum,p=pcomps[i,],a=a[,2],b=B[,2])
koptr = optimize(SumToOneFactor,c(-10,10),p=pcompr[i,],a=a[,3],b=B[,3])
poptr = NewP(koptr$minimum,p=pcompr[i,],a=a[,3],b=B[,3])
knd[i,]=sum(cost*popt)
kl[i,]=sum(cost*poptl)
ks[i,]=sum(cost*popts)
kr[i,]=sum(cost*poptr)
if(i %% 1000 == 0) print(i)
}
# Calculate growth respiration for leaf, stem, and root
Rnd=knd*G ## UNINFORMATIVE PRIOR; no percent composition data
Rl=kl*Gl
Rs=ks*Gs
Rr=kr*Gr
R<- list("Rnd"=Rnd,"Rl"=Rl,"Rs"=Rs,"Rr"=Rr,"var"=var)
return(R)
} #end of function
##########################################################################
## Sample growth-respiration distributions using ALL plants' trait data.
R.allplants <- getdistribution(trait.mcmc,post.distns,cost,variables)
########## Create Plot of Distributions ##################################
## Overlay density curves of the four estimates on one set of axes, with
## the x range taken from the uninformative-prior ("Null") densities.
cols = 1:4
dRnd = density(R.allplants$Rnd)
plot(density(R.allplants$Rl),xlim=range(dRnd$x),col=cols[2])
lines(dRnd,col=cols[1])
lines(density(R.allplants$Rs),col=cols[3])
lines(density(R.allplants$Rr),col=cols[4])
legend("topright",legend=c("Null","Leaf","Stem","Root"),col=cols,lwd=2)
########### Variance Decomposition ####################################################
## Var(R) decomposition: sum(cost^2 * Var(Pcomp)); construction costs are
## treated as known, so there is no Var(cost) term.
## BUG FIX: an earlier loop computed Var(Pcomp) from pcompl/pcomps/pcompr,
## but those matrices are local to getdistribution() and do not exist in
## this scope, so that loop always errored.  The meta-analysis variances
## returned by getdistribution() (not subject to the sum-to-one rescaling)
## are used instead, as in the original "alternative" loop.
## Recycling cost over the rows gives vd[i,j] = cost[i]^2 * var[i,j].
vd <- cost^2 * R.allplants$var
colnames(vd) <- c("leaf","stem","root")
rownames(vd) <- c("carb","lignin","lipid","mineral","OA","protein")
totvar <- apply(vd, 2, sum)
t(vd) / totvar            ## % variance
totsd <- apply(sqrt(vd), 2, sum)
t(sqrt(vd)) / totsd * 100 ## % sd
##########################################################
## Build the covariates table: one row per species#site observation, one
## column per trait variable; duplicate observations for the same cell are
## averaged.
ctable = matrix(NA, 0, NV)
colnames(ctable) <- variables
for (i in 1:length(variables)) {
  ## Find variable in trait data
  if (variables[i] %in% names(trait.data)) {
    tr = which(names(trait.data) == variables[i])
    ## Unique ID for each observation: species#site
    v = paste(trait.data[[tr]]$specie_id, trait.data[[tr]]$site_id, sep = "#")
    for (j in 1:length(v)) {
      ## If this ID already has a row in the table...
      if (v[j] %in% rownames(ctable)) {
        rownumber = which(rownames(ctable) == v[j])
        if (is.na(ctable[rownumber, i])) {
          ## BUG FIX: values were previously read from trait.data[[i]]
          ## (position in `variables`) rather than trait.data[[tr]]
          ## (the matched position in trait.data).
          ctable[rownumber, i] = trait.data[[tr]]$mean[j]
        } else {
          ## Cell already filled: average the existing and the new value.
          ## BUG FIX: this previously used `==` (a discarded comparison)
          ## instead of `=`, so duplicate observations were silently
          ## ignored.
          ctable[rownumber, i] = mean(c(ctable[rownumber, i], trait.data[[tr]]$mean[j]))
        }
      } else {
        ## New ID: append a row holding just this trait's value.
        newrow = matrix(NA, 1, length(variables))
        rownames(newrow) = v[j]
        newrow[, i] = trait.data[[tr]]$mean[j]
        ctable = rbind(ctable, newrow)
      }
    }
  }
}
## fit missing data model to estimate NAs
## Bayesian multivariate-normal imputation of the NA cells in ctable via
## JAGS: rows x[i,] are MVN with mean mu and precision tau.
MissingData = "
model{
for(i in 1:n){
x[i,] ~ dmnorm(mu,tau)
}
mu ~ dmnorm(m0,t0)
tau ~ dwish(R,k)
x[2,4] <- xmis
xmis ~ dnorm(0.2,1)
}
"
## NOTE(review): the model hard-codes a single cell x[2,4] through xmis --
## presumably a debugging leftover; confirm before reusing this model.
w = ncol(ctable)
## Data and priors: MVN mean prior with m0 = 1/6 per component and a
## weakly informative Wishart prior (R = diag(1e-6), k = w) on tau.
data <- list(x = ctable,n=nrow(ctable),m0=rep(1/6,w),t0 = diag(1,w),R = diag(1e-6,w),k=w)
#test
#w = 4
#data <- list(x = ctable[1:2,1:w],n=2,m0=rep(1/6,w),t0 = diag(1,w),R = diag(1e-6,w),k=w)
j.model = jags.model(file=textConnection(MissingData),
                     data = data,
                     n.chains=1,
                     n.adapt=10,
                     inits = list(xmis = 0.1))
## Logit (log-odds) transform: maps a proportion p in (0, 1) onto the
## real line.  Vectorized over p.
logit <- function(p) {
  odds <- p / (1 - p)
  log(odds)
}
## Inverse logit: maps a real value x back to a proportion in (0, 1).
## Vectorized over x.
ilogit <- function(x) {
  ex <- exp(x)
  ex / (1 + ex)
}
## Work on the logit scale so imputed values back-transform into (0, 1).
Z = logit(ctable)
m = nrow(ctable)
###set up covariates
Zorig <- as.matrix(Z)
ncov <- ncol(as.matrix(Z))
#find Zobs
## Column means of the observed (non-NA) logit values.
Zobs <- apply(Z,2,mean,na.rm=TRUE)
## HACK##
## Columns with NO observations have NaN means; substitute the mean of a
## hand-picked related column (hard-coded indices).
if(is.nan(Zobs[10])) Zobs[10] = Zobs[4]
if(is.nan(Zobs[11])) Zobs[11] = Zobs[5]
if(is.nan(Zobs[13])) Zobs[13] = Zobs[1]
if(is.nan(Zobs[16])) Zobs[16] = Zobs[4]
if(is.nan(Zobs[17])) Zobs[17] = Zobs[5]
if(is.nan(Zobs[18])) Zobs[18] = Zobs[12]
n.Z <- nrow(Z)
## Initialise every missing cell at its column mean.
for(i in 1:ncov){
  Z[is.na(Zorig[,i]),i] <- Zobs[i]
}
## initial guess
Z.init = ilogit(Z)
#priors for Zmis
#mean mu
muZ.ic <- Zobs
mu.Z0 <- rep(logit(1/6),ncol(Z)) #post-normalization
M.Z0 <- diag(rep(10,ncol(Z)))
IM.Z0 <- solve(M.Z0)
#cov V
V.Z.ic <- diag(cov(Z,use="pairwise.complete.obs"))
x.Z <- ncov + 2   # degrees of freedom used in the riwish() draw below
V.Z0.all <- M.Z0*x.Z
V.Z0 <- diag(V.Z0.all)
IV.Z0 <- solve(V.Z0.all)
## Current state of the sampler: mean, covariance, and its inverse.
mu.Z <- mu.Z0
V.Z <- V.Z0.all
IV.Z <- solve(V.Z)
library(MCMCpack)   # riwish, vech
library(mvtnorm)    # rmvnorm
## set storage
start = 1
ngibbs = 500
muZgibbs <- matrix(0,nrow=ngibbs,ncol=ncov)
VZgibbs <- matrix(0,nrow=ngibbs,ncol=ncov*(ncov+1)/2)
Zgibbs <- Z*0
#gibbs loop
btimes <- 0
## Gibbs sampler: alternately draw the MVN mean, the covariance, and the
## originally-missing entries of Z, accumulating draws for posterior means.
for(g in start:ngibbs){
  print(g)
  ##missing Z's - mean
  ## Conditional MVN draw for mu; the update is skipped (previous mu kept)
  ## if the precision solve fails.
  bigv <- try(solve(n.Z*IV.Z + IM.Z0))
  if(is.numeric(bigv)){
    smallv <- apply(Z %*% IV.Z,2,sum) + IM.Z0 %*% mu.Z0
    mu.Z <- rmvnorm(1,bigv %*% smallv,bigv)
  }
  muZgibbs[g,] <- mu.Z
  ##missing Z's - Variance
  ## Inverse-Wishart draw for the covariance from the scatter around mu.Z;
  ## the previous draw is kept if the new one is not invertible.
  u <- 0
  for(i in 1:m){ u <- u + crossprod(Z[i,]-mu.Z) }
  V.Z.orig <- V.Z
  IV.Z.orig <- IV.Z
  V.Z <- riwish(x.Z + n.Z, V.Z0.all + u)
  IV.Z <- try(solve(V.Z))
  if(!is.numeric(IV.Z)){
    IV.Z <- IV.Z.orig
    V.Z <- V.Z.orig
  }
  VZgibbs[g,] <- vech(V.Z)
  ##missing Z's - draw missing values
  ## Univariate conditional normal draw for every originally-missing cell.
  ## NOTE(review): in precision form the conditional mean is usually
  ## mu_j - (1/IV_jj) * sum_k IV_jk (Z_k - mu_k); this code ADDS the
  ## cross-precision term -- verify the intended sign convention.
  for(i in 1:m){
    for(j in 1:ncov){
      if(is.na(Zorig[i,j])){
        bigv <- 1/IV.Z[j,j]
        smallv <- mu.Z[j]*IV.Z[j,j]
        zcols <- 1:ncov; zcols <- zcols[zcols != j]
        for(k in zcols){
          smallv <- smallv + (Z[i,k] - mu.Z[k])*IV.Z[k,j]
        }
        Z[i,j] <- rnorm(1,bigv * smallv, sqrt(bigv))
      }
    }
  }
  Zgibbs = Zgibbs + Z
  if(g %% 500 == 0){ save.image("GR.RData")}
} #end Z.fillmissing
## Posterior mean of the imputed table, back-transformed to proportions.
Zbar = ilogit(Zgibbs/g)
sum(is.na(Zbar))
## Compare posterior means against the naive initial guess and raw data.
cbind(apply(Zbar,2,mean),apply(Z.init,2,mean),
      apply(ctable,2,mean,na.rm=TRUE))
## Trace plots of the (back-transformed) mean draws.
pdf("muZgibb.pdf")
plot(as.mcmc(ilogit(muZgibbs)))
dev.off()
##################################################################
## PCA & Cluster Analysis
## Columns 1:6 / 7:12 / 13:18 of the imputed table are the six biochemical
## components for leaf, stem and root respectively.
data.leaf <- Z.init[,1:6]
data.stem <- Z.init[,7:12]
data.root <- Z.init[,13:18]
## cluster analysis on raw leaf data
cluster.leaf <- kmeans(data.leaf,2)
plot(Z.init[,2],Z.init[,3])
plot(Z.init[,2],Z.init[,3],col=cluster.leaf$cluster)
cluster.stem <- kmeans(data.stem,2)
cluster.root <- kmeans(data.root,2)
## cluster analysis on leaf data weighted by construction costs
cluster.leaf.cost <- kmeans(t(t(data.leaf)*cost),2)
cluster.stem.cost <- kmeans(t(t(data.stem)*cost),2)
cluster.root.cost <- kmeans(t(t(data.root)*cost),2)
plot(Z.init[,2],Z.init[,3],col=cluster.leaf.cost$cluster)
## principal component analysis on raw leaf data
pca.leaf <- prcomp(data.leaf,retx=TRUE)
pca.leaf$sdev/sum(pca.leaf$sdev)*100
plot(pca.leaf)
plot(pca.leaf$x[,1],pca.leaf$x[,2])
## principal component analysis on leaf data weighted by construction costs
pca.leaf.cost <- prcomp(data.leaf,scale=cost,retx=TRUE)
##
## k-means on PC scores weighted by the component variances (sdev^2).
cluster.pca.leaf <- kmeans(t(t(pca.leaf$x)*pca.leaf$sdev^2),2)
plot(pca.leaf$x[,1],pca.leaf$x[,2],col=cluster.pca.leaf$cluster)
cluster.pca.leaf <- kmeans(t(t(pca.leaf$x)*pca.leaf$sdev^2),2)
phenol = rbinom(nrow(pca.leaf$x),1,0.5) ## replace this with real data
## phenol.char = c("E","D")
plot(pca.leaf$x[,1],pca.leaf$x[,2],col=cluster.pca.leaf$cluster)
library(MASS)
library(vegan)
library("RPostgreSQL")
## Pull species metadata (category, growth form) from the BETY database.
dbparms <- list(driver="PostgreSQL" , user = "bety", dbname = "bety", password = "bety")
con <- db.open(dbparms)
## species category gymnosperm?
input = db.query(paste('SELECT "id","scientificname","commonname","Category","GrowthForm" FROM species'),con)
#input = db.query(paste("SELECT * FROM species"),con)
## vectors of categories corresponding to data
## Align DB metadata with the rows of data.leaf by grepping for the species
## id inside the row names.
categories = growthform = speciesnames = commonname = id = vector()
for (i in 1:length(input$id)) {
  k = grep(input$id[i], rownames(data.leaf), fixed=TRUE) ######## rownames(data.leaf) can't be redefined (should be id#site)
  categories[k]=input$Category[i]
  growthform[k]=input$GrowthForm[i]
  speciesnames[k]=input$scientificname[i]
  commonname[k]=input$commonname[i]
  id[k]=input$id[i]
}
#################### Fill missing categories ##################################
## Manual fixes for records missing in the database (hard-coded row
## indices; the inline question marks are the original author's own flags).
growthform[1]="Single Crown"####????
growthform[2]="Bunch" ####??????
#American Beech
categories[3]="Dicot"
growthform[3]="Single Stem"
commonname[3]="American beech"
growthform[9]="Single Stem"
growthform[17]="Single Stem"
categories[18]="Dicot"
growthform[18]="Single Crown" ###CHECK
commonname[18]="Yellow Alpine Pasqueflower"
growthform[21]="Single Stem"
growthform[22:26]="Bunch"
categories[29]="Dicot"
growthform[29]="Single Stem"
commonname[29]="Oak"
growthform[30]="Single Stem"
growthform[35]="Single Crown" ###???
## Map growth form to a woody flag (NA when the form is unrecognised).
woody <- vector()
for (i in 1:length(growthform)) {
  if (growthform[i]=="Single Stem") {
    woody[i]=TRUE
  } else if (growthform[i]=="Multiple Stem") {
    woody[i]=TRUE
  } else if (growthform[i]=="Single Crown") {
    woody[i]=TRUE
  } else if (growthform[i]=="Rhizomatous") {
    woody[i]=TRUE
  } else if (growthform[i]=="Bunch") {
    woody[i]=FALSE
  } else {
    woody[i]=NA
  }
}
## Restrict to rows with a known category; build the binary trait matrix.
sel.vect = which(!is.na(categories))
rownames(data.leaf)=rownames(data.stem)=rownames(data.root)=speciesnames #################### renaming rownames(data.leaf) for pca labels
characteristics = cbind(categories[sel.vect]=="Monocot",woody[sel.vect]==TRUE)
colnames(characteristics)=c("Monocot","Woody")
## fit species characteristics to compositional pca
## Fit the binary characteristics onto each cost-weighted PCA
## (vegan::envfit) and overlay them on the biplots.
cluster.leaf.cost <- kmeans(t(t(data.leaf)*cost),2)
pca.leaf.cost <- prcomp(data.leaf[sel.vect,],scale=cost,retx=TRUE)
#ef.leaf <- envfit(pca.leaf.cost,as.factor(categories[sel.vect]),na.rm=TRUE)
ef.leaf <- envfit(pca.leaf.cost,characteristics,na.rm = TRUE)
biplot(pca.leaf.cost,cex=0.6,col=cluster.pca.leaf$cluster)
plot (ef.leaf,cex=0.5)
cluster.stem.cost <- kmeans(t(t(data.stem)*cost),2)
pca.stem.cost <- prcomp(data.stem[sel.vect,],scale=cost,retx=TRUE)
ef.stem <- envfit(pca.stem.cost,characteristics,na.rm=TRUE)
biplot(pca.stem.cost,cex=0.8)
plot(ef.stem,cex=0.5)
cluster.root.cost <- kmeans(t(t(data.root)*cost),2)
pca.root.cost <- prcomp(data.root[sel.vect,],scale=cost,retx=TRUE)
ef.root <- envfit(pca.root.cost,characteristics,na.rm=TRUE)
biplot(pca.root.cost,cex=0.8)
plot(ef.root,cex=0.5)
## Split into 2 distributions
##################################
####### Monocot vs. Dicot ########
## query species for char.
j=which(categories=="Monocot")
trait.data.mono = list()
trait.data.dicot = list()
for (i in 1:length(trait.data)) {
sel.mono = which(trait.data[[i]]$specie_id %in% id[j])
trait.data.mono[[i]] = trait.data[[i]][sel.mono,]
trait.data.dicot[[i]] = trait.data[[i]][-sel.mono,]
# for(l in 1:length(trait.data[[i]]$specie_id)) {
# if(trait.data[[i]]$specie_id[l]%in%id[j]) {
# #m=as.matrix(trait.data[[i]])
# }
# }
#j = which(trait.data[[i]]$specie_id%in%id[j])
}
names(trait.data.mono) = names(trait.data)
names(trait.data.dicot) = names(trait.data)
## split trait.data
#trait.data.foo1 = trait.data[k]
#trat.data.foo2 = trait.data[!k]
td.mono = td
td.mono$pft$outdir = "/home/carya/pecan/pft/gr.mono"
td.dicot = td
td.dicot$pft$outdir = "/home/carya/pecan/pft/gr.dicot/"
## save
#save(trait.data.foo1,file=foo1)
#save(trait.data.foo2,file=foo2)
save(trait.data.mono,file=file.path(td.mono$pft$outdir, 'trait.data.Rdata'))
save(trait.data.dicot,file=file.path(td.dicot$pft$outdir, 'trait.data.Rdata'))
##PEcAn - get posterior priors
#run.meta.analysis()
run.meta.analysis(td.mono, settings$meta.analysis$iter, settings$run$dbfiles, settings$database)
load(file.path(settings$pfts$pft$outdir,"trait.mcmc.Rdata"))
load(file.path(settings$pfts$pft$outdir,"post.distns.Rdata"))
R.mono = getdistribution(trait.mcmc,post.distns,cost,variables)
run.meta.analysis(td.dicot, settings$meta.analysis$iter, settings$run$dbfiles, settings$database)
load(file.path(settings$pfts$pft$outdir,"trait.mcmc.Rdata"))
load(file.path(settings$pfts$pft$outdir,"post.distns.Rdata"))
R.dicot = getdistribution(trait.mcmc,post.distns,cost,variables)
#### Probability distributions different?
ks.test(R.mono$Rl,R.dicot$Rl)
ks.test(R.mono$Rs,R.dicot$Rs)
ks.test(R.mono$Rr,R.dicot$Rr)
cols = 1:4
dR.monond = density(R.mono$Rnd)
plot(density(R.mono$Rl),xlim=range(dR.monond$x),col=cols[2])
lines(dR.monond,col=cols[1])
lines(density(R.mono$Rs),col=cols[3])
lines(density(R.mono$Rr),col=cols[4])
lines(density(R.dicot$Rnd),col=cols[1],lty=2)
lines(density(R.dicot$Rl),col=cols[2],lty=2)
lines(density(R.dicot$Rs),col=cols[3],lty=2)
lines(density(R.dicot$Rr),col=cols[4],lty=2)
legend("topright",legend=c("Null","Leaf","Stem","Root","Monocot","Dicot"),col=c(cols,1,1),lwd=2,lty=c(1,1,1,1,1,2))
###### Woody vs Nonwoody #######
## Partition the trait data by the woody flag derived from growth form.
j <- which(woody == TRUE)
trait.data.woody <- list()
trait.data.nonwoody <- list()
for (i in 1:length(trait.data)) {
  sel.woody <- which(trait.data[[i]]$specie_id %in% id[j])
  if (length(sel.woody) > 0) {
    trait.data.woody[[i]]    <- trait.data[[i]][sel.woody, ]
    trait.data.nonwoody[[i]] <- trait.data[[i]][-sel.woody, ]
  } else {
    ## BUG FIX: with an empty sel.woody, `[-integer(0), ]` selects zero
    ## rows, so the nonwoody subset was previously emptied instead of
    ## keeping every row.
    trait.data.woody[[i]]    <- trait.data[[i]][sel.woody, ]
    trait.data.nonwoody[[i]] <- trait.data[[i]]
  }
}
names(trait.data.woody) <- names(trait.data)
names(trait.data.nonwoody) <- names(trait.data)
## Separate output directories per subset.
td.woody <- td
td.woody$pft$outdir <- "/home/carya/pecan/pft/gr.woody/"
td.nonwoody <- td
td.nonwoody$pft$outdir <- "/home/carya/pecan/pft/gr.nonwoody/"
## Save the split trait data where run.meta.analysis() will look for it.
save(trait.data.woody, file = file.path(td.woody$pft$outdir, 'trait.data.Rdata'))
save(trait.data.nonwoody, file = file.path(td.nonwoody$pft$outdir, 'trait.data.Rdata'))
## Meta-analysis per subset, then sample respiration distributions.
run.meta.analysis(td.woody, settings$meta.analysis$iter, settings$run$dbfiles, settings$database)
## BUG FIX: results were previously loaded from settings$pfts$pft$outdir
## (the all-plants directory) rather than the subset's own outdir.
load(file.path(td.woody$pft$outdir, "trait.mcmc.Rdata"))
load(file.path(td.woody$pft$outdir, "post.distns.Rdata"))
R.woody <- getdistribution(trait.mcmc, post.distns, cost, variables)
run.meta.analysis(td.nonwoody, settings$meta.analysis$iter, settings$run$dbfiles, settings$database)
load(file.path(td.nonwoody$pft$outdir, "trait.mcmc.Rdata"))
load(file.path(td.nonwoody$pft$outdir, "post.distns.Rdata"))
R.nonwoody <- getdistribution(trait.mcmc, post.distns, cost, variables)
#### Test probability distributions (Kolmogorov-Smirnov)
ks.test(R.woody$Rl, R.nonwoody$Rl)
ks.test(R.woody$Rs, R.nonwoody$Rs)
ks.test(R.woody$Rr, R.nonwoody$Rr)
## Density overlays: solid = woody, dashed = nonwoody.
cols <- 1:4
dR.woodynd <- density(R.woody$Rnd)
plot(density(R.woody$Rl), xlim = range(dR.woodynd$x), col = cols[2])
lines(dR.woodynd, col = cols[1])
lines(density(R.woody$Rs), col = cols[3])
lines(density(R.woody$Rr), col = cols[4])
lines(density(R.nonwoody$Rnd), col = cols[1], lty = 2)
lines(density(R.nonwoody$Rl), col = cols[2], lty = 2)
lines(density(R.nonwoody$Rs), col = cols[3], lty = 2)
lines(density(R.nonwoody$Rr), col = cols[4], lty = 2)
legend("topright", legend = c("Null","Leaf","Stem","Root","Woody","Nonwoody"),
       col = c(cols, 1, 1), lwd = 2, lty = c(1, 1, 1, 1, 1, 2))
b5a397df0f0c92aac14390b2242a2e53e6507a74
|
384dd8ffffaf0b791f3934589ab008a0da22920b
|
/R/random.pseudoinverse.R
|
515d58bd001fe792f29f1a63e4bedd29fd83db88
|
[] |
no_license
|
cran/ndl
|
51ca423e2cdad9411883eb723a79a74034cd72f0
|
52291ac2f05d4591d139240501c87b888093892b
|
refs/heads/master
| 2021-01-06T20:41:47.421368
| 2018-09-10T12:40:02
| 2018-09-10T12:40:02
| 17,697,831
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,403
|
r
|
random.pseudoinverse.R
|
# Randomized (approximate) Moore-Penrose pseudoinverse of a matrix m, using
# a rank-k stochastic SVD built from a random projection.
#
# Args:
#   m       numeric matrix to pseudo-invert.
#   verbose print progress messages?
#   k       number of singular values to keep; if < 1, defaults to 3/4 of
#           nrow(m).  Must not exceed nrow(m).
#
# Returns an ncol(m) x nrow(m) matrix approximating the pseudoinverse of m.
random.pseudoinverse = function (m, verbose=FALSE, k = 0)
{
  # Approximate SVD of A keeping the first k singular values, with p extra
  # oversampling columns in the random projection.
  stoch_svd = function(A, k, p = 200, verbose=FALSE) {
    # default p=200, may need a larger value here
    # to use the the fast.svd function from the corpcor library
    # require(corpcor)
    # get the dimensions of the matrix
    n = dim(A)[1]
    m = dim(A)[2]
    # Random projection of A onto a (k+p)-dimensional subspace.
    # NOTE(review): rnorm(..., -1, 1) draws with MEAN -1; a zero-mean
    # projection is more usual -- confirm intended.
    if (verbose) message("Making Random Projection")
    flush.console()
    Y <- (A %*% matrix(rnorm((k+p) * m,-1,1), ncol=k+p))
    # Orthonormal basis for the range of Y (approximately the range of A).
    if (verbose) message("Calculating QR decompostion of Random Projection")
    Q = qr.Q(qr(Y))
    # Project A into that basis, leaving a small matrix to decompose.
    if (verbose) message("Multiply transposed QR with Orig.")
    B = t(Q) %*% A
    # SVD of B yields the singular values and right vectors of A.
    if (verbose) message("Doing SVD of subset.")
    # s = fast.svd(B)
    s = svd(B)
    # Recover the left vectors: U = Q %*% U_B.
    if (verbose) message("Get U from Q times U of subset.")
    U = Q %*% s$u
    if (verbose) message("Stoch SVD is complete.")
    flush.console()
    return (list(u=U, v=s$v, d=s$d))
  }
  if (k < 1) {
    # Default: k is the top 3/4 of the singular values.
    # NOTE(review): the number of singular values is min(dim(m)); using
    # nrow(m) can exceed that for tall matrices -- confirm intended.
    k = floor((dim(m)[1]) * 0.75)
    if (verbose) message(c("k = ", k))
  }
  if (k > dim(m)[1]) {
    stop("k must be less that the size of the matrix")
  }
  if (verbose) message("Starting reduced rank SVD approximation calc.")
  # BUG FIX: `verbose` was previously passed positionally into stoch_svd's
  # third argument `p`, silently setting the oversampling to FALSE (0) and
  # never enabling verbose output inside stoch_svd.
  msvd = stoch_svd(m, k, verbose = verbose)
  if (length(msvd$d) == 0)
  # No singular values, so return the zero matrix of pseudoinverse shape.
  {
    return(array(0, dim(m)[2:1]))
  }
  else
  {
    if (verbose) message("Done calculating pseudoinverse.")
    # Pseudoinverse: V %*% diag(1/d) %*% t(U).
    return(
      msvd$v %*% (1/msvd$d * t(msvd$u))
    )
  }
}
|
c37fdf6b956f65f3e5f4fc811832f5de19f8b858
|
be0ac9cd7b4950edc946c6b5b93d92aa508cc4d6
|
/man/blk.diff.Rd
|
4e513a42baebe26ebecc10d442711737799350fe
|
[] |
no_license
|
benjaminrich/PCSmisc
|
bbc325c0981af635039a16b704c2414fec2a41c6
|
b56c3d684f900b6498654405c3cff0b01d8b280b
|
refs/heads/master
| 2023-06-09T09:28:29.609251
| 2023-05-27T21:07:27
| 2023-05-27T21:07:27
| 114,463,985
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,110
|
rd
|
blk.diff.Rd
|
\name{blk.diff}
\alias{blk.diff}
\alias{blk.intereventTime}
\title{Blockwise Lagged Differences}
\description{
Computes lagged difference within blocks and on selected elements.
}
\usage{
blk.diff(x, id, ind = NULL, lag = 1, fill = NA, diff.op = "-", ...)
blk.intereventTime(time, id, ind = NULL, lag = 1, fill = NA, diff.op = difftime.default, ...)
}
\arguments{
\item{x,time}{A vector in \code{\link{block-format}} with respect to \code{id}.}
\item{id}{A valid \code{\link{block-format}} ID.}
\item{ind}{A logical vector that designates a subset of \code{x}. By default all are included.}
\item{lag}{An integer specifying the lag.}
\item{fill}{A value to use when no other value is appropriate.}
\item{diff.op}{A function that subtracts one value from another.}
\item{...}{Further arguments passed to \code{diff.op}.}
}
\details{
These functions operate on data sets in \code{\link{block-format}}.
Essentially, the standard \code{\link{diff}} function is applied
within each block, except that a function \code{diff.op} can be
specified for doing the subtraction. Additionally, a subset on
which to perform the operation can be selected with \code{ind}. For
elements that are not selected the corresponding result is given by
\code{fill}. The first \code{lag} elements of each block of the
result are also assigned the value \code{fill} so that the result is
in \code{\link{block-format}} with respect to \code{id}.
For time values, \code{blk.intereventTime} is an alias with a more
descriptive name and a different default \code{diff.op}.
}
\value{
A vector in \code{\link{block-format}} with respect to \code{id}
containing the differenced values.
}
\author{Benjamin Rich <mail@benjaminrich.net>}
\seealso{
\itemize{
\item \code{\link{block-format}}
\item \code{\link{diff}}
\item \code{\link{deltat}}
}
}
\examples{
require(nlme)
data(Phenobarb)
dat <- Phenobarb[1:56,] # First 4 subjects
attach(dat)
cbind(dat, INTERDOSE.TIME=blk.intereventTime(time, asID(Subject), ind=!is.na(dose)))
detach(dat)
}
\keyword{ utilities }
% vim: tw=70 sw=2
|
390e38d52fbcaf76dcc782c124b92967d9849d87
|
8d11121c41ec3ea5e92c2789108c1fd1c43e7ca4
|
/run_analysis.R
|
ae05a2fa9e1579de32303931c9198542f80fc850
|
[] |
no_license
|
joannaconti/cleandata
|
f54be5e526e0b61175f6aed1a6df6df367de6c5b
|
35c12cdf2a406042ed4ab9a10b25abce2a1c3680
|
refs/heads/master
| 2020-05-26T01:44:19.623105
| 2014-05-24T19:53:50
| 2014-05-24T19:53:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,281
|
r
|
run_analysis.R
|
# Build a tidy summary of the UCI HAR ("Human Activity Recognition") data.
#
# Reads the eight raw test/train files from the working directory, merges
# them, keeps only the mean()/std() measurements (excluding meanFreq()),
# cleans up the variable names, and returns the average of each retained
# variable per subject and activity.
#
# Arguments:
#   x - unused; retained for backward compatibility with existing callers.
# Returns:
#   A data frame with one row per (Subject, Activity) pair and the mean of
#   every retained feature.
# NOTE(review): melt() and dcast() come from the reshape2 package, which must
# already be attached by the caller -- the original code relied on this
# implicitly as well.
run_analysis <- function(x) {
  # Read all 8 raw files (assumed to be in the working directory)
  subjecttest  <- read.table("subject_test.txt")
  subjecttrain <- read.table("subject_train.txt")
  xtest  <- read.table("X_test.txt")
  ytest  <- read.table("y_test.txt")
  xtrain <- read.table("X_train.txt")
  ytrain <- read.table("y_train.txt")
  features <- read.table("features.txt")
  activity <- read.table("activity_labels.txt")

  # Label the measurement columns with the feature names
  featuresvector <- features[, "V2"]
  colnames(xtest)  <- featuresvector
  colnames(xtrain) <- featuresvector

  # Attach the descriptive activity label to each activity code
  newtest  <- cbind(ytest,  activity[ytest[, 1],  "V2"])
  newtrain <- cbind(ytrain, activity[ytrain[, 1], "V2"])
  colnames(subjecttest)  <- "Subject"
  colnames(subjecttrain) <- "Subject"
  colnames(newtest)  <- c("ActivityNum", "Activity")
  colnames(newtrain) <- c("ActivityNum", "Activity")

  # Combine subject, activity and measurements, then stack test on train
  test  <- cbind(subjecttest,  newtest,  xtest)
  train <- cbind(subjecttrain, newtrain, xtrain)
  total <- rbind(test, train)

  # Keep the 3 id columns plus every mean()/std() column, excluding
  # meanFreq() (vectorised replacement of the original while loop, which
  # applied exactly this rule one column at a time).
  nm <- colnames(total)
  keep <- (grepl("mean", nm) | grepl("std", nm)) & !grepl("meanFreq", nm)
  keep[1:3] <- FALSE  # id columns are carried along explicitly below
  extract <- total[, c(1:3, which(keep))]

  # Pretty up the measurement names: "-" -> ".", drop "(" and ")".
  # The original loop left the first five characters untouched; those
  # prefixes ("tBody"/"fBody"/...) never contain the replaced characters,
  # so transforming the whole name is equivalent.
  measure <- colnames(extract)[-(1:3)]
  measure <- gsub("-", ".", measure, fixed = TRUE)
  measure <- gsub("(", "",  measure, fixed = TRUE)
  measure <- gsub(")", "",  measure, fixed = TRUE)
  colnames(extract)[-(1:3)] <- measure

  # Restrict to the mean variables only
  nm2 <- colnames(extract)
  meancols <- grepl("mean", nm2)
  meancols[1:3] <- FALSE
  extract2 <- extract[, c(1:3, which(meancols))]

  # Long format, then average each variable per Subject/Activity
  meltdf  <- melt(extract2, id.vars = c("Subject", "ActivityNum", "Activity"))
  dcastdb <- dcast(meltdf, Subject + Activity ~ variable, mean)
  dcastdb
}
|
738ae02fa5c530d180ed699e77c985cc6ab8a1a5
|
05ebb4d386cb2604bb7642bd79d09fa3ca76dc72
|
/man/tbk_data.Rd
|
307ee79c3e9492e62be0f223f5a7ca7d50e75ba9
|
[] |
no_license
|
trichelab/tbmater
|
a322d5b3c558c4b45474e3ed1e394754543cc5d5
|
dafbf46ca7a021849a0e5b86c1669fe7d2ad3447
|
refs/heads/master
| 2023-01-08T04:53:43.023578
| 2020-11-12T02:59:17
| 2020-11-12T02:59:17
| 312,152,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,228
|
rd
|
tbk_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tbk_data.R
\name{tbk_data}
\alias{tbk_data}
\title{Get data from tbk}
\usage{
tbk_data(
tbk_fnames,
idx_fname = NULL,
probes = NULL,
show.unaddressed = FALSE,
chrm = NULL,
beg = NULL,
end = NULL,
as.matrix = FALSE,
simplify = FALSE,
name.use.base = TRUE,
max_addr = 3000,
max_source = 10^6,
max_pval = 0.05,
min_coverage = 5,
all_units = FALSE
)
}
\arguments{
\item{idx_fname}{index file. If not given, use the idx.gz in first path}
\item{probes}{probe names}
\item{simplify}{reduce matrix to vector if only one sample is queried}
\item{name.use.base}{use basename for sample name}
\item{max_addr}{random addressing if under max_addr}
\item{max_pval}{maximum sig2 for float.float}
\item{min_coverage}{minimum sig2 for float.int}
\item{all_units}{retrieve all units for float.float and float.int}
\item{tbk_fnames}{paths of the tbk files to read}
\item{max_source}{random addressing if source size is under max_source}
}
\value{
\preformatted{ a numeric matrix
}
}
\description{
Assumptions:
\enumerate{
\item All the tbks have the same index.
\item If idx_fname not given, idx.gz is in the same folder as the first sample.
}
}
|
604d09c5d2bca6fa0b3b409611d8f6409130c6aa
|
5c255d5bba86ddd16c76a43e4f51c3099fb22ac6
|
/tests/testthat/test_ST_Ranking.R
|
3b7c7960e51023e269091c9b5c703d549b1d35c5
|
[] |
no_license
|
tristanbains/rFootballAnalysis
|
92d7b4ef52c117d62a58f1ad901c70e9af1c3529
|
b898ba686baffd5da63c5312898e018241b9c127
|
refs/heads/master
| 2021-01-20T12:33:49.422731
| 2017-05-19T11:08:34
| 2017-05-19T11:08:34
| 90,376,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 309
|
r
|
test_ST_Ranking.R
|
context("ST_Ranking")

# Build the standings table from the 2014-2015 ESP1 matches, silencing
# download/progress chatter from both calls.
matches <- suppressMessages(suppressWarnings(
  DL_Matches("ESP1", season1 = "2014-2015", season2 = "2014-2015")))
ranking <- suppressMessages(suppressWarnings(ST_Ranking(data = matches)))

test_that("Right ordering based on aggregate matches", {
  # Goal differences of the last three rows of the computed table.
  expect_equal(ranking$GD[16:18], c(-25, -35, -21))
})
|
18619e7d3ab33e137b93b281616bdc2c1799c8ce
|
fc18e2ba06ff2e409f8a30c76d8841243c4b393a
|
/R/mergeROC.R
|
6b58eab6268572994b1f1ce891d2d9a55d1a0694
|
[] |
no_license
|
zhixingfeng/seqPatch
|
59c0f36eb02f8ca63f467f1b4122a4736ca36b1c
|
5ef9d6084e9af2d01cc292334eed4f71ec4e6e7b
|
refs/heads/master
| 2020-12-24T15:05:33.081698
| 2014-07-31T06:54:56
| 2014-07-31T06:54:56
| 6,462,374
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,101
|
r
|
mergeROC.R
|
mergeROC <- function(ROC.B, TPR.knots = seq(0.05, 1, 0.05))
{
  # Combine bootstrap-replicate ROC curves into mean/standard-error FDR
  # summaries on a common TPR grid.
  #
  # Args:
  #   ROC.B:     list of replicates; each replicate is a named list (one
  #              entry per chromosome) holding vectors $TPR and $FDR.
  #   TPR.knots: TPR grid on which every curve is linearly interpolated.
  # Returns:
  #   list(FDR.B, FDR.mean, FDR.sd): per-replicate interpolated FDR values,
  #   and the per-chromosome mean FDR (clamped to [0, 1]) with its standard
  #   error across replicates.

  # Interpolate each replicate's curve onto TPR.knots; rule = 2 clamps to
  # the boundary value outside the observed TPR range.
  FDR.B <- lapply(ROC.B, function(rep.roc) {
    out <- lapply(rep.roc, function(roc)
      approx(roc$TPR, roc$FDR, xout = TPR.knots, rule = 2)$y)
    names(out) <- names(rep.roc)
    out
  })

  n.rep <- length(FDR.B)
  # All replicates are assumed to cover the same chromosomes; use the first
  # as reference (the original code relied on the loop index `i` left over
  # from the interpolation loop, which happened to point at the last one).
  chroms <- names(FDR.B[[1]])

  FDR.mean <- list()
  FDR.sd <- list()
  for (k in seq_along(chroms)) {
    chr <- chroms[k]
    acc <- 0
    acc2 <- 0
    for (i in seq_len(n.rep)) {
      v <- FDR.B[[i]][[k]]
      acc <- acc + v
      acc2 <- acc2 + v^2
    }
    m <- acc / n.rep
    # The standard error uses the unclamped mean, as in the original;
    # 1e-10 guards against tiny negative variances from rounding.
    s <- sqrt(acc2 / n.rep - m^2 + 1e-10) / sqrt(n.rep)
    m[m < 0] <- 0
    m[m > 1] <- 1
    FDR.mean[[chr]] <- m
    FDR.sd[[chr]] <- s
  }
  list(FDR.B = FDR.B, FDR.mean = FDR.mean, FDR.sd = FDR.sd)
}
mergeROC.bak <- function(ROC.B, TPR.knots = seq(0.05, 1, 0.05))
{
  # Strand-aware predecessor of mergeROC(): each replicate in ROC.B is a
  # list with $pos and $neg strands, each a named list of chromosomes
  # holding vectors $TPR and $FDR.
  #
  # Returns a list with per-chromosome mean FDR (clamped to [0, 1]) and
  # standard error for each strand (FDR.mean.pos, FDR.sd.pos, FDR.mean.neg,
  # FDR.sd.neg) plus the interpolated per-replicate values (FDR.B).
  #
  # Fixes vs. the original: FDR.sd.neg is now actually named (the original
  # assigned names(FDR.mean.neg) twice), and the summary loops no longer
  # depend on the loop index left over from the interpolation loop.

  # Interpolate one strand of one replicate onto the common TPR grid.
  interp.strand <- function(strand) {
    out <- lapply(strand, function(roc)
      approx(roc$TPR, roc$FDR, xout = TPR.knots, rule = 2)$y)
    names(out) <- names(strand)
    out
  }
  FDR.B <- lapply(ROC.B, function(rep.roc)
    list(pos = interp.strand(rep.roc$pos), neg = interp.strand(rep.roc$neg)))

  n.rep <- length(ROC.B)

  # Mean and standard error across replicates for one strand; chromosomes
  # are taken from the first replicate, which all replicates must share.
  summarize.strand <- function(strand.name) {
    chroms <- names(ROC.B[[1]][[strand.name]])
    n.chr <- length(ROC.B[[1]][[strand.name]])
    mean.list <- vector("list", n.chr)
    sd.list <- vector("list", n.chr)
    for (k in seq_len(n.chr)) {
      acc <- 0
      acc2 <- 0
      for (i in seq_len(n.rep)) {
        v <- FDR.B[[i]][[strand.name]][[k]]
        acc <- acc + v
        acc2 <- acc2 + v^2
      }
      m <- acc / n.rep
      # SE uses the unclamped mean; 1e-10 guards against a slightly
      # negative variance from floating-point rounding.
      s <- sqrt(acc2 / n.rep - m^2 + 1e-10) / sqrt(n.rep)
      m[m < 0] <- 0
      m[m > 1] <- 1
      mean.list[[k]] <- m
      sd.list[[k]] <- s
    }
    names(mean.list) <- chroms
    names(sd.list) <- chroms
    list(mean = mean.list, sd = sd.list)
  }

  pos <- summarize.strand("pos")
  neg <- summarize.strand("neg")
  result <- list()
  result$FDR.mean.pos <- pos$mean
  result$FDR.sd.pos <- pos$sd
  result$FDR.mean.neg <- neg$mean
  result$FDR.sd.neg <- neg$sd
  result$FDR.B <- FDR.B
  result
}
|
b734bd34f0454dc6e6a4c380217b230b62197887
|
6310ea884f52bfddeebc31ab9fb66bfce105ede0
|
/tools/r-packages/nibrs/R/IncidentFunctions.R
|
46ef07f12dbcf5269a5f58a8599550198e4b5ced
|
[
"Apache-2.0"
] |
permissive
|
mark43/nibrs
|
24c1d59c005f4134a4f62c42c00b7b7306003d89
|
65dcd57f874f8211a9fb507fbfd082ec9177b669
|
refs/heads/master
| 2023-04-28T02:10:24.762013
| 2021-12-17T16:15:45
| 2021-12-17T16:15:45
| 98,336,429
| 0
| 0
|
Apache-2.0
| 2023-04-15T04:45:34
| 2017-07-25T18:13:14
|
Java
|
UTF-8
|
R
| false
| false
| 5,902
|
r
|
IncidentFunctions.R
|
# Copyright 2016 SEARCH-The National Consortium for Justice Information and Statistics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #' @importFrom readr read_fwf
# loadIncidentFile <- function(file, maxRecords = -1) {
# columnSpecsFile <- system.file("raw", "IncidentFileFormat.txt", package=getPackageName())
# columnSpecs <- getColumnSpecs(columnSpecsFile)
# read_fwf(file=file, col_positions = fwf_positions(start = columnSpecs$start, end = columnSpecs$end, col_names = columnSpecs$name),
# col_types=paste(columnSpecs$type, collapse=""), n_max = maxRecords) %>% ungroup() %>% mutate(AdministrativeSegmentID=row_number())
# }
#' @importFrom readr read_fwf
loadIncidentFile <- function(file, versionYear, maxRecords = -1) {
  # Resolve the fixed-width column layout shipped with the package for the
  # requested version year, then read the raw incident file with it.
  specPath <- system.file("raw", paste0('IncidentFileFormat-', versionYear, '.txt'), package=getPackageName())
  specs <- getColumnSpecs(specPath)
  positions <- fwf_positions(start = specs$start, end = specs$end, col_names = specs$name)
  raw <- read_fwf(file = file, col_positions = positions,
                  col_types = paste(specs$type, collapse = ""),
                  n_max = maxRecords, progress = FALSE)
  # Tag every row with a surrogate key based on its row number.
  raw %>% ungroup() %>% mutate(AdministrativeSegmentID = row_number())
}
#' @import dplyr
addAdministrativeSegmentID <- function(rawIncidentsDataFrame) {
  # Drop any grouping, then tag each row with its row number as a key.
  ungrouped <- ungroup(rawIncidentsDataFrame)
  mutate(ungrouped, AdministrativeSegmentID = row_number())
}
#' @importFrom DBI dbClearResult dbSendQuery
truncateIncidents <- function(conn) {
  # Issue the truncate, then release the result-set handle immediately.
  res <- dbSendQuery(conn, "truncate AdministrativeSegment")
  dbClearResult(res)
}
#' Build and persist the AdministrativeSegment (incident) table
#'
#' Transforms the raw incident extract into the AdministrativeSegment layout,
#' resolves the agency surrogate key via the ORI code, writes the rows to the
#' database, and returns the data frame tagged with attr "type" = "FT".
#'
#' @param conn an open DBI database connection
#' @param rawIncidentDataFrame raw incident rows; must provide
#'   AdministrativeSegmentID, ORI, INCNUM, INCDATE, V1006, V1007 and V1013
#' @param segmentActionTypeTypeID surrogate key stamped onto every row
#' @param agencyDataFrame dimension table with AgencyID and AgencyORI columns
#' @return the AdministrativeSegment data frame that was written
#' @import dplyr
#' @importFrom DBI dbWriteTable
#' @importFrom lubridate month year ymd
writeIncidents <- function(conn, rawIncidentDataFrame, segmentActionTypeTypeID, agencyDataFrame) {
# Month/year of this load, recorded on every row
currentMonth <- formatC(month(Sys.Date()), width=2, flag="0")
currentYear <- year(Sys.Date()) %>% as.integer()
AdministrativeSegment <- rawIncidentDataFrame %>%
processingMessage('Incident') %>%
select(AdministrativeSegmentID, ORI, IncidentNumber=INCNUM, INCDATE, IncidentHour=V1007,
ClearedExceptionallyTypeID=V1013,
ReportDateIndicator=V1006) %>%
# -5 is treated as a missing incident date (mapped to NA below)
mutate(INCDATE=ifelse(INCDATE==-5, NA, INCDATE)) %>%
mutate(IncidentDate=ymd(INCDATE),
IncidentDateID=createKeyFromDate(IncidentDate),
MonthOfTape=currentMonth, YearOfTape=currentYear, CityIndicator=NA, SegmentActionTypeTypeID=segmentActionTypeTypeID,
# -6 is remapped to code 6; 99998 serves as the placeholder type ID
ClearedExceptionallyTypeID=ifelse(ClearedExceptionallyTypeID==-6, 6L, ClearedExceptionallyTypeID),
CargoTheftIndicatorTypeID=99998L,
# negative hour values are treated as unknown
IncidentHour=ifelse(IncidentHour < 0, NA_integer_, IncidentHour)) %>%
select(-INCDATE) %>%
# resolve the agency surrogate key from the ORI code
left_join(agencyDataFrame %>% select(AgencyID, ORI=AgencyORI), by='ORI')
writeLines(paste0("Writing ", nrow(AdministrativeSegment), " administrative segments to database"))
dbWriteTable(conn=conn, name="AdministrativeSegment", value=AdministrativeSegment, append=TRUE, row.names = FALSE)
attr(AdministrativeSegment, 'type') <- 'FT'
AdministrativeSegment
}
#' Build the AdministrativeSegment table from a saved raw extract
#'
#' Loads the second .RData file in \code{inputDfList}, reshapes it into the
#' AdministrativeSegment layout, resolves type and agency surrogate keys from
#' the dimension tables in \code{tableList}, writes the result to the
#' database, and returns \code{tableList} with the new table appended.
#'
#' @param conn an open DBI database connection
#' @param inputDfList character vector of .RData file paths (element 2 used)
#' @param tableList named list of dimension tables; must contain Agency,
#'   ClearedExceptionallyType and CargoTheftIndicatorType
#' @return tableList with $AdministrativeSegment added
#' @import dplyr
#' @import tidyr
#' @import tibble
#' @importFrom DBI dbWriteTable
writeRawAdministrativeSegmentTables <- function(conn, inputDfList, tableList) {
# load() returns the name of the restored object; fetch it with get()
dfName <- load(inputDfList[2])
# keep only rows whose ORI (V1003) exists in the Agency dimension
# NOTE(review): mutate_if is superseded by across() in dplyr >= 1.0
adminSegmentDf <- get(dfName) %>% mutate_if(is.factor, as.character) %>%
inner_join(tableList$Agency %>% select(AgencyORI), by=c('V1003'='AgencyORI'))
# free the loaded object; only adminSegmentDf is needed from here on
rm(list=dfName)
# Month/year of this load, recorded on every row
currentMonth <- formatC(month(Sys.Date()), width=2, flag="0")
currentYear <- year(Sys.Date()) %>% as.integer()
AdministrativeSegment <- adminSegmentDf %>%
select(ORI=V1003, IncidentNumber=V1004, INCDATE=V1005, IncidentHour=V1007,
V1013, V1016,
ReportDateIndicator=V1006) %>%
# IncidentHour appears to arrive as "(NN) ..."; keep the parenthesised
# number only -- TODO confirm the source format
mutate(IncidentHour=gsub(x=IncidentHour, pattern='\\(([0-9]+)\\).+', replacement='\\1')) %>%
# blank incident dates become NA before date parsing
mutate(INCDATE=ifelse(trimws(INCDATE)=='' | is.na(INCDATE), NA, INCDATE)) %>%
mutate(IncidentDate=ymd(INCDATE),
IncidentDateID=createKeyFromDate(IncidentDate),
MonthOfTape=currentMonth, YearOfTape=currentYear, CityIndicator=NA_character_, SegmentActionTypeTypeID=99998L,
# blank cargo-theft codes fall back to the 99998 placeholder
V1016=ifelse(trimws(V1016)=='' | is.na(V1016), 99998L, V1016),
IncidentHour=ifelse(is.na(IncidentHour), NA_integer_, as.integer(IncidentHour))) %>%
select(-INCDATE) %>%
mutate(AdministrativeSegmentID=row_number()) %>%
# resolve surrogate keys from the dimension tables
left_join(tableList$Agency %>% select(AgencyID, ORI=AgencyORI), by='ORI') %>%
left_join(tableList$ClearedExceptionallyType %>% select(ClearedExceptionallyTypeID, ClearedExceptionallyCode), by=c('V1013'='ClearedExceptionallyCode')) %>%
left_join(tableList$CargoTheftIndicatorType %>% select(CargoTheftIndicatorTypeID, CargoTheftIndicatorCode), by=c('V1016'='CargoTheftIndicatorCode')) %>%
# unmatched codes also fall back to the 99998 placeholder key
mutate(ClearedExceptionallyTypeID=ifelse(is.na(ClearedExceptionallyTypeID), 99998L, ClearedExceptionallyTypeID)) %>%
mutate(CargoTheftIndicatorTypeID=ifelse(is.na(CargoTheftIndicatorTypeID), 99998L, CargoTheftIndicatorTypeID)) %>%
select(-V1013, -V1016) %>% as_tibble()
rm(adminSegmentDf)
writeLines(paste0("Writing ", nrow(AdministrativeSegment), " administrative segments to database"))
dbWriteTable(conn=conn, name="AdministrativeSegment", value=AdministrativeSegment, append=TRUE, row.names = FALSE)
attr(AdministrativeSegment, 'type') <- 'FT'
tableList$AdministrativeSegment <- AdministrativeSegment
tableList
}
|
56aaf8cdedf06cd987cf302b89af7aec7c34fd9d
|
5b73b50251eef41c0e41dce6cba8b50f0a3765a3
|
/preprocessing/weather data/weather_merge2020.R
|
b73d556311c28b47f79b765b1c4e8cb335292738
|
[] |
no_license
|
noranm/landslide-prediction
|
cc250cabb127bc5d93a198535e98992215e7507d
|
9c38b4b8305e74fe4fb116c1fcd901f1993a9aee
|
refs/heads/main
| 2023-06-17T20:55:56.925294
| 2021-07-20T12:06:25
| 2021-07-20T12:06:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,751
|
r
|
weather_merge2020.R
|
# Aggregate hourly weather observations (precipitation, wind speed,
# precipitation type) into daily per-town statistics for two provinces --
# "KN" (Gyeongsangnam-do) and "KB" (Gyeongsangbuk-do) -- then merge both
# provinces and export one CSV.
#
# Expected layout: ./data2/<province>/<district>/<date-range>/<files>, where
# each file name is "<town>_<variable>_<start>_<end>..." and each CSV has
# three columns (day, hour, value).
#
# NOTE(review): rows are accumulated with rbind() inside nested loops, which
# is quadratic; collecting into a list and binding once would be faster.
########## KN
library(dplyr)
# Province root directory (Gyeongsangnam-do); sub-directories are districts
path = "./data2/경상남도/"
KN_SGGs <- list.files(path)
# final1 = precipitation stats, final2 = wind stats, final3 = rain fraction
final1 <- final2 <- final3 <- NULL
for (sgg in KN_SGGs) {
print(sgg)
sgg_dir = paste0(path, sgg, "/")
DATEs = list.files(sgg_dir)
for (dt in DATEs){
dt_dir = paste0(sgg_dir, dt, "/")
FILEs = list.files(dt_dir)
for (f in FILEs) {
# File name encodes: town, variable, first date, end date
set <- strsplit(f, "_")
umd = set[[1]][1]; value = set[[1]][2]; first_date = set[[1]][3]; end_date = substr(set[[1]][4],1,6)
CSVfile = read.csv(paste0(dt_dir, f))
CSVfile <- na.omit(CSVfile)
colnames(CSVfile) <- c("day", "hour", "val")
CSVfile$day <- as.numeric(CSVfile$day)
CSVfile$hour <- as.numeric(CSVfile$hour)
CSVfile$val <- as.numeric(CSVfile$val)
# Rows are assumed to be hourly, June-September, in order
CSVfile$month <- c(rep(6,30*24), rep(7,31*24), rep(8,31*24), rep(9,30*24))
# -1 appears to be a missing/no-value sentinel; mapped to 0 -- TODO confirm
CSVfile$val[CSVfile$val == -1] <- 0
if (value == "강수") {
# Precipitation: daily max hourly amount and daily total
tmp1 <- CSVfile %>% group_by(month, day) %>% summarise(var1 = max(val, na.rm=TRUE),
var2 = sum(val, na.rm=TRUE)) %>% data.frame()
colnames(tmp1) <- c("month", "day", "최대시우량", "하루총강수량")
tmp1$SIDO <- "경상남도"; tmp1$SGG_NM <- sgg; tmp1$UMD <- umd;
# Build a yyyymmdd date string for 2020
tmp1$날짜 <- as.character(as.numeric(202000 + tmp1$month)*100 + tmp1$day);
final1 <- rbind(final1, tmp1)
} else if (value == "풍속") {
# Wind speed: daily maximum
tmp2 <- CSVfile %>% group_by(month, day) %>% summarise(var = max(val, na.rm=TRUE)) %>% data.frame()
colnames(tmp2) <- c("month", "day", "최대풍속")
tmp2$SIDO <- "경상남도"; tmp2$SGG_NM <- sgg; tmp2$UMD <- umd;
tmp2$날짜 <- as.character(as.numeric(202000 + tmp2$month)*100 + tmp2$day);
final2 <- rbind(final2, tmp2)
} else {
# Precipitation type codes: none(0), rain(1), rain/snow(2), snow(3);
# the statistic is the fraction of hours per day with any precipitation
tmp3 <- CSVfile %>% group_by(month, day) %>% summarise("var1" = sum(val != 0, na.rm=TRUE)/24) %>% data.frame()
colnames(tmp3) <- c("month", "day", "강수비율")
tmp3$SIDO <- "경상남도"; tmp3$SGG_NM <- sgg; tmp3$UMD <- umd;
tmp3$날짜 <- as.character(as.numeric(202000 + tmp3$month)*100 + tmp3$day);
final3 <- rbind(final3, tmp3)
}
}
}
}
# Outer-join the three daily statistics on place and date
KN_final <- merge( final1[,c("SIDO", "SGG_NM", "UMD", "날짜", "최대시우량", "하루총강수량")],
final2[,c("SIDO", "SGG_NM", "UMD", "날짜", "최대풍속")],
by= c("SIDO", "SGG_NM", "UMD", "날짜"),
all=TRUE)
KN_final <- merge( KN_final, final3[,c("SIDO", "SGG_NM", "UMD", "날짜", "강수비율")],
by= c("SIDO", "SGG_NM", "UMD", "날짜"),
all=TRUE)
# Sanity checks: NA counts and a spot check of one town
colSums(is.na(KN_final))
KN_final[KN_final$UMD == "장목면",]
########## KB
# Same pipeline for Gyeongsangbuk-do; differs from KN in the month range
# (June-October) and province label.
path = "./data2/경상북도/"
KB_SGGs <- list.files(path)
final1 <- final2 <- final3 <- NULL
for (sgg in KB_SGGs) {
print(sgg)
sgg_dir = paste0(path, sgg, "/")
DATEs = list.files(sgg_dir)
for (dt in DATEs){
dt_dir = paste0(sgg_dir, dt, "/")
FILEs = list.files(dt_dir)
for (f in FILEs) {
set <- strsplit(f, "_")
umd = set[[1]][1]; value = set[[1]][2]; first_date = set[[1]][3]; end_date = substr(set[[1]][4],1,6)
CSVfile = read.csv(paste0(dt_dir, f))
CSVfile <- na.omit(CSVfile)
colnames(CSVfile) <- c("day", "hour", "val")
CSVfile$day <- as.numeric(CSVfile$day)
CSVfile$hour <- as.numeric(CSVfile$hour)
# NOTE(review): unlike the KN loop above, `val` is never coerced with
# as.numeric() here -- confirm the KB CSVs always parse as numeric.
CSVfile$month <- c(rep(6,30*24), rep(7,31*24), rep(8,31*24), rep(9,30*24), rep(10,31*24))
CSVfile$val[CSVfile$val == -1] <- 0
if (value == "강수") {
tmp1 <- CSVfile %>% group_by(month, day) %>% summarise(var1 = max(val, na.rm=TRUE),
var2 = sum(val, na.rm=TRUE)) %>% data.frame()
colnames(tmp1) <- c("month", "day", "최대시우량", "하루총강수량")
tmp1$SIDO <- "경상북도"; tmp1$SGG_NM <- sgg; tmp1$UMD <- umd;
tmp1$날짜 <- as.character(as.numeric(202000 + tmp1$month)*100 + tmp1$day);
final1 <- rbind(final1, tmp1)
} else if (value == "풍속") {
tmp2 <- CSVfile %>% group_by(month, day) %>% summarise(var = max(val, na.rm=TRUE)) %>% data.frame()
colnames(tmp2) <- c("month", "day", "최대풍속")
tmp2$SIDO <- "경상북도"; tmp2$SGG_NM <- sgg; tmp2$UMD <- umd;
tmp2$날짜 <- as.character(as.numeric(202000 + tmp2$month)*100 + tmp2$day);
final2 <- rbind(final2, tmp2)
} else {
# Precipitation type codes: none(0), rain(1), rain/snow(2), snow(3)
tmp3 <- CSVfile %>% group_by(month, day) %>% summarise("var1" = sum(val != 0, na.rm=TRUE)/24) %>% data.frame()
colnames(tmp3) <- c("month", "day", "강수비율")
tmp3$SIDO <- "경상북도"; tmp3$SGG_NM <- sgg; tmp3$UMD <- umd;
tmp3$날짜 <- as.character(as.numeric(202000 + tmp3$month)*100 + tmp3$day);
final3 <- rbind(final3, tmp3)
}
}
}
}
KB_final <- merge( final1[,c("SIDO", "SGG_NM", "UMD", "날짜", "최대시우량", "하루총강수량")],
final2[,c("SIDO", "SGG_NM", "UMD", "날짜", "최대풍속")],
by= c("SIDO", "SGG_NM", "UMD", "날짜"),
all=TRUE)
KB_final <- merge( KB_final, final3[,c("SIDO", "SGG_NM", "UMD", "날짜", "강수비율")],
by= c("SIDO", "SGG_NM", "UMD", "날짜"),
all=TRUE)
colSums(is.na(KB_final))
# Stack both provinces and drop rows without a max-hourly-rain value
final <- rbind(KN_final, KB_final)
colSums(is.na(final))
final <- final[!is.na(final$최대시우량),]
# Export in EUC-KR for downstream Korean-locale tools
# NOTE(review): the connection opened by file() is not explicitly closed.
file_format <- file("./WeatherInfo2020.csv", encoding='euc-kr')
write.csv(final, file_format, row.names=FALSE)
|
6cb769384acb6bd31533833b2951c059b941e5b0
|
a933950bb09c97480b7031abe7858d65e0bf2d87
|
/map_ze_breakfast.R
|
bfc1a9db45854ec94a4e590add5fa9211062f0b1
|
[] |
no_license
|
rachwhatsit/breakfast-report-card-map
|
9816559a2c0823ec4ecc7abbffc6b6b66cab664a
|
9364d17a97fb6325f456d5593549276a221d0f2a
|
refs/heads/master
| 2021-01-01T03:55:22.160928
| 2017-12-24T14:59:25
| 2017-12-24T14:59:25
| 59,143,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,491
|
r
|
map_ze_breakfast.R
|
# Choropleth of free/reduced-price (FRP) breakfast participation rates by
# Texas county, read from a "heatmap.csv" report-card extract.
library(choroplethr)
library(choroplethrMaps)
library(ggplot2);library(classInt)
# NOTE(review): setwd() to an absolute local path makes this script
# non-portable; prefer relative paths or a project-root helper.
setwd("/Users/rachel_wilkerson/Box Sync/RLW THI Projects/breakfast_heat_map/")
load("map_ze_breakfast.RData")
df=read.csv("heatmap.csv",strip.white=T,stringsAsFactors = F)
# Strip the trailing "%" and parse the participation rate as numeric
df$perc=as.numeric(substr(df$X..FRP.Breakfast.Participation,1,nchar(df$X..FRP.Breakfast.Participation)-1))
# Drop the last two rows -- presumably totals/footnotes; TODO confirm
df=df[-c(253,252),]
df[is.na(df)]=0
# Inspect Jenks natural-breaks class intervals (printed, not stored)
classIntervals(df$perc, 6,"jenks")
# Join onto choroplethr's county region ids, Texas counties only
data(county.regions)
county.regions.tx=county.regions[which(county.regions$state.name=="texas"),]
df$County=tolower(df$County)
df=merge(df,county.regions.tx,by.x="County",by.y="county.name",all=T)
df$perc[which(is.na(df$perc)==T)]<-0
# Bin the percentage into decade-wide classes for the discrete scale
df$value=cut(df$perc,breaks=c(0,10,20,30,40,50,60,70,80,90),include.lowest = T)
#df$region=county.regions.tx$region[match(tolower(df$County),county.regions.tx$county.name)]
#df$check=county.regions.tx$county.name[match(tolower(df$County),county.regions.tx$county.name)]
#length(unique(df$region))
#df=df[-c(252,253),]
df$region.y<-NULL
colnames(df)[6]<-"region"
# Quick continuous-scale version of the map
county_choropleth(df,
title = "Participation",
legend = "Participation",
num_colors = 1,
state_zoom = "texas")
df$value[which(is.na(df$value)==T)]<-0
# Final discrete-scale map with a Brewer palette, saved to PDF
choro = CountyChoropleth$new(df)
choro$ggplot_scale = scale_fill_brewer(name="Participation",palette=7, drop=FALSE)
choro$set_zoom("texas")
choro$render()
ggsave("map_me_some_breakfast_orangeCUTS.pdf",height=7,width=7,units="in")
|
bda0fb4f3ea73b70098b0ac2fcb689d3137bf57d
|
b07bf8cef773d2611d329c93bcb645ccc2b87cba
|
/Code/Chapter 6.2 - Ridge and Lasso Regression.R
|
516f81741eeb90483b4ddba1244d4302b111a905
|
[] |
no_license
|
ebardelli/Introduction-to-Statistical-Learning-Labs
|
9333a4d38042bae2ef951a3ed43a04a9360084fc
|
be4dfd9d41d7eb46630a12eea7eaebb2d53b9fcc
|
refs/heads/master
| 2022-12-04T11:19:30.314256
| 2020-08-01T19:22:37
| 2020-08-01T19:22:37
| 278,771,282
| 0
| 0
| null | 2020-07-11T02:17:13
| 2020-07-11T02:17:12
| null |
UTF-8
|
R
| false
| false
| 4,981
|
r
|
Chapter 6.2 - Ridge and Lasso Regression.R
|
# This script corresponds with Chapter 6 of Introduction to Statistical Learning
# Author : William Morgan
# 6.6 Lab 2: Ridge Regression and the Lasso
#
# Review fixes:
#  - attach() now happens AFTER the column names are lower-cased, so the
#    attached frame actually exposes `salary`
#  - l_2 norms were computed as sqrt(sum(x)^2); corrected to sqrt(sum(x^2))
#  - `ridge.pred` typo (object is `ridge_pred`) in the large-lambda MSE check
#  - shrinkage_coef() hard-coded column 50 when extracting coefficient names

library(glmnet) #glmnet() function for ridge/lasso
library(ISLR) #Hitters data

##### SECTION 1: Ridge Regression #####

# Prepare the Hitters data as done in the previous lab
hitters <- Hitters
hitters <- na.omit(hitters)
colnames(hitters) <- tolower(colnames(hitters))
attach(hitters)  # attach after renaming so `salary` etc. resolve

# glmnet() needs:
#   x      - input matrix of predictors (one row per observation)
#   y      - response vector
#   lambda - a user-supplied decreasing sequence of penalty values
# Lambda will be a vector of length 100, ranging from 10^10 to 10^-2
x <- model.matrix(salary ~ ., hitters)[, -1]
y <- salary
grid <- 10^seq(10, -2, length = 100)

ridge.mod <- glmnet(x, y, alpha = 0, lambda = grid)

### Pause for Analysis ###
# With 100 lambdas the coefficient matrix is 20x100.  Coefficients shrink
# toward zero as lambda grows; since `grid` is decreasing, later columns of
# the matrix carry larger coefficients.  Inspect the 50th and 100th lambdas,
# their coefficients, and the l_2 norm of the non-intercept coefficients.
ridge.mod$lambda[50]
coef(ridge.mod)[, 50]
sqrt(sum(coef(ridge.mod)[-1, 50]^2))   # l_2 norm: sum of SQUARES

ridge.mod$lambda[100]
coef(ridge.mod)[, 100]
sqrt(sum(coef(ridge.mod)[-1, 100]^2))

# Just for fun: report lambda, coefficients and l_2 norm for any index `ld`
shrinkage_coef <- function(glmnet_mod, ld) {
  coef_names <- names(coef(glmnet_mod)[, ld])
  list(print(paste("The Lambda value is:", glmnet_mod$lambda[ld])),
       print(paste("The coefficient for", coef_names, "is:", coef(glmnet_mod)[, ld])),
       print(paste("The l_2 norm of these coefficients is:",
                   sqrt(sum(coef(glmnet_mod)[-1, ld]^2)))))
}

# Split the sample to do some testing
set.seed(1)
train <- sample(1:nrow(x), nrow(x) / 2)
test <- (-train)
y.test <- y[test]

### Important Note ###
# Two common ways to split a data set: a random logical (TRUE/FALSE) vector,
# or -- as done here -- a random subset of row indices used as training data.

# Ridge regression on the training set; test MSE at lambda = 4
ridge_mod <- glmnet(x[train, ], y[train], alpha = 0, lambda = grid, thresh = 1e-12)
ridge_pred <- predict(ridge_mod, s = 4, newx = x[test, ]) # s = lambda, newx = new observations
mean((ridge_pred - y.test)^2) # MSE

# Test MSE when lambda is extremely large (coefficients approximately 0)
ridge_pred <- predict(ridge_mod, s = 1e10, newx = x[test, ])
mean((ridge_pred - y.test)^2)   # fixed: was the undefined `ridge.pred`

# Compare with least squares (lambda = 0; `exact` avoids grid interpolation)
# NOTE(review): newer glmnet versions require exact = TRUE calls to also pass
# the original x and y -- confirm against the installed version.
ridge_pred <- predict(ridge_mod, s = 0, newx = x[test, ], exact = T)
mean((ridge_pred - y.test)^2)

# Choose lambda by cross-validation instead of picking it a priori.
# cv.glmnet() does 10-fold CV by default (adjustable via nfolds).
set.seed(1)
cv_out <- cv.glmnet(x[train, ], y[train], alpha = 0)
plot(cv_out)
best_lam <- cv_out$lambda.min

# Test MSE associated with the CV-selected lambda
ridge_pred <- predict(ridge_mod, s = best_lam, newx = x[test, ])
mean((ridge_pred - y.test)^2)

# FINALLY: ridge regression on the entire data set with the tuned lambda
out <- glmnet(x, y, alpha = 0)
predict(out, type = "coefficients", s = best_lam)[1:20, ]

##### End of 6.2.1 #####

##### SECTION 2: The Lasso #####

# Fit the lasso and observe how some coefficients become exactly 0
lasso_mod <- glmnet(x[train, ], y[train], alpha = 1, lambda = grid)
plot(lasso_mod)

# Perform CV and compute test errors
set.seed(1)
cv_out <- cv.glmnet(x[train, ], y[train], alpha = 1)
plot(cv_out)
best_lam <- cv_out$lambda.min
lasso_pred <- predict(lasso_mod, s = best_lam, newx = x[test, ])
mean((lasso_pred - y.test)^2)

# Fit the lasso over the entire data set
out <- glmnet(x, y, alpha = 1, lambda = grid)
lasso_coef <- predict(out, type = "coefficients", s = best_lam)[1:20, ]
lasso_coef
lasso_coef[lasso_coef != 0]

### Pause for analysis ###
# The lasso's test MSE is very similar to ridge regression's, with a minor
# advantage: 12 of the 19 coefficients in the lasso model are exactly 0.

##### End of 6.2.2 #####
|
14ffe6da5eae92789e116f0af726ee16b4efecef
|
e56da52eb0eaccad038b8027c0a753d9eb2ff19e
|
/tests/testthat/test-split_analysis.R
|
aba5bd299c03dc5342db47cea8a55f62e36db1cb
|
[] |
no_license
|
ms609/TreeTools
|
fb1b656968aba57ab975ba1b88a3ddf465155235
|
3a2dfdef2e01d98bf1b58c8ee057350238a02b06
|
refs/heads/master
| 2023-08-31T10:02:01.031912
| 2023-08-18T12:21:10
| 2023-08-18T12:21:10
| 215,972,277
| 16
| 5
| null | 2023-08-16T16:04:19
| 2019-10-18T08:02:40
|
R
|
UTF-8
|
R
| false
| false
| 529
|
r
|
test-split_analysis.R
|
test_that("TipsInSplits() family", {
  # Named tip counts for a balanced five-taxon tree
  observed <- TipsInSplits(BalancedTree(letters[1:5]))
  expect_identical(observed, c("7" = 3L, "8" = 2L, "9" = 2L)[names(observed)])

  # smallest = TRUE on a pectinate seven-taxon tree
  expect_equal(TipsInSplits(PectinateTree(7), smallest = TRUE),
               c("10" = 2, "11" = 3, "12" = 3, "13" = 2))

  # keep.names = FALSE drops the split labels
  expect_identical(15:2, TipsInSplits(PectinateTree(17), keep.names = FALSE))

  # SplitImbalance() on a balanced seven-taxon tree
  observed <- SplitImbalance(BalancedTree(7))
  expected <- c("9" = 1L, "10" = 3L, "11" = 3L, "12" = 1L, "13" = 3L)
  expect_identical(observed, expected[names(observed)])
})
|
da9337f12be36190dfbfd2cb6b3e706ea5e9235c
|
672e6732d5d81ecb3b24bc47011439ef59a728b5
|
/Rstudio/LAB3_data_mining/lab3_exercises.R
|
4170bcb457148c260149831fc4698b2d2026aad7
|
[] |
no_license
|
sslowik/Mgr_Inf
|
4b4349a5e09a2354eefecf464fe53333d8365758
|
ddc65c67b2b2b6990f4591adeaa33b03bc34faaf
|
refs/heads/master
| 2022-12-01T14:33:39.213225
| 2021-01-19T20:07:57
| 2021-01-19T20:07:57
| 156,598,963
| 2
| 2
| null | 2022-11-24T09:31:00
| 2018-11-07T19:40:16
|
HTML
|
UTF-8
|
R
| false
| false
| 3,466
|
r
|
lab3_exercises.R
|
dirty.iris <- read.csv("dirty_iris.csv", header=TRUE, sep=",")
install.packages("editrules")
library(editrules)
a = subset(dirty.iris, is.finite(Sepal.Length) & is.finite(Sepal.Width) & is.finite(Petal.Length) & is.finite(Petal.Width))
nrow(a)[1] 95
E <- editset(c("Sepal.Length <= 30", "Species %in% c('setosa','versicolor','virginica')"))
E <- editset(c("Sepal.Length <= 30", "Species %in% c('setosa','versicolor','virginica')", "Sepal.Length>0", "Sepal.Width>0", "Petal.Length > 0", "Petal.Width>0", "Petal.Length >= 2* Petal.Width", "Sepal.Length > Petal.Length"))
E
ve <- violatedEdits(E, dirty.iris) ; ve
summary(ve)
plot(ve)
#zadanie 2.
install.packages("deducorrect")
library(deducorrect)
#rules.txt:
# if (!is.na(Petal.Width) & Petal.Width != 'Inf' & Petal.Width <= 0){
# Petal.Width <- NA
# }
#if (!is.na(Petal.Length) & Petal.Length <= 0){
# Petal.Length <- NA
#}
#if (!is.na(Sepal.Width) & Sepal.Width <= 0){
# Sepal.Width <- NA
#}
#if (!is.na(Sepal.Length) & (Sepal.Length <= 0 | Sepal.Length > 30)){
# Sepal.Length <- NA
#}
#if (!is.na(Petal.Width) & !is.na(Petal.Length) & 2*Petal.Width >= Petal.Length){
# Petal.Length <- NA
#}
#if (!is.na(Petal.Length) & !is.na(Sepal.Length) & Petal.Length >= Sepal.Length) {
# Sepal.Length <- NA
#}
R <- correctionRules("rules.txt")
corrected.dirty.iris <- correctWithRules(R, dirty.iris)
iris_corrected <- corrected.dirty.iris$corrected
iris_corrected
# zadanie 3.
#a)
install.packages("Hmisc")
library(Hmisc)
cbind.data.frame(Sepal.Length=impute(corrected$Sepal.Length, mean), Sepal.Width=impute(corrected$Sepal.Width, mean), Petal.Length=impute(corrected$Petal.Length, mean), Petal.Width=impute(corrected$Petal.Width, mean), corrected$Species)
#b)
install.packages("VIM")
library(VIM)
clean.iris.knn <- kNN(corrected)
clean.iris.knn.2 <- kNN(corrected)[1:5]
#zadanie.4.
# a)
iris
# b)
log
iris.log <- cbind.data.frame(Sepal.Length=log(iris$Sepal.Length), Sepal.Width=log(iris$Sepal.Width), Petal.Length=log(iris$Petal.Length), Petal.Width=log(iris$Petal.Width), Speciesiris$Species)
#c)
iris.log.scale <- cbind.data.frame(Sepal.Length=scale(iris.log$Sepal.Length), Sepal.Width=scale(iris.log$Sepal.Width), Petal.Length=scale(iris.log$Petal.Length), Petal.Width=scale(iris.log$Petal.Width), Species=iris.log$Species)
sd(iris.log.scale$Petal.Length) = 1
mean(iris.log.scale$Petal.Length) ?0
#zadanie 5.
#a)
iris.log.scale <- subset(iris.log.scale, select = -c(Species))
# or
iris.log.scale <- iris.log.scale[,-5]
#b)
iris.pca <- prcomp(iris.log.scale)
#c)
iris.pca
# Sdev1 = 1.7124583
# Sdev2 = 0.9523797
# Sdev3 = 0.3647029
# Sdev4 = 0.1656840
#d)
iris.predict <- predict(iris.pca)
iris.predict <- subset(iris.predict, select = -c(PC3))
iris.predict <- subset(iris.predict, select = -c(PC4))
iris.predict <- cbind.data.frame(iris.predict, Species=iris.log$Species)
# Task 6: scatter plot of the first two PCs, coloured by species.
# Fix: the original drew EVERY point red first, then versicolor in red and
# virginica in blue, which contradicted the legend (red/blue/green).
plot(iris.predict$PC1[iris.predict$Species == "setosa"],
     iris.predict$PC2[iris.predict$Species == "setosa"],
     type = "p", col = "red", xlab = "PC1", ylab = "PC2",
     xlim = range(iris.predict$PC1), ylim = range(iris.predict$PC2))
points(iris.predict$PC1[iris.predict$Species == "versicolor"],
       iris.predict$PC2[iris.predict$Species == "versicolor"],
       type = "p", col = "blue")
points(iris.predict$PC1[iris.predict$Species == "virginica"],
       iris.predict$PC2[iris.predict$Species == "virginica"],
       type = "p", col = "green")
# The plot shows points, not lines, so key the legend with pch rather
# than lty.
legend("topleft", c("setosa", "versicolor", "virginica"),
       col = c("red", "blue", "green"), pch = 1)
|
2547f9c9e3cbe4bf978a45b67e3fee397d3da703
|
9565039b0bb21e9e84dd98b619fc2684d195c405
|
/USCriminals.R
|
de812bfe70faedf9c270f7bef51bc26b8450731a
|
[] |
no_license
|
Xiaoxi-X-G/Propensity
|
f8620e9b8f070e7ef08dee49aca6a01667ddfb0f
|
c615102c8fbf41fc32c62801faaad551af6a6556
|
refs/heads/master
| 2021-01-13T15:56:06.991008
| 2016-12-18T22:27:28
| 2016-12-18T22:27:28
| 76,810,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 712
|
r
|
USCriminals.R
|
# Exploratory look at the San Francisco crime training data.
rm(list = ls())

# Location of the raw data on disk.
RScriptPath <- "C:/gxx/r/project/USCriminal/"

# Declare every column's type up front so read.csv() does not have to guess.
Column.type <- c(
  "POSIXct",   # Dates
  "factor",    # Category
  "character", # Description
  "factor",    # DayofWeek
  "factor",    # PdDistrict
  "factor",    # Resolution
  "factor",    # Add
  "numeric",
  "numeric"
)

# Read the training set, treating empty strings as missing values.
DataRaw <- read.csv(
  paste(RScriptPath, "train.csv", sep = ''),
  na.strings = c("", "NA"),
  colClasses = Column.type
)

# Keep only the calendar-date part of the timestamp.
DataRaw$Dates <- format(DataRaw$Dates, "%Y-%m-%d")

##### Visu #####
# Quick frequency bar charts for the main categorical columns.
barplot(table(DataRaw$DayOfWeek))
barplot(table(DataRaw$PdDistrict))
barplot(table(DataRaw$Category))
|
c5245891a47dd631ea6118879282bf1e26cede8d
|
9bbdcb3936c5063edf237fe550fba4f5bf0a9b49
|
/man/cpPolyShapeNew.Rd
|
66e649c8c905e0d41f9de48a0e0710e16adfb864
|
[
"MIT"
] |
permissive
|
coolbutuseless/chipmunkcore
|
b2281f89683e0b9268f26967496f560ea1b5bb99
|
97cc78ad3a68192f9c99cee93203510e20151dde
|
refs/heads/master
| 2022-12-10T17:56:15.459688
| 2020-09-08T22:40:10
| 2020-09-08T22:40:10
| 288,990,789
| 17
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 818
|
rd
|
cpPolyShapeNew.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpPolyShape.R
\name{cpPolyShapeNew}
\alias{cpPolyShapeNew}
\title{Allocate and initialize a polygon shape with rounded corners. A convex hull will
be created from the vertexes.}
\usage{
cpPolyShapeNew(body, count, verts, transform, radius)
}
\arguments{
\item{body}{[\code{cpBody *}]}
\item{count}{[\code{int}]}
\item{verts}{[\code{cpVect *}]}
\item{transform}{[\code{cpTransform *}]}
\item{radius}{[\code{cpFloat}]}
}
\value{
[\code{cpShape *}]
}
\description{
Allocate and initialize a polygon shape with rounded corners. A convex hull will
be created from the vertexes.
}
\details{
C function prototype: \code{CP_EXPORT cpShape* cpPolyShapeNew(cpBody *body, int count, const cpVect *verts, cpTransform transform, cpFloat radius);}
}
|
8496bd027da4ae0216a64a6098b6cfce37eb6093
|
a6a2e430afe20b8b347c959933d7131639b8f818
|
/scripts/diff_exp_chip_seq.R
|
1adbe4786618bbebda2f5c6f9b2f4afa8aa36817
|
[] |
no_license
|
fcadete/TRF2_siRNA
|
e1ee0bbcbaa3a885dff77427579beb6c53e51bcf
|
419d987ea1fc5b1c099b0658b21d01dcd99f507c
|
refs/heads/master
| 2020-04-01T17:29:06.315585
| 2019-01-15T10:07:02
| 2019-01-15T10:07:02
| 153,432,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,733
|
r
|
diff_exp_chip_seq.R
|
# --- Setup: libraries and output device -----------------------------------
library("DESeq2")
library("Repitools")
library("tximport")
library("EnsDb.Hsapiens.v86")
library("tidyverse")
library("parallel")
# All figures produced below go into this single PDF.
pdf("diff_expressed_encode_analysis.pdf", width = 10)
# --- Import salmon quantifications and fit the DESeq2 model ---------------
samples <- read_tsv("sample_info.txt")
# TimePoint must be categorical (not numeric) for the interaction design.
samples$TimePoint <- as.character(samples$TimePoint)
files <- file.path("salmon_on_hg19_output", samples$Filename, "quant.sf")
names(files) <- samples$Filename
# Transcript-to-gene map taken from the Ensembl v86 annotation.
tx2gene <- values(transcripts(EnsDb.Hsapiens.v86))[, c("tx_id", "gene_id")]
txi <- tximport(files, type = "salmon", tx2gene = tx2gene, ignoreTxVersion = TRUE)
# Full model: main effects for siRNA and time point plus their interaction.
ddsTxi <- DESeqDataSetFromTximport(txi,
                                   colData = samples,
                                   design = ~ siRNA + TimePoint + siRNA:TimePoint)
ddsTxi <- DESeq(ddsTxi)
# Overall control-vs-TRF2 contrast.
res <- results(ddsTxi,
               contrast = c("siRNA", "control", "TRF2"))
# --- Build the gene sets used throughout the rest of the script -----------
# Names of the genes differentially expressed in the siRNA:TimePoint48
# interaction term (padj < 0.1).
siRNATRF2.TimePoint48.results <- results(ddsTxi, name = "siRNATRF2.TimePoint48")
unique(genes(EnsDb.Hsapiens.v86,
             filter = GeneIdFilter(rownames(
               siRNATRF2.TimePoint48.results[which(siRNATRF2.TimePoint48.results$padj < 0.1),])))$symbol)
# GRanges of the 48 h differentially expressed genes; seqlevels get a
# "chr" prefix so they match the UCSC-style names in the ENCODE files.
selected_genes_48h <- genes(EnsDb.Hsapiens.v86,
                            filter = GeneIdFilter(rownames(
                              siRNATRF2.TimePoint48.results[which(siRNATRF2.TimePoint48.results$padj < 0.1),])))
seqlevels(selected_genes_48h) <- paste0("chr", seqlevels(selected_genes_48h))
# Same for the siRNATRF2.TimePoint96 interaction.
siRNATRF2.TimePoint96.results <- results(ddsTxi, name = "siRNATRF2.TimePoint96")
unique(genes(EnsDb.Hsapiens.v86,
             filter = GeneIdFilter(rownames(
               siRNATRF2.TimePoint96.results[which(siRNATRF2.TimePoint96.results$padj < 0.1),])))$symbol)
selected_genes_96h <- genes(EnsDb.Hsapiens.v86,
                            filter = GeneIdFilter(rownames(
                              siRNATRF2.TimePoint96.results[which(siRNATRF2.TimePoint96.results$padj < 0.1),])))
seqlevels(selected_genes_96h) <- paste0("chr", seqlevels(selected_genes_96h))
# Background set 1: every annotated gene.
all_genes <- genes(EnsDb.Hsapiens.v86)
seqlevels(all_genes) <- paste0("chr", seqlevels(all_genes))
# Background set 2: genes with appreciable expression (baseMean > 20).
expressed_genes <- genes(EnsDb.Hsapiens.v86,
                         filter = GeneIdFilter(rownames(
                           siRNATRF2.TimePoint96.results[which(siRNATRF2.TimePoint96.results$baseMean > 20),])))
seqlevels(expressed_genes) <- paste0("chr", seqlevels(expressed_genes))
# Extra columns of the ENCODE narrowPeak BED format, needed by
# rtracklayer::import().
extraCols_narrowPeak <- c(signalValue = "numeric", pValue = "numeric",
                          qValue = "numeric", peak = "integer")
# ENCODE HeLa metadata sheet; keep released GRCh38 optimal-IDR peak files.
hela_encode_metadata <- read_tsv("metadata.tsv", col_names = TRUE)
interesting_data <- hela_encode_metadata %>%
  filter(`Output type` == "optimal idr thresholded peaks",
         `File format` == "bed narrowPeak",
         `File Status` == "released",
         `Assembly` == "GRCh38")
# --------------------------------------------------------------------------
# For every GRCh38 peak file, compute the fraction of each gene set
# (all / expressed / 48h-selected / 96h-selected) whose gene bodies and
# promoters overlap at least one ChIP peak.
# --------------------------------------------------------------------------
overlap_proportions <- as.data.frame(do.call(rbind,
  lapply(interesting_data$`File accession`, function(file_accession) {
    print(file_accession)  # progress marker
    this_bed <- rtracklayer::import(paste0("encode_files/", file_accession, ".bed.gz"),
                                    format = "BED",
                                    extraCols = extraCols_narrowPeak)
    # Gene-body overlap proportions for each gene set.
    all_genes_bed_overlaps <- countOverlaps(all_genes, this_bed) > 0
    all_genes_bed_overlap_proportion <- sum(all_genes_bed_overlaps == TRUE) /
      length(all_genes_bed_overlaps)
    expressed_genes_bed_overlaps <- countOverlaps(expressed_genes, this_bed) > 0
    expressed_genes_bed_overlap_proportion <- sum(expressed_genes_bed_overlaps == TRUE) /
      length(expressed_genes_bed_overlaps)
    selected_genes_48h_bed_overlaps <- countOverlaps(selected_genes_48h, this_bed) > 0
    selected_genes_48h_bed_overlap_proportion <- sum(selected_genes_48h_bed_overlaps == TRUE) /
      length(selected_genes_48h_bed_overlaps)
    selected_genes_96h_bed_overlaps <- countOverlaps(selected_genes_96h, this_bed) > 0
    selected_genes_96h_bed_overlap_proportion <- sum(selected_genes_96h_bed_overlaps == TRUE) /
      length(selected_genes_96h_bed_overlaps)
    # Same computation over promoter windows (promoters() defaults).
    all_genes_bed_overlaps_promoters <- countOverlaps(promoters(all_genes), this_bed) > 0
    all_genes_bed_overlap_promoter_proportion <- sum(all_genes_bed_overlaps_promoters == TRUE) /
      length(all_genes_bed_overlaps_promoters)
    expressed_genes_bed_overlaps_promoters <- countOverlaps(promoters(expressed_genes), this_bed) > 0
    expressed_genes_bed_overlap_promoter_proportion <- sum(expressed_genes_bed_overlaps_promoters == TRUE) /
      length(expressed_genes_bed_overlaps_promoters)
    selected_genes_48h_bed_overlaps_promoters <- countOverlaps(promoters(selected_genes_48h), this_bed) > 0
    selected_genes_48h_bed_overlap_promoter_proportion <- sum(selected_genes_48h_bed_overlaps_promoters == TRUE) /
      length(selected_genes_48h_bed_overlaps_promoters)
    selected_genes_96h_bed_overlaps_promoters <- countOverlaps(promoters(selected_genes_96h), this_bed) > 0
    selected_genes_96h_bed_overlap_promoter_proportion <- sum(selected_genes_96h_bed_overlaps_promoters == TRUE) /
      length(selected_genes_96h_bed_overlaps_promoters)
    # One row per (group, time point) combination for this peak file.
    return(rbind(data.frame(`File accession` = file_accession, group = "all",
                            all_genes = all_genes_bed_overlap_proportion,
                            expressed_genes = expressed_genes_bed_overlap_proportion,
                            selected = selected_genes_48h_bed_overlap_proportion, time_point = "48h"),
                 data.frame(`File accession` = file_accession, group = "all",
                            all_genes = all_genes_bed_overlap_proportion,
                            expressed_genes = expressed_genes_bed_overlap_proportion,
                            selected = selected_genes_96h_bed_overlap_proportion, time_point = "96h"),
                 data.frame(`File accession` = file_accession, group = "promoters",
                            all_genes = all_genes_bed_overlap_promoter_proportion,
                            expressed_genes = expressed_genes_bed_overlap_promoter_proportion,
                            selected = selected_genes_48h_bed_overlap_promoter_proportion, time_point = "48h"),
                 data.frame(`File accession` = file_accession, group = "promoters",
                            all_genes = all_genes_bed_overlap_promoter_proportion,
                            expressed_genes = expressed_genes_bed_overlap_promoter_proportion,
                            selected = selected_genes_96h_bed_overlap_promoter_proportion, time_point = "96h")))
  }
)), stringsAsFactors = FALSE)
# Attach target / experiment annotation from the ENCODE metadata.
overlap_proportions <- left_join(overlap_proportions,
                                 select(interesting_data, `File accession`,
                                        `Experiment target`, `Experiment accession`),
                                 by = c(File.accession = "File accession"))
overlap_proportions_hg38 <- overlap_proportions %>%
  mutate(all_genes = as.numeric(all_genes),
         expressed_genes = as.numeric(expressed_genes),
         selected = as.numeric(selected))
# Heatmap: log2 enrichment of the selected genes' overlap vs ALL genes.
p <- ggplot(overlap_proportions_hg38,
            aes(x = `Experiment target`, y = time_point, fill = log2(selected / all_genes))) +
  geom_tile() +
  coord_equal() +
  scale_fill_gradient2(low = "darkblue", high = "gold", mid = "white") +
  facet_grid(group ~ .) +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
print(p)
# Same heatmap, relative to the expressed-gene background instead.
p <- ggplot(overlap_proportions_hg38,
            aes(x = `Experiment target`, y = time_point, fill = log2(selected / expressed_genes))) +
  geom_tile() +
  coord_equal() +
  scale_fill_gradient2(low = "darkblue", high = "gold", mid = "white") +
  facet_grid(group ~ .) +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
print(p)
# --------------------------------------------------------------------------
# Repeat the overlap analysis for peak files only available on hg19:
# lift the peaks over to GRCh38 coordinates first, then compute the same
# per-gene-set overlap proportions.
# --------------------------------------------------------------------------
chain <- rtracklayer::import.chain("hg19ToHg38.over.chain")
interesting_data <- hela_encode_metadata %>%
  filter(`Output type` == "optimal idr thresholded peaks",
         `File format` == "bed narrowPeak",
         `File Status` == "released",
         `Assembly` == "hg19")
overlap_proportions <- as.data.frame(do.call(rbind,
  lapply(interesting_data$`File accession`, function(file_accession) {
    print(file_accession)  # progress marker
    this_bed <- rtracklayer::import(paste0("encode_files/", file_accession, ".bed.gz"),
                                    format = "BED",
                                    extraCols = extraCols_narrowPeak)
    # Map hg19 peaks to GRCh38 coordinates via the chain file.
    this_bed <- unlist(rtracklayer::liftOver(this_bed, chain))
    all_genes_bed_overlaps <- countOverlaps(all_genes, this_bed) > 0
    all_genes_bed_overlap_proportion <- sum(all_genes_bed_overlaps == TRUE) / length(all_genes_bed_overlaps)
    expressed_genes_bed_overlaps <- countOverlaps(expressed_genes, this_bed) > 0
    expressed_genes_bed_overlap_proportion <- sum(expressed_genes_bed_overlaps == TRUE) /
      length(expressed_genes_bed_overlaps)
    selected_genes_48h_bed_overlaps <- countOverlaps(selected_genes_48h, this_bed) > 0
    selected_genes_48h_bed_overlap_proportion <- sum(selected_genes_48h_bed_overlaps == TRUE) /
      length(selected_genes_48h_bed_overlaps)
    selected_genes_96h_bed_overlaps <- countOverlaps(selected_genes_96h, this_bed) > 0
    selected_genes_96h_bed_overlap_proportion <- sum(selected_genes_96h_bed_overlaps == TRUE) /
      length(selected_genes_96h_bed_overlaps)
    # Promoter-window overlaps.
    all_genes_bed_overlaps_promoters <- countOverlaps(promoters(all_genes), this_bed) > 0
    all_genes_bed_overlap_promoter_proportion <- sum(all_genes_bed_overlaps_promoters == TRUE) /
      length(all_genes_bed_overlaps_promoters)
    expressed_genes_bed_overlaps_promoters <- countOverlaps(promoters(expressed_genes), this_bed) > 0
    expressed_genes_bed_overlap_promoter_proportion <- sum(expressed_genes_bed_overlaps_promoters == TRUE) /
      length(expressed_genes_bed_overlaps_promoters)
    selected_genes_48h_bed_overlaps_promoters <- countOverlaps(promoters(selected_genes_48h), this_bed) > 0
    selected_genes_48h_bed_overlap_promoter_proportion <- sum(selected_genes_48h_bed_overlaps_promoters == TRUE) /
      length(selected_genes_48h_bed_overlaps_promoters)
    selected_genes_96h_bed_overlaps_promoters <- countOverlaps(promoters(selected_genes_96h), this_bed) > 0
    selected_genes_96h_bed_overlap_promoter_proportion <- sum(selected_genes_96h_bed_overlaps_promoters == TRUE) /
      length(selected_genes_96h_bed_overlaps_promoters)
    # One row per (group, time point) combination, as in the GRCh38 loop.
    return(rbind(data.frame(`File accession` = file_accession, group = "all",
                            all_genes = all_genes_bed_overlap_proportion,
                            expressed_genes = expressed_genes_bed_overlap_proportion,
                            selected = selected_genes_48h_bed_overlap_proportion, time_point = "48h"),
                 data.frame(`File accession` = file_accession, group = "all",
                            all_genes = all_genes_bed_overlap_proportion,
                            expressed_genes = expressed_genes_bed_overlap_proportion,
                            selected = selected_genes_96h_bed_overlap_proportion, time_point = "96h"),
                 data.frame(`File accession` = file_accession, group = "promoters",
                            all_genes = all_genes_bed_overlap_promoter_proportion,
                            expressed_genes = expressed_genes_bed_overlap_promoter_proportion,
                            selected = selected_genes_48h_bed_overlap_promoter_proportion, time_point = "48h"),
                 data.frame(`File accession` = file_accession, group = "promoters",
                            all_genes = all_genes_bed_overlap_promoter_proportion,
                            expressed_genes = expressed_genes_bed_overlap_promoter_proportion,
                            selected = selected_genes_96h_bed_overlap_promoter_proportion, time_point = "96h")))
  }
)), stringsAsFactors = FALSE)
# Attach target / experiment annotation from the ENCODE metadata.
overlap_proportions <- left_join(overlap_proportions,
                                 select(interesting_data, `File accession`,
                                        `Experiment target`, `Experiment accession`),
                                 by = c(File.accession = "File accession"))
overlap_proportions_hg19 <- overlap_proportions %>%
  mutate(all_genes = as.numeric(all_genes),
         expressed_genes = as.numeric(expressed_genes),
         selected = as.numeric(selected))
# Enrichment heatmap vs all genes (lifted hg19 files).
p <- ggplot(overlap_proportions_hg19,
            aes(x = `Experiment target`, y = time_point, fill = log2(selected / all_genes))) +
  geom_tile() +
  coord_equal() +
  scale_fill_gradient2(low = "darkblue", high = "gold", mid = "white") +
  facet_grid(group ~ .) +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
print(p)
# Enrichment heatmap vs the expressed-gene background.
p <- ggplot(overlap_proportions_hg19,
            aes(x = `Experiment target`, y = time_point, fill = log2(selected / expressed_genes))) +
  geom_tile() +
  coord_equal() +
  scale_fill_gradient2(low = "darkblue", high = "gold", mid = "white") +
  facet_grid(group ~ .) +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
print(p)
# --------------------------------------------------------------------------
# Average ChIP signal ("fold change over control" bigWig tracks) around the
# TSS of each gene set, for pooled-replicate GRCh38 tracks.
# --------------------------------------------------------------------------
wig_files_GRCh38 <- hela_encode_metadata %>%
  filter(`File format`== "bigWig",
         `Output type` == "fold change over control",
         `Assembly` == "GRCh38",
         `File Status` == "released",
         `Biological replicate(s)` == "1, 2")
signal_windows <- mclapply(wig_files_GRCh38$`File accession`, function(file_accession) {
  print(file_accession)  # progress marker
  this_wig <- rtracklayer::import(paste0("encode_files/", file_accession, ".bigWig"),
                                  format = "bigWig", as = "RleList")
  # Per-base signal matrix over the promoter window of the 48 h gene set;
  # chromosomes absent from the track yield empty views and are dropped.
  selected_genes_48h_TSS <- promoters(selected_genes_48h)
  selected_genes_48h_views <- Views(this_wig,
                                    GRangesList(lapply(names(this_wig),
                                                       function(x) selected_genes_48h_TSS[seqnames(selected_genes_48h_TSS) == x])))
  selected_genes_48h_views <- selected_genes_48h_views[unlist(lapply(selected_genes_48h_views, length)) > 0]
  selected_genes_48h_matrix <- do.call(rbind, lapply(selected_genes_48h_views, as.matrix))
  selected_genes_48h_frame <- data.frame(reshape2::melt(selected_genes_48h_matrix),
                                         group = "48h_genes", file_accession)
  # Same for the 96 h gene set.
  selected_genes_96h_TSS <- promoters(selected_genes_96h)
  selected_genes_96h_views <- Views(this_wig,
                                    GRangesList(lapply(names(this_wig),
                                                       function(x) selected_genes_96h_TSS[seqnames(selected_genes_96h_TSS) == x])))
  selected_genes_96h_views <- selected_genes_96h_views[unlist(lapply(selected_genes_96h_views, length)) > 0]
  selected_genes_96h_matrix <- do.call(rbind, lapply(selected_genes_96h_views, as.matrix))
  selected_genes_96h_frame <- data.frame(reshape2::melt(selected_genes_96h_matrix),
                                         group = "96h_genes", file_accession)
  # Background: all expressed genes.
  expressed_genes_TSS <- promoters(expressed_genes)
  expressed_genes_views <- Views(this_wig,
                                 GRangesList(lapply(names(this_wig),
                                                    function(x) expressed_genes_TSS[seqnames(expressed_genes_TSS) == x])))
  expressed_genes_views <- expressed_genes_views[unlist(lapply(expressed_genes_views, length)) > 0]
  expressed_genes_matrix <- do.call(rbind, lapply(expressed_genes_views, as.matrix))
  expressed_genes_frame <- data.frame(reshape2::melt(expressed_genes_matrix),
                                      group = "Expressed", file_accession)
  return(rbind(selected_genes_48h_frame, selected_genes_96h_frame, expressed_genes_frame))
}, mc.cores = 12
)
signal_windows <- do.call(rbind, signal_windows)
# Annotate each accession with its ChIP target.
signal_windows <- left_join(signal_windows,
                            select(hela_encode_metadata, `File accession`, `Experiment target`),
                            by = c("file_accession" = "File accession"))
signal_windows <- signal_windows %>% ungroup() %>% mutate(diff_expressed = ifelse(group != "Expressed", TRUE, FALSE))
# Cache the (large) melted signal table for later reuse.
save(signal_windows, file="signal_windows.Rdata")
# Mean signal per position, split by gene set.  Var2 is the column index of
# the promoter window; subtracting 2000 re-centres it on the TSS.
p <- signal_windows %>%
  group_by(group, `Experiment target`, Var2) %>%
  summarise(mean_per_pos = mean(value)) %>%
  ggplot(aes(x = Var2 - 2000, y = mean_per_pos, colour = group)) +
  geom_line() + facet_wrap(~ `Experiment target`, scales = "free_y") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
print(p)
# Same, collapsed to diff-expressed vs expressed background.
p <- signal_windows %>%
  group_by(diff_expressed, `Experiment target`, Var2) %>%
  summarise(mean_per_pos = mean(value)) %>%
  ggplot(aes(x = Var2 - 2000, y = mean_per_pos, colour = diff_expressed)) +
  geom_line() + facet_wrap(~ `Experiment target`, scales = "free_y") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
print(p)
dev.off()
|
ea207d04f0a35d34355e24b066ca7efb44589c78
|
41dfdb38b9437a7cf9965a4aaf002894c973c4c4
|
/data/rivers/eauAM/plot-PHYSICO-CHIMIE.R
|
abfd1f5fa21cfca079835cc0075863429d55e8ad
|
[] |
no_license
|
jpgattuso/PointB-git
|
2f71f1c639b184c56d2c37a5c6fecc2c8bac5a20
|
f5acd1c2d82d59a7e95ad7153ad87ee4002399ec
|
refs/heads/master
| 2022-09-16T21:55:21.963042
| 2022-09-08T18:48:42
| 2022-09-08T18:48:42
| 154,498,482
| 0
| 5
| null | 2022-09-08T18:48:43
| 2018-10-24T12:36:44
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,074
|
r
|
plot-PHYSICO-CHIMIE.R
|
# Plot every physico-chemical parameter of the Alpes-Maritimes surface-water
# data set, one page of small multiples per parameter.
Sys.setenv(TZ = "UTC")

filein <- "eau-sup-AlpesMaritimes-PHYSICO-CHIMIE.RData"
filepdf <- "eau-sup-AlpesMaritimes-PHYSICO-CHIMIE.pdf"

# Loads the data frame 'eau'.
load(filein)
str(eau)

# Print the distinct levels of every column except the measurement itself.
for (n in setdiff(names(eau), "value")) {
  cat("--------------->", n, "\n")
  cat(levels(factor(eau[[n]])), sep = ";")
  cat("\n")
}

# Sampling dates are stored as dd/mm/yy strings.
dates <- as.POSIXct(strptime(eau$date, format = "%d/%m/%y"))
range(dates)

stations <- levels(factor(eau[["station"]]))
params <- levels(factor(eau[["parameter"]]))
str(stations)

pdf(file = filepdf, width = 8, height = 11)
for (param in params) {
  cat("----------------> plot", param, "\n")
  par(mfrow = c(4, 3), cex.lab = 0.7, cex.main = 0.7)
  for (station in stations) {
    i <- eau$station == station & eau$parameter == param
    if (any(i)) {  # equivalent to the original !all(!i)
      cat(station, "", sep = ";")
      dtes <- dates[i]
      values <- eau$value[i]
      # Handling of "<xxx" detection-limit values: set to zero whenever
      # a "<" sign is encountered.
      values[grep("<", values)] <- "0"
      values <- as.numeric(values)
      plot(dtes, values, xlab = "", ylab = param, main = station)
    }
  }
  cat("\n")
  dev.flush()
}
graphics.off()
|
c858c39e97f4979ed593e78737e8069ef3dde2d5
|
b008570d05edad60a3a1675d0053e6b8911c00ef
|
/Lab04/Lab04.R
|
7e890f7e516440abb3bd33a44393eee4d30236a1
|
[] |
no_license
|
brejai/CompeBioLabsAndHomework
|
328c80cdd1a95b972e8adcfec9f9c97bbe38e870
|
4d89ed47297508ff93686392cc8e7f41821db597
|
refs/heads/master
| 2020-12-31T07:09:37.637498
| 2017-04-21T15:46:40
| 2017-04-21T15:46:40
| 80,553,263
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 961
|
r
|
Lab04.R
|
# Lab 04 -- iteration exercises.

# Weekly piggy-bank balance: +$5 allowance, minus two packs of gum at $1.34.
PiggyBank <- 10
Allowance <- 5
Gum <- 2 * 1.34
for (i in seq_len(8)) {
  x <- PiggyBank + Allowance - Gum
  PiggyBank <- x
  print(x)
}

# Population shrinking by 5% per step; print the next seven sizes.
PopSize <- 2000
Shrinkage <- PopSize * 0.05
for (i in seq_len(7)) {
  print(PopSize - Shrinkage)
  x <- PopSize - Shrinkage
  PopSize <- x
  Shrinkage <- PopSize * 0.05
}

# Discrete logistic growth: r = 0.8, carrying capacity 10000, n[1] = 2500.
Start <- 2500
n <- rep(2500, 12)
for (t in 2:12) {
  n[t] <- n[t - 1] + 0.8 * n[t - 1] * (10000 - n[t - 1]) / 10000
  print(n[t])
}
abundance <- n
time <- 1:12
plot(time, abundance)

# Multiples of three: data[i] = 3 * i.
n <- 18
data <- rep(0, n)
for (i in seq_len(n)) {
  data[i] <- 3 * i
}

# Recurrence data[i] = 1 + 2 * data[i-1] with data[1] = 1 (i.e. 2^i - 1).
data <- rep(0, n)
data[1] <- 1
for (i in 2:n) {
  data[i] <- 1 + 2 * data[i - 1]
}

# First 20 Fibonacci numbers.
n <- 20
Fib <- rep(1, n)
for (i in 3:n) {
  Fib[i] <- Fib[i - 1] + Fib[i - 2]
}
# Read the CO2 emissions data and its metadata.
# NOTE(review): paths are relative to the author's sandbox checkout --
# adjust before running elsewhere.
CO2Data <- read.csv("compBioSandbox/CompBio_on_git/Labs/Lab04/CO2_data_cut_paste.csv")
MetaData <- read.csv("compBioSandbox/CompBio_on_git/Labs/Lab04/MetaData_CO2_emissions.txt")
#preallocate a matrix for the data
nRows <- dim(CO2Data)[1]  # number of observations in the CO2 table
|
b677a43c6e1b1172e2e7bcdbe1b9cedcf3d6ea63
|
47c81e91c91d6f321418042a69d5770b5aaadbdf
|
/tools/r_list_versions.R
|
dfc46ba5eba01648f9d1f7bc821608f531e1fc72
|
[
"Apache-2.0"
] |
permissive
|
Kaggle/docker-rstats
|
f6e4c28638e5f9d33de59bcc56ac296da49f2176
|
2a42e7619ff99579011ca9cace98ee4604d9c068
|
refs/heads/main
| 2023-09-01T11:24:00.881089
| 2023-08-22T16:43:21
| 2023-08-22T16:43:21
| 33,904,503
| 135
| 103
|
Apache-2.0
| 2023-08-29T14:50:52
| 2015-04-14T01:46:50
|
R
|
UTF-8
|
R
| false
| false
| 169
|
r
|
r_list_versions.R
|
# Print every user-installed package as a "name==version" line
# (pip-freeze style).  Base/recommended packages carry a Priority
# attribute and are excluded.
ip <- as.data.frame(installed.packages()[, c(1, 3:4)])
ip <- subset(ip, is.na(Priority), select = c(Package, Version))
write.table(ip, quote = FALSE, sep = "==", row.names = FALSE, col.names = FALSE)
|
050fce9be861fc8106911aaf21ddd28090c6ee05
|
f044402735a52fa040c5cbc76737c7950406f8b2
|
/BrCa_Age_Associated_TMA/Packages/biostatUtil/R/plotKMDetail.R
|
0ba68c9f23515f9661fa0f36281b5cec8265c91b
|
[] |
no_license
|
BCCRCMO/BrCa_AgeAssociations
|
5cf34f3b2370c0d5381c34f8e0d2463354c4af5d
|
48a11c828a38a871f751c996b76b77bc33d5a3c3
|
refs/heads/master
| 2023-03-17T14:49:56.817589
| 2020-03-19T02:18:21
| 2020-03-19T02:18:21
| 247,175,174
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,778
|
r
|
plotKMDetail.R
|
#' Plot detailed Kaplan-Meier curves
#'
#' KM plots with details of event counts.
#'
#' @param input.data input `data.frame`
#' @param surv.formula survival formula to `Surv`
#' @param main.text plot title
#' @param xlab.text x-axis label
#' @param ylab.text y-axis label
#' @param line.name name of legend
#' @param line.color line colour of survival curves
#' @param line.pattern line pattern of survival curves
#' @param line.width line width of survival curves
#' @param show.test show single or the reference group value (for pairwise
#'   comparisons). If `"none"`, then no test is show.
#' @param single.test.type test to show if specified `show.test =
#'   "single"`. Possible choices are `"logrank"` (default),
#'   `"wilcoxon"`, `"taroneware"`, or `"all"`.
#' @param round.digits.p.value number of digits for p-value
#' @param obs.survyrs show the observed survival years survival rate on KM plot
#' @param ten.years.surv.95CI show ten year survival 95\% confidence interval
#' @param event.count show the number of events at each time point
#' @param legend.pos legend position keyword
#' @param file.name name of file to save plot to
#' @param file.width width of figure in saved file
#' @param file.height height of figure in saved file
#' @param grey.scale logical. If `TRUE`, the plot will be in grey scale.
#' @param show.single.test.pos position to show single test; defaults to 0.5 if
#'   `legend.pos = "top"`. Otherwise 0.1
#' @param ... additional arguments to `plot`
#' @return A list with elements `log.rank.p.values`, `wilcox.p.values` and
#'   `tarone.ware.p.values`.
#' @author Samuel Leung
#' @references
#' http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes2.pdf
#' @seealso [plotKM()]
#' @export
plotKMDetail <- function(input.data, surv.formula,
                         main.text = "", xlab.text = "", ylab.text = "",
                         line.name, line.color, line.pattern = NULL,
                         line.width = NULL, show.test = "single",
                         single.test.type = "logrank",
                         round.digits.p.value = 4,
                         obs.survyrs, ten.years.surv.95CI, event.count,
                         legend.pos = "bottomleft",
                         file.name = "no.file",
                         file.width = 7, file.height = 7,
                         grey.scale = FALSE, show.single.test.pos = NULL, ...) {
  # Fix: the roxygen docs promise a default for show.single.test.pos, but
  # the argument previously had none, so omitting it errored when
  # show.test == "single".  Implement the documented default here.
  if (is.null(show.single.test.pos)) {
    show.single.test.pos <- ifelse(legend.pos == "top", 0.5, 0.1)
  }
  var.name <- deparse(surv.formula[[3]]) # this should be the biomarker name
  # the deparse() function is used to make sure var.name is a string
  log.rank.p.values <- c()
  wilcox.p.values <- c()
  tarone.ware.p.values <- c()
  fit <- survival::survfit(surv.formula, data = input.data)
  # do not generate a file if "no.file" is specified
  if (file.name != "no.file" && nchar(file.name) > 4) {
    file.ext <- tools::file_ext(file.name)
    if (file.ext == "pdf") {
      grDevices::cairo_pdf(filename = file.name, width = file.width, height = file.height) # good for unicode character in e.g. line.name
    } else if (file.ext %in% c("wmf", "emf", "wmz", "emz")) {
      grDevices::png(filename = file.name, width = file.width, height = file.height)
    } else if (file.ext == "tiff") {
      grDevices::tiff(filename = file.name, width = file.width * 100, height = file.height * 100)
    } else {
      stop("Extension must be one of 'pdf', 'wmf', 'emf', 'wmz', 'emz', and 'tiff'.")
    }
  }
  # in case some strata do not have any cases
  which.strata.have.cases <- table(input.data[, var.name]) > 0
  # default line width
  if (is.null(line.width)) {
    line.width <- 1
  }
  if (grey.scale) {
    # gray scale plot: distinguish strata by line pattern only
    if (is.null(line.pattern)) {
      line.pattern <- c(1:length(line.name))[which.strata.have.cases]
    }
    graphics::plot(fit, lty = line.pattern, lwd = line.width, main = main.text,
                   xlab = xlab.text, ylab = ylab.text, ...)
  } else {
    # color plot
    if (is.null(line.pattern)) {
      line.pattern <- 1
    }
    graphics::plot(fit, col = line.color[which.strata.have.cases],
                   lty = line.pattern, lwd = line.width, main = main.text,
                   xlab = xlab.text, ylab = ylab.text, ...)
  }
  # Legend 1: curve names
  if (legend.pos == "top") {
    x.pos <- diff(range(fit$time, na.rm = TRUE)) / 2
    y.pos <- 0.99 # top 1% ... since survival plot always starts at 100% survival
  } else {
    x.pos <- legend.pos
    y.pos <- NULL
  }
  l1 <- graphics::legend(
    x = x.pos, y = y.pos, legend = line.name,
    lty = line.pattern, lwd = line.width, box.lty = 0, cex = 0.8
  )
  # there seems to be need for the y-axis adjustment depending on the file.height ...
  dy <- 0.02 * (file.height - 7) / (12 - 7) # determined empirically
  if (legend.pos == "top") {
    y.pos <- l1$rect$top + dy
  } else {
    y.pos <- l1$rect$h - dy
  }
  # Legend 2 & 3: per-stratum survival CI and event counts, laid out to the
  # right of legend 1
  l2 <- graphics::legend(
    x = l1$rect$w + l1$rect$left, y = y.pos,
    legend = ten.years.surv.95CI,
    title = paste0(obs.survyrs, "yr 95% CI"), title.col = 1,
    box.lty = 0, cex = 0.8
  )
  l3 <- graphics::legend(
    x = l1$rect$w + l1$rect$left + l2$rect$w, y = y.pos,
    legend = event.count,
    title = "Events/N", title.col = 1,
    box.lty = 0, cex = 0.8
  )
  graphics::box()
  if (show.test == "single") {
    # Single overall comparison across all strata.  survdiff's rho selects
    # the test family: 0 = log-rank, 1 = Gehan-Wilcoxon, 0.5 = Tarone-Ware.
    log.rank.test <- survival::survdiff(surv.formula, data = input.data,
                                        rho = 0)
    gehan.wilcox.test <- survival::survdiff(surv.formula, data = input.data,
                                            rho = 1)
    tarone.ware.test <- survival::survdiff(surv.formula, data = input.data,
                                           rho = 0.5)
    p.value <- getPval(log.rank.test)
    log.rank.p.values <- p.value
    p.value <- round(p.value, digits = round.digits.p.value)
    gehan.wilcox.p.value <- getPval(gehan.wilcox.test)
    wilcox.p.values <- gehan.wilcox.p.value
    gehan.wilcox.p.value <- round(gehan.wilcox.p.value, digits = round.digits.p.value)
    tarone.ware.p.value <- getPval(tarone.ware.test)
    tarone.ware.p.values <- tarone.ware.p.value
    tarone.ware.p.value <- round(tarone.ware.p.value, digits = round.digits.p.value)
    graphics::text(
      x = l1$rect$w + l1$rect$left + l2$rect$w + 1.3 * l3$rect$w,
      y = show.single.test.pos, # position of the test statistics on plot
      paste0(
        ifelse(sum(single.test.type %in% c("logrank", "all")) >= 1,
               paste0("Log-Rank p=",
                      sprintf(paste0("%.", round.digits.p.value, "f"),
                              p.value), "\n"), ""),
        ifelse(sum(single.test.type %in% c("wilcoxon", "all")) >= 1,
               paste0("Wilcoxon p=",
                      sprintf(paste0("%.", round.digits.p.value, "f"),
                              gehan.wilcox.p.value), "\n"), ""),
        ifelse(sum(single.test.type %in% c("taroneware", "all")) >= 1,
               paste0("Tarone-Ware p=",
                      sprintf(paste0("%.", round.digits.p.value, "f"),
                              tarone.ware.p.value), "\n"), "")),
      adj = c(0, 0),
      cex = 0.8)
  } else if (show.test != "none") {
    # assume show.test names the reference group; compute pairwise tests of
    # every other group against it
    legend.txt <- c()
    value.names <- names(table(input.data[, var.name]))
    for (value.name in value.names) {
      if (value.name == show.test) {
        # this is the reference group
        legend.txt <- c(legend.txt, "reference group")
      } else {
        # construct data with just the reference group and this group
        temp.d <- input.data[input.data[, var.name] == show.test |
                               input.data[, var.name] == value.name, ]
        if (sum(input.data[, var.name] == value.name, na.rm = TRUE) == 0) {
          # no case in this group
          p.value <- NA
          w.p.value <- NA
          t.p.value <- NA
        } else {
          # calculate log-rank / Wilcoxon / Tarone-Ware p-values
          p.value <- getPval(survival::survdiff(surv.formula, data = temp.d, rho = 0))
          log.rank.p.values <- c(log.rank.p.values, p.value)
          p.value <- round(p.value, digits = round.digits.p.value)
          w.p.value <- getPval(survival::survdiff(surv.formula, data = temp.d, rho = 1))
          wilcox.p.values <- c(wilcox.p.values, w.p.value)
          w.p.value <- round(w.p.value, digits = round.digits.p.value)
          t.p.value <- getPval(survival::survdiff(surv.formula, data = temp.d, rho = 0.5))
          tarone.ware.p.values <- c(tarone.ware.p.values, t.p.value)
          t.p.value <- round(t.p.value, digits = round.digits.p.value)
        }
        new.txt <- paste0(
          ifelse("logrank" %in% single.test.type, paste0(p.value, " / "), ""),
          ifelse("wilcoxon" %in% single.test.type, paste0(w.p.value, " / "), ""),
          ifelse("taroneware" %in% single.test.type, t.p.value, ""))
        if (endsWith(new.txt, " / ")) {
          new.txt <- substr(new.txt, 0, nchar(new.txt) - 3)
        }
        legend.txt <- c(legend.txt, new.txt)
      }
    }
    legend.title <- paste0(
      ifelse("logrank" %in% single.test.type, "Log-Rank / ", ""),
      ifelse("wilcoxon" %in% single.test.type, "Wilcoxon / ", ""),
      ifelse("taroneware" %in% single.test.type, "Tarone-Ware ", ""))
    if (endsWith(legend.title, " / ")) {
      legend.title <- substr(legend.title, 0, nchar(legend.title) - 2)
    }
    legend.title <- paste0(legend.title, "P-values")
    # NOTE(review): unlike l2/l3 above, this x position omits l1$rect$left;
    # kept as-is to preserve the existing layout -- confirm intended.
    l4 <- graphics::legend(
      x = l1$rect$w + l2$rect$w + l3$rect$w, y = y.pos, #y=l1$rect$h,
      legend = legend.txt,
      #text.col=line.color,
      title = legend.title,
      title.col = 1,
      box.lty = 0,
      cex = 0.8
    )
  }
  if (file.name != "no.file") {
    # do not generate a file if "no.file" is specified
    grDevices::dev.off()
  }
  return(list(
    "log.rank.p.values" = log.rank.p.values,
    "wilcox.p.values" = wilcox.p.values,
    # Fix: Tarone-Ware p-values were computed but never returned; adding a
    # list element is backward compatible for existing callers.
    "tarone.ware.p.values" = tarone.ware.p.values
  ))
}
|
85cf3f8990d0b90fe4c5b40073493ad01bef4c85
|
f3ef1a4de6101d315afaeadeb3c0192033fd1a57
|
/covid_notebook.R
|
834e6f6dd3d0a2a4a6bcab4650a720accd9eb5b6
|
[] |
no_license
|
increasingemtropy/covid_notebook
|
6e5a95e5dc9d80ca569ef375b54c8fc9bc475a45
|
b83678d193ad345da0c02c4a4cc4c29c567bb86b
|
refs/heads/master
| 2022-12-22T20:29:56.051606
| 2020-09-29T13:09:52
| 2020-09-29T13:09:52
| 293,780,123
| 0
| 0
| null | 2020-09-29T13:09:53
| 2020-09-08T10:43:32
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,068
|
r
|
covid_notebook.R
|
library(ggplot2)
# Fetch the latest COVID-19 case data from the ECDC open data portal.
coronavirus_data <- read.csv("https://opendata.ecdc.europa.eu/covid19/casedistribution/csv", na.strings = "", fileEncoding = "UTF-8-BOM")
data_subset <- subset(coronavirus_data,countriesAndTerritories %in% c("United_Kingdom","Germany","France","Spain","Japan","South_Korea"))
# Plot the 14-day cumulative case rate over time for the selected countries.
# BUG FIX: the ECDC column is cases per 100,000 population, but the original
# title/axis labels said "per 10k". Labels now match the data.
plt <- ggplot(data_subset,aes(x=as.Date(dateRep,format = "%d/%m/%y"),y=Cumulative_number_for_14_days_of_COVID.19_cases_per_100000,color=countriesAndTerritories)) +
  geom_line(size=1) +
  coord_cartesian(xlim=c(as.Date('2020-03-15'),Sys.Date())) +
  labs(title ="14 day total cases per 100k population", x = "Date", y = "14 day case sum per 100k pop")+
  theme(legend.position = "top")+
  scale_color_brewer(name="",breaks=c("United_Kingdom","Germany","France","Spain","Japan","South_Korea"),labels=c("UK","Germany","France","Spain","Japan","South Korea"),palette = "Dark2")
plt
# Save with a date-stamped filename (paste0 instead of paste(..., sep = '')).
filename <- paste0("coronavirus_plot_", Sys.Date(), ".png")
ggsave(filename, width = 8, height = 5)
|
f86fb4ba757ae4dc026f725b184cebac4197ba68
|
9b1a760d45e21998b9d3871a1f4dac3a7a90c05a
|
/man/magplot.Rd
|
a2ac7276d9424766b6b9703fe91f5042b8276eab
|
[] |
no_license
|
asgr/magicaxis
|
ac10b0b054128025976cb6b51003816cbd2157a9
|
0e3a56587021f8c22f86a3eda87907d8dfbe9e39
|
refs/heads/master
| 2023-06-21T11:28:06.031052
| 2023-06-19T06:30:03
| 2023-06-19T06:30:03
| 13,343,972
| 9
| 4
| null | 2020-10-22T07:14:05
| 2013-10-05T11:12:59
|
R
|
UTF-8
|
R
| false
| false
| 11,633
|
rd
|
magplot.Rd
|
\name{magplot}
\alias{magplot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Magically pretty plots
}
\description{
Makes scientific plots based on magaxis axes. Particularly designed for log plotting. Utilises base plot for the most part, but the axis drawing is replaced by a call to the magaxis function.
}
\usage{
magplot(x, y, z = NULL, log = "", main = "", side = 1:2, majorn = 5, minorn = 'auto',
tcl = 0.5, ratio = 0.5, labels = TRUE, unlog = "auto", mgp = c(2,0.5,0), mtline = 2,
xlab = '', ylab = '', crunch = TRUE, logpretty = TRUE, prettybase = 10, powbase = 10,
hersh = FALSE, family = "sans", frame.plot = TRUE, usepar = FALSE, grid = TRUE,
grid.col = 'grey90', grid.lty = 1, grid.lwd = 1, xlim = NULL, ylim = NULL, lwd = 1,
axis.lwd = 1, ticks.lwd = axis.lwd, axis.col = 'black', zcol = hcl.colors(21),
zstretch = 'lin', dobar = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
The x coordinates of points/lines in the plot. Alternatively, a single plotting structure, function or any R object with a plot method can be provided.
}
\item{y}{
The y coordinates of points/lines in the plot, optional if x is an appropriate structure.
}
\item{z}{
The z coordinates for colour scaling of points in the plot. This will be passed through \code{\link{magmap}}, with dots passed as relevant.
}
\item{log}{
Log axis arguments to be passed to plot. E.g. use 'x', 'y', 'xy' or 'yx' as appropriate. Default '' assumes no logging of any axes.
}
\item{main}{
Title for the plot. Default is no title.
}
\item{side}{
The side to be used for axis labelling in the same sense as the base axis function (1=bottom, 2=left, 3=top, 4=right). A vector of multiple entries is allowed. By default, bottom and left axes are drawn (i.e. side 1 and 2). If \option{side}=FALSE then no sides or labels will be drawn.
}
\item{majorn}{
The target number of major-axis sub-divisions for pretty plotting. If length is 1 and length of side is longer than this value is used for all axes. If length of arguments is longer than 1 then these should tally with the relevant axes in side. Obvious reason for varying this is different pretty labelling between a and y axes.
}
\item{minorn}{
The exact number of minor-axis divisions (i.e. desired minor ticks + 1) to display in plotting. Auto will produce \code{\link{pretty}} ticks for linear scaling, and powbase-2 minor ticks for logged (this might seem odd, but for base 10 this means ticks at 2/3/4/5/6/7/8/9, which is probably as desired). If set manually, must be greater than 1 to have a visible effect. Minor ticks are always calculated to be equally spaced in linear space, so tick spaces vary when using log plotting. If length is 1 and length of side is longer than this value is used for all axes. If length of arguments is longer than 1 then these should tally with the relevant axes in side. An obvious reason for varying this is different pretty labelling between x and y axes.
}
\item{tcl}{
The length of major tick marks as a fraction of the height of a line of text. By default these face into the plot (in common with scientific plotting) with a value of 0.5, rather than the R default of -0.5. It is possible to force magaxis to inherit directly from par by setting usepar=TRUE (see below). See \code{\link{par}} for more details.
}
\item{ratio}{
Ratio of minor to major tick mark lengths.
}
\item{labels}{
Specifies whether major-axis ticks should be labelled for each axis. If length is 1 and length of side is longer than this value is used for all axes. If length of arguments is longer than 1 then these should tally with the relevant axes in side. Default is to label all axes.
}
\item{unlog}{
Determines if axis labels should be unlogged. If axis is found to be logged in par('usr') then the minor ticks are automatically log spaced, however "unlog" still controls how the labelling is done: either logged form (FALSE) or exponent form (TRUE). If axis has been explicitly logged (e.g. log10(x)) then this will can produce exponential axis marking/labelling if set to TRUE. This case will also produce log minor tick marks. If length of unlog is 1 and length of side is longer than 1 then the assigned unlog value is used for all axes. If length of arguments is longer than 1 then these should tally with the relevant axes in side. Can also take the text argument 'x', 'y', 'xy' or 'yx', where these refer to which axes have been logged. If left at the default of `auto' then unlog is assumed to be true when the axis in question is logged, and false otherwise.
}
\item{mgp}{
The margin line (in mex units) for the axis title, axis labels and axis line. This has different (i.e. prettier) defaults than R of c(2,0.5,0) rather than c(3,1,0). This pushes the numbers and labels nearer to the plot compared to the defaults. It is possible to force magaxis to inherit directly from par by setting usepar=TRUE (see below). See \code{\link{par}} for more details.
}
\item{mtline}{
Number of lines separating axis name from axis. If length 2 then specifies x and y axis separation respectively (else these are the same).
}
\item{xlab}{
x axis name.
}
\item{ylab}{
y axis name.
}
\item{crunch}{
In cases where the scientific text would be written as 1x10^8, should the 1x be removed so it reads 10^8. If length is 1 and length of side is longer then this value is used for all axes. If length of arguments is longer than 1 then these should tally with the relevant axes in side. TRUE by default.
}
\item{logpretty}{
Should the major-ticks only be located at powers of 10. This changes cases where ticks are placed at 1, 3.1, 10, 31, 100 etc to 1, 10, 100. If length is 1 and length of side is longer then this value is used for all axes. If length of arguments is longer than 1 then these should tally with the relevant axes in side. TRUE by default.
}
\item{prettybase}{
The unit of repitition desired. By default it is 10, implying a pretty plot is one with marks at 10, 20, 30 etc. If you are plotting degrees then it might be prettier to display 90, 180, 270 etc. In which case prettybase should be set to 90. If log=TRUE then the reference location of 10 is changed, so in the previous example the labels generated would be at 9, 90, 900 etc rather than the deafult of 1, 10, 100 etc. If length is 1 and length of side is longer then this value is used for all axes. If length of arguments is longer than 1 then these should tally with the relevant axes in side.
}
\item{powbase}{
Set the base to use for logarithmic axes. Default is to use 10.
}
\item{hersh}{
To determines whether all plot text should be passed using Hershey vector fonts. This applies to the axis labels (which are handled automatically) and the axis names. In the case of axis names the user must be careful to use the correct plot utils escape characters: http://www.gnu.org/software/plotutils/manual/en/html_node/Text-String-Format.html.
magaxis will return back to the current plotting family after the function has executed.
}
\item{family}{
Specifies the plotting family to be used. Allowed options are 'sans' and 'serif'. Depending on whether hersh is TRUE or FALSE these otions are either applied to the Hershey vector fonts (hersh=TRUE) or the default R Helvetica font (hersh=FALSE). magaxis will return back to the current plotting family after the function has executed.
}
\item{frame.plot}{
Logical indicating whether a box should be drawn around the plot.
}
\item{usepar}{
Logical indicating whether tcl and mgp should be forced to inherit the global par values. This might be preferred when you want to define global plot settings at the start of a script.
}
\item{grid}{
Logical indicating whether a background grid should be drawn onto the plotting area. If true this will generate vertical and horiztonal grid lines. For more control (i.e. to only draw horizontal or verical lines) see \code{link{magaxis}}.
}
\item{grid.col}{
The colour of the grid to be drawn.
}
\item{grid.lty}{
The line type of the grid to be drawn.
}
\item{grid.lwd}{
The line width of the grid to be drawn.
}
\item{xlim}{
Vector; range of data to display. If this is set to NULL (default) then the limits will be estimated from the data dynamically. If length equals 1 then the argument is taken to mean the sigma range to select for plotting and the clipping is done by \code{\link{magclip}}.
}
\item{ylim}{
Vector; range of data to display. If this is set to NULL (default) then the limits will be estimated from the data dynamically. If length equals 1 then the argument is taken to mean the sigma range to select for plotting and the clipping is done by \code{\link{magclip}}.
}
\item{lwd}{
The width of plot lines to be drawn. This has different behaviour depending on the plot type.
}
\item{axis.lwd}{
The line width of the axis to be drawn. This is passed to \option{lwd} argument in \code{\link{axis}}.
}
\item{ticks.lwd}{
The line width of the ticks to be drawn. This is passed to \option{ticks.lwd} argument in \code{\link{axis}}.
}
\item{axis.col}{
Colour argument to pass directly to \option{col} in axis. It is a bit clunky to have to specify this, but the option 'col' clashes too much with line and point colours.
}
\item{zcol}{
Vector; a colour palette to use for \option{z} mapped colours. Must be a vector and not a function. Only relevant if data has been passed to \option{z}.
}
\item{zstretch}{
Character scalar; \option{z} colour stretch, either linear (lin, default) or logarithmic (log, good for large dynamic ranges).
}
\item{dobar}{
Logical; should a colour bar be added to the plot?
}
\item{\dots}{
Further arguments to be passed to: base \code{\link{plot}}; \code{\link{magaxis}} -> \code{\link{axis}}; \code{\link{magmap}} and \code{\link{magbar}} if \option{z} scaling is being used.
}
}
\details{
This is a simple function that just turns off most of the plotting output of base plot, and replaces where possible those present in magaxis.
If \option{x} is a data.frame with more than 2 columns then the utility base \code{\link{plot}} data.frame plotting function is used to create a full plotting grid. This ignores \code{\link{magaxis}} settings entirely.
Setting \option{xlim} or \option{ylim} to a single numeric value triggers sigma clipping of the data via \code{\link{magclip}}, as described for those arguments above.
}
\value{
No output. Run for the side effect of producing nice plotting axes.
}
\author{
Aaron Robotham
}
\seealso{
\code{\link{magaxis}}, \code{\link{maglab}}, \code{\link{magerr}}, \code{\link{magmap}}, \code{\link{magrun}}
}
\examples{
x=10^{1:9}
y=1:9
magplot(log10(x),y,unlog='x')
magplot(x,y,log='x')
#Not ideal to have two decades between major labels:
magplot(x,y,log='x',majorn=c(10,5))
magplot(x,y,log='xy',majorn=c(10,5,5,5),side=1:4)
#Sometimes it is helpful to focus on where most of the data actually is.
#Using a single value for xlim and ylim sigma clips the data to that range.
#Here a value of 2 means we only show the inner 2-sigma (2\% to 98\%) range.
#The 'auto' option allows magclip to dynamically estimate a clip value.
temp=cbind(rt(1e3,1.5),rt(1e3,1.5))
magplot(temp)
magplot(temp, xlim=2, ylim=2)
magplot(temp, xlim='auto', ylim='auto')
#Some astronomy related examples (and how to display the solar symbol):
temp=cbind(runif(10,8,12),runif(10,0,5))
magplot(temp[,1:2], xlab=expression(M['\u0298']), ylab=expression(M['\u0298']/Yr), unlog='xy')
#With z scaling
z=sqrt(9:1)
magplot(x, y, z, log='x', position='topleft')
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{plot}
\keyword{axis}% __ONLY ONE__ keyword per line
\keyword{log}
|
169d7c25787eac2b488ef76575988a9b6f622bcb
|
9bc986ed5c9830e9b28406758b6c5a0e41f1fdcf
|
/R_DataAnalysis/R/1.lm_basic3.R
|
9279193c20061774cfabf180bef7390517cca7e5
|
[] |
no_license
|
Jerrykim91/Bigdata_Analytics
|
ca9e8c86f494d44336091729c08da541e315a929
|
fab1f0522ff575537903790facf00f5a5c913262
|
refs/heads/master
| 2020-11-25T06:35:34.169050
| 2020-05-11T01:48:09
| 2020-05-11T01:48:09
| 228,540,309
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 865
|
r
|
1.lm_basic3.R
|
# Repeated hold-out evaluation of linear regression on mtcars: for 10 random
# 70/30 splits, compare the full model (all predictors) against a reduced
# model keeping only predictors with |coefficient| > 0.5, tracking test MSE.
require(graphics)
## Split 100% of the rows, e.g. 70 : 30 (or 80 : 20)
# 70% -> training / 30% -> validation (test)
# 4 : 3 : 3 (training / validation / test)
m1<-c()
m2<-c()
for(i in 1:10){
# Random 70% of row indices for training; the remainder is the test set.
sam<-sample(1:nrow(mtcars),nrow(mtcars)*0.7)
fit2<-lm(mpg~.,data=mtcars[sam,])
# mean((fit2$residuals^2)) # MSE
pred<-predict(fit2,mtcars[-sam,])
# Test MSE of the full model (column 1 of mtcars is mpg, the response).
m1[i]<-mean((pred-mtcars[-sam,1])^2)
## For regression, common model performance metrics are MSE / MAPE / MAE
# Keep only predictors whose coefficient magnitude exceeds 0.5
# (drop the intercept with [-1]) and refit a reduced model.
index<-abs(fit2$coefficients)[-1] >0.5
var<-names(index)[index==T]
fo<-paste0("mpg~",paste(var,collapse = "+"))
fit3<-lm(fo,data=mtcars[sam,])
pred2<-predict(fit3,mtcars[-sam,])
# Test MSE of the reduced model.
m2[i]<-mean((pred2-mtcars[-sam,1])^2)
cat("\n",i)
# Overlay the running MSE traces: full model in red, reduced model in blue.
ts.plot(cbind(m1,m2),col=c("red","blue"))
}
mean(m1)
mean(m2)
## Multiple linear regression
## (several predictors; assumes a linear relationship between x and y)
# Residuals of the last reduced-model fit.
ts.plot(fit3$residuals)
|
3297148f6201f79a95ba778c93fe3731e25e364e
|
162ad14e40fb0ffba7a8b52c83c3a3406d60adc2
|
/man/get.mat.omega.Rd
|
3040bf671fed15580787cf525456026bbb751dc6
|
[] |
no_license
|
guillaumeevin/GWEX
|
c09c1f53a7c54eebc209b1f4aa5b8484fb59faf6
|
b1cae5f753a625d5963507b619af34efa2459280
|
refs/heads/master
| 2023-01-21T10:01:28.873553
| 2023-01-16T11:10:16
| 2023-01-16T11:10:16
| 172,738,929
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 855
|
rd
|
get.mat.omega.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GWexPrec_lib.r
\name{get.mat.omega}
\alias{get.mat.omega}
\title{get.mat.omega}
\usage{
get.mat.omega(cor.obs, Qtrans.mat, mat.comb, nLag, nChainFit, isParallel)
}
\arguments{
\item{cor.obs}{matrix p x p of observed correlations between occurrences for all pairs of stations}
\item{Qtrans.mat}{transition probabilities, 2 x ncomb matrix}
\item{mat.comb}{matrix of logical: ncomb x nlag}
\item{nLag}{order of the Markov chain}
\item{nChainFit}{length of the simulated chains used during the fitting}
\item{isParallel}{logical: indicate computation in parallel or not (easier for debugging)}
}
\value{
\item{matrix}{omega correlations for all pairs of stations}
}
\description{
Find the omega correlation leading to the estimated correlations between occurrences.
}
\author{
Guillaume Evin
}
|
ee357d4acd9c4873c786776299abeff475585762
|
eb89d6f54071a8c7bb9fc213b94d1af101121723
|
/tests/testthat/test-sag-gradients.R
|
8695c020c6567b34007ecab2017d8f7a9d0d1af5
|
[] |
no_license
|
tdhock/bigoptim
|
cfe613c7098589e8179bee7c5805e69bd1e2fdf5
|
60245e925c9e8eb6ca7c48da9662eb0f2109d15f
|
refs/heads/master
| 2020-12-01T13:07:31.289218
| 2015-06-09T14:26:03
| 2015-06-09T14:26:03
| 37,132,104
| 0
| 0
| null | 2015-06-09T12:58:35
| 2015-06-09T12:58:34
| null |
UTF-8
|
R
| false
| false
| 1,726
|
r
|
test-sag-gradients.R
|
context("Gradient tests")
## test parameters
eps <- 1e-08
L2regularized.logistic.regression.gradient <- function(X, y, lambda, weight){
  # Gradient of the L2-regularized logistic regression objective
  #   f(w) = (1/n) * sum_i log(1 + exp(-y_i * x_i' w)) + (lambda/2) * ||w||^2
  # evaluated at w = weight, assuming labels y are coded in {-1, +1}.
  # The original body was a stop("TODO ...") stub.
  # NOTE(review): the 1/n scaling and {-1,+1} label coding follow the usual
  # SAG formulation -- confirm against sag_constant's objective.
  n <- nrow(X)
  margin <- as.vector(X %*% weight) * y
  # sigma(-margin): per-sample probability mass on the "wrong" side.
  p <- 1 / (1 + exp(margin))
  as.vector(-crossprod(X, y * p) / n + lambda * weight)
}
data(covtype.libsvm)
test_that("gradient of covtype.libsvm is close to 0", {
lambda <- 1
fit <- with(covtype.libsvm, sag_constant(X, y, lambda))
gradient <- with(covtype.libsvm, {
L2regularized.logistic.regression.gradient(X, y, lambda, fit$w)
})
expect_less_than(sum(abs(gradient)), eps)
})
data(rcv1_train)
test_that("gradient of rcv1_train is close to 0", {
lambda <- 1
fit <- with(rcv1_train, sag_constant(X, y, lambda))
gradient <- with(rcv1_train, {
L2regularized.logistic.regression.gradient(X, y, lambda, fit$w)
})
expect_less_than(sum(abs(gradient)), eps)
})
## Simulating logistic datasets
true_params <- c(1, 2, 3)
sample_size <- 1000
sim <- .simulate_logistic(true_params, sample_size, intercept=FALSE)
#################################
## SAG with Constant Step Size ##
#################################
test_that("constant step size Sag gradient norm is zero", {
## Fitting SAG
pryr::mem_change({
sag_fit <- sag_constant(sim$X, sim$y, lambda=0, maxiter=NROW(sim$X) * 100)
})
expect_less_than(norm(sag_fit$d, type="F"), eps)
})
#########################
## SAG with linesearch ##
#########################
test_that("linesearch SAG gradient norm is zero", {
expect_less_than(sag_ls(), eps)
})
##########################################################
## SAG with line-search and adaptive Lipschitz Sampling ##
##########################################################
test_that("linesearch adaptive sag gradient norm is zero", {
expect_less_than(sag_adaptive_ls(), eps)
})
|
77b63fd9fad3a836b137661aa6475a17148df3fb
|
77776736c4b7311499e249e2d5f636e6ecbb4768
|
/static/slides/penalized-regression.R
|
2777a5b99466c754857d358b14a6e5d1b08c59d6
|
[] |
no_license
|
turgeonmaxime/w20-stat7200
|
69227bed4b8ba30bfbce7c954cf91f5b278b45f8
|
0f30c8f753dd73756029bd44e8379610f4d9ed96
|
refs/heads/master
| 2020-09-29T02:38:56.815904
| 2020-04-03T22:40:07
| 2020-04-03T22:40:07
| 226,928,767
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,983
|
r
|
penalized-regression.R
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(cache=FALSE)
## ---- message = FALSE---------------------------------------------------------
library(tidyverse)
url <- "https://maxturgeon.ca/w20-stat7200/prostate.csv"
prostate <- read_csv(url)
# Separate into training and testing sets
data_train <- filter(prostate, train == TRUE) %>%
dplyr::select(-train)
data_test <- filter(prostate, train == FALSE) %>%
dplyr::select(-train)
## -----------------------------------------------------------------------------
# OLS
model1 <- lm(lpsa ~ .,
data = data_train)
pred1 <- predict(model1, data_test)
mean((data_test$lpsa - pred1)^2)
## -----------------------------------------------------------------------------
# Ridge regression
X_train <- model.matrix(lpsa ~ .,
data = data_train)
Y_train <- data_train$lpsa
B_ridge <- solve(crossprod(X_train) + diag(0.7, 9),
t(X_train)) %*% Y_train
## -----------------------------------------------------------------------------
X_test <- model.matrix(lpsa ~ .,
data = data_test)
pred2 <- X_test %*% B_ridge
mean((data_test$lpsa - pred2)^2)
## -----------------------------------------------------------------------------
# Compare both estimates
head(cbind(coef(model1), B_ridge))
## -----------------------------------------------------------------------------
mse_df <- purrr::map_df(seq(0, 5, by = 0.1),
function(lambda) {
B_ridge <- solve(crossprod(X_train) + diag(lambda, 9),
t(X_train)) %*% Y_train
pred2 <- X_test %*% B_ridge
mse <- mean((data_test$lpsa - pred2)^2)
return(data.frame(MSE = mse,
lambda = lambda))
})
## -----------------------------------------------------------------------------
ols_mse <- mean((data_test$lpsa - pred1)^2)
ggplot(mse_df, aes(lambda, MSE)) +
geom_line() + theme_minimal() +
geom_hline(yintercept = ols_mse)
## ---- message = FALSE---------------------------------------------------------
library(glmnet)
# Fit for multiple values of lambda
X_train <- model.matrix(lpsa ~ . - 1,
data = data_train)
ridge_fit <- glmnet(X_train, data_train$lpsa,
alpha = 0,
lambda = seq(0, 5, by = 0.1))
## ---- message = FALSE---------------------------------------------------------
# Plot the value of the coefficients
# as a function of lambda
plot(ridge_fit, xvar = "lambda")
abline(h = 0, lty = 2)
## -----------------------------------------------------------------------------
# Fit lasso regression along the same lambda sequence
lasso_fit <- glmnet(X_train, data_train$lpsa,
alpha = 1, # For lasso regression
lambda = seq(0, 5, by = 0.1))
## -----------------------------------------------------------------------------
X_test <- model.matrix(lpsa ~ . - 1,
data = data_test)
lasso_pred <- predict(lasso_fit, newx = X_test)
lasso_mse <- apply(lasso_pred, 2, function(col) {
mean((data_test$lpsa - col)^2)
})
## -----------------------------------------------------------------------------
lasso_mse_df <- data.frame(MSE = lasso_mse,
lambda = seq(0, 5, by = 0.1))
ggplot(mse_df, aes(lambda, MSE)) +
geom_line() + theme_minimal() +
geom_hline(yintercept = ols_mse) +
geom_line(data = lasso_mse_df, colour = 'red')
## -----------------------------------------------------------------------------
# Plot the value of the coefficients
# as a function of lambda
plot(lasso_fit, xvar = "lambda")
abline(h = 0, lty = 2)
## -----------------------------------------------------------------------------
# Where is the min MSE?
filter(lasso_mse_df, MSE == min(MSE))
# What are the estimates?
coef(lasso_fit, s = 4.9)
## -----------------------------------------------------------------------------
# Take all the data
dataset <- dplyr::select(prostate, -train)
dim(dataset)
## ---- message = FALSE---------------------------------------------------------
set.seed(7200)
library(caret)
# 5-fold CV
trainIndex <- createFolds(dataset$lpsa, k = 5)
str(trainIndex)
## -----------------------------------------------------------------------------
# Define function to compute MSE
compute_mse <- function(prediction, actual) {
  # `prediction` arrives as a matrix with one column per lambda value;
  # subtracting the observed vector recycles it down each column, so
  # colMeans of the squared residuals gives the per-column MSE.
  # colMeans preserves column names, matching apply(..., 2, ...).
  colMeans((actual - prediction)^2)
}
## -----------------------------------------------------------------------------
MSEs <- sapply(trainIndex, function(indices){
X_train <- model.matrix(lpsa ~ . - 1,
data = dataset[-indices,])
Y_train <- dataset$lpsa[-indices]
X_test <- model.matrix(lpsa ~ . - 1,
data = dataset[indices,])
lasso_fit <- glmnet(X_train, Y_train, alpha = 1,
lambda = seq(0, 5, by = 0.1))
lasso_pred <- predict(lasso_fit, newx = X_test)
compute_mse(lasso_pred, dataset$lpsa[indices])
})
## -----------------------------------------------------------------------------
# Each column is for a different fold
dim(MSEs)
CV_MSE <- colMeans(MSEs)
seq(0, 5, by = 0.1)[which.min(CV_MSE)]
## -----------------------------------------------------------------------------
# What are the estimates?
coef(lasso_fit, s = 0.4)
## -----------------------------------------------------------------------------
# Conveniently, glmnet has a function for CV
# It also chooses the lambda sequence for you
X <- model.matrix(lpsa ~ . -1, data = dataset)
lasso_cv_fit <- cv.glmnet(X, dataset$lpsa, alpha = 1,
nfolds = 5)
## -----------------------------------------------------------------------------
c("lambda.min" = lasso_cv_fit$lambda.min,
"lambda.1se" = lasso_cv_fit$lambda.1se)
# What are the estimates?
coef(lasso_cv_fit, s = 'lambda.min')
# 1 SE rule
coef(lasso_cv_fit, s = 'lambda.1se')
|
d54304e426a7dc1c4101236a6a4fd361ccb0e37b
|
19984c47727b920e9c32ef30639abfd1d744b8ba
|
/Lab07/Lab07.R
|
f917dc322b260148661fbb1735a153cc30136741
|
[] |
no_license
|
jadi9906/LABS
|
91665dc0476d561bcf2927c5fd371079e7e03201
|
8b94e8a55dc1cca154512d8aac8dd1dde9964c3a
|
refs/heads/master
| 2020-12-20T12:16:06.560803
| 2020-05-01T02:10:03
| 2020-05-01T02:10:03
| 236,072,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,271
|
r
|
Lab07.R
|
#Lab 07: "Put the FUN in FUNction! :-)
#Feb 28, 2020
#Jacob Di Biase
#
#Problem 1
#The area of a triangle can be calculated as 0.5 * base * height
# Write a function named triangleArea that calculates and returns
#the area of a triangle when given two arguments (base and height).

# Area of a triangle with base b and height h; vectorized over both inputs.
# BUG FIX: the original computed (b + h) * 0.5 -- half the SUM of base and
# height -- instead of the product 0.5 * b * h required by the formula above.
triangleArea <- function(b, h) {
  0.5 * b * h
}
triangleArea(10, 9)  # 45
#Problem #2
#2A: Write a function named myAbs() that calculates and returns absolute values.
# Absolute value via sqrt(x * x); works elementwise, so it is naturally
# vectorized for numeric vectors.
myAbs <- function(x) {
  sqrt(x * x)
}
myAbs(5)
myAbs(-2.3)
#2B: Revise your function to make it work on vectors
# No revision needed -- the 2A definition was already vectorized.
myAbs <- function(x) {
  sqrt(x * x)
}
example <- c(1.1, 2, 0, -4.3, 9, -12)
myAbs(example)
#Problem #3 Fibonacci sequence
# Return a vector of the first x Fibonacci numbers.
#   x: desired sequence length (n)
#   y: starting number, either 0 (0, 1, 1, 2, ...) or 1 (1, 1, 2, 3, ...)
myfib <- function(x, y) {
  # Robustness fix: the original silently returned all zeros for any
  # y not in {0, 1}; fail loudly instead.
  if (!y %in% c(0, 1)) {
    stop("y must be 0 or 1")
  }
  fib <- rep(0, x)
  # Seed the first two terms according to the chosen starting number.
  if (y == 1) {
    fib[1] <- 1
    fib[2] <- 1
  } else {
    fib[1] <- 0
    fib[2] <- 1
  }
  # BUG FIX: for x < 3 the original `for (i in 3:x)` counted DOWN
  # (e.g. 3:2) and grew the vector; only apply the recurrence when x >= 3.
  if (x >= 3) {
    for (i in 3:x) {
      fib[i] <- fib[i - 1] + fib[i - 2]
    }
  }
  # Truncate to exactly x terms (seeding fib[2] can grow a length-1 vector).
  fib[seq_len(x)]
}
myfib(10, 1)
myfib(10, 0)
#Problem #4
#4A: Square of the difference between two inputs; vectorized with the
# usual R recycling rules.
fourthfunction <- function(x, y) {
  delta <- x - y
  delta * delta
}
fourthfunction(3, 5)
fourthfunction(c(2, 4, 6), 4)
#Part 4b: Hand-rolled arithmetic mean of a numeric vector
# (sum of the elements divided by their count).
arithmeticmean <- function(x) {
  sum(x) / length(x)
}
arithmeticmean(c(5, 15, 10))
Lab07NewData <- DataForLab07[[1]]
arithmeticmean(Lab07NewData)
#Part 4c: the sum of squares is the sum of squared deviations from the mean.
# Reuses the helpers defined above: arithmeticmean() for the centre and
# fourthfunction() for the squared deviations.
SumofSquares <- function(x) {
  centre <- arithmeticmean(x)
  sum(fourthfunction(x, centre))
}
Lab07NewData <- DataForLab07[[1]]
SumofSquares(Lab07NewData)
|
eaf01c57c3793d5398e0a45be5022cc372b08c70
|
a27b79fc527614f1ae9ab192bec123f7ad55ff36
|
/R/markdown.R
|
c0ce0b7dd6b736425c5cc40e42b0a14b8dab00c9
|
[
"MIT"
] |
permissive
|
r-lib/pkgdown
|
59528c00deab7466f678c48ed6e26227eecf1e6c
|
c9206802f2888992de92aa41f517ba7812f05331
|
refs/heads/main
| 2023-08-29T05:25:38.049588
| 2023-07-19T14:26:10
| 2023-07-19T14:26:10
| 3,723,845
| 443
| 330
|
NOASSERTION
| 2023-09-06T09:08:11
| 2012-03-15T00:36:24
|
R
|
UTF-8
|
R
| false
| false
| 3,323
|
r
|
markdown.R
|
# Render a markdown string to HTML.
# Returns NULL for NULL or NA input so callers can skip optional fields.
# `...` is forwarded to markdown_path_html() (e.g. strip_header).
markdown_text <- function(text, ...) {
if (identical(text, NA_character_) || is.null(text)) {
return(NULL)
}
# Round-trip through a temp file because the conversion pipeline is file-based.
md_path <- withr::local_tempfile()
write_lines(text, md_path)
markdown_path_html(md_path, ...)
}
# Render markdown that must produce a single inline element.
# `where` names the calling context, used only in the error message.
# Returns NULL (invisibly) when the input renders to nothing.
markdown_text_inline <- function(text, where = "<inline>", ...) {
html <- markdown_text(text, ...)
if (is.null(html)) {
return()
}
children <- xml2::xml_children(xml2::xml_find_first(html, ".//body"))
# More than one child of <body> means the markdown produced block-level
# output (e.g. multiple paragraphs), which cannot be used inline.
if (length(children) > 1) {
abort(
sprintf(
"Can't use a block element in %s, need an inline element: \n%s",
where,
text
)
)
}
# Serialise the contents of the single child, dropping its wrapping tag.
paste0(xml2::xml_contents(children), collapse="")
}
markdown_text_block <- function(text, ...) {
html <- markdown_text(text, ...)
if (is.null(html)) {
return()
}
children <- xml2::xml_children(xml2::xml_find_first(html, ".//body"))
paste0(as.character(children, options = character()), collapse = "")
}
markdown_body <- function(path, strip_header = FALSE) {
xml <- markdown_path_html(path, strip_header = strip_header)
# Extract body of html - as.character renders as xml which adds
# significant whitespace in tags like pre
transformed_path <- withr::local_tempfile()
xml %>%
xml2::xml_find_first(".//body") %>%
xml2::write_html(transformed_path, format = FALSE)
lines <- read_lines(transformed_path)
lines <- sub("<body>", "", lines, fixed = TRUE)
lines <- sub("</body>", "", lines, fixed = TRUE)
structure(
paste(lines, collapse = "\n"),
title = attr(xml, "title")
)
}
markdown_path_html <- function(path, strip_header = FALSE) {
html_path <- withr::local_tempfile()
convert_markdown_to_html(path, html_path)
xml <- xml2::read_html(html_path, encoding = "UTF-8")
if (!inherits(xml, "xml_node")) {
return(NULL)
}
# Capture heading, and optionally remove
h1 <- xml2::xml_find_first(xml, ".//h1")
title <- xml2::xml_text(h1)
if (strip_header) {
xml2::xml_remove(h1)
}
structure(xml, title = title)
}
markdown_to_html <- function(text, dedent = 4, bs_version = 3) {
if (dedent) {
text <- gsub(paste0("($|\n)", strrep(" ", dedent)), "\\1", text, perl = TRUE)
}
md_path <- withr::local_tempfile()
html_path <- withr::local_tempfile()
write_lines(text, md_path)
convert_markdown_to_html(md_path, html_path)
html <- xml2::read_html(html_path, encoding = "UTF-8")
tweak_page(html, "markdown", list(bs_version = bs_version))
html
}
# Convert a markdown file on disk to an HTML file via pandoc.
# The input markdown flavour is chosen by the available pandoc version;
# `...` supplies extra pandoc command-line options.
convert_markdown_to_html <- function(in_path, out_path, ...) {
if (rmarkdown::pandoc_available("2.0")) {
from <- "markdown+gfm_auto_identifiers-citations+emoji+autolink_bare_uris"
} else if (rmarkdown::pandoc_available("1.12.3")) {
from <- "markdown_github-hard_line_breaks+tex_math_dollars+tex_math_single_backslash+header_attributes"
} else {
# No usable pandoc: skip when running under tests, hard error otherwise.
if (is_testing()) {
testthat::skip("Pandoc not available")
} else {
abort("Pandoc not available")
}
}
rmarkdown::pandoc_convert(
input = in_path,
output = out_path,
from = from,
to = "html",
# compact() drops the NULLs left by the version-conditional options.
# NOTE(review): "-t html4" presumably targets legacy HTML output for the
# downstream templates -- confirm before changing.
options = purrr::compact(c(
if (!rmarkdown::pandoc_available("2.0")) "--smart",
if (rmarkdown::pandoc_available("2.0")) c("-t", "html4"),
"--indented-code-classes=R",
"--section-divs",
"--wrap=none",
...
))
)
invisible()
}
|
3f19b28aa1ae9be91811b7393cb009eec58bad54
|
2a6f46cc8b818b8a498433df63f822673ef6d4b0
|
/LEARNINGPLOT.R
|
faecd6931cc55dc9c6a0b4085d99780b3948fad2
|
[] |
no_license
|
goredoc/DissertationFilesFinalVersion
|
45b882d1166abeed790d8ce4a284bd1437a06314
|
d2c55e383f937271bb84bb12450dd6c21a9e307e
|
refs/heads/main
| 2023-02-19T02:55:57.040135
| 2021-01-24T18:37:24
| 2021-01-24T18:37:24
| 326,014,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 810
|
r
|
LEARNINGPLOT.R
|
# LEARNINGPLOT
# Plot the post-burn-in posterior samples of parameter i (odd rows of the
# global `phi` array) as a histogram, overlaid with its normal prior curve.
#
#   burnin: number of initial MCMC iterations to discard. BUG FIX: the
#           original immediately overwrote this argument with a hard-coded
#           `burnin = 500`, so caller-supplied values were silently ignored;
#           500 is now the default instead.
#   i:      parameter index; samples come from phi[(i*2)-1, , ].
#
# Relies on globals: phi, phi_names, mu_mean_vec, mu_sd_vec.
# (The original also computed an unused `oddsMeans`; removed.)
learningPlot <- function(burnin = 500, i)
{
  samples <- phi[(i * 2) - 1, , (burnin + 1):length(phi[1, 1, ])]
  priorMu <- mu_mean_vec[i]
  priorSigma <- mu_sd_vec[i]
  # Plot window covers both the +/- 2 sd prior interval and the sample range.
  loci <- priorMu - 2 * priorSigma
  hici <- priorMu + 2 * priorSigma
  losamp <- min(samples)
  hisamp <- max(samples)
  lowx <- min(loci, losamp)
  highx <- max(hici, hisamp)
  x <- seq(lowx, highx, .001)
  y <- dnorm(x, priorMu, priorSigma)
  hist(samples, xlim = c(lowx, highx), freq = FALSE, col = "red",
       main = paste0("Histogram with Prior Curve, ", phi_names[(i * 2) - 1]),
       xlab = "Posterior Samples (Post-Burn-In)")
  # Posterior-mean marker (white), prior density curve and prior-mean
  # marker (gray, dotted).
  arrows(mean(samples), 0, mean(samples), 10, len = 0, lwd = 1, col = "white")
  points(y ~ x, xlim = c(lowx, highx), lty = 3, col = "gray", type = "l", lwd = 1)
  arrows(priorMu, 0, priorMu, 10, len = 0, col = "gray", lwd = 1, lty = 3)
}
|
da380f77007e6f0dff195715fbacad295fe60399
|
cd951d8c027e1c679bfec654e4850d787c5ba960
|
/tests/test_bcSeq.R
|
0fced2e941ad2f47a3ee326f25e4aee784c8e67e
|
[] |
no_license
|
jl354/bcSeq
|
ee0666fa402e90bb0bd7ccde0289906b54b9cc87
|
e97586cf05198876cda8a425e5d55810a2ba4137
|
refs/heads/master
| 2023-04-08T16:45:29.366402
| 2021-04-14T16:54:33
| 2021-04-14T16:54:33
| 104,780,130
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,379
|
r
|
test_bcSeq.R
|
# Smoke-test script for the bcSeq package: writes a tiny synthetic barcode
# library (FASTA) and read set (FASTQ) to the working directory, then
# demonstrates the bcSeq alignment entry points (the actual alignment calls
# are left commented out).
library(bcSeq)
#devtools::load_all("../")
#### Set the seed
set.seed(4523)
#### Generate barcode
# Each barcode is a random permutation of 4 copies each of A/C/G/T; the
# FASTA file alternates header lines and sequence lines.
lFName <- "./libFile.fasta"
bases <- c(rep('A', 4), rep('C',4), rep('G',4), rep('T',4))
numOfBars <- 7
Barcodes <- rep(NA, numOfBars*2)
for (i in 1:numOfBars){
Barcodes[2*i-1] <- paste0(">barcode_ID: ", i)
Barcodes[2*i] <- paste(sample(bases, length(bases)), collapse = '')
}
write(Barcodes, lFName)
#### Generate reads and phred score
# Reads are drawn from the barcodes (higher-indexed barcodes are more
# likely) and written as 4-line FASTQ records with random Phred qualities
# in the 20-30 range (offset 33 ASCII encoding).
rFName <- "./readFile.fastq"
numOfReads <- 8
Reads <- rep(NA, numOfReads*4)
for (i in 1:numOfReads){
Reads[4*i-3] <- paste0("@read_ID_",i)
Reads[4*i-2] <- Barcodes[2*sample(1:numOfBars,1,
replace=TRUE, prob=seq(1:numOfBars))]
Reads[4*i-1] <- "+"
Reads[4*i] <- paste(rawToChar(as.raw(
33+sample(20:30, length(bases),replace=TRUE))),
collapse='')
}
write(Reads, rFName)
#### perform alignment
ReadFile <- "./readFile.fastq"
BarFile <- "./libFile.fasta"
outFile <- "./countH.csv"
#### with default output for bcSeq_hamming
#res <- bcSeq_hamming(ReadFile, BarFile, outFile, misMatch = 2,
# tMat = NULL, numThread = 2, count_only = TRUE )
#res <- read.csv(outFile, header=FALSE)
#res
#### with return of alignment probability matrix to R
#outFile <- "./countH2.csv"
#res <- bcSeq_hamming(ReadFile, BarFile, outFile, misMatch = 2,
# tMat = NULL, numThread = 2, count_only = FALSE )
#res
#### with default output for bcSeq_edit
outFile <- "./countE.csv"
#res <- bcSeq_edit(ReadFile, BarFile, outFile, misMatch = 2,
# tMat = NULL, numThread = 2, count_only = TRUE,
# gap_left = 2, ext_left = 1, gap_right = 2, ext_right = 1,
# pen_max = 7)
#res <- read.csv(outFile, header=FALSE)
#res
#### with return of alignment probability matrix to R
#outFile <- "./countE2.csv"
#res <- bcSeq_edit(ReadFile, BarFile, outFile, misMatch = 2,
# tMat = NULL, numThread = 2, count_only = FALSE,
# gap_left = 2, ext_left = 1, gap_right = 2, ext_right = 1,
# pen_max = 7)
#res
#### user-defined probability model
# Alignment-probability model passed to bcSeq via the `userProb` argument:
# computes x * (1 - log 2 + log(1 + m / (m + y))), vectorized over all
# three arguments.
comstomizeP <- function(m, x, y) {
  ratio <- m / (m + y)
  scale_term <- 1 - log(2) + log1p(ratio)
  x * scale_term
}
# Demonstration of plugging the user-defined model into bcSeq_edit
# (left commented out, like the other alignment calls above).
outFile = "comstomizeP.csv"
#bcSeq_edit(ReadFile, BarFile, outFile, misMatch = 2,
# tMat = NULL, numThread = 2, count_only = TRUE,
# gap_left = 2, ext_left = 1, gap_right = 2, ext_right = 1,
# pen_max = 7, userProb = comstomizeP)
|
24f584e7ff20291a00138bee5cb59cd2533b9e09
|
b83cde74005d5d837f0494ce17ff75af290cc12d
|
/man/techrep_fdata.Rd
|
33809b3244f203876f2d8bcf570b76cabfe31e42
|
[] |
no_license
|
clabornd/pmartRdata
|
eea42713a6af5389a9e2035e14c8db990d03095f
|
34ea559378f1f0523bfaa09609c98cfa368c103a
|
refs/heads/master
| 2020-04-08T21:19:04.424102
| 2019-11-06T19:12:29
| 2019-11-06T19:12:29
| 159,738,842
| 0
| 0
| null | 2018-11-29T23:05:44
| 2018-11-29T23:05:44
| null |
UTF-8
|
R
| false
| true
| 855
|
rd
|
techrep_fdata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{techrep_fdata}
\alias{techrep_fdata}
\title{Peptide-level Technical Replicate Feature Data (f_data)}
\format{A data.frame with 64 rows (samples) and 4 columns:
\describe{
\item{RunID}{LC-MS run identifier (matches column headers in pep_edata)}
\item{FACTOR}{Character string indicating either regular weight (RW), or obese (OB) groups}
\item{DILUTION}{Character string indicating dilution of mouse plasma to Shewanella Oneidensis MR-1}
\item{TECH_REP}{Character string indicating which technical replicates belong to the same biological sample}
}}
\source{
See details of \code{\link{pmartRdata}} for relevant grant numbers.
}
\description{
A dataset containing the technical replicate metadata, including technical replicate sample assignment variable.
}
|
1e4d5330525733e69d5719b7577a85ad4f7c7a80
|
de882c7604b62c5975274bf0e3027da96a2f7b4d
|
/man/QTL_res_list.Rd
|
99d8e8094ea851a8fdb2294f420c548e01e62db4
|
[] |
no_license
|
vincentgarin/mppR
|
bc061f2d0284adc6468619e162e8ffd45b641fb3
|
2fb0286257697f796c2c1b2590e9284ad281a939
|
refs/heads/master
| 2023-01-28T18:07:19.180068
| 2023-01-02T13:27:41
| 2023-01-02T13:27:41
| 75,842,226
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,043
|
rd
|
QTL_res_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QTL_res_list.R
\name{QTL_res_list}
\alias{QTL_res_list}
\title{List of QTL results}
\usage{
QTL_res_list(mppData, MPP_out, trait, Q.eff, VCOV, res_file = c())
}
\arguments{
\item{mppData}{An object of class \code{mppData}.}
\item{MPP_out}{Output from \code{\link{mpp_proc}}.}
\item{trait}{\code{character} indicator to specify the trait name.}
\item{Q.eff}{\code{Character} expression indicating the assumption concerning
the QTL effect: 'cr', 'par', 'anc', 'biall' or 'MQE'.}
\item{VCOV}{\code{Character} expression defining the type of variance
covariance structure used.}
\item{res_file}{\code{data.frame} to store the QTL effects results. Default,
empty file.}
}
\value{
The results of \code{MPP_out} are appended to \code{res_file}.
}
\description{
Form a list of QTL results by appending QTL effect results obtained during
different QTL detection procedures. The results are appended to
\code{res_file}.
}
\examples{
# not yet
}
\author{
Vincent Garin
}
|
b6c175bbead875f7b13d51ee383d1eba284d3b3d
|
8dc13f15bc5c086d658b71a67a120b8eba2f388c
|
/R/vsm_info.R
|
9c56aa5d1b2ff207ae714548e6185a0232a90cf2
|
[] |
no_license
|
thomasgredig/quantumPPMS
|
4499966224e302f27e0a138f6ed74408493204b2
|
c79cd5027b21446d168d8775a378e95b7dabe7e5
|
refs/heads/master
| 2022-05-23T18:35:58.955287
| 2022-04-09T21:29:55
| 2022-04-09T21:29:55
| 182,595,380
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,555
|
r
|
vsm_info.R
|
#' Reads VSM data file header
#'
#' Parses the '[Header]' section of a Quantum Design PPMS data file and
#' returns the PPMS option (VSM, ACMS, LogData, Resistivity), title, file
#' open time, app name and the INFO attributes as a one-row data frame.
#'
#' @param filename filename including path
#' @return data frame (empty data frame if no recognizable header is found)
#' @examples
#' filename = vsm.getSampleFiles()[1]
#' vsm.info(filename)
#' @export
vsm.info <- function(filename) {
# check if file exists
if (!file.exists(filename)) {
warning(paste('Cannot find file:',filename))
return()
}
v = vsm.version(filename)
# Header layout differs by file version: number of header lines to scan,
# a version-specific flag, and the column order of the INFO fields.
skipLEN = NULL
if (v==1.5667) skipLEN = list(30,30,TRUE, cols=c(1,4,3,5,6))
if (v==1.56) skipLEN = list(19,19,TRUE, cols=c(2,4,5,7,8))
if (v==1.0914) skipLEN = list(20,20,TRUE, cols=c(2,3,4,7,8))
if (v==1.2401) skipLEN = list(22,23,FALSE, cols=c(2,3,4,5,6))
if (v==1.36) skipLEN = list(22,23,FALSE, cols=c(2,3,4,5,6))
if (v==1.3702) skipLEN = list(22,23,FALSE, cols=c(2,3,4,5,6))
# Fail with a clear message instead of "object 'skipLEN' not found" when an
# unsupported file version is encountered (fixed).
if (is.null(skipLEN)) {
stop(paste('vsm.info: unsupported VSM file version:', v))
}
scan(file = filename, nlines=skipLEN[[1]], what=character(0), sep='\n', quiet = TRUE) -> header
d=data.frame()
if ((length(header)>0) && (header[1]=='[Header]')) {
# BYAPP line names the PPMS measurement option (VSM, ACMS, ...).
ppms.option = gsub(' ','',strsplit(header[grep('^BYAPP,',header)],',')[[1]][2])
title = gsub('TITLE,','',header[grep('^TITLE', header)])
# [1] "FILEOPENTIME" "5500334.30" "09/21/2018" "4:50 pm"
filedate = as.character(strptime(paste(gsub(',','',strsplit(header[grep('FILEOPENTIME,',header)],' ')[[1]][c(3,4,5)]), collapse=' '),
format='%m/%d/%Y %I:%M:%S %p'))
# filedate = as.character(strptime(paste(strsplit(header[grep('FILEOPENTIME,',header)],',')[[1]][c(3,4)], collapse=' '),
# format='%m/%d/%Y %I:%M %p'))
dl.appname = grep('APPNAME',header)
appname = gsub(',\\s*','',gsub('INFO','',gsub('APPNAME','',header[dl.appname])))
header = header[-dl.appname]
info.str = gsub('^INFO,','',header[grep('INFO',header)])
if (ppms.option == 'ACMS') {
# ACMS files carry four unnamed INFO fields; give them generic names.
attr = info.str[1:4]
attr.names = paste0('ACMS.INFO',1:4)
} else {
# Split each INFO line into value / name at the last ',' or ':'.
attr = gsub('\\s*(.*)[,:][^,]+','\\1',info.str)
attr.names = gsub('.*[,:]\\s*([^,]+)','\\1',info.str)
if (v==1.5667) {
# This version stores the fields in name/value order; swap them.
tmp = attr
attr = attr.names
attr.names = tmp
}
}
d = data.frame(rbind(c(ppms.option, title, filedate, appname, attr)), stringsAsFactors = FALSE)
names(d) = c('option','title','file.open.time','AppName', attr.names)
# guess the sample name (pattern like "AB123456c1" in any header field or
# the filename itself)
d$sample.name = gsub('.*([A-Z]{2,3}\\d{6,8}[a-zA-Z]{0,2}\\d{0,1}).*','\\1',
paste(paste(d, collapse=' == '),
filename))
}
d
}
|
fa9537401de80d14674fb5418985769c424b8652
|
24c8edec774d1b2ec1fa7f75af41fec5c036f9a6
|
/R Programming Programming Assignment 2/cachesolve.R
|
f28490c8a523dd651ef84817bb01fd76e59ca43f
|
[] |
no_license
|
Dingkai1996/R-programing-Programming-Assignment-2
|
f12970d8b29b1590c41d88f1f1f666b320bad383
|
bd83be78d8a479323c5895b7927369654d3119c0
|
refs/heads/main
| 2023-06-24T14:14:13.360753
| 2021-07-22T21:30:35
| 2021-07-22T21:30:35
| 384,751,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,486
|
r
|
cachesolve.R
|
# Create a cache-enabled matrix object.
#
# Builds a y-by-z matrix from the values in `x` and returns a list of
# accessors (set/get/setinverse/getinverse) that allow cachesolve() to cache
# the matrix inverse instead of recomputing it.
#
# Args:
#   x: numeric vector of matrix entries (must contain exactly y*z values)
#   y: number of rows
#   z: number of columns (must equal y so an inverse can exist)
# Returns: list of accessor functions, or NULL (with a message) on bad input.
makeCacheMatrix <- function(x=numeric(), y, z){
m <- NULL ##initially set the inverse to NULL
## Fixed: the original tested `lapply(length(x), as.numeric) != y*z`,
## comparing a one-element list against a number; length(x) is the
## intended element count.
if( length(x) != y*z){ ##check whether it can form matrix or not
message("can't make matrix")
return()
}
else if(y != z){ ##check whether the matrix have inverse or not
message("can't make the matrix with inverse")
return()
}
else{
set <- function(a, b, c){ ##set matrix in parent env with the desired value, if inverse is already set, get rid of it!
x <<- a
y <<- b
z <<- c
m <<- NULL
}
s <- matrix(x, y, z) ##make the matrix with value
get <- function() s ##get the matrix, add it into get
setinverse <- function(inverse) m <<- inverse ##set the inverse for the matrix as m
getinverse <- function() m ##get the inverse of matrix, add it into getinverse
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) ##given the list variable from the first function, will first check to see if there's already a cached inverse and return
##otherwise will attempt to solve its inverse and set/return it
}
}
# Return the inverse of the cache-matrix object `s` (built by
# makeCacheMatrix): the inverse is computed with solve() on the first call
# and the cached copy is reused afterwards. Extra arguments in `...` are
# forwarded to solve().
cachesolve <- function(s, ...){
cached <- s$getinverse()
if (!is.null(cached)) {
message("getting cached data")
return(cached)
}
inv <- solve(s$get(), ...)
s$setinverse(inv)
inv
}
|
1a3fa86c503a8d34f3974fd20164fca33cfb0b90
|
2d02239b9d095490b34dcf77b891b65263aa9ddc
|
/lee_ready.R
|
a9b158b9422e27cdc4a66aa93b1788cfec166768
|
[] |
no_license
|
julienneves/herding
|
f629d8858b836e84e1b3253b953cce2baa06c2d8
|
e6891da7394eb266e1e3f45589d9e7bbbaed173f
|
refs/heads/master
| 2021-01-11T00:27:25.605447
| 2017-01-20T19:37:53
| 2017-01-20T19:37:53
| 70,556,432
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,698
|
r
|
lee_ready.R
|
# Classify each trade in `tqdata` (Lee-Ready style, via
# highfrequency::getTradeDirection) and return an xts series holding the
# trade direction plus nine NA placeholder columns (probabilities, beta,
# sigma) that are presumably filled in by later processing — TODO confirm
# against the callers. Zero "no trade" markers are inserted wherever the
# gap between trades exceeds `delay` (default: mean inter-trade time).
# Depends on the xts and highfrequency packages.
lee_ready <- function(tqdata, delay = default_delay(tqdata)){
x = getTradeDirection(tqdata)
trade = xts(x = x, order.by = time(tqdata), unique = FALSE, tzone = attr(tqdata,'tzone'))
# Process day by day so inactivity gaps never span overnight periods.
trade_split <- split.xts(trade, f= "days")
trade_split <- lapply(trade_split, insert_no_trade, delay)
trade <- do.call(rbind,trade_split)
trade <- merge(trade,xts(x = matrix(rep(NA,9*length(trade)),ncol = 9), order.by = time(trade)))
colnames(trade) <- c("x", "prob_x", "beta", "sigma", "prob_x_h", "prob_x_l", "prob_x_n", "prob_v_h", "prob_v_l", "prob_v_n")
return(trade)
}
# Insert explicit zero ("no trade") observations into the trade-direction
# series wherever the gap between consecutive trades exceeds `delay`.
# Returns an xts series on the augmented time index with zeros at the
# inserted timestamps and the original directions elsewhere.
#
# Fixed: the tzone was previously taken from the global `tqdata`, which is
# not an argument of this function; it now comes from `trade` itself.
insert_no_trade <- function(trade, delay = NULL){
time_trade <- time(trade)
time_no_trade <- test_inactivity(time_trade, delay)
no_trade <- xts(x = rep(0,length(time_no_trade)), order.by = time_no_trade, unique = FALSE, tzone = attr(trade,'tzone'))
no_trade[time_trade,] <- trade
trade <- no_trade
return(trade)
}
# Augment a vector of trade timestamps with extra timestamps marking the end
# of each inactivity period longer than `delay`, returned in sorted order.
# NOTE(review): calls `add_no_trade`, which is not defined in this file —
# confirm it is provided elsewhere in the project before use.
test_inactivity <- function(time_trade, delay){
diff_time <- diff(time_trade)
if(!all(diff_time<=delay)){
# Insert a timestamp `delay` after each trade that precedes a long gap.
time_trade <- c(time_trade[diff_time>delay]+delay,time_trade)
time_trade <- time_trade[order(time_trade)]
time_trade <- add_no_trade(time_trade, delay)
}
return(time_trade)
}
# (Removed a stray top-level line `delay = trade_split <- split.xts(trade,
# f= "days")` that referenced the undefined object `trade` at source time
# and aborted loading of this file.)
#
# Default inactivity threshold for lee_ready(): the mean time between
# consecutive trades, averaged over the per-day sub-series of `tqdata`.
default_delay <- function(tqdata) {
x = getTradeDirection(tqdata)
trade = xts(x = x, order.by = time(tqdata), unique = FALSE, tzone = attr(tqdata,'tzone'))
trade_split <- split.xts(trade, f= "days")
time_mean <- function(trade) mean(diff(time(trade)))
return(mean(sapply(trade_split, time_mean)))
}
|
f8608efe3c48967ac13bce660d74aa90a350a56e
|
03c99906a94c70e9a13e7714aad996f461f339c1
|
/R/dsimFun.R
|
a6097dec490cd9e65001f6e1b24e774ccd3ac0f3
|
[] |
no_license
|
cran/adiv
|
6a111f6a1ef39fe302a2f882b9a9d04e7d652c04
|
d65d6e0301e4611a94a91933299bff1fdc06d96b
|
refs/heads/master
| 2022-10-28T08:07:33.352817
| 2022-10-06T12:40:04
| 2022-10-06T12:40:04
| 97,764,074
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,751
|
r
|
dsimFun.R
|
# Compute a (dis)similarity matrix between the rows of `df`.
#
# `df` is either a single table or an ade4 "ktab" (list of tables, one per
# variable block). `vartype` gives the type of each block: "Q"/"N" use the
# Gower metric (cluster::daisy); "P" (proportions, rows normalized to sum 1)
# and "M" use one of five similarity coefficients selected by `method`
# (1: Sokal-Sneath-like, 2: Jaccard-like, 3: Sorensen, 4: Ochiai,
# 5: a 4A/(2A+B+C) variant). For a ktab, per-block results are summed and
# divided by the total block weight `nk`.
# Depends on cluster::daisy and ade4's "ktab" class.
#
# NOTE(review): in the single-table branch the dissimilarity for "P"/"M" is
# sqrt(1-S), while the ktab branch uses 1-S — confirm whether this
# discrepancy is intentional.
dsimFun <-
function(df, vartype=c("Q","N","M","P"), method=1:5, type=c("similarity", "dissimilarity")){
type <- type[1]
if(!type%in% c("dissimilarity", "similarity")) stop("type must be either dissimilarity or similarity")
meantype <- method[1]
if(!meantype%in%(1:5)) stop("Incorrect definition of method")
# fun0 computes the (dis)similarity contribution of block i of a ktab.
fun0 <- function(i){
df0 <- as.matrix(df[[i]])
type <- type[1]
vartype0 <- vartype[i]
if(vartype0=="Q" | vartype0=="N"){
# Gower handles quantitative/nominal blocks; scaled by column count so
# each variable contributes weight 1 to the ktab total.
if(type=="dissimilarity")
return(daisy(df0, metric = "gower")*ncol(df0))
else
return((1-as.matrix(daisy(df0, metric = "gower")))*ncol(df0))
}
if(vartype0=="P"){
df0 <- sweep(df0, 1, rowSums(df0), "/")
}
if(vartype0=="P" | vartype0=="M"){
# A = cross-products; B/C broadcast the diagonal (row self-products)
# across columns/rows so the coefficients below are fully vectorized.
A <- df0%*%t(df0)
B <- diag(A)%*%t(rep(1, nrow(df0)))
C <- rep(1, nrow(df0))%*%t(diag(A))
if(meantype==4) S <- A/sqrt(B)/sqrt(C)
else if(meantype==3){
S <- 2*A/(B+C)
}
else if(meantype==1){
S <- A/(2*B+2*C-3*A)
}
else if(meantype==2){
S <- A/(B+C-A)
}
else S <- 4*A/(2*A+B+C)
rownames(S)<-colnames(S)<-rownames(df0)
if(type=="dissimilarity")
return(as.dist(1-S))
else
return(S)
}
}
if(inherits(df, "ktab")){
# Sum per-block results, then divide by the total weight nk:
# one unit per "M"/"P" block plus one per column of "Q"/"N" blocks.
listdsim <- lapply(1:length(df$blo), fun0)
res <- listdsim[[1]]
if(length(listdsim)>1){
for(i in 2:length(listdsim))
res <- res + listdsim[[i]]
}
nk <- length(vartype[vartype!="Q" & vartype!="N"])
nk <- nk + sum(df$blo[vartype=="Q" | vartype=="N"])
return(res/nk)
}
else{
# Single-table case: same logic as fun0 applied to df directly, with the
# first element of vartype describing the whole table.
df <- as.matrix(df)
type <- type[1]
vartype <- vartype[1]
if(vartype=="Q" | vartype=="N"){
if(type=="dissimilarity")
return(daisy(df, metric = "gower"))
else
return(1-as.matrix(daisy(df, metric = "gower")))
}
if(vartype=="P"){
df <- sweep(df, 1, rowSums(df), "/")
}
if(vartype=="P" | vartype=="M"){
A <- df%*%t(df)
B <- diag(A)%*%t(rep(1, nrow(df)))
C <- rep(1, nrow(df))%*%t(diag(A))
if(meantype==4) S <- A/sqrt(B)/sqrt(C)
else if(meantype==3){
S <- 2*A/(B+C)
}
else if(meantype==1){
S <- A/(2*B+2*C-3*A)
}
else if(meantype==2){
S <- A/(B+C-A)
}
else S <- 4*A/(2*A+B+C)
rownames(S)<-colnames(S)<-rownames(df)
if(type=="dissimilarity")
return(sqrt(1-S))
else
return(S)
}
}
}
|
0c95ef1fdd2981474df8f691cf9bbaeb3446cf00
|
f7a0f3cbeefdc01fc0f172a47359c0c4610c95a7
|
/code_active/feature_fit.R
|
cc0d357036a87727c7dd93ef49d5f3a1e04a830b
|
[] |
no_license
|
EESI/exploring_thematic_structure
|
65e77efbb56fea646a9f165eaa94f955f68259ff
|
06f7ea096c31dbb63b09fc117ee22411e52ab60e
|
refs/heads/master
| 2020-08-25T03:30:04.253394
| 2019-10-23T03:02:18
| 2019-10-23T03:02:18
| 216,955,082
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,568
|
r
|
feature_fit.R
|
# Refit a frozen topic model using only a subset of topics and evaluate it.
#
# If `beta` is NULL, the top `num_features` topics are selected by random
# forest importance (out$dx$imp$RF1500DS) and the model's beta matrix is
# subset to those rows; otherwise the supplied `beta` is used as-is. The
# training and testing posteriors are then re-explored with the frozen
# model, predictive performance is re-evaluated, and topic heatmaps are
# plotted for both sets.
#
# NOTE(review): relies on many objects from the enclosing script/environment
# (seed_train, seed_test, counts, otu_taxa_xavier, save_dir,
# save_fit_foldername, save_coef_filename, dupnum, seed_permuted, model,
# ctm_frozen, beta_prep, eval_labels, plot_z_heatmap) — confirm they are
# defined before calling.
feature_fit <- function(num_features,out,fit,
train_docs,test_docs,train_meta,test_meta,
vocab,variable,beta=NULL){
# Debug print of the RF importance table used for topic selection.
print(out$dx$imp$RF1500DS)
if (is.null(beta)){
topic_features <- order(out$dx$imp$RF1500DS$importance[,1],decreasing=TRUE)[1:num_features]
# Slot 3 of logbeta holds the combined/subset beta used downstream.
fit$beta$logbeta[[3]] <- do.call('rbind',fit$beta$logbeta)[topic_features,]
fit$settings$dim$K <- NROW(fit$beta$logbeta[[3]])
}else{
topic_features <- 1:NROW(beta)
fit$beta$logbeta[[3]] <- beta
fit$settings$dim$K <- NROW(fit$beta$logbeta[[3]])
}
cat('\nSelecting topics: ',topic_features,'\n\n')
# Covariate index 3 selects the combined beta slot set above.
z_covariate <- 3
cat('Exploring training set posterior\n')
train_fit <- ctm_frozen(fit,train_docs,vocab,
seed=seed_train,max.em.its=500,emtol=1e-5,avg_iters=1,
verbose=TRUE,true_doc_content=TRUE,
data=train_meta,covariate=z_covariate,
parallel=TRUE,nc=25)
# Clean up any parallel cluster ctm_frozen may have left behind.
if(exists('cl')) stopCluster(cl)
cat('Exploring testing set posterior\n')
test_fit <- ctm_frozen(fit,test_docs,vocab,
seed=seed_test,max.em.its=500,emtol=1e-5,avg_iters=1,
verbose=TRUE,true_doc_content=TRUE,
data=test_meta,covariate=z_covariate,
parallel=FALSE,nc=10)
if(exists('cl')) stopCluster(cl)
K <- train_fit$settings$dim$K
load_fit <- FALSE
save_fit <- FALSE
betas <- beta_prep(train_fit,test_fit,counts,otu_taxa_xavier,vocab,
save_fit,save_dir,save_fit_foldername,dupnum,seed_permuted)
beta_frozen_ra <- betas$beta_frozen_ra
beta_frozen <- betas$beta_frozen
beta_meta <- betas$beta_meta
beta_otu <- betas$beta_otu
out_new <- eval_labels(save_fit,load_fit,train_fit$Z_bar,test_fit$Z_bar,train_meta,test_meta,
save_dir,save_fit_foldername,save_coef_filename,beta_type=model,
nc=60,test='dx')
# Relabel topic columns/coefficients with the selected topic indices.
colnames(train_fit$Z_bar) <- paste0('T',topic_features)
names(out_new$dx$lasso) <- paste0('T',topic_features)
names(out_new$dx$en1) <- paste0('T',topic_features)
names(out_new$dx$en2) <- paste0('T',topic_features)
rownames(out_new$dx$imp$RF1500DS$importance) <- paste0('T',topic_features)
plot_z_heatmap(train_fit$Z_bar,train_meta,out_new$dx$en2,
dist1='jaccard',dist2='jaccard',
clust1='ward.D2',clust2='ward.D2',
transform='none',
main='',
rowclust=FALSE,variable=variable)
colnames(test_fit$Z_bar) <- paste0('T',topic_features)
plot_z_heatmap(test_fit$Z_bar,test_meta,out_new$dx$en2,
dist1='jaccard',dist2='jaccard',
clust1='ward.D2',clust2='ward.D2',
transform='none',
main='',
rowclust=FALSE,variable)
return(list(out=out_new,
z_bar_train=train_fit$Z_bar,
train_meta=train_meta,
z_bar_test=test_fit$Z_bar,
test_meta=test_meta))
}
library(stm)
library(biom)
library(readr)
library(tidyr)
library(dplyr)
library(fastICA)
library(randomForest)
library(stringr)
library(kernlab)
library(Rcpp)
library(parallel)
library(foreach)
library(ape)
library(phyloseq)
library(doParallel)
library(stm)
library(LDAvis)
library(caret)
library(glmnet)
library(ggplot2)
library(knitr)
library(gridExtra)
# Driver script: pick one model configuration, load the frozen topic-model
# fits, evaluate the combined beta matrix, then run feature_fit() to select
# the top topics and plot heatmaps. Relies on helper functions sourced from
# the project files below (prepare_framework, load_fits, beta_prep,
# eval_labels, feature_fit, plot_z_heatmap, ...).
# Grid of candidate configurations: topic count K, content-model flag, and
# outcome variable; invalid combinations are filtered out below.
params <- expand.grid(K=c(25,50,75,125,200),
content=c(FALSE,TRUE),
variable=c('DIAGNOSIS','ISOLATION_SOURCE'))
params <- params %>%
filter(!(content == FALSE & variable == 'ISOLATION_SOURCE'),
!(content == TRUE & K == 200)) %>%
arrange(K,variable)
# Row of `params` to run in this session.
param <- 2
source('~/Dropbox/stm_microbiome/code_active/stm_functions.R')
source('~/Dropbox/stm_microbiome/code_active/nav_froz_fxns_3.R')
source('~/Dropbox/stm_microbiome/code_active/performance_1.R')
source('~/Dropbox/stm_microbiome/code_active/framework.R')
load_fit <- TRUE
save_fit <- FALSE
save_output <- FALSE
random_seed <- FALSE
K <- params[param,]$K
cn_normalize <- TRUE
content <- params[param,]$content
seq_sim <- 's97'
variable <- as.character(params[param,]$variable)
dupnum <- NULL
# prepare_framework/load_fits are expected to populate the globals used
# below (fit, fit_frozen, counts, train_docs, ..., save_dir) — defined in
# the sourced project files.
prepare_framework(random_seed,K,cn_normalize,content,variable,seq_sim)
load_fits(file.path(save_dir,save_fit_foldername,save_fit_filename))
# take combined (K x M where M is the number of features, 2 for dx)
b <- 1
train_fit <- fit_frozen[[b]][['train']]
test_fit <- fit_frozen[[b]][['test']]
K <- train_fit$settings$dim$K
model <- str_replace_all(names(fit_frozen)[b],' ','')
betas <- beta_prep(train_fit,test_fit,counts,otu_taxa_xavier,vocab,
save_fit,save_dir,save_fit_foldername,dupnum,seed_permuted,model)
beta_frozen_ra <- betas$beta_frozen_ra
beta_frozen <- betas$beta_frozen
beta_meta <- betas$beta_meta
beta_otu <- betas$beta_otu
# evaluate the performance of the combined beta matrix
cat('Evaluating predictive performance for',names(fit_frozen)[b],'\n')
out <- eval_labels(save_fit,load_fit,train_fit$Z_bar,test_fit$Z_bar,train_meta,test_meta,
save_dir,save_fit_foldername,save_coef_filename,beta_type=model,
nc=60)
# take the top F topics via RF importance (tree = 1500) for predicting the target labels (dx)
# and subset the beta matrix with the indexes for these features, yielding an N x F matrix.
# Generate the topic assignments (z bar) via the subsetted beta matrix. Then, evaluluate the
# predictive performance to color code the topics via the EN output, then plot a heatmap. For
# the heatmap, the columns are ordered in terms for decreasing PCDAI score, where CD- gets a 0.
ff <- feature_fit(30,out,fit,train_docs,test_docs,train_meta,test_meta,vocab,variable)
# Inspect the refit's scores and RF importances interactively.
ff$out$dx$score
ff$out$dx$imp$RF1500DS
plot_z_heatmap(ff$z_bar_train,ff$train_meta,ff$out$dx$lasso,
dist1='jaccard',dist2='jaccard',
clust1='ward.D2',clust2='ward.D2',
transform='none',
main='',
rowclust=TRUE,variable='DIAGNOSIS')
plot_z_heatmap(ff$z_bar_test,ff$test_meta,ff$out$dx$lasso,
dist1='jaccard',dist2='jaccard',
clust1='ward.D2',clust2='ward.D2',
transform='none',
main='',
rowclust=TRUE,variable='DIAGNOSIS')
|
2b17b9ea100c8e7a4ca35d4885c5895e4dc0779b
|
d359475bd587e8f364ca2ece794f9358fce66a84
|
/model/r_scripts/functions/get_init.R
|
c1bc37fc07340a45b3c61de99d7fdbd6103bb04a
|
[] |
no_license
|
umich-cphds/cov-ind-19
|
c22cc191bb64b1b2c581e28afaa2260a7583143c
|
ebb3c0093fff66d39d7a7cf14397efe269731b4a
|
refs/heads/master
| 2023-02-18T07:43:37.731747
| 2023-02-06T17:24:29
| 2023-02-06T17:24:29
| 249,284,918
| 15
| 6
| null | 2023-02-02T16:17:31
| 2020-03-22T22:32:42
|
R
|
UTF-8
|
R
| false
| false
| 769
|
r
|
get_init.R
|
# Build the initial state vector for the epidemic model: cumulative
# Confirmed / Recovered / Deceased totals before `min_date`, followed by the
# row of counts on `min_date` itself.
#
# NOTE(review): relies on the globals `min_date` and `max_date` being
# defined in the calling environment, and on dplyr being attached. The
# `%>% as.numeric(as.vector(.))` steps depend on magrittr inserting the lhs
# as the first argument even though `.` only appears in the nested call —
# confirm this behaves as intended before refactoring.
get_init <- function(data) {
tmp_data <- data %>%
filter(date < min_date) %>%
dplyr::select(-date) %>%
summarize(
Confirmed = sum(Confirmed, na.rm = TRUE),
Recovered = sum(Recovered, na.rm = TRUE),
Deceased = sum(Deceased, na.rm = TRUE)
) %>%
as.numeric(as.vector(.))
tmp <- data %>% filter(date >= min_date & date <= max_date)
data_initial <- c(tmp_data,
tmp %>%
filter(date == min_date) %>%
dplyr::select(-date) %>%
as.numeric(as.vector(.))
)
# Avoid zero initial compartments (positions 1 and 4), which would break
# downstream model initialization.
if (data_initial[1] == 0) {data_initial[1] <- 1}
if (data_initial[4] == 0) {data_initial[4] <- 1} # check with Ritwik/Ritoban if this is necessary
return(data_initial)
}
|
44b10a057744aec3595f649561413b730ed68a96
|
ee5573b3b198214f0d5db33015e635182a75e1ca
|
/binomial/R/bin_distribution.R
|
22d36f1e3c1754fc4e1d747ba35e85e22384c425
|
[] |
no_license
|
stat133-sp19/hw-stat133-TheGoldenKyle
|
e6b6a946963ac9aececeefdfedbc59133a7786d2
|
e11cb3f4722b1a490acebc548607e08e7861b222
|
refs/heads/master
| 2020-04-28T13:28:53.897190
| 2019-05-03T19:55:32
| 2019-05-03T19:55:32
| 175,308,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
bin_distribution.R
|
#' @title Binomial Distribution
#' @description Computes the binomial distribution: the probability of each
#'   possible number of successes over a given number of trials
#' @param trials Number of trials (numeric)
#' @param prob Probability of success (real)
#' @return Returns a data.frame of the number of successes and their probabilities
#' @export
#' @examples
#' x <- bin_distribution(trials = 5, prob = 0.5)
#' x
#'   success probability
#' 1       0     0.03125
#' 2       1     0.15625
#' 3       2     0.31250
#' 4       3     0.31250
#' 5       4     0.15625
#' 6       5     0.03125
#'
#' plot(x)
#' Returns a barplot of the distribution
#'
bin_distribution <- function(trials, prob) {
  # Probability of every success count 0..trials via the package's
  # bin_probability() helper. vapply() preallocates the result instead of
  # growing a vector with c() inside a loop (O(n) rather than O(n^2)).
  probabilities <- vapply(
    0:trials,
    function(k) bin_probability(k, trials, prob),
    numeric(1)
  )
  object <- data.frame(success = 0:trials, probability = probabilities)
  # "bindis" class enables the plot.bindis() barplot method.
  class(object) <- c("bindis", "data.frame")
  object
}
#' @export
# S3 plot method for "bindis" objects: bar chart of the success
# probabilities. The signature now matches the plot() generic (first
# argument `x` plus `...`), which the original `distribution` argument name
# violated; extra arguments are forwarded to barplot().
plot.bindis <- function(x, ...) {
  barplot(x$probability, names.arg = x$success, xlab = "successes", ylab = "probability", ...)
}
|
adaed9489e39b4101783705bc1de7d20762c378a
|
41e59bef1fe26f89626e6cba7be15ee98e6d80f8
|
/R/projection_domain.R
|
b1e7385f79137362d901e4a8a2988d1fe35d6e5f
|
[] |
no_license
|
ardeeshany/FLAME
|
32ae0a694a9c4cc3bd946757c1ccdb15c20c5673
|
e8ba1670778f32988db6b9f57ede425f9f2d8d2e
|
refs/heads/master
| 2021-07-05T03:49:04.885156
| 2017-09-25T00:10:40
| 2017-09-25T00:10:40
| 103,167,356
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,238
|
r
|
projection_domain.R
|
#' Representation on the time domain of a function defined by the coefficients of the kernel basis
#'
#' It computes the pointwise evaluation of a function (or a set of
#' functions) on the time domain, given its (their) projection on the kernel basis.
#'
#' @param y matrix. \code{J} \eqn{\times} \code{N} matrix containing in
#' column \eqn{n} the coefficients of the projection of the function \eqn{y_n}
#' on the \code{J} eigenfunctions of the kernel.
#' @param eigenfun matrix. \code{m} \eqn{\times} \code{J} matrix containing
#' in each column the
#' point-wise evaluation of the eigenfunctions on the
#' kernel
#'
#' @return \code{N} \eqn{\times} \code{m} matrix containing in the row \eqn{n} the pointwise
#' evaluation of the function \eqn{y_n} on the domain \eqn{D} of length \code{m}.
#' @export
#'
#' @examples
#' data(SobolevKernel)
#' data(simulation)
#' projection_domain(Y_matrix, eigenvect) # projection of the data
#' # on the time domain seq(0, 1, length = 50)
#'
## @A: Here we do not know what the domain D is, but eigenfun is containing the pointwise evaluation
## of the eigenfunctions of kernel and it is enough.
# Map kernel-basis coefficients back to the time domain: row n of the
# returned N x m matrix is the pointwise evaluation of function n, i.e.
# the transpose of eigenfun %*% y.
projection_domain <- function(y, eigenfun) {
  t(eigenfun %*% y)
}
|
0c69409597e8c0fbb28e953cc5a11065f08deaab
|
faab8fe6f24c90dff7c6a7eb42b955b073dd7c9f
|
/TLSfinder_GetTif.R
|
2699134878afe2a776f9a4e8f436b652776dc930
|
[] |
no_license
|
Famingzhao/TLS-finder
|
5d638d575ae76174174f4b2fb5b1265a85d5d44a
|
d4d221441e667161a8328c333f0fdab397aac1cc
|
refs/heads/master
| 2023-08-08T04:03:15.098597
| 2020-07-13T09:33:58
| 2020-07-13T09:33:58
| 408,386,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 696
|
r
|
TLSfinder_GetTif.R
|
# 3.0_TLSfinder_GetTif.R
# Simply takes RData count matrices for B-cells saved with GridQuant and print them as tables with '.tif' extension so they can be loaded in Fiji
# Author: Daniele Tavernari
# Please cite: Tavernari et al., 2021 (see full citation on GitHub repository's README)
########## Input
# Sample identifier and grid tile size (microns) select which RData file to
# load from the matrices directory.
OutDir_matrices = "Grid_matrices/"
s = "s8B"
grid_spacing = 20
####################
#################### Main #####################
# Loading this file is expected to define `meaMat` (the B-cell count
# matrix), which is then dumped as a headerless tab-separated table with a
# '.tif' extension for Fiji.
load(file = paste0(OutDir_matrices,s,"_tileSizeMicrons",grid_spacing,"_Bcell_meaMat_and_coos.RData"))
write.table(meaMat, file = paste0(OutDir_matrices,s,"_tileSizeMicrons",grid_spacing,"_Bcell_meaMatTab.tif"), row.names = F, col.names = F, sep = "\t")
|
380ba92fcbaf1176279c539cb13f6072be101f0d
|
da444894ad9f8181b0f89ac026d8e988779f9850
|
/Ejercicio2.R
|
faa934ce45ae8927c99b22c3ba7666194be098f7
|
[] |
no_license
|
romeoaxpuac123/Practica2Semi2
|
2ca46d63617c5d5230a1fc2f7d4dc112ee6abf24
|
c4d711fd9df294e8a87624a519864748c2bfd172
|
refs/heads/master
| 2022-11-12T22:50:46.436362
| 2020-06-28T21:05:06
| 2020-06-28T21:05:06
| 275,077,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 500
|
r
|
Ejercicio2.R
|
print("Romeo Axpuac")
library(ggplot2)
# Path to the input CSV file (cardiology training data)
archivo <- "C:\\Users\\Bayyron\\Desktop\\Junio2020\\Seminario2\\Laboratorio\\Practica2\\Archivo\\cardio_train.csv"
informacion <- read.csv(archivo)
#head(informacion)
# Keep only the id and weight columns for this report
columnasReporte2 <- c("id","weight")
datos <- informacion[columnasReporte2]
# Histogram of the weights, with each bar's count labelled (labels = T)
h= hist(datos$weight, main = "HISTOGRAMA Y FRECUENCIAS DE PESOS", col="red", xlab="PESOS",labels = T)
# Overlay a frequency polygon connecting the bin midpoints
lines(c(0,h$mids),c(0,h$counts), type = "b", pch = 20, col = "blue", lwd = 3)
|
b691153e616246676424b8ac9452cbf0fa22fc47
|
622eb3dc154d7779473fc161da613002b3ee877c
|
/scripts/r/xxtemplate-r.r
|
1eb2f9e7dd9d9218593601632a3f09b6020fadba
|
[] |
no_license
|
thereisnotime/xxToolbelt
|
d9e99e2139da31a4d6e034f4c56753527d120157
|
2d2a311737d4a910816adc328f6cf4414caed61c
|
refs/heads/main
| 2023-04-04T10:35:53.270861
| 2021-04-05T19:06:50
| 2021-04-05T19:06:50
| 354,077,716
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
xxtemplate-r.r
|
#!/usr/bin/env Rscript
# Minimal Rscript template: print every command-line argument returned by
# commandArgs() (including the interpreter/bootstrap arguments), one per line.
args <- commandArgs()
cat(args, sep = "\n")
|
19233423044d61fc345497c940acb65d4f411cf7
|
5d5d7785f5ce2ff377ebec29d74382652502c1d8
|
/R/calculate_IV.R
|
0b3baba94526c2d571295a22d541a8328568cc0f
|
[
"MIT"
] |
permissive
|
standardgalactic/wpa
|
d7256e719732c7c3f067e88d253e600cd1d66a06
|
b64b562cee59ea737df58a9cd2b3afaec5d9db64
|
refs/heads/main
| 2023-08-10T19:11:03.211088
| 2021-09-08T13:40:35
| 2021-09-08T13:40:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,905
|
r
|
calculate_IV.R
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#' @title
#' Calculate Weight of Evidence (WOE) and Information Value (IV) between a
#' single predictor and a single outcome variable.
#'
#' @description
#' Calculates Weight of Evidence (WOE) and Information Value (IV) between a
#' single predictor and a single outcome variable. This function implements the
#' common Information Value calculations whilst maintaining the minimum reliance
#' on external dependencies. Use `map_IV()` for the equivalent of
#' `Information::create_infotables()`, which performs calculations for multiple
#' predictors and a single outcome variable.
#'
#' @details
#' The approach used mirrors the one used in `Information::create_infotables()`.
#'
#' @param data Data frame containing the data.
#' @param outcome String containing the name of the outcome variable.
#' @param predictor String containing the name of the predictor variable.
#' @param bins Numeric value representing the number of bins to use.
#'
#' @import dplyr
#'
#' @return A data frame is returned as an output.
#'
# Bins the predictor into `bins` quantile groups, cross-tabulates the bins
# against the binary outcome (columns "0"/"1"), and computes per-bin WOE and
# a cumulative IV, mirroring Information::create_infotables().
calculate_IV <- function(data,
outcome,
predictor,
bins){
pred_var <- data[[predictor]]
outc_var <- data[[outcome]]
# Check inputs
if(sum(is.na(outc_var)) > 0){
stop(
glue::glue(
"dependent variable {outcome} has missing values in the input training data frame"
)
)
}
# Compute q: interior quantile cut points of the predictor
# (type = 3 matches the quantile definition used by the reference
# implementation).
q <- stats::quantile(
pred_var,
probs = c(1:(bins - 1) / bins),
na.rm = TRUE,
type = 3
)
# Compute cuts (duplicates removed, so fewer than `bins` bins can result)
cuts <- unique(q)
# Compute intervals: bin index of each observation
intervals <-
findInterval(
pred_var,
vec = cuts,
rightmost.closed = FALSE)
# Compute cut_table: bin x outcome contingency table (columns "0"/"1")
cut_table <-
table(
intervals,
outc_var) %>%
as.data.frame.matrix()
## get min/max per bin, bin label "[min,max]", size and share of rows
cut_table_2 <-
data.frame(
var = pred_var,
intervals
) %>%
group_by(intervals) %>%
summarise(
min = min(var, na.rm = TRUE) %>% round(digits = 1),
max = max(var, na.rm = TRUE) %>% round(digits = 1),
n = n(),
.groups = "drop"
) %>%
mutate(!!sym(predictor) :=
glue::glue("[{round(min, digits = 1)},{round(max, digits = 1)}]")) %>%
mutate(percentage = n / sum(n)) %>%
select(!!sym(predictor), intervals, n, percentage)
# Create variables that are double
cut_table_1 <- as.numeric(cut_table$`1`)
cut_table_0 <- as.numeric(cut_table$`0`)
# Non-events in group (cross-multiplied totals so the ratio below equals
# the ratio of within-class shares)
n_non_event <- cut_table_1 * sum(cut_table_0) # t$y_1*sum_y_0
n_yes_event <- cut_table_0 * sum(cut_table_1) # t$y_0*sum_y_1
# Compute WOE
cut_table_2$WOE <-
ifelse(
cut_table$`1` > 0 & cut_table$`0` > 0, # Both positive
log(n_non_event / n_yes_event), # % of non-events divided by % of events
0) # Otherwise impute 0
# Compute IV_weight: difference of within-class shares per bin
p1 <- cut_table$`1` / sum(cut_table$`1`)
p0 <- cut_table$`0` / sum(cut_table$`0`)
cut_table_2$IV_weight <- p1 - p0
cut_table_2$IV <- cut_table_2$WOE * cut_table_2$IV_weight
cut_table_2 %>%
mutate(IV = cumsum(IV)) %>%
# Maintain consistency with `Information::create_infotables()`
select(
!!sym(predictor),
N = "n",
Percent = "percentage",
WOE,
IV)
}
#' @title
#' Calculate Weight of Evidence (WOE) and Information Value (IV) between
#' multiple predictors and a single outcome variable, returning a list of
#' statistics.
#'
#' @description
#' This is a wrapper around `calculate_IV()` to loop through multiple predictors
#' and calculate their Weight of Evidence (WOE) and Information Value (IV) with
#' respect to an outcome variable.
#'
#' @details
#' The approach used mirrors the one used in `Information::create_infotables()`.
#'
#' @param data Data frame containing the data.
#' @param outcome String containing the name of the outcome variable.
#' @param predictors Character vector containing the names of the predictor
#' variables. If `NULL` (default) is supplied, all numeric variables in the
#' data will be used.
#' @param bins Numeric value representing the number of bins to use. Defaults to
#' 10.
#'
#' @import dplyr
#'
#' @return A list of data frames is returned as an output. The first layer of
#' the list contains `Tables` and `Summary`:
#' - `Tables` is a list of data frames containing the WOE and cumulative sum
#' IV for each predictor.
#' - `Summary` is a single data frame containing the IV for all predictors.
#'
map_IV <- function(data,
                   predictors = NULL,
                   outcome,
                   bins = 10){
  # Default predictor set: every numeric column except the outcome.
  if (is.null(predictors)) {
    predictors <-
      data %>%
      select(-!!sym(outcome)) %>%
      select(where(is.numeric)) %>%
      names()
  }
  # One WOE/IV table per predictor. The first column of each table is named
  # after its predictor, so the list names equal `predictors`.
  Tables <-
    predictors %>%
    purrr::map(function(one_pred) {
      calculate_IV(
        data = data,
        outcome = outcome,
        predictor = one_pred,
        bins = bins
      )
    }) %>%
    purrr::set_names(nm = predictors)
  # Summary: the final (cumulative) IV of each predictor, sorted descending.
  Summary <-
    Tables %>%
    purrr::imap(function(tbl, var_name) {
      data.frame(
        Variable = var_name,
        IV = tbl$IV[nrow(tbl)]
      )
    }) %>%
    bind_rows() %>%
    arrange(desc(IV))
  # Tables are reordered to match the Summary ranking before returning.
  c(
    list("Tables" = Tables[Summary$Variable]),
    list("Summary" = Summary)
  )
}
|
979b774695439bbe29c8e18a64d1f0c0dfec2efb
|
1fcfa19b2fdb270e0862990db0cae8e733c1a7f7
|
/R/UCTSUpload.R
|
786b2631b0a9adcf483521e2b6cd53b3c5b5bfff
|
[] |
permissive
|
GreenGrassBlueOcean/DatastreamDSWS2R
|
28ae2916239c8f2c7a76a42691cf5cff86494b48
|
1250c37d360e14ee0beaa709e9033ef363257306
|
refs/heads/master
| 2022-12-08T07:45:12.499165
| 2020-06-03T21:05:51
| 2020-06-03T21:05:51
| 267,281,708
| 0
| 0
|
Apache-2.0
| 2020-05-27T09:50:38
| 2020-05-27T09:50:37
| null |
UTF-8
|
R
| false
| false
| 17,145
|
r
|
UCTSUpload.R
|
#' @include common.R
#' @include classConstructor.R
#' @include wrapper.R
#'
#' @name dotEncryptPassword
#' @title Encrypt the Datastream password
#' @description This is a port of the VBA code
#'
#' @param strPassword the password to be encrypted
#' @return an encrypted password
#'
#'
#' @keywords internal
#'
.EncryptPassword <- function(strPassword=""){
  # Rolling-key XOR obfuscation, ported from the original VBA implementation.
  # Each ciphertext byte is rendered as a zero-padded 3-digit decimal.
  key <- as.raw(199L)                 # arbitrary starting key (matches VBA)
  pw_bytes <- charToRaw(strPassword)
  chunks <- character(length(pw_bytes))
  for (i in seq_along(pw_bytes)) {
    crypted <- xor(pw_bytes[[i]], key)
    chunks[[i]] <- sprintf("%03d", as.integer(crypted))
    # Chain the key: add the ciphertext byte (mod 255), then XOR with 67.
    key <- xor(as.raw((as.integer(key) + as.integer(crypted)) %% 255L),
               as.raw(67L))
  }
  paste0(chunks, collapse = "")
}
#' @name dotgetTimeseries
#' @title convert xts timeseries into a string that can be sent to
#' the Datastream server
#'
#' @param Data the xts timeseries to be converted
#' @param freq the frequency of the data
#' @param digits the number of decimal places to round the data to
#' @param NA_VALUE the string to replace NA data with
#'
#' @return A string of the core data of Data
#'
#'
#' @importFrom zoo zoo index
#' @importFrom xts merge.xts .indexwday
#' @importFrom stringr str_trim
#' @keywords internal
#'
.getTimeseries <- function(Data, freq, digits, NA_VALUE){
  # Serialise an xts series into the comma-separated value string expected by
  # the Datastream UCTS upload form. Only the values are encoded; the date
  # index is discarded (the caller supplies start/end dates separately).
  if(ncol(Data) > 1) {
    # Only the first column of a multi-column series is uploaded.
    Data <- Data[,1]
  }
  if (freq == "D")
  {
    # Datastream assumes daily data is a contiguous run of WEEKDAY values.
    # Re-index onto every calendar day between the first and last observation
    # (filling gaps with NA), then keep Monday-Friday only.
    startDate <- zoo::index(first(Data))
    endDate <- zoo::index(last(Data))
    NADates <- seq(from=startDate, to=endDate, by="days")
    NAData <- zoo(c(NA), order.by=NADates)
    # merge and fill missing rows with NAs
    wData <- xts::merge.xts(Data, NAData, fill=NA)
    # xts::.indexwday(): 0 = Sunday .. 6 = Saturday, so 1:5 selects weekdays
    wData <- wData[which(xts::.indexwday(wData) %in% 1:5),1]
  }else{
    # Non-daily frequencies are uploaded as-is, with the implicit assumption
    # that the points are already at the requested frequency.
    wData <- Data
  }
  sFormattedData <- suppressWarnings(formatC(wData, digits = digits, mode="double", format="f"))
  sFormattedData <- stringr::str_trim(sFormattedData)
  # Replace missing data with the server's missing-value token. formatC()
  # renders NA as "NA" and NaN as "NaN"; the previous code only replaced
  # "NaN", which left literal "NA" strings untouched whenever NA_VALUE was
  # not itself "NA".
  sFormattedData[sFormattedData %in% c("NA", "NaN")] <- NA_VALUE
  # Collapse into a single comma-separated string with a trailing comma,
  # as the upload form expects.
  sData <- paste0(sFormattedData, collapse=",")
  sData <- paste0(sData, ",")
  return(sData)
}
#' @title Upload a UCTS timeseries into Datastream
#'
#' @description Uploads an xts into a UCTS in the Datastream Database
#' @details Note this function does not check to see if there is
#' a pre-existing timeseries already in Datastream. It will just overwrite
#' any existing UCTS.
#' @param tsData - an xts (or timeseries object that can be converted to
#' one) to be uploaded.
#' @param TSCode The mnemonic of the target UCTS
#' @param MGMTGroup Must have managment group. Only the first
#' characters will be used.
#' @param freq The frequency of the data to be uploaded
#' @param seriesName the name of the series
#' @param Units Units of the data - can be no more than 12 characters -
#' excess will be trimmed to that length
#' @param Decimals Number of Decimals in the data - a number between 0 and
#' 9 - if outside that range then trimmed
#' @param ActPer Whether the values are percentages ("N") or actual
#' numbers ("Y")
#' @param freqConversion How to do any FX conversions
#' @param Alignment Alignment of the data within periods
#' @param Carry whether to carry data over missing dates
#' @param PrimeCurr the currency of the timeseries
#' @param strUsername your Datastream username
#' @param strPassword your Datastream Password
#' @param strServerName URL of the Datastream server
#' @param strServerPage page on the datastream server
#' @return TRUE if the upload has been a success, otherwise an error message
#'
#' @export
#'
#' @importFrom zoo index
#' @importFrom httr POST add_headers content content_type
#' @importFrom xts as.xts first last xtsible
#'
UCTSUpload <- function(tsData,
                       TSCode="",
                       MGMTGroup="ABC",
                       freq = c("D","W","M","Q","Y"),
                       seriesName,
                       Units="",
                       Decimals=2,
                       ActPer=c("N","Y"),
                       freqConversion= c("ACT","SUM","AVG","END"),
                       Alignment=c("1ST","MID","END"),
                       Carry=c("YES","NO","PAD"),
                       PrimeCurr="",
                       strUsername = ifelse(Sys.getenv("DatastreamUsername") != "",
                                            Sys.getenv("DatastreamUsername"),
                                            options()$Datastream.Username),
                       strPassword = ifelse(Sys.getenv("DatastreamPassword") != "",
                                            Sys.getenv("DatastreamPassword"),
                                            options()$Datastream.Password),
                       strServerName="http://product.datastream.com",
                       strServerPage="/UCTS/UCTSMaint.asp"){
  # Uploads a full (non-incremental) UCTS to the Datastream server via an
  # HTTP form POST. Returns TRUE on success, otherwise FALSE with an "error"
  # attribute describing the failure.
  #
  # --- Input validation -----------------------------------------------------
  if(!xtsible(tsData)){
    stop(paste0("tsData must be a time-based object and not of class ",class(tsData)))
  }
  # All the multi-choice arguments use the match.arg convention: only the
  # first element is consulted.
  if(!freq[1] %in% c("D","W","M","Q","Y")){
    stop("freq is not an allowed value")
  }
  if(!ActPer[1] %in% c("N","Y")){
    stop("ActPer is not an allowed value")
  }
  if(!freqConversion[1] %in% c("ACT","SUM","AVG","END")){
    stop("freqConversion is not an allowed value")
  }
  if(!Alignment[1] %in% c("1ST","MID","END")){
    stop("Alignment is not an allowed value")
  }
  if(!Carry[1] %in% c("YES","NO","PAD")){
    stop("Carry is not an allowed value")
  }
  # Clamp Decimals to the 0-9 range accepted by the server
  if(!is.numeric(Decimals)) Decimals <- 2L
  Decimals <- as.integer(Decimals)
  if(Decimals < 0) Decimals <- 0
  if(Decimals > 9) Decimals <- 9
  # The Units field accepts at most 12 characters; excess is trimmed
  Units <- substr(Units,0,12)
  # --- Currency handling ----------------------------------------------------
  # 3-character codes are treated as ISO and translated to Datastream codes;
  # shorter non-empty codes must already be valid Datastream codes.
  if(is.null(PrimeCurr)) {
    PrimeCurr <- ""
  }
  if(nchar(PrimeCurr) > 3){
    stop("Invalid currency. Should be either 3 digit ISO code or Datastream code")
  } else if(nchar(PrimeCurr) == 3 ){
    # Check ISO code is valid and convert to DS Code
    dfXRef <- DatastreamDSWS2R::currencyDS2ISO
    if(PrimeCurr %in% dfXRef$isoCode){
      PrimeCurr <- dfXRef$dsCode[which(PrimeCurr == dfXRef$isoCode &
                                         dfXRef$primeCode == TRUE)]
    } else {
      stop("Invalid currency. Should be an ISO code in table currencyDS2ISO.")
    }
  } else if(nchar(PrimeCurr) > 0 ){
    # Check DS Code is valid (DS codes may contain non-ASCII characters)
    PrimeCurr <- iconv(PrimeCurr, from="utf-8", to = "latin1")
    dfXRef <- DatastreamDSWS2R::currencyDS2ISO
    if(!PrimeCurr %in% dfXRef$dsCode){
      stop("Invalid currency. Should be an Datastream code in table currencyDS2ISO.")
    }
  }
  # Every call is a full update with a hard-coded missing-value token
  NA_VALUE <- "NA"
  # Dates are sent in dd/MM/yyyy format
  # convert to xts object
  myXtsData <- as.xts(tsData)
  startDate <- zoo::index(first(myXtsData))
  endDate <- zoo::index(last(myXtsData))
  # Target URL for the form POST
  dsURL <- paste0(strServerName , strServerPage , "?UserID=" , strUsername)
  # Form fields. Omitting AmendFlag="Y" makes this a full update.
  dsParams <- list(CallType = "Upload",
                   TSMnemonic = toupper(TSCode),
                   TSMLM = toupper(MGMTGroup),
                   TSStartDate = format(startDate,format="%d/%m/%Y"),
                   TSEndDate = format(endDate,format="%d/%m/%Y"),
                   TSFrequency = freq[1],
                   TSTitle = seriesName,
                   TSUnits = Units,
                   TSDecPlaces = Decimals,
                   TSAsPerc = ActPer[1],
                   TSFreqConv = freqConversion[1], # Add "Frequency Conversion"
                   TSAlignment = Alignment[1], # Add "Alignment"
                   TSCarryInd = Carry[1], # Add "Carry Indicator"
                   TSPrimeCurr = I(PrimeCurr), # I() prevents re-encoding of the DS code
                   TSULCurr = "", # Underlying Currency no longer used, but the mainframe expects a null value
                   ForceUpdateFlag1 = "Y",
                   ForceUpdateFlag2 = "Y", # We have ignored some logic in the original UCTS VBA code
                   # AmendFlag = "Y",
                   TSValsStart = format(startDate,format="%d/%m/%Y"), #TODO adjust this date according to the frequency of the data VBA function AdjustDateTo1st
                   NAValue = NA_VALUE,
                   TSValues = .getTimeseries(myXtsData,
                                             freq= freq[1],
                                             digits=Decimals,
                                             NA_VALUE), #Datapoints only; the date index is discarded here, with obvious risks
                   UserOption = .EncryptPassword(strPassword)
  )
  # --- POST with retry ------------------------------------------------------
  # Up to three attempts with exponential backoff (2s, 4s); only an HTTP 408
  # (request timeout) triggers a retry, any other outcome breaks out.
  nLoop <- 1
  waitTimeBase <- 2
  maxLoop <- 4
  retValue <- ""
  while(nLoop < maxLoop){
    retValue <- tryCatch(httr::POST(url = dsURL,
                                    body = dsParams,
                                    config = httr::add_headers(encoding = "utf-8"),
                                    httr::content_type("application/x-www-form-urlencoded; charset=utf-8"),
                                    encode = "form"),
                         error = function(e) e)
    # Break if an error or null
    if(is.null(retValue)) break
    if("error" %in% class(retValue)) break
    # If did not get a time out then break
    if(httr::status_code(retValue) != 408) break
    # Wait before re-submitting to give the server/network time to recover
    Sys.sleep(waitTimeBase ^ nLoop)
    nLoop <- nLoop + 1
  }
  # --- Result handling ------------------------------------------------------
  if(is.null(retValue)){
    return(structure(FALSE,
                     error = "NULL value returned"))
  }
  if("error" %in% class(retValue)){
    return(structure(FALSE,
                     error = paste("Error ", retValue$message)))
  }
  if(httr::http_error(retValue)){
    # BUGFIX: http_status() takes a single argument; the previous call passed
    # an unused `collapse` argument, which itself errored instead of
    # returning the failure structure. Use the human-readable message.
    return(structure(FALSE,
                     error = paste("http Error: ",
                                   httr::http_status(retValue)$message)))
  }
  # The server responds "*OK*" on success, otherwise an error string
  myResponse <- content(retValue, as = "text")
  if(myResponse[1] == "*OK*"){
    return(structure(TRUE,
                     error = ""))
  }
  else{
    return(structure(FALSE,
                     error = paste("*Error* Upload failed after ", nLoop,
                                   " attempts with error ", myResponse[1])))
  }
}
#' @title Append a xts to an existing UCTS timeseries in Datastream
#'
#' @description Uploads and appends an xts into a UCTS in the Datastream Database
#' @details This function checks if there is a pre-existing timeseries already in Datastream.
#' If there is then it will append the xts onto the existing series. If there are any
#' overlapping dates then depending on the setting of overwrite then the new data
#' will overwrite the existing data in the UCTS
#'
#' @param tsData - an xts (or timeseries object that can be converted to
#' one) to be uploaded.
#' @param TSCode The mnemonic of the target UCTS
#' @param MGMTGroup Must have managment group. Only the first
#' characters will be used.
#' @param freq The frequency of the data to be uploaded
#' @param seriesName the name of the series
#' @param Units Units of the data - can be no more than 12 characters -
#' excess will be trimmed to that length
#' @param Decimals Number of Decimals in the data - a number between 0 and
#' 9 - if outside that range then trimmed
#' @param ActPer Whether the values are percentages ("N") or actual
#' numbers ("Y")
#' @param freqConversion How to do any FX conversions
#' @param Alignment Alignment of the data within periods
#' @param Carry whether to carry data over missing dates
#' @param PrimeCurr the currency of the timeseries
#' @param overwrite if TRUE then existing data in the UCTS will be overwritten
#' @param strUsername your Datastream username
#' @param strPassword your Datastream Password
#' @param strServerName URL of the Datastream server
#' @param strServerPage page on the datastream server
#' @return TRUE if the upload has been a success, otherwise an error message
#'
#' @export
#'
#' @importFrom zoo index
#' @importFrom xts as.xts first last xtsible
#'
UCTSAppend <- function(tsData,
                       TSCode = "",
                       MGMTGroup = "ABC",
                       freq = c("D","W","M","Q","Y"),
                       seriesName,
                       Units = "",
                       Decimals = 2,
                       ActPer = c("N","Y"),
                       freqConversion = c("ACT","SUM","AVG","END"),
                       Alignment = c("1ST","MID","END"),
                       Carry = c("YES","NO","PAD"),
                       PrimeCurr ="",
                       overwrite = TRUE,
                       strUsername = ifelse(Sys.getenv("DatastreamUsername") != "",
                                            Sys.getenv("DatastreamUsername"),
                                            options()$Datastream.Username),
                       strPassword = ifelse(Sys.getenv("DatastreamPassword") != "",
                                            Sys.getenv("DatastreamPassword"),
                                            options()$Datastream.Password),
                       strServerName = "http://product.datastream.com",
                       strServerPage = "/UCTS/UCTSMaint.asp"){
  # Append tsData to an existing UCTS (if any) and upload the combined series.
  # `overwrite` decides which side wins on overlapping dates. Full argument
  # validation is delegated to UCTSUpload(); only what the merge needs is
  # checked here.
  if(!xtsible(tsData)){
    stop(paste0("tsData must be a time-based object and not of class ", class(tsData)))
  }
  tsData <- as.xts(tsData)
  if(!freq[1] %in% c("D","W","M","Q","Y")){
    stop("freq is not an allowed value")
  }
  # Fetch whatever already exists on the server for this mnemonic
  mydsws <- dsws$new()
  # BUGFIX: pass the single selected frequency freq[1], not the whole
  # candidate vector c("D","W","M","Q","Y") that `freq` holds when defaulted.
  tsExisting <- mydsws$timeSeriesRequest(instrument = TSCode,
                                         startDate = as.Date("1950-01-01"),
                                         endDate = index(last(tsData)),
                                         frequency = freq[1])
  if(is.null(tsExisting)){
    errMsg <- paste0("Datastream Server Error retrieving existing series\n",
                     paste(mydsws$errorlist, collapse = "\n", sep = "\n"))
    stop(errMsg)
  }
  # In the absence of being able to define start and end dates for UCTS as defined
  # on http://product.datastream.com/DSWSClient/Docs/SoapApiHelp/EnumDetails.html#DSDateNames
  # we trim the leading/trailing all-NA rows of the returned series. If
  # Datastream fixes this, or another way is suggested, these lines can go.
  validRows <- which(!is.na(tsExisting))
  # Merge only when the server returned at least one non-NA observation;
  # otherwise there is no existing series and tsData is uploaded unchanged.
  if(length(validRows) != 0){
    # Keep the non-NA middle segment of the existing series
    firstNotNULL <- min(validRows)
    lastNotNULL <- max(validRows)
    tsExisting <- tsExisting[firstNotNULL:lastNotNULL, ]
    # make.index.unique(drop = TRUE) keeps the FIRST row for each duplicated
    # timestamp, so the rbind() order decides which series wins on overlaps.
    if(overwrite){
      # new data overwrites the old on overlapping dates
      tsData <- xts::make.index.unique(rbind(tsData, tsExisting), drop = TRUE)
    } else {
      # old data is kept on overlapping dates
      tsData <- xts::make.index.unique(rbind(tsExisting, tsData), drop = TRUE)
    }
  }
  # Upload combined timeseries
  return(UCTSUpload(tsData = tsData,
                    TSCode = TSCode,
                    MGMTGroup = MGMTGroup,
                    freq = freq,
                    seriesName = seriesName,
                    Units = Units,
                    Decimals = Decimals,
                    ActPer = ActPer,
                    freqConversion = freqConversion,
                    Alignment = Alignment,
                    Carry = Carry,
                    PrimeCurr = PrimeCurr,
                    strUsername = strUsername,
                    strPassword = strPassword,
                    strServerName = strServerName,
                    strServerPage = strServerPage))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.