blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b003215c3ed92fcce1882a6a6f813f9676c60672 | f41a57727e712813ae285f4302960f2b12a95158 | /R/db_helper.R | 73237b71d2aaa6cebde56e6c4c3e578d27e2bcbb | [] | no_license | exoulster/dbhelper | 10202f5e55f32f763a7be377ee67468ff4fe1ba2 | ee513379883bb636da9d8ff10c95301f2e133e8f | refs/heads/master | 2021-02-09T07:41:22.676280 | 2020-07-12T11:47:39 | 2020-07-12T11:47:39 | 244,258,437 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,349 | r | db_helper.R |
# Split a (possibly schema-qualified) table name into its schema and table
# parts. For Oracle connections both parts are upper-cased to match the case
# conventions of the Oracle data dictionary.
#
# Args:
#   conn: optional DBI connection; only inspected to detect Oracle connections.
#   table_name: character scalar, either "table" or "schema.table".
# Returns:
#   list(schema = <character or NULL>, table = <character>).
parse_table_name = function(conn=NULL, table_name) {
  parts = strsplit(table_name, '.', fixed=TRUE)[[1]]
  if (!is.null(conn) && 'OraConnection' %in% class(conn)) {
    parts = toupper(parts)
  }
  if (length(parts) > 2) {
    # Previously any extra dot-separated components were dropped silently;
    # keep the first two (old behaviour) but make the truncation visible.
    warning("table_name contains more than one '.'; using '",
            parts[1], ".", parts[2], "'", call. = FALSE)
  }
  if (length(parts) == 1) { # no schema given; callers fall back to a default
    return(list(schema=NULL, table=parts[1]))
  }
  list(schema=parts[1], table=parts[2])
}
#' Set Default Database
#' @param schema schema name
#' @export
use = function(schema) {
  # Capture the unevaluated argument so the schema can be supplied unquoted,
  # e.g. use(sales) as well as use("sales"), and store it as the package-wide
  # default under the "dbhelper.schema" option.
  schema_txt = rlang::quo_text(rlang::enquo(schema))
  options(dbhelper.schema = schema_txt)
  message(paste('default schema', schema_txt, 'is set'))
}
#' List Databases
#' @param schema schema name
#' @param conn connection object. Will search global environment for "con" if conn is NULL
#' @import dplyr
list_databases = function(conn=NULL) {
  # Placeholder: enumerating databases is not implemented yet; always NULL.
  NULL
}
#' List Tables
#' @param schema schema name
#' @param conn connection object. Will search global environment for "con" if conn is NULL
#' @import dplyr
#' @export
list_tables = function(conn=NULL, schema=NULL, pattern='.*') {
  # Resolve a default connection from the global environment when none given.
  if (is.null(conn)) {
    conn = globalenv()$con
    if (is.null(conn)) stop('con is not available, please set up connection')
  }
  # Without an explicit schema, fall back to the one registered via use().
  if (is.null(schema)) {
    schema = getOption('dbhelper.schema')
    message(paste('Listing tables from schema', schema))
  }
  tables = DBI::dbListTables(conn, schema=schema)
  # Keep only names matching `pattern` (ICU regex), alphabetically sorted.
  sort(tables[stringr::str_detect(tables, pattern)])
}
#' List fields of a table
#' @param table_name table name
#' @export
list_fields = function(conn=NULL, table_name) {
  # Consistency fix: like list_tables() and tbbl(), fall back to a global
  # connection object `con` when no connection is supplied. Previously a NULL
  # conn was passed straight to DBI::dbListFields() and failed there.
  if (is.null(conn)) {
    if (is.null(globalenv()$con)) stop('con is not available, please set up connection')
    conn = globalenv()$con
  }
  # Resolve "schema.table" vs plain "table" before querying the fields.
  params = parse_table_name(conn=conn, table_name=table_name)
  DBI::dbListFields(conn, params$table, schema=params$schema)
}
#' Connect DB Table
#' @param table_name table name, could be full name with schema
#' @param conn connection object. Will search global environment for "con" if conn is NULL
#' @export
tbbl = function(conn=NULL, table_name) {
  # `conn` now defaults to NULL so the global-connection fallback below is
  # actually reachable when the argument is omitted.
  if (is.null(conn)) {
    if (is.null(globalenv()$con)) stop('con is not available, please set up connection')
    conn = globalenv()$con
  }
  # BUG FIX: parse_table_name() returns a named list. The previous code
  # indexed it with `[`, which yields one-element *lists*, so
  # `is.null(schema)` was never TRUE and dbplyr::in_schema() received lists
  # instead of character strings. Use `$` to extract the elements themselves.
  parts = parse_table_name(conn=conn, table_name=table_name)
  if (is.null(parts$schema)) {
    dplyr::tbl(conn, parts$table)
  } else {
    dplyr::tbl(conn, dbplyr::in_schema(parts$schema, parts$table))
  }
}
# Strip SQL comments from a string: block comments (/* ... */, possibly
# spanning several lines) and single-line comments (-- to end of line).
remove_comments = function(s) {
  # BUG FIX: the old greedy pattern '/\\*.*\\*/' deleted everything between
  # the FIRST '/*' and the LAST '*/', removing code between two separate
  # block comments; and because '.' excluded newlines it never matched a
  # block comment spanning lines. '(?s)' makes '.' match newlines and '.*?'
  # is non-greedy, so each comment is removed individually.
  s = gsub('(?s)/\\*.*?\\*/', '', s, perl = TRUE)
  # PCRE '.' stops at newline, so this removes '--' comments to end of line.
  s = gsub('--.*', '', s, perl = TRUE)
  s
}
# Replace every double quote in `s` with a single quote (many SQL engines
# treat double quotes as identifier quoting rather than string literals).
replace_double_quotes = function(s) {
  # Fixed-string substitution: a literal single-character swap does not need
  # the regex engine (the previous stringr call escaped both the pattern and
  # the replacement just to achieve this).
  gsub('"', "'", s, fixed = TRUE)
}
# Strip template-variable markers from a SQL string: rewrites `${...}` to
# `{...}`, also consuming one optional punctuation character on either side
# of the `$` expression (presumably to unquote forms like '${var}').
replace_vars = function(s) {
# NOTE(review): `[:punct:]` used outside brackets relies on ICU's POSIX-style
# set syntax (stringr/stringi); it is NOT equivalent in base R's regex
# engines, so do not port this to gsub() as-is. The trailing `[:punct:]?`
# deletes a punctuation character after `}` as well -- confirm that dropping
# the surrounding characters is intended.
s = stringr::str_replace_all(s, '[:punct:]?\\$(\\{[[:alnum:][:punct:]]+\\})[:punct:]?', '\\1')
s
}
#' Clean up sql query
#' @export
parse_sql = function(sql) {
  # Split the script on ';' into individual statements, then scrub each one:
  # remove comments, normalise quoting, unwrap ${...} template variables and
  # trim surrounding whitespace.
  statements = strsplit(sql, '\\;')[[1]]
  cleaned = vapply(statements, function(stmt) {
    trimws(replace_vars(replace_double_quotes(remove_comments(stmt))))
  }, character(1), USE.NAMES = FALSE)
  # Drop statements that were only comments/whitespace and tag the rest as SQL.
  dplyr::sql(cleaned[cleaned != ''])
}
#' Get sql query from file
#' @export
parse_sql_script = function(sql_script) {
  # Read the whole file into a single string and delegate to parse_sql().
  parse_sql(readr::read_file(sql_script))
}
|
ba385d83e887b6f145fd08fe82ca5bb5ff598034 | dfe41238836c90cf97a5370d77e7c40a9d6547ca | /R/preprocess_omic_list.R | 12169c6d771b7726392ac381621196744125aa0c | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Valledor/pRocessomics | 6357053773820971335da5c077cab77b3e4eb1de | 35ee785e1ce3c088519812f42cec8cc9ed5cb6f8 | refs/heads/master | 2023-03-10T12:29:02.815741 | 2023-03-02T05:47:26 | 2023-03-02T05:47:26 | 173,298,170 | 3 | 0 | Apache-2.0 | 2019-03-12T18:24:27 | 2019-03-01T12:26:53 | null | UTF-8 | R | false | false | 11,932 | r | preprocess_omic_list.R | #
# Copyright 2019 Luis Valledor / Laura Lamelas
# Last revision 01.2023
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @name preprocess_omic_list
#' @title Preprocess datalist
#' @description A function to preprocess omics data by imputing (or not) missing values, balance abundance among samples and pre-filter the data
#' @usage preprocess_omic_list(datalist, initialrow=1, initialcolumn=2,
#' treatment1col=1, treatment2col=2, treatment=1, imputation=NULL, imputhld=0.25,
#' k=3, parallel=FALSE, varsel=FALSE, varselthld=0.25, abdbal=NULL)
#' @param datalist List with different preprocessed omic levels. pRoDS class object.
#' @param initialrow First row of numeric data within datasets.
#' @param initialcolumn First column of numeric data within datasets.
#' @param treatment1col Column number in which treatment 1 is indicated.
#' @param treatment2col Column number in which treatment 2 is indicated (if applicable).
#' If there is only one treatment indicate the same column as treatment1col.
#' @param treatment numeric. Set 1 to split the dataset according to treatment1, 2 to split the
#' dataset according to treatment 2 and 3 to split the dataset according to the combination of both treatments.
#' @param imputation Method for imputation, "RF" - Random Forests,"KNN" - K-nearest neighbor, or "none". If NULL Random Forest methods will be employed.This can be provided as a single value or a vector with the same number of elements than list. Each vector element can be a different imputation method or none
#' @param imputhld Maximum number of NAs, in %, per variable to be imputed.
#' @param k Number of neighbors employed in KNN algorithm.
#' @param parallel boleean. If parallel=TRUE missing value imputation algorithm will be run in parallel.
#' @param varsel boleean, If TRUE Variable selection based on consistency criteria will be used to pre-filter the data. Variables present in less than the stablished varselthld will be dropped out the analysis.
#' @param varselthld Minimum number of significant (not zero) values, in %, per variable to be kept.
#' @param abdbal Abundance balancing normalization. "sample" - sample centric approach, "AvgIntensity" - data will be processed according to a sample centric approach and then each value is multipled by average intensity (sum of the intensities of all variables within a sample) of all samples. "TreatAvgIntensity" - data is sample centric normalized and then multiplied by the average intensity of samples of specific treatments. "none" - no abundance balancing is performed. If NULL "AvgIntensity" will be employed. This can be provided as a single value or a vector with the same number of elements than list. Each vector element can be a different abundance balancing method or none
#' @return A POL class object, a list with the processed dataset
#' @details The objective \code{preprocess_omic_list} is providing a first step of data preprocessing before uni and multivariate statistics are performed. The need of removing inconsistent variables (those present in only one or two samples, or really close to detection limit) and also defining which values are NA or zeroes for later imputation is a constant when working with omic datasets. To this end within this function the user can select the range of data to be analyzed, how it will be processed (considering all dataset together, or splitting it in the different treatments) towards the definition and imputation of missing values and also for balancing the abundance of each sample.
#' All tables must have the same structure with cases in rows and variables in columns. Row names, and columns in which treatments are defined should have the same name and order across the different datasets. By default, first columns should be devoted to indicate treatments. Row names should be unique, and preferable equal to database accession names in order to use later annotation steps (see example datasets).
#' First step of this function is deciding which value should be a 0 or NA. To this end all NAs are turned to 0, and then it is decided if specific values should remain as 0 or set as NA for imputation. \code{threshold} argument defines the maximum number of zeroes allowed for imputation within each variable and treatment (defined in \code{treatment}).
#' The second step of the function balance the abundance of the variables according to different methods: sample centric approach; average intensity of samples, or average intensity within treatment.
#' @author Luis Valledor and Laura Lamelas
#' @export
#' @importFrom methods hasArg
#' @importFrom parallel detectCores
#' @importFrom stats var
preprocess_omic_list <- function(datalist, initialrow=1, initialcolumn=2, treatment1col=1, treatment2col=2, treatment=1, imputation=NULL, imputhld=0.25, k=3, parallel=FALSE, varsel=FALSE, varselthld=0.25, abdbal=NULL){
#### Initial checks ####
# Dataset names, number of rows, cases, etc.
if(methods::hasArg(datalist)==FALSE) stop("\nPlease introduce a valid dataset\n")
#if(class(datalist) != "list")
if(is.list(datalist)==F)
stop("A list of matrices or dataframes corresponding to each level is expected")
if(is.null(names(datalist)))
stop("Names of elements in list are missing. Please provide names for each level")
# All omic levels must contain the same number of cases: transpose each
# level's numeric sub-matrix and compare its column count against the first.
if(all(sapply(lapply(datalist, function(x) t(x[initialrow:nrow(x),initialcolumn:ncol(x)])), dim)[2,]==sapply(lapply(datalist, function(x) t(x[initialrow:nrow(x),initialcolumn:ncol(x)])), dim)[2,1])==FALSE)
stop("The different matrices have an unequal number of individuals. The number of cases of each level should be the same. Please read instructions for more information")
# Case (row) names must be identical, in the same order, across every level.
dataframesnames<-lapply(datalist, function(x) rownames(x))
if(all(unlist(lapply(dataframesnames, function(x) identical(dataframesnames[[1]],x)))==TRUE)==FALSE)
stop("Individuals have different names across levels. Please check original matrices")
# Are all variables to be analyzed numeric?
# (apply() coerces each data frame to a matrix first, so a single character
# column makes the whole matrix -- and hence every column -- non-numeric.)
for(i in 1:length(datalist)){
if(all(apply(datalist[[i]][initialrow:nrow(datalist[[i]]),initialcolumn:ncol(datalist[[i]])],2,function(x) is.numeric(x))==F)) stop(paste("Check your input. Non numeric values in",names(datalist)[i], "dataset",sep=" "))
}
# Set imputation and abundance balancing defaults if not given by user
if(is.null(imputation)) imputation <-"RF"
if(is.null(abdbal)) abdbal <-"AvgIntensity"
# Check imputation method(s); a single value is recycled to one per dataset.
if(FALSE %in% (imputation %in% c("RF","KNN","none"))==TRUE) stop("Please select a valid imputation method")
if(length(imputation)==1) imputation <- c(rep(imputation, length(datalist)))
if(length(imputation)!=length(datalist)) stop("A vector containing one-common- or n elements (where n is the number of datasets) indicating imputation method(s) is expected")
# Check abundance balancing method(s); same recycling rule as imputation.
if(FALSE %in% (abdbal %in% c("Sample","AvgIntensity","TreatAvgIntensity","none"))==TRUE) stop("Please select a valid abundance balancing method")
if(length(abdbal)==1) abdbal <- rep(abdbal, length(datalist))
if(length(abdbal)!=length(datalist)) stop("A vector containing one-common- or n elements (where n is the number of datasets) indicating abundance balancing method(s) is expected")
# Check k and threshold
if(!is.numeric(k)) stop("Number of neighbors, k, should be a numeric constant")
# Informational only: k is ignored unless at least one dataset uses KNN.
# (Note this uses cat() while the rest of the function uses texts_wizard().)
if(any(imputation %in% "KNN")==FALSE) cat("Random Forest or none imputation method has been selected. k value,if provided, will be ignored")
threshold<-as.numeric(imputhld)
# NOTE(review): as.numeric() always returns a numeric (possibly NA), so this
# stop() is effectively unreachable; an is.na(threshold) check may be intended.
if(!is.numeric(threshold)) stop("Threshold for defining NA or 0, threshold, should be a numeric constant betwen 0 and 1")
# Check variable selection settings; the threshold is recycled per dataset.
if(varsel==TRUE & is.null(varselthld)) stop("Please select adequate threshold for VarSelect")
if(!is.null(varselthld)) if(length(varselthld)!=1&length(varselthld)!=length(datalist)) stop("A vector containing one -common- or n (where n is the number of datasets) thresholds is expected")
if(!is.null(varselthld)) if(length(varselthld)==1) varselthld <- rep(varselthld, length(datalist))
datasetnames<-names(datalist)
#### Removing empty columns ####
texts_wizard(paste("\nREMOVING EMPTY COLUMNS OF ALL DATASETS\n"))
texts_wizard("Single processor core will be used. It may take a while...\n")
datalist<-lapply(datalist, function(x) RemoveEmptyColumns(x,initialrow=initialrow,initialcolumn=initialcolumn))
# lapply() result keeps the names, but restore them explicitly to be safe.
names(datalist)<-datasetnames
#### Imputation ####
datasetnames<-names(datalist)
# The serial and parallel branches below are identical except for `cores`.
if(parallel==FALSE){
# imputation is defaulted to "RF" above, so it is never NULL at this point;
# the guard is kept defensively.
if(!is.null(imputation)){
texts_wizard("\n\nMISSING VALUE IMPUTATION\n")
texts_wizard("Single processor core will be used. It may take a while...\n")
for (i in 1:length(datalist)){
texts_wizard(paste(imputation[i], " imputation method will be used for ", names(datalist)[i]," dataset\n",sep=""))
}
# One imputation run per dataset, pairing each dataset with its method/name.
datalist<-mapply(function(x,y,z){
NAorZero_Imput(x,initialrow=initialrow,initialcolumn=initialcolumn,treatment1col=treatment1col, treatment2col=treatment2col, treatment=treatment,threshold=threshold,imputation=y,k=k,cores=1,datasetname = z)
},datalist,imputation,datasetnames,SIMPLIFY = FALSE)
}
}
if(parallel==TRUE){
#suppressMessages(suppressWarnings(require(doParallel)))
# Leave one core free for the OS / interactive session.
cores=(parallel::detectCores()-1)
if(!is.null(imputation)){
texts_wizard("\n\nMISSING VALUE IMPUTATION\n")
texts_wizard(paste("Multiple processor cores (", cores, ") will be used. It may take a while...\n",sep=""))
for (i in 1:length(datalist)){
texts_wizard(paste(imputation[i], " imputation method will be used for ", names(datalist)[i]," dataset\n",sep=""))
}
datalist<-mapply(function(x,y,z){
NAorZero_Imput(x,initialrow=initialrow,initialcolumn=initialcolumn,treatment1col=treatment1col, treatment2col=treatment2col, treatment=treatment,threshold=threshold,imputation=y,k=k,cores=cores,datasetname = z)
},datalist,imputation,datasetnames,SIMPLIFY = FALSE)
}
}
#### Variable selection ####
# Optional consistency pre-filter: drops variables below varselthld (see the
# roxygen header for the exact criterion implemented by VarSelect()).
if(varsel==TRUE){
texts_wizard("\n\nSELECTING VARIABLES BASED ON CONSISTENCY\n")
texts_wizard("Single processor core will be used. It may take a while...\n")
datalist<-mapply(function(x,y,z){
VarSelect(x,initialrow=initialrow,initialcolumn=initialcolumn,treatment1col = treatment1col, treatment2col = treatment2col, treatment = treatment,datasetname = z,threshold=y)
},datalist,varselthld,datasetnames,SIMPLIFY = FALSE)
}
#### Abundance balancing ####
if(!is.null(abdbal)){
texts_wizard("\n\nABUNDANCE BALANCING\n")
texts_wizard("Single processor core will be used. It may take a while...\n")
for (i in 1:length(datalist)){
texts_wizard(paste(abdbal[i], " balancing will be used for ", names(datalist)[i]," dataset\n",sep=""))
}
datalist<-mapply(function(x,y,z){
AbdBal(x,initialrow=initialrow,initialcolumn=initialcolumn,treatment1col=treatment1col, treatment2col=treatment2col, treatment=treatment,norm=y,datasetname=z)
},datalist,abdbal,datasetnames,SIMPLIFY = FALSE)
}
#### Final report ####
# NOTE(review): `header` is built but never printed or returned -- possibly a
# missing texts_wizard(header) call; confirm before removing.
header <- c("\n\n\nSUMMARY: Dataset imputation, filtering and balancing\n-----------------------------------------\n")
header <- paste(header, paste(names(datalist),collapse=", "), " datasets were considered\n",sep="")
# Tag the result so downstream pRocessomics functions recognise it.
class(datalist) <-"POL"
texts_wizard("\n\n Job finished!\n\n")
return(datalist)
}
|
dd96d20c7192cf02e3eb12c60e87a2ebbf34a41c | b3aea4451ebe458f962fbd24bfa2b2b415f2c3e9 | /Team_D/Brenda_Li_Final_Project_Code.R | 1e3c9e688329de8c2093d0ad2e94c538f92b02b6 | [] | no_license | Middlebury-Statistical-Learning/Final_Project | 1d084f14a5ab6155f4336e382093459663ea640c | 13a532daf2cda2f7f2efb79a6d4daa61b672e5e4 | refs/heads/master | 2021-01-20T14:46:51.181848 | 2017-06-01T22:14:31 | 2017-06-01T22:14:31 | 90,657,107 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 14,024 | r | Brenda_Li_Final_Project_Code.R | #-------------------------------------------------------------------------------
# Spring 2017 MATH 218 Statistical Learning Final Project
# Due Tuesday 2017/5/23 12:00pm
#
# Team Members: Brenda Li
# Kaggle Competition Name: Africa Soil Property Prediction
# Kaggle Competition URL: https://www.kaggle.com/c/afsis-soil-properties
#-------------------------------------------------------------------------------
# 1. Load All Necessary Packages ------------------------------------------
library(dplyr)
library(ggplot2)
library(gridExtra)
library(glmnet)
library(tidyverse)
library(broom)
library(grid)
library(gridExtra)
library(knitr)
# 2. Load Data Files & Data Cleaning --------------------------------------
# Load the training data and recode the categorical Depth column as a numeric
# indicator (1 when Depth == "Subsoil", 0 otherwise), dropping the original
# text column afterwards.
training <- read.csv("Files/soil_training.csv")
training$int_depth <- ifelse(training$Depth == "Subsoil", 1, 0)
training$Depth <- NULL
# The test set is loaded as-is; its Depth column is recoded later, just
# before prediction.
test <- read.csv("Files/soil_test.csv")
# 3. Top 4-5 Visualizations/Tables of EDA ---------------------------------
# EDA of Outcome Variables
# Histograms of the distributions of each outcome variable (50 bins each)
p1<-ggplot(training,aes(x=Ca))+geom_histogram(bins=50)+ggtitle("Distribution of Ca Values")
p2<-ggplot(training,aes(x=P))+geom_histogram(bins=50)+ggtitle("Distribution of P Values")
p3<-ggplot(training,aes(x=pH))+geom_histogram(bins=50)+ggtitle("Distribution of pH Values")
p4<-ggplot(training,aes(x=Sand))+geom_histogram(bins=50)+ggtitle("Distribution of Sand Values")
p5<-ggplot(training,aes(x=SOC))+geom_histogram(bins=50)+ggtitle("Distribution of SOC Values")
grid.arrange(p1,p2,p3,p4,p5,ncol=3,
top="Distribution of Each Outcome Variable in the Training Set")
# We can see that the distribution of Ca, P, and Soc are unimodal and peak around 0 but are all skewed
# to the right. The distributions of pH and Sand are centered around 0 and are bimodal and multimodal
# respectively.
# EDA with respect to the spatial autocorrelation issues (See report PDF for context)
# The next two figures place the training and test distributions of the same
# covariate side by side, to check whether both sets sample the same regions.
# Histograms of Elevation
p6<-ggplot(training,aes(x=ELEV))+geom_histogram(bins=50)
p7<-ggplot(test,aes(x=ELEV))+geom_histogram(bins=50)
grid.arrange(p6,p7,ncol=2,
top="Distribution of Elevation Values in the Training Set Versus the Test Set")
# It seems that most of the samples in the training data were collected from one of two elevation intervals,
# between -1.33 and 0,and between 0 and 2 (note that the elevation values have been sampled and scaled).
# However, we see in the case of the test data that while the distribution peak at approximately the same
# two intervals, the spread of each interval is different and there are hardly any outlier valaues that lie
# outside of those intervals in the test set as opposed to in the training data. This suggests that the
# training data and the test data do seem to be representative of different "soil populations", which
# implies that my model will be overfit to the training data.
# Histograms of Precipitation
p8<-ggplot(training,aes(x=TMAP))+geom_histogram(bins=30)
p9<-ggplot(test,aes(x=TMAP))+geom_histogram(bins=30)
grid.arrange(p8,p9,ncol=2,
top="Distribution of Mean Annual Precipitation Values in the Training Set Versus the Test Set")
# We can see that the distributions of the Mean Annual Precipitation differ significantly between the test
# dataset and the training dataset. Whereas the histogram for the training data is almost a bell curve, the
# distribution for the test set looks almost uniform aside from a couple peaks. Again, this confirms that
# there is a notable difference between the regions from which the training and test datasets were sampled.
# 4. Cross-Validation of Final Model --------------------------------------
# The five outcome variables share an identical modelling recipe (LASSO on
# every predictor), so the formula construction is factored into one helper
# instead of being copy-pasted five times. The slow CV searches themselves
# are kept commented out (as before); the optimal lambdas and CV MSEs they
# produced are hard-coded afterwards.

# Build "<outcome> ~ pred1 + pred2 + ..." over every column of `training`
# except the sample id (PIDN) and the five outcome columns.
build_formula_string <- function(outcome) {
  predictors <- setdiff(names(training), c("PIDN", "Ca", "P", "pH", "SOC", "Sand"))
  stringr::str_c(outcome, " ~ ", stringr::str_c(predictors, collapse = " + "))
}

# as.formula() is called at top level so each formula keeps the global
# environment, exactly as the original per-variable code did.
model_formula_Ca   <- as.formula(build_formula_string("Ca"))
model_formula_P    <- as.formula(build_formula_string("P"))
model_formula_SOC  <- as.formula(build_formula_string("SOC"))
model_formula_pH   <- as.formula(build_formula_string("pH"))
model_formula_Sand <- as.formula(build_formula_string("Sand"))

# CV template run once per outcome (swap the formula/response and the lambda
# search range; ranges used were Ca/P: 10^[-6,-3], SOC: 10^[-7,-5],
# pH: 10^[-10,0], Sand: 10^[-6,-4]):
# X <- model.matrix(model_formula_Ca, data = training)[, -5]
# y <- training$Ca
# lambda_values <- 10^seq(from = -6, to = -3, length = 1000)
# cvfit <- cv.glmnet(X, y, alpha = 1, type.measure = "mse",
#                    lambda = lambda_values, nfolds = 5)
# lambda_star <- cvfit %>% glance() %>% .[["lambda.min"]]
# MSE <- cvfit$cvm[match(lambda_star, lambda_values)]

# Optimal lambdas and their 5-fold CV MSEs from the runs above.
lambda_star_LASSO_Ca <- 0.000807062
MSE_Ca <- 0.1489273
lambda_star_LASSO_P <- 1.013925e-06
MSE_P <- 0.1405889
lambda_star_LASSO_SOC <- 9.954008e-06
MSE_SOC <- 0.1542125
lambda_star_LASSO_pH <- 5.293266e-07
MSE_pH <- 0.1105821
lambda_star_LASSO_Sand <- 4.452959e-06
MSE_Sand <- 0.1235523

# Mean Columnwise Root Mean Squared Error: average of the per-outcome RMSEs
# (this is the Kaggle competition metric).
MSE_vector <- c(sqrt(MSE_Ca), sqrt(MSE_P), sqrt(MSE_pH), sqrt(MSE_Sand), sqrt(MSE_SOC))
MCRMSE <- mean(MSE_vector)
MCRMSE

# The CV estimated MCRMSE is 0.3675202, which is quite far off from the error
# generated by Kaggle, probably because the test dataset was sampled from
# different regions than the training dataset. As discussed in the report,
# the training and test datasets are not representative of the same "soil
# populations", which means the model is likely overfit to the training data.
# 5. Create Submission ----------------------------------------------------
# This section also takes a while to run but is more feasible than the
# commented-out CV sections above.

# Test-set copy with dummy outcome columns (model.matrix needs the response
# column to exist) and the same numeric depth recoding as `training`.
fake_test <- test %>%
  mutate(Ca = 1, pH = 1, P = 1, Sand = 1, SOC = 1) %>%
  # BUG FIX: this previously compared against "SubSoil" (capital S) while the
  # data -- and the training recode above -- use "Subsoil", so every test row
  # silently got int_depth = 0.
  mutate(int_depth = ifelse(Depth == "Subsoil", 1, 0)) %>%
  select(-Depth)

# Fit the optimal LASSO (lambda chosen by CV) for one outcome on the full
# training set and predict the test set. Returns both the fitted model (used
# again in the Extras section) and the predictions.
fit_and_predict <- function(model_formula, outcome, lambda) {
  X <- model.matrix(model_formula, data = training)[, -5]   # column 5 dropped, as in the CV code
  y <- training[[outcome]]
  X_test <- model.matrix(model_formula, data = fake_test)[, -5]
  model <- glmnet(X, y, alpha = 1, lambda = lambda)
  list(model = model, predictions = predict(model, newx = X_test))
}

fit_Ca <- fit_and_predict(model_formula_Ca, "Ca", lambda_star_LASSO_Ca)
optimal_model_LASSO_Ca <- fit_Ca$model
LASSO_predictions_Ca <- fit_Ca$predictions

fit_pH <- fit_and_predict(model_formula_pH, "pH", lambda_star_LASSO_pH)
optimal_model_LASSO_pH <- fit_pH$model
LASSO_predictions_pH <- fit_pH$predictions

fit_P <- fit_and_predict(model_formula_P, "P", lambda_star_LASSO_P)
optimal_model_LASSO_P <- fit_P$model
LASSO_predictions_P <- fit_P$predictions

fit_Sand <- fit_and_predict(model_formula_Sand, "Sand", lambda_star_LASSO_Sand)
optimal_model_LASSO_Sand <- fit_Sand$model
LASSO_predictions_Sand <- fit_Sand$predictions

fit_SOC <- fit_and_predict(model_formula_SOC, "SOC", lambda_star_LASSO_SOC)
optimal_model_LASSO_SOC <- fit_SOC$model
LASSO_predictions_SOC <- fit_SOC$predictions

# Binding all the predictions together (the variable names still have to be
# fixed by hand in the CSV file before Kaggle submission).
submission_LASSO2 <- cbind(as.character(test$PIDN), LASSO_predictions_Ca, LASSO_predictions_P,
                           LASSO_predictions_pH, LASSO_predictions_Sand, LASSO_predictions_SOC)
submission_LASSO2 %>%
  write.csv("Files/Soil_Submission_LASSO2.csv")

# Submitting the above submission.csv file to Kaggle gives a score of 0.62444,
# which is not very close to the estimated score above -- again due to the
# train/test sampling mismatch described in the report.
# 6. Extras ---------------------------------------------------------------
# ANALYSIS OF OPTIMAL MODELS

# Tidy coefficient table for one fitted glmnet model, with the absolute
# coefficient size added for ranking/filtering.
tidy_coef_table <- function(model) {
  model %>%
    tidy() %>%
    as.data.frame() %>%
    mutate(abs_estimate = abs(estimate))
}

Ca_model_table <- tidy_coef_table(optimal_model_LASSO_Ca)
P_model_table <- tidy_coef_table(optimal_model_LASSO_P)
pH_model_table <- tidy_coef_table(optimal_model_LASSO_pH)
Sand_model_table <- tidy_coef_table(optimal_model_LASSO_Sand)
# The SOC table keeps its historical column name so the display tables below
# match the original output.
SOC_model_table <- tidy_coef_table(optimal_model_LASSO_SOC) %>%
  rename(SOC_Coeff = term)

# Examining the lambdas used in the optimal models:
optimal_lambdas <- c(lambda_star_LASSO_Ca, lambda_star_LASSO_P, lambda_star_LASSO_pH,
                     lambda_star_LASSO_Sand, lambda_star_LASSO_SOC)
optimal_lambdas
# All the lambda values are quite small, which means not a lot of shrinkage occured

# Number of coefficients the LASSO shrank exactly to zero for one model.
count_shrunk <- function(model_table) {
  nrow(model_table %>% filter(abs_estimate == 0))
}
n_Shrunk_Ca <- count_shrunk(Ca_model_table)
n_Shrunk_P <- count_shrunk(P_model_table)
n_Shrunk_pH <- count_shrunk(pH_model_table)
n_Shrunk_Sand <- count_shrunk(Sand_model_table)
n_Shrunk_SOC <- count_shrunk(SOC_model_table)
n_Shrunk_all <- c(n_Shrunk_Ca, n_Shrunk_P, n_Shrunk_pH, n_Shrunk_Sand, n_Shrunk_SOC)

# Table of lambda versus number of variables shrunk to zero for each optimal model
grid.newpage()
grid.arrange(tableGrob(cbind(optimal_lambdas,n_Shrunk_all)),
top="Lambda Values and Number of Predictor Variables Shrunk to 0 for
the Optimal Ca, P, pH, Sand, and SOC Models")
# We can see that the larger the lambda, the more predictor variables are shrunk to 0,
# which obviously makes sense. I did find it interesting, however, that setting lambda
# to a mere 0.0008 shrinks 3304 variables to 0, leaving only 276 variables left in the model.

# Ten most predictive variables (largest |coefficient|) of one model; the
# coefficient column (column 1 of the tidy table) is renamed for display.
top_coef_table <- function(model_table, coeff_name) {
  out <- model_table %>%
    arrange(desc(abs_estimate)) %>%
    select(1, abs_estimate) %>%
    head(10)
  names(out)[1] <- coeff_name
  out
}
mini_Ca_table <- top_coef_table(Ca_model_table, "Ca_Coeff")
mini_P_table <- top_coef_table(P_model_table, "P_Coeff")
mini_pH_table <- top_coef_table(pH_model_table, "pH_Coeff")
mini_Sand_table <- top_coef_table(Sand_model_table, "Sand_Coeff")
mini_SOC_table <- top_coef_table(SOC_model_table, "SOC_Coeff")

grid.newpage()
grid.arrange(tableGrob(mini_Ca_table),tableGrob(mini_P_table),tableGrob(mini_pH_table),
tableGrob(mini_Sand_table),tableGrob(mini_SOC_table),ncol=3,
top="Most Predictive Variables of Each Outcome Variable")
# We can see that there are only a few variables that are among the top ten most predictive
# variables for more than one model. For example, m1787.71 is a highly predictive variable for both
# the Ca model as well as the Sand and SOC models, but most variables are only a strong predictor for one
# of the outcome variables.
|
7b25010de8f1c789440650953184402c1f3de480 | 675d0e46df0b750781353690b07c7ea31d4e824c | /plot3.R | 941b9045c3391ab328e5b53bfab8ccb3fad08328 | [] | no_license | rabund/ExData_Plotting1 | 5486795e85551cb96e353b633b4fc6e6a819b76a | 61cdf09c2e773d9b4ada6ffcd21d0d892fba3a23 | refs/heads/master | 2021-01-19T07:09:30.874631 | 2017-04-07T19:56:48 | 2017-04-07T19:56:48 | 87,526,377 | 0 | 0 | null | 2017-04-07T08:59:18 | 2017-04-07T08:59:17 | null | UTF-8 | R | false | false | 1,003 | r | plot3.R | plot3 <- function(t) {
## Plot all three sub-meterings against timestamp and save to plot3.png.
## `t` must have columns: timestamp, Sub_metering_1, Sub_metering_2,
## Sub_metering_3.
## Portability fix: the old ".\\plot3.png" embedded a literal backslash in
## the file name on non-Windows platforms; a plain relative name works
## everywhere.
outFile <- "plot3.png"
png(outFile, width = 480, height = 480)
## Robustness fix: close the device even if plotting errors out, so a
## failure no longer leaves a dangling graphics device open.
on.exit(dev.off(), add = TRUE)
## set the ranges to define the axes
xrange <- range(t$timestamp)
yrange <- range(c(t$Sub_metering_1, t$Sub_metering_2, t$Sub_metering_3))
plot(xrange, yrange, type = "n",
     main = NULL,
     xlab = "",
     ylab = "Energy sub metering")
## add the lines per meter
lines(t$timestamp, t$Sub_metering_1, type = "l", col = "black")
lines(t$timestamp, t$Sub_metering_2, type = "l", col = "red")
lines(t$timestamp, t$Sub_metering_3, type = "l", col = "blue")
## add the legend; "^Sub" fixes the old "^Sub*" pattern, which (b* being
## optional) matched any column name starting with "Su"
legend("topright", legend = names(t)[grep("^Sub", names(t))],
       lty = c(1, 1, 1), col = c("black", "red", "blue"),
       lwd = c(2, 2, 2))
}
52dfcd3018aa93610d848d0ba904b5f9b16f019d | 40c40770f7ff82c30fa6e58d948f9b90508758c8 | /man/ML_treeplot.Rd | 0859b30376c4eff7b30dd661d5d99158f22ee139 | [] | no_license | NatalieKAndersson/DEVOLUTION | c61a5ff65d1322f1ec4794435d2c17bf6f2e65fa | 7ec9cec2ef04cdacb47db6413b41c352b0460d7f | refs/heads/master | 2023-08-09T20:13:18.565107 | 2023-07-12T13:05:39 | 2023-07-12T13:05:39 | 297,145,258 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 526 | rd | ML_treeplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DEVOLUTION.R
\name{ML_treeplot}
\alias{ML_treeplot}
\title{Visualising the ML-tree.}
\usage{
ML_treeplot(ML_tree, limitml, col)
}
\arguments{
\item{col}{Whether or not the phylogeny should be colored.}
\item{ML_tree}{The ML-tree object obtained from ml_tree().}
\item{limitml}{The size of the plotting environment. Choose it so that the entire phylogeny is clearly visible.}
}
\value{
The ML tree.
}
\description{
Visualizes the ML-tree object.
}
|
7455546fd5f82340ebfa7339c108904711c4c6c4 | dc3e41f013c39f912f6e8058d1feaa855244841c | /ShinyApp/server.R | 066e72f3e7b42b3d8622c05f62963d6cd24f9247 | [] | no_license | dfan/CWAS_Census | 5848a1bcc4a5c71b9eb89c37152d72acc623920e | f7147bf96bfb425dcde6d28c2830888a0449a0fe | refs/heads/master | 2022-12-10T02:15:53.518476 | 2020-09-14T03:40:06 | 2020-09-14T03:40:06 | 61,927,254 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,152 | r | server.R |
# Define server logic required to output displays.
# The app plots US census data (or a user-uploaded table) as choropleth maps
# at region/state/county detail, and offers sortable statistics tables and
# ICD9 rate tables. Plot helpers (plotMap, plotDiffMap, getBuckets, the
# aggregate* functions, addStatCol, se) come from helper.R.
shinyServer(function(input, output, session) {
  # Ask the client (custom JS message handler) to resize the plot container
  # to 100vh per requested map whenever the main "go" button is pressed.
  observeEvent(input$action,{
    session$sendCustomMessage(type = 'resize', message = paste0(100 * getTotal(), 'vh'))
  })
  # loading stuff
  # Startup: attach packages, source helpers, read the census tables from a
  # local MySQL server and the pre-aggregated CSVs.
  # NOTE(review): DB credentials, the MAMP socket path and setwd() are
  # hard-coded for a local development machine.
  withProgress(message = 'Loading...', value = 0.1, {
    # use install.packages("devtools") and install_github('arilamstein/choroplethrZip@v1.3.0', force = TRUE) for zip maps
    libraries <- c('shiny', 'RMySQL', 'choroplethr', 'ggplot2', 'gtable', 'gridExtra', 'grid', 'choroplethrZip', 'shinysky', 'DT')
    withProgress(message = 'Packages: ', value = 0.0, {
      for (i in 1:length(libraries)) {
        Sys.sleep(0.05)
        library(libraries[i],character.only=TRUE) # loading from string
        incProgress(1 / length(libraries), detail = libraries[i])
      }
    })
    data(state)
    source('helper.R')
    # for animation purposes
    sapply(seq(from=0, to=100, by=1), function(i) incProgress(0.01, detail = paste0(i, '%')))
    con <- dbConnect(MySQL(), user = "root", password = "root", dbname = "census2000", unix.sock="/Applications/MAMP/tmp/mysql/mysql.sock")
    data2000 <- dbReadTable(conn = con, name = "acs")
    dbDisconnect(con)
    con <- dbConnect(MySQL(), user = "root", password = "root", dbname = "census2010", unix.sock="/Applications/MAMP/tmp/mysql/mysql.sock")
    data2010 <- dbReadTable(conn = con, name = "acs")
    setwd('/Users/dfan/Dropbox/Research\ Lab\ Projects/Undergraduate/Harvard-MIT\ 2016/Code/CWAS_Census/ShinyApp')
    # Each CSV is read twice: once to discover column classes, then again
    # forcing the first column to character (preserves leading-zero codes).
    dataCombined <- read.csv('censusCombined.csv', stringsAsFactors=FALSE, check.names=FALSE)
    dataCombined <- read.csv('censusCombined.csv', stringsAsFactors=FALSE, check.names=FALSE, colClasses = c('character', sapply(names(dataCombined), function(x) class(dataCombined[,x]))[-1]))
    colnames(dataCombined) <- c('county', read.csv('../Data/censusColNames.csv', stringsAsFactors=FALSE)[, 1])
    dbDisconnect(con)
    zipTable <- read.csv('../Data/zcta_county.csv', stringsAsFactors=FALSE, colClasses=c("ZCTA5"="character", "STATE" = "character", "COUNTY" = "character"))
    # already aggregated in another file to save time
    # read once to get column classes
    dataState <- read.csv('censusState.csv', stringsAsFactors=FALSE, check.names=FALSE)
    dataState <- read.csv('censusState.csv', stringsAsFactors=FALSE, check.names=FALSE, colClasses = c('character', sapply(names(dataState), function(x) class(dataState[,x]))[-1]))
    dataRegion <- read.csv('censusRegion.csv', stringsAsFactors=FALSE, check.names=FALSE)
    dataRegion <- read.csv('censusRegion.csv', stringsAsFactors=FALSE, check.names=FALSE, colClasses = c('character', sapply(names(dataRegion), function(x) class(dataRegion[,x]))[-1]))
    # retain leading zeros for ZCTA5 codes
    options(shiny.maxRequestSize=150*1024^2)
    # Increment the top-level progress indicator
  })
  # Navigation buttons on the landing page jump to the relevant tab.
  observeEvent(input$button1, {
    updateTabsetPanel(session, "Page", selected = "Maps")
  })
  observeEvent(input$button2, {
    updateTabsetPanel(session, "Page", selected = "Maps")
  })
  observeEvent(input$button3, {
    updateTabsetPanel(session, "Page", selected = "Table")
  })
  observeEvent(input$button4, {
    updateTabsetPanel(session, "Page", selected = "ICD9s")
  })
  # Small reactive accessors for the sidebar controls.
  getTotal <- reactive({        # number of maps requested
    return (as.numeric(input$total))
  })
  getNumCols <- reactive({      # grid columns for the map layout
    return (as.numeric(input$cols))
  })
  getNumRows <- reactive({      # grid rows for the map layout
    return (as.numeric(input$rows))
  })
  whichData <- reactive({       # census data vs user-uploaded data
    return (input$whichMapData)
  })
  getDetail <- reactive({       # Region / State / County granularity
    return (input$detailLevel)
  })
  # height parameter in plotOuput doesn't work when you do arithmetic.. even tho the number is rendered
  getWidth <- reactive({
    return (session$clientData$output_allmaps_width)
    # return (session$clientData[[paste0('output_', 'map1', '_height')]])
  })
  getHeight <- reactive({
    return (session$clientData$output_allmaps_width)
  })
  # for map display (suppressing error messages)
  isUploaded <- reactive({
    return(!is.null(input$file1))
  })
  # for conditional panel
  output$isUploaded <- reactive({
    return(!is.null(input$file1))
  })
  outputOptions(output, 'isUploaded', suspendWhenHidden=FALSE)
  # Collect the raw values that each map will color by (plain, difference,
  # or percent-difference of two columns), so bucket boundaries can be
  # computed consistently across all maps.
  bucketData <- reactive({
    input$action
    isolate(total <- getTotal())
    if (input$whichMapData == 'Plot by census data') {
      if (input$detailLevel == 'County') {
        data <- dataCombined
      } else if (input$detailLevel == 'State') {
        data <- dataState
      } else if (input$detailLevel == 'Region') {
        data <- dataRegion
      }
    }
    if (input$whichMapData == 'Plot by user data') {
      raw <- readTable()
      if (input$detailLevel == 'State') {
        data <- aggregateUserToState(raw)
      } else if (input$detailLevel == 'Region') {
        data <- aggregateUserToRegion(aggregateUserToState(raw))
      } else {
        data <- raw
      }
    }
    list <- sapply(1:total, function(i) {
      if (input$percentdifference) {
        # we don't want division by zero. But save in temp so that original isn't overwritten if multiple plots are generated
        temp <- data[which(data[, input[[paste0('variable', i, 'a')]]] != 0), ]
        (temp[, input[[paste0('variable', i, 'b')]]] - temp[, input[[paste0('variable', i, 'a')]]]) / temp[, input[[paste0('variable', i, 'a')]]]
      } else if (!input$difference) {
        data[, input[[paste0('variable', i, 'a')]]]
      } else if (input$difference) {
        data[, input[[paste0('variable', i, 'b')]]] - data[, input[[paste0('variable', i, 'a')]]]
      }
    })
    return(list)
  })
  # One color selection per map.
  colorList <- reactive({
    input$action
    isolate(total <- getTotal())
    list <- sapply(1:total, function(i) {
      input[[paste0('color', i)]]
    })
    return(list)
  })
  legendColor <- reactive({
    return(input$legendcolor)
  })
  # isolate -> dependency on go button
  # Build the list of rendered map grobs (one per requested map).
  plotObjects <- eventReactive(input$action, {
    # isolate mapType value update so that reactive dependencies don't override the isolated go button
    isolate(total <- getTotal())
    values <- reactiveValues(i = 0)
    # change from county to state data here
    if (input$whichMapData == 'Plot by census data') {
      type <- 'census'
      if (input$detailLevel == 'County') {
        data <- dataCombined
        # get rid of leading zeros
        data[, 1] <- as.numeric(sapply(data[, 1], function(y) sub('^0+([1-9])', '\\1', y)))
      } else if (input$detailLevel == 'State') {
        data <- dataState
      } else if (input$detailLevel == 'Region') {
        data <- dataRegion
      }
    }
    if (input$whichMapData == 'Plot by user data') {
      type <- 'user'
      raw <- readTable()
      if (input$detailLevel == 'County') {
        #data <- aggregateUsertoCounty(raw, zipTable)
        data <- raw
        data[, 1] <- as.numeric(sapply(data[, 1], function(y) sub('^0+([1-9])', '\\1', y)))
      } else if (input$detailLevel == 'State') {
        # leading 0s are removed in aggregate function
        data <- aggregateUserToState(raw)
      } else if (input$detailLevel == 'Region') {
        data <- aggregateUserToRegion(aggregateUserToState(raw))
      }
    }
    legend <- ''
    # With a single map the legend is drawn together with the map; with
    # several maps a shared legend is rendered separately (plotLegend).
    if (total == 1)
      legend <- 'legendandmap'
    colorList <- colorList()
    progress <- shiny::Progress$new()
    progress$set(message = "Plotting...", value = 0)
    on.exit(progress$close())
    plotList <- lapply(1:total, function(i) {
      if (input$percentdifference) {
        progress$inc(1/total, detail = paste("map", i))
        # don't overwrite in case there are multiple plots
        temp <- data[which(data[, input[[paste0('variable', i, 'a')]]] != 0), ]
        plotPercentDiffMap(input[[paste0('variable', i, 'a')]], input[[paste0('variable', i, 'b')]], type, temp, paste("USA Colored by % Difference in", input[[paste0('variable', i, 'a')]], 'and', input[[paste0('variable', i, 'b')]]), colorList[i], getBuckets(bucketData(), 'Percent'), getDetail(), legend, input$percent, NULL)$render()
      } else if (input$difference) {
        # needs to be inside for some reason
        progress$inc(1/total, detail = paste("map", i))
        plotDiffMap(input[[paste0('variable', i, 'a')]], input[[paste0('variable', i, 'b')]], type, data, paste("USA Colored by Difference in", input[[paste0('variable', i, 'a')]], 'and', input[[paste0('variable', i, 'b')]]), colorList[i], getBuckets(bucketData(), 'Difference'), getDetail(), legend, input$percent, NULL)$render()
      } else if (!input$difference) {
        # order matters; value line goes first
        progress$inc(1/total, detail = paste("map", i))
        plotMap(input[[paste0('variable', i, 'a')]], type, data, paste("USA Colored by", input[[paste0('variable', i, 'a')]]), colorList[i], getBuckets(bucketData(), 'notpercent'), getDetail(), legend, input$percent, NULL)$render()
      }
    })
    return(plotList)
  })
  # Shared legend for multi-map layouts; built from map 1's variable(s).
  plotLegend <- eventReactive(input$action, {
    # values <- reactiveValues(i = 0)
    if (input$whichMapData == 'Plot by census data') {
      type <- 'census'
      if (input$detailLevel == 'County') {
        data <- dataCombined
        # get rid of leading zeros
        data[, 1] <- as.numeric(sapply(data[, 1], function(y) sub('^0+([1-9])', '\\1', y)))
      } else if (input$detailLevel == 'State') {
        data <- dataState
      } else if (input$detailLevel == 'Region') {
        data <- dataRegion
      }
    }
    if (input$whichMapData == 'Plot by user data') {
      type <- 'user'
      raw <- readTable()
      if (input$detailLevel == 'County') {
        #data <- aggregateUsertoCounty(raw, zipTable)
        data <- raw
        # get rid of leading zeros
        data[, 1] <- as.numeric(sapply(data[, 1], function(y) sub('^0+([1-9])', '\\1', y)))
      } else if (input$detailLevel == 'State') {
        # leading 0s are removed in aggregate function
        data <- aggregateUserToState(raw)
      } else if (input$detailLevel == 'Region') {
        data <- aggregateUserToRegion(aggregateUserToState(raw))
      }
    }
    if (input$percentdifference) {
      # NOTE(review): `i` is not defined in this scope -- the two lines below
      # presumably intend 'variable1a' (as in the branches further down); as
      # written they error or pick up a stray global `i`. TODO confirm/fix.
      if (length(which(data[, input[[paste0('variable', i, 'a')]]] == 0)) > 0) {
        data <- data[which(data[, input[[paste0('variable', i, 'a')]]] != 0), ]
      }
      return(list(plotPercentDiffMap(input[['variable1a']], input[['variable1b']], type, data, paste("USA Colored by % Difference in", input[['variable1a']], 'and', input[['variable1b']]), legendColor(), getBuckets(bucketData(), 'Percent'), getDetail(), 'legendonly', input$percent, NULL)))
    } else if (input$difference) {
      return(list(plotDiffMap(input[['variable1a']], input[['variable1b']], type, data, paste("USA Colored by Difference in", input[['variable1a']], 'and', input[['variable1b']]), legendColor(), getBuckets(bucketData(), 'Difference'), getDetail(), 'legendonly', input$percent, NULL)))
    } else if (!input$difference) {
      return(list(plotMap(input[['variable1a']], type, data, paste("USA Colored by", input[['variable1a']]), legendColor(), getBuckets(bucketData(), 'notpercent'), getDetail(), 'legendonly', input$percent, NULL)))
    }
  })
  # http://stackoverflow.com/questions/33250075/get-screen-resolution-from-javascript-in-r-shiny
  # Arrange all map grobs into the requested grid.
  output$allmaps <- renderPlot({
    input$action
    isolate(total <- getTotal())
    isolate(col <- getNumCols())
    isolate(plotlist <- plotObjects())
    do.call("grid.arrange", c(plotObjects(), nrow = ceiling(total / col), ncol = col))
  })
  output$legend <- renderPlot({
    # call grid.draw here instead of helper so legend doesn't disappear when page resizes
    # legend is fixed size unfortunately in ggplot
    input$action
    isolate(total <- getTotal())
    if (total > 1) {
      do.call('grid.draw', plotLegend())
    }
  })
  # Reactive scope reference: https://shinydata.wordpress.com/2015/02/02/a-few-things-i-learned-about-shiny-and-reactive-programming/
  # Container UI for the maps; only rendered once valid selections exist.
  output$maps <- renderUI({
    # isolate mapType value update so that reactive dependencies don't override the isolated go button
    input$action
    isolate(whichMap <- whichData())
    isolate(whichDetail <- getDetail())
    isolate(uploaded <- isUploaded())
    if (whichMap != "None" && whichDetail != "None" && !(whichMap == "Plot by user data" && !uploaded)) {
      input$action
      isolate(total <- getTotal())
      isolate(col <- getNumCols())
      isolate(objects <- plotObjects())
      column(12, align = "center",
             fluidRow(
               tagList(
                 # can't set width here and in UI or it resolves to 0
                 column(width = 12, align = 'center', plotOutput("allmaps")),
                 column(width = 12, align = 'center', plotOutput("legend", width = '100%', height = 75))
               )
             )
      )
      # don't display error at start of the app or when you've only updated one section
    }
    # splitLayout avoids columns
  })
  # Download the current layout as a PNG (maps plus shared legend).
  output$png <- downloadHandler(
    filename = 'plots.png',
    content = function(file) {
      if (getNumCols() == 1) {
        png(file = file, width = 11, height = 8.5, units = "in", res = 300)
        list <- plotObjects()
        final <- c(list)
        grid.arrange(grobs = final, ncol = getNumCols(), layout_matrix = matrix(1:getTotal(), byrow = TRUE, nrow = ceiling(getTotal() / getNumCols())))
        dev.off()
        if (getTotal() > 1) {
          png(file = file, width = 8.5, height = 11, units = "in", res = 300)
          leg <- plotLegend()
          final <- c(list, leg)
          grid.arrange(grobs = final, ncol = getNumCols(), layout_matrix = rbind(matrix(1:getTotal(), byrow = TRUE, nrow = ceiling(getTotal() / getNumCols())), rep(getTotal() + 1, getNumCols())))
          dev.off()
        }
      } else {
        png(file = file, width = 11, height = 8.5, units = "in", res = 300)
        list <- plotObjects()
        leg <- plotLegend()
        final <- c(list, leg)
        # arrangeGrob won't work
        #do.call('grid.arrange', c(final, layout_matrix = rbind(matrix(1:getTotal(), byrow = TRUE, nrow = ceiling(getTotal() / getNumCols())), rep(getTotal() + 1, getNumCols()))))
        grid.arrange(grobs = final, ncol = getNumCols(), layout_matrix = rbind(matrix(1:getTotal(), byrow = TRUE, nrow = ceiling(getTotal() / getNumCols())), rep(getTotal() + 1, getNumCols())))
        #grid.arrange(grid1, plotLegend(), nrow = 2)
        dev.off()
      }
    }
  )
  # Same layout exported as a PDF.
  output$pdf <- downloadHandler(
    filename = 'plots.pdf',
    content = function(file) {
      if (getNumCols() == 1) {
        pdf(file = file, width = 11, height = 8.5)
        list <- plotObjects()
        final <- c(list)
        grid.arrange(grobs = final, ncol = getNumCols(), layout_matrix = matrix(1:getTotal(), byrow = TRUE, nrow = ceiling(getTotal() / getNumCols())))
        dev.off()
        if (getTotal() > 1) {
          pdf(file = file, width = 8.5, height = 11)
          leg <- plotLegend()
          final <- c(list, leg)
          grid.arrange(grobs = final, ncol = getNumCols(), layout_matrix = rbind(matrix(1:getTotal(), byrow = TRUE, nrow = ceiling(getTotal() / getNumCols())), rep(getTotal() + 1, getNumCols())))
          dev.off()
        }
      } else {
        pdf(file = file, width = 11, height = 8.5)
        list <- plotObjects()
        leg <- plotLegend()
        final <- c(list, leg)
        grid.arrange(grobs = final, ncol = getNumCols(), layout_matrix = rbind(matrix(1:getTotal(), byrow = TRUE, nrow = ceiling(getTotal() / getNumCols())), rep(getTotal() + 1, getNumCols())))
        dev.off()
      }
    }
  )
  # Build the statistics table: two chosen columns plus a computed statistic
  # (via helper addStatCol). Validates inputs and alerts on bad selections.
  tableStat <- eventReactive(input$displayTable, {
    if (input$useData == 'Sort table by census data') {
      data <- dataCombined
    }
    if (input$useData == 'Sort table by user data') {
      data <- readTable()
    }
    if (length(getTableCols()) == 1) {
      showshinyalert(session, "delimitter", "No entries? Make sure the right delimitter is selected (tab, comma, etc)")
    } else if (input$stat == 'None' || input$sort1By == 'None' || input$sort2By == 'None') {
      showshinyalert(session, "noselection", "Please make a selection")
    } else if ((length(which(data[, input$sort1By] < 0)) > 0 || length(which(data[, input$sort2By] < 0)) > 0 || length(which(data[, input$sort2By] > 1)) > 0 || length(which(data[, input$sort1By] > 1)) > 0) && input$stat != 'None' && input$stat != 'Percent Difference') {
      showshinyalert(session, "incompatible", "Selected columns must be rates between 0 and 1 for this statistic.")
    } else {
      data <- as.data.frame(cbind(data[, 1], data[, input$sort1By], data[, input$sort2By]))
      names(data) <- c('county', input$sort1By, input$sort2By)
      # ensure data is in numeric format so division happens correctly below. as.character prevents numeric from removing decimals
      data[, input$sort1By] <- as.numeric(as.character(data[, input$sort1By]))
      data[, input$sort2By] <- as.numeric(as.character(data[, input$sort2By]))
      # If data didn't exist in 2000 but did in 2010, then set % change to 0. We don't want Inf values
      progress <- shiny::Progress$new()
      progress$set(message = "Computing table statistics...", value = 0)
      on.exit(progress$close())
      n <- length(data[, 1])
      updateProgress <- function(detail = NULL) {
        progress$inc(amount = 1/n, detail = detail)
      }
      data <- addStatCol(input$stat, data, pop1 = dataCombined$population2000, pop2 = dataCombined$population2010, updateProgress)
      # Format very small statistics as "< 1e-8", everything else scientific.
      data[, input$stat] <- sapply(data[, input$stat], function(x) {
        if (abs(as.numeric(x)) < 1E-8) {
          paste0('< 1e-8')
        } else {
          format(x, scientific = TRUE)
        }
      })
      data
    }
  })
  ### for second panel ###
  output$table <- renderDataTable({
    datatable(tableStat(), options = list(dom = 'Bfrtip', buttons = c('copy', 'excel', 'pdf', 'print', 'colvis'), paging = FALSE, scrollY = "90vh",
                                          list(targets = c(0), type = "num-fmt")), extensions = 'Buttons')
    # allow top search bar but not column filters
    # (not for datatable) processing = FALSE, paging = FALSE, scrollX = TRUE, scrollY = "100vh", columnDefs = list(list(targets = c(-(1:4)), searchable = FALSE)))
  })
  # Read the data that feeds the map variable dropdowns: either the census
  # column-name list, or the user's uploaded file (read twice: once to learn
  # column classes, again forcing column 1 to character to keep leading 0s).
  readMap <- reactive({
    inFile <- ''
    if (input$whichMapData == 'Plot by census data') {
      a <- read.csv('../Data/censusColNames.csv', stringsAsFactors=FALSE)
    } else if (is.null(input$file1)) {
      return(NULL)
    } else {
      inFile <- input$file1
      # first time to get column classes
      # check.names=FALSE prevents column names from being modified
      a <- read.csv(inFile$datapath, header=input$header, sep=input$sep, check.names=FALSE)
      a <- read.csv(inFile$datapath, header=input$header, sep=input$sep, check.names=FALSE, colClasses = c('character', sapply(names(a), function(x) class(a[,x]))[-1]))
    }
    a
  })
  # Same as readMap(), but driven by the table panel's data-source control.
  readTable <- reactive({
    inFile <- ''
    if (input$useData == 'Sort table by census data') {
      a <- read.csv('../Data/censusColNames.csv', stringsAsFactors=FALSE)
    } else if (is.null(input$file1)) {
      return(NULL)
    } else {
      inFile <- input$file1
      # first time to get column classes
      # check.names=FALSE prevents column names from being modified
      a <- read.csv(inFile$datapath, header=input$header, sep=input$sep, check.names=FALSE)
      a <- read.csv(inFile$datapath, header=input$header, sep=input$sep, check.names=FALSE, colClasses = c('character', sapply(names(a), function(x) class(a[,x]))[-1]))
    }
    a
  })
  # One variable dropdown per map (first variable).
  output$mapselection1 <- renderUI({
    # dont' isolate or else the number of options won't render
    lapply(1:getTotal(), function(i) {
      selectInput(paste0('variable', i, 'a'), paste0('Map ', i), getMapCols())
    })
  })
  # Second variable per map (used for difference / % difference).
  output$mapselection2 <- renderUI({
    lapply(1:getTotal(), function(i) {
      selectInput(paste0('variable', i, 'b'), paste0('Map ', i), getMapCols())
    })
  })
  # Color pickers: one per map, plus a shared legend color when multi-map.
  output$colorselection <- renderUI({
    lapply(1:(getTotal() + 1), function(i) {
      if (i == (getTotal() + 1) && getTotal() > 1) {
        selectInput('legendcolor', 'Color of Legend', list('Red' = 'Reds', 'Blue' = 'Blues', 'Green' = 'Greens', 'Red-Green' = 'Red-Green'))
      } else if (i < (getTotal() + 1)){
        selectInput(paste0('color', i), paste0('Color ', i), list('Red' = 'Reds', 'Blue' = 'Blues', 'Green' = 'Greens', 'Red-Green' = 'Red-Green'))
      }
    })
  })
  output$table1List <- renderUI({
    selectInput("sort1By", "Sort By:", getTableCols())
  })
  output$table2List <- renderUI({
    selectInput("sort2By", "Sort By:", getTableCols())
  })
  # Column choices: census mode uses the name list's first column; user mode
  # uses the uploaded file's column names (minus the ID column).
  getMapCols <- reactive({
    switch(input$whichMapData,
           'Plot by census data' = c('None', readMap()[, 1]),
           'Plot by user data' = c('None', colnames(readMap())[-1])
    )
  })
  getTableCols <- reactive({
    switch(input$useData,
           'Sort table by census data' = c('None', readTable()[, 1]),
           'Sort table by user data' = c('None', colnames(readTable())[-1])
    )
  })
  # populate dropdown menu with column names of dataframe (table 1 and table 2 assumed to have same column names)
  output$icd9List <- renderUI({
    selectInput('sorticd9', NULL, c("None", colnames(readTable())[-1]))
  })
  # Isolate output to give dependency on go button
  # ICD9 counts divided by the census population to produce rates.
  geticd9Table <- eventReactive(input$displayICD9, {
    inFile <- ''
    data <- ''
    if (input$icd9 == 'None selected' | is.null(input$file1) & is.null(input$file2)) {
      return(NULL)
    } else if (input$icd9 == 'See ICD9 rates for user table') {
      inFile <- input$file1
      data = data2000
    }
    a <- read.csv(inFile$datapath, header=input$header, sep=input$sep)
    a <- as.data.frame(cbind(a[, 1], a[, input$sorticd9]))
    names(a) <- c('STCOU', input$sorticd9)
    a[, -1] <- format(round(a[, -1] / data$population[1:length(a[, 1])], 7), scientific = TRUE)
    a
  })
  output$icd9table <- renderDataTable({
    geticd9Table()
  }, options = list(scrollX = TRUE, scrollY = "100vh", paging = FALSE, processing = FALSE))
})
|
66c264589f55b78bb413a9c5a9074efc5d698e86 | 8eaf2fc58bda3aa46fe3d921fe8e10a23575061a | /tabs/fda_plots_tab.R | 9dadab491eff51557d81730ef55c0525bd9708ef | [
"MIT"
] | permissive | jovitagudan/Covid19 | 739c83a75bac016dd8e1c74a7627b5d8cd11b87c | 01e74c3fbad3349b028c8db6c1fb1a133ebadf5e | refs/heads/master | 2023-08-14T00:31:03.778392 | 2021-10-06T13:26:57 | 2021-10-06T13:26:57 | 292,038,510 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,793 | r | fda_plots_tab.R | fda_plots_tab <- tabItem(
tabName = "fda_plots",
fluidRow(
box(
span(tags$i(h6("Select one or more countries/regions.")), style="color:#d2d5d6"),
tags$head(tags$style(HTML('
#country+ div>.selectize-dropdown{background: #5b6063; color: #ffffff;}
#country+ div>.selectize-input{background: #5b6063; color: #ffffff;}
#outcome+ div>.selectize-dropdown{background: #5b6063; color: #ffffff;}
#outcome+ div>.selectize-input{background: #5b6063; color: #ffffff;}
#scale+ div>.selectize-dropdown{background: #5b6063; color: #ffffff;}
#scale+ div>.selectize-input{background: #5b6063; color: #ffffff;}
'))),
# selectizeInput("country", "Select country or region:", choices = countries_list,
# selected = countries_list$`Northern Europe`[c(3,7,8)], multiple = TRUE),
pickerInput("level_select_fda", "Select level:",
choices = c("World", "Continent", "Country", "US state"),
selected = c("Country"),
multiple = FALSE),
pickerInput("region_select_fda", "Select country or region:",
choices = countries_list,
options = list(`actions-box` = TRUE, `none-selected-text` = "Please make a selection!", `live-search`=TRUE),
selected = countries_list$`Northern Europe`[which(countries_list$`Northern Europe` %in% c("Lithuania", "Latvia", "Estonia"))],
multiple = TRUE),
pickerInput("outcome_fda", "Select outcome:",
choices = c("Cases per 100,000",
"Deaths per 100,000",
"Vaccinated per 100,000"),
multiple=FALSE),
tags$style(type = "text/css",
".irs-grid-text:nth-child(-2n+18) {color: white}",
".irs-grid-text:nth-child(2n+20) {color: white}",
".irs-grid-pol:nth-of-type(-n+18) {background:white}",
".irs-grid-pol:nth-of-type(n+18) {background:white}")
),
box(
span(tags$i(h3("Level")), style="color:#d2d5d6"),
plotlyOutput("plot_fda_level", height = 500)
)
),
fluidRow(
box(
span(tags$i(h3("First derivative")), style="color:#d2d5d6"),
plotlyOutput("plot_fda_first_deriv", height = 500)
),
box(
span(tags$i(h3("Second derivative")), style="color:#d2d5d6"),
plotlyOutput("plot_fda_second_deriv", height = 500)
)
)
)
|
17cbec1171509c4859b4f1cd83ea2f769cfc7c44 | 2b5c558745f842e9944ae2618b9dc0c619256241 | /KS-plot questionable.R | 375fb78d67d2a6d430468461c247a8bc45d47ebe | [] | no_license | starsfell/KS_Curve | a78190b8ce583b830cb2029c99d9845612ab323d | 1946565bad89d94193ccbb80ee566c21f93acfa4 | refs/heads/master | 2020-03-27T08:42:55.347259 | 2018-08-28T07:10:55 | 2018-08-28T07:10:55 | 146,280,790 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,724 | r | KS-plot questionable.R | rm(list=ls())
# Load libraries
library(ggplot2)
library(reshape2)
library(ROCR)
library(dplyr)
# Load the sample data and split it into train and test sets
diamonds$is_expensive <- diamonds$price > 2400
is_test <- runif(nrow(diamonds)) > 0.75
train <- diamonds[is_test==FALSE,]
test <- diamonds[is_test==TRUE,]
# Fit the model
fit_A <- glm(is_expensive ~ carat + cut + clarity, data=train)
# Score the model on the held-out test set
prob_A <- predict(fit_A, newdata=test, type="response")
pred_A <- prediction(prob_A, test$is_expensive)
perf_A <- performance(pred_A, measure = "tpr", x.measure = "fpr")
# The predicted values are stored as probabilities in "pred_A@predictions";
# the true labels are stored as "TRUE"/"FALSE" in "pred_A@labels".
# The plotting code is wrapped in the function PlotKS_N, where:
# - Pred_Var is the prediction result, either a score or a probability;
# - labels_Var is the good/bad label, 1 for a bad customer, 0 for a good one;
# - descending controls sorting by default risk: use descending=0 when
#   Pred_Var is a score, and descending=1 when it is a probability;
# - N is the number of equal-sized bins (after sorting by risk) used to
#   compute the KS value.
# Note: in our data the true label Y takes values "TRUE"/"FALSE" rather than
# 1/0, so take special care with the substitutions in df1$good1/df1$bad1 and
# df2$good2/df2$bad2 inside the function below.
#################### PlotKS_N ################################
# Compute and plot a KS (Kolmogorov-Smirnov) chart.
# Pred_Var:   predictions (score or probability).
# labels_Var: labels; here "TRUE"/"FALSE" are mapped to good/bad below.
# descending: 1 to sort predictions descending (probabilities),
#             0 to sort ascending (scores). Other values leave df1/df2
#             undefined, so only 0/1 are valid inputs.
# N:          number of equally spaced population quantiles to keep.
# Returns a list: M_KS (max KS), Pop (population share at max KS),
# PlotKS (ggplot object), df_ks (underlying data frame).
PlotKS_N<-function(Pred_Var, labels_Var, descending, N){
  # Pred_Var is prop: descending=1
  # Pred_Var is score: descending=0
  df<- data.frame(Pred=Pred_Var, labels=labels_Var)
  # First ordering: ties between labels broken with labels ascending.
  if (descending==1){
    df1<-arrange(df, desc(Pred), labels)
  }else if (descending==0){
    df1<-arrange(df, Pred, labels)
  }
  df1$good1<-ifelse(df1$labels=="TRUE",1,0) # 1 if the label is TRUE, else 0
  df1$bad1<-ifelse(df1$labels=="FALSE",1,0) # 1 if the label is FALSE, else 0
  # Cumulative counts and rates of good/bad under the first ordering.
  df1$cum_good1<-cumsum(df1$good1)
  df1$cum_bad1<-cumsum(df1$bad1)
  df1$rate_good1<-df1$cum_good1/sum(df1$good1)
  df1$rate_bad1<-df1$cum_bad1/sum(df1$bad1)
  # Second ordering: same sort on Pred but ties broken with labels descending.
  if (descending==1){
    df2<-arrange(df, desc(Pred), desc(labels))
  }else if (descending==0){
    df2<-arrange(df, Pred, desc(labels))
  }
  df2$good2<-ifelse(df2$labels=="TRUE",1,0)
  df2$bad2<-ifelse(df2$labels=="FALSE",1,0)
  df2$cum_good2<-cumsum(df2$good2)
  df2$cum_bad2<-cumsum(df2$bad2)
  df2$rate_good2<-df2$cum_good2/sum(df2$good2)
  df2$rate_bad2<-df2$cum_bad2/sum(df2$bad2)
  # Average the two tie-break orderings so the resulting curve does not
  # depend on how ties at equal predicted values are ordered.
  rate_good<-(df1$rate_good1+df2$rate_good2)/2
  rate_bad<-(df1$rate_bad1+df2$rate_bad2)/2
  df_ks<-data.frame(rate_good,rate_bad)
  df_ks$KS<-df_ks$rate_bad-df_ks$rate_good
  # Thin the curve down to N quantile points of the population.
  L<- nrow(df_ks)
  if (N>L){ N<- L}
  df_ks$tile<- 1:L
  qus<- quantile(1:L, probs = seq(0,1, 1/N))[-1]
  qus<- ceiling(qus)
  df_ks<- df_ks[df_ks$tile%in%qus,]
  df_ks$tile<- df_ks$tile/L
  # Anchor the curve at the origin.
  df_0<-data.frame(rate_good=0,rate_bad=0,KS=0,tile=0)
  df_ks<-rbind(df_0, df_ks)
  # Locate the maximum KS and the population share where it occurs.
  M_KS<-max(df_ks$KS)
  Pop<-df_ks$tile[which(df_ks$KS==M_KS)]
  M_good<-df_ks$rate_good[which(df_ks$KS==M_KS)]
  M_bad<-df_ks$rate_bad[which(df_ks$KS==M_KS)]
  # ggplot2 is already attached above; this re-attach is redundant but harmless.
  library(ggplot2)
  # Build the KS chart: bad rate (pink), good rate (dark blue), KS (yellow),
  # with guide lines through the point of maximum KS.
  PlotKS<-ggplot(df_ks)+
    geom_line(aes(tile,rate_bad),colour="pink",size=0.7)+
    geom_line(aes(tile,rate_good),colour="darkblue",size=0.7)+
    geom_line(aes(tile,KS),colour="yellow",size=0.7)+
    geom_vline(xintercept=Pop,linetype=2,colour="gray",size=0.6)+
    geom_hline(yintercept=M_bad,linetype=2,colour="pink",size=0.6)+
    geom_hline(yintercept=M_good,linetype=2,colour="darkblue",size=0.6)+
    geom_hline(yintercept=M_KS,linetype=2,colour="yellow",size=0.6)+
    annotate("text", x = 0.5, y = 1.05, label=paste("KS=", round(M_KS, 4), "at Pop=", round(Pop, 4)), size=4, alpha=0.8)+
    scale_x_continuous(breaks=seq(0,1,.2))+
    scale_y_continuous(breaks=seq(0,1,.2))+
    xlab("of Total Population")+
    ylab("of Total Bad/Good")+
    ggtitle(label="KS - Chart")+
    theme_bw()+
    theme(
      plot.title=element_text(colour="gray24",size=12,face="bold"),
      plot.background = element_rect(fill = "gray90"),
      axis.title=element_text(size=10),
      axis.text=element_text(colour="gray35")
    )
  result<-list(M_KS=M_KS,Pop=Pop,PlotKS=PlotKS,df_ks=df_ks)
  return(result)
}
# Call the function with the model results:
# - predictions: unlist(pred_A@predictions)
# - true labels: unlist(pred_A@labels)
# - descending=0
# - N=10000
# NOTE(review): prob_A holds probabilities, so per the convention documented
# above descending=1 would seem appropriate here -- confirm intent.
PlotKS_N(unlist(pred_A@predictions),unlist(pred_A@labels), descending=0, 10000)
# PlotKS_N returns a list whose elements are, in order: the maximum KS value,
# the population percentile at which KS is maximal, the KS plot object, and
# the KS data frame.
a5f7ee0aec1f34a08570465e88cc882932d9b3b0 | 0a021f843670c168c4a212207c13be1b0a88ddbe | /inst/doc/plotfunctions.R | be5cc2a9778bea3584624247a133c58f7f005a2d | [] | no_license | cran/plotfunctions | ddc4dd741ad2a43d81deb0ef13fe2d7b37ca84bd | ebacdd83686e1a32a4432a35f244bf82015a19a5 | refs/heads/master | 2021-01-20T18:53:05.464513 | 2020-04-28T09:00:02 | 2020-04-28T09:00:02 | 59,847,744 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,329 | r | plotfunctions.R | ## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(eval=TRUE, include=TRUE, echo=TRUE, fig.height = 4, fig.width = 4)
## ----startup------------------------------------------------------------------
library(plotfunctions)
## ----ex1, fig.width=8, fig.height=4, fig.show="hold"--------------------------
par(mfrow=c(1,2))
set.seed(123)
# PLOT1: t-distribution:
test <- rt(1000, df=5)
check_normaldist(test)
# PLOT2: skewed data, e.g., reaction times:
test <- exp(rnorm(1000, mean=.500, sd=.25))
check_normaldist(test)
## ----ex1b, fig.width=8, fig.height=4, fig.show="hold"-------------------------
par(mfrow=c(1,2))
set.seed(123)
# PLOT1: t-distribution:
test <- rt(1000, df=5)
qqnorm(test)
qqline(test)
# PLOT2: skewed data, e.g., reaction times:
test <- exp(rnorm(1000, mean=.500, sd=.25))
qqnorm(test)
qqline(test)
## ----ex2----------------------------------------------------------------------
# example InsectSprays from R datasets
avg <- aggregate(count ~ spray, data=InsectSprays, mean)
avg <- merge(avg,
aggregate(count ~ spray, data=InsectSprays, sd),
by="spray", all=TRUE)
# we could add the type of spray to the averages:
avg$type <- c(1,1,2,2,2,1)
# visualize output
dotplot_error(avg$count.x, se.val=avg$count.y, groups=avg$type, labels=avg$spray)
## ----ex3, fig.width=12, fig.height=4, fig.show="hold"-------------------------
# 3 panels:
par(mfrow=c(1,3), cex=1.1)
# define x and y
x <- -5:20
y <- x^2
# PLOT 1:
plot(x, y, pch=16)
# convert arrow positions:
ap1 <- getArrowPos(x, y, units="coords")
# PLOT 2:
plot(x, -1*y)
# convert arrow positions:
ap2 <- getArrowPos(x, -1*y, units="coords")
ap3 <- getArrowPos(c(1,1,1), c(1,.5,0), units="prop")
# PLOT 3:
plot(x, -2*y+200, ylim=c(-600,200), pch=18)
abline(h=c(0,-400), lty=3, col='red2')
points(x, -1*y)
ap4 <- getArrowPos(c(0,0,0), c(.75,.5,.25), units="prop")
# DRAW ARROWS:
drawDevArrows(start=ap1, end=ap2, arrows="none",
col=alphaPalette("red2", f.seq=c(.1,1), n=length(x)))
drawDevArrows(start=ap3, end=ap4, arrows="end", col="red2", lwd=3, length=.1)
## ----ex4, fig.width=8, fig.height=8, fig.show="hold"--------------------------
par(mfrow=c(2,2), cex=1.1)
# PLOT 1:
emptyPlot(10,1)
# PLOT 2:
emptyPlot(c(-10, 10), c(-100,500),
h0=0, main="Plot 2", xlab="X", ylab="Y")
# PLOT 3:
emptyPlot(c(-100, 1000), c(-8,8),
h0=0, v0=0, eegAxis=TRUE,
main="Plot 3: EEG axes")
# PLOT 4:
emptyPlot(c(-100, 1000), c(-8,8),
h0=0, v0=0,
xmark=TRUE, ymark=c(-5,5), las=1,
main="Plot 4: Simplified axes")
## ----ex5----------------------------------------------------------------------
# load example data:
data(chickwts)
# first calculate means and sd per feeding type:
avg <- with(chickwts, tapply(weight, list(feed), mean))
sds <- with(chickwts, tapply(weight, list(feed), sd))
# barplot:
b <- barplot(avg, beside = TRUE, ylim=c(0,400),
col=1, las=2)
# add errorbars:
errorBars(b, avg, sds, border = TRUE)
# add average:
add_bars(b[length(b)]+diff(b[1:2]), mean(avg),
col="red", xpd=TRUE)
errorBars(b[length(b)]+diff(b[1:2]), mean(avg), se(avg), xpd=TRUE)
mtext("mean/SE", at=b[length(b)]+diff(b[1:2]), line=1, side=1, font=2, las=2)
# [review] plotfunctions vignette chunks ex6-ex9 (coordinate helpers,
# gradient legend, and legends placed in the figure margin).
## ----ex6, fig.show="hold"-----------------------------------------------------
emptyPlot(c(-10,100), c(-2,2), h0=0, v0=0)
# Proportions to coordinates:
x <- getCoords(c(0, .25, .5, .75, 1))
y <- getCoords(c(0, .25, .5, .75, 1), side=2)
points(x, y, col="red1", pch=1:5, lwd=2, xpd=TRUE)
# wrt figure region:
x <- getCoords(c(0.05, .25, .5, .75, .95), input="f")
y <- getCoords(c(0.05, .25, .5, .75, .95), side=2, input="f")
points(x, y, col="steelblue", pch=1:5, lwd=2, xpd=TRUE)
## ----ex7, fig.show='hide'-----------------------------------------------------
emptyPlot(c(-10,100), c(-2,2), h0=0, v0=0)
# get plot coordinates:
getFigCoords("p")
# get figure coordinates:
getFigCoords("f")
# get proportions:
getProps(c(20,60,100,500))
getProps(c(-2,1,4), side=2)
## ----ex8, fig.show='hold'-----------------------------------------------------
dat <- expand.grid(x=seq(0,1,by=.1), y=seq(0,1, by=.1))
dat$z <- dat$x * dat$y
emptyPlot(1, 1, xlab="X", ylab="Y")
points(dat$x, dat$y, col=topo.colors(100)[round(dat$z*99)+1], pch=16, cex=2)
gradientLegend(range(dat$z), color="topo", nCol=100,inside = FALSE, pos=.825)
## ----ex9, fig.show='hold'-----------------------------------------------------
emptyPlot(1, 1, xlab="X", ylab="Y", bty='o')
legend("topright", legend=c("normal", "topright"), pch=21)
legend("center", legend=c("normal", "center"), pch=21)
legend("bottomleft", legend=c("normal", "bottomleft"), pch=21)
legend_margin("topright", legend=c("margin", "topright"), pch=21,
col="red1", box.col="red1", text.col="red1")
legend_margin("center", legend=c("margin", "center"), pch=21,
col="red1", box.col="red1", text.col="red1")
legend_margin("bottomleft", legend=c("margin", "bottomleft"), pch=21,
col="red1", box.col="red1", text.col="red1")
# [review] plotfunctions vignette chunks ex10/ex11 (margin density plots and
# contour plots on the built-in volcano data set).
## ----ex10, fig.show="hold", fig.width=8, fig.height=4-------------------------
set.seed(1234)
# grand mean of data:
x <- 1:100
y <- -0.01*(x - 30)^2+rnorm(100, mean=100)
# stimulus onset values:
so <- runif(100, min=20, max=40)+rnorm(100, sd=2)
par(mfrow=c(1,2), cex=1.1)
# PLOT 1
emptyPlot(range(x), range(y), h0=0,
main="Data", xlab="Time", ylab="Y")
lines(x, y, lwd=2, col='steelblue')
# add mean of stimulus onset:
abline(v=mean(so), lwd=2)
# add density of stimulus onset in
marginDensityPlot(density(so), side=1)
# PLOT 2
emptyPlot(range(x), range(y), h0=0,
main="More examples", xlab="Time", ylab="Y")
lines(x, y, lwd=2, col='steelblue')
# add mean of stimulus onset:
abline(v=mean(so), lwd=2)
# add density of stimulus onset on top of plot:
marginDensityPlot(density(so), side=3, scale=1, density=25)
marginDensityPlot(density(so), side=3, from=getCoords(0, side=2), scale=1)
# or on left side:
marginDensityPlot(density(y), side=2, col="steelblue")
## ----ex11, fig.width=4, fig.height=4, fig.show="hold"-------------------------
data(volcano)
x <- 10*(1:nrow(volcano))
y <- 10*(1:ncol(volcano))
par(cex=1.1)
# PLOT 1: image and contour
image(x, y, volcano, col = terrain.colors(100),
axes = FALSE, xlab="", ylab="")
contour(x, y, volcano, levels = seq(90, 200, by = 5),
add = TRUE, col = "peru")
# PLOT 2: color_contour
color_contour(x, y, volcano,
color = terrain.colors(100), axes=FALSE,
col="peru", levels=seq(80, 200, by = 5), zlim=c(80,200))
# PLOT 3: filled.contour (takes the complete device)
filled.contour(x, y, volcano, color.palette = terrain.colors, axes=FALSE)
# not possible to add contour lines:
contour(x, y, volcano, levels = seq(90, 200, by = 5),
add = TRUE, col = "peru")
# [review] plotfunctions vignette chunks ex12-ex15 (interaction surfaces,
# asymmetric confidence bands, and plotting images with plot_image()).
## ----ex12, fig.show="hold", fig.width=8, fig.height=4-------------------------
dat <- expand.grid(x=seq(0,1,by=.1), y=seq(0,1, by=.1))
dat$z <- dat$x * dat$y
# inspect the structure of the data:
head(dat)
par(mfrow=c(1,2), cex=1.1)
# PLOT 1: plot the default surface
plotsurface(dat, view=c("x", "y"), predictor="z")
# PLOT 2: customized color palette
plotsurface(dat, view=c("x", "y"), predictor="z",
color = c('gray25', 'white', 'red'), col=1,
main="Customized interaction surface", labcex=1)
## ----ex13, fig.show=FALSE, fig.width=8, fig.height=4--------------------------
# Generate some data:
x <- -10:20
y <- 0.3*(x - 3)^2 + rnorm(length(x))
s <- 0.15*abs(100-y + rnorm(length(x)))
par(mfrow=c(1,2), cex=1.1)
# PLOT 1: shaded confidence interval
emptyPlot(range(x), c(-25,100), h0=0, v0=0, main="Symmetric CI")
plot_error(x, y, s, shade=TRUE, lwd=2, col="steelblue")
# PLOT 2: Use of se.fit2 for asymmetrical error bar
cu <- y + 2*s
cl <- y - s
emptyPlot(range(x), c(-25,100), h0=0, v0=0, main="Asymmetric CI")
plot_error(x, y, s, shade=TRUE, lwd=2, col="steelblue")
plot_error(x, y, se.fit=cu, se.fit2=cl, col='red', shade=TRUE, density=30)
## ----ex14, eval=FALSE---------------------------------------------------------
# # 1 A. load png image directly -- only works with package png:
# img <- system.file("extdata", "Netherlands_by_SilverSpoon.png", package = "plotfunctions")
# plot_image(img=img, type='png')
## ----ex14b--------------------------------------------------------------------
# 1 B. load image object:
data(img)
plot_image(img=img, type='image')
## ----ex15, fig.width=8, fig.height=4, fig.show="hold"-------------------------
par(mfrow=c(1,2), cex=1.1)
# PLOT 1: replace colors
plot_image(img=img, type='image', replace.colors = list("#00000000"="#0000FF33", "#B.+"="#99DD99FF"),
main="the Netherlands")
points(c(.45, .8), c(.6, .85), pch=15)
text(c(.45, .8), c(.6, .85), labels=c("Amsterdam", "Groningen"), pos=1)
# example data
x <- 1:100
y <- -0.01*(x - 30)^2+rnorm(100, mean=100)
# PLOT 2: add picture to existing plot, while keeping original picture size ratio
emptyPlot(100, c(50, 100), h0=0,
main="Example data plot")
lines(x, y, lwd=2, col='steelblue')
plot_image(img=img, type='image', add=TRUE,
xrange=c(30,70), yrange=c(50,80), adj=1, keep.ratio = TRUE,
replace.colors = list("#B.+"="steelblue"),
bty='n')
|
6a3d1159d69e2d06455530d854aa9ef7fc803360 | 6b0a2ee0da687afe5812e895bfb5274409ea7fea | /R/dataprep.R | 4c3dfe76811146117283554c876648b469799bb1 | [
"MIT"
] | permissive | kkholst/mcif | d5450b13a264a1336579fcffc2cb74f8dd22de42 | 88c5e7606d991e3c9ec85ffb12febb32d3670bfa | refs/heads/master | 2020-05-21T23:57:17.269517 | 2017-08-02T14:03:39 | 2017-08-02T14:03:39 | 59,477,463 | 2 | 2 | null | 2017-07-31T11:51:16 | 2016-05-23T11:34:20 | C++ | UTF-8 | R | false | false | 2,685 | r | dataprep.R | #-----------------------------------------------------------------------
# Time transforming function g(t)
#-----------------------------------------------------------------------
# Transformation g: maps a time point t in (0, delta) onto the real line,
# using a scaled inverse hyperbolic tangent (t = delta/2 maps to 0).
g <- function(x, delta) {
  half_range <- delta / 2
  atanh((x - half_range) / half_range)
}
# Derivative of g with respect to t: the derivative of
# atanh((t - delta/2) / (delta/2)) simplifies to (delta/2) / (t * (delta - t)).
dg <- function(x, delta) {
  numerator <- (1/2) * delta
  numerator / (x * (delta - x))
}
#-----------------------------------------------------------------------
# Data preparing function
#-----------------------------------------------------------------------
# Builds the internal data list used by the model-fitting routines from a
# "wide" twin/pair data set in which paired variables are suffixed 1 and 2
# (e.g. time -> time1/time2, status -> status1/status2, cova -> cova1/cova2).
#
# Arguments:
#   data   data.frame in wide (one row per pair) format.
#   time   base name (string) of the two event-time columns.
#   status base name (string) of the two cause-of-failure columns.
#   ID     name (string) of the pair identifier column; rows are sorted by it.
#   w      name (string) of the weights column.
#   cova   optional character vector of covariate base names; when NULL the
#          design matrices reduce to an intercept-only column of ones.
#
# Returns a list with: causes, x.1, x.2, gt1, dgt1, gt2, dgt2, delta,
# weights, ID and eb0 (see section comments below). Relies on the sibling
# helpers g() and dg() defined above.
data.prep <- function(data, time, status, ID, w, cova=NULL){
# Sort rows by pair ID so both members of a pair line up consistently.
datao <- data[order(data[,ID]),]
#-----------------------------------------------------------------------
# Causes of failure: one column per pair member (status1, status2).
#-----------------------------------------------------------------------
causes <- cbind(datao[, paste(status, 1, sep="")], datao[, paste(status, 2, sep="")])
#-----------------------------------------------------------------------
# Design matrices (Xs): intercept column plus covariates for each member.
#-----------------------------------------------------------------------
n <- nrow(datao)
if (!is.null(cova)){
x1 <- paste(cova, "1", sep="")
x2 <- paste(cova, "2", sep="")
}
if (is.null(cova)){
# No covariates: x1/x2 stay NULL, so datao[, NULL] below contributes
# zero columns and only the intercept (column of ones) remains.
x1 <- cova
x2 <- cova
}
x.1 <- as.matrix(cbind(rep(1,n),datao[,x1]))
x.2 <- as.matrix(cbind(rep(1,n),datao[,x2]))
#-----------------------------------------------------------------------
# Transformation g(t): map times onto the real line (see g() above).
#-----------------------------------------------------------------------
# Time points
t1 <- datao[, paste(time, 1, sep="")]
t2 <- datao[, paste(time, 2, sep="")]
# Max. time across both members; used as the scale of the transformation.
# Note: g(delta, delta) = atanh(1) = Inf, so the observed maximum maps to Inf.
delta <- max(t1,t2)
# Transformed timepoints
gt1 <- g(t1,delta)
gt2 <- g(t2,delta)
#-----------------------------------------------------------------------
# Derivative of g(t), needed for the Jacobian of the transformation.
#-----------------------------------------------------------------------
dgt1 <- dg(t1,delta)
dgt2 <- dg(t2,delta)
#-----------------------------------------------------------------------
# ID (note: the parameter `ID` is rebound here to the extracted column).
#-----------------------------------------------------------------------
ID <- datao[, ID]
#-----------------------------------------------------------------------
# Weights
#-----------------------------------------------------------------------
w <- datao[, w]
#-----------------------------------------------------------------------
# EB0: zero-initialised n-by-2 matrix (starting values per pair).
#-----------------------------------------------------------------------
eb0 <- matrix(0, nrow=nrow(causes), ncol=2)
#-----------------------------------------------------------------------
# Return
#-----------------------------------------------------------------------
res <- list("causes"=causes, "x.1"=x.1, "x.2"=x.2, "gt1"=gt1, "dgt1"=dgt1, "gt2"=gt2, "dgt2"=dgt2, "delta"=delta, "weights"=w, "ID"=ID, "eb0"=eb0)
return(res)
}
|
dd59cc0c7af526e1508691e8fdc89c481c35811c | 96a999542c8dbcc68744697954cd516f88f66b32 | /WSP_detailed_Anal/old style solution.R | c2567f0806bf4fd8e584d5a86b256fc32e3596a6 | [] | no_license | tomfun10/test2 | 41bc7485d73cf60b55ecf1782ead8acaca8460b4 | d015382d13bdcf44ca4776f5b37c6665b2bd85c7 | refs/heads/master | 2021-01-23T07:20:41.511271 | 2017-01-31T06:45:00 | 2017-01-31T06:45:00 | 80,497,911 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,203 | r | old style solution.R | # ----------------------
# Old style solution below, assuming all csv's are readable - stacking one on top of another
mf = data.frame(fNumb=seq_along(d) , fName=d, isDataFrame= FALSE)
# Which files are readable as csv's
fil = vector(mode='list', length(d))
for (i in seq_along(d)){
# print(d[i])
fil[[i]] = try({readr::read_csv(d[i]) })
fil[[i]]$fileName = d[i]
}
library(dplyr)
xx = do.call(dplyr::bind_rows, fil[which(sapply(fil,ncol)==14)])
str(xx)
summary(xx$`Name of WSP`)
head(sort(xx$`Bet Result Date`))
tail(sort(xx$`Bet Result Date`))
xx$`Event Date` = anytime::anydate(xx$`Event Date`)
xx$`Bet Result Date` = anytime::anydate(xx$`Bet Result Date`)
summary(xx$`Event Date`)
summary(xx$`Bet Result Date`)
xx[is.na(xx$`Event Date`),]
as.data.frame(names(xx),)
xx = fread('20150701_051101_20150701_040138_Sportsbet_11042015.csv')
str(xx)
lubridate::dmy(xx$`Event Date`)
fil
for (i in fil) {
#print(i)
class(i)
}
table(unlist(sapply(fil,class)))
fil[[1]][,10:14]
fil[1]
ul = unlist(fil)
str(ul)
str(fil[[1]])
do.call(rbind, fil[1:300] )
# str(fil[[1]][2])
as.data.frame(table(unlist(fil)))
plyr::count(sapply(fil, '[' , 2) )
mf$isDataFrame = ifelse(sapply(fil, '[' , 2) == 'data.frame', TRUE, FALSE)
filesToFix = mf[is.na(mf$isDataFrame),]
FilesCanBeReadAsTables = d[dt]
fil2 = vector(mode='numeric', length(fil))
fil2names = vector(mode='character', length(file))
for (i in seq_along(FilesCanBeReadAsTables)){
fil2[[i]] = try({ncol(fread(FilesCanBeReadAsTables[i]) )})
fil2names[i] = FilesCanBeReadAsTables[i]
}
table(fil2)
fil3 = vector(mode='list')
for (i in seq_along(FilesCanBeReadAsTables)){
fil3[[i]] = try({fread(FilesCanBeReadAsTables[i] )})
fil3[[i]]$FileName = FilesCanBeReadAsTables[i]
}
sapply(fil3 , ncol)
table(sapply(fil3 , ncol))
colCnt = function(dfr) {ncol(dfr)}
table(sapply(fil3,colCnt))
correctColNames = names(fread(fil2names[1]))
for (i in seq_along(fil3)){
print(names(fil3[[i]]))
}
fil3
for (i in seq_along(fil3)){
# names(fil3[[i]]) = correctColNames
# print(fil3[[i]])
fil3[[i]]$`Event Date` = lubridate::dmy(fil3[[i]]$`Event Date`)
fil3[[i]]$`Bet Result Date` = lubridate::dmy(fil3[[i]]$`Bet Result Date`)
fil3[[i]]$`Race Number` = as.numeric(fil3[[i]]$`Race Number`)
fil3[[i]]$`Parimutuel Bets Taken` = as.numeric(fil3[[i]]$`Parimutuel Bets Taken`)
fil3[[i]]$`Non-Parimutuel Bets Taken` = as.numeric(fil3[[i]]$`Non-Parimutuel Bets Taken`)
fil3[[i]]$`Bets Paid/credited to customers` = as.numeric(fil3[[i]]$`Bets Paid/credited to customers`)
fil3[[i]]$`Net Customer Winnings` = as.numeric(fil3[[i]]$`Net Customer Winnings`)
fil3[[i]]$`Bets Back` = as.numeric(fil3[[i]]$`Bets Back`)
fil3[[i]]$`Bet Back Revenue` = as.numeric(fil3[[i]]$`Bet Back Revenue`)
fil3[[i]]$`Other Revenue` = as.numeric(fil3[[i]]$`Other Revenue`)
fil3[[i]]$`Jackpots Created` = as.numeric(fil3[[i]]$`Jackpots Created`)
}
ddff = do.call(rbind, fil3)
names(fil3[[244]])
names((fil3[[1]]))
#Read in one or many blocks of Excel cells, just select blocks of Excel cells run this function and they will be loaded to df
df=DescTools::XLGetRange(header = TRUE)
#Write a semi-colon delimited data block in R to Excel
DescTools::XLView(ddff)
plyr::count(ddff$`Event Date`)
which(sapply(fil3, colCnt)!=13)
table(sapply(fil3, colCnt))
244 245 248 494 495 497 498
fil2names[494]
library(DescTools);library(RDCOMClient)
#Read in one or many blocks of Excel cells, just select blocks of Excel cells run this function and they will be loaded to df
df=DescTools::XLGetRange(header = TRUE)
#Write a semi-colon delimited data block in R to Excel
DescTools::XLView(readr::read_csv(fil2names[244]))
testFile = 248 #245 244
fil2names[testFile]
fread(fil2names[testFile])
ncol(fread(fil2names[testFile]))
ncol(fread(fil2names[testFile])[1,])
names(fread(fil2names[244]))== names(fread(fil2names[1]))
names(fread(fil2names[244]))= names(fread(fil2names[1]))
table(names(fil3))
df = do.call(bind_rows, fil3)
dim(df)
names(df)
df %>%
group_by(`Event Date`) %>%
summarise(BetsPerDay = n()) %>%
arrange(`Event Date`)
summary(fil3)
str(fil3[[6]])
str(fil3[[7]])
summary(df)
library(DescTools)
Desc(df)
for (i in seq_along(d)){
# fil[[i]] = fread([[i]])
fil[[i]] = try({fread(d[i] )})
}
do.call(try(rbind), fil[which(sapply(fil,is.data.frame))])
sapply(d ,class )
fil[which(sapply(fil,is.data.frame))]
fil[[3]]
sapply(fil, class)
summary(fil)
fread(ProcessedList[[1]])
dir()
return(ProcessedList)
}
# ---
# Get Warehouse WSP data table and join to sftp folder names ----
#
# require(RODBC)
# fto=odbcConnect("R_Warehouse_Prod", uid=Sys.getenv("WH_USER"), pwd=Sys.getenv("WH_PASS"))  # SECURITY: plaintext credentials removed from this comment -- never commit usernames/passwords; read them from environment variables or a keyring
# sql = " select * from [warehouse].[d1_WageringProvider] "
# sql2=gsub(sql, pattern = '\\t|\\n',replacement = ' ' ,fixed = TRUE) #fixed = TRUE
# dfSQL2 = sqlQuery(fto,sql2 , stringsAsFactors=FALSE)
# dput(dfSQL2)
# -------------- warehouse table initial view
# Before formatting in Notepad++, get rid of the runs of five spaces and the tabs
# pk.getWSP_Warehouse_Tble =
# structure(list(d1_WageringProvider_ID = 1:26, Code = c("VICTAB",
# "NSWTAB", "ToteTas", "ACTTAB", "RWWA", "RWWAClubs", "TattsQld",
# "NTTAB", "SATAB", "Luxbet", "Unibet", "CrownBet", "Bet365", "Ladbrokes",
# "Sportingbet", "Sportsbet", "Centrebet", "Palmerbet", "TomWaterhouse",
# "Topbetta", "Topsport", "Classicbet", "Betfair", "ClubAllSports",
# "BetHQ", "MadBookie"), Name = c("TABCORP (VIC)", "TAB Ltd (NSW)",
# "TOTE Tasmania", "TABCORP (ACT)", "RWWA", "RWWA Clubs", "TattsBet",
# "NT TAB Pty Ltd", "SA TAB Pty Ltd", "Luxbet", "UNI Bet (ex Betchoice)",
# "Beteasy (ex Betezy)", "Bet365", "Ladbrokes (inc Bookmaker)",
# "Sportingbet Aust", "Sportsbet", "Sportingbet (Centrebet)", "Palmerbet",
# "Tom Waterhouse.com", "Topbetta", "Top Sport", "Classicbet",
# "Betfair", "Club All Sports", "Bet HQ", "Mad Bookie"), Category = c("TAB",
# "TAB", "TAB", "TAB", "TAB", "TAB", "TAB", "TAB", "TAB", "TAB",
# "Corporate", "Corporate", "Corporate", "Corporate", "Corporate",
# "Corporate", "Corporate", "Corporate", "Corporate", "Corporate",
# "Corporate", "Corporate", "Exchange", "Corporate", "Corporate",
# "Corporate"), ReportingGroup = c("VICTAB", "TAB", "TAB", "TAB",
# "TAB", "TAB", "TAB", "TAB", "TAB", "Corp/Exch", "Corp/Exch",
# "Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch",
# "Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch",
# "Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch"
# )), .Names = c("d1_WageringProvider_ID", "Code", "Name", "Category",
# "ReportingGroup"), row.names = c(NA, 26L), class = "data.frame")
# -------------- warehouse table cleaned up view
# After formatting in Notepad++ (runs of five spaces and tabs removed)
# Static snapshot of [warehouse].[d1_WageringProvider], produced with dput()
# from the SQL query shown (commented out) above and cleaned up by hand.
# NOTE: despite the "get" prefix this is a data.frame, not a function.
# Columns: d1_WageringProvider_ID, Code, Name, Category, ReportingGroup.
pk.getWSP_Warehouse_Tble <-
structure(list(
d1_WageringProvider_ID = 1:26
, Code = c("VICTAB",
"NSWTAB", "ToteTas", "ACTTAB", "RWWA", "RWWAClubs", "TattsQld",
"NTTAB", "SATAB", "Luxbet", "Unibet", "CrownBet", "Bet365", "Ladbrokes",
"Sportingbet", "Sportsbet", "Centrebet", "Palmerbet", "TomWaterhouse",
"Topbetta", "Topsport", "Classicbet", "Betfair", "ClubAllSports",
"BetHQ", "MadBookie")
, Name = c("TABCORP (VIC)", "TAB Ltd (NSW)",
"TOTE Tasmania", "TABCORP (ACT)", "RWWA", "RWWA Clubs", "TattsBet",
"NT TAB Pty Ltd", "SA TAB Pty Ltd", "Luxbet", "UNI Bet (ex Betchoice)",
"Beteasy (ex Betezy)", "Bet365", "Ladbrokes (inc Bookmaker)",
"Sportingbet Aust", "Sportsbet", "Sportingbet (Centrebet)", "Palmerbet",
"Tom Waterhouse.com", "Topbetta", "Top Sport", "Classicbet",
"Betfair", "Club All Sports", "Bet HQ", "Mad Bookie")
, Category = c("TAB",
"TAB", "TAB", "TAB", "TAB", "TAB", "TAB", "TAB", "TAB", "Corporate",
"Corporate", "Corporate", "Corporate", "Corporate", "Corporate",
"Corporate", "Corporate", "Corporate", "Corporate", "Corporate",
"Corporate", "Corporate", "Exchange", "Corporate", "Corporate",
"Corporate")
, ReportingGroup = c("VICTAB", "TAB", "TAB", "TAB",
"TAB", "TAB", "TAB", "TAB", "TAB", "Corp/Exch", "Corp/Exch",
"Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch",
"Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch",
"Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch", "Corp/Exch"
))
, .Names = c("d1_WageringProvider_ID", "Code", "Name", "Category",
"ReportingGroup"), row.names = c(NA, 26L)
, class = "data.frame")
# Listing of Top Level WSP folders ----
# Fixed list of the top-level provider directories on the sftp server.
#
# Returns a character vector of 26 folder names, in the original hand-kept
# order (note: SPORTSBET intentionally precedes SPORTINGBET, as before).
pk.get_WSP_dirs <- function() {
  c(
    "ACTTAB",
    "BET365",
    "BETEASY",
    "BETFAIR",
    "BETHQ",
    "CENTREBET",
    "CLASSICBET",
    "CLUBALLSPORTS",
    "LADBROKES",
    "LUXBET",
    "MADBOOKIE",
    "NSWTAB",
    "NTTAB",
    "PALMERBET",
    "RWWA",
    "RWWACLUBS",
    "SATAB",
    "SPORTSBET",
    "SPORTINGBET",
    "TATTSQLD",
    "TOMWATERHOUSE",
    "TOPBETTA",
    "TOPSPORT",
    "TOTETAS",
    "UNIBET",
    "VICTAB"
  )
}
# ---- End
# Get the unique error messages per WSP.
# e.g. pk.scan.Errfiles.in.fldr('BET365')
#
# Scans Y:/<wsp.Folder.Name>/ERROR/ for *_ERROR_LOG.csv files and returns a
# data frame with one row per unique log message per file (columns: file,
# log_message), or NULL when no error-log files are found.
#
# Fixes vs. the original:
#  * setwd() is now undone via on.exit(), instead of permanently changing
#    the caller's working directory.
#  * seq_along() replaces 1:length(), which produced the bogus index 0 when
#    no error-log files matched.
#  * per-file tryCatch() replaces a single try() around the whole loop, so
#    one unreadable file no longer silently aborts the entire scan.
pk.scan.Errfiles.in.fldr <- function(wsp.Folder.Name) {
  sub.fldr <- 'ERROR'
  old.wd <- setwd(paste0("Y:/", wsp.Folder.Name, "/", sub.fldr, "/"))
  on.exit(setwd(old.wd), add = TRUE)

  d <- dir()
  vErrLog <- grep('ERROR_LOG.CSV', d, ignore.case = TRUE)
  print(paste0('Uniq.files.in.err.fldr: ', length(vErrLog)))

  err.list <- list()
  for (i in seq_along(vErrLog)) {
    f <- d[vErrLog[i]]
    err.list[[i]] <- tryCatch({
      fr <- readr::read_csv(f)
      # Normalise column names: lower-case and strip spaces, '-' and '/'.
      names(fr) <- gsub(' ', '', tolower(names(fr)), fixed = TRUE)
      names(fr) <- gsub('-', '', names(fr), fixed = TRUE)
      names(fr) <- gsub('/', '', names(fr), fixed = TRUE)
      cbind(file = as.character(f), err.Msg = unique(fr['log_message']))
    }, error = function(e) NULL)   # skip unreadable files, keep scanning
  }

  ErrList <- do.call(rbind, err.list)
  if (is.null(ErrList)) return(NULL)   # nothing readable / nothing matched
  ErrList$file <- as.character(ErrList$file)
  ErrList
}
# ----- Move a fixed error's files from the ERROR folder back to the INBOX.
# Assumes the ERROR_LOG file name contains the original data file name as a
# stub, i.e. "<data file>_ERROR_LOG.csv".
# e.g. pk.move.err.log.files.back.to.inbox('TOPSPORT',
#        '20160515_000422_TopSport_14052016_ERROR_LOG.csv')
#
# Returns the logical vector from file.remove() (one entry per deleted file).
pk.move.err.log.files.back.to.inbox <- function (wsp.fldr.name, err.file) {
  # Bug fix: the original referenced the undefined name `err.file.to.remove`;
  # the parameter is called `err.file`.
  data.file <- gsub('_ERROR_LOG', '', err.file)

  # Copy the data file back into the inbox (never overwrite an existing one).
  file.copy(from = paste0("Y:/", wsp.fldr.name, "/ERROR/", data.file),
            to = paste0("Y:/", wsp.fldr.name, "/INBOX/", data.file),
            overwrite = FALSE)

  # Delete both the data file and its error log from the ERROR folder.
  file.remove(paste0("Y:/", wsp.fldr.name, "/ERROR/", data.file),
              paste0("Y:/", wsp.fldr.name, "/ERROR/", err.file))
}
|
fd2cd5c498454bc68fdef3f6d1bd08d5f58cea12 | 100579f212788d09f4e502dd0b058709f282faa4 | /R/jfsp.R | 640e67abe7336306fafded44d05e28cc52e2cbfe | [
"MIT"
] | permissive | leonawicz/jfsp | 615c63d9d4b8cf1c628db2e6710ff4c76cfe52b5 | 370918501cf9764fe03ff1e811557e8d804655ce | refs/heads/master | 2021-04-27T08:50:09.450955 | 2018-03-21T16:43:18 | 2018-03-21T16:43:18 | 76,271,792 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 720 | r | jfsp.R | globalVariables(c(".", ".data"))
#' jfsp: R functions and data associated with ALFRESCO wildfire model outputs for the Joint Fire Science Program (JFSP).
#'
#' The jfsp package encapsulates code, data and analysis results associated with ALFRESCO wildfire model outputs produced by the Scenarios Network for Alaska and Arctic Planning (SNAP) to assist the Joint Fire Science Program (JFSP).
#' jfsp provides a collection of functions, data sets, apps and interactive documents for exploring and analyzing ALFRESCO wildfire model outputs associated with JFSP-related projects.
#' It is a satellite package in the SNAPverse R package ecosystem.
#'
#' @docType package
#' @name jfsp
NULL
# roxygen anchor: import magrittr's pipe so `%>%` is available package-wide.
#' @importFrom magrittr %>%
NULL
|
9897413298ec7b6e0bcb6d6eede69f514c19d915 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Rpdb/examples/rotation.Rd.R | 73bcd0198f9355745e301881847fbdace377f901 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 617 | r | rotation.Rd.R | library(Rpdb)
### Name: rotation
### Title: Rotation of Atomic Coordinates
### Aliases: R R.coords R.pdb rotation
### Keywords: manip
### ** Examples
# [review] Extracted Rd example; requires the 'Rpdb' package and an rgl
# device for visualize().
# First let's read a pdb file
x <- read.pdb(system.file("examples/PCBM_ODCB.pdb",package="Rpdb"))
cell <- cell.coords(x)
visualize(x, mode = NULL)
# Rotation of the structure around the c-axis
visualize(R(x, 90, x=cell["x","c"], y=cell["y","c"], z=cell["z","c"]),
mode = NULL)
# Rotation of the residue 1 around the c-axis
visualize(R(x, 90, x=cell["x","c"], y=cell["y","c"], z=cell["z","c"], mask=x$atoms$resid==1),
mode = NULL)
|
3286ab1b88791270bfb03bd7d2ffe2b55d385bb1 | 8527a6e9d3747128929a0946e0612345280649fe | /R/get_pbp.R | 3bb9d23a97e28eacb21a6b81ac1b08fb0c273e8a | [] | no_license | skoval/slam-sofascoreR | 2164da68a62dd767fa7115a79b186db299a79a4f | dd2f377cf376355566dee04a7a1d0cac4b14fd5b | refs/heads/master | 2022-03-11T07:00:09.916410 | 2019-11-20T02:45:01 | 2019-11-20T02:45:01 | 222,835,812 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,796 | r | get_pbp.R | #' Get PBP and Pressure Scores
#'
#' Filters `matches` to completed ("FT") matches, scrapes point-by-point data
#' for each match id via `pbp()`, joins the match metadata back on, derives
#' per-point player / format flags, assigns pressure scores with
#' `assign_pressure()`, and appends the result to the `slam_point_by_point`
#' table (the table is created on first use). The DB connection is closed
#' before returning.
#'
#' @param matches Data frame of matches; must contain `statusDescription`,
#'   `id`, and the event/player columns selected in the join below.
#' @param con DB connection, defaulting to `make_connection()`; it is
#'   disconnected at the end of this function.
#'
#' @export
write_pbp <- function(matches, con = make_connection()){
matches <- matches %>%
dplyr::filter(statusDescription == "FT") # Limit to completed matches
# Scrape each match; a failed scrape yields NULL and is dropped by rbind.
results <- do.call("rbind", lapply(matches$id, function(x) tryCatch(pbp(x), error = function(e) NULL)))
results <- results %>%
inner_join(matches %>% dplyr::select(event, tour, round, matchid = id, winnerCode, formatedStartDate, player1id, player1name, player2id, player2name), by = "matchid")
# Derive per-point columns: `player_ref` (1 or 2) picks which player the
# point row refers to; rule flags encode slam-specific formats for 2019.
results <- results %>%
dplyr::mutate(
year = str_extract(formatedStartDate, "20[0-9][0-9]"),
playerid = ifelse(player_ref == 1, player1id, player2id),
playername = ifelse(player_ref == 1, player1name, player2name),
opponentid = ifelse(player_ref == 2, player1id, player2id),
opponentname = ifelse(player_ref == 2, player1name, player2name),
wonMatch = winnerCode == player_ref,
format = ifelse(tour == "atp", "bestof5", "bestof3"),
advantage = !(year == 2019 & event %in% c("australian-open")) & event != "us-open",
tiebreak10 = grepl("2019", formatedStartDate) & event == "australian-open"
) %>%
dplyr::select(-player1id, -player1name, -player2id, -player2name)
results <- assign_pressure(results) # Assign scores to each point
# Append when the table already exists, otherwise create it.
# NOTE(review): prefer TRUE/FALSE over T/F below (T and F are reassignable).
if(dbExistsTable(con, "slam_point_by_point"))
RMariaDB::dbWriteTable(
conn = con,
name = "slam_point_by_point",
value = as.data.frame(results),
append = T,
row.names = F
)
else
RMariaDB::dbWriteTable(
conn = con,
name = "slam_point_by_point",
value = as.data.frame(results),
overwrite = T,
temp = F,
row.names = F
)
RMariaDB::dbDisconnect(con)
print("Successfully wrote pbp to DB")
}
e6ae8924f861b9d1f65e3688d2bd729b6a100129 | 0aadcf7d61193d1a2405370cbad3764f565cdf3e | /R/rmysql.R | fe17f3a2d2c64d69cab6259c79801ece71bfd6b8 | [] | no_license | rsh249/vegdistmod | da662a75579ab32b7b462aa2e1547ae9b8caac61 | cc4e5f0c31fa1ef7d53e8127f0a57001c1b04403 | refs/heads/master | 2021-01-17T10:01:22.940779 | 2019-01-28T16:45:12 | 2019-01-28T16:45:12 | 59,592,035 | 4 | 0 | null | 2016-10-19T19:21:41 | 2016-05-24T16:58:17 | R | UTF-8 | R | false | false | 864 | r | rmysql.R |
.gbif_local <- function(taxon, limit=1000000000, db, u, h, pass){
#this will only work on Cuvier
#if(!is.null(grep("[[:space:]]", taxon))){
split <- strsplit( taxon, ' ');
genus = split[[1]][1];
species = split[[1]][2];
if(is.na(species)){
species = "%%";
}
#}
query = paste("SELECT div_id, genus, species, lat, lon from div_base where genus = \'", genus, "\' and species = \'", species, "\' and is_bad !=1 and cultivated != 1 and no_precision != 1 and hasGeospatialIssues != 1 ", " LIMIT ", sprintf("%i", limit), sep='');
#return(query)
con = DBI::dbConnect(RMySQL::MySQL(), dbname=db, username=u, host = h, password = pass);
get = DBI::dbGetQuery(con, query);
DBI::dbDisconnect(con);
get[,2] = paste(get[,2], get[,3]);
get = get[,-3];
colnames(get) = c('ind_id', 'tax', 'lat', 'lon')
return(get)
}
|
f9dcb7cdb65144b44b9f35ee5f29fcb4c453cc87 | 6051b2367c480dea740e343edb682ccabf5993b6 | /music_plotter.R | 61fef11d8827d247ec0293c4780383c9d8c71f8a | [] | no_license | gvdr/2016_sc_sfu | b7a0f9ffeb546ae9c21fbf7b0345b3ef40dc326b | 4fcb36480a80330eba72fbc306d87d98c2ef68db | refs/heads/master | 2021-01-12T12:00:57.982562 | 2016-10-04T19:28:14 | 2016-10-04T19:28:14 | 69,998,188 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 517 | r | music_plotter.R | music_plotter <- function(music_data,
                          xvariable,
                          yvariable,
                          colorvariable,
                          do_smooth) {
  # Scatterplot of one music-data column against another, coloured by a third.
  # `xvariable`, `yvariable`, `colorvariable` are column names supplied as
  # strings (hence aes_string; NOTE(review): aes_string is deprecated in
  # current ggplot2 -- consider aes(.data[[xvariable]], ...) when upgrading).
  # `do_smooth`: if TRUE, overlay a linear-model smooth.
  music_plotted <- music_data %>%
    filter(year > 1000) %>%  # keep year > 1000 (presumably drops placeholder years -- confirm)
    ggplot(aes_string(y = yvariable, x = xvariable, color = colorvariable)) +
    geom_point()
  if(do_smooth){
    music_plotted <- music_plotted +
      geom_smooth(method = "lm")
  } else {
    music_plotted <- music_plotted  # NOTE(review): no-op branch; could be dropped
  }
  return(music_plotted)
}
e0ac56766c9a1052dd9fe02fc8c6896d84feed8b | d4918568929a592a40ee705dc91614be17603c2c | /man/data2nnet.Rd | 0b8519deff999f8f0d5de4317dc07b031f296389 | [] | no_license | kevin05jan/iop | d722f6c8520cd457872f9a4f2d83294e1a3dc675 | 8a8b391976982985f1cfe66535d58a1606d4099b | refs/heads/master | 2020-08-02T14:25:27.256169 | 2019-10-19T09:43:06 | 2019-10-19T09:43:06 | 211,387,761 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 316 | rd | data2nnet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tmp.R
\name{data2nnet}
\alias{data2nnet}
\title{Neural Networks: nnet::nnet}
\usage{
data2nnet(f, x, ...)
}
\description{
Neural Networks: nnet::nnet
}
\examples{
m = data2nnet(Species ~ ., iris)
predict(m, iris)
}
|
85b5f42585fd87ef9af29103d83ed279e871b6f7 | ef57d1cb1527de38efedd75149facbb5848f1ff7 | /man/Compboost.Rd | 6d3a4c845b9132a0c042237a4a651e0dd9353a11 | [
"MIT"
] | permissive | QuayAu/compboost | 848126d5ca089a16922ade488ee22ad08e79e16d | c016409903583f1490e0800ee3bb0099878a203f | refs/heads/master | 2020-05-05T02:25:00.429666 | 2019-03-30T13:12:54 | 2019-03-30T13:12:54 | 173,743,931 | 0 | 0 | NOASSERTION | 2019-03-04T12:49:46 | 2019-03-04T12:49:46 | null | UTF-8 | R | false | true | 12,207 | rd | Compboost.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compboost.R
\name{Compboost}
\alias{Compboost}
\title{Compboost API}
\format{\code{\link{R6Class}} object.}
\description{
\code{Compboost} wraps the \code{S4} class system exposed by \code{Rcpp} to make defining
objects, adding objects, the training, calculating predictions, and plotting much easier.
As already mentioned, the \code{Compboost R6} class is just a wrapper and compatible
with the most \code{S4} classes.
}
\section{Usage}{
\preformatted{
cboost = Compboost$new(data, target, optimizer = OptimizerCoordinateDescent$new(), loss,
learning_rate = 0.05, oob_fraction)
cboost$addLogger(logger, use_as_stopper = FALSE, logger_id, ...)
cboost$addBaselearner(features, id, bl_factory, data_source = InMemoryData,
data_target = InMemoryData, ...)
cboost$train(iteration = 100, trace = -1)
cboost$getCurrentIteration()
cboost$predict(newdata = NULL)
cboost$getInbagRisk()
cboost$getSelectedBaselearner()
cboost$getEstimatedCoef()
cboost$plot(blearner_name = NULL, iters = NULL, from = NULL, to = NULL, length_out = 1000)
cboost$getBaselearnerNames()
cboost$prepareData(newdata)
cboost$getLoggerData()
cboost$calculateFeatureImportance(num_feats = NULL)
cboost$plotFeatureImportance(num_feats = NULL)
cboost$plotInbagVsOobRisk()
}
}
\section{Arguments}{
\strong{For Compboost$new()}:
\describe{
\item{\code{data}}{[\code{data.frame}]\cr
A data frame containing the data.
}
\item{\code{target}}{[\code{character(1)}]\cr
Character value containing the target variable. Note that the loss must match the
data type of the target.
}
\item{\code{optimizer}}{[\code{S4 Optimizer}]\cr
An initialized \code{S4 Optimizer} object exposed by Rcpp (e.g. \code{OptimizerCoordinateDescent$new()})
to select features at each iteration.
}
\item{\code{loss}}{[\code{S4 Loss}]\cr
Initialized \code{S4 Loss} object exposed by Rcpp that is used to calculate the risk and pseudo
residuals (e.g. \code{LossQuadratic$new()}).
}
\item{\code{learning_rate}}{[\code{numeric(1)}]\cr
Learning rate to shrink the parameter in each step.
}
\item{\code{oob_fraction}}{[\code{numeric(1)}]\cr
Fraction of how much data are used to track the out of bag risk.
}
}
\strong{For cboost$addLogger()}:
\describe{
\item{\code{logger}}{[\code{S4 Logger}]\cr
Uninitialized \code{S4 Logger} class object that is registered in the model.
See the details for possible choices.
}
\item{\code{use_as_stopper}}{[\code{logical(1)}]\cr
Logical value indicating whether the new logger should also be used as stopper
(early stopping). Default value is \code{FALSE}.
}
\item{\code{logger_id}}{[\code{character(1)}]\cr
Id of the new logger. This is necessary to, for example, register multiple risk logger.
}
\item{}{\code{...}\cr
Further arguments passed to the constructor of the \code{S4 Logger} class specified in
\code{logger}. For possible arguments see details or the help pages (e.g. \code{?LoggerIteration}).
}
}
\strong{For cboost$addBaselearner()}:
\describe{
\item{\code{features}}{[\code{character()}]\cr
Vector of column names which are used as input data matrix for a single base-learner. Note that not
every base-learner supports the use of multiple features (e.g. the spline base-learner does not).
}
\item{\code{id}}{[\code{character(1)}]\cr
Id of the base-learners. This is necessary since it is possible to define multiple learners with the same underlying data.
}
\item{\code{bl_factory}}{[\code{S4 Factory}]\cr
Uninitialized base-learner factory given as \code{S4 Factory} class. See the details
for possible choices.
}
\item{\code{data_source}}{[\code{S4 Data}]\cr
Data source object. At the moment just in memory is supported.
}
\item{\code{data_target}}{[\code{S4 Data}]\cr
Data target object. At the moment just in memory is supported.
}
\item{}{\code{...}\cr
Further arguments passed to the constructor of the \code{S4 Factory} class specified in
\code{bl_factory}. For possible arguments see the help pages (e.g. \code{?BaselearnerPSplineFactory})
of the \code{S4} classes.
}
}
\strong{For cboost$train()}:
\describe{
\item{\code{iteration}}{[\code{integer(1)}]\cr
Number of iterations that are trained. If the model is already trained the model is set to the given number
by goint back through the already trained base-learners or training new ones. Note: This function defines an
iteration logger with the id \code{_iterations} which is then used as stopper.
}
\item{\code{trace}}{[\code{integer(1)}]\cr
Integer indicating how often a trace should be printed. Specifying \code{trace = 10}, then every
10th iteration is printed. If no trace should be printed set \code{trace = 0}. Default is
-1 which means that in total 40 iterations are printed.
}
}
\strong{For cboost$predict()}:
\describe{
\item{\code{newdata}}{[\code{data.frame()}]\cr
Data to predict on. If newdata equals \code{NULL} predictions on the training data are returned.
}
}
\strong{For cboost$plot()}:
\describe{
\item{\code{blearner_name}}{[\code{character(1)}]\cr
Character name of the base-learner to plot the additional contribution to the response.
}
\item{\code{iters}}{[\code{integer()}]\cr
Integer vector containing the iterations the user wants to illustrate.
}
\item{\code{from}}{[\code{numeric(1)}]\cr
Lower bound for plotting (should be smaller than \code{to}).
}
\item{\code{to}}{[\code{numeric(1)}]\cr
Upper bound for plotting (should be greater than \code{from}).
}
\item{\code{length_out}}{[\code{integer(1)}]\cr
Number of equidistant points between \code{from} and \code{to} used for plotting.
}
}
}
\section{Details}{
\strong{Loss}\cr
Available choices for the loss are:
\itemize{
\item
\code{LossQuadratic} (Regression)
\item
\code{LossAbsolute} (Regression)
\item
\code{LossBinomial} (Binary Classification)
\item
\code{LossCustom} (Custom)
}
(For each loss take also a look at the help pages (e.g. \code{?LossBinomial}) and the
\code{C++} documentation for details)
\strong{Logger}\cr
Available choices for the logger are:
\itemize{
\item
\code{LoggerIteration}: Logs the current iteration. Additional arguments:
\describe{
\item{\code{max_iterations} [\code{integer(1)}]}{
Maximal number of iterations.
}
}
\item
\code{LoggerTime}: Logs the elapsed time. Additional arguments:
\describe{
\item{\code{max_time} [\code{integer(1)}]}{
Maximal time for the computation.
}
\item{\code{time_unit} [\code{character(1)}]}{
Character to specify the time unit. Possible choices are \code{minutes}, \code{seconds}, or \code{microseconds}.
}
}
\item
\code{LoggerInbagRisk}:
\describe{
\item{\code{used_loss} [\code{S4 Loss}]}{
Loss as initialized \code{S4 Loss} which is used to calculate the empirical risk. See the
details for possible choices.
}
\item{\code{eps_for_break} [\code{numeric(1)}]}{
This argument is used if the logger is also used as stopper. If the relative improvement
of the logged inbag risk falls below this boundary, then the stopper breaks the algorithm.
}
}
\item
\code{LoggerOobRisk}:
\describe{
\item{\code{used_loss} [\code{S4 Loss}]}{
Loss as initialized \code{S4 Loss} which is used to calculate the empirical risk. See the
details for possible choices.
}
\item{\code{eps_for_break} [\code{numeric(1)}]}{
This argument is used if the logger is also used as stopper. If the relative improvement
of the logged out of bag risk falls above this boundary, then the stopper breaks the algorithm.
}
\item{\code{oob_data} [\code{list}]}{
A list which contains data source objects which corresponds to the source data of each registered factory.
The source data objects should contain the out of bag data. This data is then used to calculate the
new predictions in each iteration.
}
\item{\code{oob_response} [\code{vector}]}{
Vector which contains the response for the out of bag data given within \code{oob_data}.
}
}
}
\strong{Note}:
\itemize{
\item
Even if you do not use the logger as stopper you have to define the arguments such as \code{max_time}.
\item
We are aware that the style guide here is not consistent with the \code{R6} arguments. Nevertheless, using
\code{_} as word separator is due to the used argument names within \code{C++}.
}
}
\section{Fields}{
\describe{
\item{\code{data} [\code{data.frame}]}{
Data used for training the algorithm.
}
\item{\code{data_oob} [\code{data.frame}]}{
Data used for out of bag tracking.
}
\item{\code{oob_fraction} [\code{numeric(1)}]}{
Fraction of how much data are used to track the out of bag risk.
}
\item{\code{response} [\code{vector}]}{
Response vector.
}
\item{\code{target} [\code{character(1)}]}{
Name of the target variable
}
\item{\code{id} [\code{character(1)}]}{
Name of the given dataset.
}
\item{\code{optimizer} [\code{S4 Optimizer}]}{
Optimizer used within the fitting process.
}
\item{\code{loss} [\code{S4 Loss}]}{
Loss used to calculate pseudo residuals and empirical risk.
}
\item{\code{learning_rate} [\code{numeric(1)}]}{
Learning rate used to shrink the estimated parameter in each iteration.
}
\item{\code{model} [\code{S4 Compboost_internal}]}{
\code{S4 Compboost_internal} class object from that the main operations are called.
}
\item{\code{bl_factory_list} [\code{S4 FactoryList}]}{
List of all registered factories represented as \code{S4 FactoryList} class.
}
\item{\code{positive_category} [\code{character(1)}]}{
Character containing the name of the positive class in the case of (binary) classification.
}
\item{\code{stop_if_all_stoppers_fulfilled} [\code{logical(1)}]}{
Logical indicating whether all stopper should be used simultaneously or if it is sufficient
to just use the first stopper to stop the algorithm.
}
}
}
\section{Methods}{
\describe{
\item{\code{addLogger}}{method to add a logger to the algorithm (Note: This is just possible before the training).}
\item{\code{addBaselearner}}{method to add a new base-learner to the algorithm (Note: This is just possible before the training).}
\item{\code{getCurrentIteration}}{method to get the current iteration on which the algorithm is set.}
\item{\code{train}}{method to train the algorithm.}
\item{\code{predict}}{method to predict on a trained object.}
\item{\code{getSelectedBaselearner}}{method to get a character vector of selected base-learner.}
\item{\code{getEstimatedCoef}}{method to get a list of estimated coefficient of each selected base-learner.}
\item{\code{plot}}{method to plot individual feature effects.}
\item{\code{getBaselearnerNames}}{method to get the names of the registered factories.}
\item{\code{prepareData}}{method to prepare data to track the out of bag risk of an arbitrary loss/performance function.}
\item{\code{getLoggerData}}{method to get the logged data from all registered loggers.}
\item{\code{calculateFeatureImportance}}{method to calculate feature importance.}
\item{\code{plotFeatureImportance}}{method to plot the feature importance calculated by \code{calculateFeatureImportance}.}
\item{\code{plotInbagVsOobRisk}}{method to plot the inbag vs the out of bag behavior. This is just applicable if a logger with name \code{oob_logger} was registered. This is automatically done if the \code{oob_fraction} is set.}
}
}
\examples{
cboost = Compboost$new(mtcars, "mpg", loss = LossQuadratic$new(), oob_fraction = 0.3)
cboost$addBaselearner("hp", "spline", BaselearnerPSpline, degree = 3,
n.knots = 10, penalty = 2, differences = 2)
cboost$train(1000)
table(cboost$getSelectedBaselearner())
cboost$plot("hp_spline")
cboost$plotInbagVsOobRisk()
}
|
dc7d9edfc36276877273837af7fc5b2e463ce5f9 | 698dcc73b9c81e7cb81ce4fd90efa52a4b72ea0b | /E428 - PTM counts.R | d312e344fd8c601646e5796442895ce8675d5f20 | [] | no_license | andrewrgross/E428-post-translational-modification | 22cc3a711d5b9deb13965e4b1ca84d0db7dca486 | 49f61918068521529ccd8f52be4ec353d680236a | refs/heads/main | 2023-08-30T02:42:33.575551 | 2021-11-12T18:49:41 | 2021-11-12T18:49:41 | 399,268,701 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,992 | r | E428 - PTM counts.R | ### E428 - PTM Counts -- Andrew R Gross -- 22SEP21
###
### A program to count the number of PTMs expressed in an expression table by group
### INPUT: PTM quantification tables
### OUTPUT: A counts table of the number of PTMs consistently found in each group
####################################################################################################################################################
### 1 - Header
####### 1.1 - Libraries
library(ggplot2)
####### 1.2 - Functions
########### 1.2.1 - countCheck - Checks whether the items in the specified columns of each row of a dataframe are present or absent above a cutoff
countCheck <- function(dataframe,columns,cutoffNumber){
  # Report, for each row of `dataframe`, whether at least `cutoffNumber` of
  # the entries in the selected `columns` are positive (i.e. the PTM was
  # detected in at least that many of the chosen samples).
  #
  # Args:
  #   dataframe:    data.frame of non-negative abundance values.
  #   columns:      indices or names of the sample columns to inspect.
  #   cutoffNumber: minimum count of positive entries required per row.
  #
  # Returns: an unnamed logical vector, one element per row of `dataframe`
  #   (logical(0) for a zero-row input, where the old loop returned NULL).
  #
  # Vectorized replacement for the original row-by-row loop, which grew the
  # result with c() on every iteration (quadratic copying).
  hits <- rowSums(dataframe[, columns, drop = FALSE] > 0)
  unname(hits >= cutoffNumber)
}
####################################################################################################################################################
### 2 - Input
# Load the raw PTM quantification tables (one per modification class) plus the
# sample metadata.  Every input is exported with a UTF-8 byte-order mark,
# hence the explicit fileEncoding.
# NOTE(review): setwd() to an absolute, machine-specific path makes this
# script non-portable -- consider parameterizing the input directory.
setwd('C:/Users/grossar/Box/Sareen Lab Shared/Data/Andrew/E428 - PTM of iECs/Input data/')
ptmAll <- read.csv('PTM-all.csv', fileEncoding="UTF-8-BOM")
ptmLace <- read.csv('PTM-lycine-acetylation.csv', fileEncoding="UTF-8-BOM")
ptmMeth <- read.csv('PTM-methylation.csv', fileEncoding="UTF-8-BOM")
ptmNace <- read.csv('PTM-N-term-acetylation.csv', fileEncoding="UTF-8-BOM")
ptmPhos <- read.csv('PTM-phosphorylation.csv', fileEncoding="UTF-8-BOM")
ptmUnmod <- read.csv('PTM-unmodified.csv', fileEncoding="UTF-8-BOM")
# metadata is assumed to carry at least Shortname (sample id) and Group
# (iEC / iPSC / Huvecs) columns -- both are used throughout the script.
metadata <- read.csv('metadata.csv', fileEncoding="UTF-8-BOM")
####################################################################################################################################################
### 3 - Format
### 3.1 - Create new dataframes separating expression and identity data for each modification row
# Split each table into annotation columns (row*) and abundance columns
# (ptm*).  The `dataset` label is overwritten on every line, so only the
# last assignment survives -- it appears to be leftover scratch.
rowAll <- ptmAll[1:6] ; ptmAll <- ptmAll[7:26] ; dataset = 'All modifications'
rowLace <- ptmLace[1:6] ; ptmLace <- ptmLace[7:26] ; dataset = 'Lysene Acetylation'
rowMeth <- ptmMeth[1:6] ; ptmMeth <- ptmMeth[7:26] ; dataset = 'Methylation'
rowNace <- ptmNace[1:6] ; ptmNace <- ptmNace[7:26] ; dataset = 'N-terminal Aceytlation'
rowPhos <- ptmPhos[1:6] ; ptmPhos <- ptmPhos[7:26] ; dataset = 'Phosphorylation'
# NOTE(review): the unmodified table has a different column layout (8
# annotation columns) and is NOT included in dataContainer below.
rowUnmod <- ptmUnmod[c(1:8)] ; ptmUnmod <-ptmUnmod[9:29] ; dataset = 'Unmodified'
# Parallel lists: abundance matrices and their matching annotation frames.
dataContainer <- list(ptmAll, ptmLace, ptmMeth, ptmNace, ptmPhos)
rowContainer <- list(rowAll, rowLace, rowMeth, rowNace, rowPhos)
names(dataContainer) <- c('All Modifications', 'L-Acetylation', 'Methylation', 'N-Acetylation', 'Phosphorylation')
names(rowContainer) <- names(dataContainer)
#dataContainer <- mapply(log, dataContainer)
# log-transform (log(x + 1), rounded to 2 decimals), zero-fill NAs, and
# rename the sample columns to the metadata short names.
for(pos in 1:length(dataContainer)) {
  dataContainer[[pos]] <- round(log(dataContainer[[pos]] + 1),2)
  dataContainer[[pos]][is.na(dataContainer[[pos]])] = 0
  names(dataContainer[[pos]]) <- metadata$Shortname
}
# NOTE(review): leftover debug call -- `inputDf` is not defined until the
# loop in section 4, so this line errors if the script is run top-to-bottom.
test <- countCheck(inputDf, columns= 1:8, cutoffNumber = 7)
####################################################################################################################################################
### 4 - Subset data tables to count based on presence in the specified condition
############################################################################################
### 4.1 - For each row, check if a set of coditions are met (universal detection, for now)
### 4.1 - For every PTM class, count modifications by the combination of
###       cell-type groups in which they are universally detected (non-zero
###       in every sample of the group), then make the counts mutually
###       exclusive so each PTM is tallied in exactly one category.
###
### Rewritten from the original version, which (a) initialized the result
### frames with a misspelled column 'iecChec' so the rbind() calls failed on
### mismatched names, (b) called countCheck(iec) without its required
### `columns`/`cutoffNumber` arguments (runtime error), and (c) appended
### logical *Check columns to inputDf that then contaminated the per-row
### min(row) != 0 test.
ptmCountsByCondition <- data.frame(allCheck = integer(), iecCheck = integer(),
                                   huvecCheck = integer(), ipscCheck = integer(),
                                   iecHuCheck = integer(), iecIpscCheck = integer(),
                                   huIpscCheck = integer())
for (ptmTypeNum in seq_along(dataContainer)) {
  ptmType <- names(dataContainer)[ptmTypeNum]
  print(ptmType)                                  # progress trace
  inputDf <- dataContainer[[ptmTypeNum]]
  ### Sample columns per group (data frames are named by metadata$Shortname);
  ### as.character() guards against Shortname having been read as a factor.
  iecCols   <- as.character(metadata$Shortname[metadata$Group == 'iEC'])
  huvecCols <- as.character(metadata$Shortname[metadata$Group == 'Huvecs'])
  ipscCols  <- as.character(metadata$Shortname[metadata$Group == 'iPSC'])
  ### A PTM counts as present in a group when every sample of that group is
  ### non-zero -- the same min() != 0 rule used in section 6 below.
  iecCheck   <- countCheck(inputDf, columns = iecCols,   cutoffNumber = length(iecCols))
  huvecCheck <- countCheck(inputDf, columns = huvecCols, cutoffNumber = length(huvecCols))
  ipscCheck  <- countCheck(inputDf, columns = ipscCols,  cutoffNumber = length(ipscCols))
  ### Joint presence across groups.
  allCheck     <- iecCheck & huvecCheck & ipscCheck
  iecHuCheck   <- iecCheck & huvecCheck
  iecIpscCheck <- iecCheck & ipscCheck
  huIpscCheck  <- huvecCheck & ipscCheck
  newConditionData <- data.frame(allCheck = sum(allCheck),
                                 iecCheck = sum(iecCheck),
                                 huvecCheck = sum(huvecCheck),
                                 ipscCheck = sum(ipscCheck),
                                 iecHuCheck = sum(iecHuCheck),
                                 iecIpscCheck = sum(iecIpscCheck),
                                 huIpscCheck = sum(huIpscCheck),
                                 row.names = ptmType)
  ### Subtract out co-occurring rows so the categories are exclusive: first
  ### remove the triple overlap everywhere, then remove the pairwise
  ### overlaps from the single-group counts.
  newConditionData[2:7] <- newConditionData[2:7] - newConditionData$allCheck
  newConditionData$iecCheck   <- newConditionData$iecCheck   - newConditionData$iecHuCheck  - newConditionData$iecIpscCheck
  newConditionData$huvecCheck <- newConditionData$huvecCheck - newConditionData$iecHuCheck  - newConditionData$huIpscCheck
  newConditionData$ipscCheck  <- newConditionData$ipscCheck  - newConditionData$huIpscCheck - newConditionData$iecIpscCheck
  ptmCountsByCondition <- rbind(ptmCountsByCondition, newConditionData)
}
####################################################################################################################################################
### 5 - Generate Bar Graphs
############################################################################################
### 5.1 - Review the summary of each dF
# Build a stacked bar chart of exclusive PTM counts for one PTM class.
# The outer parentheses force auto-printing of the selected class name.
(ptmType = names(dataContainer)[5])
# One bar per cell type; each bar is decomposed into the exclusive overlap
# categories.  The order of `shared` must match the order of `value` below.
group = c(rep('iEC',4), rep('HUVEC',4), rep('iPSC',4))
shared = c('iEC','iEC & HUVEC','iEC & iPSC', 'All', 'HUVEC', 'iEC & HUVEC','HUVEC & iPSC', 'All', 'iPSC', 'iEC & iPSC', 'HUVEC & iPSC', 'All' )
# NOTE(review): `drop(FALSE)` evaluates to FALSE, which lands in the
# positional `drop` argument of `[.data.frame` -- it works, but
# `drop = FALSE` would express the intent directly.
value = ptmCountsByCondition[ptmType, , drop(FALSE)]
value = c(value['iecCheck'][[1]], value['iecHuCheck'][[1]], value['iecIpscCheck'][[1]], value['allCheck'][[1]], value['huvecCheck'][[1]], value['iecHuCheck'][[1]], value['huIpscCheck'][[1]], value['allCheck'][[1]], value['ipscCheck'][[1]], value['iecIpscCheck'][[1]], value['huIpscCheck'][[1]], value['allCheck'][[1]] )
# Only label segments taller than ~10% of the largest segment.
text = rep(NA, 12)
cutoff = round(max(value)/10)
text[value>cutoff] <- value[value>cutoff]
ptmCountsToPlot = data.frame(group, shared, value, text)
# Per-bar totals, used to draw a black outline around each full bar.
valueSummed = c(ptmCountsToPlot$value[1] + ptmCountsToPlot$value[2]+ptmCountsToPlot$value[3]+ptmCountsToPlot$value[4],
                ptmCountsToPlot$value[5] + ptmCountsToPlot$value[6]+ptmCountsToPlot$value[7]+ptmCountsToPlot$value[8],
                ptmCountsToPlot$value[9] + ptmCountsToPlot$value[10]+ptmCountsToPlot$value[11]+ptmCountsToPlot$value[12])
ptmCountsToPlot2 = data.frame(group = c(1,2,3), value = valueSummed)
# Fix factor level order so the legend and x-axis are displayed consistently.
ptmCountsToPlot$shared <- factor(ptmCountsToPlot$shared, levels = c('iEC', 'HUVEC', 'iPSC', 'iEC & HUVEC', 'iEC & iPSC', 'HUVEC & iPSC', 'All'))
ptmCountsToPlot$group <- factor(ptmCountsToPlot$group, levels = c('iEC', 'HUVEC', 'iPSC'))
ptmCountsToPlot2$group <- factor(c('iEC', 'HUVEC', 'iPSC'), levels = c('iEC', 'HUVEC', 'iPSC'))
# Stacked bars + in-bar count labels + outlined totals.
ggplot(data = ptmCountsToPlot, aes(x = group, y = value, fill = shared)) +
  geom_bar(position = 'stack', stat = 'identity') +
  geom_text(aes(x = group, y = value, label = text), color = 'White', size = 8, position = position_stack(vjust = 0.5)) +
  geom_bar(data = ptmCountsToPlot2, aes(x = group, y = value), color = 'Black', fill = NA, position = 'stack', stat = 'identity', size = 1.1) +
  scale_fill_manual(values=c("#37d51e", "#1e37d5", "#d51e37", "#1e93d5", "#d5bc1e", "#d51e93", "#e3e3e3"), labels = c('iEC only', 'HUVEC only', 'iPSC only', 'iEC & HUVEC', 'iEC & iPSC', 'HUVEC & iPSC', 'Found in all')) +
  #scale_y_continuous(breaks = seq(0,50,5), limits = c(0,45.1), expand = c(0,0)) +
  labs(title = ptmType,
       x = 'Cell Type',
       y = 'PTMs Detected',
       fill = 'Overlap') +
  theme(plot.title = element_text(size = 24,hjust = 0.5),
        axis.title.x = element_text(face="bold", size=14, margin =margin(10,0,0,0)),
        axis.title.y = element_text(face="bold", size=14, margin =margin(0,10,0,0)),
        axis.text.x = element_text(face="italic", size = 14),
        axis.text.y = element_text(size = 14),
        panel.background = element_rect(fill = 'white', color = 'white', size = 1),
        panel.grid = element_blank(),
        axis.line = element_line(size = 1),
        axis.ticks = element_line(size = 2))
####################################################################################################################################################
### 6 - Save PTMs to group lists
############################################################################################
### 6.1 - Define group lists
### Inspect the per-class counts computed in section 4 before listing PTMs.
ptmCountsByCondition
### Accumulators: protein names (*List / *L) and their modified sequences
### (*ListM / *LM) for each exclusive presence category.
allList = c() ; allListM = c()
iecList = c() ; iecListM = c()
huvList = c() ; huvListM = c()
ipsList = c() ; ipsListM = c()
iechuvL = c() ; iechuvLM = c()
iecipsL = c() ; iecipsLM = c()
huvipsL = c() ; huvipsLM = c()
### 6.2 - Call data based on PTM group (set ptmTypeNum manually per run).
ptmTypeNum = 5
(ptmType <- names(dataContainer)[ptmTypeNum])
inputDf <- dataContainer[[ptmTypeNum]]
proteins = rowContainer[[ptmTypeNum]]$Gene
mods = rowContainer[[ptmTypeNum]]$Modified.Sequence
# BUG FIX: the second column was previously named 'iecChec'; the rbind() of
# per-row results (which use 'iecCheck') onto this zero-row frame in the
# loop below would fail with "names do not match previous names".
conditionReport <- data.frame(allCheck = logical(), iecCheck = logical(), huvecCheck = logical(), ipscCheck = logical(), iecHuCheck = logical(), iecIpscCheck = logical(), huIpscCheck = logical())
### 6.3 - For each row, check if the PTM is listed in all of each cell type
# Classify each PTM row into exactly one presence category and append its
# protein name / modified sequence to the matching accumulator pair.
# The if / else-if chain is order-dependent: all-three first, then the
# pairwise overlaps, then the single groups -- so the categories are
# mutually exclusive by construction.
# NOTE(review): conditionReport is grown with rbind() on every iteration
# (quadratic copying) and is never used after this loop.
for(rowNum in 1:nrow(inputDf)) {
  row = inputDf[rowNum,] # Loop through row numbers
  ### Subset each cell type
  ipsc = row[metadata[metadata$Group=='iPSC',]$Shortname]
  iec = row[metadata[metadata$Group=='iEC',]$Shortname]
  huvec = row[metadata[metadata$Group=='Huvecs',]$Shortname]
  ### Check if all samples in a group have a non-zero value
  allCheck = min(row) != 0
  iecCheck = min(iec) != 0
  huvecCheck = min(huvec) != 0
  ipscCheck = min(ipsc) != 0
  ### Check if the row is universally present in multiple groups
  ### (logical multiplication == AND)
  iecHuCheck = as.logical(iecCheck * huvecCheck)
  iecIpscCheck = as.logical(iecCheck * ipscCheck)
  huIpscCheck = as.logical(huvecCheck * ipscCheck)
  conditionReport = rbind(conditionReport,data.frame(allCheck, iecCheck, huvecCheck, ipscCheck, iecHuCheck, iecIpscCheck, huIpscCheck))
  ### Add protein to appropriate list and skip others
  if(allCheck){ # If all check is true, add to allList
    allList = c(allList, proteins[rowNum]) ; allListM = c(allListM, mods[rowNum])
  } else if(iecHuCheck) { # If iecHuCheck is true add to iechuvL
    iechuvL = c(iechuvL, proteins[rowNum]) ; iechuvLM = c(iechuvLM, mods[rowNum])
  } else if(iecIpscCheck) { # If iecIpscCheck is true add to iecipsL
    iecipsL = c(iecipsL, proteins[rowNum]) ; iecipsLM = c(iecipsLM, mods[rowNum])
  } else if(huIpscCheck) { # If huIpscCheck is tru add to huvipsL
    huvipsL = c(huvipsL, proteins[rowNum]) ; huvipsLM = c(huvipsLM, mods[rowNum])
  } else if(iecCheck) { # If iecCheck is true add to iecList
    #print(paste('iEC:', proteins[rowNum]))
    iecList = c(iecList, proteins[rowNum]) ; iecListM = c(iecListM, mods[rowNum])
  } else if(huvecCheck) { # If huvecCheck is true add to huvList
    #print(paste('HUVEC:', proteins[rowNum]))
    huvList = c(huvList, proteins[rowNum]) ; huvListM = c(huvListM, mods[rowNum])
  } else if(ipscCheck) { # If ipscCheck is true add ipsList
    #print(paste('iPSC:', proteins[rowNum]))
    ipsList = c(ipsList, proteins[rowNum]) ; ipsListM = c(ipsListM, mods[rowNum])
  }
}
# Bundle the category vectors into named lists (proteins and matching
# modified sequences share the same ordering of categories).
# NOTE(review): 'Endotheial' is misspelled in the list name; it propagates
# into downstream labels (the CSV column headers below use other names).
masterList = list('All Cell Types' = allList,'iECs' = iecList, 'HUVECs' = huvList, 'iPSCs' = ipsList, 'Endotheial (iECs+HUVECs)' = iechuvL, 'Induced (iECs+iPSCs)' = iecipsL, 'HUVECs+iPSCs' = huvipsL)
modsList = list('All Cell Types' = allListM,'iECs' = iecListM, 'HUVECs' = huvListM, 'iPSCs' = ipsListM, 'Endotheial (iECs+HUVECs)' = iechuvLM, 'Induced (iECs+iPSCs)' = iecipsLM, 'HUVECs+iPSCs' = huvipsLM)
### 6.4 - Find the longest list (determines the output matrix height).
# NOTE(review): the loop variable shadows base::list -- harmless here, but
# a different name would be safer.
maxListLength = 0
for(list in masterList){
  print(length(list))
  maxListLength = max(maxListLength, length(list))
}
### 6.5 - Create an emtpy matrix to write to: two columns (protein, mod)
### per category, padded with empty strings.
outputMatrix <- matrix('', ncol = 14, nrow = maxListLength)
colnames(outputMatrix) <- c('All Cell Types - Protein', 'All - Mod', 'iECs - Protein', 'iECs - Mod', 'HUVECs - Protein', 'HUVECs - Mod', 'iPSCs - Protein', 'iPSC - Mod', 'iECs+HUVECs - Protein', 'iEC+HUVECs - Mod', 'iECs+iPSCs - Protein', 'iEC+iPSCs - Mod', 'HUVECs+iPSCs - Protein', 'HUVECs+iPSCs - Mod')
### 6.6 - Assign each list to a column pair.
# NOTE(review): 0:length(x) includes a 0 index, which R silently drops when
# subsetting -- it works, but seq_len(length(x)) would be the conventional
# (and empty-safe) form.
for(listNum in 1:length(masterList)){
  selectedList = masterList[[listNum]]
  selectedListM = modsList[[listNum]]
  #outputMatrix[,listNum][1:length(selectedList)] <- selectedList
  assignmentPos = listNum*2-1
  outputMatrix[0:length(selectedList),assignmentPos] <- selectedList
  outputMatrix[0:length(selectedListM),assignmentPos+1] <- selectedListM
}
# Write one CSV per PTM class into the Counts output directory.
setwd('C:/Users/grossar/Box/Sareen Lab Shared/Data/Andrew/E428 - PTM of iECs/Counts/')
write.csv(outputMatrix, paste(ptmType,'-PTMs found.csv'), row.names = FALSE)
# Interactive sanity checks: print the size of each category list.
length(allList)
length(iecList)
length(huvList)
length(ipsList)
length(iechuvL)
length(iecipsL)
length(huvipsL)
# NOTE(review): exploratory scratch.  The hard-coded 103 must equal
# length(allList) for data.frame() to recycle cleanly, and assigning
# iecList into a 103-row frame errors unless length(iecList) == 103.
test <- data.frame('all' = allList, 'iec' = rep(0,103), 'huvec' = rep(0,103))
test$iec = iecList
####################################################################################################################################################
### 5 - Generate Bar Graphs
############################################################################################
### 5.1 - Review the summary of each dF
# NOTE(review): this whole section appears to be pasted from a different
# analysis -- `fig3a`, `zScoreCalculator`, `melt` (reshape2) and
# `geom_signif` (ggsignif) are not defined or loaded anywhere in this
# script, so none of it runs as-is.
fig3a$expandedPercent <- fig3a$Expanded.abnormal/fig3a$expanded.total*100
fig3a$unexpandedPercent <- fig3a$unexpanded.abnormal/fig3a$unexpanded.total*100
fig3a$zScore <- zScoreCalculator(fig3a$Expanded.abnormal, fig3a$unexpanded.abnormal, fig3a$expanded.total, fig3a$unexpanded.abnormal)
# Row-wise recomputation of the z-scores (overwrites the column above).
zScores <- c()
for(rowNum in 1:nrow(fig3a)) {
  currentRow <- fig3a[rowNum,]
  newZscore = zScoreCalculator(currentRow[,1], currentRow[,3], currentRow[,2], currentRow[,4])
  zScores <- c(zScores, newZscore)
}
fig3a$zScore <- zScores
# Two-sided p-value from the standard normal.
fig3a$pValue <- 2*(1-pnorm(fig3a$zScore))
plotA <- melt(t(fig3a[5:6]))
# Grouped bar chart of % abnormal, expanded vs unexpanded.
(f3aPlot <- ggplot(data = plotA, aes(x = Var2, y = value)) +
    geom_bar(aes(fill = Var1), stat = 'identity', position=position_dodge()) +
    scale_fill_manual(values = c('black', 'grey50')) +
    scale_y_continuous(breaks = seq(0,50,5), limits = c(0,45.1), expand = c(0,0)) +
    labs(x = 'Donor Age',
         y = '% Abnormal') +
    theme(axis.title.x = element_text(face="italic", size=14, margin =margin(10,0,0,0)),
          axis.title.y = element_text(face="italic", size=14, margin =margin(0,10,0,0)),
          axis.text.x = element_text(size = 14),
          axis.text.y = element_text(size = 14),
          panel.background = element_rect(fill = 'white', color = 'white', size = 1),
          panel.grid = element_blank(),
          axis.line = element_line(size = 1),
          axis.ticks = element_line(size = 2),
          legend.position = 'none') ) #+ coord_equal(ratio = 0.08)
# Manually positioned significance bars and star annotations.
sigData <- data.frame(x=c(0.875, 1.875, 2.875, 3.875, 4.875), xend=c(1.125, 2.125, 3.125, 4.125, 5.125),
                      y=c(16, 18, 19, 18, 42), annotation=c('***', '**', '****', ' **** ', ' ** '))
f3aPlot <- f3aPlot + geom_signif(stat="identity",
                                 data = sigData,
                                 aes(x=x,xend=xend, y=y, yend=y, annotation=annotation),
                                 tip_length = 0,
                                 vjust = 0)
####################################################################################################################################################
### 6 - Export
############################################################################################
# Export differential-expression-style tables, one per PTM class.
# NOTE(review): `outputData` is never defined in this script -- presumably
# produced by a companion DE script; confirm before running.
setwd("C:/Users/grossar/Box/Sareen Lab Shared/Data/Andrew/E428 - PTM of iECs/DE")
write.csv(outputData[[1]], paste(names(outputData)[[1]], ' - iEC v Huvec.csv'))
write.csv(outputData[[2]], paste(names(outputData)[[2]], ' - iEC v Huvec.csv'))
write.csv(outputData[[3]], paste(names(outputData)[[3]], ' - iEC v Huvec.csv'))
write.csv(outputData[[4]], paste(names(outputData)[[4]], ' - iEC v Huvec.csv'))
write.csv(outputData[[5]], paste(names(outputData)[[5]], ' - iEC v Huvec.csv'))
####################################################################################################################################################
### 6 - Scratchwork
############################################################################################
apply.t.test <- function(dataframe, metadata, group1, group2) {
  # Row-wise Welch t-test between two sample groups.
  #
  # Args:
  #   dataframe: data.frame with one column per sample (named as in
  #              metadata$Shortname) and one row per feature.
  #   metadata:  data.frame with at least Shortname and Group columns.
  #   group1, group2: values of metadata$Group selecting the two sample sets.
  #
  # Returns: `dataframe` with appended columns p.val, mean1, mean2 (group
  #   means reported by t.test), noZero1/noZero2 (TRUE when every sample of
  #   the group is non-zero) and noZeroBoth (factor with possible labels
  #   'Null'/'OneFull'/'BothFull': how many of the two groups are fully
  #   non-zero).
  #
  # Rewritten to preallocate the per-row results instead of growing a
  # data.frame with rbind() inside the loop (quadratic copying), and to
  # coerce the one-row data.frame slices to plain numeric vectors before
  # handing them to t.test().
  cols1 <- as.character(metadata$Shortname[metadata$Group == group1])
  cols2 <- as.character(metadata$Shortname[metadata$Group == group2])
  statRows <- vector("list", nrow(dataframe))
  for (rowNum in seq_len(nrow(dataframe))) {
    set1 <- as.numeric(dataframe[rowNum, cols1])
    set2 <- as.numeric(dataframe[rowNum, cols2])
    testResult <- t.test(set1, set2)
    zCheck1 <- min(set1) > 0
    zCheck2 <- min(set2) > 0
    # zCheck1 + zCheck2 counts how many groups are fully non-zero (0/1/2).
    zCheck3 <- c('Null', 'OneFull', 'BothFull')[zCheck1 + zCheck2 + 1]
    statRows[[rowNum]] <- data.frame(p.val = testResult$p.value,
                                     mean1 = testResult$estimate[[1]],
                                     mean2 = testResult$estimate[[2]],
                                     noZero1 = zCheck1,
                                     noZero2 = zCheck2,
                                     noZeroBoth = zCheck3,
                                     stringsAsFactors = FALSE)
  }
  statColumns <- do.call(rbind, statRows)
  # Convert the label column to a factor, matching the original contract.
  statColumns$noZeroBoth <- factor(statColumns$noZeroBoth)
  cbind(dataframe, statColumns)
}
# Scratch call exercising apply.t.test() on the `test` frame built above.
# NOTE(review): `test` carries columns 'all'/'iec'/'huvec', not the sample
# columns named in metadata$Shortname, so this call is unlikely to run as-is.
summary(apply.t.test(test, metadata, 'iEC', 'Huvecs'))
|
acc44fe845974f498f75aec93ff636a9be71eb06 | 7c39da976f28af016e5b1f847e68473c659ea05d | /R/RearrangementParams-class.R | 56cc5a1e4afdb9e6d35cace5a6f9be117b220609 | [] | no_license | cancer-genomics/trellis | b389d5e03959f8c6a4ee7f187f7749048e586e03 | 5d90b1c903c09386e239c01c10c0613bbd89bc5f | refs/heads/master | 2023-02-24T05:59:44.877181 | 2023-01-09T20:38:36 | 2023-01-09T20:38:36 | 59,804,763 | 3 | 1 | null | 2023-01-11T05:22:52 | 2016-05-27T04:45:14 | R | UTF-8 | R | false | false | 5,553 | r | RearrangementParams-class.R | #' @include AllGenerics.R
NULL
#' Parameter class for rearrangement analysis
#'
#' @slot rp_separation length-one numeric vector
#' @slot min_number_tags_per_cluster length-one numeric vector
#' @slot min_cluster_size length-one numeric vector
#' @slot max_cluster_size length-one numeric vector
#' @slot min.gapwidth length-one numeric vector
#' @slot percent_modal_type length-one numeric vector
#' @slot percent_linking length-one numeric vector
#' @export
#' @rdname RearrangementParams-class
setClass("RearrangementParams", representation(rp_separation="numeric",               # min separation (bp) between first/last read of a pair
                                               min_number_tags_per_cluster="numeric", # min reads per cluster / linking two clusters
                                               min_cluster_size="numeric",            # min cluster width (bp)
                                               max_cluster_size="numeric",            # max cluster width (bp)
                                               min.gapwidth="numeric",                # reads within this gap are merged
                                               percent_modal_type="numeric",          # min fraction agreeing with modal rearrangement type
                                               percent_linking="numeric"))            # min fraction of reads linking two clusters
#' A parameter class for somatic rearrangement analysis
#'
#' Some details about why we look for linked tag clusters.
#'
#' @details A tag cluster is defined as follows:
#'
#' (i) it must have at least <min_number_tags_per_cluster> reads
#'
#' (ii) Provided (i) is TRUE, the cluster includes all improper reads
#' with less than <min.gapwidth> separation
#'
#' (iii) The size of a cluster is defined as the difference in the
#' minimum basepair across all members and the maximum basepair
#' across all members. The size of the cluster must be at least
#' <min_cluster_size> and no bigger than <max_cluster_size>.
#'
#' Having determined the type of rearrangement supported by each read
#' pair for two linked clusters, we require that the modal
#' rearrangement type be supported by at least <percent_modal_type>
#' read pairs.
#'
#'
#' @examples
#'
#' ## Default rearrangement parameters for whole genome sequence data
#' ## with 30x coverage
#' rp <- RearrangementParams()
#'
#' @export
#'
#' @param rp_separation length-one numeric vector indicating minimum
#' separation of the first and last read of a pair
#'
#' @param min_number_tags_per_cluster length-one numeric vector
#' indicating the minimum number of reads in a cluster and the
#' minimum number of reads required to link two clusters
#'
#' @param min_cluster_size length-one numeric vector; the minimum size
#' of a cluster of reads
#'
#' @param max_cluster_size length-one numeric vector; the maximum size
#' of a cluster of reads
#'
#' @param min.gapwidth length-one numeric vector; reads with at most
#' min.gapwidth separation between them are considered overlapping.
#'
#' @param percent_modal_type length-one numeric vector; the percentage
#' of reads that must agree with the modal rearrangement type
#'
#' @param percent_linking length-one numeric vector; two linked tag
#' clusters must be linked by at least this percentage of reads. See
#' details
#'
#' @rdname RearrangementParams-class
RearrangementParams <- function(rp_separation=10e3,
                                min_number_tags_per_cluster=5,
                                min_cluster_size=115L,
                                max_cluster_size=5000L,
                                min.gapwidth=1000L,
                                percent_modal_type=0.9,
                                percent_linking=0.8){
  ## Collect the slot values once, then hand them to new() via do.call so
  ## the slot list reads top-to-bottom like the class representation.
  slot_values <- list(rp_separation=rp_separation,
                      min_number_tags_per_cluster=min_number_tags_per_cluster,
                      min_cluster_size=min_cluster_size,
                      max_cluster_size=max_cluster_size,
                      min.gapwidth=min.gapwidth,
                      percent_modal_type=percent_modal_type,
                      percent_linking=percent_linking)
  do.call(new, c(list(Class="RearrangementParams"), slot_values))
}
## Slot accessors --------------------------------------------------------
## Simple getters exposing each tuning parameter.  The generics are expected
## to be declared in AllGenerics.R (see the @include directive at the top of
## this file).
#' @rdname RearrangementParams-class
#' @aliases rpSeparation,RearrangementParams-method
setMethod("rpSeparation", "RearrangementParams", function(object) object@rp_separation)
#' @rdname RearrangementParams-class
#' @aliases minNumberTagsPerCluster,RearrangementParams-method
setMethod("minNumberTagsPerCluster", "RearrangementParams", function(object) object@min_number_tags_per_cluster)
#' @rdname RearrangementParams-class
#' @aliases minClusterSize,RearrangementParams-method
setMethod("minClusterSize", "RearrangementParams", function(object) object@min_cluster_size)
#' @rdname RearrangementParams-class
#' @aliases maxClusterSize,RearrangementParams-method
setMethod("maxClusterSize", "RearrangementParams", function(object) object@max_cluster_size)
#' @rdname RearrangementParams-class
#' @aliases minGapWidth,RearrangementParams-method
setMethod("minGapWidth", "RearrangementParams", function(object) object@min.gapwidth)
#' @rdname RearrangementParams-class
#' @aliases percentModalType,RearrangementParams-method
setMethod("percentModalType", "RearrangementParams", function(object) object@percent_modal_type)
#' @rdname RearrangementParams-class
#' @export
percentLinking <- function(object) object@percent_linking # NOTE(review): plain function, unlike the S4 accessors above -- presumably no generic exists for it; confirm against AllGenerics.R
# show method: pretty-prints every tuning parameter when a
# RearrangementParams object is auto-printed at the console.
setMethod("show", "RearrangementParams", function(object){
  cat("Object of class RearrangementParams \n")
  cat("   min tag separation:", rpSeparation(object), "\n")
  cat("   min tags/cluster:", minNumberTagsPerCluster(object), "\n")
  cat("   min cluster size:", minClusterSize(object), "\n")
  cat("   max cluster size:", maxClusterSize(object), "\n")
  cat("   min gap width between tags:", minGapWidth(object), "\n")
  cat("   prop modal rearrangement:", percentModalType(object), "\n")
  cat("   prop linking 2 clusters :", percentLinking(object), "\n")
})
|
41ca606d4203145a04ddccd9cf5ff1b15b312835 | a312832099b0835621b07ae88cd915cac5518bc7 | /utils/stick2lm.R | d8886fa55eaa7f1e793a1bb666bbed611e4ec631 | [
"MIT"
] | permissive | PennLINC/xcpEngine | 5858b4dcdd614d1319eaf3fc5f904cc065f1c50e | d7998c89bf4b77060479f164db8914d30afe5050 | refs/heads/master | 2023-04-25T23:58:59.315254 | 2023-04-19T16:30:56 | 2023-04-19T16:30:56 | 80,768,241 | 13 | 12 | MIT | 2023-04-19T16:30:57 | 2017-02-02T21:00:22 | Shell | UTF-8 | R | false | false | 8,222 | r | stick2lm.R | #!/usr/bin/env Rscript
###################################################################
# ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ #
###################################################################
###################################################################
# function for converting stick functions into curves to include
# in a linear model
#
# This is not to be used.
###################################################################
# Deprecation guard: announce and bail out immediately.  Everything after
# the quit() call below is dead code, retained for reference only.
cat('THIS UTILITY IS NO LONGER SUPPORTED\n')
cat('EXITING\n')
quit()
###################################################################
# Load required libraries
###################################################################
suppressMessages(suppressWarnings(library(optparse)))
suppressMessages(suppressWarnings(library(pracma)))
#suppressMessages(suppressWarnings(library(ANTsR)))
###################################################################
# Parse arguments to script, and ensure that the required arguments
# have been passed.
###################################################################
# Command-line interface definition.
# FIX: the original list was missing the comma between the "--custom" and
# "--mat" entries, which is a syntax error that prevents the whole file from
# parsing; the comma is restored below. No option semantics were changed.
option_list = list(
   make_option(c("-i", "--img"), action="store", default=NA, type='character',
              help="Path to the BOLD timeseries that will be modelled"),
   make_option(c("-s", "--stick"), action="store", default=NA, type='character',
              help="Path to a directory containing files with onset,
              duration, and amplitude information for the ideal
              activation model to be fit, for instance as stick
              functions"),
   make_option(c("-d", "--deriv"), action="store", default=TRUE, type='logical',
              help="Specify whether you wish to include the first
              temporal derivatives of each ideal timeseries in
              the linear model."),
   make_option(c("-n", "--interval"), action="store", default='seconds', type='character',
              help="Specify whether the timescale in the stick
              functions is in units of seconds or repetition
              times. Accepted options include:
              'seconds' [default]
              'trep'"),
   make_option(c("-c", "--custom"), action="store", default=NA, type='character',
              help="Comma-separated list of paths to files
              containing nuisance regressors or other custom
              timeseries to be included in the model. Columns
              in the file should correspond to timeseries and
              should be equal in length to the analyte BOLD
              timeseries."),
   make_option(c("-m", "--mat"), action="store", default=NA, type='character',
              help="Use this option to write output in FSL .mat format.")
)
opt = parse_args(OptionParser(option_list=option_list))
# Both the input timeseries (-i) and the model directory (-s) are required.
if (is.na(opt$img)) {
  cat('User did not specify an input timeseries.\n')
  cat('Use stick2lm.R -h for an expanded usage menu.\n')
  quit()
}
if (is.na(opt$stick)) {
  cat('User did not specify a model.\n')
  cat('Use stick2lm.R -h for an expanded usage menu.\n')
  quit()
}
impath <- opt$img
modeldir <- opt$stick
derivs <- opt$deriv
interval <- opt$interval
custom <- opt$custom
# Silence console output from the helper calls below; restored with
# sink(NULL) just before the design matrix is printed.
sink("/dev/null")
###################################################################
# 1. Determine the repetition time and number of volumes from
#    the analyte timeseries.
###################################################################
suppressMessages(require(ANTsR))
# Header fields are read by shelling out to FSL's fslval utility:
# pixdim4 = repetition time, dim4 = number of volumes.
syscom <- paste("fslval",impath,"pixdim4")
trep <- as.numeric(system(syscom,intern=TRUE))
syscom <- paste("fslval",impath,"dim4")
nvol <- as.numeric(system(syscom,intern=TRUE))
###################################################################
# 2. Obtain a list of stick function files.
###################################################################
# Every entry in the model directory is treated as one stick-function file.
syscom <- paste0("ls -d1 ",modeldir,"/*")
models <- system(syscom,intern=TRUE)
###################################################################
# Iterate through the stick function files.
###################################################################
# Design-matrix columns accumulate here: one column per model file, plus a
# derivative column per file when derivs is TRUE.
lmmat <- c()
for (mfile in models) {
   ################################################################
   # 3. Load the stick function from the file.
   #
   # * The first column must represent the onset time for each
   #   stimulus, in seconds.
   # * The second column must represent the duration of each
   #   stimulus, in seconds.
   # * The third column must represent the magnitude of each
   #   stimulus, in seconds.
   ################################################################
   model <- read.table(mfile)
   # NOTE: `ncol` and `nrow` shadow the base functions of the same
   # name inside this loop.
   ncol <- dim(model)[2]
   nrow <- dim(model)[1]
   onset <- c()
   duration <- c()
   magnitude <- c()
   if (ncol >= 1) { onset <- model[,1] }
   if (ncol >= 2) { duration <- model[,2] }
   if (ncol >= 3) { magnitude <- model[,3] }
   # Onsets are passed as explicit times only when they are in seconds;
   # otherwise hemodynamicRF interprets them on the TR grid.
   if (interval == 'seconds') {
      times <- onset
   } else {
      times <- NULL
   }
   ################################################################
   # 4. Convolve each stick function with a modelled HRF.
   ################################################################
   if (isempty(magnitude) || numel(unique(magnitude)) <= 1) {
      convmodel <- hemodynamicRF(scans = nvol,
                                 onsets = onset,
                                 durations = duration,
                                 rt = trep,
                                 times = times,
                                 a1 = 8)
   ################################################################
   # The HRF convolution that is built into ANTsR does not
   # support stimuli of different magnitudes. Because
   # convolution is distributive, it is possible to model this
   # as a weighted sum of convolutions.
   ################################################################
   } else {
      # One convolved column per stimulus, then summed across stimuli.
      cmodels <- zeros(nvol,nrow)
      for (i in seq(1,nrow)) {
         cmodels[,i] <- hemodynamicRF(scans = nvol,
                                      onsets = onset[i],
                                      durations = duration[i],
                                      rt = trep,
                                      times = times,
                                      a1 = 8)
      }
      convmodel <- apply(cmodels,1,sum)
   }
   ################################################################
   # 5. Compute the temporal derivative of the convolved model if
   #    that should be included in the design matrix.
   ################################################################
   dconv1 <- c()
   dconv <- c()
   if (derivs) {
      # Backward difference, padded with a leading zero so the derivative
      # column has the same length as the convolved model.
      dconv <- zeros(nvol,1)
      dconv1 <- zeros(nvol + 1,1)
      dconv1[2:nvol,] <- diff(convmodel,lag = 1)
      #############################################################
      # FSL uses the mean of signal differences before and after
      # the time point to determine the temporal derivative at
      # that time point.
      #
      # This behaviour is overriden for now.
      #############################################################
      #for (i in seq(1,nvol)) {
      #   dconv[i,] <- mean(dconv1[i:i+1,])
      #}
      dconv <- dconv1[1:nvol,]
      convmodel <- cbind(convmodel,dconv)
   }
   lmmat <- cbind(lmmat,convmodel)
}
###################################################################
# 6. Read in any requested motion parameters or nuisance
#    regressors.
###################################################################
if (!is.na(custom)) {
   # -c accepts a comma-separated list of paths; each file's columns are
   # appended to the design matrix as-is.
   custom <- unlist(strsplit(custom,split=','))
   for (i in custom) {
      curreg <- read.table(i)
      lmmat <- cbind(lmmat,curreg)
   }
}
###################################################################
# 7. Compute the maximal amplitude of each ideal timeseries.
###################################################################
# Column-wise peak-to-peak range, reported as /PPheights below.
amplitude <- apply(lmmat,2,max) - apply(lmmat,2,min)
###################################################################
# 8. Print the design matrix.
###################################################################
# Restore console output (suppressed earlier with sink("/dev/null")) and
# emit the matrix in FSL's .mat text format: header fields, then one
# tab-separated row per volume.
sink(NULL)
nvar <- dim(lmmat)[2]
cat('/NumWaves\t',nvar,'\n')
cat('/NumPoints\t',nvol,'\n')
cat('/PPheights\t',amplitude,'\n\n')
cat('/Matrix\n')
for (i in 1:nvol){
   cat(unlist(lmmat[i,]), sep='\t')
   cat('\n')
}
|
a48b3bb8d5b1ca5fefc32ebd69aa8ab52cf697bb | 541202ce433b1fdcff47e07647333806fb0e6d6c | /tests/testthat/test-geos-polygonize.R | ffba3543c45de9e2362d392abbe1193bee2868bc | [
"MIT"
] | permissive | morandiaye/geos | fe3b786366a9ec891d1aa79c9b5b4144d64c437d | fd33a9ca885f2a5e2793cd738e6c4173051eb44f | refs/heads/master | 2023-08-26T10:07:43.055164 | 2021-11-07T15:12:15 | 2021-11-07T15:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,567 | r | test-geos-polygonize.R |
# Happy path plus failure modes for geos_polygonize(): three touching
# segments polygonize into one triangle, NA input propagates, a stale
# external pointer errors, and the input CRS is carried to the result.
test_that("polygonize works", {
  expect_true(
    geos_equals(
      geos_polygonize("MULTILINESTRING ((0 0, 0 1), (0 1, 1 0), (1 0, 0 0))"),
      "POLYGON ((0 0, 0 1, 1 0, 0 0))"
    )
  )
  expect_identical(geos_polygonize(NA_character_), geos_read_wkt(NA_character_))
  # A saveRDS()/readRDS() round-trip invalidates the external pointer, which
  # lets us exercise the pointer-validity error path.
  bad_ptr <- geos_read_wkt("POINT (0 0)")
  tmp <- tempfile()
  saveRDS(bad_ptr, tmp)
  bad_ptr <- readRDS(tmp)
  unlink(tmp)
  expect_error(geos_polygonize(bad_ptr), "not a valid external pointer")
  expect_identical(
    wk::wk_crs(
      geos_polygonize(
        as_geos_geometry("MULTILINESTRING ((0 0, 0 1), (0 1, 1 0), (1 0, 0 0))", crs = 12)
      )
    ),
    12
  )
})
# geos_polygonize_valid() should agree with geos_polygonize() on
# already-valid input (no fixture demonstrating a difference is available).
test_that("polygonize valid works", {
  # don't have a good example of how these are different
  expect_true(
    geos_equals(
      geos_polygonize("MULTILINESTRING ((0 0, 0 1), (0 1, 1 0), (1 0, 0 0))"),
      geos_polygonize_valid("MULTILINESTRING ((0 0, 0 1), (0 1, 1 0), (1 0, 0 0))")
    )
  )
})
# With fully-used edges there are no cut edges, so the result is an empty
# collection; the CRS of the input must still be propagated.
test_that("polygonize cut edges works", {
  # don't have a good example of how to create a cut edge here
  expect_true(
    geos_equals(
      geos_polygonize_cut_edges("MULTILINESTRING ((0 0, 0 1), (0 1, 1 0), (1 0, 0 0))"),
      "GEOMETRYCOLLECTION EMPTY"
    )
  )
  expect_identical(
    wk::wk_crs(
      geos_polygonize_cut_edges(
        as_geos_geometry("MULTILINESTRING ((0 0, 0 1), (0 1, 1 0), (1 0, 0 0))", crs = 12)
      )
    ),
    12
  )
})
# geos_polygonize_full() returns a four-element list (result, cut_edges,
# dangles, invalid_rings); on clean input the last three are empty, NA input
# yields NA in every element, a stale pointer errors, and the CRS is
# propagated to each element of the list.
test_that("polygonize full works", {
  poly_valid <- geos_polygonize_full("MULTILINESTRING ((0 0, 0 1), (0 1, 1 0), (1 0, 0 0))")
  expect_true(
    geos_equals(
      poly_valid$result,
      geos_polygonize("MULTILINESTRING ((0 0, 0 1), (0 1, 1 0), (1 0, 0 0))")
    )
  )
  expect_true(geos_equals(poly_valid$cut_edges, geos_empty()))
  expect_true(geos_equals(poly_valid$dangles, geos_empty()))
  expect_true(geos_equals(poly_valid$invalid_rings, geos_empty()))
  poly_null <- geos_polygonize_full(NA_character_)
  expect_identical(names(poly_null), names(poly_valid))
  expect_true(all(vapply(poly_null, identical, geos_read_wkt(NA_character_), FUN.VALUE = logical(1))))
  # A saveRDS()/readRDS() round-trip invalidates the external pointer.
  bad_ptr <- geos_read_wkt("POINT (0 0)")
  tmp <- tempfile()
  saveRDS(bad_ptr, tmp)
  bad_ptr <- readRDS(tmp)
  unlink(tmp)
  expect_error(geos_polygonize_full(bad_ptr), "not a valid external pointer")
  expect_identical(
    lapply(
      geos_polygonize_full(
        as_geos_geometry("MULTILINESTRING ((0 0, 0 1), (0 1, 1 0), (1 0, 0 0))", crs = 12)
      ),
      wk::wk_crs
    ),
    list(result = 12, cut_edges = 12, dangles = 12, invalid_rings = 12)
  )
})
|
a6f62589de4610b0814e3282af3f657548d88522 | 7225139edc69f95a358143b6da98dee0d8dd83b4 | /r_kkagi/ch04_ex02.R | afbf1d188b57942f2d9c6baa4241bb3589dafc8f | [] | no_license | diligejy/r_train | 57512fb571fcef4bdc63e3fccdc8d304ff020024 | 925bb9fea85151a4e1be8ddc9ced3e48a3d789a8 | refs/heads/master | 2022-12-16T06:44:49.017734 | 2020-09-16T10:04:42 | 2020-09-16T10:04:42 | 258,130,049 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 424 | r | ch04_ex02.R | DF <- mtcars
# Draw scatter-plot matrices (comments translated from Korean)
plot(DF[, c(1:5)])
plot(DF[,c(6:11)])
# Draw only the plot we are interested in
plot(DF$mpg ~ DF$disp)
# disp -> engine displacement, mpg -> fuel economy
# Compute the correlation coefficient
cor(DF$mpg, DF$disp)
# Take a closer look at mpg (fuel economy)
summary(DF$mpg)
boxplot(DF$mpg)
hist(DF$mpg)
hist(DF$mpg, breaks = 10)
# Explore disp (engine displacement)
boxplot(DF$disp)
hist(DF$disp)
hist(DF$disp, breaks = 20)
|
a0c4e057452e4fb5eae3b47ff7fb15a2ed9c6b89 | da980b7e2a20ca7a3e5e7ba1f54cb670bb2306bd | /rsample.R | cc0af63c27c230edc0b5df85b10d7949f93d1edb | [
"Apache-2.0"
] | permissive | pucpsophia/linealr | 13f6279411a884eb2c569ce7244caf828bc233bb | 39d6ad3e8350de1237adf8028d9961cf0ba3160e | refs/heads/master | 2021-04-15T13:56:28.320047 | 2018-03-24T23:06:29 | 2018-03-24T23:06:29 | 126,647,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 170 | r | rsample.R | library("faraway")
# FIX: dropped the original `attach(cathedral)` — every reference below
# already uses `cathedral$...`, so the attach only polluted the search path
# (a well-known R anti-pattern) without affecting any result.
str(cathedral)
head(cathedral)
# Label each point with its row name.
lab <- rownames(cathedral)
# Scatter plot of y against x, with each point labelled.
plot(cathedral$x, cathedral$y)
text(cathedral$x, cathedral$y, lab = lab)
|
e88a875b987212020527462f8053b97bed6b6e19 | 7167a42a17be4c995958aaeac639cfa84867e7e9 | /US_only/1-TrendInHumidityUSOnly.R | 5c9ebfd52e61588edb5cb9bd9cdefa9ed3d6ec27 | [] | no_license | tristanballard/false_discovery_rate | b78dd9551ae1d9ea63bf50f1762cd367a2f60fb2 | b8583e547527dac12f62df54a93c77360d14fd8f | refs/heads/master | 2020-12-30T12:24:18.097054 | 2017-05-15T21:47:37 | 2017-05-15T21:47:37 | 91,384,311 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,592 | r | 1-TrendInHumidityUSOnly.R | suppressMessages(library(fields))
suppressMessages(library(ncdf4))
suppressMessages(library(RColorBrewer))
suppressMessages(library(abind))
suppressMessages(library(base))
suppressMessages(library(Rfit))
#File below is too large (150mb) to include in the zipfile;
# shum.hw: 4-d array indexed [lon(192), lat(94), time-of-year(1460 = 365
# days x 4 obs/day), year(36)] — dimensions per the comments below and how
# the array is subset later in this script.
shum.hw=readRDS("/scratch/users/tballard/shum/percentile.threshold/shum.hw")
## Define month indices ##
# Positions along the 1460-step annual axis (4 observations per day).
jan=1:124; feb=125:236; mar=237:360; apr=361:480;may=481:604; jun=605:724;
jul=725:848; aug=849:972; sep=973:1092; oct=1093:1216; nov=1217:1336; dec=1337:1460;
## Extract the shum values for the month of interest. If I were better at matrix operations
## I might be able to avoid this, but running regression on 192x94x1460x36 is tricky when
## you want 3 dimensions instead
# Pull one month's worth of 6-hourly values for every year and stack them
# along a single time dimension.
#
# Improvements over the original:
#  * no growing an array with abind() inside a loop (which copied the whole
#    accumulator on every iteration and needed an NA seed slice that was
#    stripped afterwards) — R arrays are column-major, so collapsing the
#    time-of-year and year dimensions in one array() call concatenates the
#    years in the same order the loop produced;
#  * the 192 x 94 grid and the 36-year count are no longer hard-coded; they
#    are read from the array itself;
#  * `data` is a new optional argument defaulting to the global shum.hw, so
#    existing calls are unchanged.
#
# Args:
#   month: integer positions along the third (time-of-year) dimension.
#   data:  4-d array [lon, lat, time-of-year, year].
# Returns: 3-d array [lon, lat, length(month) * n_years], year-major order
#   (all of year 1's slices first, then year 2, ...).
month.extract=function(month, data=shum.hw){
  n_year <- dim(data)[4]
  array(data[, , month, , drop=FALSE],
        dim=c(dim(data)[1], dim(data)[2], length(month)*n_year))
}
daily.jan.values=month.extract(jan) #192lat x 94lon x (31day*4val/day*36yr=4464time)
daily.jul.values=month.extract(jul)
##### Read in ocean/land mask file 1's are land #####
fileName="/scratch/PI/omramom/reanalysis/ncep-doe-r2/4xdaily/ocean.mask/land.sfc.gauss.nc"
land = ncvar_get(nc_open(fileName), "land") #192 x 94
# Recode the mask so that multiplication can be used for masking below.
land[land==0]=NA #set ocean values to NA instead of 0
# Elementwise land mask: `land` is 1 on land and NA over ocean, so the
# product keeps land values unchanged and turns ocean cells into NA.
mask=function(data,land){
  land * data
}
# Apply the land mask to every time slice. apply() flattens the spatial
# grid, so each result is reshaped back to lon x lat x time right after.
daily.jan.values.mask=apply(daily.jan.values, c(3), mask, land=land) #dim=18048x4464
daily.jan.values.mask=array(daily.jan.values.mask, dim=c(192,94,4464))
daily.jul.values.mask=apply(daily.jul.values, c(3), mask, land=land)
daily.jul.values.mask=array(daily.jul.values.mask,dim=c(192,94,4464))
##### Mask out as well all the globe not in the US and southern Canada #####
zone=c(26,75,230,300) #lat.min, lat.max, lon.min, lon.max
##### Read in lat/lon values you'll use to make the mask #####
fileName="/scratch/PI/omramom/reanalysis/ncep-doe-r2/daily/shum/shum.2m.gauss.1979.nc"
lon. = ncvar_get(nc_open(fileName), "lon") #192 values (0 to 358.12)
lat. = ncvar_get(nc_open(fileName), "lat") #94 values (88.542 to -88.542)
region.lat=lat.>zone[1] & lat.<zone[2] #True if lat is w/in bounds
region.lon=lon.>zone[3] & lon.<zone[4] #True if lon is w/in bounds
region.lat[region.lat==FALSE]=NA #Set False values to NA
region.lon[region.lon==FALSE]=NA
region.lat=region.lat+0; region.lon=region.lon+0 #Set TRUE values to 1
#Now that you have a vector for lat and vector for lon of NA or 1's,
#Combine that into a matrix format that you can multiply things by
region.lat.mat=matrix(rep(region.lat,192), nrow=192, byrow=T)
region.lon.mat=matrix(rep(region.lon,94), nrow=192)
# 1 inside the lat/lon box, NA elsewhere (NA propagates through `*`).
region.mask=region.lat.mat*region.lon.mat #192x94
####Below is applying the mask to your entire array of values
# Elementwise region mask (redefines the earlier `mask`): `region.mask` is
# 1 inside the study region and NA outside, so multiplication passes region
# values through and NAs everything else.
mask=function(data, region.mask){
  region.mask * data
}
# Same flatten-then-reshape pattern as the land mask above, now restricting
# the land-masked arrays to the US/southern-Canada box.
daily.jan.values.mask2=apply(daily.jan.values.mask, c(3), mask, region.mask=region.mask) ###Pick region in this line
daily.jan.values.mask2=array(daily.jan.values.mask2,dim=c(192,94,4464))
daily.jul.values.mask2=apply(daily.jul.values.mask, c(3), mask, region.mask=region.mask) ###Pick region in this line
daily.jul.values.mask2=array(daily.jul.values.mask2,dim=c(192,94,4464))
##### Compute them OLS trends! #####
#Function below computes OLS regression of shum vs year and outputs the slope value and p-value
#Then apply this function to every pixel using 'apply' command
#Note the lm function automatically skips over NA's
# Per-pixel OLS trend: regress a pixel's time series on calendar year and
# return c(slope, p-value). Any failure (e.g. an all-NA ocean pixel, where
# lm() cannot fit) yields c(NA, NA) instead of aborting the apply() sweep.
fit.lm=function(dataset, month){
  # Each of the years 1979..2014 contributes length(month) observations.
  yr <- rep(1979:2014, each = length(month))
  tryCatch(
    summary(lm(dataset ~ yr))$coefficient[2, c(1, 4)],
    error = function(e) c(NA, NA)
  )
}
#Array of lon,lat,month,results; results is 2D of the slope and its p-value from running the regression
# (NB: fit.lm returns coefficient[2, c(1, 4)] = estimate and Pr(>|t|), i.e.
# the p-value — not the standard error.)
#'aperm' rearranges order of arrays. the aperm below switches the apply output from dim=2,192,94 to dim=192,94,2
lm.trends=array(rep(NA,192*94*2*2),c(192,94,2,2)) #initialize
lm.trends[,,1,]=aperm(apply(daily.jan.values.mask2, c(1,2), fit.lm, month=jan), c(2,3,1))
lm.trends[,,2,]=aperm(apply(daily.jul.values.mask2, c(1,2), fit.lm, month=jul), c(2,3,1))
saveRDS(lm.trends,"/scratch/users/tballard/shum/class.project/shum.hw.trend.USonly.rds") #194x94x2monthsx2variables
##### Compute rank based regression trends #####
# Same contract as fit.lm above — returns c(slope, p-value) or c(NA, NA) on
# failure — but fitted with rank-based regression from the Rfit package.
rfit.lm=function(dataset, month){
  years=rep(c(1979:2014),each=length(month)) #1979, 1979, ... 2014, 2014
  a=tryCatch(summary(rfit(dataset~years))$coefficient[2,c(1,4)], error=function(e) c(NA,NA)) #slope for 'year' and p-value
  return(a)
}
#Array of lon,lat,month,results; results is 2D of the slope and its p-value from running the regression
#'aperm' rearranges order of arrays. the aperm below switches the apply output from dim=2,192,94 to dim=192,94,2
# Mirrors the OLS block above, using the rank-based rfit.lm per pixel.
rank.trends=array(rep(NA,192*94*2*2),c(192,94,2,2)) #initialize
rank.trends[,,1,]=aperm(apply(daily.jan.values.mask2, c(1,2), rfit.lm, month=jan), c(2,3,1))
rank.trends[,,2,]=aperm(apply(daily.jul.values.mask2, c(1,2), rfit.lm, month=jul), c(2,3,1))
saveRDS(rank.trends,"/scratch/users/tballard/shum/class.project/shum.hw.rank.trend.USonly.rds") #194x94x2monthsx2variables
|
579f8b14288d0be0f3d75517e1becbbfd2557bd9 | 0ee989cf562771905cb979ad19a49eba735f77db | /R/amh_proposal_refresh.R | a80b8ef2b1e17fd84c335379c2dfe8f30b94979d | [] | no_license | alexanderrobitzsch/LAM | e403fdd1cfd573566ed67035ecb8873ee63dd53e | 14431ad82c95671353fda9fba1fbe8c5102a7ba8 | refs/heads/master | 2023-05-14T05:08:53.569886 | 2023-05-03T09:13:01 | 2023-05-03T09:13:01 | 95,305,128 | 5 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,315 | r | amh_proposal_refresh.R | ## File Name: amh_proposal_refresh.R
## File Version: 0.25
#*** adaptive refresh of the random-walk proposal standard deviations
amh_proposal_refresh <- function( acceptance_parameters, proposal_sd,
            acceptance_bounds, acceptance_rates_history, proposal_equal )
{
    # Tuning target: the midpoint of the admissible acceptance interval.
    target_rate <- mean(acceptance_bounds)
    # Observed acceptance rate per parameter (accepted / attempted).
    acc_rate <- acceptance_parameters[,1] / acceptance_parameters[,2]
    #-- adapt: shrink the SD where acceptance is below the lower bound,
    #-- widen it where acceptance is above the upper bound
    sd_new <- ifelse( acc_rate < acceptance_bounds[1],
                proposal_sd / ( 2 - acc_rate / target_rate ), proposal_sd )
    sd_new <- ifelse( acc_rate > acceptance_bounds[2],
                sd_new * ( 2 - ( 1 - acc_rate ) / ( 1 - target_rate ) ), sd_new )
    # Parameters whose SD has been stable for proposal_equal refreshes in a
    # row keep their incoming SD.
    sd_new <- ifelse( acceptance_parameters$no_change >= proposal_equal,
                proposal_sd, sd_new )
    # 1 where this refresh left the SD untouched, 0 otherwise.
    sd_kept <- 1 * ( proposal_sd == sd_new )
    #-- record the acceptance rates and advance the history write index
    row_now <- attr( acceptance_rates_history, "include" )
    acceptance_rates_history[ row_now, ] <- acc_rate
    attr( acceptance_rates_history, "include" ) <- row_now + 1
    #-- reset the acceptance counters and update the stability counter
    acceptance_parameters[, 1:2] <- 0
    acceptance_parameters$no_change <- acceptance_parameters$no_change + sd_kept
    acceptance_parameters$no_change[ sd_kept == 0 ] <- 1
    #-- output
    list( proposal_sd = sd_new,
        acceptance_parameters = acceptance_parameters,
        acceptance_rates_history = acceptance_rates_history )
}
|
e798b4b21543a4b5149c34f6c214425db1c8d202 | acc1ad04e1827919205fabb796fcf89592781f5c | /R/plot_past.R | 886fccc9c0507075957ff57e42989dcc78eae5ee | [] | no_license | csgillespie/benchmarkme-data | f2c21707a6babc6f51263a49140390433fc3a574 | 0672aedd6678f5f60d7643f7eefa4031788899b8 | refs/heads/master | 2020-05-21T12:23:20.201955 | 2020-04-23T14:31:48 | 2020-04-23T14:31:48 | 48,945,789 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,573 | r | plot_past.R | globalVariables(c("cpu", "n_time", "past_results_v2", "time", "sysname", "ram"))
#' Scatter plot of past benchmarks
#'
#' Plot the previous benchmarks. This function creates two figures.
#' \itemize{
#' \item Figure 1: Total benchmark time over all benchmarks (in seconds) on the y-axis.
#' \item Figure 2: Relative time (compared to the smallest benchmark).
#' }
#' The data set used is \code{data(past_results_v2)}.
#' @param test_group One of "prog", "matrix_fun", "matrix_cal", "read5", "read50", "read200",
#' "write5", "write50" or "write200". This argument is required; an
#' informative error is raised when it is missing or not a known benchmark.
#' @param blas_optimize Default \code{NULL}. The default behaviour is to plot all results.
#' To plot only the BLAS optimized results, set to \code{TRUE}, otherwise \code{FALSE}.
#' @param cores Default \code{0}, i.e. no parallel.
#' @param log By default the y axis is plotted on the log scale. To change, set the
#' the argument equal to the empty parameter string, \code{""}.
#' @return (Invisibly) the data frame of selected results used for the plots.
#' @importFrom graphics abline grid par plot points legend
#' @importFrom grDevices palette rgb
#' @importFrom utils data
#' @importFrom stats aggregate
#' @import dplyr
#' @export
#' @examples
#' ## Plot all past results for the `prog` benchmark
#' plot_past("prog", blas_optimize = NULL)
plot_past = function(test_group,
                     blas_optimize = NULL,
                     cores = 0,
                     log = "y") {
  # Validate the benchmark name before doing any work.
  if (missing(test_group) || !(test_group %in% get_benchmarks())) {
    stop("test_group should be one of\n\t",
         get_benchmarks(collapse = TRUE),
         call. = FALSE)
  }
  results = select_results(test_group, blas_optimize = blas_optimize,
                           cores = cores)
  ## Arrange plot colours and layout
  # Graphics state is global, so capture the old settings and restore them
  # via on.exit() even if plotting fails part-way through.
  op = par(mar = c(3, 3, 2, 1),
           mgp = c(2, 0.4, 0), tck = -.01,
           cex.axis = 0.8, las = 1, mfrow = c(1, 2))
  old_pal = palette()
  on.exit({
    palette(old_pal)
    par(op)
  })
  nice_palette()
  ymin = min(results$time)
  ymax = max(results$time)
  # Figure 1: absolute total timings, in rank order.
  plot(results$time, xlab = "Rank", ylab = "Total timing (secs)",
       ylim = c(ymin, ymax), xlim = c(1, nrow(results) + 1), cex = 0.9,
       panel.first = grid(), log = log, pch = 21, bg = as.numeric(results$test_group))
  ## Relative timings
  # Figure 2: timings rescaled so the fastest result is exactly 1.
  fastest = min(results$time)
  ymax = ymax / fastest
  plot(results$time / fastest, xlab = "Rank", ylab = "Relative timing",
       ylim = c(1, ymax), xlim = c(1, nrow(results) + 1), cex = 0.9,
       panel.first = grid(), log = log, pch = 21, bg = as.numeric(results$test_group))
  abline(h = 1, lty = 3)
  invisible(results)
}
|
98a7baa723226f436cd4ddd8e4cd60c788cb1e5d | b650358a163e9187cb5db226e5c4acba52fde98a | /viz-with-r.R | 125e9284e1092e7e7d6aa48095d9136e0b70a5f7 | [] | no_license | andrewbtran/viz-in-r | 11ead60480d3d945066d730096a86ab11a277ee0 | ed467971b4befd01563b3e5b28e48f574017199e | refs/heads/master | 2021-01-11T18:53:45.764539 | 2015-12-09T03:56:40 | 2015-12-09T03:56:40 | 47,667,459 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,889 | r | viz-with-r.R | # libraries needed for some neat charts
library(plyr)
library(lubridate)
library(dplyr)
library(ggplot2)
library(tidyr)
# The city gave us two separate files
garage1 <- read.csv("council_garage/garage_rawdata1.csv")
garage2 <- read.csv("council_garage/garage_rawdata2.csv")
# We have to bring them together, so we'll bind by rows.
# This only works if column names are identical
garage <- rbind(garage1, garage2)
# We've been told that garage door is the more accurate measure.
# So let's filter out that dataset
# (Both readers are kept, but only `garage_door` is used below.)
garage_out <- subset(garage, Location=="EXECUTIVE GARAGE OVERHEAD DOOR (READER GARAGE OUT)")
garage_door <- subset(garage, Location=="EXECUTIVE GARAGE OVERHEAD DOOR (READER GARAGE OVERHEAD DOOR)")
# We've got a cleaner data set, but we want to focus on City Council members only
# Focus on City Council members only.
# Using %in% against a single vector of names replaces the original chain of
# fourteen `Who == ... | ...` comparisons and drops the duplicated
# "McCarthy, Timothy" entry that appeared twice in that chain. The rows
# selected are identical.
council_members <- c("Linehan, William", "Flaherty, Michael", "Murphy, Stephen",
                     "Pressley, Ayanna", "Wu, Michelle", "Lamattina, Salvatore",
                     "Baker, Frank", "Yancey, Charles", "McCarthy, Timothy",
                     "O'Malley, Matthew", "Jackson, Tito", "Zakim, Josh",
                     "Ciommo, Mark")
garage_council <- subset(garage_door, Who %in% council_members)
# Now let's fix the dates using the lubridate package
garage_council$datetime <- mdy_hm(garage_council$Date.time)
# Let's extract the time of the day from the timestamp
garage_council$hour <- hour(garage_council$datetime)
# Making a basic histogram
hist(garage_council$hour)
# Kind of broad. Let's narrow it down (one bin per hour of the day)
hist(garage_council$hour, breaks=(0:24))
# Better but that's the limits of out-of-the-box graphics. Let's get into ggplot
qplot(hour, data=garage_council, geom="histogram")
# Prettier. But the viz looks off. Let's play around with the binwidth
qplot(hour, data=garage_council, geom="histogram", binwidth=1)
qplot(hour, data=garage_council, geom="histogram", binwidth=.5)
# qplot is only a slight step up. ggplot is where it gets better
# (note: the variable `c` shadows base R's c() function here)
c <- ggplot(garage_council, aes(x=hour))
c + geom_histogram()
c + geom_histogram(binwidth=1)
# Let's add some color
c + geom_histogram(colour="darkred",fill="white", binwidth=1)
# Let's break it out by council person via facets
c <- c + geom_histogram(colour="darkred",fill="white", binwidth=1)
c + facet_grid(. ~ Who)
# Whoa, we're getting somewhere! But it looks funky. Way too wide. Let's swap it.
c + facet_grid(Who ~ .)
# Better. Try exporting as a PNG or a PDF through the plot viewer on the right.
# Let's get ambitious. What about the day per councilor?
# We have to go back and add a column for day based on the timestamp
garage_council$day <- wday(garage_council$datetime)
head(garage_council$day)
# Hm. Day is a number... I want the day spelled out. How do I find out?
# (help lookup — only useful in an interactive session)
?wday
garage_council$day <- wday(garage_council$datetime, label=TRUE, abbr=TRUE)
# Great, let's try to generate the chart again
c + facet_grid(Who ~ day)
# That didn't work... why?
# Because we have to reload the dataframe with new day column into "c"
c <- ggplot(garage_council, aes(x=hour))
c <- c + geom_histogram(colour="darkred",fill="white", binwidth=1)
c + facet_grid(Who ~ day)
# OK, I'm being picky now. Let's clean it up a little bit. I only want last names
# What's the variable in the Who column?
typeof(garage_council$Who)
# Ok, it's a factor. We need to change it into a string so we can edit it
garage_council$Who <- as.character(garage_council$Who)
# Easy. Let's replace everything after the comma with a blank, leaving behind the last name
garage_council$Who <- gsub(",.*","",garage_council$Who)
# OK, let's chart it again
c <- ggplot(garage_council, aes(x=hour))
c <- c + geom_histogram(colour="darkred",fill="white", binwidth=1)
c + facet_grid(Who ~ day)
# Good. Let's add a chart title
c <- ggplot(garage_council, aes(x=hour))
c <- c + geom_histogram(colour="darkred",fill="white", binwidth=1)
c <- c + ggtitle("Council member garage door triggers by hour and day")
council_histograms <- c + facet_grid(Who ~ day)
# Better! Now what about the Y axis title...
c <- ggplot(garage_council, aes(x=hour))
c <- c + geom_histogram(colour="darkred",fill="white", binwidth=1)
c <- c + ggtitle("Council member garage door triggers by hour and day")
c <- c + ylab("Garage Triggers")
council_histograms <- c + facet_grid(Who ~ day)
# Ok, let's export the file (you can also export as a .pdf, if you want)
ggsave(council_histograms, file="council_histograms.png", width=10, height=20)
# Congratulations. For more great info about ggplots,
# Check out Grammar of Graphics with R & ggplot
# Challenge time! export a .pdf of checkins for everyone who's NOT a council member
# NEXT! Let's look at coffee
grow <- read.csv("starbucksgrowth.csv")
# Take a look at the data
grow
# Make a quick chart of US growth - plot(x, y,...)
plot(grow$Year, grow$US)
# Put line between the dots-- Check ?plot
?plot
plot(grow$Year, grow$US, type="l")
# Add another line for Worldwide growth
plot(grow$Year, grow$US, type="l")
lines(grow$Year, grow$Worldwide, type="l", col="red")
# Well, that's weird.
# Here's the problem. Out-of-the-box plotting is based on layers: the axis
# limits are fixed by the first plot() call, so the larger series drawn
# second gets clipped.
# Start over but with the order flipped
plot(grow$Year, grow$Worldwide, type="l", col="red")
lines(grow$Year, grow$US, type="l", col="green")
# Much better. Let's clean up the axis titles and add a header
plot(grow$Year, grow$Worldwide, type="l", col="red", main="Starbucks by year", xlab="Year", ylab="Starbucks")
lines(grow$Year, grow$US, type="l", col="green")
# It's missing something.
legend("topleft", # places a legend at the appropriate place
       c("Worldwide","US"), # puts text in the legend
       lty=c(1,1), # gives the legend appropriate symbols (lines)
       lwd=c(2.5,2.5),col=c("red","green")) # gives the legend lines the correct color and width
# Alright, that's ok. Kinda boring. Let's ggplot it up
qplot(Year, Worldwide, data=grow, geom="line")
# Alternatively,
g <- ggplot(grow, aes(x=Year, y=Worldwide)) + geom_line()
# We can't plot the second line easily. We need to change the structure of
# the dataframe (wide -> long). See:
# http://www.rstudio.com/wp-content/uploads/2015/02/data-wrangling-cheatsheet.pdf
growgg <- gather(grow, "Type", "Starbucks", 2:3)
# Ok, now we can plot it with two lines
ggplot(growgg, aes(x=Year, y=Starbucks, col=Type)) + geom_line()
qplot(factor(Year), data=growgg, geom="bar", fill=Type, binwidth=1)
ggplot(growgg, aes(Year, fill=Type)) + geom_bar(binwidth=1)
# Nice! Let's add a title
ggplot(growgg, aes(x=Year, y=Starbucks, col=Type)) + geom_line() + ggtitle("Starbucks growth since 1992")
# Something fun: Let's export the chart we made to Plot.ly
# First, assign the ggplot to a variable
plotlyggplot <- ggplot(growgg, aes(x=Year, y=Starbucks, col=Type)) + geom_line() + ggtitle("Starbucks growth since 1992")
# Next, download the library
# Get more thorough instructions here https://plot.ly/r/getting-started/
library(devtools)
# load the plotly library
library(plotly)
# set up your authorization. Create a login account and generate your own key
# https://plot.ly/settings/api
# edit this code with your username and API key and run it
set_credentials_file("PlotlyUserName", "APIKey")
# Now, prepare the plotly environment
py <- plotly()
# This will send your ggplot to Plotly and render it online
# (requires network access and valid credentials)
plotted <- py$ggplotly(plotlyggplot)
# Edit it a bit. Add sourceline, etc.
# Plotly has great documentation, guides for how to use R to make charts
# https://plot.ly/r/
# Another chart maker https://rstudio.github.io/dygraphs/index.html
library(dygraphs)
library(xts)
# Need to convert our years into a time series recognized by R
grow$Year <- strptime(grow$Year, "%Y")
# This is to convert the time series into another format called eXtensible Time Series
grow <- xts(grow[,-1],order.by=as.POSIXct(grow$Year))
dygraph(grow)
# Customize it
dygraph(grow) %>% dyRangeSelector()
# More customization on height and chart type and headline
dygraph(grow,
        main = "Starbucks growth worldwide",
        ylab = "Starbucks") %>%
  dySeries("Worldwide", label = "World") %>%
  dySeries("US", label = "US") %>%
  dyOptions(stackedGraph = TRUE) %>%
  dyRangeSelector(height = 20)
# Bring in some interesting data
sbux <- read.csv("starbucks.csv")
# Load in some libraries
# Leaflet for R tutorial https://rstudio.github.io/leaflet/
require(leaflet)
require(dplyr)
# Make a simple map just to test
m <- leaflet() %>%
  addTiles() %>%  # Add default OpenStreetMap map tiles
  addMarkers(lng=-71.101936, lat=42.348799, popup="Storytelling with Data")
m  # Print the map
# How many rows are there?
nrow(sbux)
m <- leaflet(sbux) %>% addTiles()
m %>% setView(-98.964844, 38.505191, zoom = 7)
m %>% addCircles(~lon, ~lat)
# Close, but needs some cleaning up. Add some map customization
# Add custom map tiles -- look up here http://homepage.ntlworld.com/keir.clarke/leaflet/leafletlayers.htm
m <- leaflet(sbux) %>% addTiles('http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png')
m %>% setView(-98.964844, 38.505191, zoom = 4)
m %>% addCircles(~lon, ~lat, weight = 2, radius=1, color = "#008000", stroke = FALSE, fillOpacity = 0.5)
# Let's try another mapping library for R. This time from Google
library(ggmap)
# https://www.nceas.ucsb.edu/~frazier/RSpatialGuides/ggmap/ggmapCheatsheet.pdf
# Let's bring in another interesting data set
dunk <- read.csv("dunkindonuts.csv")
myLocation <- "Lebanon, KS"
# Download a basemap centered on the location (needs network access)
myMap <- get_map(location=myLocation,
                 source="stamen", maptype="toner", crop=FALSE, zoom=4)
ggmap(myMap)+
  geom_point(aes(x = lng, y = lat), data=dunk, alpha=.5,
             color="orange", size=1)
# Alright, let's bring it together. We need to put them on one dataframe
# Take just the latitude and longitude columns in Starbucks (and state, too)
sb <- sbux[,c("lat", "lon", "City", "Province")]
# Need a separate column to distinguish between SB and DD when joined
sb$type <- "Starbucks"
head(sb)
dd <- dunk[,c("lat", "lng", "city", "state")]
dd$type <- "Dunkin' Donuts"
# Bring them together!
# (This first rbind() is expected to fail — the error is part of the lesson.)
sbdd <- rbind(sb, dd)
# Error?? Oh right, the columns are named differently.
colnames(sb) <- c("lat","lng","city", "state","type")
# OK, try it again
sbdd <- rbind(sb, dd)
# Back to leaflet! because it was so pretty
# First, turn type into a factor, and do some fancy work to assign a color per type
sbdd$type <- as.factor(sbdd$type)
levels(sbdd$type)
cols2 <- c("#FF8000", "#00ff00")
# unclass() exposes the factor's integer codes, used to index the palette
sbdd$colors <- cols2[unclass(sbdd$type)]
# new leaflet code. so exciting
m <- leaflet(sbdd) %>% addTiles('http://{s}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png')
m %>% setView(-98.964844, 38.505191, zoom = 4)
m %>% addCircles(~lng, ~lat, weight = 1, radius=1,
                 color=~colors, stroke = FALSE, fillOpacity = 0.3)
# --- Bar chart: which states have the most Starbucks? ---
# OK, neat visual. Let's do some calculations
# Chart out the top 5 states for Starbucks
# Good guide for barcharts http://www.cookbook-r.com/Graphs/Bar_and_line_graphs_(ggplot2)/
# Count up the Starbucks per State, turn it into a dataframe
sbstate <- data.frame(table(sb$state))
head(sbstate)
# Need to name the columns for clarity
colnames(sbstate) <- c("id", "Starbucks")
# Order dataframe in descending order of number of Starbucks
sbstate <- sbstate[order(-sbstate$Starbucks),]
sbgg <- ggplot(data=head(sbstate), aes(x=id, y=Starbucks)) +
ggtitle("States with the most Starbucks") +
xlab("State") +
geom_bar(fill="darkgreen", stat="identity")
sbgg
# Hm... Order seems off, right? That's because of ordering of factors (states)
sbhead <- head(sbstate)
# Head only displays the top 5 We need to subset it out entirely
sbhead <- sbstate[1:5,]
levels(sbhead$id)
# Whoa, that's messy. Let's fix it
# First, we purge the old factors by converting it to string and converting it back
sbhead$id <- as.character(sbhead$id)
sbhead$id <- as.factor(sbhead$id)
# Now, we can reorder it so bars appear in descending count order
levels(sbhead$id)
sbhead$id <- factor(sbhead$id,
levels = c("CA", "TX", "WA", "FL", "NY"))
levels(sbhead$id)
# Ok, plot it again
sbgg <- ggplot(data=sbhead, aes(x=id, y=Starbucks)) +
ggtitle("States with the most Starbucks") +
xlab("State") +
geom_bar(fill="darkgreen", stat="identity")
sbgg
# Want to see it on plotly? Go for it
plottedsb <- py$ggplotly(sbgg)
# --- Per-capita analysis: most Starbucks per 100k people, side-by-side layout ---
# Which states have the most SB or DD per capita?
# Bring in the population table
uspop <- read.csv("uspopulation.csv")
# Let's join them together, using the plyr library
# CAUTION: loading plyr after dplyr masks several dplyr verbs; this section
# deliberately relies on plyr's join() and arrange().
library(plyr)
sb <- join(sbstate, uspop)  # joins on the shared "id" column
head(sb)
# It worked! OK, let's do some calculations
sb$Per100kPeople <- (sb$Starbucks/sb$population)*100000
sb2 <- arrange(sb, desc(Per100kPeople))
# Keep the top 5 and rebuild the factor levels so bars plot in descending
# per-capita order instead of alphabetically.
sbhead2 <- sb2[1:5,]
sbhead2$id <- as.character(sbhead2$id)
sbhead2$id <- as.factor(sbhead2$id)
sbhead2$id <- factor(sbhead2$id,
levels = c("DC", "WA", "OR", "CO", "NV"))
levels(sbhead2$id)
sb2gg <- ggplot(data=sbhead2, aes(x=id, y=Per100kPeople)) +
ggtitle("Most Starbucks per capita") +
xlab("State") +
geom_bar(fill="darkgreen", stat="identity")
sb2gg
# Some fancy Chart layout
require(gridExtra)
grid.arrange(sbgg, sb2gg, ncol=2, main="Starbucks popularity")
# BUG FIX: original read `test < - grid.arrange(...)`, which parses as the
# comparison `test < -grid.arrange(...)` and errors because `test` is undefined.
test <- grid.arrange(sbgg, sb2gg, ncol=2, main="Starbucks popularity")
# NOTE(review): newer gridExtra versions replaced grid.arrange's `main=`
# argument with `top=` -- confirm against the installed version.
# Want to try it in Plotly? Go ahead.
plottedpc <- py$ggplotly(test)
# Well, it won't work all the time...
# Because it used a new library (gridExtra) on top of ggplot
# Load these packages
require(gtools)
require(rgdal)
require(scales)
require(Cairo)
require(gpclib)
require(maptools)
require(reshape)
# Let's manipulate the Dunkin' Donuts data now.
# Focus on Dunkin' Donuts in Massachusetts only
str(dd)
massdunk <- filter(dd, state == "MA")
# Let's get the count by town
masscount <- data.frame(table(massdunk$city))
# Name the columns of the new dataframe
colnames(masscount) <- c("id", "DD")
gpclibPermit()
gpclibPermitStatus()
towntracts <- readOGR(dsn="towns", layer="town_shapes")
towntracts <- fortify(towntracts, region="TOWN")
MassData <- left_join(towntracts, masscount)
# That didn't work. Why?
# Because id in towntracts is in uppercase while masscount is not
masscount$id <- toupper(masscount$id)
# Try again
MassData <- left_join(towntracts, masscount)
head (MassData)
# Nice!
# Ok, now it's going to get a little crazy
ddtowns <- ggplot() +
geom_polygon(data = MassData, aes(x=long, y=lat, group=group,
fill=DD), color = "black", size=0.2) +
coord_map() +
scale_fill_distiller(type="seq", palette = "Reds", breaks=pretty_breaks(n=5)) +
theme_nothing(legend=TRUE) +
labs(title="Dunkin Donut towns", fill="")
ggsave(ddtowns, file = "map1.png", width = 6, height = 4.5, type = "cairo-png")
ddtowns
# Now, we sit and wait
# neat!
# There's a slightly easier way
# Back to leaflet! (I love leaflet)
pal <- colorQuantile("YlGn", NULL, n = 5)
town_popup <- paste0("<strong>Dunkin' Donuts: </strong>",
MassData$DD)
mb_tiles <- "http://a.tiles.mapbox.com/v3/kwalkertcu.l1fc0hab/{z}/{x}/{y}.png"
mb_attribution <- 'Mapbox <a href="http://mapbox.com/about/maps" target="_blank">Terms & Feedback</a>'
leaflet(data = MassData) %>%
addTiles(urlTemplate = mb_tiles,
attribution = mb_attribution) %>%
addPolygons(fillColor = ~pal(order),
fillOpacity = 0.8,
color = "#BDBDC3",
weight = 1,
popup = town_popup)
# Real quick, let's take a look at this amazing choropleth package
library(acs)
library(choroplethr)
library(choroplethrMaps)
# Let's play with Census data-- Sign up for an API key
# http://www.census.gov/developers/
api.key.install("yourkeygoeshere")
choroplethr_acs("B01003", "state")
# You can look up more Census tables to map out
# http://censusreporter.org/topics/table-codes/
# Try it again but at the county level
choroplethr_acs("YourTableofChoice", "county")
# So many choropleth options: Animated, Custom shape files
|
52e97d44bbfbad9a3c5f95b6977fc66e97bf76a4 | b6228c51511e3656ed0bc422d3999af2715ae893 | /template.R | ce68e58ed59495311013655b14db7bad254d8351 | [] | no_license | NorfolkDataSci/twitter-account-analysis | d0cedb7673c0c31c714036a0bc45348444171276 | 9d32df8a8d19b66e42d044e8e11bb52e797e1ac2 | refs/heads/master | 2021-01-20T06:46:13.503101 | 2016-08-07T19:06:58 | 2016-08-07T19:06:58 | 64,975,828 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,107 | r | template.R |
# --- Setup: packages and Twitter API authentication ---
# turn off stringsAsFactors because they make text analysis hard
options(stringsAsFactors = FALSE)
# load packages
# NOTE(review): twitteR is loaded twice (here and below) -- harmless but redundant.
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(twitteR))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(plyr))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(dplyr))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(ggplot2))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(ggthemes))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(ggrepel))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(scales))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(lubridate))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(network))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(sna))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(xml2))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(twitteR))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(qdap))))
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(tm))))
# Authenticate against the Twitter API ----------------------------------------------------------------------------------------
# Check with administrator about running a one time script
# that will provide your system with the appropriate API Keys
# authenticate against twitter using your api keys
# Credentials are read from environment variables so no secrets live in the script.
setup_twitter_oauth(consumer_key = Sys.getenv("TWITTER_CONSUMER_KEY"),
consumer_secret = Sys.getenv("TWITTER_CONSUMER_SECRET"),
access_token = Sys.getenv("TWITTER_ACCESS_TOKEN"),
access_secret = Sys.getenv("TWITTER_ACCESS_SECRET"))
token <- get("oauth_token", twitteR:::oauth_cache) #Save the credentials info
token$cache()
# Pull down tweets of a person of interest ----------------------------------------------------------------------------------------
# determine a twitter user that you are interested in
user_of_interest <- getUser('NorfolkVA')
# pull down the last 3200 tweets (3200 is the API's maximum timeline depth)
tweets <- userTimeline(user_of_interest, n=3200, maxID=NULL, sinceID=NULL, includeRts=FALSE, excludeReplies=FALSE)
# saving tweets if there is no online access
saveRDS(tweets, './data/tweets.Rds')
# loading tweets from file
tweets <- readRDS('./data/tweets.Rds')
# Cleaning the tweets from unescaped HTML ----------------------------------------------------------------------------------------
# tweets come across with certain html characters encoded
# for example, the ampersand (&) symbol comes across as amp;
# we need to unescape those characters
# taken from http://stackoverflow.com/questions/5060076/convert-html-character-entity-encoding-in-r
unescape_xml <- function(str){
xml2::xml_text(xml2::read_xml(paste0("<x>", str, "</x>")))
}
unescape_html <- function(str){
xml2::xml_text(xml2::read_html(paste0("<x>", str, "</x>")))
}
# create the unescaped version (replaces each tweet's text field in place)
tweets <- lapply(tweets, FUN=function(x){x$text <- unescape_html(x$text);return(x)})
# Calculate tweet emotional polarity -----------------------------------------------------------------------------------------------
# NOTE(review): this assignment shadows qdap::polarity with a list of results;
# any later direct call to polarity() in this session would break.
polarity <- lapply(tweets, function(tweet) {
txt <- tweet$text
# strip sentence enders so each tweet is analyzed as a sentence,
# and +'s which muck up regex
txt <- gsub('(\\.|!|\\?)\\s+|(\\++)', ' ', txt)
# strip URLs
txt <- gsub(' http[^[:blank:]]+', '', txt)
# calculate polarity
return(polarity(txt))
})
# One row per tweet: text, retweet count, and its overall polarity score
retweet_data <- data.frame(text = sapply(tweets, FUN=function(x){x$text}),
retweetCount = sapply(tweets, FUN=function(x){x$retweetCount}),
emotionalValence = sapply(polarity, function(x) x$all$polarity))
# Do happier tweets get retweeted more? ----------------------------------------------------------------------------------------
# Scatter of retweet count vs. emotional valence with a loess smoother
ggplot(retweet_data, aes(x = emotionalValence,
y = retweetCount)) +
geom_point(position = 'jitter') +
geom_smooth(span = 1) +
scale_x_continuous(breaks = pretty_breaks(6)) +
scale_y_continuous(breaks = pretty_breaks(6)) +
labs(x="Tweet Emotion (negative to positive)", y="Retweets Count") +
ggtitle('Count of Retweets by Message Emotion') +
theme(axis.text=element_text(size=12),
axis.title=element_text(size=14, face="bold"),
plot.title = element_text(color="black", face="bold", size=24, hjust=0))
# Base-graphics version so identify() can be used to click-label outliers
# (identify() is interactive -- it blocks in non-interactive sessions)
plot(retweet_data$emotionalValence, retweet_data$retweetCount)
identify(retweet_data$emotionalValence, retweet_data$retweetCount, labels=retweet_data$text)
# Inspect specific rows picked out interactively above
retweet_data[29,]
retweet_data[55,]
retweet_data[83,]
# Who is retweeting whom? ------------------------------------------------------------------------------------------------------
# pull down tweets this time INCLUDING retweets
tweets_inc_retweets <- userTimeline(user_of_interest, n=3200, maxID=NULL, sinceID=NULL, includeRts=TRUE, excludeReplies=FALSE)
tweets_df <- twListToDF(tweets_inc_retweets)
# Split into retweets and original tweets
sp <- split(tweets_df, tweets_df$isRetweet)
# Extract the retweets and pull the original author's screenname
# ("RT @name: ..." -- characters 5 through the first colon)
rt <- mutate(sp[['TRUE']], sender = substr(text, 5, regexpr(':', text) - 1))
# Edge list: sender = original author, receiver = account that retweeted
el <- as.data.frame(cbind(sender = tolower(rt$sender),
receiver = tolower(rt$screenName)))
el <- count(el, sender, receiver)
rtnet <- network(el, matrix.type = 'edgelist', directed = TRUE,
ignore.eval = FALSE, names.eval = 'num')
# Get names of only those who were retweeted to keep labeling reasonable
vlabs <- rtnet %v% 'vertex.names'
vlabs[degree(rtnet, cmode = 'outdegree') == 0] <- NA
col3 <- RColorBrewer::brewer.pal(3, 'Paired')
par(mar = c(0, 0, 3, 0))
plot(rtnet, label = vlabs, label.pos = 5, label.cex = .8,
vertex.cex = log(degree(rtnet)) + .5, vertex.col = col3[1],
edge.lwd = 'num', edge.col = 'gray70', main = '@NorfolkVA Retweet Network')
# Determine retweeter reach to see if there are some real influencers in our network ----------------------------------------
# One API call per sender; try() skips accounts that error (deleted/protected)
# NOTE(review): 1:nrow(el) misbehaves if el has zero rows; seq_len(nrow(el)) is safer.
el$sender_followers <- NA
for (i in 1:nrow(el)){
try({el$sender_followers[i] <- getUser(el$sender[i])$followersCount})
}
# Keep senders retweeted at least twice with at least 5 followers
nas_removed_dat <- el %>%
na.omit() %>%
filter(n >= 2,
sender_followers >= 5)
ggplot(nas_removed_dat, aes(sender_followers, n)) +
geom_point(color = 'red') +
scale_x_continuous(trans = log_trans(), breaks=c(100, 1000, 10000, 50000)) +
geom_text_repel(aes(label = nas_removed_dat$sender)) +
ggtitle('Followers Count for @NorfolkVA Retweeters') +
labs(x="Follower Count of Retweeter (log scale)", y="Retweets Count") +
theme(axis.text=element_text(size=12),
axis.title=element_text(size=14, face="bold"),
plot.title = element_text(color="black", face="bold", size=24, hjust=0))
|
90ce195cda14077d5064aada6a1c30921c82d29d | 5537583b4a93a529590af69d273f70ee16d4c0e4 | /purge_intri/append.R | ed7c6e9018abd85dc3ca6207511d0f04bd73fa64 | [] | no_license | larrymy/intri | 5735693c4fba8f8ab0a623737873a1fecab899a8 | 16f75626a005946cdac001670dfba36711242c09 | refs/heads/master | 2021-09-06T02:13:25.712837 | 2018-02-01T15:51:55 | 2018-02-01T15:51:55 | 106,582,642 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,109 | r | append.R | #append fdf
library(dplyr)
library(lubridate)
#Load data
setwd("C:/Users/jy/Desktop/intri/purge_intri/")
load("3_20171110_fdf_intri.rda")
setwd("C:/Users/jy/Desktop/tempmplus/rda")
load("plot_df.rda")
#last date
lastdate <- max(df[,"DATE"])
df1 <- plot_df %>%
filter(date == lastdate) %>%
select(code, open, high, low, close) %>%
mutate(type = "mplus") #next time add volume
df2 <- df %>%
filter(DATE == lastdate) %>%
select(code = TICKER, open = ADJ_OPEN, high = ADJ_HIGH, low = ADJ_LOW, close = ADJ_CLOSE) %>%
mutate(type = "intri")
mergedf <- merge(df1,df2, by = "code", all = T)
scaledf <- mergedf %>% mutate(scale = close.y/close.x) %>% mutate(scale = ifelse(is.na(scale), 1, scale))
newdf <- (merge(plot_df, scaledf %>% select(code, scale), by = "code"))
plot_df_before <- newdf %>%
filter(date < lastdate) %>% select(-scale)
plot_df_after <- newdf %>%
filter(date >= lastdate) %>%
mutate(open = open*scale,
high = high*scale,
low = low*scale,
close = close*scale) %>%
select(code, stock, date, open, high, low, close, vol)
intri_df_before <- df %>%
filter(DATE < lastdate) %>%
select(code = TICKER, stock = FIGI_TICKER, date = DATE, open = ADJ_OPEN, high = ADJ_HIGH, low = ADJ_LOW, close = ADJ_CLOSE, vol = VOLUME)
intri_missing_df <- plot_df_before %>% filter(!code %in% intri_df_before$code)
intri_df_before <- intri_df_before %>% mutate(stock = as.character(stock))
final_full_df <- rbind(intri_missing_df, intri_df_before, plot_df_after) %>% arrange(code, date)
u1 <- grep(pattern = "^0{1}$", x = final_full_df$close)
final_full_df[u1 ,c("open", "high", "low", "close")] <- final_full_df[u1-1,"close"]
fdf_klse <- final_full_df
save(fdf_klse, file = "fdf_klse.rda")
write.csv(fdf_klse, file = "fdf_klse.csv", row.names = F)
|
d68fd48d2053c42dcdf5abcc13334b660483f1f0 | 6117f10e74484838f8d21498487dfd7449a824d3 | /plot1.R | 0b8ba0fb2bc56edeac0530475eddc086ad543b1a | [] | no_license | dougyoung/ExData_Plotting1 | 6866db3ced1c0f40c92e0b96b020db7d7bb0ff47 | d07d8bd4aea56775a4e3818e4882f61e61a1e72c | refs/heads/master | 2020-06-21T07:36:05.472241 | 2016-11-26T03:05:39 | 2016-11-26T03:05:39 | 74,797,977 | 0 | 0 | null | 2016-11-26T01:39:33 | 2016-11-26T01:39:32 | null | UTF-8 | R | false | false | 716 | r | plot1.R | source('load_samples.R')
source('render_to_png.R')
plot1 <- function(samples) {
# Histogram of Global Active Power over samples
# Histogram has these properties:
# color: red
# title: Global Active Power
# x-axis label: Global Active Power (kilowatts)
# y-axis label: Frequency
hist(
samples$Global_active_power,
col='Red',
main='Global Active Power',
xlab='Global Active Power (kilowatts)',
ylab='Frequency'
)
}
# --- Script body: load data, draw plot 1, and save it as PNG ---
# Create a new device
dev.new()
# Samples between 2007-02-01 00:00:00 and 2007-02-03 00:00:00
# (loadSamplesBetweenDates comes from load_samples.R sourced above)
samples <- loadSamplesBetweenDates('2007-02-01', '2007-02-03')
# Render plot
plot1(samples)
# Render to png (helper from render_to_png.R sourced above)
render_to_png('plot1.png')
# Close device
dev.off()
c95a045a7993955a0832a74d52c04ea616190b3a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/catnet/examples/cnCluster.Rd.R | 8a8d3668b50079e2f502543b14cab37b1c890f5b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 332 | r | cnCluster.Rd.R | library(catnet)
### Name: cnCluster-method
### Title: Network Clustering
### Aliases: cnCluster cnClusterSep cnClusterMI cnCluster,catNetwork-method
### cnClusterSep,catNetwork-method
### Keywords: methods graphs
### ** Examples
cnet <- cnRandomCatnet(numnodes=30, maxParents=2, numCategories=2)
cnCluster(object=cnet)
|
c3c2aa6f2a3d8b8c2b946a96a781793c3f526f30 | b1552d66be25934829f08f923c84af17a3d6b8fc | /fig-whoami/fig-mcmc.R | 76401d31f5ef6b179eb621313b45cb5ffecef801 | [] | no_license | gvegayon/faculty-talk | b8d0e52c6dc1b4c9f868e54ceb5c4fecfefc1529 | 48eb4e0e53e567eb4636f4f884d2c3979e98cc0e | refs/heads/master | 2023-08-25T00:31:02.309572 | 2021-11-01T17:21:35 | 2021-11-01T17:21:35 | 317,743,622 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,223 | r | fig-mcmc.R | library(coda)
# dat <- readRDS("fig-mcmc/mcmc_partially_annotated_no_prior.rds")
library(fmcmc)
data(logit, package = "mcmc")
out <- glm(y ~ x1 + x2 + x3 + x4, data = logit, family = binomial, x = TRUE)
beta.init <- as.numeric(coefficients(out))
lupost_factory <- function(x, y) function(beta) {
eta <- as.numeric(x %*% beta)
logp <- ifelse(eta < 0, eta - log1p(exp(eta)), - log1p(exp(- eta)))
logq <- ifelse(eta < 0, - log1p(exp(eta)), - eta - log1p(exp(- eta)))
logl <- sum(logp[y == 1]) + sum(logq[y == 0])
return(logl - sum(beta^2) / 8)
}
lupost <- lupost_factory(out$x, out$y)
khaario <- kernel_adapt(freq = 1, warmup = 2000)
set.seed(12)
out_harrio_1 <- MCMC(
initial = rbind(beta.init, beta.init + rnorm(5))[1,],
fun = lupost,
nsteps = 6000, # We will only run the chain for 100 steps
kernel = khaario, # We passed the predefined kernel
thin = 1, # No thining here
nchains = 1L, # A single chain
multicore = FALSE # Running in serial
)
graphics.off()
svg("fig-mcmc/fig-mcmc.svg", bg = "transparent")
traceplot(out_harrio_1[,5], main = "Trace of Adaptive Transition Kernel")
abline(v = 2000, col = "red", lwd = 2, lty=2)
dev.off() |
7aa3a71e2b8b26348cdc407f70d75deaf8e8ef59 | 66a2afd9c0dab1d55e6d236f3d85bc1b61a11a66 | /man/remove_empty_linked_object_cols.Rd | f0aaa58b7c6b0091a3a0a0380422a8e5643cdca3 | [
"MIT"
] | permissive | StevenMMortimer/salesforcer | 833b09465925fb3f1be8da3179e648d4009c69a9 | a1e1e9cd0aa4e4fe99c7acd3fcde566076dac732 | refs/heads/main | 2023-07-23T16:39:15.632082 | 2022-03-02T15:52:59 | 2022-03-02T15:52:59 | 94,126,513 | 91 | 19 | NOASSERTION | 2023-07-14T05:19:53 | 2017-06-12T18:14:00 | R | UTF-8 | R | false | true | 950 | rd | remove_empty_linked_object_cols.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{remove_empty_linked_object_cols}
\alias{remove_empty_linked_object_cols}
\title{Remove NA Columns Created by Empty Related Entity Values}
\usage{
remove_empty_linked_object_cols(dat, api_type = c("SOAP", "REST"))
}
\arguments{
\item{dat}{data; a \code{tbl_df} or \code{data.frame} of a returned resultset}
\item{api_type}{\code{character}; one of \code{"REST"}, \code{"SOAP"},
\code{"Bulk 1.0"}, or \code{"Bulk 2.0"} indicating which API to use when
making the request.}
}
\value{
\code{tbl_df}; the passed in data, but with the object columns removed
that are empty links to other objects.
}
\description{
This function will detect if there are related entity columns coming back
in the resultset and try to exclude an additional completely blank column
created by records that don't have a relationship at all in that related entity.
}
\keyword{internal}
|
317dde33ed300571695320f8e00ff87b5eedd116 | cb6f2a406e75c379a647e0913ac407a2e067c693 | /man/run.permutations.Rd | 26d8f5a8cca47b5d74be880b782a3fb5813993e2 | [] | no_license | NKI-CCB/iTOP | 9f797340aa9bf90a1bb7b1bb273c7b7f2b59a37a | e93ad3a8bbd7754153c57c44afc85970c9b682c2 | refs/heads/master | 2021-04-09T16:02:13.978456 | 2018-06-13T08:14:16 | 2018-06-13T08:14:16 | 125,842,771 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,148 | rd | run.permutations.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{run.permutations}
\alias{run.permutations}
\title{Permutations for significance testing}
\usage{
run.permutations(config_matrices, nperm = 1000)
}
\arguments{
\item{config_matrices}{The result from compute.config.matrices().}
\item{nperm}{The number of permutations to perform (default=1000).}
}
\value{
An n x n x nperms array of RV coefficients for the permutated data, where n is the number of datasets.
}
\description{
Performs permutations for significance testing. The result from this function can be used with
rv.pval() to determine a p-value. Because the permutations are decoupled from the p-value
computation, they do not need to be redone for every p-value, which reduces the overall runtime.
}
\examples{
set.seed(2)
n = 100
p = 100
x1 = matrix(rnorm(n*p), n, p)
x2 = x1 + matrix(rnorm(n*p), n, p)
x3 = x2 + matrix(rnorm(n*p), n, p)
data = list(x1=x1, x2=x2, x3=x3)
config_matrices = compute.config.matrices(data)
cors = rv.cor.matrix(config_matrices)
cors_perm = run.permutations(config_matrices, nperm=1000)
rv.pval(cors, cors_perm, "x1", "x3", "x2")
}
|
246400d5d077d430a3bad8b6b6b01e45900811bf | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/heplots/examples/Hernior.Rd.R | eb4f7044cec8b3209971e83e0c91dc029d8d85b8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,238 | r | Hernior.Rd.R | library(heplots)
### Name: Hernior
### Title: Recovery from Elective Herniorrhaphy
### Aliases: Hernior
### Keywords: datasets
### ** Examples
str(Hernior)
Hern.mod <- lm(cbind(leave, nurse, los) ~
age + sex + pstat + build + cardiac + resp, data=Hernior)
Anova(Hern.mod, test="Roy") # actually, all tests are identical
# test overall regression
linearHypothesis(Hern.mod, c("age", "sexm", "pstat", "build", "cardiac", "resp"))
# joint test of age, sex & caridac
linearHypothesis(Hern.mod, c("age", "sexm", "cardiac"))
clr <- c("red", "darkgray", "blue", "darkgreen", "magenta", "brown", "black")
heplot(Hern.mod, col=clr)
pairs(Hern.mod, col=clr)
## Enhancing the pairs plot ...
# create better variable labels
vlab <- c("LeaveCondition\n(leave)", "NursingCare\n(nurse)", "LengthOfStay\n(los)")
# Add ellipse to test all 5 regressors simultaneously
hyp <- list("Regr" = c("age", "sexm", "pstat", "build", "cardiac", "resp"))
pairs(Hern.mod, hypotheses=hyp, col=clr, var.labels=vlab)
## Views in canonical space for the various predictors
if (require(candisc)) {
Hern.canL <- candiscList(Hern.mod)
plot(Hern.canL, term="age")
plot(Hern.canL, term="sex")
plot(Hern.canL, term="pstat") # physical status
}
|
568d844af932edde1d165dadc770e24d1f6c5a74 | 4201e9b754760dc35fc0aeef9df5a8b9d801c47f | /bin/R-3.5.1/src/library/graphics/R/persp.R | 25739ca9b334b0804359d71550b1a224f60e15da | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] | permissive | lifebit-ai/exomedepth | cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e | 5a775ae5e2a247aeadc5208a34e8717c7855d080 | refs/heads/master | 2020-03-27T12:55:56.400581 | 2018-10-11T10:00:07 | 2018-10-11T10:00:07 | 146,578,924 | 0 | 0 | MIT | 2018-08-29T09:43:52 | 2018-08-29T09:43:51 | null | UTF-8 | R | false | false | 2,995 | r | persp.R | # File src/library/graphics/R/persp.R
# Part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2012 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
persp <- function(x, ...) UseMethod("persp")
# Default method: draw a 3D perspective plot of surface z over grid (x, y)
# via the C routine C_persp. Accepts either separate x/y/z, a list with
# $x/$y/$z as the first argument, or a bare z matrix.
persp.default <-
function (x = seq(0, 1, length.out = nrow(z)),
y = seq(0, 1, length.out = ncol(z)),
z, xlim = range(x), ylim = range(y), zlim = range(z, na.rm = TRUE),
xlab = NULL, ylab = NULL, zlab = NULL, main = NULL, sub = NULL,
theta = 0, phi = 15, r = sqrt(3), d = 1, scale = TRUE, expand = 1,
col = "white", border = NULL, ltheta = -135, lphi = 0, shade = NA,
box = TRUE, axes = TRUE, nticks = 5, ticktype = "simple", ...)
{
# Default axis labels from the expressions the caller passed
if (is.null(xlab))
xlab <- if (!missing(x)) deparse(substitute(x)) else "X"
if (is.null(ylab))
ylab <- if (!missing(y)) deparse(substitute(y)) else "Y"
if (is.null(zlab))
zlab <- if (!missing(z)) deparse(substitute(z)) else "Z"
## labcex is disregarded since we do NOT yet put ANY labels...
# Normalize the flexible input forms to plain x, y, z
if (missing(z)) {
if (!missing(x)) {
if (is.list(x)) {
# persp(list(x=, y=, z=)) form
z <- x$z
y <- x$y
x <- x$x
}
else {
# persp(z) form: z given positionally as the first argument
z <- x
x <- seq.int(0, 1, length.out = nrow(z))
}
}
else stop("no 'z' matrix specified")
}
else if (is.list(x)) {
y <- x$y
x <- x$x
}
# The grid must be strictly increasing in both directions
if (any(diff(x) <= 0) || any(diff(y) <= 0))
stop("increasing 'x' and 'y' values expected")
if (box) {
# Warn (don't error) when data fall outside the requested box
zz <- z[!is.na(z)]
if(any(x < xlim[1]) || any(x > xlim[2]) ||
any(y < ylim[1]) || any(y > ylim[2]) ||
any(zz < zlim[1]) || any(zz > zlim[2]))
warning("surface extends beyond the box")
}
# Partial-match "simple"/"detailed" to the integer code the C code expects
ticktype <- pmatch(ticktype, c("simple", "detailed"))
plot.new()
r <- .External.graphics(C_persp, x, y, z, xlim, ylim, zlim, theta, phi, r, d,
scale, expand, col, border, ltheta, lphi, shade,
box, axes, nticks, ticktype,
as.character(xlab), as.character(ylab),
as.character(zlab), ...)
# Run any user hooks registered via setHook("persp", ...)
for(fun in getHook("persp")) {
if(is.character(fun)) fun <- get(fun)
try(fun())
}
if(!is.null(main) || !is.null(sub))
title(main = main, sub = sub, ...)
# Return the perspective transformation matrix, invisibly (see ?trans3d)
invisible(r)
}
|
8354a0608a4809f9d2fe80ea4f41f03b5b0daa9d | c4a21be5b82f9f2f797e99f0658a290fd5a1035c | /update_nowcasting.R | 1d648bc7e354de6fe3b2be4662004ea5ba485e70 | [] | no_license | kamila-belo/nowcasting | 98209d3e31ac3a57d25430136e1d7d83e9942d57 | 5c26e259765f4ca03773ea3413739b6c22c54805 | refs/heads/master | 2022-09-02T20:34:00.328155 | 2020-05-21T07:13:03 | 2020-05-21T07:13:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,851 | r | update_nowcasting.R | # Libraries
library(widgetframe)
library(tidyverse)
library(plotly)
library(lubridate)
library(optparse)
library(Hmisc)
library(stringr)
library(foreign)
library(dplyr)
library(NobBS)
library(zoo)
library(tidyr)
# carrega funcoes----
source("_src/funcoes.R")
################################################################################
## comandos git: PULL ANTES de adicionar arquivos
################################################################################
system("git pull")
################################################################################
## Parsing command line arguments
################################################################################
if (sys.nframe() == 0L) {
option_list <- list(
make_option("--dir",
help = ("Caminho até o diretório com os arquivos csv com base sivep gripe"),
default = "../dados/municipio_SP/SRAG_hospitalizados/dados/",
metavar = "dir"),
make_option("--escala", default = "municipio",
help = ("Nível administrativo, um de: municipio, micro, meso, estado, país"),
metavar = "escala"),
make_option("--sigla", default = "SP", # ainda nao estamos usando
help = ("Sigla do estado a ser atualizado"),
metavar = "sigla"),
make_option("--geocode",
help = ("Geocode de município, micro-mesorregião ou estado"),
metavar = "geocode"),
make_option("--window", type = "integer", default = 40,
help = ("Largura da running window do nowcasting (dias)"),
metavar = "window"),
make_option("--trim", type = "integer", default = 2,
help = ("Últimos dias da serie temporal a tirar do nowcasting"),
metavar = "trim"),
make_option("--dataBase",
help = ("Data da base de dados, formato 'yyyy_mm_dd'"),
metavar = "dataBase"),
make_option("--formatoData", default = "%Y_%m_%d",
help = ("Formato do campo de datas no csv, confome padrão da função as.Date"),#ast antes de tirar checar outras fontes de dados
metavar = "formatoData"),
make_option("--updateGit", default = "FALSE",
help = ("Fazer git add, commit e push?"),
metavar = "updateGit"),
make_option("--outputDir", default = "./dados_processados/nowcasting",
help = ("Diretório de destino"),
metavar = "outputDir")
)
parser_object <- OptionParser(usage = "Rscript %prog [Opções] [ARQUIVO]\n",
option_list = option_list,
description = "Script para importar csv da sivep gripe,
executar nowcasting e salvar os resultados")
## aliases
opt <- parse_args(parser_object, args = commandArgs(trailingOnly = TRUE), positional_arguments = TRUE)
dir <- opt$options$dir
escala <- opt$options$escala
sigla <- opt$options$sigla
geocode <- opt$options$geocode
window <- opt$options$window
trim.now <- opt$options$trim
data <- opt$options$dataBase
formato.data <- opt$options$formatoData
update.git <- opt$options$updateGit
out.dir <- opt$options$outputDir
}
####################################################
### to run INTERACTIVELY:
#You only have to set up the variables that are not already set up above or the ones that you would like to change #
#geocode <- "3550308" # municipio SP
#data <- "2020_05_20"
#######################################################
if (!exists('geocode')) {
print("Geocode não definido")
quit(status = 1)
}
# sets paths
# check.geocode() comes from _src/funcoes.R sourced above
name_path <- check.geocode(escala = escala,
geocode = geocode)
output.dir <- paste0(out.dir, "/", name_path, "/")
# output files only
out.path <- paste0(output.dir, "output_nowcasting/")
# tables only
df.path <- paste0(output.dir, "tabelas_nowcasting_para_grafico/")
if (!file.exists(df.path))
dir.create(df.path, showWarnings = TRUE, recursive = TRUE)
if (!file.exists(out.path))
dir.create(out.path, showWarnings = TRUE, recursive = TRUE)
# fall back to the most recent date available in the data directory
if (is.null(data)) {
data <- get.last.date(dir)
}
print(paste("Atualizando", gsub(x = name_path, pattern = "/", replacement = " ")))
# Pipeline stages: generate nowcastings, prepare data, analyses, plots
source("_src/01_gera_nowcastings_SIVEP.R")
source("_src/02_prepara_dados_nowcasting.R")
source("_src/03_analises_nowcasting.R")
source("_src/04_plots_nowcasting.R")
files_para_push <- list.files(output.dir, pattern = paste0("*.", data, ".csv"),
full.names = TRUE)
# NOTE(review): if no file matches "post", grep() returns integer(0) and
# x[-integer(0)] drops EVERY file -- confirm a "post" file always exists.
files_para_push <- files_para_push[-grep(files_para_push, pattern = "post")]
# here we could also push the tables straight to the site
tabelas_para_push <- list.files(df.path, pattern = paste0("*.", data, ".csv"),
full.names = TRUE)
# Graficos a serem atualizados
plots.para.atualizar <- makeNamedList(
# covid
plot.nowcast.covid,
plot.nowcast.cum.covid,
plot.estimate.R0.covid,
plot.tempo.dupl.covid,
# srag
plot.nowcast.srag,
plot.nowcast.cum.srag,
plot.estimate.R0.srag,
plot.tempo.dupl.srag,
# obitos covid
plot.nowcast.ob.covid,
plot.nowcast.cum.ob.covid,
plot.tempo.dupl.ob.covid,
# obitos srag
plot.nowcast.ob.srag,
plot.nowcast.cum.ob.srag,
plot.tempo.dupl.ob.srag
#obitos srag.proaim
#plot.nowcast.ob.srag.proaim,
#plot.nowcast.cum.ob.srag.proaim,
#plot.tempo.dupl.ob.srag.proaim
)
plots.true <- sapply(plots.para.atualizar, function(x) !is.null(x))
filenames <- gsub(".", "_", names(plots.para.atualizar), fixed = TRUE)
filenames <- paste0(plot.dir, filenames)
n <- 1:length(plots.para.atualizar)
for (i in n[plots.true]) {
fig.name <- filenames[i]
# SVG ####
# fazendo todos os graficos svg para o site
graph.svg <- plots.para.atualizar[[i]] +
theme(axis.text = element_text(size = 6.65)
#plot.margin = margin(10, 0, 0, 7, "pt")
)
ggsave(paste(fig.name, ".svg", sep = ""),
plot = graph.svg,
device = svg,
scale = 1,
width = 215,
height = 146,
units = "mm")
#ast nao chequei as dimensoes, só tirei o que parece redundante
}
#
###############################################################################
## Comando git: commits e pushs
################################################################################
if (update.git) {
system("git pull")
## todos os arquivos da data
system(paste("git add", paste(files_para_push, collapse = " ")))
system(paste("git add", paste(tabelas_para_push, collapse = " ")))
system(paste("git commit -m ':robot: nowcasting",
gsub(x = name_path, pattern = "/", replacement = " "),
"dados:", data,
"'"))
system("git push")
}
}
#falta git plot
|
e64722577c268d890a85fbed3f9ab0540018dd37 | 9a67dc2525091b63f5182294d65266e735600b71 | /rcpp2.R | 731797c97f1ff9b5da7dbe4129715da04956e945 | [] | no_license | brennap3/thesis_2 | 83ba43d4e8d687314a5f02d63c3de6a30571da00 | 5747a5bfbfb3fbf13643e1f1265d839cdf297b9c | refs/heads/master | 2021-01-20T15:18:29.181654 | 2018-05-28T12:11:16 | 2018-05-28T12:11:16 | 90,749,279 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,971 | r | rcpp2.R | rm(list=ls())
library(lubridate)
library(dplyr)
library(lubridate)
library(purrr)
library(magrittr)
library(rts)
library(depmixS4)
library(TTR)
library(ggplot2)
library(reshape2)
library(gridExtra)
library(dplyr)
library(ggplot2)
library(tidyr)
library(lubridate)
library(KMsurv)
library(tibble)
library(stringr)
library(tidyr)
library(stargazer)
library(stringr)
library(stringi)
## ---------------------------------------------------------------------------
## Load the Global Terrorism Database, build a daily then weekly series of
## deaths for Iraq (post-2003 invasion), plot it, and run Twitter's breakout
## detection on the weekly counts.
## NOTE(review): absolute Windows path below; make this configurable before
## running anywhere else.
## ---------------------------------------------------------------------------
gti_data<-read.csv("C://Users//Peter//Desktop//gti//globalterrorismdb_0616dist.csv",header=TRUE,sep=",")
## Treat missing kill counts as zero so sums are well defined.
gti_data$nkill_clean<-coalesce(gti_data$nkill,0)
iraq.data<-gti_data %>% filter(country_txt=='Iraq')
## Assemble a proper Date from the year/month/day components (zero-padded).
iraq.data$idate<-ymd(paste(iraq.data$iyear,str_pad(iraq.data$imonth,2,pad="0"),
                           str_pad(iraq.data$iday,2,pad="0")))
## One row per calendar day so days without attacks appear as zero counts.
## FIX: renamed `c` -> `date_seq` (the original masked base::c) and replaced
## the hard-coded length 14916 with length(date_seq).
date_seq<-seq(ymd('1975-03-01'),ymd('2015-12-31'),by='1 day')
length(date_seq)
d<-c(rep(0,times=length(date_seq))) %>% as.data.frame()
d$date<-date_seq
colnames(d)<-c('count_kills','idate')
iraq.dataex<-dplyr::left_join(d, iraq.data, by = "idate")
iraq.dataex$nkill<-dplyr::coalesce(iraq.dataex$nkill,0)
iraq.data.postinvasion<-iraq.dataex %>% filter(idate>="2003-03-20")
## FIX: these two range checks originally appeared before
## iraq.data.postinvasion was created and failed in a clean session.
max(iraq.data.postinvasion$idate)
min(iraq.data.postinvasion$idate)
iraq.data.postinvasion$year<-lubridate::year(iraq.data.postinvasion$idate)
iraq.data.postinvasion$week<-lubridate::week(iraq.data.postinvasion$idate)
## unique(iraq.data.postinvasion$week)
## Aggregate to (year, week) totals of deaths.
iraq.data.weekly <- iraq.data.postinvasion %>%
  dplyr::select(year,week,nkill_clean) %>% group_by(year,week) %>%
  summarize(sum_kill=sum(nkill_clean,na.rm=T)) %>%
  arrange(year,week) %>% as.data.frame()
str(iraq.data.weekly)
library(lubridate)
## FIX: citation() takes a quoted package name; citation(lubridate) errors.
citation("lubridate")
## str(iraq.dataex)
head(iraq.data.weekly)
tail(iraq.data.weekly,50)
library(ggplot2)
## NOTE(review): install.packages()/install_github() calls are kept for
## provenance, but should normally be run once outside the script.
install.packages("lubridate")
library(lubridate)
devtools::install_github("twitter/AnomalyDetection")
library(AnomalyDetection)
library(magrittr)
## Monday of each (year, week) pair, for plotting on a date axis.
iraq.data.weekly$weekstrdate<-as.POSIXct(paste(iraq.data.weekly$year,iraq.data.weekly$week,"1",sep=" "), format = "%Y %U %u")
ggplot(iraq.data.weekly, aes(x=weekstrdate, y=sum_kill)) +
  geom_line(color = "blue")+
  ggtitle("Time series plot of the \n number of Deaths due to terrorism, \n averaged across all weeks (y-axis)")+
  xlab("Date")+
  ylab("number of deaths \n due to terrorism")
## post invasion
## use 4 week period
?breakout
library(BreakoutDetection)
## Multiple mean-shift (breakout) detection on the weekly series.
res.weekly.twitter.breakout = breakout(iraq.data.weekly$sum_kill, min.size=4,
                                       method='multi', percent=.2, degree=1, plot=TRUE)
##
res.weekly.twitter.breakout$plot
res.weekly.twitter.breakout$loc
##
install.packages("tseries")
library(tseries)
library(devtools)
install_github(repo = "Surus", username = "Netflix", subdir = "resources/R/RAD")
library(RAD)
X<-iraq.data.weekly$sum_kill
length(X)
## 678/4
res.iraq.anom<-RAD::AnomalyDetection.rpca(head(X,676), frequency = 4, autodiff = T,
forcediff = F, scale = T, verbose = F)
str(res.iraq.anom)
res.iraq.anom$S_transform ## The sparse outliers in the transformed space
?RAD::ggplot_AnomalyDetection.rpca
RAD::ggplot_AnomalyDetection.rpca(res.iraq.anom) +
ggplot2::theme_grey(base_size = 25)
str(res.iraq.anom)
str(res.iraq.anom$name)
res.iraq.anom
##
####
##
str(iraq.data.weekly)
iraq.data.weekly.ts<- iraq.data.weekly %>% dplyr::select(weekstrdate,sum_kill) %>% dplyr::filter(!is.na(weekstrdate))
head(iraq.data.weekly)
?AnomalyDetectionTs
data_anomaly = AnomalyDetectionTs(iraq.data.weekly.ts, max_anoms=0.01, direction="pos", plot=TRUE, e_value = T)
####
#######
#######
####
## str(iraq.data.weekly)
xdf<- iraq.data.weekly %>% dplyr::select(sum_kill,weekstrdate)
## ?sts
head(
xdf)
library(tsoutliers)
library(surveillance)
?sts
xsts <- sts(observed = xdf$sum_kill, start=c(2003,12) ,epoch=as.numeric(xdf$weekstrdate))
nrow(xsts)
nrow(xdf)
plot(xsts)
?sts2disProg
stsd<-sts2disProg(sts = xsts)
plot(stsd)
?algo.cdc
?algo.farrington
?earsC
rm(res1)
?earsC
xsts
?earsC
res1x <- earsC(xsts, control = list( method="C3",baseline=18))
str(res1x)
str(xsts)
plot(res1x)
res1.df<-NULL
res2.df<-as.data.frame(res1)
nrow(res2.df)
str(res1.df)
str(xdf)
library(dplyr)
?mutate
nrow(res1.df)
max(xdf$weekstrdate)
res1.df.1 <- mutate(res1.df,
time = xdf$weekstrdate[22:677])
xas<-xdf[22:677,]
nrow(res1.df.1)
nrow(xas)
str(res1.df.1)
ggplot() +
geom_bar(aes(x=xas$weekstrdate,y=xas$sum_kill), stat = "identity",color="darkblue") +
viridis::scale_fill_viridis(discrete = TRUE, name = "Alarm:") +
geom_step(data = res1.df.1, aes(time, upperbound)) +
theme(legend.position = "bottom") +
theme(axis.text.x=element_text(angle=90, hjust=1)) +
ylab("No. of deaths") +
xlab("Time") + ggtitle("Outbreaks (detection) of deaths due to terrorism in Iraq \n (EarsC3)") +
theme(axis.text.x = element_text(angle = 60, size = 6)) +
geom_point(data = filter(res1.df.1, alarm), aes(x = time), y = 0, color = "red")
str(xsts)
str(stsd)
?algo.farrington ##only works with weeks
##farrignton
plot(stsd)
stsfar <- xsts <- sts(observed = xdf$sum_kill,
start=c(2003,12) ,epoch=as.numeric(xdf$weekstrdate),freq=52)
plot(stsfar)
cntrl<-list(range=50:650,w=18,b=1,alpha=0.01)
?farrington
?farringtonFlexible
res.far <- farrington(stsfar,control=cntrl)
str(res.far)
surveillance::plot(res.far,legend.opts=NULL,
main="Farrington algorithm \n applied to Iraq data",
ylab="No. of Deaths",
xlab="Time"
)
?surveillance::plot
##
#### Redo for syria data
##
library(stringi)
library(stringr)
## Same preparation as the Iraq section above, applied to Syria.
gti_data$nkill_clean<-coalesce(gti_data$nkill,0)
Syria.data<-gti_data %>% filter(country_txt=='Syria')
Syria.data$idate<-ymd(paste(Syria.data$iyear,str_pad(Syria.data$imonth,2,pad="0"),
                            str_pad(Syria.data$iday,2,pad="0")))
max(Syria.data$idate,na.rm=T)
min(Syria.data$idate,na.rm=T)
## One row per day over the Syria date range.
## FIX: renamed `c` -> `date_seq` so base::c is no longer masked.
date_seq<-seq(ymd('1974-08-14'),ymd('2015-12-31'),by='1 day')
length(date_seq)
d<-c(rep(0,times=length(date_seq))) %>% as.data.frame()
d$date<-date_seq
colnames(d)<-c('count_kills','idate')
Syria.dataex<-dplyr::left_join(d, Syria.data, by = "idate")
2^30
2^16
2^8
## FIX: copy-paste bug -- the original coalesced iraq.dataex$nkill into the
## Syria data frame (wrong object, mismatched length).
Syria.dataex$nkill<-dplyr::coalesce(Syria.dataex$nkill,0)
Syria.dataex$year<-lubridate::year(Syria.dataex$idate)
Syria.dataex$week<-lubridate::week(Syria.dataex$idate)
## unique(iraq.data.postinvasion$week)
## Aggregate to (year, week) totals of deaths.
Syria.dataex.weekly <- Syria.dataex %>%
  dplyr::select(year,week,nkill_clean) %>% group_by(year,week) %>%
  summarize(sum_kill=sum(nkill_clean,na.rm=T)) %>%
  arrange(year,week) %>% as.data.frame()
str(Syria.dataex.weekly)
## Monday of each (year, week) pair, for plotting on a date axis.
Syria.dataex$weekstrdate<-as.POSIXct(paste(Syria.dataex$year,Syria.dataex$week,
                                           "1",sep=" "), format = "%Y %U %u")
## str(iraq.dataex)
library(ggplot2)
## NOTE(review): one-off installs; normally run outside the script.
install.packages("lubridate")
library(lubridate)
devtools::install_github("twitter/AnomalyDetection")
library(AnomalyDetection)
library(magrittr)
1f642dade7670ab5995e308d94faab3ac8f05bc0 | 204998478eb0d27bd9d64d8814f8b6d664107bb5 | /SEWE.R | 183430d55aac99b4870aad43e98b2a73a41806fc | [] | no_license | TheAlchemistNerd/R-Programming | da7ce38ffc26957671b84170c24725445903dd75 | d0e032d93c440b770692636b191bade05ce4f64d | refs/heads/main | 2023-06-08T09:37:24.133530 | 2023-05-31T18:58:18 | 2023-05-31T18:58:18 | 356,393,502 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,004 | r | SEWE.R | m=15
# Scratch arithmetic with two scalars (top-level expressions auto-print
# in an interactive session). `m` was assigned 15 just above.
m
n=17
n
m+n
m/n
# Same ratio as n/m, written with literals.
17/15
# Identity helper: hands back whatever it is given.
fn1 <- function(x) {
  x
}
fn1(66)
# Multiply two numbers, keeping a named intermediate for readability.
mulp <- function(a, b) {
  product <- a * b
  product
}
# Multiply two numbers, compact one-expression form.
mulp2 <- function(a, b) a * b
mulp(40,80)
mulp(m,n)
# non-linear least square
# Quadratic fit y = b1*x^2 + b2 by nonlinear least squares.
xvalues <- c(1.6, 2.1, 2, 2.23, 3.71, 3.25, 3.4, 3.86, 1.19, 2.21)
yvalues <- c(5.19, 7.43, 6.94, 8.11, 18.75, 14.88, 16.06, 19.12, 3.21, 21.7)
# plot these values
plot(xvalues, yvalues)
model <- nls(yvalues ~ b1*xvalues^2 + b2, start = list(b1 = 1, b2 = 3))
# Overlay the fitted curve on a dense grid of x values.
new.data <- data.frame(xvalues = seq(min(xvalues), max(xvalues), len = 100))
# Residual sum of squares, then parameter confidence intervals.
print(sum(resid(model)^2))
print(confint(model))
# Example 2
# NOTE(review): y is random (rnorm/runif below) and no set.seed() is called,
# so these fits differ on every run. Also note `m` is reused here for the
# fitted model, clobbering the scalar defined at the top of the script.
x <- seq(0, 10, 0.1)
y <- rnorm(101, 5, 1)
plot(x, y)
m <- nls(y~a*x^3+b*x+c, start = list(a=1, b=2, c=1))
lines(x, predict(m))
# Saturating (Michaelis-Menten-style) fit to randomly generated data.
x <- seq(0, 100, 1)
y <- ((runif(1, 10, 20)*x)/((runif(1, 0, 10) + x)))
m<-nls(y~a * x/(b + x), start = list(a = 1, b = 2))
cor(y, predict(m))
print(sum(resid(m)^2))
plot(x, y)
lines(x, predict(m))
# Optimization
# One dimensional problems
# Objective: (x - 2)^2, minimised at x = 2 (vectorised over x).
func <- function(x){
  return((x - 2)^2)
}
(func(-2))
# plot the function using the curve function
curve(func, -4, 8)
# plot using a grid
grid <- seq(-10, 10, by=.1)
func(grid)
plot(grid, func(grid))
# you can find minimum using the optimize function
optimize(f=func, interval = c(-10, 10))
# BFGS Method
# NOTE(review): this redefines `func` as a 2-D objective, shadowing the 1-D
# version above. (x1-2)^2 * (x2-1)^2 is zero on the lines x1 = 2 or x2 = 1.
func <- function(x){
  out <- (x[1]-2)^2 * (x[2]-1)^2
  return(out)
}
optim(par = c(0,0), fn=func, gr=NULL,
      method = c("BFGS"),
      lower = -Inf, upper = Inf,
      control = list(), hessian = T)
# Nelder Mead simplex algorithm
optim(par = c(0,0), fn=func, gr=NULL,
      method = c("Nelder-Mead"),
      lower = -Inf, upper = Inf,
      control = list(), hessian = T)
library(lpSolve)
# Linear program: maximise 25*x1 + 20*x2 subject to
#   20*x1 + 12*x2   <= 1800
#   x1/15 + x2/15   <= 8
objective.in <- c(25, 20)
# FIX: the original matrix() call omitted nrow/ncol, producing a 4x1 matrix
# instead of the intended 2x2 constraints-by-variables matrix.
const.mat <- matrix(c(20, 12, 1/15, 1/15), nrow = 2, byrow = TRUE)
const.rhs <- c(1800, 8)
const.dir<-c("<=", "<=")
optimum <- lp(direction = "max", objective.in,const.mat,const.dir,
              const.rhs)
optimum$solution
optimum$objval
# NOTE(review): the original script ended with `g <- D()`, which errors
# because stats::D() requires an expression and a variable name; removed
# as an unfinished fragment.
490a0c13792ffe34c152804096b995571a472576 | 6ca2764f97c949600a320f6132a4d401eefc74c6 | /cachematrix.R | fdff3f74b59a1006d0903f5211055c5fdcccb7da | [] | no_license | bingcaowei/ProgrammingAssignment2 | 2e963b9c402d1c527168d98c89318eda1cf7b0c2 | 55fb2ed9bb888cc551371b0fbb2f39df481e1a25 | refs/heads/master | 2020-06-29T11:10:48.171133 | 2019-08-04T16:54:12 | 2019-08-04T16:54:12 | 200,518,917 | 0 | 0 | null | 2019-08-04T16:55:47 | 2019-08-04T16:55:46 | null | UTF-8 | R | false | false | 1,140 | r | cachematrix.R | ## bingcaowei August 2019
## https://github.com/bingcaowei/ProgrammingAssignment2
## HW3 for R Programming Coursera
## Creates a special matrix object that can cache its inverse
## assume matrix is always invertible
makeCacheMatrix <- function(x = matrix()) {
  # Closure-based "matrix object": holds a matrix plus a cached inverse.
  # Returns a list of accessors:
  #   set(y)           - replace the matrix, dropping any stale cached inverse
  #   get()            - return the stored matrix
  #   setInverse(inv)  - store a computed inverse
  #   getInverse()     - return the cached inverse, or NULL if none yet
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## computes the inverse of the special matrix returned by
## makeCacheMatrix. If inverse has already been calculated
## (and matrix has not changed), then cacheSolve should retrieve
## inverse from cache
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (an object produced by
  ## makeCacheMatrix). Serves the cached inverse when one exists; otherwise
  ## computes it with solve(), stores it via setInverse(), and returns it.
  ## Extra arguments in ... are forwarded to solve().
  cached <- x$getInverse()
  mat <- x$get()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(mat, ...)
  x$setInverse(fresh)
  fresh
}
|
832f77e2b5f93e8ad481349ec19b34224a21de89 | a0dc6dc943cee1dd1f86c1dd62d9ef44d9369951 | /data_model.R | 4f4ff107416a69b15afdd7c0ee84be69569c69d6 | [] | no_license | mortonanalytics/n_gram_model | d09eb66f00b66309fb3ef231b121512fc0ce7b73 | f60cc8427f73a818a1b5b18828ad7f4914ba93ec | refs/heads/master | 2021-01-20T04:47:24.587655 | 2017-06-26T22:29:32 | 2017-06-26T22:29:32 | 89,733,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,509 | r | data_model.R | ##tidy data management packages
library(dplyr)
library(tidyr)
library(tidytext)
library(quanteda)
##language detection package
library(textcat)
##graphics package
library(ggplot2)
## Corpus location and inputs --------------------------------------------------
## NOTE(review): setwd() plus an absolute path makes this machine-specific;
## prefer passing the directory in, or a project-relative path.
dir <- "Z:/Ryan/Coursera/10_capstone_project/corpus/final/en_US"
setwd(dir)
data(stop_words)
docs <- list.files(dir)
docs <- docs[grep("txt", docs)]
fname <- paste(dir,"/",docs[1:3], sep = "")
text <- lapply(fname, readLines)
## Sample 10%/10%/5% of the three corpora for training and 1% of the first
## for testing.
## NOTE(review): no set.seed() before sample_frac(), so the sample -- and
## every downstream table -- is not reproducible.
text_df <- data_frame(index = 1:length(text[[1]]),text = text[[1]]) %>% sample_frac(.1, replace = FALSE)
text_df <- rbind(text_df,data_frame(index = 1:length(text[[2]]),text = text[[2]]) %>% sample_frac(.1, replace = FALSE))
text_df <- rbind(text_df,data_frame(index = 1:length(text[[3]]),text = text[[3]]) %>% sample_frac(.05, replace = FALSE))
test_df <- data_frame(index = 1:length(text[[1]]),text = text[[1]]) %>% sample_frac(.01, replace = FALSE)
## Cleaning: strip URLs, @handles and underscores, transliterate to ASCII,
## drop apostrophes, keep only letters/commas/periods, collapse repeated
## letters, and mark sentence boundaries with <s> tokens.
text_df$text <- gsub("http[[:alnum:]]*", "", text_df$text)
## NOTE(review): the comma inside [a-z,A-Z] is a literal comma, not a range
## separator -- probably intended as [a-zA-Z].
text_df$text <- gsub("@[a-z,A-Z]*","", text_df$text)
text_df$text <- gsub("_","", text_df$text)
text_df$text <- iconv(text_df$text, to='ASCII//TRANSLIT')
text_df$text <- gsub("'", "", text_df$text)
# text_df$text <- gsub("-"," ", text_df$text)
text_df$text <- gsub("[^[:alpha:],.]", " ", text_df$text)
## NOTE(review): this collapses ALL repeated letters ("all" -> "al"); if the
## intent was only to tame elongated words ("sooo"), match 3+ repeats instead.
text_df$text <-gsub('([[:alpha:]])\\1+', '\\1', text_df$text)
text_df$text <- gsub("\\.", " <s> <s>", text_df$text)
text_df$text <- paste("<s>", text_df$text, sep = " ")
## Same cleaning pipeline for the held-out test sample (duplicated code;
## could be factored into a single helper applied to both data frames).
test_df$text <- gsub("http[[:alnum:]]*", "", test_df$text)
test_df$text <- gsub("@[a-z,A-Z]*","", test_df$text)
test_df$text <- gsub("_","", test_df$text)
test_df$text <- iconv(test_df$text, to='ASCII//TRANSLIT')
test_df$text <- gsub("'", "", test_df$text)
# test_df$text <- gsub("-"," ", test_df$text)
test_df$text <- gsub("[^[:alpha:],.]", " ", test_df$text)
test_df$text <-gsub('([[:alpha:]])\\1+', '\\1', test_df$text)
test_df$text <- gsub("\\.", " <s> <s>", test_df$text)
test_df$text <- paste("<s>", test_df$text, sep = " ")
## Unigram frequencies and cumulative term-frequency coverage.
word_counts <- text_df %>%
  unnest_tokens(word, text) %>%
  count(word) %>%
  #filter(!word %in% stop_words) %>%
  ungroup()
## NOTE(review): `words` first holds the token total, then is reassigned to
## the vocabulary vector below -- two different meanings for one name.
words <- sum(word_counts$n)
word_counts <- word_counts %>%
  mutate(tf = n/words) %>%
  arrange(desc(tf)) %>%
  mutate(cum_tf = cumsum(tf))
words <- word_counts$word #[word_counts$cum_tf < .8]
write.table(words, "words.txt", row.names = FALSE)
#write.csv(word_counts, "unigram.csv", row.names = FALSE)
#######N-Gram analysis
## Build n-gram (n = 2..6) frequency tables from the training text.
## Each table has: key  = the first n-1 tokens (space separated),
##                 word = the final token (the prediction target),
##                 n    = the observed count, plus tf/idf scores.
## FIX: replaces five near-identical copy-pasted pipelines with one
## parameterised helper.
count_ngrams <- function(df, n_tokens) {
  df %>%
    unnest_tokens(n_gram, text, token = "ngrams", n = n_tokens) %>%
    ## Split on the LAST space only (lookahead regex): everything before it
    ## becomes the key, the final token the word. Equivalent to the previous
    ## separate()-then-unite() sequence, except degenerate NA n-grams stay
    ## NA instead of becoming the literal string "NA NA ...".
    separate(n_gram, c("key", "word"), sep = " (?=[^ ]+$)") %>%
    count(key, word) %>%
    bind_tf_idf(word, key, n) %>%
    filter(n > 1) %>%  # drop hapax n-grams to keep the model small
    ungroup()
}
text_2_grams <- count_ngrams(text_df, 2)
text_3_grams <- count_ngrams(text_df, 3)
text_4_grams <- count_ngrams(text_df, 4)
text_5_grams <- count_ngrams(text_df, 5)
text_6_grams <- count_ngrams(text_df, 6)
n_gram_df <- do.call("rbind", list(text_2_grams, text_3_grams, text_4_grams, text_5_grams, text_6_grams))
write.csv(n_gram_df, "n_gram.csv", row.names = FALSE)
###remove stop words set
library(tm)
## Strip stop words via a tm corpus, then flatten back to a data frame.
text_df_2_corpus <- Corpus(DataframeSource(text_df))
text_df_2_corpus <- tm_map(text_df_2_corpus,removeWords, stop_words$word)
## tidy() returns one newline-separated text blob per document; split the
## leading index off again.
text_df_2 <- tidy(text_df_2_corpus) %>% select(text) %>% separate(text, c("index", "text"), sep = "\\n")
## Unigram frequencies and cumulative coverage on the stop-word-free text.
word_counts_stop <- text_df_2 %>%
  unnest_tokens(word, text) %>%
  count(word) %>%
  ungroup()
words_stop <- sum(word_counts_stop$n)
word_counts_stop <- word_counts_stop %>%
  mutate(tf = n/words_stop) %>%
  arrange(desc(tf)) %>%
  mutate(cum_tf = cumsum(tf))
words_stop <- word_counts_stop$word
write.table(words_stop, "words_stop.txt", row.names = FALSE)
## n-gram tables (n = 2..6) for the stop-word-free corpus:
## key = first n-1 tokens (space separated), word = final token.
## FIX: replaces five copy-pasted pipelines with one parameterised helper.
count_stop_ngrams <- function(df, n_tokens) {
  df %>%
    unnest_tokens(n_gram, text, token = "ngrams", n = n_tokens) %>%
    ## Split on the last space only: key = leading tokens, word = last token.
    separate(n_gram, c("key", "word"), sep = " (?=[^ ]+$)") %>%
    count(key, word) %>%
    bind_tf_idf(word, key, n) %>%
    filter(n > 1) %>%  # drop hapax n-grams
    ungroup()
}
stop_2_grams <- count_stop_ngrams(text_df_2, 2)
stop_3_grams <- count_stop_ngrams(text_df_2, 3)
stop_4_grams <- count_stop_ngrams(text_df_2, 4)
stop_5_grams <- count_stop_ngrams(text_df_2, 5)
stop_6_grams <- count_stop_ngrams(text_df_2, 6)
n_gram_df_stop <- do.call("rbind", list(stop_2_grams, stop_3_grams, stop_4_grams, stop_5_grams, stop_6_grams))
write.csv(n_gram_df_stop, "n_gram_stop.csv", row.names = FALSE)
##########test set
## Build a 6-gram table from the held-out sample (key = the 5 preceding
## tokens, word = the observed next token), then score predictions.
test_6_grams <- test_df %>%
  unnest_tokens(n_gram, text, token = "ngrams", n = 6) %>%
  separate(n_gram, c("word1", "word2", "word3", "word4", "word5", "word"), sep = " ") %>%
#  filter(!word1 %in% words_stop) %>%
#  filter(!word2 %in% words_stop) %>%
#  filter(!word3 %in% words_stop) %>%
#  filter(!word4 %in% words_stop) %>%
#  filter(!word5 %in% words_stop) %>%
#  filter(!word %in% words_stop) %>%
  unite(key,word1,word2,word3, word4, word5, sep = " ") %>%
  count(key, word) %>%
  bind_tf_idf(word, key, n)%>%
  filter(n > 1) %>%
  ungroup()
## NOTE(review): select.word() is not defined in this file -- presumably the
## prediction function from a companion script; source it before this step.
## result = 1 when the predicted next word matches the observed one.
test_6_grams <- test_6_grams %>%
  mutate(pred = select.word(key)) %>%
  mutate(result = ifelse(word == pred, 1, 0))
write.csv(test_6_grams, "test_6_grams.csv", row.names = FALSE)
|
5f7e2ea07da0f6a98aaf4d72253a2a3687b4efde | e6ef0f643c8af293ce42bdeb0937d4b24798ddbd | /svm.R | e21143e983d7777eb7563311adcf857e04ce5936 | [] | no_license | Karpagampgdds/SVM | a7a91586da5304f60251fd9dfcf00fb6d5be0ac1 | 4c21de29ac379502ea53ad018f57442803ec209e | refs/heads/master | 2021-03-14T19:04:22.582838 | 2020-03-12T09:02:57 | 2020-03-12T09:02:57 | 246,787,287 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,676 | r | svm.R | #--------------------------------------------------------------------------------#
#----- Hand written Digit Recognition using SVM both Linear and Non-linear ------#
# Submitted by : KARPAGAM R
#--------------------------------------------------------------------------------#
# --------------------- Installing necessary packages ---------------------------#
# NOTE(review): unconditional install.packages() calls re-download on every
# run; comment these out (or guard with requireNamespace()) once set up.
install.packages("caret",dependencies = TRUE)
install.packages("kernlab")
install.packages("dplyr")
install.packages("readr")
install.packages("ggplot2")
install.packages("gridExtra")
install.packages("e1071")
install.packages("caTools")
#-----------------------Extracting required libraries----------------------------#
library("caret")
library("kernlab")
library("dplyr")
library("readr")
library("ggplot2")
library("gridExtra")
library("e1071")
library("caTools")
# NOTE(review): machine-specific working directory; parameterise before reuse.
setwd("C:\\Users\\Student\\Documents\\svm")
#-----------------------Reading the given dataframes--------------------------------#
# MNIST CSVs: first column is the digit label, remaining columns are pixels.
train <- read.delim("mnist_train.csv",sep=",",stringsAsFactors = FALSE,header = FALSE)
test <- read.delim("mnist_test.csv",sep=",",stringsAsFactors = FALSE,header = FALSE)
#----------Renaming the Target variable both in test and train dataframes-----------#
colnames(train)[1] <- "img_id"
colnames(test)[1] <- "img_id"
#----------------Adding train/test column in both the dataframes--------------------#
train$type <- "train"
test$type <- "test"
#--------------------Merging both the dataframes for data cleanup-------------------#
mnist_full_dataset <- rbind(train,test)
#------------------------------Checking for NAs-------------------------------------#
which(sapply(mnist_full_dataset,function(x) sum(is.na(x))) != 0) # No NAs
#--------------------Checking for redundancy among column values--------------------#
# Constant (single-valued) pixel columns carry no information for the SVM.
identical_cols <- which(sapply(mnist_full_dataset,function(x) length(unique(x)) == 1))
length(identical_cols) # Totally 65 identical columns present
identical_cols
mnist_clean_dataset <- mnist_full_dataset %>% select(-identical_cols)
#------------- checking for outliers at both the ends i.e. lower and uper------------#
# Labels must lie in 0..9; any hit here would indicate a corrupt file.
which((mnist_clean_dataset %>% select(img_id)) > 9 | (mnist_clean_dataset %>% select(img_id)) < 0)
#-----Separating the Train and Test data for model development after data cleanup-----#
train_data <- mnist_clean_dataset %>% filter(type == "train") %>% select(-type)
test_data <- mnist_clean_dataset %>% filter(type == "test") %>% select(-type)
#------- Converting the target variable into a factor variable in both Train and Test dataset-------#
train_data$img_id <- as.factor(train_data$img_id)
test_data$img_id <- as.factor(test_data$img_id)
#----------------------Taking data for model building-------------------------------#
# Stratified ~33% subsample of the training digits to keep runtimes manageable.
set.seed(100)
train_indices <- sample.split(train_data$img_id,SplitRatio = 0.3333)
train_sample <- train_data[train_indices,]
train_data_final <- train_sample
test_data_final <- test_data
#-----------------------------------Model Building--------------------------------#
#-------------------------------------LINEAR SVM----------------------------------#
linear_svm <- ksvm(img_id~., data=train_data_final, scale=FALSE, kernel="vanilladot")
linear_svm
#----------------------------Evaluating the Linear Model-------------------------#
linear_svm_evaluation <- predict(linear_svm,train_data_final)
confusionMatrix(linear_svm_evaluation,train_data_final$img_id)
#----------------Evaluating the Linear Model with test data----------------------#
linear_svm_test_evaluation <- predict(linear_svm,test_data_final)
confusionMatrix(linear_svm_test_evaluation,test_data_final$img_id)
#------------------------NON-LINEAR SVM with RBF - Kernel------------------------#
non_linear_svm_model <- ksvm(img_id~., data=train_data_final, scale=FALSE, kernel="rbfdot")
non_linear_svm_model
#----------------------------Trainig Accuracy------------------------------------#
non_linear_svm_train_evaluation <- predict(non_linear_svm_model,train_data_final)
confusionMatrix(non_linear_svm_train_evaluation,train_data_final$img_id)
#-------------------------Train Set Net Accuracy = 0.9838------------------------#
#----------------------------Test accuracy---------------------------------------#
non_linear_svm_test_evaluation <- predict(non_linear_svm_model,test_data_final)
confusionMatrix(non_linear_svm_test_evaluation,test_data_final$img_id)
#----------------------Test Net Accuracy of model = 0.9673----------------------#
#--------------------- Model Evaluation - Cross Validation----------------------#
# 2-fold CV over a 3x3 grid of (sigma, C) for the RBF kernel.
# NOTE(review): the variable name `trainControl` shadows caret::trainControl();
# it still works because train() receives the object, but a distinct name
# would be clearer.
trainControl <- trainControl(method = "cv", number = 2,verboseIter=TRUE)
metric <- "Accuracy"
set.seed(100)
# 0.63e-7 is the same value as the 6.30e-08 shown in the results below.
grid <- expand.grid(.sigma = c(0.63e-7,1.63e-7,2.63e-7),.C=c(1,2,3))
non_linear_svm_fit <- train(img_id~.,data=train_data_final,method="svmRadial",
                            metric=metric,tuneGrid=grid,
                            trControl=trainControl)
non_linear_svm_fit
# sigma C Accuracy Kappa
# 6.30e-08 1 0.9377406 0.9308006
# 6.30e-08 2 0.9442916 0.9380814
# 6.30e-08 3 0.9490423 0.9433616
# 1.63e-07 1 0.9567434 0.9519216
# 1.63e-07 2 0.9623443 0.9581468
# 1.63e-07 3 0.9644947 0.9605367
# 2.63e-07 1 0.9638945 0.9598701
# 2.63e-07 2 0.9684453 0.9649281
# 2.63e-07 3 0.9698455 0.9664845
#Accuracy was used to select the optimal model using the largest value.
#The final values used for the model were sigma = 2.63e-07 and C = 3.
plot(non_linear_svm_fit)
#------------------Building a model with C = 3 and sigma = 2.63e-07----------#
# Refit on the training sample with the CV-selected hyperparameters.
non_linear_svm_final <- ksvm(img_id~.,data=train_data_final,kernel="rbfdot",
                             scale=FALSE,C=3,kpar=list(sigma=2.63e-7))
non_linear_svm_final
#----------------------------Training accuracy-------------------------------#
non_linear_svm_train_eval1 <- predict(non_linear_svm_final,train_data_final)
confusionMatrix(non_linear_svm_train_eval1,train_data_final$img_id)
#--------------------------Net Train Accuracy = 0.9989-----------------------#
#----------------------------Test accuracy-----------------------------------#
non_linear_svm_test_evaluation1 <- predict(non_linear_svm_final,test_data_final)
confusionMatrix(non_linear_svm_test_evaluation1,test_data_final$img_id)
# Overall Statistics
# Accuracy : 0.9775
# 95% CI : (0.9744,0.9803)
# No information Rate : 0.1135
# P-Value[Acc>NIR] : <2.2e-16
#Kappa : 0.975
# Mcnemar's Test P-value : NA
|
279d2ab069cfed57964e18db06a9fff28806d597 | e68e99f52f3869c60d6488f0492905af4165aa64 | /man/torch_nonzero.Rd | f85dee27cfcda261ebe0cfe2ca8e42d8a7d7e740 | [
"MIT"
] | permissive | mlverse/torch | a6a47e1defe44b9c041bc66504125ad6ee9c6db3 | f957d601c0295d31df96f8be7732b95917371acd | refs/heads/main | 2023-09-01T00:06:13.550381 | 2023-08-30T17:44:46 | 2023-08-30T17:44:46 | 232,347,878 | 448 | 86 | NOASSERTION | 2023-09-11T15:22:22 | 2020-01-07T14:56:32 | C++ | UTF-8 | R | false | true | 1,712 | rd | torch_nonzero.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/wrapers.R
\name{torch_nonzero}
\alias{torch_nonzero}
\title{Nonzero}
\usage{
torch_nonzero(self, as_list = FALSE)
}
\arguments{
\item{self}{(Tensor) the input tensor.}
\item{as_list}{If \code{FALSE}, the output is a tensor containing the indices. If \code{TRUE},
the output is one 1-D tensor for each dimension, containing the indices of each
nonzero element along that dimension.
\strong{When} \code{as_list} \strong{is \code{FALSE} (default)}:
Returns a tensor containing the indices of all non-zero elements of
\code{input}. Each row in the result contains the indices of a non-zero
element in \code{input}. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If \code{input} has \eqn{n} dimensions, then the resulting indices tensor
\code{out} is of size \eqn{(z \times n)}, where \eqn{z} is the total number of
non-zero elements in the \code{input} tensor.
\strong{When} \code{as_list} \strong{is \code{TRUE}}:
Returns a tuple of 1-D tensors, one for each dimension in \code{input},
each containing the indices (in that dimension) of all non-zero elements of
\code{input} .
If \code{input} has \eqn{n} dimensions, then the resulting tuple contains \eqn{n}
tensors of size \eqn{z}, where \eqn{z} is the total number of
non-zero elements in the \code{input} tensor.
As a special case, when \code{input} has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.}
}
\description{
Nonzero elements of tensors.
}
\examples{
if (torch_is_installed()) {
torch_nonzero(torch_tensor(c(1, 1, 1, 0, 1)))
}
}
|
cf894c8436bc5e4786034c41812bbc7932c60dc0 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /Benchmarking/R/bootStat.R | 7a307a8ff62e95622e2c3af0e25515a166f194a4 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 817 | r | bootStat.R | # $Id: bootStat.R 117 2011-05-17 10:17:07Z Lars $
# Calculates the critical value at level |alpha| for the vector of
# trials |s|: the average of the two order statistics bracketing the
# alpha-quantile position.
critValue <- function(s, alpha=0.05) {
   if ( alpha <= 0 || alpha >= 1 )
      stop("The argument alpha must be between 0 and 1")
   ss_ <- sort(s)
   ## FIX: the original called mean(a, b, na.rm=TRUE), which silently passes
   ## the second order statistic to mean()'s positional `trim` argument and
   ## therefore never averages the two values. Collect them in a vector.
   ## (When floor(alpha*length(s)) is 0, the lower index selects nothing and
   ## the ceiling order statistic is returned, rather than NaN as before.)
   mean( c(ss_[floor(alpha*length(s))], ss_[ceiling(alpha*length(s))]),
         na.rm=TRUE )
}
# Calculate the probability of a larger value than |shat| in the vector
# of trials |s|: the level at which the empirical quantile of |s|
# reaches |shat|.
typeIerror <- function(shat,s) {
   ## Signed gap between the level-quantile of s and shat; its root in
   ## (0, 1) is the probability we are after.
   gap <- function(level) {
      quantile(s, level, na.rm=TRUE, names=FALSE) - shat
   }
   ## Same sign at both endpoints means no root in (0, 1): shat lies
   ## outside the observed range, so the answer is 0% or 100%.
   if ( gap(0) * gap(1) > 0 ) {
      if ( shat <= min(s) ) return(0)
      if ( shat >= max(s) ) return(1)
   }
   uniroot(gap, c(0,1))$root
}
|
05dabef9b1515b3a3074e88315bcc4ca7a7617ca | 506d30b16d10cbbdd832d1efe7a293eeaaa8fe96 | /R/mpgroup.R | dce581403a73aee8a9bae2aa6138d5bf87ddfd64 | [] | no_license | Peter-Jackson22/mpMap | 407646d292717c2c18db3302a3048b38acb23127 | 89837b6dcfe126b682079d2de5283eda3c5c2d5a | refs/heads/master | 2020-03-30T13:53:24.581363 | 2017-09-19T22:45:16 | 2017-09-19T22:45:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,297 | r | mpgroup.R | #' Construct linkage groups using 2-point recombination fraction estimates
#'
#' Use two-point recombination fraction estimates to group markers into the specified number of linkage groups. The input \code{initial} can be used to select groups of markers that will be assigned to the same group. Grouping is performed using hierarchical clustering (\code{hclust}) with either average, complete or single linkage. The matrix used for clustering can be either theta (the recombination fraction matrix), lod (the log likelihood ratio) or a combination of the two.
#' @importFrom stats na.omit
#' @importFrom stats hclust
#' @importFrom stats as.dist
#' @importFrom stats cutree
#' @export
#' @param mpcross Object of class \code{mpcross}
#' @param groups The number of groups to be formed
#' @param initial A list, with each entry containing markers which are to be assigned to the same group. Markers can be referenced by name or by index
#' @param clusterBy The type of data to cluster by. Can be one of "theta" for recombination fraction, "lod" for log likelihood ratio, or "combined" for a combination of the two
#' @param method The clustering method to use. Must be one of "single", "complete" or "average"
#' @return A copy of the input mpcross object, with an additional "lg" entry containing the groupings of the markers. In addition the recombination fraction estimates and genetic data are reordered according to the created groupings of markers.
#' \item{lg$groups}{ Numeric vector giving the group to which each marker belongs}
#' \item{lg$all.groups}{ Numeric vector giving the numbers for any groups that are present}
#' @examples
#' map <- qtl::sim.map(len=rep(100, 2), n.mar=11, eq.spacing=TRUE, include.x=FALSE)
#' sim.ped <- sim.mpped(4, 1, 500, 6, 1)
#' sim.dat <- sim.mpcross(map=map, pedigree=sim.ped,
#' qtl=matrix(data=c(1, 50, .4, 0, 0, 0),
#' nrow=1, ncol=6, byrow=TRUE), seed=1)
#' dat.rf <- mpestrf(sim.dat)
#' grouped <- mpgroup(dat.rf, groups=2, clusterBy="combined", method="average")
#' grouped$lg
# Cluster markers into linkage groups using hierarchical clustering on the
# estimated recombination fractions (and/or LOD scores) in mpcross$rf.
# If mpcross$map exists, its chromosomes define the grouping outright and the
# `groups`/`initial` arguments are ignored. `initial` optionally pre-assigns
# markers to groups; those groups are collapsed to single rows/columns of the
# distance matrix (via the chosen linkage) before clustering.
mpgroup <- function(mpcross, groups, initial = NULL, clusterBy="combined", method="average")
{
	# Validate the clustering options before touching the data.
	if(!(clusterBy %in% c("combined", "theta", "lod")))
	{
		stop("Input clusterBy must be one of 'combined', 'theta' or 'lod'")
	}
	if(!(method %in% c("average", "complete", "single")))
	{
		stop("Input method must be one of 'average', 'complete' or 'single'")
	}
	if (missing(mpcross)) 
	{
		stop("Input mpcross cannot be missing")
	}
	if (is.null(mpcross$rf)&is.null(mpcross$map)) 
	{
		stop("Must calculate recombination fractions prior to grouping loci")
	}
	# An existing map takes precedence: each chromosome becomes one group.
	if(!is.null(mpcross$map)) {
		cat("Using map groupings for groups. Remove map object if you want to regroup.\n")
		initial <- lapply(mpcross$map, function(x) match(names(x), colnames(mpcross$finals)))
		groups <- length(mpcross$map)
		output <- mpcross
		grpassignment <- vector(length=ncol(mpcross$finals))
		for (ii in 1:groups) grpassignment[initial[[ii]]] <- ii
		output$lg <- list(all.groups=1:groups, groups=grpassignment)
		names(output$lg$groups) <- unlist(lapply(mpcross$map, names))
		return(output)
		## don't rearrange order
	}
	lod <- mpcross$rf$lod
	theta <- mpcross$rf$theta
	#Reverse lod so that small values indicate similarity
	lod[is.na(mpcross$rf$lod)] <- 0
	lod <- max(lod) - lod
	diag(lod) <- 0
	# Missing recombination fractions are treated as unlinked (theta = 0.5).
	theta[is.na(mpcross$rf$theta)] <- 0.5
	# linkFunction merges a pre-specified group's distances into one entry,
	# mirroring hclust's average/complete/single linkage.
	if(method == "average")
	{
		linkFunction <- function(x) mean(x, na.rm=TRUE)
	}
	else if(method == "complete")
	{
		linkFunction <- function(x) max(x, na.rm=TRUE)
	}
	else
	{
		linkFunction <- function(x) min(x, na.rm=TRUE)
	}
	if(clusterBy == "combined")
	{
		# Theta plus a LOD contribution scaled below the finest theta step in
		# rf$r, so LOD only breaks ties between (nearly) equal thetas.
		distMat <- theta + lod / max(lod) * min(abs(diff(mpcross$rf$r)))
	}
	else if(clusterBy == "theta")
	{
		distMat <- theta
	}
	else
	{
		distMat <- lod
	}
	if(is.null(initial))
	{
		clustered <- hclust(as.dist(distMat), method=method)
	}
	else
	{
		#The number of values we're going to shrink the distance matrix by, as a result of the pre-grouping
		nLess <- sum(unlist(lapply(initial, function(x) length(x) - 1)))
		if(nLess == 0)
		{
			# NOTE(review): with non-NULL `initial` whose groups all have one
			# member, this clusters the full distance matrix, yet the
			# post-processing below still peels off the first length(initial)
			# leaves and references `ungrouped`, which is only defined in the
			# other branch — TODO confirm intended behavior for this case.
			clustered <- hclust(as.dist(distMat), method=method)
		}
		else
		{
			#We actually have groups input. These groups go first, followed by the ungrouped values
			ungrouped <- setdiff(colnames(mpcross$rf$theta), unlist(initial))
			new.dist <- matrix(data=0, nrow=ncol(mpcross$rf$theta) - nLess, ncol = ncol(mpcross$rf$theta) - nLess)
			#Are there even any ungrouped values? If there aren't, skip this bit
			onlyGrouped <- length(initial) == ncol(mpcross$rf$theta) - nLess
			if(!onlyGrouped)
			{
				new.dist[(length(initial)+1):(ncol(mpcross$rf$theta) - nLess), (length(initial)+1):(ncol(mpcross$rf$theta) - nLess)] <- distMat[ungrouped, ungrouped]
			}
			for(i in 1:length(initial))
			{
				markersI <- initial[[i]]
				for(j in 1:length(initial))
				{
					markersJ <- initial[[j]]
					# Group-to-group distance via the chosen linkage.
					new.dist[i, j] <- new.dist[j, i] <- linkFunction(distMat[markersI, markersJ])
				}
				if(!onlyGrouped)
				{
					for(j in 1:length(ungrouped))
					{
						# Group-to-single-marker distance.
						new.dist[i, j+length(initial)] <- linkFunction(distMat[markersI, ungrouped[j]])
					}
				}
			}
			# `members` tells hclust that each pre-specified group stands for
			# several markers when computing linkage.
			clustered <- hclust(as.dist(new.dist), members=c(unlist(lapply(initial, length)), rep(1, length(ungrouped))), method=method)
		}
	}
	cut <- cutree(clustered, k=groups)
	if(!is.null(initial))
	{
		# Expand the per-group assignments back out to individual markers:
		# the first length(initial) leaves are the pre-specified groups.
		specifiedGroups <- cut[1:length(initial)]
		cut <- cut[-(1:length(initial))]
		names(cut) <- ungrouped
		for(i in 1:length(initial))
		{
			new <- rep(specifiedGroups[i], length(initial[[i]]))
			names(new) <- initial[[i]]
			cut <- c(cut, new)
		}
		# Restore original marker order.
		cut <- cut[colnames(mpcross$rf$theta)]
	}
	else
	{
		names(cut) <- colnames(mpcross$rf$theta)
	}
	output <- mpcross
	output$lg <- list(all.groups=1:groups, groups=cut)
	# Reorder markers so linkage groups are contiguous in the returned object.
	return(subset(output, markers = colnames(output$founders)[order(output$lg$groups)]))
}
# Split one existing linkage group (`toSplit`) into `nSplits` sub-groups by
# re-clustering only its markers. Group IDs above `toSplit` are shifted up to
# make room, so the new sub-groups occupy IDs toSplit, ..., toSplit+nSplits-1.
# Returns a copy of mpcross with an updated $lg and markers reordered by group.
mpsplitgroup <- function(mpcross, toSplit, nSplits, clusterBy="combined", method="average")
{
	if(!(method %in% c("average", "complete", "single")))
	{
		stop("Input method must be one of 'average', 'complete' or 'single'")
	}
	if(!(clusterBy %in% c("combined", "theta", "lod")))
	{
		stop("Input clusterBy must be one of 'combined', 'theta' or 'lod'")
	}
	if (missing(mpcross)) 
	{
		stop("Input mpcross cannot be missing")
	}
	if (is.null(mpcross$rf)) 
	{
		stop("Must calculate recombination fractions prior to grouping loci")
	}
	if(is.null(mpcross$lg))
	{
		# Fixed: this error message previously named mpsubgroup.
		stop("Must have an existing grouping structure to call mpsplitgroup")
	}
	lod <- mpcross$rf$lod
	theta <- mpcross$rf$theta
	#Reverse lod so that small values indicate similarity
	lod[is.na(mpcross$rf$lod)] <- 0
	lod <- max(lod) - lod
	diag(lod) <- 0
	# Missing recombination fractions are treated as unlinked (theta = 0.5).
	theta[is.na(mpcross$rf$theta)] <- 0.5
	if(clusterBy == "combined")
	{
		# Theta plus a LOD contribution scaled below the finest theta step,
		# so LOD only breaks ties between (nearly) equal thetas.
		distMat <- theta + lod / max(lod) * min(abs(diff(mpcross$rf$r)))
	}
	else if(clusterBy == "theta")
	{
		distMat <- theta
	}
	else
	{
		distMat <- lod
	}
	# Re-cluster only the markers currently assigned to the group being split.
	new.groups <- mpcross$lg$groups
	current.group <- which(mpcross$lg$groups == toSplit)
	subdist <- as.dist(distMat[current.group, current.group])
	clustered <- hclust(subdist, method=method)
	cut <- cutree(clustered, k=nSplits)
	# Shift IDs of the groups after toSplit, then slot in the sub-group IDs.
	new.groups[new.groups > toSplit] <- new.groups[new.groups > toSplit] + nSplits-1
	new.groups[current.group] <- cut + toSplit - 1
	output <- mpcross
	all.groups <- mpcross$lg$all.groups
	all.groups[all.groups > toSplit] <- all.groups[all.groups > toSplit] + nSplits - 1
	all.groups <- unique(c(all.groups, toSplit:(toSplit + nSplits-1)))
	output$lg <- list(all.groups=all.groups, groups=new.groups)
	return(subset(output, markers = colnames(output$founders)[order(output$lg$groups)]))
}
# Split every existing linkage group into `subgroups` pieces by re-clustering
# each group's markers separately. Sub-group IDs are numbered consecutively
# across the original groups (group `index` yields IDs
# (index-1)*subgroups + 1, ..., index*subgroups).
# Returns a copy of mpcross with an updated $lg and markers reordered by group.
mpsubgroup <- function(mpcross, subgroups, clusterBy="combined", method="average")
{
	if(!(method %in% c("average", "complete", "single")))
	{
		stop("Input method must be one of 'average', 'complete' or 'single'")
	}
	if(!(clusterBy %in% c("combined", "theta", "lod")))
	{
		stop("Input clusterBy must be one of 'combined', 'theta' or 'lod'")
	}
	if (missing(mpcross)) 
	{
		stop("Input mpcross cannot be missing")
	}
	if (is.null(mpcross$rf)) 
	{
		stop("Must calculate recombination fractions prior to grouping loci")
	}
	if(is.null(mpcross$lg))
	{
		stop("Must have an existing grouping structure to call mpsubgroup")
	}
	lod <- mpcross$rf$lod
	theta <- mpcross$rf$theta
	#Reverse lod so that small values indicate similarity
	lod[is.na(mpcross$rf$lod)] <- 0
	lod <- max(lod) - lod
	diag(lod) <- 0
	# Missing recombination fractions are treated as unlinked (theta = 0.5).
	theta[is.na(mpcross$rf$theta)] <- 0.5
	if(clusterBy == "combined")
	{
		# Theta plus a LOD contribution scaled below the finest theta step,
		# so LOD only breaks ties between (nearly) equal thetas.
		distMat <- theta + lod / max(lod) * min(abs(diff(mpcross$rf$r)))
	}
	else if(clusterBy == "theta")
	{
		distMat <- theta
	}
	else
	{
		distMat <- lod
	}
	new.groups <- vector(mode="integer", length=length(mpcross$lg$groups))
	names(new.groups) <- names(mpcross$lg$groups)
	# seq_along() rather than 1:length() copes with a zero-length all.groups.
	for(index in seq_along(mpcross$lg$all.groups))
	{
		group <- mpcross$lg$all.groups[index]
		current.group <- which(mpcross$lg$groups == group)
		subdist <- as.dist(distMat[current.group, current.group])
		clustered <- hclust(subdist, method=method)
		cut <- cutree(clustered, k=subgroups)
		new.groups[current.group] <- cut + ((index -1)*subgroups)
	}
	output <- mpcross
	# Fixed: previously only n.groups was stored, but the rest of the package
	# (e.g. fineTuneGroups and the documented lg structure) reads
	# lg$all.groups. Keep n.groups for backward compatibility and add
	# all.groups listing the group IDs actually present.
	output$lg <- list(n.groups=length(mpcross$lg$all.groups) * subgroups,
	                  all.groups=sort(unique(new.groups)),
	                  groups=new.groups)
	return(subset(output, markers = colnames(output$founders)[order(output$lg$groups)]))
}
# Post-process an existing grouping: move each marker to the group with the
# smallest average recombination fraction to it (unless that group is listed
# in excludeGroups), then drop markers whose average theta to their final
# group exceeds 0.41 (i.e. markers that remain essentially unlinked).
fineTuneGroups <- function(grouped, excludeGroups=c())
{
	if(is.null(names(grouped$lg$groups))) stop("Invalid mpcross object input")
	# Parallel vectors recording proposed relocations: the marker, the group it
	# came from, the group it should move to, and the average theta that made
	# the new group the current best candidate.
	originalChromosome <- newChromosome <- markerName <- newAverage <- c()
	for(i in seq_along(grouped$lg$all.groups))
	{
		indicesI <- which(grouped$lg$groups == grouped$lg$all.groups[i])
		diagonal <- grouped$rf$theta[indicesI, indicesI, drop=FALSE]
		# Average theta of each marker with its own (current) group.
		originalAverages <- apply(diagonal, 1, function(x) mean(x, na.rm=TRUE))
		for(j in grouped$lg$all.groups[-i])
		{
			indicesJ <- which(grouped$lg$groups == j)
			offDiagonal <- grouped$rf$theta[indicesI, indicesJ, drop=FALSE]
			averages <- apply(offDiagonal, 1, function(x) mean(x, na.rm=TRUE))
			#Ok, for these marker names chromosome j is better than chromosome i.
			betterMarkerNames <- names(which(averages < originalAverages))
			#For these ones we have no previously better chromosome, so add them straight in
			firstSelection <- betterMarkerNames[!(betterMarkerNames %in% markerName)]
			#For these we've already made a selection. But is j also better than the previously chosen chromosome? If a marker is mostly unlinked to its current chromosome then a lot of other bad chromosomes will probably be a little bit better too
			secondSelection <- betterMarkerNames[betterMarkerNames %in% markerName]
			markerName <- c(markerName, firstSelection)
			originalChromosome <- c(originalChromosome, rep(grouped$lg$all.groups[i], length(firstSelection)))
			newChromosome <- c(newChromosome, rep(j, length(firstSelection)))
			newAverage <- c(newAverage, averages[firstSelection])
			#Keep only the markers for which j beats the previously recorded best
			keep <- newAverage[match(secondSelection, markerName)] > averages[secondSelection]
			#And the corresponding bits of the previous relocation data that have to be overwritten
			overwrite <- match(secondSelection[keep], markerName)
			secondSelection <- secondSelection[keep]
			newChromosome[overwrite] <- rep(j, length(secondSelection))
			newAverage[overwrite] <- averages[secondSelection]
		}
	}
	#Actually do the re-arranging. seq_along() (rather than 1:length()) copes
	#with the case where no relocation was proposed at all.
	for(index in seq_along(markerName))
	{
		currentNewChromosome <- newChromosome[index]
		if(!(currentNewChromosome %in% excludeGroups))
		{
			grouped$lg$groups[markerName[index]] <- currentNewChromosome
		}
	}
	#Drop markers that remain mostly unlinked (average theta > 0.41) to their
	#final group.
	remove <- c()
	for(i in grouped$lg$all.groups)
	{
		indicesI <- which(grouped$lg$groups == i)
		diagonal <- grouped$rf$theta[indicesI, indicesI, drop=FALSE]
		additionalRemove <- which(apply(diagonal, 1, function(x) mean(x, na.rm=TRUE)) > 0.41)
		remove <- c(remove, additionalRemove)
	}
	markers <- colnames(grouped$founders)[order(grouped$lg$groups)]
	#Fixed: when nothing qualified for removal, markers[-integer(0)] selected
	#ZERO markers (negative indexing with an empty index drops everything);
	#only subset when there is actually something to drop.
	if(length(remove) > 0)
	{
		markers <- markers[-match(names(remove), markers)]
	}
	newGrouped <- subset(grouped, markers = markers)
	return(newGrouped)
}
# Merge linkage groups. `join` is a list of integer vectors; the groups named
# in each vector are merged into one. Merges are transitive: a later entry
# that mentions an already-merged group extends that merged group. Groups
# never mentioned keep their own (renumbered) ID. Returns a copy of mpcross
# with lg$groups remapped and lg$all.groups recomputed.
joinGroups <- function(mpcross, join)
{
	#The value contained at index i is the new group ID for that group (NA = unassigned)
	new.group.id <- rep(NA_real_, max(mpcross$lg$all.groups))
	next.group.id <- 1
	# seq_along() (rather than 1:length(join)) iterates zero times on an empty
	# join list instead of erroring on join[[1]].
	for(i in seq_along(join))
	{
		group <- as.integer(join[[i]])
		#None of these have existing groups, so we're creating a new group
		if(all(is.na(new.group.id[group])))
		{
			new.group.id[group] <- next.group.id
			next.group.id <- next.group.id + 1
		}
		else
		{
			#At least one of these is already part of a group, in fact there might be two groups being joined here.
			existing <- unique(na.omit(new.group.id[group]))
			#We're joining different groups: collapse everything involved down
			#to the smallest existing ID.
			if(length(existing) > 1)
			{
				new.group.id[unique(c(group, which(new.group.id %in% existing)))] <- min(existing)
			}
			else
			{
				new.group.id[group] <- existing
			}
		}
	}
	#Any group never mentioned in `join` keeps its own fresh ID.
	for(i in mpcross$lg$all.groups)
	{
		if(is.na(new.group.id[i]))
		{
			new.group.id[i] <- next.group.id
			next.group.id <- next.group.id + 1
		}
	}
	previous.names <- names(mpcross$lg$groups)
	mpcross$lg$groups <- new.group.id[mpcross$lg$groups]
	names(mpcross$lg$groups) <- previous.names
	mpcross$lg$all.groups <- unique(mpcross$lg$groups)
	return(mpcross)
}
# Replay a list of grouping commands and return the index of the first "join"
# command that brings marker1 and marker2 into the same group, or -1 if they
# are never joined. Each joins[[i]] is a list whose first element is the
# command name; for "join", elements 2 and 3 are the two group IDs merged
# (the merged group takes the smaller of the two IDs).
findJoinPoint <- function(mpcross, marker1, marker2, joins)
{
	# Track each marker's current group as the join commands are replayed.
	group1 <- mpcross$lg$groups[marker1]
	group2 <- mpcross$lg$groups[marker2]
	# seq_along() (rather than 1:length(joins)) returns -1 for an empty joins
	# list instead of erroring on joins[[1]].
	for(i in seq_along(joins))
	{
		join <- joins[[i]]
		if(join[[1]] == "join")
		{
			joinCommand <- c(join[[2]], join[[3]])
			if(all(c(group1, group2) %in% joinCommand))
			{
				# This command merges both markers' current groups.
				return(i)
			}
			else if(group1 %in% joinCommand)
			{
				group1 <- min(joinCommand)
			}
			else if(group2 %in% joinCommand)
			{
				group2 <- min(joinCommand)
			}
		}
	}
	return(-1)
}
|
bc4fbe0755f77b979042cf34135da449d9e951be | be29750bf5d0f705fb3f966f77bb857ff481231c | /exemplos/05-ggplot2.R | 5c62bc98ece880871a400919232a5d89626f475b | [
"MIT"
] | permissive | curso-r/intro-programacao-em-r-mestre | 6dcfdecac4ab049562154fe49413631a42811481 | 3a743ee8f6c25498fff63da47bbfd1dfa4182f00 | refs/heads/master | 2021-07-04T10:12:33.462192 | 2020-09-01T21:55:37 | 2020-09-01T21:55:37 | 165,696,442 | 7 | 4 | null | null | null | null | UTF-8 | R | false | false | 11,259 | r | 05-ggplot2.R |
# Load packages -----------------------------------------------------------
library(tidyverse)
# Read the IMDB data set --------------------------------------------------
imdb <- read_rds("dados/imdb.rds")
# lucro = profit, receita = revenue, orcamento = budget
imdb <- imdb %>% mutate(lucro = receita - orcamento)
# Scatter plots -----------------------------------------------------------
# Just the empty canvas
imdb %>%
  ggplot()
# Saving the plot in an object
p <- imdb %>%
  ggplot()
# Scatter plot of revenue against budget
imdb %>%
  ggplot() +
  geom_point(aes(x = orcamento, y = receita))
# Adding the line x = y
imdb %>%
  ggplot() +
  geom_point(aes(x = orcamento, y = receita)) +
  geom_abline(intercept = 0, slope = 1, color = "red")
# Notice how each element is a layer of the plot.
# Here we place the line layer before the layer
# with the points.
imdb %>%
  ggplot() +
  geom_abline(intercept = 0, slope = 1, color = "red") +
  geom_point(aes(x = orcamento, y = receita))
# Mapping the profit variable to the points' colour
imdb %>%
  ggplot() +
  geom_point(aes(x = orcamento, y = receita, color = lucro))
# Categorising the profit first ("lucrou" = made a profit; Não/Sim = No/Yes)
imdb %>%
  mutate(
    lucrou = ifelse(lucro <= 0, "Não", "Sim")
  ) %>%
  ggplot() +
  geom_point(aes(x = orcamento, y = receita, color = lucrou))
# Saving a plot to a file
imdb %>%
  mutate(
    lucrou = ifelse(lucro <= 0, "Não", "Sim")
  ) %>%
  ggplot() +
  geom_point(aes(x = orcamento, y = receita, color = lucrou))
ggsave("meu_grafico.png")
# Philosophy --------------------------------------------------------------
# A statistical graphic is a visual representation of data
# by means of aesthetic attributes (position, colour, shape,
# size, ...) of geometric forms (points, lines,
# bars, ...). Leland Wilkinson, The Grammar of Graphics
# Layered grammar of graphics: each element of a
# plot can be represented as a layer, and a plot is
# the superposition of those layers.
# Hadley Wickham, A layered grammar of graphics
# Exercises ---------------------------------------------------------------
# a. Make a scatter plot of the IMDB rating against the budget.
# hints: ggplot() aes() geom_point()
# b. Colour all the points of the plot blue. (potential gotcha =P)
# Line charts -------------------------------------------------------------
# Mean film rating over the years
imdb %>%
  group_by(ano) %>%
  summarise(nota_media = mean(nota_imdb, na.rm = TRUE)) %>%
  ggplot() +
  geom_line(aes(x = ano, y = nota_media))
# Number of colour and black-and-white films per year
imdb %>%
  filter(!is.na(cor)) %>%
  group_by(ano, cor) %>%
  summarise(num_filmes = n()) %>%
  ggplot() +
  geom_line(aes(x = ano, y = num_filmes, color = cor))
# Robert De Niro's mean rating per year
imdb %>%
  filter(ator_1 == "Robert De Niro") %>%
  group_by(ano) %>%
  summarise(nota_media = mean(nota_imdb, na.rm = TRUE)) %>%
  ggplot() +
  geom_line(aes(x = ano, y = nota_media))
# Adding points to the plot
imdb %>%
  filter(ator_1 == "Robert De Niro") %>%
  group_by(ano) %>%
  summarise(nota_media = mean(nota_imdb, na.rm = TRUE)) %>%
  ggplot() +
  geom_line(aes(x = ano, y = nota_media)) +
  geom_point(aes(x = ano, y = nota_media))
# Rewriting in a nicer way (shared aes() moved into ggplot())
imdb %>%
  filter(ator_1 == "Robert De Niro") %>%
  group_by(ano) %>%
  summarise(nota_media = mean(nota_imdb, na.rm = TRUE)) %>%
  ggplot(aes(x = ano, y = nota_media)) +
  geom_line() +
  geom_point()
# Writing the ratings on the plot
imdb %>%
  filter(ator_1 == "Robert De Niro") %>%
  group_by(ano) %>%
  summarise(nota_media = mean(nota_imdb, na.rm = TRUE)) %>%
  mutate(nota_media = round(nota_media, 1)) %>%
  ggplot(aes(x = ano, y = nota_media)) +
  geom_line() +
  geom_label(aes(label = nota_media))
# Exercise ----------------------------------------------------------------
# Plot the mean film budget over the years.
# hints: group_by() summarise() ggplot() aes() geom_line()
# Bar charts --------------------------------------------------------------
# Number of films per director in the data set
imdb %>%
  count(diretor) %>%
  top_n(10, n) %>%
  ggplot() +
  geom_col(aes(x = diretor, y = n))
# Dropping NA and colouring the bars
imdb %>%
  count(diretor) %>%
  filter(!is.na(diretor)) %>%
  top_n(10, n) %>%
  ggplot() +
  geom_col(
    aes(x = diretor, y = n),
    show.legend = FALSE
  )
# Flipping the coordinates
imdb %>%
  count(diretor) %>%
  filter(!is.na(diretor)) %>%
  top_n(10, n) %>%
  ggplot() +
  geom_col(
    aes(x = n, y = diretor),
    show.legend = FALSE
  )
# Ordering the bars
imdb %>%
  count(diretor) %>%
  filter(!is.na(diretor)) %>%
  top_n(10, n) %>%
  mutate(
    diretor = forcats::fct_reorder(diretor, n)
  ) %>%
  ggplot() +
  geom_col(
    aes(x = n, y = diretor, fill = diretor),
    show.legend = FALSE
  )
# Putting labels on the bars
top_10_diretores <- imdb %>%
  count(diretor) %>%
  filter(!is.na(diretor)) %>%
  top_n(10, n)
top_10_diretores %>%
  mutate(
    diretor = forcats::fct_reorder(diretor, n)
  ) %>%
  ggplot() +
  geom_col(
    aes(x = n, y = diretor),
    show.legend = FALSE
  ) +
  geom_label(aes(x = n/2, y = diretor, label = n))
# Exercises ---------------------------------------------------------------
# a. Turn the plot from the previous exercise into a bar chart.
# b. Redo the plot using only films from 1989 onwards.
# [ADVANCED] Bar charts II: positions and labels --------------------------
diretor_por_filme_de_drama <- imdb %>%
  mutate(filme_de_drama = str_detect(generos, "Drama")) %>%
  count(diretor, filme_de_drama) %>%
  filter(
    !is.na(diretor),
    !is.na(filme_de_drama),
    diretor %in% top_10_diretores$diretor
  ) %>%
  mutate(
    diretor = forcats::fct_reorder(diretor, n)
  )
# Colouring the bars with another variable
# new bits: fill = filme_de_drama and position = position_stack(vjust = 0.5)
diretor_por_filme_de_drama %>%
  ggplot(aes(x = n, y = diretor, group = filme_de_drama)) +
  geom_col(aes(fill = filme_de_drama)) +
  geom_label(aes(label = n), position = position_stack(vjust = 0.5))
# position dodge (side by side)
diretor_por_filme_de_drama %>%
  ggplot(aes(x = n, y = diretor, group = filme_de_drama)) +
  geom_col(aes(fill = filme_de_drama), position = position_dodge(width = 1, preserve = "single")) +
  geom_text(aes(label = n), position = position_dodge(width = 1), hjust = -0.1)
# position fill (stacked up to 100%)
diretor_por_filme_de_drama %>%
  ggplot(aes(x = n, y = diretor, group = filme_de_drama)) +
  geom_col(aes(fill = filme_de_drama), position = position_fill()) +
  geom_text(aes(label = n), position = position_fill(vjust = 0.5))
# Ordering here is a challenge =(
diretor_por_filme_de_drama %>%
  group_by(diretor) %>%
  mutate(proporcao_de_drama = sum(n[filme_de_drama])/sum(n)) %>%
  ungroup() %>%
  mutate(diretor = forcats::fct_reorder(diretor, proporcao_de_drama)) %>%
  ggplot(aes(x = n, y = diretor, group = filme_de_drama)) +
  geom_col(aes(fill = filme_de_drama), position = position_fill()) +
  geom_text(aes(label = n), position = position_fill(vjust = 0.5))
# Exercises ---------------------------------------------------------------
# a. Make a stacked bar chart crossing colour (cor) and rating (classificacao)
# hint: geom_col(position = "fill")
# b. add + scale_fill_brewer(palette = "Set3") to the plot
# Histograms and boxplots --------------------------------------------------
# Histogram of the profit of Steven Spielberg's films
imdb %>%
  filter(diretor == "Steven Spielberg") %>%
  ggplot() +
  geom_histogram(aes(x = lucro))
# Adjusting the bin width
imdb %>%
  filter(diretor == "Steven Spielberg") %>%
  ggplot() +
  geom_histogram(
    aes(x = lucro),
    binwidth = 100000000,
    color = "white"
  )
# Boxplot of the profit of films by directors
# who made 15 or more films
imdb %>%
  filter(!is.na(diretor)) %>%
  group_by(diretor) %>%
  filter(n() >= 15) %>%
  ggplot() +
  geom_boxplot(aes(x = diretor, y = lucro))
# Ordering by the median
imdb %>%
  filter(!is.na(diretor)) %>%
  group_by(diretor) %>%
  filter(n() >= 15) %>%
  ungroup() %>%
  mutate(diretor = forcats::fct_reorder(diretor, lucro, na.rm = TRUE)) %>%
  ggplot() +
  geom_boxplot(aes(x = diretor, y = lucro))
# Exercises ---------------------------------------------------------------
#a. Find the 5 actors that appear most often in the ator_1 column.
# hint: count() top_n()
#b. Make a boxplot of the profit of those actors' films.
# Title and labels ---------------------------------------------------------
# Labels (axis/legend titles are user-facing strings, kept in Portuguese)
imdb %>%
  ggplot() +
  geom_point(mapping = aes(x = orcamento, y = receita, color = lucro)) +
  labs(
    x = "Orçamento ($)",
    y = "Receita ($)",
    color = "Lucro ($)",
    title = "Gráfico de dispersão",
    subtitle = "Receita vs Orçamento"
  )
# Scales
imdb %>%
  group_by(ano) %>%
  summarise(nota_media = mean(nota_imdb, na.rm = TRUE)) %>%
  ggplot() +
  geom_line(aes(x = ano, y = nota_media)) +
  scale_x_continuous(breaks = seq(1916, 2016, 10)) +
  scale_y_continuous(breaks = seq(0, 10, 2))
# Zooming the view of the plot (coord_cartesian zooms without dropping data)
imdb %>%
  group_by(ano) %>%
  summarise(nota_media = mean(nota_imdb, na.rm = TRUE)) %>%
  ggplot() +
  geom_line(aes(x = ano, y = nota_media)) +
  scale_x_continuous(breaks = seq(1916, 2016, 10)) +
  scale_y_continuous(breaks = seq(0, 10, 2)) +
  coord_cartesian(ylim = c(0, 10))
# Colours -----------------------------------------------------------------
# Picking colours by name
imdb %>%
  count(diretor) %>%
  filter(!is.na(diretor)) %>%
  top_n(5, n) %>%
  ggplot() +
  geom_bar(
    aes(x = n, y = diretor, fill = diretor),
    stat = "identity",
    show.legend = FALSE
  ) +
  scale_fill_manual(values = c("orange", "royalblue", "purple", "salmon", "darkred"))
# http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf
# Picking colours by hex code
imdb %>%
  count(diretor) %>%
  filter(!is.na(diretor)) %>%
  top_n(5, n) %>%
  ggplot() +
  geom_bar(
    aes(x = n, y = diretor, fill = diretor),
    stat = "identity",
    show.legend = FALSE
  ) +
  scale_fill_manual(
    values = c("#ff4500", "#268b07", "#ff7400", "#abefaf", "#33baba")
  )
# Changing the legend's text
imdb %>%
  filter(!is.na(cor)) %>%
  group_by(ano, cor) %>%
  summarise(num_filmes = n()) %>%
  ggplot() +
  geom_line(aes(x = ano, y = num_filmes, color = cor)) +
  scale_color_discrete(labels = c("Preto e branco", "Colorido"))
# Setting the colour of the geoms directly (outside aes())
imdb %>%
  ggplot() +
  geom_point(mapping = aes(x = orcamento, y = receita), color = "#ff7400")
# Themes ------------------------------------------------------------------
# Ready-made themes
imdb %>%
  ggplot() +
  geom_point(mapping = aes(x = orcamento, y = receita)) +
  # theme_bw()
  # theme_classic()
  # theme_dark()
  theme_minimal()
# The theme() function
imdb %>%
  ggplot() +
  geom_point(mapping = aes(x = orcamento, y = receita)) +
  labs(
    title = "Gráfico de dispersão",
    subtitle = "Receita vs Orçamento"
  ) +
  theme(
    plot.title = element_text(hjust = 0.5),
    plot.subtitle = element_text(hjust = 0.5)
  )
|
613c1868037cf8fb53562dd4c08f5985bd39327d | 4d265c3f4046c3edd1bac44a9894d466526d9d1d | /chap01/03textMining.R | e32608259d5e8772d01716226cb20524fcad643e | [] | no_license | kjy3309/R_study | 3caa3c0753c3f32b8aefe69afbb9ae97ed7288ce | 6ff03f9b4940116a551aaa5e186559400c7746d0 | refs/heads/master | 2023-01-22T06:07:11.477448 | 2020-11-12T04:49:27 | 2020-11-12T04:49:27 | 312,169,482 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 813 | r | 03textMining.R | library(dplyr) # 전처리
library(KoNLP) # Korean morphological analyser
library(stringr) # string handling
library(wordcloud2) # word-cloud visualisation
library(rvest) # scraping html elements
library(RColorBrewer) # R colour palettes
useNIADic()
url <- 'http://www.itworld.co.kr/main/'
# Fixed: the argument was misspelled as `eencoding`, so it fell into `...`
# and the page encoding was silently ignored. 'CP949' is the iconv name for
# the Korean Windows code page (EUC-KR superset).
html_source <- read_html(url, encoding = 'CP949')
# Extract the text inside the <a> tags
title <- html_source %>% html_nodes('a') %>% html_text()
head(title)
# Replace every non-word character with a space
title <- str_replace_all(title, '\\W', ' ')
# Extract nouns
nouns <- extractNoun(title)
word_cnt <- table(unlist(nouns))
class(unlist(nouns))
# Convert the frequency table to a data frame, keep the 200 most frequent
df_word <- as.data.frame(word_cnt, stringsAsFactors = FALSE)
df_word <- df_word %>% arrange(desc(Freq)) %>% head(200)
pal <- brewer.pal(8, 'Dark2')
wordcloud2(df_word, size = 0.9, color = pal)
|
d05d2415cb0c01f9002831b89f97489ed318e141 | 6b769ade12829c97f7aa9930265418bede361967 | /man/Table1_5.Rd | 5d0e9567df419389888cc9a90acd3169bcbee452 | [] | no_license | brunoruas2/gujarati | 67b579a4fde34ae1d57f4a4fd44e5142285663c1 | a532f2735f9fa8d7cd6958e0fc54e14720052fd4 | refs/heads/master | 2023-04-07T01:15:45.919702 | 2021-04-04T15:10:58 | 2021-04-04T15:10:58 | 284,496,240 | 14 | 6 | null | null | null | null | UTF-8 | R | false | true | 410 | rd | Table1_5.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table1_5.R
\docType{data}
\name{Table1_5}
\alias{Table1_5}
\title{Table 1.5
Seasonally Adjusted M1 Supply: 1959:01 to 1999:07 (billions of dollars)}
\format{
\itemize{
\item \strong{Year:Month}
\item \strong{M1}
}
}
\usage{
data('Table1_5')
}
\description{
Source: Board of Governors, Federal Reserve Bank, USA
}
\keyword{datasets}
|
89312af6215b5332ea6909f53cfc6c403d48dead | a632a57196002af7477f6f45a55fd1c150cb9724 | /cachematrix.R | f01e2560a5d42577861e7126c58079bd1e5eaa09 | [] | no_license | MichaelLHerman/ProgrammingAssignment2 | f506f82fd86fc95e7ab768f761ed4b5714a5eeb2 | 5c5aceaecaa96c214d662b2a031f371f9c525ff2 | refs/heads/master | 2023-01-25T05:17:08.430843 | 2023-01-12T19:35:26 | 2023-01-12T19:35:26 | 22,898,358 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,350 | r | cachematrix.R | ## ProgrammingAssignment2 for Coursera course `R Programming` by Roger D. Peng, PhD, Jeff Leek, PhD, Brian Caffo, PhD
# makeCacheMatrix wraps a matrix together with a cache for its inverse.
# Arguments: x a matrix
# Value: a list of accessor closures — set/get for the matrix itself and
# setInverse/getInverse for the cached inverse. Changing the matrix via
# set() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    # The stored inverse no longer matches the matrix, so drop it.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(value) {
    cached_inverse <<- value
  }
  getInverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
# cacheSolve returns the inverse of the special "matrix" produced by
# makeCacheMatrix. On a cache hit (the matrix has not changed since the
# inverse was last computed) the cached value is returned and a message is
# emitted; otherwise the inverse is computed with solve(), stored in the
# cache, and returned.
# Arguments: x a wrapped matrix created by makeCacheMatrix
#            ... further arguments forwarded to solve()
# Value: the inverse of the wrapped matrix
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, remember, and return the fresh inverse.
    inverse <- solve(x$get(), ...)
    x$setInverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
|
4def28b414c9f3e9466ddbbfcd98987dc076957a | eacb17556e69c2ce33abc189262164618f66ca90 | /rejected.R | ae272aa86335771a80b84ec1ec8a23cfdd019738 | [
"Apache-2.0"
] | permissive | facorread/taskr | 670e08f5a00cd5338a04bb854df49490720b2c73 | 3441cd1830f9087519577e1bf92ab0bf430834e6 | refs/heads/master | 2020-12-21T21:31:41.579376 | 2020-07-03T19:08:44 | 2020-07-03T19:08:44 | 236,568,392 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 147 | r | rejected.R | # This looks alright but it overcomplicates the code.
# Rejected approach, kept for reference: build the subsetting condition as a
# text expression and eval() it when indexing the task table.
# NOTE(review): eval(parse(text = ...)) is generally an anti-pattern; a plain
# logical index on taskr$t would express the same thing without string parsing.
overdueE <- parse(text='(date < Sys.Date()) & (state == "Pending")')
# Evaluates the parsed expression in the current environment — presumably
# `date` and `state` resolve to columns of taskr$t here; TODO confirm against
# the taskr definition.
taskr$t[eval(overdueE)]
|
35a4e344f300e0c720c76e132ff2f724a1b9ccf4 | 3176bef3d05e202483010f8e3215d6bd1b5ef390 | /R/converters.R | f01562df358b962b10c6b70a444ecfd70fec3160 | [
"MIT"
] | permissive | BlocklancerOrg/ethReum | dcd7af91cd7a572a8e7b376e69bff02e2d8d8ab3 | 561c2a3b2233af935fa9267ae84abee9c4ccc7f9 | refs/heads/master | 2021-09-20T18:27:14.543351 | 2018-08-13T23:07:14 | 2018-08-13T23:07:14 | 113,497,628 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,186 | r | converters.R | #' Convert hex to decimal value
#'
#' @param hex character, hex value, '0x' prefixed.
#' @return numeric, decimal equivalent
#' @export
hexDec <- function(hex) {Rmpfr::mpfr(hex, base=16)}
#' Convert decimal to hex value
#'
#' @param dec numeric or character, an integer value formatted as character if
#' over 32bit max (2147483647).
#' @return character, hex equivalent, '0x' prefixed.
#' @export
decHex <- function(dec) {
  # 32-bit signed integer maximum. Numeric (double) inputs above this are
  # rejected: callers should pass large integers as strings so no precision
  # is lost before the big-integer conversion below.
  dmax <- 2147483647
  if(is.numeric(dec) && (dec > dmax)) {
    stop("Numbers greater than 32bit max (2147483647) should be
         formatted as type \"character\" to preserve precision.")
  }
  # gmp::as.bigz accepts both numerics and decimal strings; render in base 16
  # and add the conventional '0x' prefix.
  result <- paste0("0x", as.character(gmp::as.bigz(dec), b = 16))
  return(result)
}
#' Convert Ethereum Denominations
#'
#' @param x numeric, number to be converted
#' @param from character, 2 letter short notation to convert from (see details)
#' @param to character, 2 letter short notation to convert to (see details)
#'
#' @return mpfr converted value (see details)
#' @export
#'
#' @examples
#' ethCon(15000000000000000, "we", "et")
#' ethCon(1000000000, "et", "me")
#' @details This function returns a Multiple Precision Floating-Point Reliable
#' number (implemented by the Rmpfr package). This is to allow for the conersion
#' of very high values of Wei into other forms. This can then be converted to
#' numeric, integer or double as required at the expense of accuracy. WARNING do
#' not use this in calculating values to transact actual ether as the precision is not
#' guaranteed currently. This is intended for indicative conversions when parsing
#' data from the Ethereum blockchain. \cr
#' \cr
#' Denomination short codes:\cr
#' wei = we, Kwei = kw, Mwei = mw, Gwei = gw \cr
#' szabo = sz, finney = fi, ether = et \cr
#' Kether = ke, Mether = me,Gether = ge, Tether = te \cr
ethCon <- function(x, from, to) {
  # Load the lookup table into this function's environment rather than the
  # caller's global environment (utils::data() defaults to .GlobalEnv).
  utils::data("conversion_table", envir = environment())
  # Conversion factors for the source and target denominations, promoted to
  # arbitrary-precision mpfr so very large Wei amounts keep their digits.
  a <- conversion_table[(conversion_table$short == from), ]$conversion
  b <- conversion_table[(conversion_table$short == to), ]$conversion
  # Fail loudly on an unknown short code instead of silently returning a
  # zero-length result.
  if (length(a) == 0 || length(b) == 0) {
    stop("Unknown denomination short code; see ?ethCon for valid codes.",
         call. = FALSE)
  }
  a <- Rmpfr::mpfr(a, Rmpfr::getPrec(a))
  b <- Rmpfr::mpfr(b, Rmpfr::getPrec(b))
  result <- x * (b/a)
  return(result)
}
|
8e34b83c91bb72a04a0ffdc21a9407f2c4577241 | 81a2139233fb15960f3d947cbfe97ccd04b82929 | /server.R | b3cb3abfaca936a75512db6d7b3dad8830fae8ef | [] | no_license | mengeln/B13-2 | b437d0b0495025b0ffdb02e732e24b271cc572f8 | 27a7a2b123cb3977685eb8c2c4df03ba28028ba0 | refs/heads/master | 2021-01-23T03:28:11.477900 | 2014-06-27T22:15:29 | 2014-06-27T22:15:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,679 | r | server.R |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://www.rstudio.com/shiny/
#
library(shiny)
library(knitr)
library(xtable)
source("r/entero.r")
source("r/hf183.r")
shinyServer(function(input, output) {
  #process data#
  # Reactive holding the processed assay results; recomputed only when the
  # "ent_process" action button is pressed (isolate() prevents re-runs on
  # every other input change).
  results <- reactive({
    if(input$ent_process == 0)return(NULL)
    isolate({
      # Resolve the processing function by assay name — presumably one of the
      # functions sourced from r/entero.r or r/hf183.r; TODO confirm the
      # Assay choices match those function names.
      processor <- get(input$Assay)
      # Process data; try() captures failures so the UI can show the message.
      result <- try(processor(input$data_file$datapath,
                              input$sketa_file, input$plat,
                              input$org))
      # NOTE(review): class(result) may have length > 1 (e.g. data frames),
      # making `success` a vector; `any(class(result) == "try-error")` would
      # be safer — TODO confirm processor return types.
      success <- class(result) != "try-error"
      # Store a copy of the uploaded file when archiving is enabled and
      # processing succeeded.
      # NOTE(review): the destination path is built from raw user inputs
      # (org, describe) — consider sanitising to avoid path issues.
      if(input$db & success){
        time <- gsub(" ", "-", Sys.time())
        file.copy(input$data_file$datapath,
                  paste0("submissions/",
                         input$Assay, "/",
                         input$org, "/",
                         input$date, "_",
                         time, "_",
                         input$describe, ".csv"))
      }
      result
    })
  })
  # Knit the per-assay LaTeX template to PDF and serve it for download.
  output$goodresults <- downloadHandler(
    filename = "report.pdf",
    content = function(f){
      knit2pdf(input=paste0("templates/", input$Assay, "/report.Rtex"),
               output = paste0("templates/", input$Assay, "/report.tex"),
               compiler="xelatex")
      file.copy(paste0("templates/", input$Assay, "/report.pdf"),
                f)
    })
  # Show the error text on failure, nothing before processing, or the
  # download button on success.
  output$result <- renderUI({
    if(class(results()) == "try-error")
      renderText(results())
    else if(is.null(results()))
      NULL
    else
      downloadButton("goodresults", "Download Results")
  })
})
|
eb35743a840529f597a6aba2b5f2cb8ed1e31286 | 8bddfa712c7a264d2fb4e9b9e15303750ddeef3d | /R/reporting.r | b45df648c0544e19054a914edc0616fe74776711 | [] | no_license | crbwin/clnR | 38c88a198e19e4c19de8f86216343e9ddd8010fb | d181cef228a85b8473bb131acaf853501ee51b66 | refs/heads/master | 2023-01-01T13:00:34.051591 | 2020-10-27T21:13:56 | 2020-10-27T21:13:56 | 281,728,881 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,221 | r | reporting.r |
#' Produce a formatted table of proportions of frequency for a single variable
#'
#' Drops missing values, counts occurrences of each level of the variable, and
#' renders the percentages as a transposed kableExtra HTML table.
#'
#' @param data data frame
#' @param ... name of a single variable
#' @param wide should the table be full width or compressed?
#' @return a formatted table of proportions of frequency for a single variable
#' @export
tabshell <- function(data, ..., wide = TRUE){
  vector <- quos(...)
  # NOTE(review): the quos()/!!! splicing below relies on exactly one variable
  # being supplied (as documented); is.na() would error on several.
  tab <- data %>%
    filter(!is.na(!!!vector)) %>%
    group_by(!!!vector) %>%
    summarise(n = n()) %>%
    mutate('Proportion' = paste0(round((n / sum(n)), 3)*100, "%"))
  # Drop the raw count column (column 2) and transpose so levels run across.
  tab[, -c(2)]%>% t() %>% kable() %>% kable_styling(bootstrap_options = c("striped", "hover"), full_width = wide)
}
#' Produce a short report of descriptives
#'
#' This function produces descriptives in a 'nutshell': n, mean, sd, and the
#' observed range, rendered as a kableExtra HTML table.
#'
#' @param data data frame
#' @param vector quosure of items (for type = "group") or name of a single item
#'   (for type = "single") to analyze
#' @param type is this for a single variable ('single') or a group of variables ('group')?
#' @return a table of n, mean, and sd and range for variables
#' @export
nutshell <- function(data, vector, type = "group"){
  if(type=="single"){
    # A single bare column name: capture it so it can be unquoted below.
    vector <- enquo(vector)
    desc <- data %>%
      select(!!vector) %>%
      psych::describe %>% round(., digits = 2) %>% data.frame()
  } else{
    # "group": `vector` is expected to already be a list of quosures (quos()).
    desc <- data %>%
      select(!!!vector) %>%
      describe %>% round(., digits = 2) %>% data.frame()
  }
  # Observed range as a "min - max" string, appended as its own column.
  scale <- paste(desc$min, "-", desc$max)
  desc %>%
    select(c("n", "mean", "sd")) %>%
    cbind(., scale) %>%
    kable %>% kable_styling(bootstrap_options = c("striped", "hover"))
}
#' Produce Cronbach alpha if dropped output using quosures
#'
#' Reports the scale's standardized Cronbach's alpha and, for each item, the
#' alpha the scale would have if that item were dropped. Items whose removal
#' would raise alpha above the overall value are italicized; the overall row
#' is bolded.
#'
#' @param data data frame
#' @param items quosure of items to analyze
#' @param name optional vector of item names/wording, shown as an extra column
#' @param neg.corr are some items negatively correlated? This reverses those items to get an accurate estimate of alpha
#' @return a formatted kableExtra table of alpha-if-dropped values plus the
#'   overall standardized alpha (from the psych package's 'alpha' function)
#' @export
alpha.drop <- function(data, items, name=NULL, neg.corr = FALSE){
  alpha <- data %>% select(!!!items) %>% psych::alpha( check.keys = neg.corr)
  # Item labels, plus a spacer row and an "Overall" row for the full scale.
  m_names <- rownames(alpha$alpha.drop)
  m_names <- c(m_names, "", "Overall")
  # Column 2 of alpha.drop holds the standardized alpha-if-dropped values.
  a_drop <- round(alpha$alpha.drop[, 2], digits = 3)
  # Rows where dropping the item would IMPROVE alpha (flagged in italics below).
  row.drop <- which(a_drop>alpha$total$std.alpha) %>% as.numeric()
  a_drop <- c(a_drop, "", round(alpha$total$std.alpha, digits = 3))
  n <- length(a_drop)
  if (is.null(name)){
    tab <- cbind("Item" = m_names, "Alpha if dropped" = a_drop)
    if(!is_empty(row.drop)){
      tab %>% kable(align=c('l', 'c')) %>% row_spec( n, bold = T) %>% row_spec(row.drop, italic = T) %>% kable_styling( full_width = F)
    }else{
      tab %>% kable(align=c('l', 'c')) %>% row_spec(n, bold = T, italic = T) %>% row_spec(n, italic = T) %>% kable_styling( full_width = F)
    }
  } else {
    # With item wording supplied, prepend it as an extra (blank-header) column;
    # the spacer/overall rows get empty strings to keep lengths aligned.
    blank <- c(name, "", "")
    tab <- cbind(" " = blank, "Item" = m_names, "Alpha if dropped" = a_drop)
    if(!is_empty(row.drop)){
      tab %>% kable(align=c('l','l', 'c')) %>% row_spec( n, bold = T) %>% row_spec(row.drop, italic = T) %>% kable_styling( full_width = F)
    }else{
      tab %>% kable(align=c('l','l', 'c')) %>% row_spec(n, bold = T, italic = T) %>% row_spec(n, italic = T) %>% kable_styling( full_width = F)
    }
  }
}
#' Produce Cronbach alphas for a list of composites using quosures
#'
#' For each composite in `x`, this function looks up a quosure named
#' `q_<composite>` (a trailing "_r" in the composite's column name is stripped
#' first). When that quosure exists and holds more than two items, the
#' composite's Cronbach's alpha is computed via alpha.only(); otherwise a
#' blank placeholder is returned for that composite.
#'
#' @param data data frame
#' @param x quosure of composites to analyze
#' @return a character vector of Cronbach's alphas (or blanks) for the composites
#' @export
table.alpha <- function(data, x){
  var.name <- c()
  qq <- c()
  alphas <- c()
  # Last n characters of a string (used to detect reverse-scored "_r" names).
  substrRight <- function(x, n){
    substr(x, nchar(x)-n+1, nchar(x))
  }
  # seq_along() is safe when x is empty (1:length(x) would iterate over 1:0).
  for(i in seq_along(x)){
    var.name <- data %>% select(!!!x[[i]]) %>% names()
    # Strip a trailing "_r" so reversed composites map to their base quosure name.
    if(substrRight(var.name,2)=="_r"){
      var.name <- substr(var.name, 1, nchar(var.name) - 2)
    }
    # By convention the constituent items live in a quosure named q_<name>.
    gg <- paste0("q_", var.name)
    gg2 <- as.name(gg)
    qq <- enquo(gg2)
    if(!exists(gg)){
      alphas[i] <- " "
    } else if(length(eval(rlang::quo_get_expr(qq)))>2){
      alphas[i] <- data %>% alpha.only(eval(rlang::quo_get_expr(qq)))
    } else{
      alphas[i] <- " "
    }
  }
  # If every entry is a single blank, no q_* quosures were found: warn the caller.
  if(sum(nchar(alphas)) < length(x)+1){
    print("Warning: Alphas for composites have not been computed. To compute, quosures including the constituent variables of each composite must exist in the form of 'q_comp_name'.")
  }
  alphas
}
#' Produces a formatted table of descriptives
#'
#' This function produces a table of descriptive output (n, mean, sd, etc.,
#' rounded to 2 digits) for a list of variables, optionally with Cronbach's
#' alphas and optionally rendered as a kableExtra HTML table.
#'
#' @param data data frame
#' @param vars quosure of items to analyze
#' @param names a vector of names to replace variable names for increased clarity
#' @param copy Would you like to copy the table to a spreadsheet or doc? The 'copiable' version is not formatted, such that it's easier to copy the matrix of information.
#' @param alpha Would you like to include Cronbach's alphas? This calls 'table.alpha' to calculate alphas for each composite
#' @return a table of descriptives for a group of variables
#' @export
table.describe <- function(data, vars, names = NULL, copy = TRUE, alpha = FALSE){
  # Build the base descriptives, dropping psych::describe columns that are not
  # reported (vars index, trimmed mean, mad, skew/kurtosis/se block).
  if(!is.null(names)){
    gr.desc <- data %>% select(!!!vars) %>% psych::describe() %>% data.frame() %>% select(-c(1, 6, 7, 10:13))
    gr.desc <- gr.desc %>% cbind.data.frame(names, .)
  } else{
    gr.desc <- data %>% select(!!!vars) %>% psych::describe() %>% data.frame() %>% select(-c(1, 6, 7, 10:13)) %>% tibble::rownames_to_column("names")
  }
  # Helper: round numeric columns to 2 digits (used with across() below).
  round.new <- function(x){
    round(x, 2)
  }
  if(alpha==TRUE){
    # Alphas require q_* quosures to exist; see table.alpha's documentation.
    alpha <- table.alpha(data, vars)
    if(copy==TRUE){
      gr.desc %>% mutate(across(where(is.numeric), round.new)) %>% cbind.data.frame(., alpha) %>% relocate(., "alpha", .before = "mean") %>% kable() %>%
        kable_styling(bootstrap_options = c("striped", "hover", "responsive"), font_size = 12, full_width = F)
    } else{
      # Unformatted data frame, easier to copy into a spreadsheet/doc.
      gr.desc %>% mutate(across(where(is.numeric), round.new)) %>% cbind.data.frame(., alpha) %>% relocate(., "alpha", .before = "mean")
    }
  }else{
    if(copy==TRUE){
      gr.desc %>% mutate(across(where(is.numeric), round.new)) %>% kable() %>% kable_styling(bootstrap_options = c("striped", "hover", "responsive"), font_size = 12, full_width = F)
    } else{
      gr.desc %>% mutate(across(where(is.numeric), round.new))
    }
  }
}
#' Partial correlation for a model's first predictor
#'
#' Computes the square root of the partial R-squared of the first term in a
#' fitted model (via the 'rsq' package), rounded to three digits.
#' (The previous roxygen block described Cronbach's alpha, which this
#' function does not compute.)
#'
#' @param model a fitted model object (e.g. from lm)
#' @return the partial correlation (sqrt of the first partial R^2), rounded
#'   to 3 digits
#' @export
partR <- function(model){
  sqrt(rsq::rsq.partial(model)$partial.rsq[1]) %>% round(., digits = 3)
}
#' Produces formatted histogram
#'
#' This function, exported from the local package 'vizzify', produces a
#' histogram. For factors it shows the count and proportion above/below each
#' bar; for numeric variables it shows a density histogram instead (counts
#' and proportions would clog the many thin bins).
#'
#' @param data data frame
#' @param x variable to visualize
#' @param type specifies whether the variable is a factor or numeric variable. Because numeric variables have many thin bins, this removes proportion
#' and count data that would clog the graph
#' @param title main title for the top of the graph
#' @param subtitle subtitle to go under the main title
#' @param xlab label for the x axis
#' @param ylab label for the y axis; default is "Count"
#' @return Formatted histogram
#' @export
histomize <- function(data, x, type = "factor", title, subtitle, xlab, ylab = "Count"){
  # NOTE(review): title/subtitle/xlab/ylab are captured with enquo() but then
  # passed unevaluated to ggtitle()/xlab()/ylab() — confirm they render as
  # intended (plain strings may be the expected input here).
  title <- enquo(title)
  subtitle <- enquo(subtitle)
  xlab <- enquo(xlab)
  ylab <- enquo(ylab)
  x_var <- enquo(x)
  if(type=="numeric"){
    # Numeric: density histogram, no per-bin labels.
    data %>%
      filter(!is.na(!!x_var)) %>%
      ggplot() +
      geom_histogram(aes(x = !!x_var, y = ..density..), fill = "#599ad3", bins = 35) +
      ggtitle(title, subtitle) +
      theme_classic() +
      xlab(xlab) + ylab("Density")
  } else if(type=="factor") {
    # Factor: bar chart with count above each bar and percentage inside it.
    data %>%
      filter(!is.na(!!x_var)) %>%
      ggplot() +
      geom_bar(aes(x = !!x_var), fill = "#599ad3") +
      ggtitle(title, subtitle) +
      theme_classic() +
      xlab(xlab) + ylab(ylab) +
      #geom_text(stat = "count", aes(x = !!x_var, label = ..count.., y = ..count..), vjust = -.5) +
      geom_text(stat = "count",
                aes(x = !!x_var, label=..count.., y = ..count..),
                vjust = -.5) +
      geom_text(stat = "count",
                aes(x = !!x_var, label= paste0(" (", format(..prop..*100, digits = 1), "%)"), y = ..count.., group = 1),
                vjust = +1.5)
  } else {
    print("Not discrete or continuous variable")
  }
}
|
4b28452c5ad56e76ade82d64aa6d027625e3f0dd | 319f27b05eac6b53d6e231446bd9ea05dda0ba69 | /rstudio-project/RandomForest_classification.R | de58da8740460c6e137e77c8845ed40b697369d2 | [] | no_license | adamnapieralski/mow-activities-classification | 427907b723be2513bc1dbf5e21652bf7e0d31125 | 883da7d4050db1a085470f75c6024b6f3d1eac6f | refs/heads/develop | 2022-09-09T00:21:39.863909 | 2020-06-03T13:42:43 | 2020-06-03T13:42:43 | 247,791,246 | 0 | 0 | null | 2020-06-02T13:08:20 | 2020-03-16T18:41:09 | R | UTF-8 | R | false | false | 2,813 | r | RandomForest_classification.R | RandomForest_classification <-function(train_data_and_classes, validate_data_and_classes) {
# Body of RandomForest_classification(train_data_and_classes, validate_data_and_classes):
# trains random forests with three tree counts (250/500/1000) under 5-fold CV,
# picks the best fold model per tree count, and evaluates it on the validation set.
# NOTE(review): results are also written to the GLOBAL environment via <<-
# (random_data.*), which callers may rely on — left as-is.
random_data.learning <<- list()
random_data.preds <<- list()
random_data.preds.probs <<- list()
random_data.preds.class <<- list()
random_data.cm <<- list()
random_data.mroc <<-list()
# Outcome column must be a factor for randomForest classification.
train_data_and_classes_f <- train_data_and_classes
train_data_and_classes_f$Class <- factor(train_data_and_classes_f$Class)
validate_data_and_classes$Class <- factor(validate_data_and_classes$Class)
# for k-cross validation: randomly assign each row to one of K blocks.
n <- nrow(train_data_and_classes)
K <- 5
tail <- n%/%K
alea <- runif(n)
rn <- rank(alea)
bloc <- as.factor((rn - 1)%/%tail + 1)
random_data.err <<- list()
# Each tree-count configuration stores its results at an index offset (shift).
shift<-list(0,10,20)
for(var in 1:3){
current=as.numeric(shift[var])
if(var==1){
number_of_trees=250
}else if (var==2){
number_of_trees=500
}else{
number_of_trees=1000
}
# 5-fold CV: train on all blocks except k, evaluate on block k.
for (k in 1:K) {
random_data.learning[[k+current]] <- randomForest(Class~., data=train_data_and_classes_f[bloc != k,], ntree=number_of_trees)
random_data.preds.class[[k+current]] <- predict(random_data.learning[[current+k]], train_data_and_classes_f[bloc==k,], type="response")
random_data.preds.probs[[k+current]] <- predict(random_data.learning[[current+k]], train_data_and_classes_f[bloc==k,], type="prob")
random_data.cm[[k+current]] <- confusionMatrix(random_data.preds.class[[k+current]], train_data_and_classes_f[bloc==k,]$Class)
random_data.err[[k+current]] <- random_data.cm[[current+k]][["overall"]][["Accuracy"]]
# random_data.mroc[[k+current]] <- multiclass.roc(validate_data_and_classes[bloc==k,]$Class, random_data_reduced.preds.probs[[current+k]])
}
# Keep the fold model with the best accuracy, slot 6 of this configuration,
# and evaluate it on the held-out validation set (accuracy + multiclass ROC).
random_data.learning[[6+current]] <-random_data.learning[[match(max(unlist(random_data.err[current:(current+5)])), random_data.err)]]
random_data.preds.class[[6+current]] <- predict(random_data.learning[[current+6]], newdata=subset(validate_data_and_classes, , -c(Class)), type="response")
random_data.preds.probs[[6+current]] <- predict(random_data.learning[[current+6]],newdata=subset(validate_data_and_classes, , -c(Class)), type="prob")
random_data.cm[[6+current]] <- confusionMatrix(random_data.preds.class[[6+current]], validate_data_and_classes$Class)
random_data.err[[6+current]] <- random_data.cm[[current+6]][["overall"]][["Accuracy"]]
random_data.mroc[[6+current]] <- multiclass.roc(validate_data_and_classes$Class, random_data.preds.probs[[current+6]])
}
# Bundle everything into a single (nested) list and return it.
random_data <- list(list("classifiers"=random_data.learning, "preds"=list("class"=random_data.preds.class, "probs"=random_data.preds.probs),
"cm"=random_data.cm, "err"=random_data.err, "mroc"=random_data.mroc))
return(random_data)
}
|
7c5ec4ce22b8fe88677d60a43265c7aa3c0a7143 | 12e622493b28b6f32df9d486249201ffde6f3ea2 | /CADS2018/Exercícios/Exerc_01.R | 78fe2e171072fc6a5dd4589a0fccc9b5f0c2e543 | [] | no_license | ivanzricardo/aulas_ENAP | da7fd61a49d08816cbecfe544b11284264a046cc | cdde0e74fb8e63e87a78aa4da4c2fd44c8effb08 | refs/heads/master | 2020-03-30T20:08:13.151758 | 2018-10-29T05:33:52 | 2018-10-29T05:33:52 | 151,574,609 | 0 | 0 | null | 2018-10-29T05:33:53 | 2018-10-04T13:18:29 | HTML | UTF-8 | R | false | false | 391 | r | Exerc_01.R | #### Coleta e Análise de dados secundários
library(tidyverse)
library(magrittr)
# Exercícios Aula 01 ----
# 1. Reescreva a expressão abaixo utilizando o %>%.
round(mean(divide_by(sum(1:10),3)),digits = 1)
# 2. Sem rodar, diga qual a saída do código abaixo. Consulte o help das funções caso precise.
2 %>%
add(2) %>%
c(6, NA) %>%
mean(na.rm = T) %>%
equals(5)
|
c82aa089a980fdde1e27280e365cd454e07c67d7 | 1689ffafecb3af7ef10dfc84b38c29b5ed3030fb | /tests/testthat/test_as_workbook.R | c6e2c6de8ef15c9bbcfa99d8b6d9187ba60454b9 | [] | no_license | cran/tatoo | 809a5bc82d7bd14818f5f0f0effd2d629efdd574 | ccbc213befc1c6de2fb08e6618398f32fa0cd398 | refs/heads/master | 2023-03-29T12:10:32.885209 | 2023-03-26T08:50:02 | 2023-03-26T08:50:02 | 90,194,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,880 | r | test_as_workbook.R | context("as_workbook")
#* @testing as_workbook
#* @testing as_workbook.default
#* @testing as_workbook.Tagged_table
#* @testing as_workbook.Mashed_table
#* @testing as_workbook.Composite_table
#* @testing as_workbook.Stacked_table
#* @testing as_workbook.Tatoo_report
# Exercise as_workbook() on every table flavour; each call must yield an
# openxlsx Workbook with the expected number/names of sheets.
test_that("as_workbook works as expected", {
  source(file.path(test_path(), 'testdata', 'testdata.R'))

  for(el in list(t_df1, t_mash_1, t_comp_1, t_stack_1))
  {
    # Check if workbook project is created without warnings or errors
    tres <- as_workbook(el, blah = "blubb", keepNA = TRUE)
    expect_is(tres, 'Workbook')

    # Check if workbook object contains the appropriate number of sheets:
    # one sheet for single tables, one sheet per element for Tatoo_reports.
    if(!inherits(el, 'Tatoo_report')){
      expect_identical(length(names(tres)), 1L)
    } else {
      expect_identical(names(tres), names(el))
    }
  }
})
# Named-region layout for Tagged_tables: the expected cell ranges shift as
# metadata (footer, then title/id/longtitle/subtitle) is removed.
test_that("Tagged_table and default named regions are created correctly", {
  source(file.path(test_path(), 'testdata', 'testdata.R'))

  # With a two-line footer: title block, table, header, body, footer regions.
  footer(t_tagged_1) <- c("blah", "blubb")
  wb <- as_workbook(t_tagged_1)
  res <- openxlsx::getNamedRegions(wb)

  expect_setequal(
    attr(res, "position"),
    c("A1:A3", "A5:D10", "A5:D5", "A6:D10", "A12:A13")
  )

  # Without the footer the footer region disappears.
  footer(t_tagged_1) <- NULL
  wb <- as_workbook(t_tagged_1)
  res <- openxlsx::getNamedRegions(wb)

  expect_setequal(
    attr(res, "position"),
    c("A1:A3", "A5:D10", "A5:D5", "A6:D10")
  )

  # With all metadata stripped only table/header/body regions remain,
  # starting one row lower.
  title(t_tagged_1) <- NULL
  table_id(t_tagged_1) <- NULL
  longtitle(t_tagged_1) <- NULL
  subtitle(t_tagged_1) <- NULL
  wb <- as_workbook(t_tagged_1)
  res <- openxlsx::getNamedRegions(wb)

  expect_setequal(
    attr(res, "position"),
    c("A2:D7", "A2:D2", "A3:D7")
  )
})
# Named-region layout for Mashed_tables, with and without tag metadata.
test_that("Mashed_table named regions are created correctly", {
  source(file.path(test_path(), 'testdata', 'testdata.R'))

  # Plain mashed table: full table, header row, body.
  wb <- as_workbook(t_mash_1)
  res <- openxlsx::getNamedRegions(wb)

  expect_identical(
    attr(res, "position"),
    c("A1:D21", "A1:D1", "A2:D21")
  )

  # Tagged mashed table adds title and footer regions around the table.
  wb <- as_workbook(t_mash_4)
  res <- openxlsx::getNamedRegions(wb)

  expect_identical(
    attr(res, "position"),
    c("A1:A3", "A5:J5", "A6:J11", "A6:J6", "A7:J11", "A13:A13")
  )
})

# Named-region layout for Composite_tables (multi-header tables), with and
# without tag metadata.
test_that("Composite_table named regions are created correctly", {
  source(file.path(test_path(), 'testdata', 'testdata.R'))

  wb <- as_workbook(t_comp_1)
  res <- openxlsx::getNamedRegions(wb)

  expect_setequal(
    attr(res, "position"),
    c("A1:I1", "A2:I8", "A2:I2", "A3:I8")
  )

  wb <- as_workbook(t_comp_3)
  res <- openxlsx::getNamedRegions(wb)

  expect_setequal(
    attr(res, "position"),
    c("A1:A3", "A5:G5", "A6:G12", "A6:G6", "A7:G12", "A14:A14")
  )
})
# devtools::load_all()
# outdir <- rprojroot::find_testthat_root_file("testout")
# save_xlsx(wb, file.path(outdir, "test.xlsx"), overwrite = TRUE)
|
4cbffd11759a1df3001148c9af2acabbc9a5a80b | 726b211e966ad297f2d6620d9d53df103c87682a | /scripts/prepare_things.R | 00052058b57cd0c5aa3910ffc3ec2d0cbdbf4e7d | [] | no_license | H3K4me3/TreeFriends | 452db0a0760da368139c31499b1b54f0a570924c | f0d39a6accaaa683a4cef993735a4a8e5ff4e351 | refs/heads/master | 2020-03-28T23:03:59.875898 | 2019-12-24T13:42:05 | 2019-12-24T13:42:05 | 149,274,285 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,551 | r | prepare_things.R |
library(here)
# NOTE(review): setwd() in a script is fragile; later relative "raw_data/..."
# paths depend on it. Consider using here::here() for those paths instead.
setwd(here())

# Ensure BiocManager is available for the Bioconductor installs below.
if (!requireNamespace("BiocManager"))
  install.packages("BiocManager")

### dbSNP package from Bioconductor #####-------------------------------
if (!requireNamespace('SNPlocs.Hsapiens.dbSNP151.GRCh38'))
  BiocManager::install("SNPlocs.Hsapiens.dbSNP151.GRCh38")

### BSgenome packages from Bioconductor #####----------------------------
if (!requireNamespace('BSgenome.Hsapiens.UCSC.hg38'))
  BiocManager::install("BSgenome.Hsapiens.UCSC.hg38")
if (!requireNamespace('BSgenome.Ptroglodytes.UCSC.panTro5'))
  BiocManager::install("BSgenome.Ptroglodytes.UCSC.panTro5")
if (!requireNamespace('BSgenome.Mmulatta.UCSC.rheMac8'))
  BiocManager::install('BSgenome.Mmulatta.UCSC.rheMac8')

### Install self-made BSgenome packages #####----------------------------
# Pre-built gorilla/orangutan genomes hosted on GitHub releases.
if (!requireNamespace('BSgenome.GGorilla.UCSC.gorGor5'))
  devtools::install_url("https://github.com/H3K4me3/BSgenome.GGorilla.UCSC.gorGor5/releases/download/virtual-0.0.1/BSgenome.GGorilla.UCSC.gorGor5_0.0.1.tar.gz")
if (!requireNamespace('BSgenome.PAbelii.UCSC.ponAbe2'))
  devtools::install_url("https://github.com/H3K4me3/BSgenome.PAbelii.UCSC.ponAbe2/releases/download/virtual-0.0.1/BSgenome.PAbelii.UCSC.ponAbe2_0.0.1.tar.gz")
### Misc R packages #####------------------------------------------------
local({
  # Install any of these CRAN/Bioconductor packages that are missing.
  # (The duplicate VariantAnnotation entry is kept from the original; the
  # second check is a no-op once the namespace is available.)
  ensure_installed <- function(pkg) {
    if (!requireNamespace(pkg))
      BiocManager::install(pkg)
  }
  pkgs <- c("VariantAnnotation", "here", "Rsamtools", "rtracklayer",
            "VariantAnnotation", "Biostrings", "BiocParallel", "phangorn")
  for (pkg in pkgs) {
    ensure_installed(pkg)
  }
})
### Check raw data #####------------------------------------------------
# File integrity: download the TOPMed VCF if absent, then verify its md5.
if (!file.exists("raw_data/ALL.TOPMed_freeze5_hg38_dbSNP.vcf.gz"))
  ## The current link can be expired, maybe host it in another place.
  download.file("https://gatech.box.com/shared/static/ls1cq5qr8x254w9su7v5pc3z4bjgmsub.gz",
                destfile = "raw_data/ALL.TOPMed_freeze5_hg38_dbSNP.vcf.gz")
stopifnot(tools::md5sum("raw_data/ALL.TOPMed_freeze5_hg38_dbSNP.vcf.gz") == "773e9e97759a4a5b4555c5d7e1e14313")

# Create a tabix index for the VCF if one does not exist yet.
if (!file.exists("raw_data/ALL.TOPMed_freeze5_hg38_dbSNP.vcf.gz.tbi"))
  Rsamtools::indexTabix("raw_data/ALL.TOPMed_freeze5_hg38_dbSNP.vcf.gz", format = "vcf")

# Check chain files
# The chain files are downloaded from ftp://hgdownload.soe.ucsc.edu/goldenPath/hg38/liftOver/
# Download (if needed) and verify a UCSC liftOver chain file.
#
# Downloads `url` into raw_data/chainfiles, decompresses it with gunzip, and
# optionally verifies the uncompressed file against an expected md5 checksum.
# Returns the path to the uncompressed chain file, invisibly.
prepare_chainfile <- function(url, md5 = NULL) {
  gzpath <- here::here("raw_data/chainfiles", basename(url))
  path <- here::here("raw_data/chainfiles", sub("\\.gz$", "", basename(url)))
  if (!file.exists(path)) {
    # mode = "wb" prevents text-mode newline translation from corrupting the
    # gzip stream on Windows (download.file defaults to text mode there).
    download.file(url, gzpath, mode = "wb")
    status <- system2("gunzip", gzpath, wait = TRUE)
    # Fail loudly if gunzip is unavailable or the archive is corrupt, instead
    # of falling through to a confusing md5sum() check on a missing file.
    if (status != 0)
      stop("gunzip failed for ", gzpath, call. = FALSE)
  }
  if (!is.null(md5))
    stopifnot(tools::md5sum(path) == md5)
  invisible(path)
}
# Fetch and verify the hg38 -> {gorGor5, panTro5, ponAbe2, rheMac8} liftOver
# chain files (md5 sums are of the uncompressed files).
prepare_chainfile(
  "http://hgdownload.cse.ucsc.edu/goldenpath/hg38/liftOver/hg38ToGorGor5.over.chain.gz",
  "fa532f74ede70ccc724badcf3a65acaf"
)
prepare_chainfile(
  "http://hgdownload.cse.ucsc.edu/goldenpath/hg38/liftOver/hg38ToPanTro5.over.chain.gz",
  "cf39846b96f245d1ff27942d1ab94461"
)
prepare_chainfile(
  "http://hgdownload.cse.ucsc.edu/goldenpath/hg38/liftOver/hg38ToPonAbe2.over.chain.gz",
  "18089c8a1b07268f8547a9402ce4d3b1"
)
prepare_chainfile(
  "http://hgdownload.cse.ucsc.edu/goldenpath/hg38/liftOver/hg38ToRheMac8.over.chain.gz",
  "da89c3353a70db359210ff7d45febf8d"
)
|
ca8aa8a1cfad0345dedc6b1f0b7214cdcc4796a3 | 81b93eb1ae55df23c6cbddef27e56f17c77c729e | /man/Node.Rd | e3e530a79f90a52bce7213cc9b075e5ea22c0320 | [] | no_license | ipub/data.tree | fa842bfa03a8f78aa43843dec173d3f96e66f164 | 4dcc2fb8c6d250439504557913d91a470ea3dfb2 | refs/heads/master | 2020-05-20T17:55:43.473209 | 2015-04-02T17:10:34 | 2015-04-02T17:10:34 | 33,321,260 | 0 | 0 | null | 2015-04-02T17:07:46 | 2015-04-02T17:07:46 | null | UTF-8 | R | false | false | 3,062 | rd | Node.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/node.R
\docType{class}
\name{Node}
\alias{Node}
\title{Node}
\format{An \code{\link{R6Class}} generator object}
\usage{
Node
}
\description{
\code{Node} is at the very heart of the \code{data.tree} package. All trees are constructed
by tying together \code{Node} objects.
}
\section{Fields}{
\describe{
\item{\code{children}}{A list of children}
\item{\code{parent}}{The node's parent Node}
}}
\section{Methods}{
\describe{
\item{\code{Node$new(name)}}{Creates a new \code{Node} called \code{name}. Often used to construct the root.}
\item{\code{AddChild(name)}}{Creates a new \code{Node} called \code{name} and adds it to this \code{Node}.}
\item{\code{\link{Find}(...)}}{Find a node with path \code{...}}
\item{\code{\link{Get}(attribute, ..., traversal = "pre-order", filterFun = function(x) TRUE, assign = NULL, format = NULL)}}{Traverses the tree and collects values along the way.}
\item{\code{\link{Set}(..., traversal = "pre-order", returnValues = FALSE)}}{Traverses the tree and assigns attributes along the way.}
\item{\code{\link{Aggregate}(attribute, fun, ...)}}{Traverses the tree and calls \code{fun(children$Aggregate(...))} on each node. }
\item{\code{\link{Sort}(attribute, ..., decreasing = FALSE, recursive = TRUE)}}{Sorts the children of a node according to \code{attribute}}
\item{\code{\link{ToDataFrame}(row.names = NULL, optional = FALSE, ...)}}{Converts the tree below this \code{Node} to a \code{data.frame}}
}
}
\section{Properties}{
\describe{
\item{\code{children}}{Returns a list containing all the children of this \code{Node}}
\item{\code{parent}}{Returns the parent \code{Node} of this \code{Node}}
\item{\code{name}}{Gets or sets the name of a \code{Node}. For example \code{Node$name <- "Acme"}}
\item{\code{isLeaf}}{Returns \code{TRUE} if the \code{Node} is a leaf, \code{FALSE} otherwise}
\item{\code{isRoot}}{Returns \code{TRUE} if the \code{Node} is the root, \code{FALSE} otherwise}
\item{\code{count}}{Returns the number of children of a \code{Node}}
\item{\code{totalCount}}{Returns the total number of \code{Node}s in the tree}
\item{\code{path}}{Returns a vector of mode \code{character} containing the names of the \code{Node}s in the path from the root to this \code{Node}}
\item{\code{pathString}}{Returns a string representing the path to this \code{Node}, separated by backslash}
\item{\code{levelName}}{Returns the name of the \code{Node}, preceded by level times '*'. Useful for printing.}
\item{\code{leaves}}{Returns a list containing all the leaf \code{Node}s }
\item{\code{level}}{Returns an integer representing the level of a \code{Node}. For example, the root has level 0.}
\item{\code{root}}{Returns the root \code{Node} of a \code{Node}'s tree}
}
}
\examples{
library(data.tree)
acme <- Node$new("Acme Inc.")
accounting <- acme$AddChild("Accounting")
print(acme)
}
\seealso{
For more details see the \code{data.tree} vignette: \code{vignette("data.tree")}
}
\keyword{datasets}
|
c4f140a0c32f6516da38d633555066bf2f6e0e7e | 105a13098f22b706f608aef70f56e7d60d71bfae | /dataExploration.R | 45b1e98a3c3a3b87ad3fcde6db8077cd473f8d54 | [] | no_license | ratnexa/patios-contenedores | 173a32476cb74d3147c7c73a6f4d1ac3c26ebb8c | bc7bbb308d05c8953f40c8fe3c728af93bb5944d | refs/heads/master | 2023-04-09T06:01:35.368338 | 2021-04-12T20:17:02 | 2021-04-12T20:17:02 | 321,177,765 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,318 | r | dataExploration.R | library(RMySQL)
library(dplyr)
library(lubridate)
library(ggplot2)
library(gridExtra)
source("config.R")
# Run a SQL query against the patios MySQL database and return the result.
#
# Opens a fresh connection per call; on.exit() guarantees the connection is
# closed even if dbGetQuery() errors, so failed queries no longer leak
# connections (the original only disconnected on the success path).
get_query <- function(query){
  mydb <- RMySQL::dbConnect(MySQL(),
                            user=patiosName,
                            password=patiosPw,
                            dbname=patiosDbName,
                            host=patiosHost)
  on.exit(dbDisconnect(mydb), add = TRUE)
  dbGetQuery(mydb, query)
}
# One-off connection just to list the tables available in the database.
mydb <- RMySQL::dbConnect(MySQL(),
                          user=patiosName,
                          password=patiosPw,
                          dbname=patiosDbName,
                          host=patiosHost)

totalTables <- dbListTables(mydb)
dbDisconnect(mydb)
# Pull the operations for yard 1 and the client list, then eyeball the
# distinct values of the categorical columns.
dataset <- get_query(
  'select * from operation_list where yardId = 1'
)

clients <- get_query(
  'select * from client'
)

unique(dataset$transportadorName)
unique(dataset$linerName)
unique(dataset$containerStatusId)
unique(dataset$statusId)
unique(dataset$statusName)
sort(unique(dataset$clientName))
sort(unique(dataset$licensePlate))
unique(dataset$containerNumber) #Container id
unique(dataset$containerType)
unique(dataset$reposicion_id)
unique(dataset$reposicion_status)
unique(dataset$reposicion_destino)
unique(dataset$reposicion_tamano) #Datos contenedor
unique(dataset$reposicion_oferta)
unique(dataset$reposicion_transportista)
# Drop the extreme kpi2 (time-in-yard) values flagged by boxplot, then re-inspect.
outliers <- boxplot(dataset$kpi2_tiempoEnPatio)$out
# Logical subsetting instead of -which(): -which() on zero matches
# (-integer(0)) would silently drop ALL rows when there are no outliers.
dataset2 <- dataset[!dataset$kpi2_tiempoEnPatio %in% outliers, ]
hist(dataset2$kpi2_tiempoEnPatio)
boxplot(dataset2$kpi2_tiempoEnPatio)  # fixed: stray trailing "d" was a syntax error
boxplot(dataset$kpi2_tiempoEfectivo)
# Yard operations with a recorded stopwatch, from October 2020 onwards.
yardOp <- get_query({
  'select * from yard_operation yo where stopwatch_seconds is not NULL and date(appointment_dateTime) >= "2020-10-01"'
})

# Sanity check: status-4 timestamp minus the stopwatch should approximate the
# operation start; kpi2test is the minutes from yard approval to status 4.
yardOp$validation <- as.POSIXlt(yardOp$dateTimeStatus4) - seconds(yardOp$stopwatch_seconds)
yardOp$kpi2test <- difftime(as.POSIXlt(yardOp$dateTimeStatus4), as.POSIXlt(yardOp$approvedInYardDateTime), units = "mins")

# Attach the recomputed kpi2 to the operation list; missing matches become 0.
fullData <- dataset %>% left_join(yardOp %>% select(id, kpi2test),
                                  by = c("yardOperationId" = "id"))
fullData$kpi2test <- ifelse(is.na(fullData$kpi2test), 0, fullData$kpi2test)

# Mean time per truck (license plate).
x <- fullData %>% group_by(licensePlate) %>% summarize(meanTime = mean(kpi2test, na.rm = T),
                                                       count = n())
hist(as.numeric(yardOp$kpi2test))

xfilt <- yardOp %>% select(id, kpi2test)

# Spot-check individual records and the timestamp ordering of the workflow.
yardOp$load_containerId
yardOp$load_containerDateTime
yardOp$statusId
table(yardOp$operationType)

yardOp$appointment_dateTime[5]
yardOp$dateTimeStatus2[5] #Waiting in line
yardOp$approvedInYardDateTime[5]
yardOp$yardEntryAuthorizedDateTime[5]
yardOp$dateTimeStatus3[5]
yardOp$unload_containerDateTime[5]
yardOp$dateTimeStatus4[5]

yardOp$load_containerDateTime[4]
yardOp$dateTimeStatus4[4]
yardOp$codigoTurno
yardOp$load_containerInspectedDateTime[4]

# Total service time: EXPO and IMPO use different timestamp pairs for the
# second leg (inspection-to-status4 vs unload-to-status4).
yardOp$totalTime <- ifelse(
  yardOp$operationType == "EXPO",
  difftime(as.POSIXlt(yardOp$dateTimeStatus3), as.POSIXlt(yardOp$dateTimeStatus2), units = "mins") +
    abs(difftime(as.POSIXlt(yardOp$load_containerInspectedDateTime), as.POSIXlt(yardOp$dateTimeStatus4), units = "mins")),
  difftime(as.POSIXlt(yardOp$dateTimeStatus3), as.POSIXlt(yardOp$dateTimeStatus2), units = "mins") +
    difftime(as.POSIXlt(yardOp$dateTimeStatus4), as.POSIXlt(yardOp$unload_containerDateTime), units = "mins")
)

# Minutes between the appointment and actually joining the queue (status 2).
yardOp$waitingTime <- as.numeric(abs(difftime(
  yardOp$appointment_dateTime,
  yardOp$dateTimeStatus2,
  units = "mins"
)))
# Split by operation type; trim EXPO outliers before plotting.
expo <- yardOp %>% filter(operationType == "EXPO")
outExpo <- boxplot(expo$totalTime)$out
expo <- expo[-which(expo$totalTime %in% outExpo),]
impo <- yardOp %>% filter(operationType == "IMPO")

#Unload time
# Histograms of total service time for each operation type.
expoPlot <- expo %>%
  ggplot(aes(totalTime)) + geom_histogram(fill = "#33FFF6",
                                          col = I("black"))+
  labs(title="EXPO")

impoPlot <- impo %>%
  ggplot(aes(totalTime)) + geom_histogram(fill = "#B8FF33",
                                          col = I("black"))+
  labs(title="IMPO")

hist(impo$totalTime)

impo %>% filter(linerCode == "SUD") %>%
  ggplot(aes(totalTime)) + geom_histogram(fill = I("blue"),
                                          col = I("red"))

hist(impo$waitingTime)
hist(expo$totalTime)

# Per-liner IMPO total-time histograms, arranged in a 2x2 grid.
a <- impo %>% filter(linerCode == "SEA") %>%
  ggplot(aes(totalTime)) + geom_histogram(fill = "#B8FF33",
                                          col = I("black"))+
  labs(title="SEA")

b <- impo %>% filter(linerCode == "MSK") %>%
  ggplot(aes(totalTime)) + geom_histogram(fill = "#B8FF33",
                                          col = I("black")) +
  labs(title="MSK")

c <- impo %>% filter(linerCode == "PIL") %>%
  ggplot(aes(totalTime)) + geom_histogram(fill = "#B8FF33",
                                          col = I("black"))+
  labs(title="PIL")

d <- impo %>% filter(linerCode == "SUD") %>%
  ggplot(aes(totalTime)) + geom_histogram(fill = "#B8FF33",
                                          col = I("black")) +
  labs(title="SUD")

grid.arrange(a,b,c,d, nrow = 2, ncol = 2)

hist(expo$waitingTime)
table(expo$linerCode)

# Frequency tables of who books/transports, then attach client names.
table(yardOp$appointmentRequestedName)
sort(table(yardOp$truckerId))
sort(table(yardOp$truckId))

yardOp <- yardOp %>% left_join(clients,
                               by = c("clientId" = "id"))

sort(table(yardOp$name))
|
9ba35eef914fa6b36904190ae745d57a521cbe9f | 8227fe55923635d2c02e2382d7d9696d7f4b3f35 | /man/Rslippy-package.Rd | 42ebdac3ed110997d651b68bf17b831ca8718005 | [
"BSD-2-Clause"
] | permissive | ozjimbob/Rslippy | 1ff80939e725803368113424a692cf59760fafb0 | c4edc2ae0d91c535a59bc38bb96d917cc78757d5 | refs/heads/master | 2021-01-19T12:58:26.081208 | 2014-07-28T01:42:07 | 2014-07-28T01:42:07 | 22,045,519 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 819 | rd | Rslippy-package.Rd | \name{Rslippy-package}
\alias{Rslippy-package}
\alias{Rslippy}
\docType{package}
\title{
Generate Leaflet slippy maps from spatial objects.
}
\description{
This package renders tiles from Raster or Spatial* objects, and produces an HTML page that uses the Leaflet Javascript library to render them as a slippy map in a browser.
}
\details{
\tabular{ll}{
Package: \tab Rslippy\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-07-21\cr
License: \tab BSD-2-Clause\cr
}
The main functions are slippyRaster, for raster objects, and slippySpatial for Spatial* vector objects.
}
\author{
Grant Williamson
Maintainer: Grant Williamson <grant.williamson@utas.edu.au>
}
\keyword{ package }
\keyword{ Rslippy }
\keyword{ slippy }
\keyword{ raster }
\keyword{ spatial }
\keyword{ map }
\keyword{ leaflet }
|
6427cf734248f02d68bf3c83e435c146ed94e996 | f4742292708b2f096ed2c4d07bf5fe7d3236ed00 | /build_ngrams.R | 13287dbe18fbbbe478b5e228bbd2448783628cbc | [] | no_license | lucasloami/capstone_project | c8f8ca0deaa5070508a2a09e48b8911cd4211b36 | 86efa9cdbf16f329f13eb2e4c8c507356ddaba09 | refs/heads/master | 2021-01-12T04:30:35.268441 | 2017-01-01T21:47:15 | 2017-01-01T21:47:15 | 77,629,636 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,672 | r | build_ngrams.R | library(tm)
library(RWeka)
library(SnowballC)
#library(ggplot2)
#library(data.table)
# Input locations: the raw text sample to model and a profanity list to strip.
sample_path <- "datasets/sample_dataset_20000.txt"
badwords_path <- "datasets/badwords.txt"

# readLines() opens and closes a connection itself when given a path, so no
# explicit file()/close()/rm() bookkeeping is required.
sample_content <- readLines(sample_path)
badwords <- readLines(badwords_path)
# Create corpus from sample
buildProcessedCorpus <- function(sample_content) {
corpus <- VCorpus(VectorSource(sample_content))
# preprocess corpus removing unnecessary information
toSpace <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
corpus <- tm_map(corpus, toSpace, "/|@|\\|") #remove mentions
corpus <- tm_map(corpus, toSpace, "@\\w+") #remove mentions
corpus <- tm_map(corpus, toSpace, "#\\w+") #remove hashtags
corpus <- tm_map(corpus, toSpace, "(f|ht)tp(s?)://(.*)[.][a-z]+") #remove links or URLs
corpus <- tm_map(corpus, toSpace, "(\\b\\S+\\@\\S+\\..{1,3}(\\s)?\\b)")
corpus <- tm_map(corpus, tolower)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, stripWhitespace)
corpus <- tm_map(corpus, PlainTextDocument)
corpus <- tm_map(corpus, removeWords, badwords)
return(corpus)
}
# Build the cleaned corpus once, then drop the raw text and profanity list to
# free memory; everything downstream works only from `corpus`.
print("processing corpus and cleaning it")
corpus <- buildProcessedCorpus(sample_content = sample_content)
rm(sample_content, badwords)
# Tabulate n-gram frequencies from a tm TermDocumentMatrix.
# (The historical spelling "freqency" is kept because the calls below in this
# file use this exact name.)
#
# tokenized_corpus: a TermDocumentMatrix.
# low_frequency:    minimum corpus-wide count for an n-gram to be kept.
# Returns a data.frame with columns `ngram` and `frequency`, sorted by
# decreasing frequency.  Leftover debugging print() statements were removed.
ngram_freqency <- function(tokenized_corpus, low_frequency = 5) {
  most_freq_terms <- findFreqTerms(tokenized_corpus, lowfreq = low_frequency)
  # Densify only the rows that passed the frequency cut-off before summing.
  counts <- as.matrix(tokenized_corpus[most_freq_terms, ])
  term_freq <- sort(rowSums(counts), decreasing = TRUE)
  data.frame(ngram = names(term_freq), frequency = term_freq)
}
# RWeka tokenizer factories producing 1- to 4-grams, suitable for the
# `tokenize` control option of TermDocumentMatrix().
unigram_tokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 1, max = 1))
bigram_tokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
trigram_tokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
fourgram_tokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 4, max = 4))
# Build, prune and save the 1- to 4-gram frequency tables.  The four original
# passes were identical apart from the tokenizer, the frequency cut-off and
# the output file, so they now share one helper.
#
# `save()` stores objects under their symbol names, so the frequency table is
# assign()ed to its historical object name (e.g. "unigram.freq") before
# saving; the .RData files therefore keep the same structure as before.
# Intermediates (the dense term-document matrix) are freed automatically when
# the helper returns, replacing the explicit rm() calls.
build_ngram_table <- function(label, tokenizer, low_freq, obj_name, out_file) {
  print(label)  # progress marker, same text as the original script
  tdm <- TermDocumentMatrix(corpus, control = list(tokenize = tokenizer))
  # Drop extremely sparse terms to keep the matrix at a manageable size.
  tdm <- removeSparseTerms(tdm, 0.9999)
  assign(obj_name, ngram_freqency(tdm, low_freq))
  save(list = obj_name, file = out_file)
}

build_ngram_table("unigram_tokenizer", unigram_tokenizer, 50, "unigram.freq", "unigrams_df.RData")
build_ngram_table("bigram_tokenizer", bigram_tokenizer, 20, "bigram.freq", "bigrams_df.RData")
build_ngram_table("trigram_tokenizer", trigram_tokenizer, 10, "trigram.freq", "trigrams_df.RData")
build_ngram_table("fourgram_tokenizer", fourgram_tokenizer, 10, "fourgram.freq", "fourgrams_df.RData")

# build_plot <- function(df, max_size, title, xLabText, yLabText) {
#   df <- df[1:max_size,]
#   g <- ggplot(df, aes(x=reorder(ngram, frequency), y=frequency)) +
#     geom_bar(stat = "identity") + coord_flip() +
#     theme(legend.title=element_blank()) +
#     xlab(xLabText) + ylab(yLabText) +
#     labs(title = title)
#   print(g)
# }
# build_plot(unigram.freq, 10, "Top 10 Unigrams distribution", "Unigrams", "Frequency")
|
3de28467351a215b2d0e12fb040b816cf66abc5d | 31505f487482742f7dc82c9ef4e4388b0127cfbb | /short_read_code/gff_processing.R | a5ae1469fd6b35f27add831e7ca36939ea980f51 | [] | no_license | qhauck16/Quinn-Summer-2020 | 2a2ca4d3dd86fa149e9289a207abc704280e6f32 | 04394259dcb4048854972782f8efd052f2d756a5 | refs/heads/master | 2023-04-19T06:34:20.070885 | 2021-04-20T18:18:03 | 2021-04-20T18:18:03 | 265,127,662 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 125 | r | gff_processing.R | #aliana_gff <- read.gtf('/uru/Data/Nanopore/Analysis/gmoney/hbird/210114_aliana_annotation/acolubris_masurca_ragoo_v2.gff')
|
a1b166bcda3c5f56b85f38e75c0bf70f654f57a1 | f80b82dc3b27e8729b8401ffb06bd37839c411b4 | /ABCDTestApp/server.R | ae42d0569ac94e79fc9ecae9b4f9d6121cc1d67d | [] | no_license | maganti/shinyapps | 158f29f7b7ec494f26fd59fd891749787fd069f9 | 35d3528c479b80199f7dc058995c56d5ce7e0ca4 | refs/heads/master | 2020-12-30T14:55:49.919132 | 2014-11-12T12:46:18 | 2014-11-12T12:46:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,670 | r | server.R |
# This is the server logic for the ABCD² Risk Score Shiny web application.
# The score table and the interpretation rules data frame are created using functions
#from the tables.R file.
#
library(shiny)
library(ggplot2)
source("tables.r")
# Lookup tables built once per R process (shared across sessions):
# `riskdata` maps ABCD2 scores to per-day stroke-risk rows and `scoreTable`
# is the static scoring rubric (both from tables.r).
riskdata<-createScoreDF()
scoreTable<-createScoreTable()

# Server logic: recompute the ABCD2 score reactively from the five inputs,
# render the risk bar chart, and show the rubric table.
shinyServer(function(input, output) {
    # Total ABCD2 score: the five inputs arrive as strings (select values),
    # so each is coerced to numeric before summing.
    val<-reactive({
        val<-as.numeric(input$age)+as.numeric(input$diabetes) + as.numeric(input$BP)+ as.numeric(input$clinical)+ as.numeric(input$duration)
    })
    # Risk rows (days after TIA, risk %, risk category) for the current score.
    testsc<-reactive({        
        testsc<-riskdata[riskdata$score==val(),]        
    })
  output$riskplot <- renderPlot({    
      testsc<-testsc()
      # One bar colour per risk category; type is constant within a score,
      # so the first row's category determines the colour.
      colr<-switch(as.character(testsc$type[1]), "Low"="tan","Medium"="orange","High"="red")
      theme_set(theme_bw(base_size = 16)) 
    ggplot(data=testsc, aes(x=factor(days), y=perc, fill=type))+
        labs(title=paste0("ABCD² | Risk of Stroke After TIA\nScore = ",val()),y="Risk Percentage", x="Days After Stroke")+
        geom_bar(stat="identity")+
        geom_text(aes(y=perc,label=paste0(perc,"%"), vjust=-0.5),size = 5) +
        scale_y_continuous(limits=c(0,25), labels=c("0%","5%","10%","15%","20%","25%"))+
        scale_fill_manual(name="Stroke Risk\nCategory:", values=colr)+
        theme(legend.title = element_text(colour="chocolate", size=12, face="bold"))
  })
  # Static rubric shown alongside the plot.
  output$scoreTable<-renderTable({
    scoreTable
  })
})
|
775a254508fef604eb309c3d23b25f64c9ace945 | 2defb970de80008d3a5f77728bf3f896832fe2e1 | /Vaccine Rollout/Create_Coverage_Boxplot_Wane.R | 7d17e5701f8d033140bc27c5621521cd017ed0aa | [] | no_license | FredHutch/COVID_modeling_schools | bd211844ebd00c5977ac6ad0ef8b298aa7d6a8f2 | 98c1a879e1685b78a21427780f0f34941c309034 | refs/heads/master | 2023-08-31T14:37:46.365370 | 2021-10-06T16:36:47 | 2021-10-06T16:36:47 | 413,988,503 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,937 | r | Create_Coverage_Boxplot_Wane.R | library(HutchCOVID)
library(lubridate)
library(dplyr)
source("covid-model-plotting.R")
# Directory holding the scenario .rdata inputs and cached summaries.
# "out" was presumably a smaller test location -- TODO confirm; the full
# scenario sweep below reads from and caches into "big_out".
#out_dir = "out"
out_dir = "big_out"
# Draw three summary figures for a 24-scenario sweep (3 vaccination policies
# x 4 school-contact levels x 2 waning assumptions):
#   1. grouped barplot of the mean outcome per scenario,
#   2. boxplots of the cumulative outcome, grouped by vaccination policy,
#   3. boxplots of the peak daily outcome, same grouping.
#
# final_cumul     long data frame of cumulative outcomes with columns
#                 sim, value, scenario_name, scenario_school, scenario_vac,
#                 scenario_wane
# days_at_max_sd  accepted for call-compatibility but currently unused here
# max_daily       long data frame of peak daily outcomes (same columns)
# final_cumul_ylab, max_daily_ylab  y-axis labels for the cumulative / peak plots
#
# Called for its plotting side effects on the active device; also print()s
# the per-scenario summary tables to the console.
do_box_plots_24 = function(final_cumul, days_at_max_sd, max_daily, final_cumul_ylab, max_daily_ylab)
{
  # One colour pair (light = no waning, dark = waning) per vaccination policy:
  # green = October kid vaccination, blue = January, purple = none.
  cols = c("#7fbf7b", "#67a9cf", "#af8dc3")
  cols_light = c("#d9f0d3", "#d1e5f0", "#e7d4e8") # green, blue, purple
  # Freeze factor levels in first-appearance order so plotting follows the
  # scenario order supplied by the caller.
  final_cumul$scenario_name = factor(final_cumul$scenario_name, levels = unique(final_cumul$scenario_name), ordered = TRUE)
  final_cumul$scenario_school = factor(final_cumul$scenario_school, levels = unique(final_cumul$scenario_school), ordered = TRUE)
  final_cumul$scenario_vac = factor(final_cumul$scenario_vac, levels = unique(final_cumul$scenario_vac), ordered = TRUE)
  final_cumul$scenario_wane = factor(final_cumul$scenario_wane, levels = unique(final_cumul$scenario_wane), ordered = TRUE)
  # Totals per simulation and scenario (summed across age/vaccination strata).
  final_cumul_totals = final_cumul %>% group_by(sim, scenario_name) %>%
    summarise(value = sum(value, na.rm = TRUE), .groups = "drop")
  metrics = final_cumul_totals %>%
    group_by(scenario_name) %>%
    summarise(med = median(value), mean = mean(value), max = max(value), .groups = "drop")
  print(metrics)
  # Scenarios alternate No-Wane/Wane, so filling a 2-row matrix column-wise
  # puts each pair's no-waning mean in row 1 and waning mean in row 2.
  # (Replaces an index loop that was hard-coded to 24 scenarios; this works
  # for any even number of scenarios.)
  plot_data = matrix(metrics$mean, nrow = 2)
  # Figure 1: grouped barplot of scenario means, light/dark pairs per school
  # level, four levels per vaccination policy.
  barplot(plot_data,
          xlab = "", ylab = paste("Mean",final_cumul_ylab),beside=TRUE, xaxt = "n",
          col = c(rep(c(cols_light[1], cols[1]),4),rep(c(cols_light[2], cols[2]),4),rep(c(cols_light[3], cols[3]),4)))
  # Two-level x axis: school-contact level near the axis, vaccination policy
  # below it; positions are hand-tuned to the 24-bar layout.
  axis(side = 1, line = -1, at = c(1.5 + 3 * (0:3), 2 + 3 * (4:7), 2.5 + 3 * (8:11)), labels = c("school 0%","50%","75%","100%","school 0%","50%","75%","100%","school 0%","50%","75%","100%"), tick = FALSE, cex.axis = 0.8)
  axis(side = 1, line = 0, at = c(4.5, 18, 31.5), labels = c("October vaccination", "January vaccination", "No vaccination"), tick = FALSE)
  legend("topright",legend=c("No Waning","Waning"),pch=15,
         col = c("grey70","grey5"),inset=c(-0.30,0),xpd = TRUE)
  # Figure 2: boxplots of the cumulative outcome, one band of 8 boxes per
  # vaccination policy, drawn into the same axes with add = TRUE.
  final_cumul_by_vacdate = final_cumul %>% group_by(sim, scenario_name, scenario_school, scenario_vac, scenario_wane) %>%
    summarise(value = sum(value, na.rm = TRUE), .groups = "drop")
  print(final_cumul_by_vacdate)
  # Box label = school-contact level plus waning assumption.
  final_cumul_by_vacdate$scenario_name_vac = paste0(as.character(final_cumul_by_vacdate$scenario_school), "\n", final_cumul_by_vacdate$scenario_wane)
  final_cumul_by_vacdate$scenario_name_vac = factor(final_cumul_by_vacdate$scenario_name_vac, levels = unique(final_cumul_by_vacdate$scenario_name_vac), ordered = TRUE)
  boxplot(value ~ scenario_name_vac, data = final_cumul_by_vacdate, subset = scenario_vac == "October",
          at = 1:8, xlim = c(0.5, 25.5), xaxt = "n",main = "Overall",
          xlab = "", ylab = final_cumul_ylab,
          col = c(cols_light[1], cols[1]))
  boxplot(value ~ scenario_name_vac, data = final_cumul_by_vacdate, subset = scenario_vac == "January",
          at = 9.5:16.5, add = TRUE, xaxt = "n",
          col = c(cols_light[2], cols[2]))
  boxplot(value ~ scenario_name_vac, data = final_cumul_by_vacdate, subset = scenario_vac == "None",
          at = 18:25, add = TRUE, xaxt = "n",
          col = c(cols_light[3], cols[3]))
  axis(side = 1, line = -1, at = c(1.5 + 2 * (0:3), 2 + 2 * (4:7), 2.5 + 2 * (8:11)), labels = rep(unique(final_cumul_by_vacdate$scenario_school), 3), tick = FALSE, cex.axis = 0.6)
  axis(side = 1, line = 0, at = c(4.5, 12.5, 20.5), labels = c("October vaccination", "January vaccination", "No vaccination"), tick = FALSE, cex.axis = 0.8)
  legend("topright",legend=c("No Waning","Waning"),pch=15,
         col = c("grey70","grey5"),inset=c(-0.25,0),xpd = TRUE)
  # Figure 3: identical layout for the peak daily outcome.
  max_daily_by_vacdate = max_daily %>% group_by(sim, scenario_name, scenario_school, scenario_vac,scenario_wane) %>%
    summarise(value = sum(value, na.rm = TRUE), .groups = "drop")
  max_daily_by_vacdate$scenario_name_vac = paste0(as.character(max_daily_by_vacdate$scenario_school), "\n", max_daily_by_vacdate$scenario_wane)
  max_daily_by_vacdate$scenario_name_vac = factor(max_daily_by_vacdate$scenario_name_vac, levels = unique(max_daily_by_vacdate$scenario_name_vac), ordered = TRUE)
  boxplot(value ~ scenario_name_vac, data = max_daily_by_vacdate, subset = scenario_vac == "October",
          at = 1:8, xlim = c(0.5, 25.5), xaxt = "n",main = "Daily Peak",
          xlab = "", ylab = max_daily_ylab,
          col = c(cols_light[1], cols[1]))
  boxplot(value ~ scenario_name_vac, data = max_daily_by_vacdate, subset = scenario_vac == "January",
          at = 9.5:16.5, add = TRUE, xaxt = "n",
          col = c(cols_light[2], cols[2]))
  boxplot(value ~ scenario_name_vac, data = max_daily_by_vacdate, subset = scenario_vac == "None",
          at = 18:25, add = TRUE, xaxt = "n",
          col = c(cols_light[3], cols[3]))
  axis(side = 1, line = -1, at = c(1.5 + 2 * (0:3), 2 + 2 * (4:7), 2.5 + 2 * (8:11)), labels = rep(unique(max_daily_by_vacdate$scenario_school), 3), tick = FALSE, cex.axis = 0.6)
  axis(side = 1, line = 0, at = c(4.5, 12.5, 20.5), labels = c("October vaccination", "January vaccination", "No vaccination"), tick = FALSE, cex.axis = 0.8)
  legend("topright",legend=c("No Waning","Waning"),pch=15,
         col = c("grey70","grey5"),inset=c(-0.25,0),xpd = TRUE)
}
#read in Rdata files, and extract necessary data
# Analysis window over which scenario trajectories are summarised.
start_date = ymd("2021-09-01")
end_date = ymd("2022-06-01")
# 24 scenario files: KIDS = October kid vaccination, KIDS2 = January, no
# suffix = no kid vaccination; each at school-contact levels 0/50/75/100%
# and with NoWane/Wane variants.  This order must stay in sync with
# scenario_names below and with the colour/axis layout hard-coded in
# do_box_plots_24().
scenario_files = c(paste0(out_dir, "/scenario0_KIDS_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario0_KIDS_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario50_KIDS_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario50_KIDS_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario75_KIDS_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario75_KIDS_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario100_KIDS_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario100_KIDS_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario0_KIDS2_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario0_KIDS2_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario50_KIDS2_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario50_KIDS2_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario75_KIDS2_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario75_KIDS2_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario100_KIDS2_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario100_KIDS2_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario0_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario0_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario50_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario50_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario75_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario75_Wane_B_data.rdata"),
                   paste0(out_dir, "/scenario100_NoWane_B_data.rdata"),
                   paste0(out_dir, "/scenario100_Wane_B_data.rdata"))
# Human-readable labels; the "\n"-separated fields (school level,
# vaccination timing, waning) are split downstream into the
# scenario_school / scenario_vac / scenario_wane columns.
scenario_names = c("0% school\nOctober\nNo Wane","0% school\nOctober\nWane", "50%\nOctober\nNo Wane", "50%\nOctober\nWane",
                   "75%\nOctober\nNo Wane", "75%\nOctober\nWane","100%\nOctober\nNo Wane","100%\nOctober\nWane",
                   "0% school\nJanuary\nNo Wane","0% school\nJanuary\nWane", "50%\nJanuary\nNo Wane", "50%\nJanuary\nWane",
                   "75%\nJanuary\nNo Wane", "75%\nJanuary\nWane","100%\nJanuary\nNo Wane","100%\nJanuary\nWane",
                   "0% school\nNone\nNo Wane","0% school\nNone\nWane", "50%\nNone\nNo Wane", "50%\nNone\nWane",
                   "75%\nNone\nNo Wane", "75%\nNone\nWane","100%\nNone\nNo Wane","100%\nNone\nWane")
# Consolidating the scenario runs is slow, so the extracted hospitalization
# summaries are cached; delete the .rdata file to force recomputation after
# changing the scenario list above.
if (file.exists("big_out/boxplot_hosp_schools_wane_data.rdata"))
{
  load(file="big_out/boxplot_hosp_schools_wane_data.rdata")
} else {
  # Cumulative hospitalizations per sim/scenario over the analysis window.
  final_hosp = consolidate_scenarios(scenario_files, scenario_names,
                                     state_to_extract = "cum_hosp", start_date = start_date, end_date = end_date)
  # Split the "\n"-separated scenario label into its three factor columns.
  scenario_parts = plyr::ldply(strsplit(final_hosp$scenario_name, '\n'))
  names(scenario_parts) = c("scenario_school", "scenario_vac", "scenario_wane")
  final_hosp = cbind(final_hosp, scenario_parts)
  # Time spent at maximum social distancing, as a percentage.
  days_at_max_sd = consolidate_max_sd(scenario_files, scenario_names, report_as_percentage = TRUE,
                                      max_sd_age_1 = 0.3, start_date = start_date, end_date = end_date)
  scenario_parts = plyr::ldply(strsplit(days_at_max_sd$scenario_name, '\n'))
  names(scenario_parts) = c("scenario_school", "scenario_vac", "scenario_wane")
  days_at_max_sd = cbind(days_at_max_sd, scenario_parts)
  # Peak daily hospital occupancy (H = hospitalized, DH = dying in hospital).
  max_hosp = consolidate_max_scenarios(scenario_files, scenario_names,
                                       c("H", "DH"), start_date = start_date, end_date = end_date)
  scenario_parts = plyr::ldply(strsplit(max_hosp$scenario_name, '\n'))
  names(scenario_parts) = c("scenario_school", "scenario_vac", "scenario_wane")
  max_hosp = cbind(max_hosp, scenario_parts)
  save(final_hosp, days_at_max_sd, max_hosp, file = "big_out/boxplot_hosp_schools_wane_data.rdata")
}
# Render the hospitalization figures to PDF.
pdf("out/boxplot_hosp_schools_Wane.pdf", width = 8, height = 4)
par(mgp=c(3.5, 1.2, 0), las = 1, mar = c(3, 4.5, 2, 8) + 0.1)
do_box_plots_24(final_hosp, days_at_max_sd, max_hosp, "Cumulative hospitalizations","Peak hospitalizations")
dev.off()
# Same caching pattern for the death summaries.
# NOTE(review): days_at_max_sd is only computed in the hospitalization
# branch above; the save() in the else-branch below assumes that section
# already ran (or its cache was loaded) -- confirm if the sections are ever
# run independently.
if (file.exists("big_out/boxplot_death_schools_wane_data.rdata"))
{
  load(file="big_out/boxplot_death_schools_wane_data.rdata")
} else {
  # Cumulative deaths per sim/scenario over the analysis window.
  final_death = consolidate_scenarios(scenario_files, scenario_names,
                                      state_to_extract = "cum_death", start_date = start_date, end_date = end_date)
  scenario_parts = plyr::ldply(strsplit(final_death$scenario_name, '\n'))
  names(scenario_parts) = c("scenario_school", "scenario_vac", "scenario_wane")
  final_death = cbind(final_death, scenario_parts)
  # Peak daily deaths (F and DF compartments).
  max_death = consolidate_max_scenarios(scenario_files, scenario_names,
                                        c("F", "DF"), start_date = start_date, end_date = end_date)
  scenario_parts = plyr::ldply(strsplit(max_death$scenario_name, '\n'))
  names(scenario_parts) = c("scenario_school", "scenario_vac", "scenario_wane")
  max_death = cbind(max_death, scenario_parts)
  save(final_death, days_at_max_sd, max_death, file = "big_out/boxplot_death_schools_wane_data.rdata")
}
#--------------------------------------------------------------
# Render the death figures to PDF.
pdf("out/boxplot_death_schools_Wane.pdf", width = 10, height = 4)
par(mgp=c(3.5, 1.2, 0), las = 1, mar = c(3, 4.5, 2, 8) + 0.1)
do_box_plots_24(final_death, days_at_max_sd, max_death, "Cumulative deaths", "Peak deaths")
dev.off()
|
3409cd24e4d4a8a9d95b40125cb410009c25eb94 | f64770d666eea570863b0e3988399cde5429e2eb | /plot5.R | 17205739c1f94ee9f85936347014b9eb1ccf1f27 | [] | no_license | mrdumass/ExData_Plotting2 | 493a700bc1e0f54ddf788f157713af948b2cd010 | 2849388ff1f52920ebbb0a99196ccc925a3fcf22 | refs/heads/master | 2020-06-02T15:13:54.517402 | 2014-08-21T02:14:29 | 2014-08-21T02:14:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 934 | r | plot5.R | ## Read in the Source Classification file ##
## Plot 5: PM2.5 emissions from motor vehicles in Baltimore City over time,
## with a linear trend line, written to plot5.png.

## Read in the Source Classification file ##
sc<-readRDS("Source_Classification_Code.rds")
##read in the summary scc data ##
sumSCC<-readRDS("summarySCC_PM25.rds")
##get the data only for Baltimore City (fips 24510)##
baltimore<-(sumSCC[(sumSCC$fips==24510),])
rm(sumSCC)
## find all the codes for vehicles ##
vehicles<-sc[grepl("Vehicles",sc$EI.Sector),]
## get the data set of baltimore just from vehicles ##
baltmv<-baltimore[(baltimore$SCC %in% vehicles$SCC),]
## load the reshape package, installing it only when missing --
## unconditionally calling install.packages() on every run (as before) is
## slow and fails without network access ##
if (!require("reshape")) {
  install.packages("reshape")
  library(reshape)
}
## sum vehicle emissions by year ##
meltbaltmv<-melt(baltmv,id.vars="year",measure.vars="Emissions")
castbaltmv<-cast(meltbaltmv,year~variable,sum)
## plot yearly totals with a fitted regression line ##
png(file="plot5.png")
plot(castbaltmv$year,castbaltmv$Emissions,main="Vehicle Emissions Baltimore with Regression line",xlab="year",ylab="Emissions (tons)")
a<-lm(castbaltmv$Emissions~castbaltmv$year)
abline(a)
## close png device ##
dev.off()
403ead71d94252d54d7d6aecb867fac69b0fe1aa | 76de44331998499ba9388780db45681a8648dabd | /simulate.R | 0bbec3b6b712594e659dd12157100e756647994a | [] | no_license | michaellevy/gwdSimulateEstimate | 629f2324e2dd5ffd7f7837c39cd6cfc5d3981c40 | c3d9abe417452d5dfb951c6c27085d87ce46895e | refs/heads/master | 2020-04-18T11:46:01.160840 | 2016-10-24T15:22:24 | 2016-10-24T15:22:24 | 66,306,491 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,432 | r | simulate.R | # On aws had to install libssl-dev, libcurl4-gnutls-dev, and libxml2-dev and maybe run the following line
# Sys.setenv(PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig")
if (!require("devtools")) install.packages("devtools", dependencies = TRUE)
if (!require("pacman")) install.packages("pacman")
if (!require("netUtils")) {
install.packages("devtools")
devtools::install_github("michaellevy/netUtils")
}
pacman::p_load(statnet, netUtils, tidyverse, broom, stringr, parallel)
# Reproducibility and experimental design parameters -------------------------
set.seed(80112)
# One parallel worker per available core for the FORK cluster created below.
cores = detectCores()
# Number of nodes in every simulated network.
nNodes = 100
# Target mean degrees for the seed random graphs.
meanDegs = c(1, 3, 10)
# Generating-parameter values to simulate networks at.
coefs = seq(-.5, .5, len = 4)
# gwdegree decay (theta_s) values at which each simulated network is refit.
decays = 10^(seq(-1, .7, len = 9))
# Full factorial sweep: for each generating statistic, with and without an
# edges constraint, and with fixed vs. curved (CEF) decay, simulate networks
# and refit gwdegree models across a grid of decay values.  Results for each
# (simParam, constrained, fixed) cell are written to
# models/<simParam>/constrained-<TF>/fixed-<TF>/estimates.csv.
# NOTE(review): "twopath" appears twice and "gwd1.0" twice in this vector,
# so those cells are run (and their output overwritten) twice -- presumably
# unintentional; confirm before a long run.
for (simParam in c("twopath", "gwd1.0", "degreepopularity", "twopath", "gwd0.25", "gwd1.0", "gwd3.0")) {
  for (constrained in c(TRUE, FALSE)) {
    for (fixed in c(TRUE, FALSE)) {
      start = Sys.time()
      # Organized as models/generating_mechanism/constrained-or-density/fixed-or-CEF
      directory = paste0("models/", simParam, "/constrained-", constrained, "/fixed-", fixed)
      if(!dir.exists(directory)) dir.create(directory, recursive = TRUE)
      # One list element per mean degree, each a data frame of estimates
      # (or NULL if nothing converged).
      estimates = 
        lapply(meanDegs, function(meanDeg) {
          randomGraph = makeNetwork(nNodes, meanDegree = meanDeg, directed = FALSE)
          # FORK workers share the parent environment (Unix only).
          clust = makeCluster(cores, type = 'FORK')
          coefWithinMeandeg = 
            parLapply(clust, coefs, function(dp) {
              # Simulate a network with the given parameter value (dp) for this iteration
              # "gwdX.X" names carry their decay in the name; other names are
              # used directly as the ergm term.
              simForm = if(str_detect(simParam, "gwd")) {
                simForm = formula(paste0("randomGraph ~ gwdegree(", 
                                         str_extract(simParam, "[0-9]+\\.[0-9]+"), 
                                         ", fixed = TRUE)"))
              } else {
                simForm = formula(paste0("randomGraph ~ ", simParam))
              }
              simNet = 
                simulate.formula(
                  object = simForm
                  , coef = dp
                  , constraints = ~ edges
                  , nsim = 1
                  , control = control.simulate(MCMC.burnin = 1e6, MCMC.interval = 1e6)
                )
              # lapply over theta_s values and estimate an ergm for each
              decaysWithinCoef = 
                lapply(decays, function(decay) {
                  # Open connection to write warnings and note parameter values
                  # (workers append to a shared per-directory log file).
                  lg = file(file.path(directory, "EstimationLog.txt"), open = "at")
                  sink(lg, type = "message", append = TRUE)
                  message("\nAt ", Sys.time(), 
                          " Sim'ing on ", simParam, 
                          ", edges constrained: ", constrained, 
                          ", theta fixed: ", fixed,
                          ", mean degree: ", meanDeg, 
                          ", coef: ", round(dp, 2), 
                          ", decay = ", round(decay, 2))
                  # Estimate 
                  # try() because ergm can fail outright on degenerate fits.
                  m = 
                    if(constrained) {
                      try(ergm(simNet ~ gwdegree(decay, fixed = fixed),
                               control = control.ergm(MCMLE.maxit = 50,
                                                      MCMC.samplesize = 5e3,
                                                      seed = 475)
                               , constraints = ~ edges))
                    } else {
                      try(ergm(simNet ~ gwdegree(decay, fixed = fixed) + edges,
                               control = control.ergm(MCMLE.maxit = 50,
                                                      MCMC.samplesize = 5e3,
                                                      seed = 475)))
                    }
                  # Close connection
                  sink(type = "message")
                  close(lg)
                  # If model didn't converge, get rid of the estimates
                  # (hitting MCMLE.maxit = 50 is treated as non-convergence).
                  # NOTE(review): inherits(m, "ergm") would be the safer test
                  # than class(m) != "ergm".
                  if(class(m) != "ergm" || m$iterations == 50)
                    return(NULL)
                  # Something is causing failures here (I think it's here) with
                  # "Error in svd(X) : a dimension is zero". Rather than
                  # chasing it down, just return NULL
                  out = 
                    try({
                      tidy(m) %>% 
                        mutate(theta_s = decay)
                    })
                  if(!is.data.frame(out)) return(NULL) else return(out)
                })
              # All decays failed for this coefficient: propagate NULL.
              if(all(sapply(decaysWithinCoef, is.null)))
                return(NULL)
              do.call(rbind, decaysWithinCoef) %>% 
                mutate(simCoef = dp)
            })
          stopCluster(clust)
          if(all(sapply(coefWithinMeandeg, is.null)))
            return(NULL)
          do.call(rbind, coefWithinMeandeg) %>% 
            mutate(meanDegree = meanDeg)
        })
      # Stack all mean-degree results and tag with the cell's settings.
      tab = 
        mutate(do.call(rbind, estimates), 
               sim_parameter = simParam, 
               edges_constrained = constrained, 
               fixed_decay = fixed)
      write_csv(tab, file.path(directory, "estimates.csv"))
      finish = Sys.time()
      message("At ", Sys.time(), 
              "\nDone with ", simParam, 
              ", edges constrained: ", constrained,
              ", theta fixed: ", fixed)
      print(finish - start)
    }
  }
}
|
61e382f99b25681f2bb56286013312dc0accfde2 | 20bba0a43de06fb558a9e236c9bfcc8ebb332c16 | /tools/ngs/R/ngs-filter-annotations.R | e337bc3bfc5892f4cb68fdd7de06115e01578e03 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | JunjuanZheng/chipster-tools | 921e324aeee3d48b7e1729419ef6a8d6d076e101 | f95cbd4322926e5a64487e4e17a44aaf593256b1 | refs/heads/master | 2022-12-06T02:26:04.192480 | 2020-08-11T09:32:52 | 2020-08-11T09:32:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,305 | r | ngs-filter-annotations.R | # TOOL ngs-filter-annotations.R: "Filter table by column term" (Allows the user to filter the table rows on the basis of terms in any text column.)
# INPUT annotations.tsv: annotations.tsv TYPE GENERIC
# OUTPUT filtered-NGS-results.tsv: filtered-NGS-results.tsv
# PARAMETER column: "Column to filter by" TYPE COLUMN_SEL (Data column to filter by)
# PARAMETER match.term: "Term to match" TYPE STRING DEFAULT empty (Textual term to search for.)
# PARAMETER has.rownames: "Does the first column have a title" TYPE [yes: no, no: yes] DEFAULT no (Specifies whether the data has unique identifiers as rownames or lacks them.)
# MG 29.5.2010
# MK, EK 21.08.2013 added support for rownames
# Chipster injects the tool parameters (`column`, `match.term`,
# `has.rownames`) as global variables before this script runs.

# Load the normalized data verbatim: original column headers are kept
# (check.names = FALSE) and quoting/comment interpretation are disabled.
input_file <- "annotations.tsv"
dat <- read.table(input_file, header = TRUE, sep = "\t", check.names = FALSE,
                  quote = "", comment.char = "")

# Select the rows whose text matches `match.term` (treated as a regular
# expression).  A column value of " " means "no column chosen", in which
# case the row names are matched instead of a data column.
if (column == " ") {
  dat2 <- dat[grep(match.term, rownames(dat)), ]
} else {
  # Locate the requested column by matching its name, then filter on values.
  column_index <- as.vector(grep(column, names(dat)))
  dat2 <- dat[grep(match.term, dat[, column_index]), ]
}

# Write the filtered table, with row names only when the input's first
# column had no title.
if (has.rownames == "yes") {
  write.table(dat2, "filtered-NGS-results.tsv", sep = "\t",
              row.names = TRUE, col.names = TRUE, quote = FALSE)
}
if (has.rownames == "no") {
  write.table(dat2, "filtered-NGS-results.tsv", sep = "\t",
              row.names = FALSE, col.names = TRUE, quote = FALSE)
}
|
9a320f0ee837d02f05e4b5b065dfd6509c0b55c2 | c874e55ec73043f6b837601cc58d855d37649e59 | /mlcenzer/plots/example_correction_figure.R | 46a24be74ae6df52900c332561974076c092c055 | [] | no_license | mlcenzer/SBB-dispersal | 85c54c924b399834a798d700cabf0b2702ae0755 | 1a777370986f83186180552a09149dfba72b96d0 | refs/heads/master | 2022-12-11T10:13:32.416530 | 2022-12-03T16:23:52 | 2022-12-03T16:23:52 | 229,098,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,061 | r | example_correction_figure.R | ###############################################################################
######Fig. 5: Plasticity across latitude
setwd("/Users/Meredith/Desktop/Documents/R/soapberry bugs/F2 April 2014/")
library(lme4)
morph1<-read.csv("F2morphology12.3.15a.csv", header=TRUE, sep=",", quote = "")
# Keep only measurable (cracked == 'y') bugs, drop the GainesvilleBV
# population, and add columns for the source population's host plant
# (pophost), the natal rearing population's host plant (nathost), and the
# source population's latitude.
morph<-data.frame(morph1[morph1$cracked=='y' & morph1$bugpop!="GainesvilleBV",], pophost=NA, nathost=NA, latitude=NA)

# Population -> host-plant and population -> latitude lookup tables.  A
# vectorized named-vector lookup replaces the original per-row for loop,
# which also used the unsafe 1:length(...) idiom (it would misbehave on a
# zero-row data frame).  Populations not listed here are left as NA,
# exactly as in the loop version.
pop_host <- c(Key_Largo = "C.corindum", Plantation_Key = "C.corindum",
              HomesteadBV = "C.corindum", HomesteadGRT = "K.elegans",
              Ft.Myers = "K.elegans", Lake_Wales = "K.elegans",
              Leesburg = "K.elegans", GainesvilleGRT = "K.elegans")
pop_lat <- c(Key_Largo = 25.1756, Plantation_Key = 24.9913,
             HomesteadBV = 25.5708, HomesteadGRT = 25.5510,
             Ft.Myers = 26.6340, Lake_Wales = 27.9347,
             Leesburg = 28.7964, GainesvilleGRT = 29.6605)
morph$pophost <- unname(pop_host[as.character(morph$bugpop)])
morph$latitude <- unname(pop_lat[as.character(morph$bugpop)])

# Natal host: Key Largo, Plantation Key and Homestead-BV bugs were reared
# on C. corindum; every other natal population on K. elegans.
morph$nathost <- ifelse(morph$natpop %in% c("Key_Largo", "Plantation_Key", "HomesteadBV"),
                        "C.corindum", "K.elegans")
# Latitude of the sympatric zone; distance from it is the key predictor.
#Sym.lat<-25.56091
Sym.lat<-25.5609
morph$sym.dist<-abs(morph$latitude-Sym.lat)
# Single-letter model variables: R = log beak length (response), A = log
# thorax (size covariate), B = source-population host, C = natal host,
# D = sex, E = centered distance from the sympatric zone, X = population.
data<-data.frame(R=log(morph$beak), A=log(morph$thorax), B=morph$pophost, C=morph$nathost, D=morph$sex, E=(morph$sym.dist-mean(morph$sym.dist)), X=droplevels(morph$bugpop))
# Effect-coded (+1/-1) versions of host and sex factors for the glm.
data$B1<-1
data$B1[data$B=="K.elegans"]<- -1
data$C1<-1
data$C1[data$C=="K.elegans"]<- -1
data$D1<-1
data$D1[data$D=="M"]<- -1
m26a<-glm(R~B1*D1 + A + C1 + E*C1, family=gaussian, data=data)
# Predicted beak length on a grid of natal host x distance values, built
# from the fitted coefficients.  NOTE(review): coefs are picked by position
# (coefs[5] = C1, coefs[6] = E, coefs[8] = C1:E -- verify against
# summary(m26a) if the model formula ever changes).
new.data<-expand.grid(n.host=c(-1,1), sym.dist=unique(data$E))
coefs<-summary(m26a)$coef[,1]
new.data$pred<-coefs[1] + coefs[5]*new.data$n.host + coefs[6]*new.data$sym.dist + coefs[8]*new.data$n.host*new.data$sym.dist
# Remove the size, sex, sex-by-host and source-host effects from the
# response, leaving a "corrected" beak length for plotting.
data$maleness<--1
data$maleness[data$D=="F"]<-1
data$p.host<--1
data$p.host[data$B=="C.corindum"]<-1
data$correction<-data$R-coefs[4]*data$A-coefs[3]*data$maleness-coefs[7]*data$maleness*data$p.host-coefs[2]*data$p.host
# Per (distance, natal-host) cell: mean, count and SD of the corrected
# values, then standard-error bounds for the plot.
m.c<-aggregate(correction~E*C1, data=data, FUN=mean)$correction
n.c<-aggregate(correction~E*C1, data=data, FUN=length)$correction
s.c<-aggregate(correction~E*C1, data=data, FUN=sd)$correction
other<-aggregate(correction~E*C1, data=data, FUN=mean)
#2014 F2 thorax confidence intervals
conf.c<-data.frame(sym.dist=other$E, nathost=other$C1, mean=m.c,reps=n.c,sd=s.c, upper=(m.c+s.c/sqrt(n.c)), lower=(m.c-s.c/sqrt(n.c)))
#don't plot rows with <5 datapoints; just plot the data.
#conf.c[2,]<-NA
#conf.c[4,]<-NA
#conf.c[10,]<-NA
#conf.c[12,]<-NA
#conf.c[14,]<-NA
#best fit ys
# Model-predicted endpoints of the best-fit lines at the extreme centered
# distances (-1.92 and 2.17) for each natal host.
y1<-max(new.data$pred[new.data$n.host==-1 & round(new.data$sym.dist, digits=2)==-1.92])
y2<-max(new.data$pred[new.data$n.host==-1 & round(new.data$sym.dist, digits=2)==2.17])
y3<-max(new.data$pred[new.data$n.host==1 & round(new.data$sym.dist, digits=2)==-1.92])
y4<-max(new.data$pred[new.data$n.host==1 & round(new.data$sym.dist, digits=2)==2.17])
# Render the publication figure (Fig. 5) as EPS into the manuscript folder.
setwd("/Users/Meredith/Desktop/Documents/publications/Plant defense fitness consequences/American Naturalist submission/Figures")
setEPS()
postscript(file="Cenzer2.fig.5.eps", width=7, height=4.7)
#plot beak length corrected
#to separate sym sites
#conf.c$sym.dist[1]<-conf.c$sym.dist[1]+0.01
#conf.c$sym.dist[9]<-conf.c$sym.dist[9]+0.01
par(mai=c(.5, .5, .2, .1), ps=8)
# Cell means; open triangle / circle distinguish the two natal hosts.
plot(conf.c$mean~conf.c$sym.dist, pch=c(2,1)[as.factor(conf.c$nathost)], ylab="", xlab="", xaxt='n', yaxt='n', ylim=c(0.65, 0.82), cex=0.7)
#points(data$correction[round(data$E, digits=2)==-1.36]~data$E[round(data$E, digits=2)==-1.36], cex=0.7, pch=c(1,2)[as.factor(data$C)])
#points(data$correction[round(data$E, digits=2)==0.44]~data$E[round(data$E, digits=2)==0.44], cex=0.7, pch=c(2,2,1))
#points(new.data$pred~new.data$sym.dist, pch=c(1,2)[as.factor(new.data$n.hostK)], cex=0.7)
# Standard-error whiskers drawn as vertical segments per cell.
for(n in 1:length(conf.c$sd)){
	lines(x=c(conf.c$sym.dist[n],conf.c$sym.dist[n]), y=c((conf.c$upper[n]),(conf.c$lower[n])))
}
#best fit lines?
# Model-predicted trend lines (dashed = K. elegans, dotted = C. corindum),
# drawn between the extreme centered distances computed above.
lines(x=c(-1.9246718, 2.1650282), y=c(y1, y2), lty=2)
lines(x=c(-1.9246718, 2.1650282), y=c(y3, y4), lty=3)
title(ylab="Corrected log(beak length)", line=1.4)
title(xlab="Distance from sympatric zone (degrees of latitude)", line=1.3)
# Custom axes: blank tick labels from axis(), text placed with mtext() so
# the x axis reads in uncentered degrees (0-4).
axis(side=1, at=seq(-2, 2, by=1), labels=c("", "", "", "", ""))
mtext(c("0", "1", "2", "3", "4"), side=1, line=.4, at=seq(-2, 2, by=1))
axis(side=2, at=seq(0.65, 0.80, by=0.05), labels=c( "", "", "", ""))
mtext(c("0.65", "0.70", "0.75", "0.80"), side=2, line=.5, at=seq(0.65, .8, by=0.05))
legend(1.7, 0.825, legend=c(expression(italic("K. elegans")), expression(italic("C. corindum"))), pch=c(2,1), pt.cex=.7, cex=.9)
dev.off()
|
bd72a82dd2179da9325bbdd4610eb6097341a230 | 72e6844bf2dcf570c62c54f0aa69405d3368d885 | /plot4.R | 353e070ed7f7a045cca5aa6971dbc8bec2d67dee | [] | no_license | alcanta/ExData_Plotting1 | 76a555d38f451d0d0374fa5113ecb7fbc0ceb4f2 | 25e6de5014cfea906384406c6db1faf63aa92b7f | refs/heads/master | 2021-01-18T10:04:27.325887 | 2014-06-06T20:46:15 | 2014-06-06T20:46:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,414 | r | plot4.R | #downloaded and unzipped the file manually on the local computer
#the script reads the data from the unzipped text file
#uses sqldf package for reading the data
library(sqldf)
# SQL filter applied while reading: keep only the two target days.
# NOTE(review): this dataset encodes missing values as "?"; rows kept here
# are assumed to have numeric readings for the plotted columns -- confirm.
select_statement<- "select * from file where Date='1/2/2007' or Date='2/2/2007' "
#read data into dataframe df
df <- read.csv.sql("household_power_consumption.txt",select_statement,sep=";")
#create a vector of R objects representing calendar dates and times --(POSIXlt class)
#these objects will be used on the x axis of the plot
d <- paste(df$Date,df$Time)
date_objects <-strptime(d,"%d/%m/%Y %H:%M:%S")
png("plot4.png",width = 480, height = 480, units = "px")
# modifies the mfrow parameter to be able to to put 4 plots into a single image
par(mfrow = c(2,2))
#plot 1: global active power over time (unlabeled axes, per assignment spec)
plot(date_objects,df$Global_active_power,type="l",xlab="",ylab="")
#plot 2: voltage over time
plot(date_objects,df$Voltage,type="l",xlab="datetime",ylab="Voltage")
#plot 3: the three sub-metering series overlaid; an empty frame (type="n")
# is drawn first so all three lines share the same axes
plot(date_objects,df$Sub_metering_1,type="n", xlab="",ylab="Energy sub metering")
points(date_objects,df$Sub_metering_1,type="l")
points(date_objects,df$Sub_metering_2,col="red",type="l")
points(date_objects,df$Sub_metering_3,col="blue",type="l")
# NOTE(review): pch = "_____" only uses plotting characters, not a line
# legend; lty = 1 with lwd would be the conventional way to key line series.
legend("topright",pch = "_____",pt.cex=1,cex=0.6,col=c("black","red",'blue'),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
#plot 4: global reactive power over time
plot(date_objects,df$Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")
dev.off()
|
b25575d97e3a22d0d3604aebbc4df3ca3042db57 | cf9109f6e0a2c494bdc2f1f027ee2cd60cefc30f | /build_datasets_without_missing_values.R | 93c56680ccbe9c92682c671eb6515e8043d58d6e | [] | no_license | ACC1029/MATH-571-HAI-Medicare | da717f9f2cae53038f87df44c620adcabd3c6172 | eeb00d846af5c0501490f49615a1bb59787687c5 | refs/heads/master | 2021-04-28T01:38:14.108330 | 2018-04-29T04:49:06 | 2018-04-29T04:49:18 | 122,284,276 | 1 | 1 | null | 2018-04-28T20:26:47 | 2018-02-21T02:38:31 | R | UTF-8 | R | false | false | 1,053 | r | build_datasets_without_missing_values.R | # R script to create reduced datasets, minus all hospitals too small
# Run HAI exploration script and all its dependencies before this one if you're in a fresh session.
## Using lack of ANY HAI scores as an easy proxy for "too small".
## This may be incorrect.
#
# BUG FIX: the three filtered subsets below were previously computed but never
# assigned back to the *_nona objects, so the CSVs written at the bottom still
# contained the no-score providers. Each filter result is now assigned.
hosp_gen_info_reduced_nona <-
  hosp_gen_info_reduced[!(hosp_gen_info_reduced$provider_id %in% no_score_providers$provider_id), ]

mspb_reduced_nona <-
  mspb_reduced[!(mspb_reduced$provider_id %in% no_score_providers$provider_id), ]

pay_val_care_reduced_nona <-
  pay_val_care_reduced[!(pay_val_care_reduced$provider_id %in% no_score_providers$provider_id), ]

# Persist the cleaned datasets (readr::write_csv; keep headers, overwrite).
write_csv(hosp_gen_info_reduced_nona, "adjusted_data/hosp_gen_info.csv", na = "NA", append = FALSE, col_names = TRUE)
write_csv(mspb_reduced_nona, "adjusted_data/mspb.csv", na = "NA", append = FALSE, col_names = TRUE)
write_csv(pay_val_care_reduced_nona, "adjusted_data/pay_val_of_care.csv", na = "NA", append = FALSE, col_names = TRUE)
32defccece3f83f7cfc6b524dbaf36061de70e15 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/searchable/examples/boundary.Rd.R | 40448afa3faa8ee23be55a3957d543abe4d06818 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 186 | r | boundary.Rd.R | library(searchable)
### Name: boundary
### Title: Define boundary for pattern matching
### Aliases: boundary endsqwith full partial sentence startswith word
### ** Examples
# -tk
|
e0b742d47855e02d0c15d19d580809850e1cfe25 | a6f4c8c91414d62fad5f8f7f53b1dee9c9d099ee | /R-Portable-Mac/library/proto/demo/proto-vignette.R | 2e7803ec35850387acf3216be4faf8d92313b10a | [
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0"
] | permissive | sdownin/sequencer | 6a2d70777fbd8109e26f126229b5ee10348cf4e7 | 045d0580e673cba6a3bd8ed1a12ff19494bf36fa | refs/heads/master | 2023-08-04T08:06:02.891739 | 2023-08-03T04:07:36 | 2023-08-03T04:07:36 | 221,256,941 | 2 | 1 | CC0-1.0 | 2023-02-04T15:06:14 | 2019-11-12T16:00:50 | C++ | UTF-8 | R | false | false | 4,708 | r | proto-vignette.R |
# code from proto vignette
# Demonstrates prototype-based (class-less) OO with the proto package:
# objects delegate to their parent, and methods receive the object as `.`.
library(proto)
# --- basic delegation: child overrides x, inherits add() from parent ---
addProto <- proto( x = rnorm(5), add = function(.) sum(.$x) )
addProto$add()
addProto2 <- addProto$proto( x = 1:5 )
addProto2$add()
# Slots and methods can be added to an existing object after creation.
addProto2$y <- seq(2,10,2)
addProto2$x <- 1:10
addProto2$add3 <- function(., z) sum(.$x) + sum(.$y) + sum(z)
addProto2$add()
addProto2$add3(c(2,3,5))
addProto2$y
# Override add() while still calling the parent's version (the commented
# line shows the .super spelling; the live line uses parent.env directly).
# addProto2$add <- function(.) .super$add(.) + sum(.$y)
addProto2$add <- function(.) parent.env(addProto2)$add(.) + sum(.$y)
addProto2a <- addProto$proto(x = 1:5)
addProto2a$add()
# --- "class-like" usage: a parent object acting as a constructor ---
Add <- proto(
add = function(.) sum(.$x),
new = function(., x) .$proto(x=x)
)
add1 <- Add$new(x = 1:5)
add1$add()
add2 <- Add$new(x = 1:10)
add2$add()
# Subclassing: Logadd inherits add() and new(), adds logadd().
Logadd <- Add$proto( logadd = function(.) log( .$add() ) )
logadd1 <- Logadd$new(1:5)
logadd1$logadd()
# --- introspection helpers provided by every proto object ---
addProto$ls()
addProto$str()
addProto$print()
addProto$as.list()
addProto2a$parent.env()
addProto$eapply(length) # show length of each component
addProto$identical(addProto2)
# --- a larger example: simulated temperature series with smoothing,
# plotting and residuals bundled together in one object ---
oo <- proto(expr = {
x <- rnorm(251, 0, 0.15)
x <- filter(x, c(1.2, -0.05, -0.18), method = "recursive")
x <- unclass(x[-seq(100)]) * 2 + 20
tt <- seq(12200, length = length(x))
..x.smooth <- NA
xlab <- "Time (days)"
ylab <- "Temp (deg C)"
pch <- "."
col <- rep("black",2)
smooth <- function(., ...) {
.$..x.smooth <- supsmu(.$tt, .$x, ...)$y
}
plot <- function(.) with(., {
graphics::plot(tt, x, pch = pch, xlab = xlab,
ylab = ylab, col = col[1])
if (!is.na(..x.smooth[1]))
lines(tt, ..x.smooth, col=col[2])
})
residuals <- function(.) with(., {
data.frame(t = tt, y = x - ..x.smooth)
})
})
## inspect the object
oo
oo$ls(all.names = TRUE)
oo$pch
par(mfrow=c(1,2))
# oo$plot()
## set a slot
oo$pch <- 20
## smooth curve and plot
oo$smooth()
oo$plot()
## plot and analyse residuals, stored in the object
plot(oo$residuals(), type="l")
# hist(oo$residuals()$y)
# acf(oo$residuals()$y)
# Child object reuses the parent's methods on the residual series.
oo.res <- oo$proto( pch = "-", x = oo$residuals()$y,
ylab = "Residuals deg K" )
par(mfrow=c(1,1))
oo.res$smooth()
oo.res$plot()
## change date format of the parent
oo$tt <- oo$tt + as.Date("1970-01-01")
oo$xlab <- format(oo.res$tt[1], "%Y")
## change colors
oo$col <- c("blue", "red")
# Convenience method: smooth then plot in one call.
oo$splot <- function(., ...) {
.$smooth(...)
.$plot()
}
## the new function is now available to all children of oo
par(mfrow=c(1,2))
oo$splot(bass=2)
oo.res$splot()
## and at last we change the data and repeat the analysis
oos <- oo$proto( expr = {
tt <- seq(0,4*pi, length=1000)
x <- sin(tt) + rnorm(tt, 0, .2)
})
oos$splot()
#plot(oos$residuals())
# as.proto() clones oo.res's slots into a fresh object parented at oos.
oos.res <- as.proto( oo.res$as.list(), parent = oos )
oos.res$x <- oos$residuals()$y
oos.res$splot()
par(mfrow=c(1,2))
oos$splot()
oos.res$splot()
# --- confidence interval for a correlation: Fisher z-transform version ---
longley.ci <- proto( expr = {
data(longley)
x <- longley[,c("GNP", "Unemployed")]
n <- nrow(x)
pp <- c(.025, .975)
corx <- cor(x)[1,2]
ci <- function(.)
(.$CI <- tanh( atanh(.$corx) + qnorm(.$pp)/sqrt(.$n-3) ))
})
# Bootstrap variant: same interface, ci() overridden to resample rows.
longley.ci.boot <- longley.ci$proto({
N <- 1000
ci <- function(.) {
corx <- function(idx) cor(.$x[idx,])[1,2]
samp <- replicate(.$N, corx(sample(.$n, replace = TRUE)))
(.$CI <- quantile(samp, .$pp))
}
})
longley.ci$ci()
longley.ci.boot$ci()
longley.ci.boot$proto(N=4000)$ci()
# --- copy-on-write delegation demonstrated with a tiny tree ---
# do not need left <- right <- NULL anymore in leaf
# also eliminated right <- NULL in parent
tree <- proto(expr = {
incr <- function(., val) .$value <- .$value + val
..Name <- "root"
value <- 3
..left <- proto( expr = { ..Name = "leaf" })
})
cat("root:", tree$value, "leaf:", tree$..left$value, "\n")
# incrementing root increments leaf too
tree$incr(1)
cat("root:", tree$value, "leaf:", tree$..left$value, "\n")
# incrementing leaf gives it its own value field
# so now incrementing root does not increment leaf
tree$..left$incr(10)
cat("root:", tree$value, "leaf:", tree$..left$value, "\n")
tree$incr(5)
cat("root:", tree$value, "leaf:", tree$..left$value, "\n")
# --- linear equation solver: evaluates eq at x = 1i and reads off the root ---
lineq <- proto(eq = "6*x + 12 - 10*x/4 = 2*x",
solve = function(.) {
e <- eval(parse(text=paste(sub("=", "-(", .$eq), ")")), list(x = 1i))
-Re(e)/Im(e)
},
print = function(.) cat("Equation:", .$eq, "Solution:", .$solve(), "\n")
)
lineq$print()
lineq2 <- lineq$proto(eq = "2*x = 7*x-12+x")
lineq2$print()
# Turn lineq into a constructor-style parent: drop the instance slot eq
# and add a new() method.
Lineq <- lineq
rm(eq, envir = Lineq)
Lineq$new <- function(., eq) proto(., eq = eq)
lineq3 <- Lineq$new("3*x=6")
lineq3$print()
|
d234bc71bde5d0b51b71e4f36d2dfce9c9dbe897 | f16341116353bd31afe205e192df3ae67a82b540 | /RLibrary/common.functions/LoadLib.R | 38dbf5035817a253b51961b2ba9311a25f75fa05 | [] | no_license | south-central-climate-science-center/OU-FUDGE | 6b5ceb5eaf07bc0faabb4a36288934d6112df809 | a5454a3ec8b6e0d314776e3c4b8c37123d0a4f6c | refs/heads/master | 2021-01-10T03:35:23.260314 | 2017-03-28T18:34:52 | 2017-03-28T18:34:52 | 47,719,764 | 0 | 3 | null | 2015-12-16T17:38:43 | 2015-12-09T21:22:46 | R | UTF-8 | R | false | false | 596 | r | LoadLib.R | # Function to load common and DS method specific libraries
# Automatically throws an error if libraries are not available: library()
# errors on a missing package (require() would only warn and return FALSE).
#
# Loads the libraries common to all downscaling methods, plus any libraries
# specific to the configured method. ncdf4 and friends are required by every
# method because datasets used in FUDGE are netCDF only at this time.
#
# NOTE(review): relies on the run-parameter list `rp` (with `rp$ds.method`)
# being defined in the calling environment -- confirm it is set before use.
LoadLib <- function(){
  # Common libraries; loaded via character.only so they can be listed once.
  common_libs <- c("ncdf4", "PCICt", "udunits2", "ncdf4.helpers",
                   "RNetCDF", "ncdf.tools", "abind")
  for (lib in common_libs) {
    library(lib, character.only = TRUE)
  }
  # Method-specific libraries.
  if(rp$ds.method=='CDFt'){
    print("Importing CDFt library")
    library(CDFt)
  }else{
    # Typo fix in status message: "specfic" -> "specific".
    print("No method-specific libraries required")
  }
}
94ca0ddebb69544ea9e5cdb536400bba42dc3801 | cc2e7b3f34a6bcc3669d3454a02fd167234eb94f | /Health_Spending/UI_Components/tabs/ChargetoCostRatio_tab.R | 905b2489e35739f364d0f758a2c08900617d5b00 | [] | no_license | asidamo/Health-Spending-and-Outcomes | cfc9533079b7eda63b6c73b1bac2407480524dc2 | 2a35e548dcb9c7ec8b68fa3330629ae83ee5b7ab | refs/heads/master | 2020-04-18T01:11:50.738109 | 2019-02-01T02:43:17 | 2019-02-01T02:43:17 | 167,109,468 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 822 | r | ChargetoCostRatio_tab.R | ChargetoCostRatio_tab<-tabItem(
tabName = "CCR",
fluidRow(
box(
width=12,
column(6, sliderInput(inputId = "ratio",
label = "Value of Ratios",
min(ccr_df$AverageCCR),max(ccr_df$AverageCCR),
value=c(0,2) )),
# select input
# Select variable for y-axis
column(3,selectInput(inputId = "ccr_y",
label = "Charge to Cost Ratio:",
choices = 'AverageCCR' ))
,
# Show a plot scatter plot for States hospitals charge to cost ratios
mainPanel(
width=12,
plotlyOutput("scatterPlot",height = '500px')
)
)
)
)
|
06b2ca151dd5c05b33b58b81082cb2e14520499e | 6da4c044291c1f3636b02eb652382a34b20d215d | /R/processHeader.R | 8dba3ca29190f5e78579ccadb53f0ef13f28921f | [] | no_license | pmbrophy/mspReader | 49ca049782c31ee7c3c92c2d798493ea7fd4810c | c03a5b12b6e619c48d72b08db838849a1289e1df | refs/heads/master | 2022-08-23T02:37:27.855094 | 2020-05-26T17:55:57 | 2020-05-26T17:55:57 | 264,728,112 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,690 | r | processHeader.R | #' Process the header from an msp file
#'
#' @description Top level header processor for msp files. Provide a character
#' vector containing the header lines and the corresponding groupIndex.
#'
#' @param header character vector containing the header lines only
#' @param groupIndex an integer vector the same length as `header` providing a
#' index value that links the `header` information to data chunks (spectral
#' data)
#' @param commentType either `NIST` or `MoNA` - used for comment parsing;
#' defaults to `NIST`
#'
#' @return a fully formatted data.table
#'
.processHeader <- function(header, groupIndex, commentType = c("NIST", "MoNA")){
  # BUG FIX: the previous default `commentType = commentType` was
  # self-referential and raised "promise already under evaluation" whenever
  # the argument was omitted. match.arg() validates and defaults to "NIST".
  commentType <- match.arg(commentType)
  print("processing header chunks")
  # Split each header line on ": "; lines are expected to be "Name: Value"
  # pairs. NOTE(review): a value containing ": " would be truncated to its
  # first segment -- confirm msp values never contain ": ".
  headers <- strsplit(x = header, split = ": ", perl = TRUE)
  # Extract names and values; vapply() is type-stable (unlike sapply()).
  headerNames <- vapply(headers, "[[", character(1), 1)
  headerValues <- vapply(headers, "[[", character(1), 2)
  # Long (index, variable, value) table, then cast wide: one row per group.
  headerDt <- data.table::data.table(index = groupIndex, variable = headerNames, value = headerValues)
  headerDt <- data.table::dcast(data = headerDt, formula = index ~ variable, value.var = "value")
  # Parse the free-text comment column and remove it from the result.
  print("Processing header comment")
  headerDt <- .parseHeaderDt(headerDt = headerDt, commentType = commentType)
  headerDt
}
#' Process header data.table containing comment string
#'
#' @description Wrapper function that takes a data.table containing header
#'   information and a `Comment` column full of text. The `Comment` column is
#'   parsed and a data.table with additional columns is returned. The original
#'   `Comment` column is removed.
#'
#' @param headerDt a data.table returned from .processHeader()
#' @param commentType either `NIST` or `MoNA` - used for comment parsing;
#'   defaults to `NIST`
#'
#' @return a data.table with additional parsed fields from the comment
#'   strings
#'
.parseHeaderDt <- function(headerDt, commentType = c("NIST", "MoNA")){
  # BUG FIX: the previous default `commentType = commentType` was
  # self-referential ("promise already under evaluation" when omitted).
  # match.arg() validates the value and defaults to "NIST"; the trailing
  # stop() branch is kept as a defensive backstop.
  commentType <- match.arg(commentType)
  # Globals to get past R CMD check (data.table non-standard evaluation).
  Comment <- NULL
  Comments <- NULL
  # NIST uses "Comment:"; MoNA uses "Comments:".
  if(commentType == "NIST"){
    # Parse comment, then drop the raw column (":=" modifies by reference).
    commentDt <- .parseCommentVector(comments = headerDt$Comment, indexs = headerDt$index, commentType = commentType)
    headerDt[, Comment := NULL]
  }else if(commentType == "MoNA"){
    commentDt <- .parseCommentVector(comments = headerDt$Comments, indexs = headerDt$index, commentType = commentType)
    headerDt[, Comments := NULL]
  }else{
    stop("Comment type must be either NIST or MoNA")
  }
  # Join the parsed comment fields back onto the header rows by group index.
  headerDt <- data.table::merge.data.table(x = headerDt, y = commentDt, by = "index")
  headerDt
}
|
eb7e662100d5b18f6b5fbf43db4eb5f1a2c3cd7b | 1582167a782db5d8edea97f91545f9d6c613d5f6 | /supplementary_data/tra2/DEXSeq/scripts/DEXSeq_Calculation_TRA2.R | c1349031c1c593e9f7e87e40fbe333886f6fba7f | [] | no_license | wenmm/SUPPA_supplementary_data | d9293028ae5cd33cffa48cde920db89d93b0118c | 877f885c915f521385c321760239cbaae932a0f9 | refs/heads/master | 2020-06-23T12:52:17.254042 | 2018-02-13T10:25:30 | 2018-02-13T10:25:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,044 | r | DEXSeq_Calculation_TRA2.R | #DEXSeq_Calculation_TRA2.R
library("DEXSeq")
# Load the count files created with dexseq_count.py.
# NOTE(review): list.files() without a `pattern` picks up every file in the
# directory, not only count files -- confirm the directory contains nothing
# else.
countFiles = list.files(path="/~/tra2/DEXSeq", full.names = TRUE)
# Sample sheet: 3 knock-down (KD) vs 3 control (CTRL) paired-end replicates;
# row order must match the order of countFiles.
sampleTable = data.frame (row.names = c("SAMPLE_1_REP_1","SAMPLE_1_REP_2","SAMPLE_1_REP_3","SAMPLE_2_REP_1","SAMPLE_2_REP_2","SAMPLE_2_REP_3"),
                          condition = c("KD","KD","KD","CTRL","CTRL","CTRL"),
                          libType = c("paired-end","paired-end","paired-end","paired-end","paired-end","paired-end"))
# Build the DEXSeq dataset; condition:exon is the interaction term tested.
dxd = DEXSeqDataSetFromHTSeq(countFiles,sampleData = sampleTable,design = ~ sample + exon + condition:exon)
# Normalize the samples, because of the different depths.
dxd = estimateSizeFactors( dxd )
# Estimate the dispersion estimates.
dxd = estimateDispersions( dxd )
# Test for differential exon usage and compute per-condition fold changes.
dxd = testForDEU( dxd )
dxd = estimateExonFoldChanges( dxd, fitExpToVar="condition")
dxr1 = DEXSeqResults( dxd )
# MA plot of the results, written to a JPEG on disk.
jpeg("~/tra2/DEXSeq/MA_plot.jpeg")
plotMA( dxr1, cex=0.8 )
dev.off()
# Save the full results table.
write.table(dxr1,file="~/tra2/DEXSeq/Results.txt",sep="\t",quote=FALSE)
|
7044839a87b1fd5921a278623b3bada96bcdc686 | abf19cc11247c66891450598b9b8e8a4c9e3d3f7 | /Biophys/HH_sim.R | 4044b2c0c4ec05de45eca80ed4beaaf7b1fd03f3 | [] | no_license | CSNLWigner/idegrendszeriModellezes | b1a6279ca6f61614c1e4b443ef4132c08c599eda | 7816a08b730551c35ada8169ad1b9f22ef9e5945 | refs/heads/master | 2021-01-23T20:30:14.765660 | 2018-11-19T11:11:10 | 2018-11-19T11:11:10 | 102,862,876 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,411 | r | HH_sim.R | ## simulating the Hodgkin-Huxley equations
# Ensure the deSolve ODE-solver package is available, installing on demand.
# BUG FIX: the previous code compared require()'s logical return value to the
# string 'FALSE' (isa=='FALSE'), which only worked through implicit
# logical-to-character coercion. requireNamespace() is the idiomatic check;
# library() then attaches the package (and errors loudly if install failed).
if (!requireNamespace("deSolve", quietly = TRUE)) {
  install.packages("deSolve")
}
library(deSolve)
#############################################
## definition of constants
source('HH_consts.R')
###############################################################
## the famous Hodgkin-Huxley equations - excitability of the squid giant axon
# Right-hand side of the Hodgkin-Huxley membrane ODEs, in the signature
# expected by deSolve (time, state, parameters) and returning
# list(derivatives, extra outputs). State: v (membrane potential, mV) and
# gating variables m, h, n. Constants (gNa, gK, gL, E.*, cm, I.ext) come
# from HH_consts.R sourced above.
sim.HH <- function(t, state, params){
with(as.list(c(state, params)),{
## voltage-dependent opening and closing of the gates, m, h and n
# alpha/beta rate constants as functions of voltage (standard HH
# parameterisation, resting potential near -65 mV)
am <- .1*(v+40)/(1-exp(-(v+40)/10))
bm <- 4*exp(-(v+65)/18)
ah <- .07*exp(-(v+65)/20)
bh <- 1/(1+exp(-(v+35)/10))
an <- .01*(v+55)/(1-exp(-(v+55)/10))
bn <- .125*exp(-(v+65)/80)
# rate of change
# membrane equation: cm * dv/dt = I.ext - I_Na - I_K - I_leak
dv <- (I.ext(t) - gNa*h*(v-E.Na)*m^3-gK*(v-E.K)*n^4-gL*(v-E.L))/cm
# first order kinetics of the gating variables
dm <- am*(1-m)-bm*m
dh <- ah*(1-h)-bh*h
dn <- an*(1-n)-bn*n
#units: mV, mS, uF, uA
# deSolve convention: first element holds the derivatives in state order;
# the named extra (stim) is recorded alongside the solution.
list(c(dv, dm, dh, dn), stim=I.ext(t))
})
}
###################################################
# this function is used to generate external stimulus (current pulses) for the HH equation
# Set the stimulus amplitude I on all samples of `input` whose time lies
# strictly inside (t1, t2). `input` is a two-column table: column 1 = time
# grid, column 2 = stimulus value. Returns the modified copy; the interval
# endpoints may be given in either order.
set.input <- function(t1, t2, I, input){
  tt <- input[,1]
  if (t1 > t2){
    # allow the interval bounds to be passed in either order
    t3 <- t1
    t1 <- t2
    t2 <- t3
  }
  # interval entirely outside the time grid: nothing to set
  if (t1 < max(tt)) x1 <- min(which(tt > t1)) else return(input)
  if (t2 > min(tt)) x2 <- max(which(tt < t2)) else return(input)
  # BUG FIX: when no sample falls strictly inside (t1, t2), x1 > x2 and the
  # sequence x1:x2 counts DOWN, silently overwriting rows outside the
  # interval. Guard against the empty window explicitly.
  if (x1 > x2) return(input)
  input[x1:x2, 2] <- I
  input
}
|
7b89b5d126fd05fa75cf54b47b9a643965a9c048 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/DPpackage/examples/DProc.Rd.R | 5f793d6daa4cf4aad9e03b1355e66c3d3af501db | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,161 | r | DProc.Rd.R | library(DPpackage)
### Name: DProc
### Title: Semiparametric Bayesian ROC curve analysis using DPM of normals
### Aliases: DProc DProc.default
### Keywords: models nonparametric
### ** Examples
## Not run:
##D ##############################################################
##D # Fertility data example:
##D # The following are Sperm Deformity Index (SDI) values from
##D # semen samples of men in an infertility study. They are
##D # divided into a "condition" present group defined as those
##D # whose partners achieved pregnancy and "condition" absent
##D # where there was no pregnancy.
##D #
##D # Aziz et al. (1996) Sperm deformity index: a reliable
##D # predictor of the outcome of fertilization in vitro.
##D # Fertility and Sterility, 66(6):1000-1008.
##D #
##D ##############################################################
##D
##D "pregnancy"<- c(165, 140, 154, 139, 134, 154, 120, 133,
##D 150, 146, 140, 114, 128, 131, 116, 128,
##D 122, 129, 145, 117, 140, 149, 116, 147,
##D 125, 149, 129, 157, 144, 123, 107, 129,
##D 152, 164, 134, 120, 148, 151, 149, 138,
##D 159, 169, 137, 151, 141, 145, 135, 135,
##D 153, 125, 159, 148, 142, 130, 111, 140,
##D 136, 142, 139, 137, 187, 154, 151, 149,
##D 148, 157, 159, 143, 124, 141, 114, 136,
##D 110, 129, 145, 132, 125, 149, 146, 138,
##D 151, 147, 154, 147, 158, 156, 156, 128,
##D 151, 138, 193, 131, 127, 129, 120, 159,
##D 147, 159, 156, 143, 149, 160, 126, 136,
##D 150, 136, 151, 140, 145, 140, 134, 140,
##D 138, 144, 140, 140)
##D
##D "nopregnancy"<-c(159, 136, 149, 156, 191, 169, 194, 182,
##D 163, 152, 145, 176, 122, 141, 172, 162,
##D 165, 184, 239, 178, 178, 164, 185, 154,
##D 164, 140, 207, 214, 165, 183, 218, 142,
##D 161, 168, 181, 162, 166, 150, 205, 163,
##D 166, 176)
##D
##D
##D #########################################################
##D # Estimating the ROC curve from the data
##D #########################################################
##D
##D # Initial state
##D
##D statex <- NULL
##D statey <- NULL
##D
##D # Prior information
##D
##D priorx <-list(alpha=10,m2=rep(0,1),
##D s2=diag(100000,1),
##D psiinv2=solve(diag(5,1)),
##D nu1=6,nu2=4,
##D tau1=1,tau2=100)
##D
##D priory <-list(alpha=20,m2=rep(0,1),
##D s2=diag(100000,1),
##D psiinv2=solve(diag(2,1)),
##D nu1=6,nu2=4,
##D tau1=1,tau2=100)
##D
##D # MCMC parameters
##D
##D nburn<-1000
##D nsave<-2000
##D nskip<-0
##D ndisplay<-100
##D
##D mcmcx <- list(nburn=nburn,nsave=nsave,nskip=nskip,
##D ndisplay=ndisplay)
##D mcmcy <- mcmcx
##D
##D # Estimating the ROC
##D
##D fit1<-DProc(x=pregnancy,y=nopregnancy,priorx=priorx,priory=priory,
##D mcmcx=mcmcx,mcmcy=mcmcy,statex=statex,statey=statey,
##D statusx=TRUE,statusy=TRUE)
##D fit1
##D plot(fit1)
##D
##D
##D #########################################################
##D # Estimating the ROC curve from DPdensity objects
##D #########################################################
##D
##D fitx<-DPdensity(y=pregnancy,prior=priorx,mcmc=mcmcx,
##D state=statex,status=TRUE)
##D
##D fity<-DPdensity(y=nopregnancy,prior=priory,mcmc=mcmcy,
##D state=statey,status=TRUE)
##D
##D # Estimating the ROC
##D
##D fit2<-DProc(fitx=fitx,fity=fity)
##D
##D fit2
##D plot(fit2)
##D
## End(Not run)
|
c51bee4698823d638d96f5ff6b11aa45f3e9316f | ce8d13de6aa47617809c5fc4d83ccd961b310104 | /man/partial_dep.obs.Rd | cae37d8fd9fb5efc236f6c1ae68bb539bacca4cb | [] | no_license | BruceZhaoR/Laurae | 2c701c1ac4812406f09b50e1d80dd33a3ff35327 | 460ae3ad637f53fbde6d87b7b9b04ac05719a169 | refs/heads/master | 2021-01-22T12:24:50.084103 | 2017-03-24T19:35:47 | 2017-03-24T19:35:47 | 92,722,642 | 0 | 1 | null | 2017-05-29T08:51:26 | 2017-05-29T08:51:26 | null | UTF-8 | R | false | true | 6,271 | rd | partial_dep.obs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partial_dep.obs.R
\name{partial_dep.obs}
\alias{partial_dep.obs}
\title{Partial Dependency Observation, Contour (single observation)}
\usage{
partial_dep.obs(model, predictor, data, observation, column,
accuracy = min(length(data), 100), safeguard = TRUE,
safeguard_val = 1048576, exact_only = TRUE, label_name = "Target",
comparator_name = "Evolution")
}
\arguments{
\item{model}{Type: unknown. The model to pass to \code{predictor}.}
\item{predictor}{Type: function(model, data). The predictor function which takes a model and data as inputs, and return predictions. \code{data} is provided as data.table for maximum performance.}
\item{data}{Type: data.table (mandatory). The data we need to use to sample from for the partial dependency with \code{observation}.}
\item{observation}{Type: data.table (mandatory). The observation we want to get partial dependence from. It is mandatory to use a data.table to retain column names.}
\item{column}{Type: character. The column we want partial dependence from. You can specify two or more \code{column} as a vector, but it is highly not recommended to go for a lot of columns because the complexity is exponential, think as \code{O^length(column)}. For instance, \code{accuracy = 100} and \code{length(column) = 10} leads to \code{1e+20} theoretical observations, which will explode the memory of any computer.}
\item{accuracy}{Type: integer. The accuracy of the partial dependence, expressed as the number of sampled points per percentile of the \code{column} from \code{data}. Defaults to \code{min(length(data), 100)}, which means either 100 samples or all samples of \code{data} if the latter has fewer than 100 observations.}
\item{safeguard}{Type: logical. Whether to cap the \code{accuracy^length(column)} value at a maximum of \code{safeguard_val} observations. If \code{TRUE}, it prevents that value from going over \code{safeguard_val} (and adjusts the \code{accuracy} value accordingly). Note that if the safeguard is disabled, you might end up with fewer observations than you expected initially (cleaning is performed for uniqueness).}
\item{safeguard_val}{Type: integer. The maximum number of observations allowed when \code{safeguard} is \code{TRUE}. Defaults to \code{1048576}, which is \code{4^10}.}
\item{exact_only}{Type: logical. Whether to select only exact values for data sampling. Defaults to \code{TRUE}.}
\item{label_name}{Type: character. The column name given to the predicted values in the output table. Defaults to \code{"Target"}, this assumes you do not have a column called \code{"Target"} in your \code{column} vector.}
\item{comparator_name}{Type: character. The column name given to the evolution value (\code{"Increase"}, \code{"Fixed"}, \code{"Decrease"}) in the output table. Defaults to \code{"Evolution"}, this assumes you do not have a column called \code{"Evolution"} in your \code{column} vector.}
}
\value{
A list with different elements: \code{grid_init} for the grid before expansion, \code{grid_exp} for the expanded grid with predictions, \code{preds} for the predictions, and \code{obs} for the original prediction on observation.
}
\description{
This function computes partial dependency of a supervised machine learning model over a range of values for a single observation. Does not work for multiclass problems! Check \code{predictor_xgb} to get an example of \code{predictor} to use (so you can create your own).
}
\examples{
\dontrun{
# Let's load a dummy dataset
data(mtcars)
setDT(mtcars) # Transform to data.table for easier manipulation
# We train a xgboost model on 31 observations, keep last to analyze later
set.seed(0)
xgboost_model <- xgboost(data = data.matrix(mtcars[-32, -1]),
label = mtcars$mpg[-32],
nrounds = 20)
# Perform partial dependence grid prediction to analyze the behavior of the 32th observation
# We want to check how it behaves with:
# => horsepower (hp)
# => number of cylinders (cyl)
# => transmission (am)
# => number of carburetors (carb)
preds_partial <- partial_dep.obs(model = xgboost_model,
predictor = predictor_xgb, # Default for xgboost
data = mtcars[-32, -1], # train data = 31 first observations
observation = mtcars[32, -1], # 32th observation to analyze
column = c("hp", "cyl", "am", "carb"),
accuracy = 20, # Up to 20 unique values per column
safeguard = TRUE, # Prevent high memory usage
safeguard_val = 1048576, # No more than 1048576 observations,
exact_only = TRUE, # Not allowing approximations,
label_name = "mpg", # Label is supposed "mpg"
comparator_name = "evo") # Comparator +/-/eq for analysis
# How many observations? 300
nrow(preds_partial$grid_exp)
# How many observations analyzed per column? hp=10, cyl=3, am=2, carb=5
summary(preds_partial$grid_init)
# When cyl decreases, mpg increases!
partial_dep.plot(grid_data = preds_partial$grid_exp,
backend = "tableplot",
label_name = "mpg",
comparator_name = "evo")
# Another way of plotting... hp/mpg relationship is not obvious
partial_dep.plot(grid_data = preds_partial$grid_exp,
backend = "car",
label_name = "mpg",
comparator_name = "evo")
# Do NOT do this on >1k samples, this will kill RStudio
# Histograms make it obvious when decrease/increase happens.
partial_dep.plot(grid_data = preds_partial$grid_exp,
backend = "plotly",
label_name = "mpg",
comparator_name = "evo")
# Get statistics to analyze fast
partial_dep.feature(preds_partial$grid_exp, metric = "emp", in_depth = FALSE)
# Get statistics to analyze, but is very slow when there is large data
# Note: unreliable for large amount of observations due to asymptotic infinites
partial_dep.feature(preds_partial$grid_exp, metric = "emp", in_depth = TRUE)
}
}
|
0ef16dc60ec7198f556fa1336fd693a0f4f2ab01 | 004663c606c7b01ea02c520c93c2e678c8489bff | /Ex_data/plot1.R | ff4363b0c92f89a690201b95929fe238daeaef74 | [] | no_license | unniks2/datasciencecoursera | 2ea75af7364c81d0d10d3927332292d3d468828c | a357a986e47033941a1d0959be45133a32e36cb2 | refs/heads/master | 2020-05-18T17:25:05.781591 | 2015-12-12T11:03:41 | 2015-12-12T11:03:41 | 42,513,344 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 431 | r | plot1.R | library(lubridate)
library(dplyr)
read.table("hp",header=TRUE,sep = ";",colClasses = "character")
date<-dmy(c("01-02-2007","02-02-2007"))
final<-filter(hp,ymd(hp$date)==date)
final$datetime<-paste(final$Date,final$Time)
final$datetime<-strptime(final$datetime,"%d/%m/%Y %H:%M:%S")
hist(as.numeric(final$Global_active_power),col="red",xlab = "Global_active_power")
plot(final$datetime,final$Global_active_power,pch=20,type = "l")
|
0d15e2119dfcce74a4962237410256cb21bc036a | 52c521885d3f6b652bc3a54f20d01a45908176f3 | /man/cartografia.Rd | 9036deff2bd0c58f175d97ab3e67d351cc7e061e | [] | no_license | pcorpas/medear | 7e21133984cb219f317f22148f725cf65f9aa6b0 | e9f957f7cce4e303688f3aa655e576a75d5f3ae9 | refs/heads/master | 2021-05-11T12:27:12.392553 | 2018-01-15T16:50:59 | 2018-01-15T16:50:59 | 117,659,182 | 0 | 1 | null | 2018-01-16T08:58:39 | 2018-01-16T08:58:39 | null | UTF-8 | R | false | true | 1,681 | rd | cartografia.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\encoding{UTF-8}
\name{cartografia}
\alias{cartografia}
\title{Cartografia por seccion censal para las ciudades MEDEA3}
\format{Un objeto de clase \code{SpatialPoligonsDataFrame}, donde los datos
(\code{cartografia@data}) tienen clase \code{data.frame} y
\code{cartografia_ine}, donde cada fila es una sección censal y que cuenta
con 13 columnas: \describe{ \item{seccion}{Cadena de 10 caracteres con el
código de sección censal (incluye provincia, municipio, distrito y
sección).} \item{codmuni}{Cadena de 5 caracteres con el código INE del
municipio.} \item{NPRO}{Nombre de la provincia.} \item{NCA}{Nombre de la
comunidad autónoma.} \item{NMUN}{Nombre del municipio.}
\item{geometry}{Columna de tipo lista con la geometría asociada a cada
sección censal.}}}
\usage{
cartografia
}
\description{
Contiene la cartografía por sección censal tal cual puede ser
utilizada por el paquete \code{sp}.
}
\examples{
\dontrun{
library(medear)
library(sp)
data(cartografia)
# Representación de los secciones censales de Álava
plot(cartografia[substring(cartografia$seccion, 1, 5) == "01059", ])
# Representación de los secciones censales de Álava, según distritos.
distritos <- substring(cartografia[substring(cartografia$CUSEC, 1, 5) == "01059", ]$CUSEC, 6, 7)
plot(cartografia[substring(cartografia$CUSEC, 1, 5) == "01059", ], col = as.numeric(distritos))
}
}
\references{
\url{http://www.ine.es/}{ Sitio web del INE}.
\url{http://www.ine.es/censos2011_datos/cen11_datos_resultados_seccen.htm}{Cartografía}.
}
\keyword{datasets}
|
9bb19c09348c4c33b6fd76f704dacce031c63138 | 360df3c6d013b7a9423b65d1fac0172bbbcf73ca | /FDA_Pesticide_Glossary/tetrahydrophthalimid.R | 00997c7b2c523f591847bdde1be94cd9750d721b | [
"MIT"
] | permissive | andrewdefries/andrewdefries.github.io | 026aad7bd35d29d60d9746039dd7a516ad6c215f | d84f2c21f06c40b7ec49512a4fb13b4246f92209 | refs/heads/master | 2016-09-06T01:44:48.290950 | 2015-05-01T17:19:42 | 2015-05-01T17:19:42 | 17,783,203 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 276 | r | tetrahydrophthalimid.R | library("knitr")
library("rgl")
# Alternative two-step pipeline kept for reference: knit to .md, convert to
# HTML, then optionally render a PDF via pandoc.
#knit("tetrahydrophthalimid.Rmd")
#markdownToHTML('tetrahydrophthalimid.md', 'tetrahydrophthalimid.html', options=c("use_xhml"))
#system("pandoc -s tetrahydrophthalimid.html -o tetrahydrophthalimid.pdf")
# Render the R Markdown source straight to HTML in one step.
knit2html('tetrahydrophthalimid.Rmd')
|
a689e8bafdcd35c727dae5e516c569cbba22ed47 | ce19634de1b7bdbfa8aa44ff3b6645752117311c | /tic.R | 276682d0c47d05c84b985edfc10dbed81f4e2a0b | [
"MIT"
] | permissive | christophM/iml | 465f61e8e72fc3d5227584335d0b249de59e1b43 | 9e5d74968376ddb3440589aceb6ef85ed76fac6d | refs/heads/main | 2023-08-31T04:44:27.122158 | 2023-05-28T13:10:30 | 2023-05-28T13:10:30 | 108,261,564 | 487 | 100 | NOASSERTION | 2023-09-06T13:50:39 | 2017-10-25T11:36:53 | R | UTF-8 | R | false | false | 334 | r | tic.R | # installs dependencies, runs R CMD check, runs covr::codecov()
# CI configuration for the {tic} package: declares the pipeline steps.
# Per the file header: installs dependencies, runs R CMD check, runs covr.
do_package_checks()
# During the "install" stage, set up the Python toolchain needed by the
# keras / reticulate code; add_code_step() records these calls (presumably
# to be executed later by tic on the CI worker, not when this file is sourced).
get_stage("install") %>%
  add_code_step(reticulate::install_miniconda()) %>%
  add_code_step(keras::install_keras())
if (ci_on_ghactions() && ci_has_env("BUILD_PKGDOWN")) {
  # creates pkgdown site and pushes to gh-pages branch
  do_pkgdown()
}
|
76ee8b701feb09cfc84fd014f4b5a0ade27b6e75 | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googlestoragev1.auto/man/Bucket.lifecycle.rule.condition.Rd | dc269a10d32ab31366285d9702a8e0c0dd635d96 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,414 | rd | Bucket.lifecycle.rule.condition.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/storage_objects.R
\name{Bucket.lifecycle.rule.condition}
\alias{Bucket.lifecycle.rule.condition}
\title{Bucket.lifecycle.rule.condition Object}
\usage{
Bucket.lifecycle.rule.condition(age = NULL, createdBefore = NULL,
isLive = NULL, matchesStorageClass = NULL, numNewerVersions = NULL)
}
\arguments{
\item{age}{Age of an object (in days)}
\item{createdBefore}{A date in RFC 3339 format with only the date part (for instance, '2013-01-15')}
\item{isLive}{Relevant only for versioned objects}
\item{matchesStorageClass}{Objects having any of the storage classes specified by this condition will be matched}
\item{numNewerVersions}{Relevant only for versioned objects}
}
\value{
Bucket.lifecycle.rule.condition object
}
\description{
Bucket.lifecycle.rule.condition Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The condition(s) under which the action will be taken.
}
\seealso{
Other Bucket functions: \code{\link{Bucket.cors}},
\code{\link{Bucket.lifecycle.rule.action}},
\code{\link{Bucket.lifecycle.rule}},
\code{\link{Bucket.lifecycle}},
\code{\link{Bucket.logging}}, \code{\link{Bucket.owner}},
\code{\link{Bucket.versioning}},
\code{\link{Bucket.website}}, \code{\link{Bucket}},
\code{\link{buckets.insert}},
\code{\link{buckets.patch}}, \code{\link{buckets.update}}
}
|
31949f331e95deab8c6747cf06866095422502bc | dec0927e9512eeb4b9898f52557e91bd47b3c3d6 | /loops_YRIvCebu_boxplots.R | e2d23ae08e231abba37f4ca1190962e71478a483 | [] | no_license | Xudeh/Running-PrediXcan | b85335e955d3a633690a3431cbcd7838744d1411 | 610158bc5911023d584a4723df4dcd3b28a9021b | refs/heads/master | 2022-02-12T10:25:44.213605 | 2017-08-08T14:22:20 | 2017-08-08T14:22:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,627 | r | loops_YRIvCebu_boxplots.R | #goal: make boxplots for a tissue and a gene, with adjacent "YRI" and "Cebu"
# Infix string-concatenation helper: "a" %&% "b" == "ab" (vectorized like paste0).
# Fixed: top-level `=` assignment -> `<-`; paste(..., sep = "") -> paste0().
"%&%" <- function(a, b) paste0(a, b)
library(dplyr)
# Significant genes and tissues identified upstream (one identifier per line).
sigGenes <- read.table('/home/angela/compare/sigGenes.txt')
sigGenes <- sigGenes$V1
sigTiss <- read.table("/home/angela/compare/sigTiss.txt")
sigTiss <- sigTiss$V1
# For every significant tissue/gene pair, draw side-by-side boxplots of
# PrediXcan predicted expression in the Cebu and YRI cohorts, saved as PNGs.
for(i in sigTiss){
  for(j in sigGenes){
    tryCatch({
    # Cebu cohort: predicted expression for gene j in tissue i, joined to
    # phenotype data by individual ID; rows with missing values are dropped.
    x <- read.table("/home/angela/px_cebu_chol/PrediXcan/" %&% i %&% "/predicted_expression.txt", header=T)
    keep <- c("IID", j)
    x <- x[keep]
    x <- transform(x, IID=as.character(IID))
    phenoCebu <- read.table("/home/angela/px_cebu_chol/GWAS/new_pheno.txt",header=T)
    phenoCebu <- transform(phenoCebu, IID=as.character(IID))
    x=left_join(x,phenoCebu,by='IID')
    x=na.omit(x)
    colnames(x)[2] <- "gene"
    # YRI cohort: same gene/tissue, different PrediXcan output directory.
    y <- read.table("/home/angela/px_yri_chol/PrediXcan/SangerImpOutput/PX_output/" %&% i %&% "/predicted_expression.txt",header=T)
    y <- y[keep]
    y <- transform(y, IID=as.character(IID))
    phenoYRI <- read.table("/home/angela/px_yri_chol/GWAS/Phenotypes/prunedPheno.txt", header = T)
    phenoYRI <- transform(phenoYRI, IID=as.character(IID))
    y <- left_join(y, phenoYRI, by = "IID")
    y <- na.omit(y)
    colnames(y)[2] <- "gene"
    # One PNG per tissue/gene pair; the shared ylim puts both cohorts on scale.
    png(file = "/home/angela/compare/boxplots/" %&% i %&% "/" %&% j %&% ".png")
    title <- paste(j, "in", i)
    boxplot(x$gene, y$gene, ylim = range(x$gene, y$gene), names = c("Cebu", "YRI"), main = title, xlab = "Population", ylab = "Expression Level")
    dev.off()
    }, error = function(e){}) #skips errors aka genes that are missing from tissues
  }
}
|
03dc17dd07aff9bf92d7ee6c8f19760dbf946ba6 | 243491156eee732d9b0767775caa0026879f68d1 | /plot4.R | 248f4c5e3703824ebb11e455e4af04061f7fa949 | [] | no_license | Pu-Ting/ExData_Plotting1 | 43a12a40e4f2839ebbe5e2dc5b3808bee8b46150 | afbacffa4837319f3a13680bc9fa52c21e515c4e | refs/heads/master | 2022-11-10T18:32:06.951414 | 2020-06-22T02:26:58 | 2020-06-22T02:26:58 | 273,986,985 | 0 | 0 | null | 2020-06-21T21:33:36 | 2020-06-21T21:33:35 | null | UTF-8 | R | false | false | 882 | r | plot4.R | > data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
+ nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
# NOTE(review): these lines were pasted from an interactive session and still
# carried "> " prompts, which made the file unparseable when sourced.
# The prompts are removed below; the statements themselves are unchanged.
data1 <- subset(data_full, Date %in% c("1/2/2007","2/2/2007"))
data1$Date <- as.Date(data1$Date, format="%d/%m/%Y")
# Combine date and time into a single POSIXct timestamp for the x-axis.
datetime <- paste(as.Date(data1$Date), data1$Time)
data1$Datetime <- as.POSIXct(datetime)
# 2x2 grid of time-series panels.
par(mfrow = c(2,2), mar = c(4,4,4,4))
with(data1, plot(Global_active_power~Datetime, type="l",ylab="Global Active Power", xlab="", cex.lab=0.8))
with(data1, plot(Voltage~Datetime, type="l",ylab="Voltage(volt)", xlab="", cex.lab=0.8))
# NOTE(review): this panel plots Sub_metering_1 but labels the axis
# "Global Active Power (killowatts)" -- confirm the intended y-label.
with(data1, plot(Sub_metering_1~Datetime, type="l",ylab="Global Active Power (killowatts)", xlab="", cex.lab=0.8))
with(data1, plot(Global_reactive_power~Datetime, type="l",ylab="Global_Reactive_Power", xlab="", cex.lab=0.8))
|
9ebab813592c8e30e20500916ec2b10027db96c6 | 2656550952bf1c69735a399f3285ada7da4fea4e | /lab4/lab4/man/resid.linreg.Rd | 7124f908a1f48c5e7c53b1a744239b3d622f1882 | [] | no_license | Raikao/R_programming | fb32b4d3e45e8db5d79f8583643f7866700845e6 | a4a307fd3a36ede143f8ef06245a0f205ca5beda | refs/heads/master | 2021-01-21T03:22:43.421120 | 2019-05-01T14:07:17 | 2019-05-01T14:07:17 | 101,895,812 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 281 | rd | resid.linreg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linreg.R
\name{resid.linreg}
\alias{resid.linreg}
\title{Residuals of a linreg object}
\usage{
resid.linreg(x)
}
\arguments{
\item{x}{linreg class object}
}
\description{
Return the residuals of the linear regression
}
|
2f0b5eee45c8eba83ffbe8bd970efb06f342500e | 96581be4d9c432d9e2f168339b80de7f8e5a8275 | /Process_and_Understand_Data_Ex.R | d42762c7c812708ae03a0663f72ef57378c40b3d | [] | no_license | rwfrankenfield1s/Basic_Code_Ref | 90a8c94d1ff4cfe0ab49468738bddddea3042daa | 400f6dfc9d1eaea7844d6dd70f12a20fd1186243 | refs/heads/master | 2020-04-26T21:24:19.052940 | 2019-03-07T01:46:59 | 2019-03-07T01:46:59 | 173,841,221 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,102 | r | Process_and_Understand_Data_Ex.R | library(lattice)
# Exploratory graphics for the 2006 US births sample (package "nutshell"),
# using base graphics and the lattice package (loaded at the top of the file).
library(nutshell)
data(births2006.smpl)
births2006.smpl[1:5,]
dim(births2006.smpl)
# Births by day of week.
births.dow <- table(births2006.smpl$DOB_WK)
births.dow
barchart(births.dow,ylab="Day of Week", col="black")
# Day of week by delivery method; column 2 is dropped below.
dob.dm.tbl <- table(WK=births2006.smpl$DOB_WK,
                    MM=births2006.smpl$DMETH_REC)
dob.dm.tbl
dob.dm.tbl <- dob.dm.tbl[,-2]
dob.dm.tbl
trellis.device()
barchart(dob.dm.tbl,ylab="Day of Week")
barchart(dob.dm.tbl,horizontal=FALSE,groups=FALSE,
         xlab="Day of Week",col="black")
# Birth weight (DBWT) distributions, conditioned on plurality / delivery method.
histogram(~DBWT|DPLURAL,data=births2006.smpl,layout=c(1,5),
          col="black")
histogram(~DBWT|DMETH_REC,data=births2006.smpl,layout=c(1,3),
          col="black")
densityplot(~DBWT|DPLURAL,data=births2006.smpl,layout=c(1,5),
            plot.points=FALSE,col="black")
densityplot(~DBWT,groups=DPLURAL,data=births2006.smpl,
            plot.points=FALSE)
dotplot(~DBWT|DPLURAL,data=births2006.smpl,layout=c(1,5),
        plot.points=FALSE,col="black")
# Birth weight against day of week and maternal weight gain.
xyplot(DBWT~DOB_WK,data=births2006.smpl,col="black")
xyplot(DBWT~DOB_WK|DPLURAL,data=births2006.smpl,layout=c(1,5),
       col="black")
xyplot(DBWT~WTGAIN,data=births2006.smpl,col="black")
xyplot(DBWT~WTGAIN|DPLURAL,data=births2006.smpl,layout=c(1,5),
       col="black")
smoothScatter(births2006.smpl$WTGAIN,births2006.smpl$DBWT)
# Box-and-whisker views of birth weight by Apgar score and day of week.
boxplot(DBWT~APGAR5,data=births2006.smpl,ylab="DBWT",
        xlab="AGPAR5")
boxplot(DBWT~DOB_WK,data=births2006.smpl,ylab="DBWT",
        xlab="Day of Week")
bwplot(DBWT~factor(APGAR5)|factor(SEX),data=births2006.smpl,
       xlab="AGPAR5")
bwplot(DBWT~factor(DOB_WK),data=births2006.smpl,
       xlab="Day of Week")
# Mean birth weight by plurality, and by plurality x sex.
fac <- factor(births2006.smpl$DPLURAL)
res <- births2006.smpl$DBWT
t4 <- tapply(res,fac,mean,na.rm=TRUE)
t4
t5 <- tapply(births2006.smpl$DBWT,INDEX=list(births2006.smpl$DPLURAL,
             births2006.smpl$SEX),FUN=mean,na.rm=TRUE)
t5
barplot(t4,ylab="DBWT")
# NOTE(review): a stray bare `barplot` (which just printed the function
# definition) sat here in the original and has been removed.
# NOTE(review): `t5` is reused below, clobbering the plurality-by-sex means.
t5 <- table(births2006.smpl$ESTGEST)
t5
# Drop records with ESTGEST == 99 (presumably the "unknown" code -- confirm).
new <- births2006.smpl[births2006.smpl$ESTGEST != 99,]
t51 <- table(new$ESTGEST)
t51
# Mean birth weight over a 10x10 grid of weight-gain and gestation bins.
t6 <- tapply(new$DBWT,INDEX=list(cut(new$WTGAIN,breaks=10),
             cut(new$ESTGEST,breaks=10)),FUN=mean,na.rm=TRUE)
t6
levelplot(t6,scales = list(x = list(rot = 90)))
contourplot(t6,scales = list(x = list(rot = 90)))
|
da4bec5800216d4c155fc6acf1db74f7ebf18e54 | f31960290de09e040047b27d8f894a0d7bd68054 | /tests/testthat/test-td.R | 287e705f87f6483157593eb0710a55ae890d891d | [
"MIT"
] | permissive | 1512474508/oncoscanR | 7c99dead3603433098ca5fb6de02007087f02a80 | c1f1f7715c403f1bf3564923d9cce16a4d46fd4f | refs/heads/master | 2022-09-08T19:58:01.203437 | 2020-05-18T19:03:54 | 2020-05-18T19:03:54 | 269,932,057 | 1 | 0 | null | 2020-06-06T09:26:16 | 2020-06-06T09:26:15 | null | UTF-8 | R | false | false | 3,101 | r | test-td.R | test_that("TD score works - only gains, border checks", {
cov <- GRanges(seqnames = factor(c(paste0(1:4, 'p'), paste0(1:4, 'q')),
levels = c(paste0(1:4, 'p'), paste0(1:4, 'q'))),
ranges = IRanges(start = rep(1, 8),
end = rep(200*10^6, 8)))
segs <- GRanges(seqnames = factor(c(rep('1p', 5), rep('2p', 4)), levels = paste0(1:4, 'p')),
ranges = IRanges(start = c(1, 2, 3, 5, 20.000001, 50, 90.000001, 95, 100)*10^6 + 1,
end = c(1.1, 2.3, 4, 15, 30, 80, 91, 96.000001, 110.000001)*10^6),
cn = c(3, 4, 3, 4, 3, 4, 3, 4, 3),
cn.type = rep(cntype.gain, 9),
cn.subtype = rep(cntype.gain, 9))
n <- score_td(segs)
expected <- list(TDplus=3, TD=4)
expect_equal(n, expected)
})
test_that("TD score works - CN checks", {
  # NOTE(review): the original also built a `cov` coverage GRanges here that
  # was never passed to score_td(); that dead fixture has been removed.
  segs <- GRanges(seqnames = factor(c(rep('1p', 7), rep('2p', 7)), levels = paste0(1:4, 'p')),
                  ranges = IRanges(start = rep(c(1, 10),7)*10^6 + 1,
                                   end = rep(c(1.5, 15),7)*10^6),
                  cn = c(0,0,1,1,NA,NA,3,3,4,4,5,5,10,10),
                  cn.type = c(cntype.loss, cntype.loss,
                              cntype.loss, cntype.loss,
                              cntype.loh, cntype.loh,
                              cntype.gain, cntype.gain,
                              cntype.gain, cntype.gain,
                              cntype.gain, cntype.gain,
                              cntype.gain, cntype.gain),
                  cn.subtype = c(cntype.homloss, cntype.homloss,
                                 cntype.hetloss, cntype.hetloss,
                                 cntype.loh, cntype.loh,
                                 cntype.gain, cntype.gain,
                                 cntype.gain, cntype.gain,
                                 cntype.weakamp, cntype.weakamp,
                                 cntype.strongamp, cntype.strongamp))
  # Only gain-class segments should contribute to the TD scores.
  n <- score_td(segs)
  expected <- list(TDplus=2, TD=2)
  expect_equal(n, expected)
})
test_that("TD scores work - real case", {
  chas.fn <- system.file("testdata", "TDplus_gene_list_full_location.txt", package = "oncoscanR")
  segments <- load_chas(chas.fn, oncoscanR::oncoscan_na33.cov)
  segments$cn.subtype <- get_cn_subtype(segments, 'F')
  segs.clean <- trim_to_coverage(segments, oncoscanR::oncoscan_na33.cov) %>%
    prune_by_size()
  n <- score_td(segs.clean)
  # Separate expectations give informative diffs on failure (the original
  # collapsed both checks into a single expect_true(a && b)).
  expect_equal(n$TDplus, 102) # Verified by hand in Excel and ChAS
  expect_equal(n$TD, 21)
})
# Edge case: an empty GRanges input must yield zero for both TD scores.
test_that("TD scores work - empty segments", {
  segs <- GRanges(seqnames = factor(c(), levels = paste0(1:4, 'p')),
                  ranges = IRanges())
  n <- score_td(segs)
  expect_identical(unlist(n), c(TDplus=0,TD=0))
})
|
1dfaf454794acaafb2022b4973eac8ad08afcb94 | a3e3824c097bc5ebed250fa2b72a1575ad230954 | /man/metric.select.Rd | ec490a62d1eef155baf776ea4d4260841f39c058 | [] | no_license | p-schaefer/BenthicAnalysis | f147b353922d7845979446c07254a1d1f37036fd | 51e47f5ed8049c63422af91b267ca803e1aa675a | refs/heads/master | 2023-05-14T09:59:02.450146 | 2023-05-01T14:02:33 | 2023-05-01T14:02:33 | 43,658,323 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,600 | rd | metric.select.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric.select.R
\name{metric.select}
\alias{metric.select}
\title{Indicator metric selection}
\usage{
metric.select(Test, Reference, outlier.rem = T, rank = F, outbound = 0.1)
}
\arguments{
\item{Test}{Vector containing metric scores at the test site. Should be a single row from \code{benth.met} or \code{add.met}.}
\item{Reference}{Data frame of metric scores at the reference sites. Should be output from \code{benth.met} or \code{add.met}.}
\item{outbound}{Used if outlier.rem=T A numeric value between 0 and 1 indicating the outlier boundary for defining values as final outliers (default to 0.1)}
\item{Rank}{Use rank differences in metric selection}
}
\value{
$Best.Metrics - Vector containing the final selected indicator metrics
$Indicative.Metrics - Vector containing all metrics that indicate impairment
$raw.data - Data frame containing only selected best metrics
$ref.sites - Vector containing input reference site names
$outlier.ref.sites - Vector containing sites removed as potential outliers
}
\description{
Determines which indicator metrics best differentiate the test site from its nearest-neighbour reference sites. Metrics that indicate impairment will be
used preferentially.
}
\details{
An iterative selection algorithm is used as follows:
1. The first metric selected for the final set is the one which displays the greatest distance from the Reference condition mean
2. Metrics with a pearson correlation greater than 0.7 to (any of) the selected metric(s) are excluded from further steps
3. The ranked departure of remaining metrics is divided by the (maximum) correlation with the metric(s) previously included in the analysis
4. The metric with the greatest score is selected for inclusion in the final set
5. Return to step 2 until the number of selected metrics is equal to the greater of 4 or 1/5 the number of Reference sites
If no metrics or too few metrics demonstrate impairment, the following metrics are included until the maximum is reached:
Richness, Percent Dominance, HBI, Percent EPT.
}
\examples{
data(YKBioData,envir = environment())
bio.data<-benth.met(YKBioData,2,2)$Summary.Metrics
nn.refsites<- c("075-T-1", "019-T-1","003-T-1","076-T-1","071-T-1","022-T-1","074-T-1",
"002-T-1","004-T-1","073-T-1","186-T-1","062-T-1","005-T-1","025-T-1",
"187-T-1","023-T-1","193-T-1","192-T-1","196-T-1","194-T-1")
metric.select(bio.data[201,],bio.data[nn.refsites,])
}
\keyword{Benthic}
\keyword{Metrics}
|
4cd5b848570441147f696daf75c7de5255a9ed19 | c261669633350df11966f606e302debb1b8b2ddf | /code/mpp_cat1.R | d9596329e7863a6ff49b619fd01610af6f507495 | [] | no_license | vrindakalia/mpp_cat1_metabolomics | 4f529f53d4d885bf0495c86bb1a69187f4af9838 | d8087ace32e18cbb4b586373beeff4cfa0f7f521 | refs/heads/master | 2022-11-06T04:36:14.782407 | 2020-06-30T19:31:33 | 2020-06-30T19:31:33 | 196,630,294 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,178 | r | mpp_cat1.R | ###############################################
## MPP+ compared to cat-1 ##
###############################################
# Two ways to do this: metabnet and mummichog on overlapping features
# xMSanalyzer to get overlapping features
library(MetabNet)
library(xMSanalyzer)
library(tidyverse)
library(janitor)
# Feature tables (m/z, retention time, t-score, p-value) for the two models,
# sorted by m/z for the overlap search below.
cat1_mztime <- read.table("results/mcg/cat1.t.mcg.txt", header = T, sep = "\t") %>%
  arrange(m.z)
mpp_mztime <- read.table("results/mcg/mpp.t.mcg.txt", header = T, sep = "\t") %>%
  arrange(m.z)
#getVenn(cat1_mztime, "CAT-1", mpp_mztime , "MPP+", mz.thresh = 10, time.thresh=30,
#        alignment.tool = NA,
#        xMSanalyzer.outloc = "/Users/vk2316/Documents/Miller_lab/Metabolomics_data/mpp_cat1_metabolomics/comparisons",
#        use.unique.mz=F, plotvenn=TRUE)
#overlap.mz <- find.Overlapping.mzs(cat1_mztime, mpp_mztime, mz.thresh = 10, time.thresh = NA,
#                                   alignment.tool=NA)
# Using features that meet p < 0.05 criteria to compare overlapping features
# NOTE(review): the comment above says p < 0.05, but the filters below use
# p < 0.25 (cat-1) and p < 0.2 (MPP+) -- confirm the intended thresholds.
cat1_top <- cat1_mztime %>%
  filter(p.value < 0.25)
mpp_top <- mpp_mztime %>%
  filter(p.value < 0.2)
# Venn/overlap of the top features (mz.thresh = 10, time.thresh = 30;
# see the xMSanalyzer documentation for the units of these thresholds).
overlap_005 <- getVenn(cat1_top, "cat-1 \n(p < 0.25)", mpp_top, "MPP+ (p < 0.2)", mz.thresh = 10, time.thresh=30,
                       alignment.tool = NA,
                       xMSanalyzer.outloc = "/Users/vk2316/Documents/Miller_lab/Metabolomics_data/mpp_cat1_metabolomics/results/comparisons/overlap",
                       use.unique.mz=F, plotvenn=TRUE)
overlap_mztime <- overlap_005$common
overlap_mztime$mz_time <- paste0(overlap_mztime$mz.data.A,"_", overlap_mztime$time.data.A)
##### Creating a file that lets us see the pathways that these overlapping features belong to:
# give these features a cut-off p-value, change p-value on others
# features not in the overlap
cat1_mztime$mz_time <- paste0(cat1_mztime$m.z,"_", cat1_mztime$time)
overlap_mztime <- cat1_mztime[which(cat1_mztime$mz_time %in% overlap_mztime$mz_time),]
nonoverlap_mztime <- cat1_mztime[-which(cat1_mztime$mz_time %in% overlap_mztime$mz_time),]
# Dummy p-values: overlapping features are forced "significant" (0.01) and all
# other features "non-significant" (0.5) for the downstream mummichog input.
overlap_mztime$p.dum <- rep(0.01, times = dim(overlap_mztime)[1])
nonoverlap_mztime$p.dum <- rep(0.5, times = dim(nonoverlap_mztime)[1])
# NOTE(review): both pipes below end in write.table(), which returns NULL, so
# `dum.mcg` and `overlap.feat` only record the side effect of writing the files.
dum.mcg <- rbind(overlap_mztime, nonoverlap_mztime) %>%
  dplyr::select(m.z, p.dum, t.score) %>%
  mutate(p.value = p.dum) %>%
  dplyr::select(m.z, p.value, t.score) %>%
  write.table("results/comparisons/mcg_hilicpos_cat1mppcompare.txt", col.names = T, sep = "\t", row.names = F)
overlap.feat <- overlap_mztime[,1:2] %>%
  write.table("results/comparisons/metabnet/sig.metab.file.txt", col.names = T, row.names = F, sep = "\t")
mz_time <- (overlap_mztime[,1:2])
#######################################################
# Look for features of interest in MPP and CAT-1 FT ##
#######################################################
compounds <- read.table("results/comparisons/mummichog_matched_compound_all_compare.csv", header = T, sep = ",")
pathways <- read.table("results/comparisons/mummichog_pathway_enrichment_compare.csv", header = T, sep = ",")
pathways$logp <- -log10(pathways$FET)
pathways$enrich <- pathways$Hits.sig/pathways$Expected
#eth.hispcauc <- merge(hisp.sub, cauc.sub, by = "pathway", all = T)
#eth.all <- merge(eth.hispcauc, afr.sub, by = "pathway", all = T)
mem.sub <- pathways %>%
filter(logp > 0.35)
#tiff("figures/pathways.comparison.tiff", width = 5.7, height = 4, units = 'in', res = 300)
ggplot(mem.sub, aes(x=logp, y = reorder(X, logp), size = Hits.total, col = enrich)) +
geom_point(alpha=0.7) +
scale_color_gradient(low="blue", high="red")+
#theme_minimal() +
xlab("-log10(p-value)") +
ylab("") +
ggtitle("Pathways enriched by overlapping features",
subtitle = "Size of bubble represents number of significant hits \nEnrichment is calculated as (Total Hits/Pathway size)") +
theme(plot.title = element_text(size = 9, face = "bold"),
plot.subtitle = element_text(size = 7),
axis.text=element_text(size=7),
axis.title=element_text(size=9,face="bold"),
strip.text = element_text(size=7),
legend.text=element_text(size=7),
legend.title=element_text(size=8),
legend.position="bottom") +
guides(size=guide_legend("Overlap size")) +
labs(col = "Enrichment")
#dev.off()
tiff("figures/pathways.comparison.black.tiff", width = 6.5, height = 4, units = 'in', res = 300)
### No overlap size on graph, enrichment as size of bubble
mem.sub$label <- paste0(mem.sub$X," (",mem.sub$Hits.sig,"/", mem.sub$Pathway.total,")")
ggplot(mem.sub, aes(x=logp, y = reorder(label, logp), size = enrich)) +
geom_point() +
#scale_color_gradient(low="blue", high="red") +
#theme_minimal() +
xlab("-log10(p-value)") +
ylab("") +
ggtitle("Pathways enriched by overlapping features",
subtitle = "Enrichment is calculated as (Total Hits/Expected number of hits)") +
theme(plot.title = element_text(size = 9, face = "bold"),
plot.subtitle = element_text(size = 7),
axis.text=element_text(size=9),
axis.title=element_text(size=9,face="bold"),
strip.text = element_text(size=7),
legend.text=element_text(size=7),
legend.title=element_text(size=8),
legend.position="bottom") +
labs(size = "Enrichment")
dev.off()
# From mummichog metaboanalyst interface, compounds that are part of pathways and in overlap:
#1. Glycero-3-phosphocholine C00670
#2. Choline C00114
#3. N-Acetyl-D-glucosamine-6-phosphate C00357
#4. D-glucosamine C00329
#5. Malate C00149
#6. 4-hydroxyphenylacetate C00642
#7. Carnitine C00487
comp.interest <- c("C00670", "C00114", "C00357", "C00329", "C00149", "C00642", "C00487")
info.interest <- compounds[compounds$Matched.Compound %in% comp.interest,] %>%
mutate(Name = ifelse(Matched.Compound == comp.interest[1], "Glycero-3-phosphocholine",
ifelse(Matched.Compound == comp.interest[2], "Choline",
ifelse(Matched.Compound == comp.interest[3], "N-Acetyl-D-glucosamine-6-phosphate",
ifelse(Matched.Compound == comp.interest[4], "D-glucosamine",
ifelse(Matched.Compound == comp.interest[5], "Malate",
ifelse(Matched.Compound == comp.interest[6], "4-hydroxyphenylacetate", "Carnitine")))))))
info.interest #contains: mz, kegg id, matched form, mass.diff, Name
info.box <- info.interest %>%
group_by(Name, Matched.Form) %>%
mutate(min.adduct = min(Mass.Diff)) %>%
ungroup() %>%
filter(Mass.Diff == min.adduct)
write.table(info.box, "results/info.box.adduct.table.txt", col.names = T, row.names = F, sep = "\t")
# Next: subset the feature table to find these mzs and create box plots to see changes in levels between
# cat1 and N2 and
# MPP exposure v control
#Call in feature table with intensities
# Find mzs also present in overlap file
# Index of each compound of interest within the overlap m/z table.
# FIX(review): the original indexed `info.box.1`, a variable never defined in
# this script (the script would error here); `info.box` built above is clearly
# what was meant. The result vector is also typed integer (which() returns
# integer indices; the original declared it logical and relied on coercion).
# NOTE(review): `ind` is not used further below -- possibly exploratory.
ind <- vector(mode = "integer", length = nrow(info.box))
for (i in seq_len(nrow(info.box))) {
  ind[i] <- which(mz_time$m.z %in% info.box$Query.Mass[i])
  print(i)
}
ind
# Feature intensity tables (one column per "X<mz>_<time>" feature) per model.
feat.mpp <- read.table("results/feat.mpp.box.txt", header = T, sep = "\t")
feat.cat1 <- read.table("results/feat.cat1.box.txt", header = T, sep = "\t")
# Split column names into m/z and retention-time parts; the leading "X" (added
# by R's name mangling of numeric column names on import) is stripped.
mz.mpp <- unlist(lapply(strsplit(as.character(names(feat.mpp)), "_"), function(x) x[1]))
time.mpp <- unlist(lapply(strsplit(as.character(names(feat.mpp)), "_"), function(x) x[2]))
mz.mpp <- gsub("X", "", mz.mpp)
mz.cat1 <- unlist(lapply(strsplit(as.character(names(feat.cat1)), "_"), function(x) x[1]))
time.cat1 <- unlist(lapply(strsplit(as.character(names(feat.cat1)), "_"), function(x) x[2]))
mz.cat1 <- gsub("X", "", mz.cat1)
#cbind(info.box[1,],feat.cat1[,c(2,which(mz.cat1 %in% info.box$Query.Mass[1]))])
#feat.mpp[,c(2,which(mz.mpp %in% info.box$Query.Mass[1]))]
#box.mpp <- cbind(info.box[1,], feat.mpp[,c(2,which(mz.mpp %in% info.box$Query.Mass[1]))])
# Per-compound boxplots: for each compound of interest, combine log2 feature
# intensities from the cat-1/N2 (genetic) and MPP+/control (toxicant) models,
# save a TIFF boxplot, and accumulate the plotting data for export.

# Pull strain (column 2) plus the feature column matching `query.mass` from a
# feature table, returning a tidy frame with log2 intensity and a model tag.
# (Replaces two copy-pasted blocks inside the loop.)
extract_box_data <- function(feat, mz.vec, query.mass, group.label) {
  box <- feat[, c(2, which(mz.vec %in% query.mass))]
  box.plot <- box[, 1:2]
  names(box.plot) <- c("Strain", "Intensity")
  box.plot$Strain <- as.factor(box.plot$Strain)
  box.plot$log.intensity <- log2(box.plot$Intensity)
  # Was a hard-coded rep(..., 12) / rep(..., 10); nrow() is robust to size.
  box.plot$neuro <- rep(group.label, nrow(box.plot))
  box.plot
}

data.plots <- data.frame()
# NOTE(review): hard-coded to the first 7 rows of info.box (one per compound);
# later rows were manually dropped (see comments below) -- confirm row count.
for (i in 1:7) {
  box.plot <- extract_box_data(feat.cat1, mz.cat1, info.box$Query.Mass[i], "cat1")
  box.plot.mpp <- extract_box_data(feat.mpp, mz.mpp, info.box$Query.Mass[i], "MPP")
  box.plot.all <- rbind(box.plot, box.plot.mpp)
  # Human-readable group labels, ordered genetic model first, then toxicant.
  box.plot.all$Strain.label <- case_when(
    box.plot.all$Strain == "C" ~ "Control",
    box.plot.all$Strain == "MPP" ~ "MPP+",
    box.plot.all$Strain == "cat1" ~ "cat-1",
    box.plot.all$Strain == "N2" ~ "N2"
  )
  box.plot.all$Strain.label <- factor(box.plot.all$Strain.label, levels = c("cat-1", "N2", "MPP+", "Control"))
  name <- info.box$Name[i]
  mz <- info.box$Query.Mass[i]
  adduct <- info.box$Matched.Form[i]
  boxes <- ggplot(data = box.plot.all, aes(x = Strain.label, y = log.intensity, fill = Strain.label)) +
    geom_boxplot(show.legend = FALSE) +
    scale_fill_manual(values = c("royalblue3", "lightblue4", "orange", "darkgray")) +
    ggtitle(name, subtitle=paste0("mz = ", mz, "", " Adduct = ", adduct)) +
    theme_minimal() +
    xlab("") +
    ylab("Log Intensity")
  tiff(paste0("results/comparisons/figures/comparison", name,".tiff"), width =4.5, height = 4, units = 'in', res = 300)
  print(boxes)
  dev.off()
  data.plots <- rbind(data.plots, cbind(box.plot.all, name, mz, adduct))
  print(i)
}
write.table(data.plots, "results/comparisons/data_comparison_plots.txt", col.names = T, row.names = F, sep = "\t")
##############
# Remove line 9 from info.box since not present in MPP+ feature table
#info.box <- info.box[-9,]
# Remove line 10 from info.box since not present in MPP+ feature table
#info.box <- info.box[-10,]
# NOTE(review): the assignment below duplicates the Strain.label recoding from
# inside the plotting loop and operates on `box.plot.all`, a variable left
# over from the loop's final iteration -- it looks like dead/leftover code.
box.plot.all$Strain.label <- case_when(
  box.plot.all$Strain == "C" ~ "Control",
  box.plot.all$Strain == "MPP" ~ "MPP+",
  box.plot.all$Strain == "cat1" ~ "cat-1",
  box.plot.all$Strain == "N2" ~ "N2"
)
|
4df5bef09ee5a0b1238f933181a7991452ae981e | 1c4f4e8c53a0e172fc6a78a4929b150f80770233 | /helper/glmerSpecial.R | 1f6a04252fb03bd47814dbaf22b3812d6c47f46f | [] | no_license | shakty/artex-analysis | d91632c8722cfbe571752641f6001f0e7f6d4d9a | 123876029d31e4bf879a9c2477313ee27c7ca5dd | refs/heads/master | 2020-09-20T12:48:39.108718 | 2019-11-27T17:32:18 | 2019-11-27T17:32:18 | 224,483,293 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,397 | r | glmerSpecial.R | ####################################################
## Helper Function: Fit hard to converge glmer models.
##
## Sometimes, some hard to fit glmer models do not converge properly. This code adds some robustness to make convergence more likely.
##
## Code from: http://stats.stackexchange.com/questions/158003/glmer-error-pwrssupdate-did-not-converge-in-30-iterations
## Other useful code at: https://rstudio-pubs-static.s3.amazonaws.com/33653_57fc7b8e5d484c909b615d8633c01d51.html
##
## Version log
## 2016-XX-XX Created initial code; reading up on the material
## 2017-07-20 Packaged this up in a function for easier use.
####################################################
#' Fit a hard-to-converge generalized linear mixed model via lme4's modular API.
#'
#' Behaves like glmer(formula, data, family) but performs the fit in explicit
#' stages (glFormula -> mkGlmerDevfun -> optimizeGlmer x2 -> mkMerMod), which
#' the header of this file reports to make convergence more likely for models
#' that fail under the one-shot glmer() call.
#'
#' @param formula mixed-model formula, passed through to glFormula().
#' @param data   data frame containing the model variables.
#' @param family GLM family; defaults to Gamma with a log link.
#' @return a fitted merMod object assembled by mkMerMod().
glmerSpecial <- function(formula, data, family=Gamma(link="log")) {
  ## 1. set up model terms
  glmod <- glFormula(formula, data=data, family=family)
  ## 1A. adjust maxit. NOTE(review): appended to the glFormula() output so it
  ## is forwarded to mkGlmerDevfun() via do.call() below, presumably raising
  ## the iteration cap to 200 -- confirm against the lme4 documentation.
  glmod <- c(glmod, list(maxit=200))
  ## 2. Create the deviance function for optimizing over theta:
  devfun <- do.call(mkGlmerDevfun, glmod)
  ## 3. Optimize over theta using a rough approximation (i.e. nAGQ = 0):
  opt <- optimizeGlmer(devfun)
  ## 4. Update the deviance function for optimizing over theta and beta:
  devfun <- updateGlmerDevfun(devfun, glmod$reTrms)
  ## 5. Optimize over theta and beta:
  opt <- optimizeGlmer(devfun, stage=2)
  ## 6. Return result
  mkMerMod(environment(devfun), opt, glmod$reTrms, fr = glmod$fr)
}
|
5401f921f0e90184e86115d4c6c1b057b8166abc | 6d4a8d3fc9521805b6b2b4457e60bbb3b0663e81 | /Code/16-sql-summaries.R | 36be9fe7ffcdf38c198ff37355c4c3f4fdfb3619 | [
"MIT"
] | permissive | andrie/galaxyzoo-net | 287675c56ee401abe8b0c45375e0782951e7b3ec | a1479de5a521683842e1dac101022731ee8c6a68 | refs/heads/master | 2020-04-09T17:28:57.406838 | 2016-10-21T13:39:14 | 2016-10-21T13:39:14 | 68,102,271 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,991 | r | 16-sql-summaries.R | source("code/00-settings.R") # defines dbConnection
# Local share directory used by RevoScaleR for in-SQL computation; the
# `dbConnection` string comes from code/00-settings.R (sourced above).
sql_share_directory <- file.path("c:", "AllShare", Sys.getenv("USERNAME"))
#dir.create(sql_share_directory, recursive = TRUE)
sql <- RxInSqlServer(connectionString = dbConnection,
                     shareDir = sql_share_directory)
# ------------------------------------------------------------------------
qry <- ("
-- Count number of galaxies per class
SELECT gz2class,
count(specobjid) as count_class
FROM zoo2MainSpecz
GROUP BY gz2class
ORDER BY count_class DESC
")
# Import the per-class counts, then freeze the class order (already sorted by
# descending count) into a factor so plots keep that ordering.
galaxy_class_count <- rxImport(
  RxSqlServerData(sqlQuery = qry, connectionString = dbConnection)
)
galaxy_class_count <- galaxy_class_count %>% mutate(
  class = factor(gz2class, levels = galaxy_class_count$gz2class)
)
head(galaxy_class_count, 10)
str(galaxy_class_count, 10)
nrow(galaxy_class_count)
# ------------------------------------------------------------------------
# Horizontal bar chart of the 20 most common galaxy classes.
galaxy_class_count[1:20, ] %>% arrange(-count_class)
library(ggplot2)
ggplot(galaxy_class_count[1:20, ],
       aes(x=class, y = count_class)) +
  geom_bar(stat = "identity") +
  xlab("Galaxy class") +
  ylab(NULL) +
  coord_flip()
# FIX(review): a stray literal `1` sat here in the original (leftover from an
# interactive session); it has been removed.
# ------------------------------------------------------------------------
qry <- ("
SELECT specobjid, gz2class, petroR90_r
FROM (
SELECT zoo2MainSpecz.gz2class,
PhotoObjDR7.petroR90_r,
zoo2MainSpecz.specobjid,
Rank()
OVER (PARTITION BY zoo2MainSpecz.gz2class
ORDER BY PhotoObjDR7.petroR90_r DESC) as petrosian_rank
FROM zoo2MainSpecz
INNER JOIN PhotoObjDR7
ON zoo2MainSpecz.dr7objid = PhotoObjDR7.dr7objid
) AS derived
WHERE petrosian_rank <= 12
")
# Twelve largest galaxies (by Petrosian 90% radius) within each class;
# specobjid is read as character, presumably because the IDs do not fit in a
# 32-bit integer -- confirm against the schema.
brightest_galaxies <- rxImport(
  RxSqlServerData(sqlQuery = qry,
                  connectionString = dbConnection,
                  colClasses = c(specobjid = "character"))
)
head(brightest_galaxies, 10)
library(dplyr)
brightest_galaxies %>% filter(gz2class %in% galaxy_class_count$gz2class[1:10])
|
9835569d9f281a7f0e5791a43bc3279e4079323f | f85a27dda7eb61370ac868c5138ea90d5ddfcfaf | /src/04_seleccion.R | 914a1539bbb0f8fe49b39b0a69beaa1e21cea3e0 | [] | no_license | Songeo/proyecto_spam | 52f55ef52e035c5cacde10a25f78ffd15ca6cbcd | 66c47cbc5945ca81a9f4fed29fe9bde59693ed91 | refs/heads/master | 2021-01-22T02:04:21.016650 | 2017-06-02T01:46:49 | 2017-06-02T01:46:49 | 92,330,096 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,522 | r | 04_seleccion.R |
library(ProjectTemplate)
reload.project()
library(arm)
library(e1071)
library(randomForest)
library(gbm)
library(xtable)
# Loading results
load(file = "cache/results_models/results_logit.Rdata")
load(file = "cache/results_models/results_svm.Rdata")
load(file = "cache/results_models/results_rforest.Rdata")
load(file = "cache/results_models/results_gboost.Rdata")
# 1. Matriz de confusión
# The fitted-model result lists store confusion matrices at fixed positions:
# element 5 = test sample ("prueba"), element 2 = training sample.
list.results <- list(`logística` = results.logit,
                     `SVM` = results.svm,
                     `bosque aleatorio` = results.rf,
                     `gradient boosting` = results.gb)

# Standard classification metrics (in %) from a 2x2 confusion matrix with
# observed classes in rows and predicted classes in columns.
confusion_metrics <- function(tab.test) {
  tp <- tab.test[2, 2]
  tn <- tab.test[1, 1]
  fp <- tab.test[1, 2]
  fn <- tab.test[2, 1]
  c(exactitud = 100*(tp + tn)/sum(tab.test),
    `tasa verdadero positivo` = 100*(tp)/(tp + fn),
    `tasa verdadero negativo` = 100*(tn)/(tn + fp),
    `tasa falso positivo` = 100*(fp)/(fp + tn),
    `tasa falso negativo` = 100*(fn)/(tp + fn),
    precisión = 100*(tp)/(tp + fp)) %>%
    round()
}

# Summarise one confusion matrix per model and print it as a LaTeX table.
# (Replaces two nearly identical copy-pasted sapply() blocks; the unused
# nvp/fdr intermediates of the original were dropped.)
print_metrics_table <- function(results, elem) {
  sapply(names(results), function(nom.elem) {
    confusion_metrics(results[[nom.elem]][[elem]])
  }) %>%
    data.frame(check.names = F) %>%
    rownames_to_column("medición") %>%
    xtable(., digits = 0, align = "rr|cccc") %>%
    print(include.rownames = F)
}

# Muestra de Prueba
print_metrics_table(list.results, 5)

# Muestra de Entrenamiento
print_metrics_table(list.results, 2)
# 2. Devianza
# Únicamente comparable entre modelos de ensamble
# Deviance-based goodness of fit (plus a complexity penalty) for the two
# ensemble models, reported as a LaTeX table.
load("cache/results_models/test_results_rforest.Rdata")
load("cache/results_models/test_results_gboost.Rdata")
# (Removed exploratory leftovers from the original: bare `%>% length` /
# `$prob` prints and a `test.results <-` assignment that was immediately
# shadowed inside the sapply below.)
list.test.results <- list( `bosque aleatorio` = test.results.rf,
                           `gradient boosting` = test.results.gb)
sapply(names(list.test.results), function(nom.elem){
  test.results <- list.test.results[[nom.elem]]
  # Keep only the probabilities of the correctly classified observations.
  correctos <- test.results$resp == test.results$obs
  prob <- test.results$prob[correctos]
  # Clamp zero probabilities so log() stays finite.
  prob.mod <- ifelse(prob == 0, 0.0001, prob)
  # Complexity penalty; 283 is a hard-coded constant -- TODO confirm its
  # provenance (presumably a model-size / sample-size figure).
  cpk <- log(283)/length(prob.mod)
  deviance <- -2*log(prob.mod)
  gf <- sum(deviance)/length(prob)
  c(`bondad de ajuste` = gf,
    `penalización complejidad` = cpk,
    # Bug fix: the original reported `gf + 0`, silently dropping the
    # penalty; the information criterion is fit + complexity penalty.
    `DIC` = gf + cpk) %>%
    round(4)
}) %>%
  data.frame(check.names = F) %>%
  rownames_to_column("medición") %>%
  xtable(., digits = 2, align = "rr|cc") %>%
  print(include.rownames = F)
|
5850f54b762792e65d1270600c056105e6f14d09 | d3ff029140e0e0bca01c88f0e8da64c09d12806c | /Shinny/ui.R | da7b5f16ec2129276ae17801a558c2d6a1899401 | [] | no_license | tinghf/dss-capstone2 | b15cb099115b2438725962aa10ccad62ba917800 | 9cb84c6d4394af623d76da890ba326b178331bbf | refs/heads/master | 2016-09-16T01:15:08.977321 | 2015-08-23T23:47:24 | 2015-08-23T23:47:24 | 41,075,585 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,605 | r | ui.R | require(shiny)
# ui.R -- Shiny front end for the word-prediction capstone app.
# Two-tab navbar: a live "Prediction" tab and a "Model/Algorithm" tab
# documenting the pipeline. Fix: library() instead of require() for a
# mandatory dependency (require() only returns FALSE on failure).
library(markdown)
shinyUI(
  navbarPage("Coursera Data Science Capstone", inverse = FALSE, collapsible = FALSE,
             # --- Tab 1: interactive next-word prediction ---
             tabPanel("Prediction",
                      fluidRow(
                        sidebarPanel(width=3,
                                     hr(),
                                     helpText(h5("Help Instruction:")),
                                     helpText("To predict the next word in the sentence:"),
                                     helpText("1. Type your sentence in the input text field"),
                                     helpText("2. The value will be passed to the model while you are typing."),
                                     helpText("3. Obtain the predictions below."),
                                     helpText("After the app is 100% loaded you will see the prediction
                                              on the right side."),
                                     hr(),
                                     h5("Text Input:"),
                                     # "entry" is read server-side to drive the prediction.
                                     # Fix: user-facing typo "be patience" -> "be patient".
                                     textInput("entry",
                                               "Input a phrase below and press enter to predict the next word (be patient for initial loading):",
                                               "input a phrase here"),
                                     br(),
                                     helpText(h5("Note:")),
                                     hr(),
                                     h6("This App is built for:"),
                                     a("Coursera Data Science Capstone", href="https://www.coursera.org/course/dsscapstone"),
                                     p("class started on 9 July 2015"),
                                     hr(),
                                     br()
                        ),
                        mainPanel(
                          column(5,
                                 h3("Word Prediction"),hr(),
                                 h5('The sentence you just typed:'),
                                 # 'sent' / 'top1' are outputs rendered by server.R
                                 wellPanel(span(h4(textOutput('sent')),style = "color:#428ee8")),
                                 hr(),
                                 h5('Next Word Prediction:'),
                                 wellPanel(span(h4(textOutput('top1')),style = "color:#e86042")),
                                 hr(),
                                 p()
                          ),
                          column(5,
                                 h3(" "),
                                 br()
                          )
                        )
                      )
             ),
             # --- Tab 2: static documentation of the model and the app flow ---
             tabPanel("Model/Algorithm",
                      sidebarLayout(
                        sidebarPanel(width=3,
                                     helpText(h5("Help Instruction:")),
                                     helpText("Please switch the panels on the right side to figure out:"),
                                     helpText("- Description on data processing pipeline of the predictive model"),
                                     helpText("- How does the application work"),
                                     hr(),
                                     h6("This Application is built for:"),
                                     a("Coursera Data Science Capstone", href="https://www.coursera.org/course/dsscapstone"),
                                     p("class started on 6th July 2015"),
                                     hr(),
                                     br()
                        ),
                        mainPanel(
                          tabsetPanel(type="tabs",
                                      tabPanel("Model Description",
                                               h3("Predictive Model Pipeline"),hr(),
                                               h4("Clean the training dataset"),
                                               p("The raw Swiftkey text datasets are about 580M in total- en_US.blogs.txt-210M,
                                                 en_US.news.txt-206M, and en_US.twitter.txt-167M"),
                                               p("The raw dataset was imported through R readLines() function. The large dataset
                                                 was sampled into smaller dataset to reduce processing time and avoid out of
                                                 memory problem in later stages.
                                                 The sampled data was then tokenized with n-grams extracted, using the
                                                 N-grams tokenizer in the RWeka packages.
                                                 Also following data cleaning was performed:
                                                 - remove white-spaces
                                                 - change all words to lower case
                                                 - remove punctuation
                                                 - remove numbers
                                                 - profanity filtering
                                                 "),
                                               p("After the above processing, the three corpus are then combined and we end up
                                                 with a 1-3 n-gram frequency matrix. "),
                                               hr(),
                                               h4("Build the model"),
                                               p(a("Naive Bayes", href = "http://www.inside-r.org/packages/cran/e1071/docs/naiveBayes"),
                                                 'and was used for estimating the probabilities corresponding to the observed frequencies,
                                                 and the joint probability of all unobserved species. The last three words of users\' input sentence will be extracted first and used to predict the next words. ' ),
                                               hr(),
                                               br()
                                      ),
                                      tabPanel("Application Work-flow",
                                               h3("Shiny App Prediction Algorithm"),
                                               hr(),
                                               h4("Pre-process"),
                                               p("1. Obtain the data from the input box."),
                                               p("2. Cleaning for the data sentence. Numbers, punctuations,
                                                 extra spaces will be removed, and all words are converted to lower-case."),
                                               hr(),
                                               h4("Tokenize"),
                                               p("After preprocessing, the sentence will be truncated from the last 3 words.
                                                 , If there are less than 3 words, all the words will be used."),
                                               hr(),
                                               h4("Search pattern"),
                                               p("Search the pattern from the n-gram model
                                                 The algorithm will search the pattern from the frequency matrix,
                                                 and then return the it's next word prediction. ")
                                      )
                          )
                        )
                      )
             )
  )
)
|
2829b08402b51fa95628ddbf75b5e0586b3c75b3 | e374888ce6ca6436d2b08b4d406ed39fb2504170 | /Final Visualizations.R | 3446bcb928cfd08d5a6ee58c0ab4f8d6eb00a0a2 | [] | no_license | derekcastleman/StoicProject | 1480b475c10edba3b6ba7764a6a87640d5fe7a06 | 028f846389a3ca6f15529cd64d856e28c79ff18b | refs/heads/main | 2023-04-20T19:34:34.461437 | 2021-05-07T19:01:52 | 2021-05-07T19:01:52 | 365,323,087 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,527 | r | Final Visualizations.R | install.packages('paletteer')
# Table/plot styling packages.
library('gt')        # NOTE(review): gt does not appear to be used in this script -- confirm it is needed
library('paletteer') # NOTE(review): no paletteer palettes are referenced below -- confirm
# Rename columns to presentation-friendly labels and sort by tweet volume.
philosopher_prepped <- philosopher_summary %>%
  rename(
    Philosopher = philosopher,
    Tweets      = tweets,
    Users       = distinct_users,
    Avg_Fav     = avg_fav,
    Favorites   = total_fav,
    Avg_Retweet = avg_retweet,
    Retweets    = total_retweet
  ) %>%
  arrange(desc(Tweets))

# Keep the display columns in order and round every numeric value to 2 dp.
philosopher_final <- philosopher_prepped %>%
  select(Philosopher, Tweets, Users, Favorites, Avg_Fav, Retweets, Avg_Retweet) %>%
  mutate_if(is.numeric, ~ round(., 2))

# Colour-bar formatting rules for the formattable output.
unfiltered_formats <- list(
  `Philosopher` = formatter("span", style = ~ style(color = "black", font.weight = "bold")),
  `Tweets`      = color_bar("#3df6b2"),
  `Users`       = color_bar("#ff1d1d"),
  'Favorites'   = color_bar('#148afa'),
  Retweets      = color_bar('#f10adb')
)
philosopher_unfiltered_table <- formattable(
  philosopher_final,
  align = c("l", "c", "c", "c", "c", "c", "c", "c", "r"),
  unfiltered_formats
)
print(philosopher_unfiltered_table)
# Same rename / round / formattable pipeline, applied to the filtered data.
philosopher_prepped_filtered <- philosopher_summary_filtered %>%
  rename(
    Philosopher = philosopher,
    Tweets      = tweets,
    Users       = distinct_users,
    Avg_Fav     = avg_fav,
    Favorites   = total_fav,
    Avg_Retweet = avg_retweet,
    Retweets    = total_retweet
  ) %>%
  arrange(desc(Tweets))

philosopher_final_filtered <- philosopher_prepped_filtered %>%
  select(Philosopher, Tweets, Users, Favorites, Avg_Fav, Retweets, Avg_Retweet) %>%
  mutate_if(is.numeric, ~ round(., 2))

# Colour-bar formatting rules for the formattable output.
filtered_formats <- list(
  `Philosopher` = formatter("span", style = ~ style(color = "black", font.weight = "bold")),
  `Tweets`      = color_bar("#3df6b2"),
  `Users`       = color_bar("#ff1d1d"),
  'Favorites'   = color_bar('#148afa'),
  Retweets      = color_bar('#f10adb')
)
philosopher_filtered_table <- formattable(
  philosopher_final_filtered,
  align = c("l", "c", "c", "c", "c", "c", "c", "c", "r"),
  filtered_formats
)
print(philosopher_filtered_table)
# Stoics (grouped) versus Socrates on the filtered data, same table treatment.
stoic_prepped_filtered <- stoic_versus_socrates_filtered %>%
  rename(
    Tweets      = tweets,
    Users       = distinct_users,
    Avg_Fav     = avg_fav,
    Favorites   = total_fav,
    Avg_Retweet = avg_retweet,
    Retweets    = total_retweet
  ) %>%
  arrange(desc(Tweets))

stoic_final_filtered <- stoic_prepped_filtered %>%
  select(Stoic, Tweets, Users, Favorites, Avg_Fav, Retweets, Avg_Retweet) %>%
  mutate_if(is.numeric, ~ round(., 2))

# NOTE(review): the formatter below targets `Philosopher`, but this table's
# first column is `Stoic` -- the bold styling likely never applies; confirm.
stoic_formats <- list(
  `Philosopher` = formatter("span", style = ~ style(color = "black", font.weight = "bold")),
  `Tweets`      = color_bar("#3df6b2"),
  `Users`       = color_bar("#ff1d1d"),
  'Favorites'   = color_bar('#148afa'),
  Retweets      = color_bar('#f10adb')
)
stoic_filtered_table <- formattable(
  stoic_final_filtered,
  align = c("l", "c", "c", "c", "c", "c", "c"),
  stoic_formats
)
print(stoic_filtered_table)
# Scatter of favorites vs retweets for all observations, with a linear
# best-fit line. The point colour distinguishes philosophers; x/y are
# inherited from the plot-level aes().
plot_unfiltered <- ggplot(final_philosopher, aes(favoriteCount, retweetCount)) +
  geom_point(aes(color = philosopher)) +
  geom_smooth(method = "lm", se = FALSE, color = "black", alpha = 0.5) +
  labs(title = "Unfiltered Comparison", x = "Favorites", y = "Retweets") +
  theme(plot.title = element_text(hjust = 0.5))
print(plot_unfiltered)
# Same favorites-vs-retweets scatter, restricted to the filtered data
# (tweets with fewer than 1000 favorites, per the title).
best_fit_filtered <- ggplot(philosopher_filtered, aes(favoriteCount, retweetCount)) +
  geom_point(aes(color = philosopher)) +
  geom_smooth(method = "lm", se = FALSE, color = "black") +
  labs(title = "Filtered Comparison (<1000 Favorites)", x = "Favorites", y = "Retweets") +
  theme(plot.title = element_text(hjust = 0.5))
print(best_fit_filtered)
|
7c15e17e83237a1c21fb08528438cd4dc30568f8 | 12292a3ea0df7e1ca422d778f8f3ea812069cb83 | /man/whippetTranscriptChangeSummary.Rd | 38aa064b839d12efc25ee24b256b51654aa3bfb8 | [] | no_license | federicomarini/GeneStructureTools | bd8be331d1bc29d849452329bcbb77705e6868dd | ed946d997fc62ac70b1f34e5ce631aefe60d2dca | refs/heads/master | 2020-03-13T21:15:32.154189 | 2018-04-27T12:22:17 | 2018-04-27T12:22:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,006 | rd | whippetTranscriptChangeSummary.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quickAnalysis.R
\name{whippetTranscriptChangeSummary}
\alias{whippetTranscriptChangeSummary}
\title{Compare open reading frames for whippet differentially spliced events}
\usage{
whippetTranscriptChangeSummary(whippetDataSet, gtf.all = NULL, BSgenome,
eventTypes = "all", exons = NULL, transcripts = NULL, NMD = FALSE,
exportGTF = NULL)
}
\arguments{
\item{whippetDataSet}{whippetDataSet generated from \code{readWhippetDataSet()}}
\item{gtf.all}{GRanges gtf annotation (can be used instead of specifying exons and transcripts)}
\item{BSgenome}{BSGenome object containing the genome for the species analysed}
\item{eventTypes}{which event type to filter for? default = "all"}
\item{exons}{GRanges gtf annotation of exons}
\item{transcripts}{GRanges gtf annotation of transcripts}
\item{NMD}{Use NMD predictions? (Note: notNMD must be installed to use this feature)}
\item{exportGTF}{file name to export alternative isoform GTFs (default=NULL)}
}
\value{
data.frame containing signficant whippet diff data and ORF change summaries
}
\description{
Compare open reading frames for whippet differentially spliced events
}
\examples{
whippetFiles <- system.file("extdata","whippet/",
package = "GeneStructureTools")
wds <- readWhippetDataSet(whippetFiles)
wds <- filterWhippetEvents(wds)
gtf <- rtracklayer::import(system.file("extdata","example_gtf.gtf",
package = "GeneStructureTools"))
g <- BSgenome.Mmusculus.UCSC.mm10::BSgenome.Mmusculus.UCSC.mm10
whippetTranscriptChangeSummary(wds, gtf.all=gtf,BSgenome = g)
}
\seealso{
Other whippet data processing: \code{\link{coordinates}},
\code{\link{diffSplicingResults}},
\code{\link{filterWhippetEvents}},
\code{\link{formatWhippetEvents}},
\code{\link{junctions}}, \code{\link{readCounts}},
\code{\link{readWhippetDIFFfiles}},
\code{\link{readWhippetDataSet}},
\code{\link{readWhippetJNCfiles}},
\code{\link{readWhippetPSIfiles}}
}
\author{
Beth Signal
}
|
2c3a4ac6b241b2ec3042d637c5b523d34a4d3588 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/fivethirtyeight/examples/drinks.Rd.R | 0cdbfc187111428fa41a1e5da465811f3650bfed | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 477 | r | drinks.Rd.R | library(fivethirtyeight)
### Name: drinks
### Title: Dear Mona Followup: Where Do People Drink The Most Beer, Wine
###   And Spirits?
### Aliases: drinks
### Keywords: datasets
### ** Examples
# To convert data frame to tidy data (long) format, run:
library(tidyverse)
library(stringr)
# One row per (country, beverage type); str_sub(end = -10) trims the
# 9-character "_servings" suffix from the gathered column names.
drinks_tidy <- drinks %>%
  gather(type, servings, -c(country, total_litres_of_pure_alcohol)) %>%
  mutate(
    type = str_sub(type, start=1, end=-10)
  ) %>%
  arrange(country, type)
|
858be5524cd95cb5772556b69b871a78d142c982 | 2866cc0f4d69352281ebcb0ea2a9ef0d4a7c5110 | /Figures.R | da90c8b556be35d4baf25d97ac96969165bb88f5 | [] | no_license | edvallog/UtahCovidData | af79f576f0a8933492fd6268185e0878c45eb7d4 | d6fa14ba16fa09dd43e2cd2a436cfedbd28dbd96 | refs/heads/master | 2022-09-27T20:23:01.039424 | 2020-06-05T05:08:57 | 2020-06-05T05:08:57 | 269,534,968 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,745 | r | Figures.R | stacked <- plot_ly(covidData, x = ~Day, y = ~totalDeath, type = 'bar', name = 'Total Deaths', marker = list(color = 'red'))
# Add the remaining traces to the stacked bar chart of outcomes
# (the base plot_ly object `stacked` is created just above).
stacked <- stacked %>%
  add_trace(y = ~actCases, name = 'Total Active Cases', marker = list(color = '#fc7703')) %>%
  add_trace(y = ~Recov, name = 'Total Estimated Recoveries', marker = list(color = 'green')) %>%
  layout(yaxis = list(title = 'Counts'), barmode = 'stack')  # stacked totals: deaths + active + recoveries
# Cumulative case counts over time, plus a log-scale variant on which
# exponential growth reads as a straight line.
totalCasesPlot <- plot_ly(covidData, x = ~Day) %>%
  add_trace(y = ~totalCases, name = 'Total Cases', mode = 'lines',
            line = list(color = '#000000'))
totalCasesLog <- totalCasesPlot %>%
  layout(yaxis = list(type = "log"))
# Death-rate series over time (the deathRate column is computed upstream).
deathRatePlot <- plot_ly(
  covidData,
  x = ~Day, y = ~deathRate,
  name = 'Death Rate (UT)',
  type = 'scatter', mode = 'lines',
  line = list(color = '#737373')
)
# Hospitalization outcomes stacked by day: deaths, currently hospitalized,
# and discharges.
hospStack <- plot_ly(covidData, x = ~Day, y = ~totalDeath, type = 'bar',
                     name = 'Total Deaths', marker = list(color = 'red')) %>%
  add_trace(y = ~totActHosp, name = 'Total Active Hospitalizations', marker = list(color = '#fc7703')) %>%
  add_trace(y = ~hospRecov, name = 'Total Discharges', marker = list(color = '#0373fc')) %>%
  layout(yaxis = list(title = 'Counts'), barmode = 'stack')
# Final outcomes stacked by day: deaths vs recoveries.
outcomeStack <- plot_ly(covidData, x = ~Day, y = ~totalDeath, type = 'bar',
                        name = 'Total Deaths', marker = list(color = 'red')) %>%
  add_trace(y = ~Recov, name = 'Total Recoveries', marker = list(color = 'green')) %>%
  layout(yaxis = list(title = 'Counts'), barmode = 'stack')
# Day-over-day change in active cases as outlined bars.
truRateChange <- plot_ly(
  covidData,
  x = ~Day, y = ~actCaseChange,
  name = 'True Case Change by Day (UT)',
  type = "bar",
  marker = list(line = list(width = 1, color = '#000000'))
)
# Daily new cases vs daily recoveries on a single line chart.
recVnew <- plot_ly(covidData, x = ~Day, y = ~dailyCases,
                   name = 'Daily Cases (UT)', type = 'scatter', mode = 'lines') %>%
  add_trace(y = ~covidData$dailyRecov, name = 'Daily Recoveries (UT)')
# Net daily change: new cases minus recoveries.
netCaseChange <- plot_ly(
  covidData,
  x = ~Day, y = ~dailyCases - dailyRecov,
  name = 'Net Change in Cases (UT)',
  type = "bar",
  marker = list(color = 'green')
)
# Net outstanding total: active cases plus deaths minus recoveries.
actcasesVrecov <- plot_ly(
  covidData,
  x = ~Day, y = ~actCases + totalDeath - Recov,
  name = "Net Cases Total (UT)",
  type = 'bar',
  marker = list(color = 'green')
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.