blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b99744699f7ebdc8bcedf7524860b8d2ad85bfb7 | 9a3a1ba98edf8b1c504e644970184f04be7524e1 | /prs_phenotypes.R | 264277d948b81494e3b0a099c5cac97d12b6f29a | [] | no_license | cedricx/scz_prs_connectivity | 2956800d91e2157fbb4173a84d06e78c193a98db | 0a6fb1dbedb73ee6a61f775240f6105e376d9c72 | refs/heads/master | 2020-03-10T09:22:31.221930 | 2018-04-12T20:31:06 | 2018-04-12T20:31:06 | 129,307,512 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,170 | r | prs_phenotypes.R | ## import data ##
wkdir <- '~/Google Drive/TDSlab/SCZ_gene_imaging/'
prs <- read.csv(paste0(wkdir,'genotype/pnc_EA_prs_20170501.csv'))
demographic <- read.csv(paste0(wkdir,'phenotype/EuropeanAmerican/demographics/n9498_demographics_go1_20161212_EA.csv'))
snp108 <- read.csv(paste0(wkdir,'genotype/illumina_EA_poly_scz108_converted.csv'))
cnb <- read.csv(paste0(wkdir,'phenotype/EuropeanAmerican/cnb/n9498_cnb_zscores_fr_20170202_EA.csv'))
cnbar <- read.csv(paste0(wkdir,'phenotype/EuropeanAmerican/cnb/n9498_cnb_zscores_frar_20170202_EA.csv'))
cnb_factor <- read.csv(paste0(wkdir,'phenotype/EuropeanAmerican/cnb/n9498_cnb_factor_scores_fr_20170202_EA.csv'))
cnbar_factor <- read.csv(paste0(wkdir,'phenotype/EuropeanAmerican/cnb/n9498_cnb_factor_scores_frar_20170202_EA.csv'))
bifactor <- read.csv(paste0(wkdir,'phenotype/EuropeanAmerican/clinical/n9498_goassess_itemwise_bifactor_scores_20161219_EA.csv'))
envr <- read.csv(paste0(wkdir,'phenotype/EuropeanAmerican/environment/n9498_go1_environment_factor_scores_tymoore_20150909_EA.csv'))
## REML functions ##
prs_gam10<-function(vab_of_int, prs_df){
# Fit a GAM (REML) regressing one phenotype on the schizophrenia polygenic
# risk score, adjusting for a smooth of age at CNB, sex, and the first ten
# genetic principal components.
#
# vab_of_int : numeric vector, the phenotype (one column of the merged data)
# prs_df     : data.frame containing ageAtCnb1, sex, scz_prs and pc1-pc10
#
# Returns the fitted gam object (invisibly, because the assignment is the
# last expression). NOTE(review): gam() is presumably mgcv::gam, but no
# library(mgcv) call is visible in this file -- confirm it is loaded upstream.
out<-gam(vab_of_int ~ s(ageAtCnb1) + sex + scz_prs +
pc1 + pc2 + pc3 + pc4 + pc5 +
pc6 + pc7 + pc8 + pc9 + pc10, data = prs_df, method="REML")
}
voi_reml <- function(voi) {
  # Run the PRS GAM over every phenotype column of `voi` and collect p-values.
  #
  # voi : data.frame of phenotypes keyed by "bblid"; merged with the global
  #       `prs` table (PRS + demographics + PCs) defined earlier in the file.
  #
  # Returns a list with the merged data, the full p-value matrix
  # (model terms x phenotypes) and the phenotypes whose scz_prs p-value
  # is below 0.05.
  merged <- merge(prs, voi, by = "bblid")
  # Phenotype columns start at column 25 of the merged frame; the earlier
  # columns hold the PRS, demographic and principal-component covariates.
  pheno_cols <- 25:ncol(merged)
  # One GAM fit per phenotype column.
  fits <- lapply(pheno_cols, function(j) prs_gam10(merged[, j], merged))
  # Parametric-term p-values from each fit, one column per phenotype.
  pvals <- sapply(fits, function(fit) summary(fit)$p.pv)
  colnames(pvals) <- colnames(merged)[pheno_cols]
  # Phenotypes with a nominally significant PRS effect.
  sig <- pvals["scz_prs", which(pvals["scz_prs", ] < 0.05)]
  list(data = merged, pval = pvals, sig_voi = sig)
}
## combine PRS and demographics ##
prs <- merge(prs, demographic, by = 'bblid')
prs$sex <- as.ordered(as.factor(prs$sex))
## PRS vs CNB
cnb_out <-voi_reml(cnb)
## PRS vs CNB_ar
cnbar_out <-voi_reml(cnbar)
## PRS vs CNB_factor
cnbfactor_out <-voi_reml(cnb_factor)
cnbfactorar_out <-voi_reml(cnbar_factor)
## PRS vs bifactor
cnbbifactor_out <-voi_reml(bifactor)
## PRS vs env
cnbenv_out <-voi_reml(envr)
|
5ad003211f91acb1a87244cfcdc660158ab791dd | 713b019cb89b44c0e13509a73fa34594650432d2 | /simple_ordinal_model.R | 9c6eb75da83199e3d3f436a2ffc7702bf8b25329 | [] | no_license | kurtis-s/bayesian_ordinal_regression | 9cb7420cfd0aaf4fe0ee790c8a4feacfc2b3e972 | 34aeb2a4411ee75de988f01be272d3f270afe104 | refs/heads/master | 2021-01-09T20:11:09.640930 | 2016-06-27T01:35:00 | 2016-06-27T01:35:00 | 62,016,914 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,810 | r | simple_ordinal_model.R | rm(list=ls())
library(arm)
library(coda)
library(ordinal)
set.seed(8392921)
source("helper_funcs.R")
ordinal_dat <- readRDS(file="ordinal_dat.rds")
dat <- ordinal_dat$dat
# Frequentist estimates ---------------------------------------------------
freq_model <- clm(factor(Y) ~ ., data=dat)
summary(freq_model)
# Bayesian estimates ------------------------------------------------------
Pdim <- 3                                        # number of predictor columns
Jdim <- nlevels(factor(dat$Y))                   # number of ordinal response categories
Xobs <- as.matrix(dat[,1 + 1:Pdim])              # predictor matrix (columns 2..Pdim+1 of dat)
Yobs <- model.matrix( ~ factor(Y) - 1, data=dat) # N x J one-hot indicator of the response
n_obs <- nrow(Yobs)                              # number of observations
log_likelihood <- function(pi_probs) {
  #' Log-likelihood of the observations, up to an additive constant.
  #'
  #' @param pi_probs N x J matrix of category probabilities, one row per
  #'   observation and one column per response category
  #'
  #' @return scalar log-likelihood; only the log-probability of each
  #'   observation's realised category (selected via the global one-hot
  #'   indicator matrix Yobs) contributes to the sum
  # TODO: Need to add restriction for the ordering of delta
  log_pi <- log(pi_probs)
  sum(Yobs * log_pi)
}
get_coefficient_proposal <- function(coefficient) {
  #' Random-walk proposal for the MCMC: jitter the current value with a
  #' single Gaussian step of standard deviation 0.1.
  step <- rnorm(n = 1, mean = 0, sd = 0.1)
  coefficient + step
}
metropolis_chooser <- function(param_cur, param_prop, log_lik_cur, log_lik_prop) {
  #' Metropolis-Hastings accept/reject step.
  #'
  #' @param param_cur current parameter value
  #' @param param_prop proposed parameter value
  #' @param log_lik_cur log-likelihood at the current value
  #' @param log_lik_prop log-likelihood at the proposed value
  #'
  #' @return the proposal when a uniform draw falls below the likelihood
  #'   ratio exp(log_lik_prop - log_lik_cur), otherwise the current value
  accept_ratio <- exp(log_lik_prop - log_lik_cur)
  if (accept_ratio > runif(1)) {
    return(param_prop)
  }
  param_cur
}
get_coef_proposal <- function(coefs, idx, coef_name) {
  #' Propose a new value for a single entry of one coefficient vector,
  #' leaving every other entry untouched (for use in the M-H step).
  #'
  #' @param coefs list of the different coefficient vectors
  #' @param idx index of the entry to perturb
  #' @param coef_name string naming which vector to update; must be a key
  #'   of the coefs list (e.g. "beta" or "delta")
  #'
  #' @return the coefs list with entry idx of coefs[[coef_name]] replaced
  #'   by a random-walk proposal
  current_value <- coefs[[coef_name]][idx]
  coefs[[coef_name]][idx] <- get_coefficient_proposal(current_value)
  coefs
}
sample_coef <- function(coefs, coef_name) {
  #' Sample a new coefficient vector
  #'
  #' One Metropolis-within-Gibbs sweep: every entry of the chosen
  #' coefficient vector gets a random-walk proposal that is accepted or
  #' rejected against the full-data log-likelihood.
  #'
  #' @param coefs list of the different coefficient vectors
  #' @param coef_name string indicating which coefficients to update. The
  #' string should be a key in the coefs vector (e.g. "delta", or "beta")
  #'
  #' @return list of updated coefficient vectors
  Kdim <- length(coefs[[coef_name]])
  # NOTE(review): 1:Kdim misbehaves if the vector were empty; seq_len(Kdim)
  # would be the safe form.
  for(k in 1:Kdim) {
    # Recompute the current-state likelihood on every pass, because an
    # accepted proposal in the previous iteration changes coefs.
    # get_pi_from_coefs() is presumably defined in helper_funcs.R (sourced
    # at the top of this script) -- confirm its contract there.
    pi_cur <- get_pi_from_coefs(coefs)
    log_lik_cur <- log_likelihood(pi_cur)
    # Propose a change to entry k only, then evaluate its likelihood.
    coefs_prop <- get_coef_proposal(coefs, k, coef_name)
    pi_prop <- get_pi_from_coefs(coefs_prop)
    log_lik_prop_p <- log_likelihood(pi_prop)
    # Metropolis accept/reject on the whole coefficient list.
    new_coefs <- metropolis_chooser(coefs, coefs_prop, log_lik_cur, log_lik_prop_p)
    coefs <- new_coefs
  }
  return(coefs)
}
sampler <- function(nsamps, nburn, coefs_init) {
  #' Run the Metropolis-within-Gibbs sampler for the ordinal model.
  #'
  #' @param nsamps number of posterior draws to keep
  #' @param nburn number of burn-in iterations to discard
  #' @param coefs_init list with starting values for "beta" and "delta"
  #'
  #' @return list with matrices of beta draws, delta draws and posterior
  #'   predictive draws (one row per retained iteration)
  beta_samps <- matrix(nrow = nsamps, ncol = Pdim)
  delta_samps <- matrix(nrow = nsamps, ncol = Jdim - 1)
  posterior_preds <- matrix(nrow = nsamps, ncol = n_obs)
  coefs <- coefs_init
  # BUG FIX: the original loop ran b over -(nburn+1):nsamps and indexed the
  # storage matrices with b directly. In R a negative subscript in an
  # assignment selects "all rows except row b", so every burn-in iteration
  # scribbled over the whole matrix (and b = 0 was a silent no-op). Burn-in
  # iterations are now advanced without storing anything.
  for (b in seq_len(nburn)) {
    coefs <- sample_coef(coefs, "beta")
    coefs <- sample_coef(coefs, "delta")
  }
  for (b in seq_len(nsamps)) {
    coefs <- sample_coef(coefs, "beta")
    coefs <- sample_coef(coefs, "delta")
    # gen_obs() is presumably defined in helper_funcs.R (sourced above).
    posterior_preds[b, ] <- gen_obs(coefs)
    beta_samps[b, ] <- coefs$beta
    delta_samps[b, ] <- coefs$delta
  }
  list(beta = beta_samps, delta = delta_samps, post_preds = posterior_preds)
}
nsamps <- 100
nburn <- 100
samps1 <- sampler(nsamps, nburn,
list(beta=ordinal_dat$beta + .5, delta=ordinal_dat$delta))
samps2 <- sampler(nsamps, nburn,
list(beta=rep(0, 3), delta=ordinal_dat$delta))
post_preds <- rbind(samps1$post_preds, samps2$post_preds)
chain1 <- mcmc(cbind(samps1$beta, samps1$delta))
chain2 <- mcmc(cbind(samps2$beta, samps2$delta))
mcmc_chains <- mcmc.list(chain1, chain2)
gelman.diag(mcmc_chains)
saveRDS(list(mcmc=mcmc_chains, post_preds=post_preds, dat=dat),
file="simple_ordinal_samps.rds")
|
d6d35cd0101ca27d647bcd7f227937de9c9aa84f | 74e78cfaf6d5dbce25b5187d5de3d1f51781a618 | /plot5.R | 74784b2f8bc4e43e65e37917db7991f158c6da88 | [] | no_license | gato1005/JHU_C4_ExploratoryDataAnalysis_GradedAssignment | 209beafafe4a9e8834c19df58970dde65417a6dd | defd4590afb3f655b4c9b8c5d751d664860efb77 | refs/heads/master | 2022-11-08T20:47:22.146527 | 2020-07-02T16:36:36 | 2020-07-02T16:36:36 | 276,567,110 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,625 | r | plot5.R | # load the required library
library(dplyr)
library(ggplot2)
# Download the zip file from Coursera to your working directory
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
destfile = "2FNEI_data.zip")
# Unzip the file in the working directory
unzip("2FNEI_data.zip",
exdir = ".")
# read the required files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# subset the Baltimore, Maryland
Baltimore<-subset(NEI,fips == "24510")
# store the SSC values for all the entries in SSC$EI.Sector dataFrame
# which have "Vehicle" in their string observations
motor <- SCC[grep("Vehicle", SCC$EI.Sector), "SCC"]
# subset the entries from NEI dataFrame which have their SCC values
# equal to the values derived from the SCC DataFrame in the previous command
motor.Baltimore<-subset(Baltimore,SCC %in% motor)
# make a new DataFrame to store the year-wise PM2.5 emission from
# motor related sources
# BUG FIX: ddply() and .() belong to the plyr package, but only dplyr is
# loaded at the top of this script, so the original call errored at runtime.
# The equivalent dplyr pipeline is used instead; as.data.frame() keeps the
# result a plain data.frame for the base plot() call below.
yearwise.motor.emission <- motor.Baltimore %>%
  group_by(year) %>%
  summarise(TotalEmissions = sum(Emissions)) %>%
  as.data.frame()
# store as image(png format)
png(filename = "plot5.png",width = 480,height = 480)
# plot the result
plot(yearwise.motor.emission$year,
yearwise.motor.emission$TotalEmissions,
ylim = c(50,400),
type = "b",
main = " Total Emission from motor sources in 1999-2008",
xlab = "Year",ylab = expression('Total Emission of PM'[2.5]*"(in tons)"),
pch=19,lwd=2)
# return to RStudio default window graphics device by closing the png
dev.off()
|
8f63ba5d6b3eb6ad39fb98ae48c8f88216fa947e | 9dfb166b1d1215a4389240c8bfa92bb1a47e3cbd | /HW1_R_codes.R | 89aa50e200b9a83bcc1f7f7638483667ddf817d7 | [] | no_license | giorgosnty/Data-Mining-DAMI | ee70248131813f5a0c3e0c7946f61e9e90447725 | 5868159f5e1f964b8f18acafa810eac8f19e36eb | refs/heads/master | 2020-03-26T18:23:09.512461 | 2018-08-18T10:23:35 | 2018-08-18T10:23:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,111 | r | HW1_R_codes.R |
# *********************************************
# DAMI Preprocessing Exercise R file
# Complete the codes to complete the assignment
# *********************************************
# 1. Import data for analysis to R environment
# Downloaded "Adult" dataset from UCI Machine Learning Repository
# URL http://archive.ics.uci.edu/ml/datasets/Adult
# Import dataset in adult_db
# Missing values are represented as "?" in data file, make sure that R read them as missing values (NAs)
# HINT: use read.table() function, use ?read.table for more help
# ------------------------------------------------------------------------------------------------------ #
adult_db <- read.table(file = "http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
header = FALSE, sep = ",", na.strings = "?", strip.white = TRUE,
stringsAsFactors = FALSE)
# Assign attribute names (column names) to the data we just imported
# Attribute names are in separate file "adult.names", scroll down to the bottom of this file
# Attribute names such as ("age", "workclass", "fnlwgt",...)
# Last column of the dataset adult.db with values ">50K" and "<=50K" does not have name,
# this is the class attribute, so we just name it as "class"
# ----------------------------------------------------------------------------------------- #
names(adult_db) = c("age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"sex",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"class")
# Inspect data set in tabular form
# -----------------------------
fix(adult_db)
# Change class labels to 1 (adults who earn more than 50K) and 0 (adults who earn less than or equal to 50K)
# ----------------------------------------------------------------------------------------------
adult_db$class[adult_db$class==">50K"] <- 1
adult_db$class[adult_db$class=="<=50K"] <- 0
# 2. Check for missing values
# Write code to check how many missing values each attribute has
# Hint: use "apply()" function along columns of "adult.db", for each column (attribute) find how many NAs are there
# is.na(x) function can be used to see if x has NA, ?is.na for help
# --------------------------------------------------------------------------------------------------------------- #
# ****** YOUR CODE HERE ******* #
apply(adult_db, 2, function(x) sum(is.na(x)))
# Delete records (rows) with any missing value
# --------------------------------------- #
adult_db_nomiss <-na.omit(adult_db)
# 3. We will take only small chunk of the data for our experimental purpose.
# So, randomly select 1000 records from among 30 thousand records in the dataset.
# ------------------------------------------------------------------------------- #
set.seed(1013)
idx = sample(1:nrow(adult_db_nomiss),1000)
adult_db_lim = adult_db_nomiss[idx,]
row.names(adult_db_lim) <- NULL
# Examine attributes of the dataset
# 3a. Plot histogram for numeric attribute "age", with 50 breaks, show main title and attribute name on the plot.
# HINT: use hist() function for plotting histogram, ?hist to see how to use it.
# --------------------------------------------------------------------------------------------------------
# ******* YOUR CODE FOR HISTOGRAM PLOT GOES HERE ******** #
class_over_50 <- (adult_db_lim$class==1)
hist(adult_db_lim$age[!class_over_50],breaks = 50,main ="Age Distribution",ylim=c(0,30),xlab="Age",ylab="frequency" ,col = rgb(1,0,0,1))
hist(adult_db_lim$age[class_over_50],breaks = 50,ylim=c(0,30),xlab="Age",ylab="frequency", col = rgb(0,0,1,1), add=T)
legend("topright",legend=c(">50K","<=50K"),col=c("blue","red"),pch=20,cex=1.45)
# 3b. Plot barchart for categorical attribute "race", show legend, attribute name and main title for the plot.
# HINT: use barplot() function for plotting barchars, ?barplot for more help.
# --------------------------------------------------------------------------------------
# ******* YOUR CODE FOR BAR CHART GOES HERE ******* #
colors<- c("black","red", "green", "blue", "cyan")
races<- c("Amer-Indian-Eskimo", "Asian-Pac-Islander", "Black","Other","White")
height_of_bar <- table(adult_db_lim$race)
barplot(height_of_bar, col=colors,
main = "Race of Adults",
names.arg = races,
cex.names = 0.8)
legend("topleft",legend=races ,col=colors,pch=20,cex=0.85)
# 3c. Plot a boxplot for attribute "Age" and show possible outlier for this attribute
# HINT: ?boxplot for more help
# ---------------------------------------------------------------------------------------------
# ****** YOUR CODE GOES HERE ***** #
age_boxplot <- boxplot(adult_db_lim$age, pch=20, col="red", main = "Age Of Adults")
# show possible outlier values
boxplot.stats(adult_db_lim$age)$out
#4 Create new data set from our latest dataset with only numeric attributes
# ------------------------------------------------------------------------
adult_db_numeric <- adult_db_lim[,c("age", "fnlwgt", "education_num", "capital_gain", "capital_loss", "hours_per_week")]
class_val <- as.numeric(adult_db_lim[,c("class")])
# Standardize numeric attributes in "adult_db_numeric" dataset.
# mean = 0 and sd = 1 for all numeric attributes
# -----------------------------------------------------------------------------------------------
# scale() standardizes every column in one call (centering to mean 0 and
# scaling to sd 1), replacing the original block of twelve per-column
# scale()/assignment lines. as.data.frame() keeps the result a data.frame of
# plain numeric columns rather than a matrix. adult_db_numeric itself is left
# unmodified; it is not referenced again below.
adult_db_num_std <- as.data.frame(scale(adult_db_numeric))
# we can check the mean and standard deviation of the standardized data
# ------------------------------------------------------------------
apply(adult_db_num_std, 2, mean)
apply(adult_db_num_std, 2, sd)
# 5a. Run Principal Component Analysis (PCA) on the numeric dataset from above "adult_db_num_std"
# plot the first 2 principal components
# HINT: for class specific colours, in plot(...) command use parameter col = (class_val + 2)
# HINT: ?prcomp to know about the parameters
# ------------------------------------------------------------------------------------------
# ******** YOUR CODE FOR GETTING PRINCIPAL COMPONENTS GOES HERE ******** #
pr.out <- prcomp(adult_db_num_std, scale = TRUE, center = TRUE)
names(pr.out)
head(pr.out$x)
principal_components <- pr.out$x
#******** PLOT FOR FIRST TWO PRINCIPAL COMPONENTS GOES HERE ****** #
plot(principal_components[,1:2], col = ( class_val+2), pch = 20, main = "First two Prinsipal Components")
legend("topleft",legend=c(">50K","<=50K"),col=c("green","red"),pch=20,cex=0.75)
# 5b. Plot percentage of the variance explained by principal components
# ----------------------------------------------------------------------------
# write a code to show proportion of variance explained by each Principal Component
# Standard deviation are stored as "sdev"
# *** YOUR CODE TO FIND PROPORTION OF VARIANCE EXPLAINED *** #
pr.var <- (pr.out$sdev)^2
pve <- pr.var/sum(pr.var)
# *** PLOT VARIANCE EXPLAINED BY PRINCIPAL COMPONENTS AND CUMULATIVE PROPORTION OF VARIANCE *** #
# par(...) for a plot with 2 figures.
par(mfrow=c(1,2), oma=c(0,0,2,0))
plot(pve, xlab = "PC", ylab = "Variance", type = "b",col ="red", ylim = c(0,1))
# use cumsum(pve) for cumulative sum of variance
#plot(#*** YOUR CODE, fill up parameters to plot cumulative proportion of variance explained ***#)
#mtext("Proportion of variance explained by Principal Components", outer=TRUE, cex=1.2)
plot(cumsum(pve), xlab = "PC", ylab = "Cumulative variance", type = "b",col ="red", ylim = c(0,1))
mtext("Proportion of Variance explained by PC", outer = TRUE)
par(mfrow = c(1,1))
# 5c. Write as comments how many principal components would you use to capture at least 50% variance
# and 90% varience respectively.
#Answer:
#From the plot we can see that we need 3 principal components to capture at least 50%
#variance and 6 to capture 90%.
#To be more convincing,I printed pve and I took
# "0.2289799 0.1748101 0.1617089 0.1588597 0.1488012 0.1268402".
#So with 3 principal components we get the sum of the first 3 that is approximately 0.56.
#With 5 we get approximately 0,88 so we need 6 to capture 90%.
|
acda0a67465c3070ec49a2de89271f664d4a97d4 | 1945b47177455e900baae351c1179197e0e4078d | /man/readSequenceMetadata.Rd | 0ed32824130e22aa69ff6349ec61fecd252ed712 | [] | no_license | naithanilab/neonMicrobe | 19711934b281d12adef4fd5ba85b517b3f99e344 | 359c2a3947b9151f370e889785e5bfe4fa207121 | refs/heads/master | 2023-04-12T18:36:26.916925 | 2021-04-30T17:04:49 | 2021-04-30T17:04:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 605 | rd | readSequenceMetadata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{readSequenceMetadata}
\alias{readSequenceMetadata}
\title{Read Sequence Metadata into Function}
\usage{
readSequenceMetadata(metadata)
}
\arguments{
\item{metadata}{Either a data.frame returned from \code{\link{downloadSequenceMetadata}} or the filepath to a local csv copy of the output from \code{\link{downloadSequenceMetadata}}.}
}
\value{
data.frame of the metadata.
}
\description{
Helper function to accept two formats of metadata into various functions in this package: data.frame and filepath to a csv
}
|
b75dfe5a34b87609af9b858f7c7641ed5716040b | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /augSIMEX/man/GeneRepeat.Rd | b12fa43867611c81e268467642bc795948cf4573 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,292 | rd | GeneRepeat.Rd | \name{GeneRepeat}
\docType{data}
\alias{GeneRepeat}
\title{Example data for univariate error-prone covariates in repeated measurements case}
\description{
This dataset provides example data illustrating the usage of the \code{\link{augSIMEX}} function. The data set is adapted from the outbred Carworth Farms White mice data (Parker et al., 2016). The dataset contains main data and validation data. We are interested in studying the association between the genotype of rs223979909 and the locomotor response to methamphetamine injections, which is subject to mismeasurement.
Tibia_5 - Tibia_30: the repeated measurement of tibia length
Tibia_M: the error-prone measurement of tibia length
Batch_T: the precisely labeled batch effect
Batch_O: the observed batch effect
Weight: the body weights of the mice
}
\usage{data(GeneRepeat)}
\references{
Parker C C, Gopalakrishnan S, Carbonetto P, et al. Genome-wide association study of behavioral, physiological and gene expression traits in outbred CFW mice[J]. Nature genetics, 2016, 48(8): 919.
}
\format{A list of two data frames. Main data (rs38916331, Totdist5, Totdist10, Totdist15, Totdist20, Totdist25, Totdist30, Weight, Batch_O) and validation data (Batch_T, Batch_O).}
\keyword{datasets}
|
f39952c6c18520fa03ae170087fd4029fd957083 | b43b4c788604a964161c961bdf9d5731b656a578 | /Dictionary/Rauh/JITP-Replication-Final/3_Manifestos/1_GetManifestoText.R | e85b54185239dcbec13184398a29f364e717e166 | [] | no_license | msaeltzer/scrape | ee4e4646085129b227051d279e108245fac6d697 | 8554df9e9b2c86ea8db5047fa4fb956f5de60c77 | refs/heads/master | 2020-04-22T22:16:12.249444 | 2019-05-15T07:39:58 | 2019-05-15T07:39:58 | 170,701,809 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,950 | r | 1_GetManifestoText.R | ############################################################
# Script retrives text fragments of directional coding items
# from the MANIFESTO database
#
# Project: Validation of sentiment dictionaries
#
# Author: christian.rauh@wzb.eu / christian-rauh.eu
# Date: 13.04.2017
############################################################
library(manifestoR)
library(stringr)
# Set YOUR working directory here (root of replication package)
setwd("M:/user/rauh/WZB_CR/Datensaetze/SentiWS_Validation/JITP-Replication/")
setwd("C:/Users/CUZ/WZB_CR/Datensaetze/SentiWS_Validation/JITP-Replication/")
# Set API key for Manifesto DB
# You have to register @ https://manifesto-project.wzb.eu/
# and then generate your personal API key on the profile page
mp_setapikey("manifesto_apikey.txt")
# List of CMP codes to be exploited for the test
################################################
# Essentially all classical categories with bi-directional options
# Hand picked from the MANIFESTO category scheme (v4) available at
# https://manifesto-project.wzb.eu/coding_schemes/1 (27.07.2015)
categories <- read.table(file = "./Data/DirectionalManifestoCategories.csv", header = TRUE, sep = ";")
# Some cosmetics for the issue description
# BUG FIX: the first recode used a single "=" inside "[", which is a syntax
# error in R (named-argument form is not valid there); all four comparisons
# now use "==".
categories$issue[categories$issue == "Foreign Special Relationships"] <- "For. Special Relations"
categories$issue[categories$issue == "European Community/Union"] <- "European Union"
categories$issue[categories$issue == "Traditional Morality"] <- "Trad. Morality"
categories$issue[categories$issue == "National Way of Life"] <- "Nat. Way of Life"
# Vector of numerical CMP codes of interest
codes <- categories$cmp_code
# Download of MANIFESTO quasi-sentences based on CMP codes of interest
######################################################################
# Corpus version: 2016-3
all <- mp_corpus((countryname == "Austria" | countryname == "Germany" | countryname == "Switzerland") & edate > as.Date("1998-01-01"), codefilter = codes)
# Turn tm corpora into standard data frames
###########################################
# Convert every document in the downloaded corpus to a data frame and bind
# once at the end. The original grew all_df with rbind() inside a for loop,
# which copies the accumulated frame on every iteration (quadratic cost).
# NOTE: assumes the corpus contains at least one document; an empty corpus
# would yield NULL instead of a zero-row data frame.
meta_cols <- c("text", "cmp_code", "manifesto_id", "party", "date", "language")
all_df <- do.call(rbind, lapply(seq_along(all), function(i) {
  doc_df <- as.data.frame(all[[i]], with.meta = TRUE)  # document i incl. metadata
  doc_df[, meta_cols]                                  # keep only variables of interest
}))
# Merge in category descriptions by CMP code
############################################
all_df_cat <- merge(all_df, categories, by = "cmp_code", all.x = TRUE)
# Keep only German language sentences
all_df_cat <- all_df_cat[all_df_cat$language == "german", ]
# Descriptive overviews
#######################
# Available election dates
table(all_df_cat$date)
# Available parties
table(all_df_cat$party)
# Distribution of quasi sentences by category
distr.over.cat <- as.data.frame(table(all_df_cat$issue))
distr.over.cat <- distr.over.cat[order(-distr.over.cat[, 2]), ] # Sort by frequency
write.table(distr.over.cat, file = "./3_Manifestos/Data/QuasiSentencesPerCategory.csv", sep = ";", row.names = FALSE)
# Distribution of quasi sentences by category and direction
distr.over.sub <- as.data.frame(table(all_df_cat$descr))
write.table(distr.over.sub, file = "./3_Manifestos/Data/QuasiSentencesPerSubCategory.csv", sep = ";", row.names = FALSE)
# Average length in terms
all_df_cat$length <- str_count(all_df_cat$text, " ") + 1
mean.length <- aggregate(all_df_cat$length, by = list(all_df_cat$issue), FUN = mean)
# Store the data
################
de.manifesto <- all_df_cat
save(de.manifesto, file = "./3_Manifestos/Data/AT_DE_CH_Sample.Rdata")
|
3b67125b80f2ca47d7df7016d9c757deed5a28a5 | 68429a685f0c4a76fb5db9c749b4d7c7be13b07a | /man/cal_coverage_impl.Rd | 92292fd01bf902ceeeaaa3f4db30834b53e2e904 | [] | no_license | dongzhuoer/paristools | 85de465fa53821c2c6002139bc45dcd31c1a4198 | 430d86eeddf518ae408f5f1ce0af46240f73f645 | refs/heads/master | 2022-06-18T15:45:32.007589 | 2022-05-14T09:09:42 | 2022-05-14T10:08:36 | 163,177,733 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 715 | rd | cal_coverage_impl.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{cal_coverage_impl}
\alias{cal_coverage_impl}
\title{calculate genome coverage for one chromosome}
\arguments{
\item{chrom_loc_df}{data.frame. Locations on one chromosome, columns are \code{strand}, \code{start}, \code{end}. There must be at least one region of minimum length 1.}
}
\value{
tibble. Columns are \code{pos}, \code{+}, \code{-}, the latter two are integers representing the genome, index is 1-based genome position, value is number of reads covering that position.
}
\description{
calculate genome coverage for one chromosome
}
\details{
Coordinates are 1-based and intervals are closed, i.e. [start, end].
}
\keyword{internal}
|
3fab55c39c7b01999f7c7ce0e9fa8a23ce6459b9 | af8aeaa3b352ee10ffadc073c2cb9e6940465dac | /Meta_QC/4. QQ plot and lambda.r | 1d1514352728dac8fdbf53764e402d603297f532 | [] | no_license | ammegandchips/PACE_Paternal_BMI | 357895485b6c17eba3901c4ce5267e45cbe688dc | 724d1fde1b04a0a98083ba6fae1c1b161134f600 | refs/heads/master | 2020-06-26T00:49:41.386778 | 2020-02-10T19:33:16 | 2020-02-10T19:33:16 | 96,999,961 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,678 | r | 4. QQ plot and lambda.r | time_point <- "birth" #or childhood
require(gridExtra)
myQQ <- function(P, ci = 0.95,Title) {
# Draw a quantile-quantile plot of -log10 p-values against the uniform
# expectation, with a pointwise confidence band and the genomic inflation
# factor (lambda) appended to the title.
#
# P     : numeric vector of p-values (sorted by the caller; see the sort()
#         call in qq.plot below)
# ci    : coverage of the confidence band (default 0.95)
# Title : prefix for the plot title
#
# Returns a ggplot object.
lambda <- Lambda(P)  # genomic inflation factor, defined later in this file
n <- length(P)
df <- data.frame(
observed = -log10(P),
expected = -log10(ppoints(n)),
# The i-th order statistic of n uniforms is Beta(i, n - i + 1); qbeta with
# shape1 = 1:n, shape2 = n:1 gives the pointwise band around the expected
# quantiles.
clower = -log10(qbeta(p = (1 - ci) / 2, shape1 = 1:n, shape2 = n:1)),
cupper = -log10(qbeta(p = (1 + ci) / 2, shape1 = 1:n, shape2 = n:1))
)
# Axis labels with a subscripted "10" via plotmath expressions.
log10Pe <- expression(paste("Expected -log"[10], plain(P)))
log10Po <- expression(paste("Observed -log"[10], plain(P)))
ggplot(df) +
geom_point(aes(expected, observed), shape = 1, size = 3) +
geom_abline(intercept = 0, slope = 1, alpha = 0.5) +  # identity (null) line
geom_line(aes(expected, cupper), linetype = 2,colour="blue") +
# ylim(0,6) +
geom_line(aes(expected, clower), linetype = 2,colour="blue") +
ggtitle(paste0(Title," Lambda = ",round(lambda,2))) +
theme_classic()+
theme(plot.title = element_text(hjust = 0.5))+
xlab(log10Pe) +
ylab(log10Po)
}
Lambda <- function(P) {
  # Genomic inflation factor: ratio of the median observed 1-df chi-square
  # statistic (back-transformed from the p-values) to the median of the
  # theoretical chi-square(1) distribution. A value near 1 indicates no
  # inflation.
  #
  # P : numeric vector of p-values; NA entries are ignored.
  #
  # Returns a single number.
  chisq <- qchisq(1 - P, 1)
  # Idiom fix: TRUE instead of the reassignable shorthand T.
  median(chisq, na.rm = TRUE) / qchisq(0.5, 1)
}
extract.p <- function(ewas.dataframe){
  # Pull the "Pvalue" column out of one EWAS result data frame as a plain
  # vector. The which()-based column lookup mirrors the original behavior
  # exactly (single-column selection drops to a vector).
  pval_idx <- which(colnames(ewas.dataframe) == "Pvalue")
  ewas.dataframe[, pval_idx]
}
qq.plot <- function(cohort,cohort_name){
# Build one QQ plot per result set in `cohort` and write them all to a
# single PNG named "<cohort_name>.<time_point>.qqplots.png".
#
# cohort      : list of data.frames, each with a "Pvalue" column
# cohort_name : string used in the panel titles and the output file name
#
# Side effects only: opens a png() device, draws via grid.arrange(), then
# closes the device. Relies on the global `time_point` set at the top of
# this file.
Ps <- data.frame(do.call(cbind, lapply(cohort,extract.p)))
filename <- paste0(cohort_name,".",time_point,".qqplots.png")
png(filename, width=30,height=40,units="cm",res=300)
# One QQ panel per column of p-values, titled "<cohort_name> :  <name>".
plot.function<-function(i){
myQQ(sort(Ps[,i]),Title=paste(cohort_name,": ",colnames(Ps)[i]))
}
plots <- lapply(1:ncol(Ps),plot.function)
do.call(grid.arrange,plots)
dev.off()
}
#Just lambda:
Ps <- data.frame(do.call(cbind, lapply(list.of.results,extract.p)))
apply(Ps,2,Lambda)
|
87af89366cff580cebfb495bf6b5a4aabba7bba3 | 98e471ca6c77f2a393aaf801ce35febea0ceeb19 | /man/estimateTuneRangerTime.Rd | a7de16999fd38d8175ba399597bf1b0ab844f1ac | [] | no_license | simonsimon01/tuneRF | 4f705f099d5541c0aad3546b0d9840b85b2e1c6e | 5f45e822a29da9be10d7e6088ef4eb6428151cb5 | refs/heads/master | 2021-01-06T20:44:17.154563 | 2017-08-04T15:19:34 | 2017-08-04T15:19:34 | 99,552,182 | 0 | 0 | null | 2017-08-07T07:55:38 | 2017-08-07T07:55:38 | null | UTF-8 | R | false | true | 552 | rd | estimateTuneRangerTime.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimateTuningTime.R
\name{estimateTuneRangerTime}
\alias{estimateTuneRangerTime}
\title{estimateTuneRangerTime}
\usage{
estimateTuneRangerTime(formula, data, iters = 100, num.threads = 1,
num.trees = 1000, respect.unordered.factors = TRUE)
}
\arguments{
\item{formula}{}
\item{data}{}
\item{iters}{}
\item{num.threads}{}
\item{num.trees}{}
\item{respect.unordered.factors}{}
}
\value{
estimated time for the tuning procedure
}
\description{
estimateTuneRangerTime
}
|
a76bc97f0e53171b6252157e552d34b209cf52e7 | ab09b3fa3ec78151bee19691375eea2a878a707a | /man/lme_mass_F.Rd | 809b60d2d952ad14c4786009c089998d879f2253 | [
"MIT"
] | permissive | kdiers/fslmer | 5e1ab403998bd308770e21026a9c6693113a9995 | 3ac2634c07984949ae18648d978b7aff7530f396 | refs/heads/master | 2023-06-25T11:31:09.848405 | 2021-07-30T15:33:49 | 2021-07-30T15:33:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 606 | rd | lme_mass_F.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lme_mass_F.R
\name{lme_mass_F}
\alias{lme_mass_F}
\title{Estimate F statistics for many vertices}
\usage{
lme_mass_F(stats, C)
}
\arguments{
\item{stats}{Model fit as returned by \code{lme_mass_fit_Rgw}}
\item{C}{Contrast vector}
}
\value{
The function returns a list with entries F, pval, sgn and df for each vertex.
}
\description{
Estimate F statistics for many vertices
}
\examples{
\dontrun{C <- matrix(c(0, 1, 0, 0, 0, 0), nrow=1)}
\dontrun{FitRgw <- lme_mass_fit_Rgw(...)}
\dontrun{F_C <- lme_mass_F(FitRgw$stats, C)}
}
|
c096c1142ecf06f8eda24088fecefc95ca2ae58a | 6a3a70fedc47ba6c4dccd6b05370b4e9aaded250 | /man/MAplot.Rd | b2da2748a4edd0a45d498a12d6fcb879ba19b355 | [] | no_license | bmbolstad/affyPLM | 59abc1d7762ec5de96e2e698a5665de4fddd5452 | c6baedfc045824d9cdfe26cd82daaf55f9f1f3b4 | refs/heads/master | 2023-01-22T13:55:51.132254 | 2023-01-19T23:55:17 | 2023-01-19T23:55:17 | 23,523,777 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 938 | rd | MAplot.Rd | \name{MAplot}
\alias{MAplot}
\alias{MAplot,PLMset-method}
\title{Relative M vs. A plots}
\description{
Create boxplots of M or M vs A plots. Where M is determined relative
to a specified chip or to a pseudo-median reference chip.
}
%% \usage{
%#MAplot(object,ref=NULL,...)
%% }
\arguments{
%z \item{object}{An \code{\link{PLMset-class}}}
\item{...}{Additional parameters for the routine}
\item{A}{A vector to plot along the horizontal axis}
\item{M}{A vector to plot along vertical axis}
\item{subset}{A set of indices to use when drawing the loess curve}
\item{show.statistics}{If true some summary statistics of the M values
are drawn}
\item{span}{span to be used for loess fit.}
\item{family.loess}{\code{"gaussian"} or \code{"symmetric"} as in
\code{\link[stats]{loess}}.}
\item{cex}{Size of text when writing summary statistics on plot}
}
\seealso{\code{\link[affy]{mva.pairs}}}
\keyword{hplot}
|
1db3ffd5ce4721925770769c668075472af9a79c | bf9b2e5ba76f2509083483fcbd68c067f6091a41 | /JobTalk_2021/figures_knitr/load_data.R | e049989226a3dbe92753cd5fadc49bb305c9da2b | [] | no_license | rgiordan/Presentations | b5f7c5131b085a25a04a90aa0b9aa1110aeb5be2 | 0f1715e3e8f9f462ebcc36f7020182c710df526b | refs/heads/master | 2023-08-10T13:50:14.977266 | 2023-07-28T17:12:15 | 2023-07-28T17:12:15 | 96,622,743 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 985 | r | load_data.R | # sim_env <- LoadIntoEnvironment(
# Load saved .Rdata result sets into separate environments so their
# contents do not clobber each other's names. `LoadIntoEnvironment` and
# `data_path` are defined elsewhere in the project (not in this file).
# Earlier simulation file, kept for reference (call opens on prev. line):
# file.path(data_path, "simulations", "simulations.Rdata"))
# Simulation results (noise grid) and their visualization objects.
sim_env <- LoadIntoEnvironment(
file.path(data_path, "simulations", "noise_grid.Rdata"))
sim_viz_env <- LoadIntoEnvironment(
file.path(data_path, "simulations", "visualization.Rdata"))
# Application results: cash transfers, OHIE, and microcredit analyses.
cash_env <- LoadIntoEnvironment(
file.path(data_path, "cash_transfers", "cash_transfers_results.Rdata"))
ohie_env <- LoadIntoEnvironment(
file.path(data_path, "ohie", "OHIE_results.Rdata"))
microcredit_env <- LoadIntoEnvironment(
file.path(data_path, "microcredit", "microcredit_results.Rdata"))
microcredit_refit_env <- LoadIntoEnvironment(
file.path(data_path, "microcredit", "microcredit_fit_paths.Rdata"))
# Additional result sets, currently disabled:
# microcredit_temptation_env <- LoadIntoEnvironment(
# file.path(data_path, "microcredit", "microcredit_temptation_results.Rdata"))
# mcmix_env <- LoadIntoEnvironment(
# file.path(data_path, "microcredit_mixture", "microcredit_mixture_results.Rdata"))
|
b133a31d8a1d5d26ab6c3e97b38361137da52b6e | c77c3ae74c228e3e396ea2cb02f2156feaec58a5 | /man/create_calculated_test_data.Rd | ede412d82d24f42a351ddab98bc3c9b0f7993561 | [] | no_license | johnchower/gloograph | 14d535dbb980fb3f2072ed84d11374182332b435 | 68952446acd499feed0e1beeee8cd1099ffa0ab8 | refs/heads/master | 2021-01-12T12:03:37.355597 | 2016-10-13T22:55:55 | 2016-10-13T22:55:55 | 69,919,425 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 644 | rd | create_calculated_test_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_test_data.r
\name{create_calculated_test_data}
\alias{create_calculated_test_data}
\title{Create 'calculated' test data set.}
\usage{
create_calculated_test_data(timeline, weight_comment = 5, weight_share = 10,
reach_modifier = 0.8)
}
\arguments{
\item{timeline}{A data frame that describes a sequence of post-related
actions.}
\item{weight_comment, weight_share, reach_modifier}{Parameters for the
actual calculation step.}
}
\value{
A data frame consisting of two columns: post_id engagement_score.
}
\description{
Create 'calculated' test data set.
}
|
8cf3ccb759c0acfffd28a9e8f187fd65b4642fb2 | e768aa9cd11382582fec5c8dd054a44e3a45ef96 | /Homework_1/Listings.R | 6e288853d93f27a0c2057ae1233083e64dd0cb97 | [] | no_license | PranavM98/Modelling-and-Representation-of-Data---IDS-702 | 12f5713eaed97fc6fa0f6beb08ccca7e5d5952f6 | db5c53fc6d7310555981c4e144f3a0f27908b166 | refs/heads/master | 2023-01-02T14:30:03.695683 | 2020-10-29T21:32:19 | 2020-10-29T21:32:19 | 292,388,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,166 | r | Listings.R | data<-read.table("Listings_QueenAnne.txt", header = TRUE, sep = " ")
# Exploratory analysis of Queen Anne Airbnb listings: boxplots of price
# against candidate predictors, then log-price linear models with
# residual diagnostics and removal of two influential observations.
data$room_type<-factor(data$room_type)
# Boxplots of price by each candidate predictor.
g1<-ggplot(data, aes(x=bedrooms, y=price)) + geom_boxplot()
g2<-ggplot(data, aes(x=accommodates, y=price)) + geom_boxplot()
g3<-ggplot(data, aes(x=host_identity_verified, y=price)) + geom_boxplot()
g4<-ggplot(data, aes(x=host_is_superhost, y=price)) + geom_boxplot()
g5<-ggplot(data, aes(x=room_type, y=price)) + geom_boxplot()
g6<-ggplot(data, aes(x=bathrooms, y=price)) + geom_boxplot()
# Association check between room type and accommodates.
chisq.test(data$room_type,data$accommodates)
ggarrange(g1,g2,g3,g4,g5,g6,
ncol = 3, nrow = 2)
# Base-graphics scatter/strip plots of price vs each predictor.
plot(data$price, data$bedrooms)
plot(data$price, data$accommodates)
plot(data$price, data$bathrooms)
plot(data$price, data$host_identity_verified)
plot(data$price, data$host_is_superhost)
plot(data$price, data$room_type)
# Full model (log price) and a reduced model without host_is_superhost.
model<-lm(log(price) ~ bedrooms+accommodates+bathrooms+host_identity_verified+host_is_superhost+room_type,data=data)
model1<-lm(log(price) ~ bedrooms+accommodates+bathrooms+host_identity_verified+room_type,data=data)
#Residual Standard error (Like Standard Deviation)
k=length(model$coefficients)-1 #Subtract one to ignore intercept
SSE=sum(model$residuals**2)
n=length(model$residuals)
sqrt(SSE/(n-(1+k))) #Residual Standard Error
k=length(model1$coefficients)-1 #Subtract one to ignore intercept
SSE=sum(model1$residuals**2)
n=length(model1$residuals)
sqrt(SSE/(n-(1+k))) #Residual Standard Error
hist(log(data$price))
plot(model)
#there are influential points and leverage points
plot(model,which=1,col=c("blue4"))
# Leverage scores, flagged where they exceed the 2*p/n rule of thumb.
# NOTE(review): p=7 and n=305 are hard-coded -- confirm they match the
# fitted model's parameter count and this data set's row count.
lev_scores=hatvalues(model)
p=7
n=305
plot(lev_scores,col=ifelse(lev_scores > (2*p/n), 'red2', 'navy'),type="h", ylab="Leverage score",xlab="Obs. number",main="Leverage Scores")
text(x=c(1:n)[lev_scores > (2*p/n)]+c(rep(2,4),-2,2),y=lev_scores[lev_scores > (2*p/n)], labels=c(1:n)[lev_scores > (2*p/n)])
#REMOVE INFLUENCE AND OUTLIERS. Points have a cook's distance above 1. Standard Residuals plot, above and below -4
data<-data[- c(31,138),]
model<-lm(log(price) ~ bedrooms+accommodates+bathrooms+host_identity_verified+host_is_superhost+room_type,data=data)
plot(model,which=1,col=c("blue4"))
|
590dc7b46f332971d0f02f27927e9cc2d67e709e | d338725b4829209def1259c1253ae56b927e1c96 | /vax_chord_diag.R | 629f706dd519bf94e82e306d583fc80ec90ab710 | [] | no_license | cssmilab/Chord-VAX | ea47d7836b32a896347cf65da796f5cc9da0d778 | 1885809b3326832402eec2599eae7b648ed6a070 | refs/heads/main | 2023-06-01T15:29:30.610266 | 2021-06-14T21:27:44 | 2021-06-14T21:27:44 | 376,960,389 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,400 | r | vax_chord_diag.R | library(circlize)
library(readr)
library(dplyr)
# Chord diagrams linking vaccines (matrix rows) to discussion topics
# (matrix columns). Input CSV: first column = vaccine names, remaining
# columns = topic counts; semicolon-separated (read_csv2).
data <- read_csv2("./chord_diag_eng.csv") %>%
na.omit(.)
# temak = "topics", vakcinak = "vaccines" (Hungarian variable names).
temak <- colnames(data)[2:ncol(data)]
vakcinak <- data$X1
data <- as.matrix(data[,2:ncol(data)])
colnames(data) <- temak
rownames(data) <- vakcinak
# Sector -> group mapping (Hungarian sector names, English group labels).
# NOTE(review): the last three entries keep Hungarian group labels
# ("Technikai feltételek", "Politikai dimenzió") while the rest are
# English -- differing labels put those sectors in separate groups;
# confirm whether shared English labels were intended.
group <- c(Pfizer = "Vaccines",
Moderna = "Vaccines", AstraZeneca = "Vaccines", Szputnyik = "Vaccines", Sinopharm = "Vaccines", Hatékony = "Efficiency", Hatásos = "Efficiency",
Védettség = "Efficiency" , Mellékhatás = "Concerns",
Önkéntes = "Concerns", Tárgyal = "Technical conditions", Szerződés = "Technical conditions",
Szállít = "Technikai feltételek","Orbán V." = "Politikai dimenzió","Szijjártó P." = "Politikai dimenzió")
# Sector colours, one entry per sector name.
grid.col <- c(Pfizer = "#A9A700", Moderna = "#58823D",
AstraZeneca = "#244776",
Szputnyik = "#409099",
Sinopharm = "#77C6D3",
Hatékony = "#7A4679", Hatásos = "#7A4679", Védettség = "#7A4679",
Mellékhatás = "#D81E00", Önkéntes = "#D81E00",
Tárgyal = "#AB7942", Szerződés = "#AB7942", Szállít = "#AB7942",
"Orbán V." = "#ED7D31", "Szijjártó P." = "#ED7D31")
#border_mat2 <- matrix("#ff6961", nrow = 1, ncol = ncol(data))
#rownames(border_mat2) <- rownames(data)[1]
#colnames(border_mat2) <- colnames(data)
# Base diagram: all link ribbons grey.
# NOTE(review): the stray ", ," in some chordDiagram calls passes an
# empty positional argument -- looks like a typo; confirm.
chordDiagram(data, annotationTrack = c("name","grid"), big.gap = 20, small.gap = 5,
group = group, row.col = rep("grey",length(grid.col)),
transparency = 0.5, , grid.col = grid.col)
circos.clear()
# Pfizer row highlighted in its sector colour.
chordDiagram(data,annotationTrack = c("name","grid"), big.gap = 20, small.gap = 5,
row.col = c("#A9A700", rep("grey",length(grid.col)-1)),
transparency = 0.5, , grid.col = grid.col, group = group)
circos.clear()
# Moderna highlighted.
# NOTE(review): the highlighted row positions below depend on the CSV's
# row order -- confirm against the data before trusting the labels.
chordDiagram(data, grid.col = grid.col, annotationTrack = c("name","grid"),
big.gap = 20, small.gap = 5, group = group,
row.col = c(rep("grey",3),"#58823D", rep("grey",length(grid.col)-5)), transparency = 0.5)
circos.clear()
# Sputnik highlighted.
chordDiagram(data, grid.col = grid.col, annotationTrack = c("name","grid"),
big.gap = 20,small.gap = 5, group = group,
row.col = c(rep("grey",8), "#409099" ,rep("grey",length(grid.col)-9)), transparency = 0.5)
circos.clear()
# Chinese vaccines (Sinopharm + Sinovac) highlighted.
chordDiagram(data, grid.col = grid.col, annotationTrack = c("name","grid"),
big.gap = 20,small.gap = 5, group = group,
row.col = c(rep("grey",9),"#77C6D3", "#637177" ,rep("grey",length(grid.col)-11)), transparency = 0.5)
circos.clear()
# Optional group-band highlighting, currently disabled:
#highlight.sector(rownames(data)[1:12], track.index = 1, col = "#5F8787",
#text = "Vakcinák", cex = 1, text.col = "black", niceFacing = TRUE)
#highlight.sector(colnames(data)[1:3], track.index = 1, col = "#7A4679",
#text = "Hatékonyság", cex = 1, text.col = "black", niceFacing = TRUE)
#highlight.sector(colnames(data)[4:5], track.index = 1, col = "#D81E00",
#text = "Aggályok", cex = 1, text.col = "black", niceFacing = TRUE)
#highlight.sector(colnames(data)[6:8], track.index = 1, col = "#AB7942",
#text = "Technikai Feltételek", cex = 1, text.col = "black", niceFacing = TRUE)
#highlight.sector(colnames(data)[9:10], track.index = 1, col = "#ED7D31",
#text = "Politikai Dimenzió", cex = 1, text.col = "black", niceFacing = TRUE)
|
77f93ebad362fb14f1dece3d67d2fa9921add24e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/PSF/examples/plot.psf.Rd.R | e0041e74e1a787c80998c6b1f3d4a8ebc825cf1a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 378 | r | plot.psf.Rd.R | library(PSF)
# Auto-extracted example for PSF::plot.psf (library(PSF) is loaded on the
# preceding line): fit, forecast, and plot a PSF model on 'nottem'.
### Name: plot.psf
### Title: Plot actual and forecasted values of a univariate time series
### Aliases: plot.psf
### ** Examples
## Train a PSF model from the univariate time series 'nottem' (package:datasets).
p <- psf(nottem)
## Forecast the next 12 values of such time series.
pred <- predict(p, n.ahead = 12)
## Plot forecasted values.
plot(p, pred)
|
5f168a674b893069f06ef375cabc74ddd1479e3f | 1f6d089f3f5cde7da64cdfe9e8e7ec4dd2d2ab62 | /amplicon_processing/scripts/6.dada2_mergeSequenceTables.R | 1f583f342dd29b8d3ab1b50b097930e5f2c65e62 | [] | no_license | polzlab/VanInsberghe_2019_Cdiff_colonization | 181cb33958c6bb6de0a60b8054afe8c3dce5dc99 | fb80407b3dec2dbd907aa5ac5debe1b83b375fea | refs/heads/master | 2020-04-09T20:26:38.350472 | 2019-12-12T14:05:33 | 2019-12-12T14:05:33 | 160,573,581 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 391 | r | 6.dada2_mergeSequenceTables.R | .libPaths(.libPaths()[2])
library(dada2)
# Snakemake rule script: merge per-run DADA2 sequence tables (RDS files
# listed in snakemake@input) into one table and save it as RDS.
#print(snakemake@input)
tables <- lapply(snakemake@input, readRDS)
# Optional filter dropping empty tables, currently disabled:
#tables <- tables[lapply(tables, length)>0]
combined <- do.call("mergeSequenceTables", tables)
saveRDS(combined, snakemake@output[["sequence_table_rds"]])
# Alternative output formats, currently disabled:
#write.csv(combined, snakemake@output[["sequence_table_csv"]])
#uniquesToFasta(combined, snakemake@output[["uniques_fasta"]])
b535c641586bd0b8329baa35dcfc43305e5a2230 | d33ae3dc91ab7a6eacf227a44776525e9a376ed8 | /simulation/R/app/server/welcome_custom.R | 30e6b6d1d1664453d34b26447f93ec1d1e7dbdca | [] | no_license | aloctavodia/density_estimation | 75d853a78c9ea703fd01de7c42222d8618536867 | 593d6eacb91e789fa9c4dc4b22444ca0e3018229 | refs/heads/master | 2021-01-05T18:57:20.321700 | 2020-04-09T05:34:39 | 2020-04-09T05:34:39 | 241,108,945 | 1 | 0 | null | 2020-02-17T13:02:22 | 2020-02-17T13:02:22 | null | UTF-8 | R | false | false | 1,725 | r | welcome_custom.R | showModal(
# Startup dialog (argument to showModal( on the previous line) asking the
# user for the directory of a Python 3 installation. The "python_path"
# input is read by the observeEvent handlers below; the dialog cannot be
# dismissed by clicking outside it (easyClose = FALSE).
modalDialog(
title = "Python 3 directory",
h4(strong("Select a directory")),
fluidRow(
column(
width = 10,
# Free-text path entry; also filled in by the "Browse" button handler.
textInput(
inputId = "python_path",
label = NULL,
width = "100%"
)
),
column(
width = 2,
# Triggers the directory-picker observer below.
actionButton(
inputId = "python_look_path_btn",
label = "Browse"
)
)
),
# Echoes the currently entered path back to the user.
verbatimTextOutput(
"modal_text"
),
footer = tagList(
p("The density estimator panel is not available if you do not have Python 3.x"),
p("Numpy and Scipy are", strong("required")),
actionButton("python_cancel_btn", "Dismiss"),
actionButton("python_add_path_btn", "OK")
),
easyClose = FALSE
))
# "Browse" button: open a directory picker and copy the chosen path
# (backslashes normalised to forward slashes) into the text input and
# the modal's echo area. `choose_directory` is defined elsewhere in the
# project.
observeEvent(input$python_look_path_btn, {
PYTHON_DIR <- choose_directory()
updateTextInput(
session = session,
inputId = "python_path",
value = gsub("\\\\", "/", PYTHON_DIR)
)
output$modal_text <- renderText(
input$python_path
)
})
# "OK" button: validate the entered path, then initialise Python and
# load the app's server panels. `is_valid_path`, `store` and
# `init_python_custom` are defined elsewhere in the project.
observeEvent(input$python_add_path_btn, {
if (!is_valid_path(input$python_path)) {
showNotification("Please input a valid path", type = "error")
} else if (!dir.exists(input$python_path)) {
showNotification("Path not found.", type = "error")
} else {
show("mainLayout")
removeModal()
store$PYTHON_PATH <- init_python_custom(input)
# source() returns list(value, visible); $value extracts the result
# of each sourced panel script.
source("server/panel1.R", local = TRUE)$value
source("server/panel2.R", local = TRUE)$value
source("server/panel3.R", local = TRUE)$value
}
})
# "Dismiss" button: continue without Python -- hide the density-plots
# tab and load only the first two server panels.
observeEvent(input$python_cancel_btn, {
show("mainLayout")
removeModal()
hideTab(
inputId = "tabs",
target = "Density plots"
)
source("server/panel1.R", local = TRUE)$value
source("server/panel2.R", local = TRUE)$value
})
88cfc8cf07f6669efe3f9d9905894b0222335152 | 958995c3c660478eafff002b95f3b4876496b171 | /Sourcefile.R | 37aabbe010520eabe46cbd754ce8c703cd3f4aa1 | [] | no_license | satman1001/Project1 | 5f77f26d51c260215df7912cc18ad4864e8d6e31 | 3904ac6f7e905d9b63b7317f266713cec63600d8 | refs/heads/master | 2021-01-01T17:01:30.525588 | 2017-07-29T20:06:54 | 2017-07-29T20:06:54 | 97,974,769 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,002 | r | Sourcefile.R | # Project Proposal
# install.packages('ggplot2') # visualization
# install.packages('ggthemes') # visualization
# install.packages('scales') # visualization
# install.packages('dplyr') # data manipulation
# install.packages('mice') # imputation
# install.packages('randomForest') # classification algorithm
# install.packages('htmlwidgets') # interactive widgets
# install.packages("knitr")
library('ggplot2') # visualization
library('ggthemes') # visualization
library('scales') # visualization
library('dplyr') # data manipulation
library('mice') # imputation
library('randomForest') # classification algorithm
library('htmlwidgets') #interactive widgets
library('knitr') #knitr codechunks
# Stack train and test so feature engineering can be applied to both at
# once (presumably the Kaggle Titanic files -- confirm).
# stringsAsFactors = F keeps character columns as characters.
train <- read.csv('train.csv', stringsAsFactors = F)
test <- read.csv('test.csv', stringsAsFactors = F)
full <- bind_rows(train, test) # combine
gender_submission <- read.csv('gender_submission.csv', stringsAsFactors = F)
# check data
str(full)
summary(full)
str(gender_submission)
help(qplot)
|
26463742d604be1282a2c492db0255dfc53e9f2d | b8e17335bfdc39df6d8a74ff30f43e1a4a840eb0 | /R/JLPM-package.R | 1c57208afe8c97639c4192fdf060dd119395358d | [] | no_license | anarouanet/JLPM | 43c18f8ddc6393ee014639fcea5513a61b258f49 | e226c5719aee97b5810e74e4564ee749ca0dce4b | refs/heads/main | 2023-04-15T16:23:43.117175 | 2021-05-04T08:03:58 | 2021-05-04T08:03:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 800 | r | JLPM-package.R | #' Joint latent process models
#'
#' Functions for the estimation of joint latent process models (JLPM)
#'
#' @name JLPM-package
#' @docType package
#' @author Cecile Proust-Lima, Viviane Philipps, Tiphaine Saulnier
#'
#' \email{cecile.proust-lima@@inserm.fr}
#' @references
#'
#'
#' @keywords package
#' @importFrom graphics axis hist lines matlines matplot mtext par plot points segments polygon
#' @importFrom grDevices rainbow rgb col2rgb n2mfrow
#' @importFrom stats as.formula formula get_all_vars integrate median model.frame model.matrix na.fail na.omit na.pass pchisq pnorm qnorm quantile rnorm sd terms residuals vcov fitted coef update
#' @importFrom survival Surv untangle.specials
#' @importFrom randtoolbox sobol
#' @useDynLib JLPM, .registration=TRUE, .fixes="C_"
NULL
|
c38eba3d6e6515b049086e7af51f5704cd56b4a1 | cd848096499430ef7d722b7bb05c7d9996f82334 | /module3/quiz2/quiz2_source.R | 34479dfe3b8354d12fd194eedcbf9963aa11f697 | [] | no_license | ratiom/dataSpecialization | eaec467400e118aeb09e156bfa4b7fca2123e33f | 5be7ff230c6817022ef7e7c0a21aaac94bbb092b | refs/heads/master | 2021-01-10T21:11:20.486215 | 2014-11-20T03:23:12 | 2014-11-20T03:23:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,482 | r | quiz2_source.R | library(httr)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. Register an application at https://github.com/settings/applications;
# Use any URL you would like for the homepage URL (http://github.com is fine)
# and http://localhost:1410 as the callback url
#
# Insert your client ID and secret below - if secret is omitted, it will
# look it up in the GITHUB_CONSUMER_SECRET environmental variable.
# SECURITY NOTE(review): a live client key/secret is hard-coded below;
# credentials committed to source control should be revoked and supplied
# via environment variables instead.
myapp <- oauth_app("github", key = "d5e0d6f79be758700e74", secret = "10cbd326d3813c825c78d46b8cab3289345f61ef")
# 3. Get OAuth credentials
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API
gtoken <- config(token = github_token)
req <- GET("https://api.github.com/users/jtleek/repos", gtoken)
stop_for_status(req)
json1 <- content(req)
library(rjson)
json2 = jsonlite::fromJSON(toJSON(json1))
json3 = json2[grep("datasharing", json2$name), ]
#Answer 2013-11-07T13:25:07Z
#----------------Eventually just got answer from
# OR:
req <- with_config(gtoken, GET("https://api.github.com/rate_limit"))
stop_for_status(req)
content(req)
#Q2: weighted person counts for respondents under 50.
# NOTE(review): base::library() has no `drv` argument -- `drv` belongs
# to sqldf()/dbConnect; confirm this call runs as written.
library(sqldf, drv='SQLite')
acs<-read.csv("./data/getdata_data_ss06pid.csv")
df21 <- sqldf("select pwgtp1 from acs where AGEP < 50", drv='SQLite')
#Q3: distinct ages in the sample.
df22 <- sqldf("select distinct AGEP from acs", drv='SQLite')
#Q4: character count of line 10 of the fetched HTML page.
fileurl <- url("http://biostat.jhsph.edu/~jleek/contact.html")
htmlcode <- readLines(fileurl)
nchar(htmlcode[10])
#45 31 7 25
#Q5 - 32426.7
|
8ac9f9272ca93b1d81c726076ead82d89e3333de | 3f312cabe37e69f3a2a8c2c96b53e4c5b7700f82 | /ver_devel/bio3d/man/get.pdb.Rd | 553b9336b2cdd718b52ad2430d218a06394885b4 | [] | no_license | Grantlab/bio3d | 41aa8252dd1c86d1ee0aec2b4a93929ba9fbc3bf | 9686c49cf36d6639b51708d18c378c8ed2ca3c3e | refs/heads/master | 2023-05-29T10:56:22.958679 | 2023-04-30T23:17:59 | 2023-04-30T23:17:59 | 31,440,847 | 16 | 8 | null | null | null | null | UTF-8 | R | false | false | 2,408 | rd | get.pdb.Rd | \name{get.pdb}
\alias{get.pdb}
\title{ Download PDB Coordinate Files }
\description{
Downloads PDB coordinate files from the RCSB Protein Data Bank.
}
\usage{
get.pdb(ids, path = ".", URLonly=FALSE, overwrite = FALSE, gzip = FALSE,
split = FALSE, format = "pdb", verbose = TRUE, ncore = 1, ...)
}
\arguments{
\item{ids}{ A character vector of one or more 4-letter PDB
codes/identifiers or 6-letter PDB-ID_Chain-ID of the files to be downloaded, or a \sQuote{blast}
object containing \sQuote{pdb.id}. }
\item{path}{ The destination path/directory where files are to be
written. }
\item{URLonly}{ logical, if TRUE a character vector containing the URL
path to the online file is returned and files are not downloaded. If
FALSE the files are downloaded. }
\item{overwrite}{ logical, if FALSE the file will not be downloaded if
it already exists. }
\item{gzip}{ logical, if TRUE the gzipped PDB will be downloaded and
extracted locally. }
\item{split}{ logical, if TRUE the \code{\link{pdbsplit}} function will be called to split PDB files
into separate chains. }
\item{format}{ format of the data file: \sQuote{pdb} or
\sQuote{cif} for PDB and mmCIF file formats, respectively. }
\item{verbose}{ print details of the reading process. }
\item{ncore}{ number of CPU cores used to do the calculation.
\code{ncore>1} requires package \sQuote{parallel} installed. }
\item{\dots}{ extra arguments passed to \code{\link{pdbsplit}} function. }
}
\details{
This is a basic function to automate file download from the PDB.
}
\value{
Returns a list of successfully downloaded files. Or optionally if URLonly
is TRUE a list of URLs for said files.
}
\references{
Grant, B.J. et al. (2006) \emph{Bioinformatics} \bold{22}, 2695--2696.
For a description of PDB format (version3.3) see:\cr
\url{http://www.wwpdb.org/documentation/format33/v3.3.html}.
}
\author{ Barry Grant }
\seealso{ \code{\link{read.pdb}}, \code{\link{write.pdb}},
\code{\link{atom.select}}, \code{\link{read.fasta.pdb}},
\code{\link{read.fasta}}, \code{\link{pdbsplit}} }
\examples{
\donttest{
# PDB server connection required - testing excluded
## PDB file paths
get.pdb( c("1poo", "1moo"), URLonly=TRUE )
## These URLs can be used by 'read.pdb'
pdb <- read.pdb( get.pdb("5p21", URL=TRUE) )
summary(pdb)
## Download PDB file
## get.pdb("5p21")
}
}
\keyword{utilities}
|
4d9a1886cad2403f587891e6a9ab927f89a591a2 | 79597e86a75c42b3bbf86ba8b3e01ec300032263 | /man/ClimMobTools.Rd | 78508a6366f94d99be7361f4dbf1e549d04112a0 | [
"MIT"
] | permissive | AgrDataSci/ClimMobTools | 90e1b41e195435258cfc9bcc5eaf147ef12f6f8c | 00f0521431c3a7d547d579f0bf794763b602bb87 | refs/heads/master | 2023-04-12T05:15:19.789991 | 2022-11-21T14:43:51 | 2022-11-21T14:43:51 | 177,651,687 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,433 | rd | ClimMobTools.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClimMobTools.R
\docType{package}
\name{ClimMobTools}
\alias{ClimMobTools}
\alias{ClimMobTools-package}
\title{API Client for the 'ClimMob' platform in R}
\description{
\if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}}
API client for 'ClimMob', an open source software for decentralized large-N trials with the 'tricot' approach \url{https://climmob.net/}. Developed by van Etten et al. (2016) \doi{10.1017/S0014479716000739}, it turns the research paradigm on its head; instead of a few researchers designing complicated trials to compare several technologies in search of the best solutions for the target environment, it enables many participants to carry out reasonably simple experiments that taken together can offer even more information. 'ClimMobTools' enables project managers to deep explore and analyse their 'ClimMob' data in R.
}
\seealso{
\strong{Useful links:}
\itemize{
\item{Development repository:
\url{https://github.com/agrdatasci/ClimMobTools}}
\item{Static documentation:
\url{https://agrdatasci.github.io/ClimMobTools/}}
\item{Report bugs:
\url{https://github.com/agrdatasci/ClimMobTools/issues}}
\item{ClimMob Platform:
\url{https://climmob.net}}
\item{The tricot user guide:
\url{https://hdl.handle.net/10568/109942}}
}
}
\author{
Kauê de Sousa and Jacob van Etten and Brandon Madriz
}
|
b534c944df2fcdf602badedf1483494800a5a779 | f9ec2c75b2a589070480c205dafe0511718f9739 | /000_gxeonly_figures.R | b7d86c82dacccb17c4ce514c93ac318e11fad9d2 | [] | no_license | amob/GenoPheno | 90484c871c6e354c4ae8a5a4bc7f2effb20453b1 | 96f94c70d1435880e01f51dbeaaab4b7d0d6583d | refs/heads/master | 2022-09-10T13:04:44.627533 | 2022-08-12T13:31:14 | 2022-08-12T13:31:14 | 153,506,383 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,918 | r | 000_gxeonly_figures.R |
# Rescale a numeric vector to [0, 1] by min-max normalisation.
#
# x : numeric vector; NA entries are ignored when computing the range
#     and are propagated (returned as NA) in the output.
# Returns a numeric vector the same length as x. A constant input
# yields NaN (0/0), as before.
#
# Rewritten in vectorised form: the original recomputed min/max inside
# a per-element sapply (O(n^2)) and used 1:length(x), T, and `=`.
range01 <- function(x) {
rng <- range(x, na.rm = TRUE)
(x - rng[1]) / (rng[2] - rng[1])
}
####READ IN DATA
# Design files: soil/biota treatments, phenotypes, pedigree, and
# per-population environmental data (in alphabetical population order).
soils <- read.csv("~/GenoPheno/covmat.csv",stringsAsFactors=F,header=T)
allpheno <- read.csv("~/GenoPheno/phenomat.csv",stringsAsFactors=F,header=T)
allped <- read.csv("~/GenoPheno/pedmat.csv",stringsAsFactors=F,header=T)
Apopenvdat <-read.csv("AlphabeticalPopEnvDat.csv",stringsAsFactors=F,header=T)
# Green channel expressed as a proportion of total RGB brightness.
greens <- (allpheno$green)/(allpheno$red + allpheno$blue + allpheno$green)
#swaps colors to ratios with total brightness
allpheno <- allpheno[,-c(3,10:12,14,16)]#cuts some traits not analyzed for various reasons (some colinear, some non-normal, some invariant), leaves 9 traits
allpheno$green <- greens
allpheno$germday <- allpheno$germday - 16 # is in days since july 1st, and later want it in Days to Germination, so here the script subtracts planting date
colnames(allpheno)[1]<- "ID"
cpheno <- scale(allpheno[,-1]) #center and scale
#soil treatment variable: codes 3/5/7 become the "mt"/"mc"/"tc" labels
soiltrt <- c()
soiltrt[which(soils$treatment==3)] <- "mt"
soiltrt[which(soils$treatment==5)] <- "mc"
soiltrt[which(soils$treatment==7)] <- "tc"
#soil trt and ped numbers reflect order in which populations were SAMPLED, not alphabet.
#redefining levels in pedigree file to match other files
allped$sire.pop[which(allped$sire.pop=="ml")]="m"
allped$dam.pop[which(allped$dam.pop=="ml")]="m"#"m"is 5th in the factorized, malinalco in the coancestry matrix is row/column 5. "ML" would come out 6th.
allped$dam.pop<-as.numeric(as.factor(allped$dam.pop))
allped$sire.pop<- as.numeric(as.factor(allped$sire.pop))
#now ped pop numbers reflect alphabet, although dams still reflect field sample order, and sire #s are unrelated and unique for all
# seeds in the same biota are extremely unlikely to share a father, since they come from different infructescences
# note that between biota, seeds sharing a mom may also share a father, since they often come from the same set of infructescences in each biota, but this is not reflected in the pedigree
#all analyses downstream that rely on relatedness are conducted separately across biota treatments for this reason.
# One row of environment data per seed, looked up by alphabetical dam population.
envdat <- Apopenvdat[allped$dam.pop,]
cenvdat <- scale(envdat)
#since dam.pop now reflects alphabetical order, can pull out env info this way
#### FIGURE
##ploting pop-momxe effects.
#plot phenotypes across soil treatments
# color by pop source temp. plot all individuals connected by lines was original. new figure plots differently.
# Split phenotype and pedigree rows by soil/biota treatment code.
traits.mt <-allpheno[which(soils$treatment==3),]
traits.mc <- allpheno[which(soils$treatment==5),]
traits.tc <- allpheno[which(soils$treatment==7),]
ped.mt <- allped[which(soils$treatment==3),]
ped.mc <- allped[which(soils$treatment==5),]
ped.tc <- allped[which(soils$treatment==7),]
# Per-dam trait means and standard errors within each treatment
# (columns 2:10 of allpheno are the 9 traits).
damtrait.mc <- sapply(2:10,function(z) tapply(traits.mc[,z],ped.mc$dam,mean,na.rm=T))
damtrait.mt <- sapply(2:10,function(z) tapply(traits.mt[,z],ped.mt$dam,mean,na.rm=T))
damtrait.tc <- sapply(2:10,function(z) tapply(traits.tc[,z],ped.tc$dam,mean,na.rm=T))
damtraitse.mc <- sapply(2:10,function(z) tapply(traits.mc[,z],ped.mc$dam,sd,na.rm=T)) / sqrt(sapply(2:10,function(z) tapply(sign(traits.mc[,z]),ped.mc$dam,sum,na.rm=T)))
damtraitse.mt <- sapply(2:10,function(z) tapply(traits.mt[,z],ped.mt$dam,sd,na.rm=T)) / sqrt(sapply(2:10,function(z) tapply(sign(traits.mt[,z]),ped.mt$dam,sum,na.rm=T)))
damtraitse.tc <- sapply(2:10,function(z) tapply(traits.tc[,z],ped.tc$dam,sd,na.rm=T)) / sqrt(sapply(2:10,function(z) tapply(sign(traits.tc[,z]),ped.tc$dam,sum,na.rm=T)))
# Per-population trait means and standard errors within each treatment.
poptrait.mc <- sapply(2:10,function(z) tapply(traits.mc[,z],ped.mc$dam.pop,mean,na.rm=T))
poptrait.mt <- sapply(2:10,function(z) tapply(traits.mt[,z],ped.mt$dam.pop,mean,na.rm=T))
poptrait.tc <- sapply(2:10,function(z) tapply(traits.tc[,z],ped.tc$dam.pop,mean,na.rm=T))
# NOTE(review): the .mt and .tc SE denominators below index
# ped.mc$dam.pop with NA masks taken from traits.mt / traits.tc --
# looks like a copy-paste slip (ped.mt / ped.tc expected); confirm.
poptraitse.mc <- sapply(2:10,function(z) tapply(traits.mc[,z],ped.mc$dam.pop,sd,na.rm=T)) / sqrt(sapply(2:10,function(z) table(ped.mc$dam.pop[which( !is.na(traits.mc[,z]) )])))
poptraitse.mt <- sapply(2:10,function(z) tapply(traits.mt[,z],ped.mt$dam.pop,sd,na.rm=T)) / sqrt(sapply(2:10,function(z) table(ped.mc$dam.pop[which( !is.na(traits.mt[,z]) )])))
poptraitse.tc <- sapply(2:10,function(z) tapply(traits.tc[,z],ped.tc$dam.pop,sd,na.rm=T)) / sqrt(sapply(2:10,function(z) table(ped.mc$dam.pop[which( !is.na(traits.tc[,z]) )])))
# Pack per-dam summaries into dam x soil x trait arrays; soil order on
# dimension 2 is mt, mc, tc (hence the "REORDERED SOILS" notes).
damtraits.soils <- array(,dim=c(100,3,9))#100 moms, 3 soils, 9 traits ##REORDERED SOILS
for(i in 1:9){
damtraits.soils[,2,i] <- damtrait.mc[,i]
damtraits.soils[,1,i] <- damtrait.mt[,i]
damtraits.soils[,3,i] <- damtrait.tc[,i]
}
damtraitsse.soils <- array(,dim=c(100,3,9))#100 moms, 3 soils, 9 traits ##REORDERED SOILS
for(i in 1:9){
damtraitsse.soils[,2,i] <- damtraitse.mc[,i]
damtraitsse.soils[,1,i] <- damtraitse.mt[,i]
damtraitsse.soils[,3,i] <- damtraitse.tc[,i]
}
poptraits.soils <- array(,dim=c(10,3,9))#10 pops, 3 soils, 9 traits ## REORDERED SOILS
for(i in 1:9){
poptraits.soils[,2,i] <- poptrait.mc[,i]
poptraits.soils[,1,i] <- poptrait.mt[,i]
poptraits.soils[,3,i] <- poptrait.tc[,i]
}
poptraitsse.soils <- array(,dim=c(10,3,9))#10 pops, 3 soils, 9 traits ## REORDERED SOILS
for(i in 1:9){
poptraitsse.soils[,2,i] <- poptraitse.mc[,i]
poptraitsse.soils[,1,i] <- poptraitse.mt[,i]
poptraitsse.soils[,3,i] <- poptraitse.tc[,i]
}
# Grand means and standard errors per trait within each treatment.
tc.mn <- apply(traits.tc[,2:10],2,mean,na.rm=T)
tc.se <- apply(traits.tc[,2:10],2,sd,na.rm=T)/sqrt(colSums(!is.na(traits.tc[,2:10])))
mc.mn <- apply(traits.mc[,2:10],2,mean,na.rm=T)
mc.se <- apply(traits.mc[,2:10],2,sd,na.rm=T)/sqrt(colSums(!is.na(traits.mc[,2:10])))
mt.mn <- apply(traits.mt[,2:10],2,mean,na.rm=T)
mt.se <- apply(traits.mt[,2:10],2,sd,na.rm=T)/sqrt(colSums(!is.na(traits.mt[,2:10])))
traitcols <- c("Days to flowering","Days to germination","Tassel length cm","Shoot biomass g","Root biomass g","Height cm","Stem width mm","Leaf width mm","Stem greenness %")#nice names
# Red-blue colour ramp by source-population annual temperature (TAnn),
# at several transparency levels.
rbb<-rgb(range01(Apopenvdat$TAnn),0,1-range01(Apopenvdat$TAnn))
rbsoft <- rgb(range01(Apopenvdat$TAnn),0,1-range01(Apopenvdat$TAnn),alpha=.75)
rbsoft1<- rgb(range01(Apopenvdat$TAnn),0,1-range01(Apopenvdat$TAnn),alpha=.3)
rbsoft2<- rgb(range01(Apopenvdat$TAnn),0,1-range01(Apopenvdat$TAnn),alpha=.1)
# Map each dam to her (alphabetical) population number.
damtoalphpop <- tapply(allped$dam.pop,allped$dam,mean)
# Three-column figure: treatment means (left), population reaction norms
# (middle), and dam/family reaction norms (right), one row per trait for
# traits 5, 3 and 1.
pdf("~/popdamtraits_3onlycol.pdf",height=6,width=3)
layout(matrix(1:9,ncol=3,byrow=T),heights=c(5,5,5),widths=c(0.75,0.9,1.1))
# Fixed horizontal jitter so the 3 soils' points/lines do not overlap.
jitter1 <- jitter(rep(1,times=10),factor=3)
jitter2 <- jitter(rep(2,times=10),factor=3)
jitter3 <- jitter(rep(3,times=10),factor=3)
jitter1d <- jitter(rep(1,times=100),factor=3)
jitter2d <- jitter(rep(2,times=100),factor=3)
jitter3d <- jitter(rep(3,times=100),factor=3)
par(oma = c(0,2,2,0))
for(i in c(5,3,1)){
par(mar=c(2,3,1,0))
# NOTE(review): `ylims` is computed but never used below (the plots use
# `ylimp` / `ylimf`); possibly a leftover.
ylims <- c(min(c(mt.mn[i]-mt.se[i],mc.mn[i]-mc.se[i],tc.mn[i]-tc.se[i])),max(c(mt.mn[i]+mt.se[i],mc.mn[i]+mc.se[i],tc.mn[i]+tc.se[i])) )
ylimp <- c(min(poptraits.soils[,,i]-poptraitsse.soils[,,i]),max(poptraits.soils[,,i]+poptraitsse.soils[,,i]))
# Left panel: overall treatment means with SE bars.
plot(c(mt.mn[i],mc.mn[i],tc.mn[i])~c(1,2,3),pch=1,cex=1,ylab="",xlab="",xaxt="n",cex.lab=1,xlim=c(0.5,3.5), bty="n",
ylim= ylimp)
axis(seq(from=-1000,to=1000,length.out=2),side=2, labels=NULL)
arrows(c(1,2,3),c(mt.mn[i]-mt.se[i],mc.mn[i]-mc.se[i],tc.mn[i]-tc.se[i]),y1=c(mt.mn[i]+mt.se[i],mc.mn[i]+mc.se[i],tc.mn[i]+tc.se[i]),length=0 )#,add=T)
mtext(traitcols[i],side=2, line=2)
par(mar=c(2,0,1,1))
# Middle panel: population means connected across the 3 soils.
plot(poptraits.soils[,,i]~1, pch=NA,ylab="",xlab="",xaxt="n",yaxt="n",cex.lab=1,xlim=c(0.8,3.2), bty = "n", ylim=ylimp)
rect(par("usr")[1],par("usr")[3],par("usr")[2],par("usr")[4],col = rgb(0,0,0,alpha=0.1),lty=NULL,border=NA)
sapply(1:nrow(poptraits.soils[,,i]), function(z) lines(c(jitter1[z],jitter2[z],jitter3[z]),poptraits.soils[z,,i],col=rbsoft[z],lty=1 ))
arrows(jitter1,poptrait.mt[,i]-poptraitse.mt[,i],y1=poptrait.mt[,i]+poptraitse.mt[,i],col=rbsoft,length=0 )#,add=T)
arrows(jitter2,poptrait.mc[,i]-poptraitse.mc[,i],y1=poptrait.mc[,i]+poptraitse.mc[,i],col=rbsoft,length=0 )#,add=T)
arrows(jitter3,poptrait.tc[,i]-poptraitse.tc[,i],y1=poptrait.tc[,i]+poptraitse.tc[,i],col=rbsoft,length=0 )#,add=T)
par(mar=c(2,1,1,1))
ylimf <- c( min( c(min(damtraits.soils[,,i]-damtraitsse.soils[,,i],na.rm=T),min(damtraits.soils[,,i],na.rm=T)) ),
max( c(max(damtraits.soils[,,i]+damtraitsse.soils[,,i],na.rm=T),max(damtraits.soils[,,i],na.rm=T)) ) )
# Right panel: dam/family means, coloured by source population.
plot(damtraits.soils[,,i]~1, pch=NA,ylab="",xlab="",xaxt="n",cex.lab=1,xlim=c(0.8,3.2), bty="n", ylim=ylimf )
axis(seq(from=-1000,to=1000,length.out=2),side=2, labels=NULL)
sapply(1:nrow(damtraits.soils[,,i]), function(z) lines(c(jitter1d[z],jitter2d[z],jitter3d[z]),damtraits.soils[z,,i],col=rbsoft1[damtoalphpop[z]],lty=1 ))
arrows(jitter1d,damtrait.mt[,i]-damtraitse.mt[,i],y1=damtrait.mt[,i]+damtraitse.mt[,i],col=rbsoft1[damtoalphpop],length=0 )#,add=T)
arrows(jitter2d,damtrait.mc[,i]-damtraitse.mc[,i],y1=damtrait.mc[,i]+damtraitse.mc[,i],col=rbsoft1[damtoalphpop],length=0 )#,add=T)
arrows(jitter3d,damtrait.tc[,i]-damtraitse.tc[,i],y1=damtrait.tc[,i]+damtraitse.tc[,i],col=rbsoft1[damtoalphpop],length=0 )#,add=T)
}
dev.off()
#
# Multi-panel figure: for each of 9 traits, one row of three panels --
# dam-level means (left), population-level means (middle), and grand
# soil-treatment means (right).  Relies on objects created earlier in this
# script: damtraits.soils / damtraitsse.soils and poptraits.soils /
# poptraitsse.soils (assumed 3-d arrays indexed [unit, soil, trait] -- TODO
# confirm), mt/mc/tc summary vectors, colour vectors rbsoft*, traitcols,
# and the jitter* offsets.
pdf("~/popdamtraits_alllevelsalltraits.pdf",height=8,width=8)
#all traits by mom, soils on different lines
# 9 rows x 3 columns of panels, filled by row; column widths 4 : 1 : 0.75.
layout(matrix(1:27,ncol=3,byrow=T),width=c(4,1,0.75))
par(oma=c(0,0,1,2))
par(mar=c(2,3,0,0))
for(i in 1:9){
# Dam-level means/SEs for trait i; dams ordered by population, populations
# ordered by mean annual temperature (Apopenvdat$TAnn), 10 dams per pop.
trtmns <- as.vector(t(damtraits.soils[order(damtoalphpop),,i][order(rep(Apopenvdat$TAnn,each=10)),]))
trtse <- as.vector(t(damtraitsse.soils[order(damtoalphpop),,i][order(rep(Apopenvdat$TAnn,each=10)),]))
# Empty frame sized to hold all means +/- 1 SE; points/bars drawn below.
plot(trtmns~rep(1:100,each=3),ylim=c(min(trtmns-trtse,na.rm=T),max(trtmns+trtse,na.rm=T)),
ylab="",xlab="",xaxt="n",cex.axis=1.25,cex.lab=1,pch=NA )
mtext(traitcols[i],side=1,line=0.5)
# Shaded background band per population (blocks of 10 dams).
lowy <- seq(from=0.5,to=100.5,by =10)
for(j in 1:10){
polygon(c(lowy[j],lowy[j]+10,lowy[j]+10,lowy[j]),rep(c(min(trtmns-trtse,na.rm=T),max(trtmns+trtse,na.rm=T)),each=2),border=NA,col=rbsoft2[order(Apopenvdat$TAnn)][j])
}
abline(v=c(lowy),lty=2,col=rgb(0,0,0,alpha=0.5))
# Dam means: three soil treatments per dam, coded by colour and symbol.
points(trtmns~rep(1:100,each=3), col=rep(c(1,rgb(0,1,0),rgb(1,0,1)), times=100),
pch=rep(c(1,2,5),times=100), cex=.5)
# +/- 1 SE bars (arrows with length=0 draw plain segments).
arrows(rep(1:100,each=3), trtmns-trtse, y1=trtmns+trtse,
col=rep(c(rgb(0,0,0,alpha=0.5),rgb(0,1,0,alpha=0.5),rgb(1,0,1,alpha=0.5)),times=100),
length=0 ,lwd=1 ) #in sel. skewers, mt is black, mc is green and mt is purple
if(i==2){ legend(5,y=80,c("Biota15.0","Biota14.3","Biota13.0"),pch=c(1,2,5),col=c(rgb(0,0,0,alpha=0.5),rgb(0,1,0,alpha=0.5),rgb(1,0,1,alpha=0.5)),bty="n") }
# Middle panel: population-level means/SEs for the same trait.
trtmnsp <- as.vector(t(poptraits.soils[order(Apopenvdat$TAnn),,i])) #mt first, them mc then tc
trtsep <- as.vector(t(poptraitsse.soils[order(Apopenvdat$TAnn),,i]))
plot(trtmnsp~rep(1:10,each=3),ylim=c(min(trtmnsp)-max(trtsep),max(trtmnsp)+max(trtsep)),
ylab="",xlab="",xaxt="n",cex.axis=1,cex.lab=1.25,pch=NA,xlim=c(0,11) )
points(trtmnsp~rep(1:10,each=3),col=rep(c(rgb(0,0,0),rgb(0,1,0),rgb(1,0,1)), times=10),pch=rep(c(1,2,5),times=10),cex=.5 )
arrows(rep(1:10,each=3),trtmnsp-trtsep,y1=trtmnsp+trtsep,col=rep(c(rgb(0,0,0,alpha=.5),rgb(0,1,0,alpha=.5),rgb(1,0,1,alpha=.5)), times=10),length=0 ,lwd=1 )
# One shaded band per population, matching the left panel's colour coding.
lowy <- seq(from=0.5,to=10.5,by =1)
for(j in 1:10){
polygon(c(lowy[j],lowy[j]+1,lowy[j]+1,lowy[j]),rep(c(min(trtmnsp-trtsep,na.rm=T),max(trtmnsp+trtsep,na.rm=T)),each=2),border=NA,col=rbsoft2[order(Apopenvdat$TAnn)][j])
}
# Right panel: grand means/SEs for the three soil treatments (mt, mc, tc).
smns <- c(mt.mn[i],mc.mn[i],tc.mn[i])
sses <- c(mt.se[i],mc.se[i],tc.se[i])
plot(smns~c(1:3),ylim=c(min(smns)-max(sses),max(smns)+max(sses)),
ylab="",xlab="",cex.axis=1,cex.lab=1.25,pch=NA ,xlim=c(0,4),xaxt="n")
# NOTE(review): rep(..., times=10) supplies 30 colours/symbols for only 3
# points; the extras are silently ignored -- harmless but worth tidying.
points(smns~c(1:3),col=rep(c(rgb(0,0,0),rgb(0,1,0),rgb(1,0,1)), times=10),pch=rep(c(1,2,5),times=10),cex=.5 )
arrows(1:3,smns-sses,y1=smns+sses,col=rep(c(rgb(0,0,0,alpha=.5),rgb(0,1,0,alpha=.5),rgb(1,0,1,alpha=.5)), times=10),length=0 ,lwd=1 )
}
dev.off()
|
cff747c50bfe8db9aae4675ec700cf0560269d71 | de14fae2d13215142af3bb23b4f77d389f33ca84 | /PlantDiversitySurvey-globalCommunityData.R | 5f62b898cd70e132b34c669918f199408f529225 | [] | no_license | TREC-Agroecology/plant-diversity | d3f63eebae91a2f0a524bcadceee7ec9916b7836 | c971a2ae90a16d7d2d715f4681f68eafca52beea | refs/heads/master | 2021-12-28T08:51:16.925320 | 2021-12-23T15:31:29 | 2021-12-23T15:31:29 | 133,709,581 | 2 | 1 | null | 2021-10-04T16:05:10 | 2018-05-16T18:46:35 | R | UTF-8 | R | false | false | 10,488 | r | PlantDiversitySurvey-globalCommunityData.R | ### Create 'Community data' table for vegan package from NEON sampling data
library(vegan)
library(tidyverse)
#' Build a vegan-style community matrix (plots x species) from survey records.
#'
#' @param species data frame with a `genus_species` column listing all taxa.
#' @param plots   data frame with one row per sampling unit; its columns act
#'                as join keys into `survey`.
#' @param survey  data frame of survey records carrying the plot-identifier
#'                columns plus `genus_species`.
#' @return numeric matrix with `nrow(plots)` rows and one named column per
#'         species; cell (r, c) counts the survey records of species c that
#'         join to plot r.
build_community_data <- function(species, plots, survey){
  community_data <- matrix(0, nrow = nrow(plots), ncol = nrow(species))
  colnames(community_data) <- species$genus_species
  for (r in seq_len(nrow(plots))){
    # The join depends only on the plot row, so perform it once per plot
    # instead of once per (species, plot) pair as the original loop did.
    # left_join joins by all shared columns; its message is expected noise.
    survey_at_scale <- suppressMessages(left_join(plots[r, ], survey))
    for (c in seq_len(nrow(species))){
      # Count matching records.  NA species names (rows produced by an
      # unmatched left join) never count, matching the behaviour of
      # filter() on an NA condition in the original implementation.
      community_data[r, c] <- sum(survey_at_scale$genus_species == species$genus_species[c],
                                  na.rm = TRUE)
    }
  }
  return(community_data)
}
## Data
# Main survey table: one row per recorded occurrence.
surveys <- read_csv("data/PlantDiversitySurvey-surveys.csv")

# Build a 6-letter species code (first 3 letters of genus + first 3 of
# species, lower-cased) and split the plot code "big.corner.small".
surveys_w_plots <- surveys %>%
  filter(!is.na(genus)) %>%
  mutate(genus_species = paste(tolower(str_extract(genus, "...")),
                               tolower(str_trunc(species, 3, "right", "")),
                               sep="")) %>%
  separate(code, c("big_plot", "corner", "small_plot"), sep="\\.") %>%
  # Expect missing pieces for 100m2 plots
  select(block, site, big_plot, corner, small_plot, genus_species)

# ECHO surveys: keep confident identifications only (UncertainId NA or
# "Species"); species codes already exist and are just lower-cased here.
echo <- read.csv("data/ECHO-surveys.csv") %>%
  filter(UncertainId %in% c(NA, "Species")) %>%
  mutate(genus_species = tolower(genus_species)) %>%
  separate(code, c("big_plot", "corner", "small_plot"), sep="\\.") %>%
  select(block, site, big_plot, corner, small_plot, genus_species)

# Pool the two survey sources into one table.
surveys_w_plots <- bind_rows(surveys_w_plots, echo)

# Master species list (alphabetical) shared by all community matrices.
all_species <- surveys_w_plots %>%
  distinct(genus_species) %>%
  arrange(genus_species)

# One record per species per 10 m^2 subplot (rows that carry a corner code).
tens <- surveys_w_plots %>%
  filter(!is.na(corner)) %>%
  group_by(block, site, big_plot, corner) %>%
  distinct(genus_species)

# One record per species per 100 m^2 plot.
hundreds <- surveys_w_plots %>%
  group_by(block, site, big_plot) %>%
  distinct(genus_species)

# Soil-disturbance status and habitat cluster for each block/site pair.
cluster <- data.frame(block = c(1, 1, 4, 4, 14, 14, 15, 15, 31, 31, 32, 32),
                      site = rep(c("N", "S"), 6),
                      status = c("high", "high", "low", "high",
                                 "high", "low", "low", "low",
                                 "sand", "sand", "sand", "sand"),
                      cluster = c("open", "open", "lawn", "open",
                                  "open", "hammock", "orchard", "orchard",
                                  "flatwood", "flatwood", "flatwood", "flatwood"))

# Plot tables at each scale.  distinct() on the grouped `tens` retains the
# grouping columns (block, site, big_plot) alongside corner.
# NOTE(review): blocks absent from `cluster` (e.g. the ECHO data) receive NA
# status/cluster from the join -- confirm downstream handling.
plots_tens <- distinct(tens, big_plot, corner) %>%
  mutate(site_code = paste(block, site, sep = "")) %>%
  left_join(cluster, by = c("block", "site"))

plots_hundreds <- distinct(surveys_w_plots, block, site, big_plot) %>%
  mutate(site_code = paste(block, site, sep = "")) %>%
  left_join(cluster, by = c("block", "site"))

plots_site <- distinct(surveys_w_plots, block, site)

plots_tens_mixed <- read_csv("data/plots_tens_mixed_global.csv") # mixed habitat classification
## Shannon diversity and evenness at various scales
# Community matrices (plots x species) at the 10 m^2, 100 m^2, and site
# scales.  Shannon H' comes from vegan::diversity; evenness is Pielou's
# J = H' / ln(S) with species richness S from specnumber().
matrix_ten <- build_community_data(all_species, plots_tens, tens)
diversity_ten <- diversity(matrix_ten)
evenness_ten <- diversity_ten/log(specnumber(matrix_ten))

matrix_hundred <- build_community_data(all_species, plots_hundreds, hundreds)
diversity_hundred <- diversity(matrix_hundred)
evenness_hundred <- diversity_hundred/log(specnumber(matrix_hundred))

# NOTE(review): the site-scale matrix is built from the 10 m^2 records
# (`tens`), not from `hundreds` -- confirm this is intentional.
matrix_site <- build_community_data(all_species, plots_site, tens)
diversity_site <- diversity(matrix_site)
evenness_site <- diversity_site/log(specnumber(matrix_site))

## Plot Diversity and Evenness
# Histogram of 10 m^2 Shannon diversity, faceted by block, filled by site.
diversity_ten_tbl <- plots_tens %>%
  bind_cols(diversity = diversity_ten)
ggplot(diversity_ten_tbl, aes(x=diversity, fill=site)) +
  geom_histogram(binwidth = 0.25) +
  facet_grid(block~.) +
  theme_bw()
## Non-metric Multidimensional Analysis [PCOA / metaMDS with cmdscale(), vegdist()]

### Tens
# Bray-Curtis dissimilarities among 10 m^2 subplots, ordinated in 2-d NMDS.
dist_plots_10 <- vegdist(matrix_ten, "bray")
nmds_plots_10 <- metaMDS(dist_plots_10, k=2, try=100, trace=TRUE)
# Attach the NMDS axis scores to the subplot metadata for plotting.
nmds_plots_scores_10 <- plots_tens %>%
  bind_cols(NMDS1 = scores(nmds_plots_10)[,1], NMDS2 = scores(nmds_plots_10)[,2])
# Ordination plot: shape = N/S site, colour = block (colour-blind palette).
ggplot(nmds_plots_scores_10, aes(x=NMDS1, y=NMDS2, shape=site, color=as.factor(block))) +
  #label = paste(big_plot,corner))) +
  geom_point(cex=5) +
  #geom_text(color="black") +
  labs(x="NMDS1", y="NMDS2", shape="Site", color="Block") +
  theme_bw(base_size=20, base_family="Helvetica") +
  scale_colour_manual(values=c("#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00", "#CC79A7"))
ggsave("output/nmds-10-global.png", width = 8, height = 6)
# PERMANOVA on the same distances.
# NOTE(review): this section uses adonis() while the habitat sections below
# use adonis2() -- consider standardising on adonis2().
perm_plots_10 <- adonis(dist_plots_10 ~ plots_tens$status + plots_tens$cluster +
                          plots_tens$block + plots_tens$site_code, permutations = 1000)
sink("output/permanova-10-global.txt")
print(perm_plots_10)
sink()
### Hundreds
# Bray-Curtis dissimilarities among 100 m^2 plots, ordinated in 2-d NMDS.
dist_plots_100 <- vegdist(matrix_hundred, "bray")
nmds_plots_100 <- metaMDS(dist_plots_100, k=2, try=100, trace=TRUE)
# Attach the NMDS axis scores to the plot metadata.
nmds_plots_scores_100 <- plots_hundreds %>%
  bind_cols(NMDS1 = scores(nmds_plots_100)[,1], NMDS2 = scores(nmds_plots_100)[,2])
# FIX: the original mapped colour to `pub_site`, which is not a column of
# plots_hundreds and is not defined anywhere in this script, so printing
# the plot errored.  `block` matches the six colours supplied to
# scale_colour_manual() and the colouring used in the Tens section; here a
# block corresponds to one physical site (labelled "Site" in the legend).
# The y-axis is flipped (-NMDS2) for display orientation only.
ggplot(nmds_plots_scores_100, aes(x=NMDS1, y=(-1*NMDS2), shape=site, color=as.factor(block))) +
  #label = big_plot)) +
  geom_point(cex=5) +
  #geom_text(color="black") +
  labs(x="NMDS1", y="NMDS2", shape="Plot", color="Site") +
  theme_bw(base_size=20, base_family="Helvetica") +
  scale_colour_manual(values=c("#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00", "#CC79A7"))
ggsave("output/nmds-100-global.png", width = 8, height = 6)
# PERMANOVA: partition Bray-Curtis variation by habitat, block, and site.
perm_plots_100 <- adonis(dist_plots_100 ~ plots_hundreds$cluster +
                           plots_hundreds$block + plots_hundreds$site_code, permutations = 1000)
sink("output/permanova-100-global.txt")
print(perm_plots_100)
sink()
### Tens Habitat
# Same 10 m^2 ordination as above (distances and NMDS recomputed with
# identical calls), but displayed with the mixed habitat classification.
dist_plots_10 <- vegdist(matrix_ten, "bray")
nmds_plots_10 <- metaMDS(dist_plots_10, k=2, try=100, trace=TRUE)
stressplot(nmds_plots_10)
# Fix the habitat factor levels so legend/colour order is consistent.
nmds_plots_scores_10 <- plots_tens_mixed %>%
  bind_cols(NMDS1 = scores(nmds_plots_10)[,1], NMDS2 = scores(nmds_plots_10)[,2]) %>%
  mutate(cluster = factor(cluster, levels=c("open", "lawn", "orchard",
                                            "hammock", "flatwood")))
# Ordination plot: shape = soil-disturbance status, colour = habitat.
ggplot(nmds_plots_scores_10, aes(x=NMDS1, y=NMDS2, shape=status, color=as.factor(cluster))) +
  #label = paste(big_plot,corner))) +
  geom_point(cex=5) +
  #geom_text(color="black") +
  labs(x="NMDS1", y="NMDS2", shape="Soil Disturbance", color="Habitat") +
  theme_bw(base_size=20, base_family="Helvetica") +
  scale_colour_manual(values=c("#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00"))
ggsave("output/nmds-10-habitat-global.png", width = 8, height = 6)
# PERMANOVA: soil status + habitat alone, then with block and site added.
perm_plots_10_soil_habitat <- adonis2(dist_plots_10 ~ plots_tens_mixed$status + plots_tens_mixed$cluster,
                                      permutations = 1000)
perm_plots_10 <- adonis2(dist_plots_10 ~ plots_tens_mixed$status + plots_tens_mixed$cluster +
                           plots_tens_mixed$block + plots_tens_mixed$site_code, permutations = 1000)
sink("output/permanova-10-habitat-global.txt")
print(perm_plots_10_soil_habitat)
print(perm_plots_10)
sink()
### Hundreds Habitat
# 100 m^2 ordination coloured by habitat cluster instead of site.
dist_plots_100 <- vegdist(matrix_hundred, "bray")
nmds_plots_100 <- metaMDS(dist_plots_100, k=2, try=100, trace=TRUE)
stressplot(nmds_plots_100)
# Fix the habitat factor levels so legend/colour order is consistent.
nmds_plots_scores_100 <- plots_hundreds %>%
  bind_cols(NMDS1 = scores(nmds_plots_100)[,1], NMDS2 = scores(nmds_plots_100)[,2]) %>%
  mutate(cluster = factor(cluster, levels=c("open", "lawn", "orchard",
                                            "hammock", "flatwood")))
ggplot(nmds_plots_scores_100, aes(x=NMDS1, y=NMDS2, shape=status, color=cluster)) +
  #label = big_plot)) +
  geom_point(cex=5) +
  #geom_text(color="black") +
  labs(x="NMDS1", y="NMDS2", shape="Soil Disturbance", color="Habitat") +
  theme_bw(base_size=20, base_family="Helvetica") +
  scale_colour_manual(values=c("#E69F00", "#56B4E9", "#009E73", "#0072B2", "#D55E00"))
# FIX: the original chained ggsave() onto the plot with a trailing "+",
# which evaluates ggsave() against the previous plot (ggplot2's last_plot())
# and then errors when the returned file path is "added" to the ggplot.
# Every other section calls ggsave() as a separate statement; do that here.
ggsave("output/nmds-100-habitat-global.png", width = 8, height = 6)
# PERMANOVA: soil status + habitat alone, then with block and site added.
perm_plots_100_soil_habitat <- adonis2(dist_plots_100 ~ plots_hundreds$status + plots_hundreds$cluster,
                                       permutations = 1000)
perm_plots_100 <- adonis2(dist_plots_100 ~ plots_hundreds$status + plots_hundreds$cluster +
                            plots_hundreds$block + plots_hundreds$site_code, permutations = 1000)
sink("output/permanova-100-habitat-global.txt")
print(perm_plots_100_soil_habitat)
print(perm_plots_100)
sink()
### Hundreds Rank
# Per-habitat species abundance ranks at the 100 m^2 scale.  For each
# habitat cluster: restrict to that cluster's plots, build a community
# matrix, total counts per species, and record count + rank; clusters are
# joined column-wise onto species_rank as the loop proceeds.
# NOTE(review): rm() emits a warning when species_rank does not already
# exist; the exists() check below lets the first iteration create it.
rm(species_rank)
names_list <- c("genus_species")
for (c in unique(plots_hundreds$cluster)){
  # Two columns per cluster: count_<cluster> and rank_<cluster>.
  names_list <- c(names_list, c(paste("count", c, sep="_"), paste("rank", c, sep="_")))
  # NOTE(review): filter(cluster == c) drops rows with NA cluster, and an
  # NA value of c (possible after the earlier left_join) matches nothing.
  blocks <- filter(plots_hundreds, cluster == c)
  cluster_matrix <- build_community_data(all_species, blocks, hundreds)
  # Species totals across the cluster's plots, most abundant first;
  # zero-count species are excluded before ranking (ties share the min rank).
  matrix_sum <- sort(colSums(cluster_matrix), decreasing = TRUE)
  if (exists("species_rank")){
    species_rank <- full_join(species_rank,
                              data.frame(genus_species = names(matrix_sum[matrix_sum>0]),
                                         count = matrix_sum[matrix_sum>0],
                                         rank = rank(-matrix_sum[matrix_sum>0], ties.method="min")),
                              by = "genus_species")
  } else {
    # First cluster: seed from the master species list so every species
    # appears as a row even if absent from this cluster.
    species_rank <- left_join(all_species,
                              data.frame(genus_species = names(matrix_sum[matrix_sum>0]),
                                         count = matrix_sum[matrix_sum>0],
                                         rank = rank(-matrix_sum[matrix_sum>0], ties.method="min")),
                              by = "genus_species")
  }
}
# Apply the accumulated count_*/rank_* column names.
names(species_rank) <- c(names_list)
# Keep only the rank columns for the report below.
rank_table <- species_rank %>%
  select(genus_species, starts_with("rank_"))
# Write the per-habitat top species to a text file.  Top-level expressions
# auto-print, so the bare as.data.frame(...) lines are captured by sink().
sink("output/top-species-global.txt")
# Open habitat: top three species, with their ranks in the other habitats.
top_open <- rank_table %>%
  filter(rank_open <= 3) %>%
  arrange(rank_open) %>%
  select(genus_species, everything(), -rank_open)
cat("Open\n")
as.data.frame(top_open)
# Remaining habitats: single most-abundant species each.
top_lawn <- rank_table %>%
  filter(rank_lawn == 1) %>%
  select(genus_species, everything(), -rank_lawn)
cat("\nLawn\n")
as.data.frame(top_lawn)
top_orchard <- rank_table %>%
  filter(rank_orchard == 1) %>%
  select(genus_species, everything(), -rank_orchard)
cat("\nOrchard\n")
as.data.frame(top_orchard)
top_hammock <- rank_table %>%
  filter(rank_hammock == 1) %>%
  select(genus_species, everything(), -rank_hammock)
cat("\nHammock\n")
as.data.frame(top_hammock)
# NOTE(review): rank_echo requires a cluster literally named "echo"; the
# `cluster` lookup table defined above contains no such value, so this
# column may not exist at run time -- verify before relying on it.
top_echo <- rank_table %>%
  filter(rank_echo <= 3) %>%
  select(genus_species, everything(), -rank_echo)
cat("\nECHO\n")
as.data.frame(top_echo)
sink()
|
9ac1e9095e5dbaae87d9d3de7265fb7f79477454 | 0044a6ab1270c0cfa09bcd7ebd29e60606676a00 | /R/coherence2.R | 01b4a8d9b067167731a8f2e983c35d48ab94b548 | [] | no_license | driegert/transfer | f5aab16281c7681f648614b5a2c713f7b7d6d14a | 7d5c31b3675c2b597148d27a25d4a00c1b66225a | refs/heads/master | 2020-04-04T22:25:37.759720 | 2017-10-12T20:00:13 | 2017-10-12T20:00:13 | 81,997,706 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,659 | r | coherence2.R | #' Calculates the coherence between two series
#'
#' Estimates the frequency domain coherence using the multitaper method.
#' @param x A \code{data.frame} whose columns are the time domain input series.
#' @param y A \code{numeric} vector containing the response series.
#' @param blockSize A \code{numeric} indicating the block sizes into which the
#' input and response series will be partitioned.
#' @param overlap A \code{numeric} between 0 and 1 indicating how much overlap should exist
#' between adjacent blocks.
#' @param deltat A \code{numeric} indicating the sample rate.
#' @param nw A \code{numeric} indicating the time bandwidth parameter for estimating the
#' Slepian data tapers.
#' @param k A \code{numeric} indicating the number of tapers to use - should be approximately
#' floor(2*nw - 1) and no larger than floor(2*nw).
#' @param nFFT A \code{numeric} indicating the number of frequency bins to use (i.e. setting
#' the zeropadding amount).
#' @param forward Indicates whether the forward (TRUE) or reverse (FALSE)
#' coherence should be calculated.
#' @param average An \code{integer} representing how the average across blocks
#' should be calculated;
#' 0 - no average, return all the individual block information;
#' 1 - average the cross and auto-spectra across blocks, then calculate the coherency
#' 2 - estimate the coherency for each block, average the coherency across blocks
#' 3 - estimate the MSC for each block, average the MSC across blocks.
#' @param freqRange A \code{numeric} vector containing two elements with the start
#' and end location for the band on which to estimate the coherence.
#' @param maxFreqOffset A \code{numeric} value indicating the maximum offset coherence to
#' calculate in the specified band.
#' @param prewhiten NOT YET IMPLEMENTED
#' @param removePeriodic NOT YET IMPLEMENTED
#' @param sigCutoff NOT YET IMPLEMENTED
#'
#' @details MSC stands for Magnitude Squared Coherence.
#'
#' @export
coherence2 <- function(x, y = NULL, blockSize = length(x), overlap = 0, deltat = 1
                       , nw = 4, k = 7, nFFT = NULL, forward = TRUE
                       , average = 1, msc = FALSE
                       , freqRange = NULL, maxFreqOffset = NULL
                       , prewhiten = FALSE, removePeriodic = TRUE, sigCutoff = NULL)
{
  # Validate the averaging mode and the freqRange/maxFreqOffset pairing.
  if (!any(average == 0:3)){
    stop("average must have an integer value between 0 and 3.")
  }
  if (is.null(freqRange) && !is.null(maxFreqOffset)){
    stop("maxFreqOffset implies that freqRange should be assigned.")
  }

  # Number of frequency bins (zero-pad length): next power of two at or
  # above the block size, padded by a further factor of 8.
  if (is.null(nFFT)){
    nFFT <- 2^(floor(log2(blockSize)) + 3)
  }

  # Positive Fourier frequencies implied by the sampling rate and padding.
  freq <- seq(0, 1/(2*deltat), by = 1/(nFFT*deltat))
  nfreq <- length(freq)

  # Block the data.  When y is NULL the series is paired with itself.
  # x2 is a list of two-column data frames, one per (possibly overlapping)
  # block; only the first column of each input series is used.
  if (is.null(y)){
    x2 <- sectionData(data.frame(x = x[, 1], y = x[, 1]), blockSize = blockSize, overlap = overlap)
  } else {
    x2 <- sectionData(data.frame(x = x[, 1], y = y[, 1]), blockSize = blockSize, overlap = overlap)
  }
  numSections <- attr(x2, "numSections")

  # Indices of the analysis band, widened by maxFreqOffset on both sides.
  if (is.null(freqRange)){
    freqIdx <- NULL
  } else {
    freqIdx <- head(which(freq >= (freqRange[1] - maxFreqOffset) ), 1):tail(which(freq <= (freqRange[2] + maxFreqOffset)), 1)
  }

  # Adaptively weighted eigencoefficients, restricted to freqIdx per block.
  wtEigenCoef <- blockedEigenCoef(x2, deltat = deltat, nw = nw, k = k, nFFT = nFFT
                                  , numSections = numSections, adaptiveWeighting = TRUE
                                  , returnWeights = FALSE, idx = freqIdx)

  # BUG FIX: every assignment of `spec` was commented out in the previous
  # revision, so the average == 1..3 branches below failed with
  # "object 'spec' not found".  Rebuild the per-block spectra from the
  # eigencoefficients.  No idx argument is passed because wtEigenCoef was
  # already subset to freqIdx above -- TODO confirm against
  # calculateSpec()'s handling of idx.
  spec <- lapply(wtEigenCoef, calculateSpec, forward = forward)

  subFreq <- freq[freqIdx]

  # Bookkeeping returned to the caller alongside the coherence estimate.
  info <- list(allFreq = freq, blockSize = blockSize, overlap = overlap
               , numSections = numSections
               , deltat = deltat, nw = nw, k = k, nFFT = nFFT
               , forward = forward, average = average, msc = msc
               , freqRange = freqRange
               , freqRangeIdx = c(head(which(freq >= freqRange[1]), 1), tail(which(freq <= freqRange[2]), 1))
               , maxFreqOffset = maxFreqOffset
               , maxFreqOffsetIdx = tail(which(freq <= maxFreqOffset), 1) - 1 #-1 due to 0 frequency
               , freqIdx = freqIdx, prewhiten = prewhiten
               , removePeriodic = removePeriodic, sigCutoff = sigCutoff)

  # Position of the (unwidened) band inside the subset axis; currently unused.
  subFreqIdxRange <- c(which(freqIdx == info$freqRangeIdx[1]), which(freqIdx == info$freqRangeIdx[2]))

  if (average == 0){
    # Per-block output is not implemented in this version.
    info$msc <- FALSE
    warning("Not implemented in this version.")
    return(list(freq = subFreq, coh = NULL, info = info))
  } else if (average == 1) {
    # Average cross- and auto-spectra across blocks, then form coherency.
    Sxy.ave <- Reduce('+', lapply(spec, "[[", "Sxy")) / numSections
    Sxx.ave <- Reduce('+', lapply(spec, "[[", "Sxx")) / numSections
    Syy.ave <- Reduce('+', lapply(spec, "[[", "Syy")) / numSections
    coh <- Sxy.ave / sqrt(Sxx.ave %*% t(Syy.ave))
  } else if (average == 2) {
    # Coherency per block, averaged across blocks.
    coh <- Reduce('+', lapply(spec, function(obj){ obj$Sxy / sqrt(obj$Sxx %*% t(obj$Syy)) })) / numSections
  } else if (average == 3) {
    # Magnitude-squared coherence per block, averaged; already squared.
    coh <- Reduce('+', lapply(spec, function(obj){ abs(obj$Sxy)^2 / (obj$Sxx %*% t(obj$Syy)) })) / numSections
    info$msc <- TRUE
    return(list(freq = subFreq, coh = coh, info = info))
  }

  if (msc){
    list(freq = subFreq, coh = abs(coh)^2, info = info)
  } else {
    list(freq = subFreq, coh = coh, info = info)
  }
}
# colSums(t(A) * B)
# For each block in `obj`, accumulate column-wise products of the x and y
# components over the columns offsetIdx..band end ("colSums(t(A) * B)"-style).
mscOffsetByFreq <- function(offsetIdx, obj, bandIdxRange){
  # BUG FIX: the body previously forwarded the undefined name
  # `bandIdxLength` (the helper's parameter name) instead of this
  # function's own `bandIdxRange` argument, raising
  # "object 'bandIdxLength' not found".  Pass the real argument through.
  lapply(obj, mscOffsetByFreqHelper, offsetIdx, bandIdxRange)
}

# Helper for one block: elementwise product of obj$x and obj$y restricted
# to columns offsetIdx:bandIdxLength, summed down each column.
# (obj$x / obj$y are expected to be data-frame-like so `[i:j]` selects
# columns and colSums applies -- TODO confirm the element type.)
mscOffsetByFreqHelper <- function(obj, offsetIdx, bandIdxLength){
  colSums(obj$x[offsetIdx:bandIdxLength] * obj$y[offsetIdx:bandIdxLength])
}
#' Calculates the offset coherence between two series
#'
#' Estimates the frequency-domain coherence, including coherence between offset frequencies within a band, using the multitaper method.
#' @param x A \code{data.frame} whose columns are the time domain input series.
#' @param y A \code{numeric} vector containing the response series.
#' @param blockSize A \code{numeric} indicating the block sizes into which the
#' input and response series will be partitioned.
#' @param overlap A \code{numeric} between 0 and 1 indicating how much overlap should exist
#' between adjacent blocks.
#' @param deltat A \code{numeric} indicating the sample rate.
#' @param nw A \code{numeric} indicating the time bandwidth parameter for estimating the
#' Slepian data tapers.
#' @param k A \code{numeric} indicating the number of tapers to use - should be approximately
#' floor(2*nw - 1) and no larger than floor(2*nw).
#' @param nFFT A \code{numeric} indicating the number of frequency bins to use (i.e. setting
#' the zeropadding amount).
#' @param forward Indicates whether the forward (TRUE) or reverse (FALSE)
#' coherence should be calculated.
#' @param average An \code{integer} representing how the average across blocks
#' should be calculated;
#' 0 - no average, return all the individual block information;
#' 1 - average the cross and auto-spectra across blocks, then calculate the coherency
#' 2 - estimate the coherency for each block, average the coherency across blocks
#' 3 - estimate the MSC for each block, average the MSC across blocks.
#' @param freqRange A \code{numeric} vector containing two elements with the start
#' and end location for the band on which to estimate the coherence.
#' @param maxFreqOffset A \code{numeric} value indicating the maximum offset coherence to
#' calculate in the specified band.
#' @param prewhiten NOT YET IMPLEMENTED
#' @param removePeriodic NOT YET IMPLEMENTED
#' @param sigCutoff NOT YET IMPLEMENTED
#'
#' @details MSC stands for Magnitude Squared Coherence.
#'
#' @export
offset.coh <- function(x, y = NULL, blockSize = length(x), overlap = 0, deltat = 1
                       , nw = 4, k = 7, nFFT = NULL, forward = TRUE
                       , average = 1, msc = FALSE
                       , freqRange = NULL, maxFreqOffset = NULL
                       , prewhiten = FALSE, removePeriodic = TRUE, sigCutoff = NULL)
{
  # Validate the averaging mode and the freqRange/maxFreqOffset pairing.
  if (!any(average == 0:3)){
    stop("average must have an integer value between 0 and 3.")
  }
  if (is.null(freqRange) && !is.null(maxFreqOffset)){
    stop("maxFreqOffset implies that freqRange should be assigned.")
  }

  # Number of frequency bins (zero-pad length): next power of two at or
  # above the block size, padded by a further factor of 8.
  if (is.null(nFFT)){
    nFFT <- 2^(floor(log2(blockSize)) + 3)
  }

  # Positive Fourier frequencies implied by the sampling rate and padding.
  freq <- seq(0, 1/(2*deltat), by = 1/(nFFT*deltat))
  nfreq <- length(freq)

  # Block the data; when y is NULL the series is paired with itself.
  if (is.null(y)){
    x2 <- sectionData(data.frame(x = x[, 1], y = x[, 1]), blockSize = blockSize, overlap = overlap)
  } else {
    x2 <- sectionData(data.frame(x = x[, 1], y = y[, 1]), blockSize = blockSize, overlap = overlap)
  }
  numSections <- attr(x2, "numSections")

  # Indices of the analysis band, widened by maxFreqOffset on both sides.
  if (is.null(freqRange)){
    freqIdx <- NULL
  } else {
    freqIdx <- head(which(freq >= (freqRange[1] - maxFreqOffset) ), 1):tail(which(freq <= (freqRange[2] + maxFreqOffset)), 1)
  }

  # Per-block cross/auto spectra restricted to the selected band.
  # BUG FIX: `forward` was hard-coded to TRUE here, silently ignoring the
  # function's forward argument; pass the argument through instead.
  spec <- blockedSpec(x2, deltat = deltat, nw = nw, k = k, nFFT = nFFT
                      , numSections = numSections, adaptiveWeighting = TRUE
                      , forward = forward
                      , idx = freqIdx)

  subFreq <- freq[freqIdx]

  # Bookkeeping returned to the caller alongside the coherence estimate.
  info <- list(allFreq = freq, blockSize = blockSize, overlap = overlap
               , numSections = numSections
               , deltat = deltat, nw = nw, k = k, nFFT = nFFT
               , forward = forward, average = average, msc = msc
               , freqRange = freqRange
               , freqRangeIdx = c(head(which(freq >= freqRange[1]), 1), tail(which(freq <= freqRange[2]), 1))
               , maxFreqOffset = maxFreqOffset
               , maxFreqOffsetIdx = tail(which(freq <= maxFreqOffset), 1) - 1 #-1 due to 0 frequency
               , freqIdx = freqIdx, prewhiten = prewhiten
               , removePeriodic = removePeriodic, sigCutoff = sigCutoff)

  if (average == 0){
    # No averaging: hand back the raw per-block spectra.
    info$msc <- FALSE
    return(list(freq = subFreq, coh = spec, info = info))
  } else if (average == 1) {
    # Average cross- and auto-spectra across blocks, then form coherency.
    Sxy.ave <- Reduce('+', lapply(spec, "[[", "Sxy")) / numSections
    Sxx.ave <- Reduce('+', lapply(spec, "[[", "Sxx")) / numSections
    Syy.ave <- Reduce('+', lapply(spec, "[[", "Syy")) / numSections
    coh <- Sxy.ave / sqrt(Sxx.ave %*% t(Syy.ave))
  } else if (average == 2) {
    # Coherency per block, averaged across blocks.
    coh <- Reduce('+', lapply(spec, function(obj){ obj$Sxy / sqrt(obj$Sxx %*% t(obj$Syy)) })) / numSections
  } else if (average == 3) {
    # Magnitude-squared coherence per block, averaged; already squared.
    coh <- Reduce('+', lapply(spec, function(obj){ abs(obj$Sxy)^2 / (obj$Sxx %*% t(obj$Syy)) })) / numSections
    info$msc <- TRUE
    return(list(freq = subFreq, coh = coh, info = info))
  }

  if (msc){
    list(freq = subFreq, coh = abs(coh)^2, info = info)
  } else {
    list(freq = subFreq, coh = coh, info = info)
  }
}
|
977ca62fdf7ee835f43ab67c35827e7b360a81be | 81ca090d080aa02c85fee815a58df39c8360a83c | /R/ipc.estK.R | f4bbd02b9c7c1cce07b9c936fcbe30f5dd265c01 | [] | no_license | cran/ecespa | b7e5673c99764e4a01d6c853ffc52ef3c5d3d021 | 3ab15613957414d862f0c1847295bcae73f2f35a | refs/heads/master | 2023-01-11T02:48:30.607369 | 2023-01-05T20:50:44 | 2023-01-05T20:50:44 | 17,695,706 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,251 | r | ipc.estK.R | `ipc.estK` <-
function (mippp, lambda=NULL, correction="iso", r=NULL, sigma2 = NULL, rho = NULL, q = 1/4, p = 2)
{
    # Minimum-contrast fit of an inhomogeneous Poisson cluster process to a
    # point pattern by matching the inhomogeneous K-function.
    #   mippp      - point pattern (spatstat ppp object -- TODO confirm)
    #   lambda     - intensity; estimated from a stationary ppm trend
    #                surface when NULL
    #   correction - edge correction passed to Kinhom
    #   r          - distances at which K is evaluated (Kinhom default if NULL)
    #   sigma2,rho - starting values for the cluster parameters (1, 1 if NULL)
    #   q, p       - exponents of the contrast criterion
    #                D(theta) = sum((Kobs^q - Ktheo^q)^p)
    # Theoretical K for the cluster model: Poisson term + clustering term.
    Kclust <- function (r, sigma2, rho) {
        (pi * r^2) + ((1 - exp(-(r^2)/(4 * sigma2)))/rho)
    }
    # Minimum-contrast discrepancy between observed and theoretical K.
    D.theta <- function(theta, Kobs, r) {
        sum((Kobs^q - Kclust(r, theta[1], theta[2])^q)^p)
    }
    # Capture the expression supplied for lambda before it may be replaced.
    lambdaname <- deparse(substitute(lambda))
    if(is.null(lambda)){
       lambda <- predict(ppm(mippp), type="trend")
       lambdaname <- NULL
    }
    Kobs <- Kinhom(mippp, r=r, correction=correction, lambda=lambda)
    if(is.null(r)) r <- Kobs$r
    # Third column of the fv object: the edge-corrected K estimate
    # (assumed to correspond to the requested `correction` -- TODO confirm).
    Kobs <- Kobs[[3]]
    theta <- c(sigma2, rho)
    if (is.null(theta)) theta <- c(1, 1)
    # NOTE(review): if only one of sigma2/rho is supplied, theta has length
    # 1 and optim() starts with a single parameter -- likely unintended.
    nsfit <- optim(par = theta, fn = D.theta, Kobs = Kobs, r = r)
    # Fitted K curve and the achieved contrast value at the optimum.
    Kfit <- Kclust(r, nsfit$par[1], nsfit$par[2])
    dataname <- deparse(substitute(mippp))
    dtheta <- sum((Kobs^q - Kfit^q)^p)
    result <- list(sigma2 = nsfit$par[1], rho = nsfit$par[2], d.theta=dtheta, Kobs=Kobs,
               Kfit=Kfit, r=r, data=mippp, lambda=lambda, dataname=dataname,
               lambdaname=lambdaname, p=p, q=q)
    class(result) <- c("ecespa.minconfit", class(result))
    return(result)
}
|
85b8f1bd0b13b854cae38a8d49048077bc4ddf8f | 9daf18e4b81d01553c3f1a63c34be3d4690fc7f6 | /figure/plot1.r | e3ed689f5ab3f605becd4fa7d1706d60491847e5 | [] | no_license | magnus-sigurdsson/ExData_Plotting1 | 12890ff7fe5eb3e9ae33acd6c302413ee97dd49f | a43119055809585836a5b7771dca0da338410ad3 | refs/heads/master | 2021-01-17T18:30:14.661600 | 2015-01-09T02:55:23 | 2015-01-09T02:55:23 | 28,943,482 | 0 | 0 | null | 2015-01-08T01:49:08 | 2015-01-08T01:49:06 | null | UTF-8 | R | false | false | 761 | r | plot1.r | # reading in data
# Read the household power consumption data; "?" encodes missing values.
power <- read.table(
  "C:/RWork/datascience/Exploratory data analysis/household_power_consumption.txt",
  na.strings = "?", sep = ";", stringsAsFactors = FALSE, header = TRUE
)

# Coerce every measurement column (everything after Date/Time) to numeric.
measure_cols <- 3:ncol(power)
power[, measure_cols] <- apply(power[, measure_cols], 2, as.numeric)

# Parse the dates and keep only 1-2 February 2007.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power <- power[power$Date == as.Date("2007-02-01") | power$Date == as.Date("2007-02-02"), ]

# Render the Global Active Power histogram to a 480x480 PNG.
setwd("C:/Users/magnuss/datasciencecoursera/exploratory/ExData_Plotting1/figure")
png("plot1.png", width = 480, height = 480)
hist(power$Global_active_power, col = "red", ylab = "Frequency",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
1f68a635c498fa7521c2a9f99b8bda8f529afab8 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554500-test.R | f9d14a514745fdcb42b470bee0e4446f4a63dba4 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 817 | r | 1610554500-test.R | testlist <- list(data = structure(c(3.52953696534134e+30, 3.52939363896286e+30, 3.52953696534131e+30, 4.84136405102239e-305, 3.48604090771692e+30, 2.64619557400541e-260, 7.2911220195564e-304, 2.4173705217461e+35, 4.73123150715541e-308, 1.38523885228052e-309, 4.46122518269139e+43, 1.390671161567e-309, 3.41778517619225e-305, 9.32399348468807e-256, 5.02193200176413e-74, 1.7471912709942e-130, 1.07039357451597e+45, 2.97653801809403e-306, 2.49220424908538e+35, 1.34780321829832e-289, 3.13886214953611e-305, 4.86146168521645e-299, 2.38845656110448e+35, 4.14452302922905e-317, 2.71621385351021e-312, 0, 2.52476053894575e-312, 6.9658755830017e-310, 6.92360072063196e-251, 1.62602577596037e-260 ), .Dim = c(10L, 3L)), q = 1.62602904054797e-260)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
2dca4788efcbbc0156a68b68c82c7ef45a54b6b2 | 6490267b3849bfa3f35fa328822967db28dc409a | /R/expand_params.R | 351941aa3cda64d9fe131398c8939c28d214eabc | [
"MIT"
] | permissive | stfriedman/tidyouwie | c4db6b5c792bf230b3f17ecd718417de26287a30 | 13be105e7eaa708843cf313ae6404d8af2f2cbd0 | refs/heads/master | 2022-05-13T22:23:18.787543 | 2022-04-19T00:30:55 | 2022-04-19T00:30:55 | 194,533,118 | 0 | 2 | MIT | 2019-08-09T17:09:17 | 2019-06-30T15:41:52 | R | UTF-8 | R | false | false | 1,494 | r | expand_params.R | #' Expand parameters from OUwie output
#'
#' This function expands the output from ouwie_tidy so that parameter values for all regimes are visible.
#' @param results output from the ouwie_tidy function
#' @return Function returns the results tibble with parameter values for each regime in a column
#' @references Beaulieu, J. M., & O’Meara, B. 2014. OUwie: analysis of evolutionary rates in an OU framework.
#' @examples
#' #simulating dummy data to run through OUwie
#' phy <- pbtree(n = 20, nsim = 2)
#' disc_trait <- setNames(sample(letters[1:2], 20, replace = TRUE), phy[[1]]$tip.label)
#' cont_traits <- as_tibble(iris[1:20, 1:2]) %>%
#' mutate(species = phy[[1]]$tip.label)
#'
#' models <- c("BM1", "OUM") #set of models to run on each simmap
#'
#' results <- ouwie_tidy(phy, disc_trait, cont_traits, models, nsim = 2)
#' expand_params(results)
#' @export
# reads in an ouwie_tidy object and prints parameters of each regime in columns
expand_params <- function(results) {
regimes <- colnames(results$input$simtree[[1]]$mapped.edge)
results$tidy_output %>%
gather(theta:sigma.sq, key = "param", value = "value") %>%
mutate(value = map(value, ~set_names(.x, regimes[seq_along(.)])),
value = map(value, ~enframe(.x, name = "regime"))) %>%
unnest(value) %>%
unite("name", param, regime) %>%
filter(name != "theta_NA") %>%
pivot_wider(names_from = "name", values_from = "value",
values_fn = list(value = list)) %>%
unnest(-eigval)
}
|
89c2d834888f111504576c15ecbefc5ef411710d | 6da868e097b227dfbd7f9c53386af1ecac24bd70 | /scripts/get_hyper_params.R | 942a2c8936cbfa76a1526aeaf40f413158971d88 | [] | no_license | skdeshpande91/clustering_spatialMeans | b80491d8a0a83249a46e3daa87fd4fb7327b9d95 | 014e18a798aded3b6ff78608ee78ce223ed7a967 | refs/heads/master | 2020-04-20T02:17:47.079375 | 2019-09-23T23:25:22 | 2019-09-23T23:25:22 | 168,568,016 | 0 | 0 | null | 2019-04-22T19:40:01 | 2019-01-31T17:39:39 | C++ | UTF-8 | R | false | false | 931 | r | get_hyper_params.R | #get_hyper_param <- function(Y, W, rho = 0.9, q = 0.05){
get_hyper_param <- function(Y, nu = 3,rho = 0.9, q = 0.05){
#D <- diag(rowSums(W))
#W_star <- D - W
#Omega_alpha <- rho * W_star + (1 - rho) * diag(N)
#Sigma_alpha <- solve(Omega_alpha)
#avg_trace <- mean(diag(Sigma_alpha))
T <- ncol(Y)
alpha_hat <- rowMeans(Y)
alpha_inf <- max(abs(alpha_hat))
sigma2_alpha <- var(alpha_hat)
sigma2_i <- apply(Y, FUN = var, MAR = 1) * (T-1)/T # don't want the unbiased estimates
sigma2_hat <- mean(sigma2_i)
lambda_sigma <- sigma2_hat * qchisq(0.1, df = nu)/nu
if(alpha_inf^2/(sigma2_hat * qnorm(1 - q/2)^2) < sigma2_alpha/sigma2_hat){
a2 <- alpha_inf^2/(sigma2_hat * qnorm(1 - q/2)^2)
a1 <- (sigma2_alpha/sigma2_hat - a2) * (1 - rho)
results <- c()
results["lambda_sigma"] <- lambda_sigma
results["a1"] <- a1
results["a2"] <- a2
} else{
results <- NULL
}
return(results)
}
|
52c8af806005cea80409fdf519b93a2969b2d1d1 | 5689f2c25e4f82bfc0965d13ef98fe712e5f5bdb | /Exploratory Data Analysis/Week 4 Course Project/plot4.R | 117ea561948ac020fca9bd16371f0fbc70326024 | [] | no_license | Frank-duuuu/Data-Science-Specialization-Assignments | e97a594896df8691821b6ba7bbf1b664eb205669 | 357cdd03469bea302d90d0aeadbe1b50a1fc9632 | refs/heads/master | 2022-12-13T03:55:30.739866 | 2020-09-04T14:48:35 | 2020-09-04T14:48:35 | 273,859,501 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 902 | r | plot4.R | PM <- readRDS('exdata_data_NEI_data/summarySCC_PM25.rds')
# Subset the NEI emissions data to coal-combustion sources and plot total
# PM2.5 emissions per year as a bar chart written to plot4.png.
# NOTE(review): `PM` is read from summarySCC_PM25.rds earlier in this script.
SCC <- readRDS('exdata_data_NEI_data/Source_Classification_Code.rds')
# A source counts as "coal combustion" when its level-one category mentions
# combustion AND its level-four category mentions coal (case-insensitive).
combustionRelated <- grepl('comb', SCC$SCC.Level.One, ignore.case = TRUE)
coalRelated <- grepl('coal', SCC$SCC.Level.Four, ignore.case = TRUE)
coalCombustion <- (combustionRelated & coalRelated)
combustionSCC <- SCC[coalCombustion,]$SCC
combustionPM <- PM[PM$SCC %in% combustionSCC,]
png('plot4.png', width = 480, height = 480, units = 'px', bg = 'transparent')
library(ggplot2)
plot4 <- ggplot(combustionPM, aes(factor(year), Emissions/10^5)) +
        geom_bar(stat = 'identity', fill = 'grey', width = 0.75) +
        theme_bw() + guides(fill = FALSE) +
        labs(x = 'Year', y = expression('PM'[2.5] * ' Emissions (10^5 tons)')) +
        # Fix: title typo "Emssions" -> "Emissions".
        labs(title = expression('PM'[2.5] * ' Coal Combustion Source Emissions Across US From 1999-2008'))
print(plot4)
dev.off()
4a5a42be120b53cc386e1e2615da87efaaad707f | 80976dd8efcba7d17a6638c030be0effcbcf37fe | /Brouillon/graphique.R | cd5ff6aaa4c25a6873627c2cde6d3c9081afc6a3 | [] | no_license | jcantet/simu_impot | e58015ac992785f089dc88d166f0a7e2d8850c66 | bc763646e75dd939fa4cd38cd5e651f0421e1f18 | refs/heads/master | 2023-01-22T12:23:46.011486 | 2020-11-29T14:00:03 | 2020-11-29T14:00:03 | 304,384,890 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,231 | r | graphique.R | # Fonction pour calculer les impôts bruts 2019
# Function to compute French income tax under the 2019/2020 rules.
#
# annee_rev    : income year (2019 triggers the extra 20% reduction branch)
# sit          : household status, "Célibataire" or "Couple"
# rev1, rev2   : raw incomes of the two earners (rev2 = 0 when single)
# rfr1, rfr2   : reference taxable incomes (revenu fiscal de référence)
# bareme       : tax-bracket table with columns tx, min_tr, max_tr
# plaf_qf      : cap on the family-quotient (QF) benefit per extra half-part
# nb_parts     : number of fiscal parts (adults + children)
# nb_payeurs   : number of adult payers (1 or 2)
# decote       : rebate ("décote") table with columns statut, plafond
# coeff_decote : rebate coefficient
# reduc        : reduction table (only read when annee_rev == 2019)
#
# Returns a 2-column data.frame (Indicateurs, Valeurs) with the tax breakdown.
calcul_impot <- function(annee_rev, sit, rev1, rev2, rfr1, rfr2, bareme, plaf_qf, nb_parts, nb_payeurs, decote, coeff_decote, reduc){
  # NOTE(review): `rev` is computed but never used below.
  rev <- rev1 + rev2
  rev_retenu_fam <- (rfr1 + rfr2) / nb_parts # Tax base per part, counting the whole household (all fiscal parts)
  rev_retenu_duo <- (rfr1 + rfr2) / nb_payeurs # Tax base per part, counting only the adult payers
  # Tax owed in each bracket for the full-household base.
  data <-
    bareme %>%
    mutate(max_rev_tr = cumsum(max_tr),
           rev_impose = ifelse(rev_retenu_fam > max_rev_tr,
                               max_tr - min_tr,
                               pmax(pmin(rev_retenu_fam - min_tr, max_tr - min_tr),0)), # pmax: element-wise max per row (not possible with max())
           impot_tr = tx * rev_impose)
  # Only used to compute the family-quotient (QF) benefit
  data_duo <-
    bareme %>%
    mutate(max_rev_tr = cumsum(max_tr),
           rev_impose = ifelse(rev_retenu_duo > max_rev_tr,
                               max_tr - min_tr,
                               pmax(pmin(rev_retenu_duo - min_tr, max_tr - min_tr),0)), # pmax: element-wise max per row (not possible with max())
           impot_tr = tx * rev_impose)
  mt_impot_brut_duo <- sum(data_duo$impot_tr)
  # Gross tax per QF part
  mt_impot_brut <- sum(data$impot_tr)
  # Gross household tax without QF (children not counted)
  mt_impot_brut_sans_qf <- mt_impot_brut_duo * nb_payeurs
  # Gross household tax with QF (children counted)
  mt_impot_brut_avec_qf <- mt_impot_brut * nb_parts
  # Maximum QF tax benefit: capped at plaf_qf per extra half-part
  avantage_QF <- max(min(
    max(nb_parts - nb_payeurs, 0) / 0.5 * plaf_qf,
    mt_impot_brut_sans_qf - mt_impot_brut_avec_qf),0)
  # Gross household tax after the capped QF benefit
  mt_impot_brut_ff <- mt_impot_brut_sans_qf - avantage_QF
  # Amount of the rebate ("décote"), itself capped at the tax owed
  avantage_fin_decote <-
    min(
      if (mt_impot_brut_ff < decote[which(decote$statut == sit),2]){
        # Rebate applies given the income level
        as.numeric(decote[which(decote$statut == sit),2] * coeff_decote - coeff_decote * mt_impot_brut_ff)
      } else {
        # Income too high to qualify for the rebate
        0
      }
      ,as.numeric(mt_impot_brut_ff))
  # Tax after subtracting any rebate
  mt_impot_brut_ff_dec <- mt_impot_brut_ff - avantage_fin_decote
  if (annee_rev == 2019){
    # 20% reduction, only for 2019 (and maybe earlier years...)
    # Income ceiling to qualify for the reduction
    plaf_reduc <-
      as.numeric(
        reduc[which(reduc$statut_reduc == sit), 2] + # depends on single vs couple status
          reduc[3,2] * (nb_parts - nb_payeurs)/0.5) # plus an allowance per extra fiscal half-part
    rfr <- rfr1 + rfr2
    # Amount of the reduction: full 20%, tapered, or none
    avantage_fin_reduc <-
      if(rfr < (plaf_reduc - 2072 * nb_payeurs)){
        0.2 * mt_impot_brut_ff_dec
      } else if ((rfr > (plaf_reduc - 2072 * nb_payeurs)) & (rfr < plaf_reduc)){
        0.2 * (plaf_reduc - rfr) / (2072 * nb_payeurs) * mt_impot_brut_ff_dec
      } else {
        0
      }
    # Final tax after the reduction
    impot_net <- mt_impot_brut_ff_dec - avantage_fin_reduc
  } else {
    # Final tax
    impot_net <- mt_impot_brut_ff_dec
  }
  Indicateurs <- c("Impot sans QF", "Avantage QF", "Impot après QF", "Avantage decote", "Avantage reduc", "Impot_net")
  Valeurs <- c(round(mt_impot_brut_sans_qf,0), -round(avantage_QF,0), round(mt_impot_brut_ff,0), -round(avantage_fin_decote,0), ifelse(annee_rev == 2019, -round(avantage_fin_reduc,0), 0), round(impot_net,0))
  cbind.data.frame(Indicateurs,Valeurs)
}
library(dplyr)
library(tidyr)
library(plotly)
# Official tax parameters for 2020 income ====
# Bracket schedule applied in 2021 to 2020 income (rate, lower/upper bound)
tx <- c(0, 0.11, 0.3, 0.41, 0.45)
min_tr <- c(0, 10064, 25659, 73369,157806)
max_tr <- c(10064, 25659, 73369, 157806, 100000000)
bareme_2020 <- as_tibble(cbind(tx,min_tr,max_tr))
# Cap on the family-quotient (QF) tax benefit per extra half-part
plaf_qf_2020 <- 1567
# Rebate ("décote") thresholds, 2019 values
statut <- c("Célibataire","Couple")
plafond <- c(1717, 2842)
decote_2020 <- as_tibble(cbind.data.frame(statut, plafond)) # cbind.data.frame avoids character coercion
coeff_decote_2020 <- 0.4525
# Generate results over an income grid for each household scenario.
# NOTE(review): growing data.frames via rbind in a loop is O(n^2); consider
# lapply + bind_rows. Also, the single-adult-with-children scenarios pass
# `rev` as BOTH rev1 and rev2 (doubling income) while all other scenarios
# pass rev2 = 0 -- looks like a copy-paste inconsistency; confirm intent.
# Single adult, no children
result_A1_E0 <- data.frame()
for (rev in seq(0,200000,500)){
  result_temp <- calcul_impot(2020, "Célibataire",rev,0,rev*0.9,0,bareme_2020, plaf_qf_2020, 1,1,decote_2020, coeff_decote_2020,NA)
  result_A1_E0 <- rbind.data.frame(result_A1_E0, cbind("situation" = rep("1 adulte",6),"rev"= rep(rev,6), result_temp))
}
result_A1_E0 <- pivot_wider(data = result_A1_E0, names_from = "Indicateurs", values_from = "Valeurs")
# Single adult with 1 child (1.5 fiscal parts)
result_A1_E1 <- data.frame()
for (rev in seq(0,200000,500)){
  result_temp <- calcul_impot(2020, "Célibataire",rev,rev,rev*0.9,0,bareme_2020, plaf_qf_2020, 1.5,1,decote_2020, coeff_decote_2020,NA)
  result_A1_E1 <- rbind.data.frame(result_A1_E1, cbind("situation" = rep("1 adulte et 1 enfant",6), "rev"= rep(rev,6), result_temp))
}
result_A1_E1 <- pivot_wider(data = result_A1_E1, names_from = "Indicateurs", values_from = "Valeurs")
# Single adult with 2 children (2 fiscal parts)
result_A1_E2 <- data.frame()
for (rev in seq(0,200000,500)){
  result_temp <- calcul_impot(2020, "Célibataire",rev,rev,rev*0.9,0,bareme_2020, plaf_qf_2020, 2,1,decote_2020, coeff_decote_2020,NA)
  result_A1_E2 <- rbind.data.frame(result_A1_E2, cbind("situation" = rep("1 adulte et 2 enfants",6), "rev"= rep(rev,6), result_temp))
}
result_A1_E2 <- pivot_wider(data = result_A1_E2, names_from = "Indicateurs", values_from = "Valeurs")
# Single adult with 3 children (3 fiscal parts)
result_A1_E3 <- data.frame()
for (rev in seq(0,200000,500)){
  result_temp <- calcul_impot(2020, "Célibataire",rev,rev,rev*0.9,0,bareme_2020, plaf_qf_2020, 3,1,decote_2020, coeff_decote_2020,NA)
  result_A1_E3 <- rbind.data.frame(result_A1_E3, cbind("situation" = rep("1 adulte et 3 enfants",6), "rev"= rep(rev,6), result_temp))
}
result_A1_E3 <- pivot_wider(data = result_A1_E3, names_from = "Indicateurs", values_from = "Valeurs")
# Couple, no children (2 fiscal parts)
result_A2_E0 <- data.frame()
for (rev in seq(0,200000,500)){
  result_temp <- calcul_impot(2020, "Couple",rev,0,rev*0.9,0,bareme_2020, plaf_qf_2020, 2,2,decote_2020, coeff_decote_2020,NA)
  result_A2_E0 <- rbind.data.frame(result_A2_E0, cbind("situation" = rep("2 adultes",6), "rev"= rep(rev,6), result_temp))
}
result_A2_E0 <- pivot_wider(data = result_A2_E0, names_from = "Indicateurs", values_from = "Valeurs")
# Couple with 1 child (2.5 fiscal parts)
result_A2_E1 <- data.frame()
for (rev in seq(0,200000,500)){
  result_temp <- calcul_impot(2020, "Couple",rev,0,rev*0.9,0,bareme_2020, plaf_qf_2020, 2.5,2,decote_2020, coeff_decote_2020,NA)
  result_A2_E1 <- rbind.data.frame(result_A2_E1, cbind("situation" = rep("2 adultes et 1 enfant",6), "rev"= rep(rev,6), result_temp))
}
result_A2_E1 <- pivot_wider(data = result_A2_E1, names_from = "Indicateurs", values_from = "Valeurs")
# Couple with 2 children (3 fiscal parts)
result_A2_E2 <- data.frame()
for (rev in seq(0,200000,500)){
  result_temp <- calcul_impot(2020, "Couple",rev,0,rev*0.9,0,bareme_2020, plaf_qf_2020, 3,2,decote_2020, coeff_decote_2020,NA)
  result_A2_E2 <- rbind.data.frame(result_A2_E2, cbind("situation" = rep("2 adultes et 2 enfants",6), "rev"= rep(rev,6), result_temp))
}
result_A2_E2 <- pivot_wider(data = result_A2_E2, names_from = "Indicateurs", values_from = "Valeurs")
# Couple with 3 children (4 fiscal parts)
result_A2_E3 <- data.frame()
for (rev in seq(0,200000,500)){
  result_temp <- calcul_impot(2020, "Couple",rev,0,rev*0.9,0,bareme_2020, plaf_qf_2020, 4,2,decote_2020, coeff_decote_2020,NA)
  result_A2_E3 <- rbind.data.frame(result_A2_E3, cbind("situation" = rep("2 adultes et 3 enfants",6), "rev"= rep(rev,6), result_temp))
}
result_A2_E3 <- pivot_wider(data = result_A2_E3, names_from = "Indicateurs", values_from = "Valeurs")
# Stack all scenarios and persist for the Shiny app.
global_result <- rbind.data.frame(result_A1_E0,result_A1_E1,result_A1_E2,result_A1_E3,result_A2_E0,result_A2_E1,result_A2_E2,result_A2_E3)
save(global_result,file = "./App/base_exemple_impots_2020.Rdata")
# Color palette: oranges for single-adult scenarios, purples for couples.
pal_name <- c("1 adulte","1 adulte et 1 enfant","1 adulte et 2 enfants","1 adulte et 3 enfants", "2 adultes", "2 adultes et 1 enfant", "2 adultes et 2 enfants", "2 adultes et 3 enfants")
pal_col <- c("#F6D09D", "#F2B96E","#FBA739","#F28500","#C7B3CE","#B194B9","#A579B4","#9255A5")
pal <- setNames(pal_col, pal_name)
# Net tax vs income, one line per household scenario, with a rich hover tooltip.
# NOTE(review): `easy_format` is not defined in this file -- presumably a
# project helper that formats numbers in thousands with a suffix; confirm.
plot_ly(data = global_result,hoverinfo = 'text',
        text = ~paste('</br> <b> Situation : </b>', situation,
                      '</br> <b> Revenus : </b>', easy_format(rev,"milliers",suffix = "€"),
                      '</br> <b> Impôt avant QF : </b>', easy_format(`Impot sans QF`,"milliers",suffix = "€"),
                      '</br> <b> Avantage QF : </b>', easy_format(`Avantage QF`,"milliers",suffix = "€"),
                      '</br> <b> Décôte : </b>', easy_format(`Avantage decote`,"milliers",suffix = "€"),
                      '</br> <b> Impôt net : </b>', easy_format(Impot_net,"milliers",suffix = "€"))) %>%
  add_trace(data = global_result, name = ~situation, x = ~rev, y = ~Impot_net, color = ~situation, colors = pal, type = 'scatter', mode = 'lines') %>%
  layout(title = "Impôt à payer en fonction du revenu et de la situation",
         xaxis = list(title = "Revenus"),
         yaxis = list(title = "Impôt net à payer", tickformat = ".0f"),
         legend = list(x = 0.1, y = 0.9))
3b356d613ab91ab9c2f8e0bbb1a2037caddfcfd3 | 88a5bb4573eb5c078009e99837d466f0be3cffe6 | /man/opt.tbreg.Rd | c964d463fb1f79bdb5259ffeb8cb596f367d2a20 | [
"Apache-2.0"
] | permissive | jinseob2kim/jstable | 609ab7b2f556a08c2317c9639a67476d9d3a4c1b | 6759e0b770d505cf59d7db73c365a2b7de648a24 | refs/heads/master | 2023-09-01T07:39:17.228323 | 2023-08-21T13:25:54 | 2023-08-21T13:25:54 | 135,014,959 | 15 | 6 | null | null | null | null | UTF-8 | R | false | true | 448 | rd | opt.tbreg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DToption.R
\name{opt.tbreg}
\alias{opt.tbreg}
\title{datatable option for regression tables (DT package)}
\usage{
opt.tbreg(fname)
}
\arguments{
\item{fname}{File name to download}
}
\value{
datatable option object
}
\description{
DT::datatable option for glm, gee(geepack package), lmer/glmer(lme4 package)
}
\details{
DETAILS
}
\examples{
options = opt.tbreg("mtcars")
}
|
10ebbad9bcd175e510ce5cc93648372f512f3fdd | 6321c8a1497020ae8a387d1281301aaa6aa38dbf | /naver_stock.R | e02d821383a1a6a156574633e1d8a3466d192519 | [] | no_license | daawonn/R_project | fad2c4e2fe4dc7c71cf549e59813b43328cca15c | 617c806d17cf43011594df73540815bbc9cc8319 | refs/heads/master | 2021-01-05T12:19:46.002970 | 2020-02-24T01:22:16 | 2020-02-24T01:22:16 | 241,022,949 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 722 | r | naver_stock.R | ## 네이버 금융 크롤링
# Crawl daily price tables for stock code 005930 (Samsung Electronics) from
# Naver Finance and save the first 10 pages to naver_stock.csv.
install.packages('rvest')
install.packages('R6')
install.packages('XML')
# Load libraries
library(rvest)
library(R6)
library(XML)

# Loop-invariant base URL; the page number is appended per iteration.
url_base <- 'http://finance.naver.com/item/sise_day.nhn?code=005930&page='

# Collect each page into a preallocated list, then bind once at the end
# (avoids the O(n^2) cost of growing a data.frame with rbind in a loop).
pages <- vector("list", 10)
for (i in seq_len(10)){
  tbl <- paste0(url_base, i) %>%
    read_html() %>%
    html_nodes('table') %>%
    .[1] %>%
    html_table()
  page_df <- tbl[[1]]
  # Drop spacer rows where the second column (closing price) is NA.
  page_df <- page_df[!is.na(page_df[,2]),]
  # Korean column names: date, close, change, open, high, low, volume.
  colnames(page_df) <- c('날짜','종가','전일비','시가', '고가', '저가', '거래량')
  pages[[i]] <- page_df
  cat("\n",i)  # progress indicator
}
stock_data <- do.call(rbind, pages)
write.csv(stock_data, "naver_stock.csv", row.names = FALSE)
a1ad8394512749a062a55b21a8f95e7e64a7f7d3 | 7c3b1b37f1986d00ef740e0185db4e24b5ca4cb4 | /inst/examples/index.R | 47e11ad18890df3b702e07973271d6faaec2a918 | [] | no_license | jverzani/gWidgetsWWW2.rapache | 2b9ea2402b334d9b57cc434ef81d8169d5a88f54 | f0678d800d0e824f15f0098212271caac71bb67c | refs/heads/master | 2020-04-06T07:02:06.600687 | 2014-02-01T03:47:41 | 2014-02-01T03:47:41 | 5,430,063 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,291 | r | index.R | ## Show index page
# Build and serve the gWidgetsWWW2.rapache examples index page.
# Fix: use library() (errors if the package is missing) instead of require(),
# which only returns FALSE and would let the script fail later.
library(whisker)
f <- list.files(system.file("examples", package="gWidgetsWWW2.rapache"), full=TRUE)
# Drop this index script and the README from the listing.
f <- Filter(function(x) !grepl("index.R|README",x), f)
# One {nm = basename} record per example, consumed by the whisker template.
nms <- lapply(f, function(i) list(nm=basename(i)))
# Mustache/whisker HTML template; {{#nms}}...{{/nms}} loops over examples.
tpl <- "
<h2>gWidgetsWWW2.rapache</h2>
<p>
The <code>gWidgetsWWW2.rapache</code> package allows webpages to be
written with <code>R</code> code using the <code>gWidgets</code>
interface. <br/>
The pages are served through the <code>apache</code>
webserver with the aid of Jeffrey
Horner's <code>rapache</code> module. <br/>
The package is a relative of
<code>gWidgetsWWW2</code>, which uses the <code>Rook</code> package
to serve pages locally through <code>R</code>'s
internal web server.
</p>
<h3>Details</h3>
Some details on the package can be read
<a href='static_file/html/gWidgetsWWW2_rapache.html' target='_blank'>here</a>.
<h3>Examples</h3>
<ul>
{{#nms}}
<li>
(<a href=https://raw.github.com/jverzani/gWidgetsWWW2.rapache/master/inst/examples/{{nm}} target='_blank'>source</a>)
<a href='{{nm}}' target='_blank'>See example</a>
{{nm}}
</li>
{{/nms}}
</ul>
"
# Assemble the gWidgets page: window, status bar, and rendered HTML body.
w <- gwindow("gWidgetsWWW2.rapache")
sb <- gstatusbar("Powered by gWidgetsWWW2.rapache and rapache", cont=w)
g <- ggroup(cont=w, spacing=10)
ghtml(whisker.render(tpl), cont=g)
6e0450a605ce87dffe2fe4be7b68c2a6dc642f2b | d14bcd4679f0ffa43df5267a82544f098095f1d1 | /inst/apps/example7_1/server.R | ee9a15b41fbac141af9ee74198d082694b3177a0 | [] | no_license | anhnguyendepocen/SMRD | 9e52aa72a5abe5274f9a8546475639d11f058c0d | c54fa017afca7f20255291c6363194673bc2435a | refs/heads/master | 2022-12-15T12:29:11.165234 | 2020-09-10T13:23:59 | 2020-09-10T13:23:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 787 | r | server.R | server = function(input, output, session) {
output$example7.1 <- renderUI({
withMathJax(HTML(includeMarkdown('background.Rmd')))
})
output$berkson <- DT::renderDataTable({
L <- c(0,100,300,500,700,1000,2000,4000)
U <- c(100,300,500,700,1000,2000,4000,'Inf')
n.10220 <- c(1609, 2424,1770,1306,1213,1528,354,16)
n.2000 <- c(292,494,332,236,261,308,73,4)
n.200 <- c(41,44,24,32,29,21,9,0)
n.20 <- c(3,7,4,1,3,2,0,0)
table.7.1 <- data.frame(L,U,n.10220,n.2000,n.200,n.20)
colnames(table.7.1) <- c("Lower","Upper","n = 10220","n = 2000","n = 200","n = 20")
DT::datatable(table.7.1, options = list(pageLength = 10))
})
output$plotfig71 <- renderPlot({
par(oma = c(0,0,0,0), mar = c(4,4,2,2))
input$evalfig71
return(isolate(eval(parse(text=input$fig71plot))))
})
} |
b1d59c57fd6dd40b13a3ae7448556e016ca84722 | 2bce49ab57997fe6121efcfba8544e8ede5f6376 | /man/get_lb.Rd | e6262566f8b9e88f6a6cf0108436a967c7d1fef7 | [] | no_license | hfjn/nmmso_benchmark | 7e0309244529226f1ab2b0bd7091772392ceba38 | bf13085ce7ce6d597c45095d30d2e0909467eb56 | refs/heads/master | 2021-05-30T02:31:26.276871 | 2016-01-09T16:24:01 | 2016-01-09T16:24:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 298 | rd | get_lb.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_lb.R
\name{get_lb}
\alias{get_lb}
\title{get_lb}
\usage{
get_lb(fno)
}
\arguments{
\item{fno}{Number of the niching function to be used.}
}
\value{
The lower bound for the function.
}
\description{
Returns the lower bound of the search domain for niching benchmark function number \code{fno}.
}
|
d76c0f91c704b76b84a3735c62c38e747128c669 | 08a6e8e2b86a015fe6f847102bf244fc1ce18d6a | /4-Demography/PCA/snprelate_PCA_cluster_rail.R | 8316ecb91aafc03cb8a87de32e37ed351ae0ceff | [] | no_license | dechavezv/2nd.paper | 40d5578aef8dfdaa07a5d9eb27c9f632f0750cd3 | ffa6506ec062bc1442e3d0ee7325f60087ac53e1 | refs/heads/master | 2020-12-19T21:31:43.317719 | 2020-08-07T05:46:59 | 2020-08-07T05:46:59 | 235,857,029 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,687 | r | snprelate_PCA_cluster_rail.R | ##### Load libraries
library(gdsfmt)
library(SNPRelate)
library(ggplot2)
library(RColorBrewer)
##### Set working directory?
todaysdate=format(Sys.Date(),format="%Y%m%d")
calldate=20200504
setwd("/u/scratch/d/dechavez/rails.project/SNPRelate")
plotoutdir=paste("/u/scratch/d/dechavez/rails.project/SNPRelate",calldate,"/PCA/",sep="")
dir.create(plotoutdir,recursive = T)
##### Specify VCF filename
vcf.fn <- "LS_joint_allchr_Annot_Mask_Filter_passingSNPs.vcf"
##### Convert VCF to GDS format
snpgdsVCF2GDS(vcf.fn, "LS_joint_allchr_Annot_Mask_Filter_passingSNPs.gds", method="biallelic.only")
######## Exclude low coverage genomes if is the case #########################
sample.list=c("LS05","LS09","LS21","LS29","LS34","LS35","LS49","LS57")
snpgdsCreateGenoSet("LS_joint_allchr_Annot_Mask_Filter_passingSNPs.gds", "LS_joint_allchr_Annot_Mask_Filter_passingSNPs_removeInds.gds", sample.id=sample.list)
genofile <- snpgdsOpen("LS_joint_allchr_Annot_Mask_Filter_passingSNPs_removeInds.gds")
##### Prune SNPs based on LD (r^2 threshold 0.2, MAF >= 0.1, all chromosomes)
set.seed(1000)
snpset <- snpgdsLDpruning(genofile, ld.threshold=.2,maf=0.1,autosome.only=FALSE)
snpset.id <- unlist(snpset)
# Write the pruned SNP set to a new GDS file.
snpgdsCreateGenoSet("LS_joint_allchr_Annot_Mask_Filter_passingSNPs_removeInds.gds", "LS_joint_allchr_Annot_Mask_Filter_passingSNPs_removeInds_pruned.gds", snp.id=snpset.id)
##### Close old genofile, open new genofile
snpgdsClose(genofile)
genofile <- snpgdsOpen("LS_joint_allchr_Annot_Mask_Filter_passingSNPs_removeInds_pruned.gds")
##### Add population information
# NOTE(review): pop_code has 9 entries but only 8 samples were retained
# above, and it is never used afterwards (the popmap file is read later
# instead) -- confirm whether this block can be removed.
pop_code=c("St.Cruz","St.Cruz","Isabela", "Isabela",
           "Isabela", "Pinta", "Pinta", "Santiago", "Santiago")
#pop_code <- read.gdsn(index.gdsn(genofile, "sample.annot/pop.group")) # <- doesn't work
##### Run PCA
# pca <- snpgdsPCA(genofile, snp.id=snpset.id, num.thread=1)
pca <- snpgdsPCA(genofile, num.thread=1,autosome.only=FALSE)
# Percent of variance explained by each PC.
pc.percent <- pca$varprop*100
# head(round(pc.percent, 2))
# First four eigenvectors per sample, written to a tab-separated file.
tab <- data.frame(sample.id = pca$sample.id,
                  EV1 = pca$eigenvect[,1],
                  EV2 = pca$eigenvect[,2],
                  EV3 = pca$eigenvect[,3],
                  EV4 = pca$eigenvect[,4],
                  stringsAsFactors = FALSE)
write.table(tab, file="LS_joint_allchr_Annot_Mask_Filter_passingSNPs_removeInds_pruned_PCA_1_2_3_4.txt", col.names=T, row.names=F, quote=F, sep='\t')
pdf("LS_joint_allchr_Annot_Mask_Filter_passingSNPs_removeInds_pruned_PCA_1_2.pdf", width=6, height=6)
plot(tab$EV2, tab$EV1, xlab="eigenvector 2", ylab="eigenvector 1")
dev.off()
##### Plot the first 4 PCs against each other
lbls <- paste("PC", 1:4, "\n", format(pc.percent[1:4], digits=2), "%", sep="")
pdf("LS_joint_allchr_Annot_Mask_Filter_passingSNPs_removeInds_pruned_PCA_1_2_3_4.pdf", width=6, height=6)
pairs(pca$eigenvect[,1:4], labels=lbls)
dev.off()
########### pop map ########
# Population information: map each sample to its primary population.
popmap = read.table("/u/scratch/d/dechavez/rails.project/bams/Daniel.2020/bam/VCF/list.sample.rails.txt",header=T)
sample.id = as.character(popmap$Sample)
pop1_code = as.character(popmap$PrimaryPop)
# Build a data.frame joining PCA coordinates with population labels
# (match() aligns popmap rows to the PCA sample order).
tab1a <- data.frame(sample.id = pca$sample.id, pop1 = factor(pop1_code)[match(pca$sample.id, sample.id)],
                    EV1 = pca$eigenvect[,1],
                    EV2 = pca$eigenvect[,2],
                    EV3 = pca$eigenvect[,3],
                    EV4 = pca$eigenvect[,4],
                    stringsAsFactors = FALSE)
#head(tab1a)
############### set up your colors -- keep this consistent across all plots ######
colorPal=RColorBrewer::brewer.pal(n=8,name = "Dark2")
colors=list(St.Cruz=colorPal[1],Isabela=colorPal[3],Santiago=colorPal[5],
            Pinta=colorPal[8]) # your population colors
# Scatter plot colored by primary population.
# NOTE(review): despite the original comment ("first 2 pc"), the axes are
# EV2 (x) and EV4 (y) -- confirm this is the intended pair.
p1a <- ggplot(tab1a,aes(x=EV2,y=EV4,color=pop1))+
  geom_point(size=3)+
  theme_bw()+
  ylab(paste("PC4_", format(pc.percent[4], digits=2),"%", sep=""))+
  xlab(paste("PC2_", format(pc.percent[2], digits=2),"%", sep=""))+
  ggtitle(paste("PCA based on ",as.character(length(pca$snp.id))," LD Pruned SNPs",sep=""))+
  theme(legend.title = element_blank(),axis.text = element_text(size=14),
        axis.title = element_text(size=14),legend.text = element_text(size=14))+
  scale_shape_manual(values=c(1,16))+
  scale_color_manual(values=unlist(colors))
# paste("PC", 1:4, "\n", format(pc.percent[1:4], digits=2), "%", sep="")
#p1a
ggsave(paste(plotoutdir,"/PCA.rails.",todaysdate,"correctedAxes.PC2_PC4.pdf",sep=""),p1a,device="pdf",width = 8,height=5)
##### Create cluster dendrogram from identity-by-state distances
set.seed(100)
ibs.hc <- snpgdsHCluster(snpgdsIBS(genofile, num.thread=1,autosome.only=FALSE))
rv <- snpgdsCutTree(ibs.hc)
pdf("LS_joint_allchr_Annot_Mask_Filter_passingSNPs_removeInds_pruned_IBScluster.pdf", width=8, height=12)
plot(rv$dendrogram, main="SNPRelate Clustering")
dev.off()
# PCA with high coverage individuals
62d2c448c12e8450c73794874859c677d7b772bb | ea2f728a0254ca19a98981aecf8d5d10f2230885 | /denoising_docs/denoising.R | 7febc5de78528567a36cd12a4056efe37d0bba1d | [] | no_license | itsnavneetk/Kaggle | 5c753ccf3036ef6542f003d475baa2f83a66546c | ed4600c6452119d1ac12f71df337e0c49d828193 | refs/heads/master | 2021-04-15T16:59:37.201290 | 2018-03-25T08:33:39 | 2018-03-25T08:33:39 | 126,678,391 | 0 | 0 | null | 2018-03-25T08:32:31 | 2018-03-25T08:32:30 | null | UTF-8 | R | false | false | 7,814 | r | denoising.R | #Denoising Dirty Documents
#https://www.kaggle.com/c/denoising-dirty-documents
library(ggplot2)
library(rpart)
library(caret)
library(randomForest)
library(e1071)
library(glmnet)
library(xgboost)
library(deepnet)
library(dplyr)
library(data.table)
library(bit64)
library(pROC)
library(png)
library(grid)
library(Metrics)
sample_sub = fread("sampleSubmission.csv")
train_files <- list.files("train", pattern="*.png", full.names=TRUE)
#Read training images into a list
train_images = list()
for (img in train_files){
train_images[[img]] = readPNG(img, native = FALSE, info = FALSE)
}
train_cleaned <- list.files("train_cleaned", pattern="*.png", full.names=TRUE)
#Read training images into a list
train_cleaned_images = list()
for (img in train_cleaned){
train_cleaned_images[[img]] = readPNG(img, native = FALSE, info = FALSE)
}
test_files <- list.files("test", pattern="*.png", full.names=TRUE)
#Read test images into a list
test_images = list()
for (img in test_files){
test_images[[img]] = readPNG(img, native = FALSE, info = FALSE)
}
#There are 2 image sizes: 540x258 and 540x420 with 139320 and 226800 pixels respectively.
#Function to view images in R:
# grid.raster(img_matrix)
#Exploration area----------------------------------
test_image = "test/1.png"
#Try some basic thresholding on 1 image
image1 = test_images[[test_image]]
avg_pixel = mean(image1)
# grid.raster(image1)
# Fraction of dark pixels (< 0.35) -- used to adapt the threshold per image.
dark_ratio = length(which(image1 < 0.35))/length(image1)
#Start with a strong threshold to remove most/all nontext pixels (1 = text)
thres_img = ifelse(image1>(avg_pixel-(4*dark_ratio)), 0, 1)
grid.raster(thres_img)
#Spread pixel intensity in vertical direction by 1 pixel
# (sum each pixel with its up/down neighbors via shifted copies of the matrix)
cols = rep(0,ncol(thres_img))
smear_vertical_by =1
for (x in 1:smear_vertical_by){
  thres_d_1 = rbind(thres_img, cols )[2:(nrow(thres_img)+1), ]
  thres_u_1 = rbind(cols,thres_img )[1:(nrow(thres_img)), ]
  thres_img = (thres_img+thres_d_1+thres_u_1)
}
# Same trick horizontally with left/right shifted copies.
rows = rep(0,nrow(thres_img))
smear_laterally_by =1
for (x in 1:smear_laterally_by){
  thres_r_1 = cbind(thres_img, rows )[,2:(ncol(thres_img)+1)]
  thres_l_1 = cbind(rows, thres_img)[,1:ncol(thres_img)]
  thres_img = (thres_img+thres_r_1+thres_l_1)
}
#Create new image matrix using the smeared filter: keep original pixels where
#the filter fired, paint everything else white (1).
filter_matrix = ifelse(thres_img ==0,-1,1)
flitered_image = ifelse(filter_matrix ==-1,1, image1)
grid.raster(flitered_image)
#Exploration area----------------------------------
#First threshold model validation--------
# Validate a simple per-image intensity threshold against the cleaned
# training images, reporting the mean RMSE over the training set.
avg_rmse <- 0
for (image in names(train_images)){
  print(image)
  # Average pixel intensity of the noisy image drives the adaptive threshold.
  img <- train_images[[image]]
  avg_pixel <- mean(img)
  # Three-way threshold: clamp near-white to 1, near-black to 0, and push
  # anything brighter than (mean - 0.15) to near-white (0.985).
  thres_img <- ifelse(img>0.95, 1,
              ifelse(img<0.025, 0,
              ifelse(img>avg_pixel-0.15, 0.985,
              img)))
  # Matching ground-truth image (same basename under train_cleaned/).
  test_image <- paste0("train_cleaned/", strsplit(image,"/")[[1]][2])
  print(test_image)
  cleaned_img <- train_cleaned_images[[test_image]]
  img_rmse <- rmse(thres_img, cleaned_img)
  print(img_rmse)
  avg_rmse <- avg_rmse + img_rmse
}
# Fix: divide by the actual number of images instead of the hard-coded 144,
# so the validation stays correct if the training set changes.
avg_rmse <- avg_rmse / length(train_images)
print(avg_rmse)
#First threshold model validation--------
#First threshold model submission--------
# Apply the simple threshold to all test images and build the submission.
cleaned_test <- list()
for (image in names(test_images)){
  img <- test_images[[image]]
  avg_pixel <- mean(img)
  # Fix: removed the duplicated "thres_img = thres_img =" assignment.
  thres_img <- ifelse(img > avg_pixel-0.225, 0.985, img)
  cleaned_test[[image]] <- thres_img
}
# Fix: flatten all images in one step instead of growing a vector with c()
# inside a loop (which is O(n^2)); element order is preserved.
solution_vector <- unlist(lapply(cleaned_test, as.vector), use.names = FALSE)
submission1 <- sample_sub
submission1$value <- solution_vector
write.csv(submission1, "denoising_sub1_thresh.csv", row.names=FALSE)
#Submission score: 0.08036 First place! (out of 5...)
#First threshold model submission--------
#Look at one thresholded test output image:
test_image <- "test/154.png"
test_img <- test_images[[test_image]]
avg_pixel <- mean(test_img)
thres_img <- ifelse(test_img > avg_pixel-0.225, 0.985, test_img)
grid.raster(thres_img)
#Training filter matrix model
# Validate the smeared-filter + threshold pipeline on the training set.
avg_rmse <- 0
for (image in names(train_images)){
  print(image)
  image1 <- train_images[[image]]
  avg_pixel <- mean(image1)
  # Fraction of dark pixels adapts the initial strong threshold per image.
  dark_ratio <- length(which(image1 < 0.35))/length(image1)
  # Strong threshold: 1 marks candidate text pixels, 0 background.
  thres_img <- ifelse(image1 > (avg_pixel-(4*dark_ratio)), 0, 1)
  # Smear the text mask vertically and laterally by one pixel by summing
  # shifted copies, so thin strokes survive the filter.
  cols <- rep(0, ncol(thres_img))
  smear_vertical_by <- 1
  for (x in seq_len(smear_vertical_by)){
    thres_d_1 <- rbind(thres_img, cols)[2:(nrow(thres_img)+1), ]
    thres_u_1 <- rbind(cols, thres_img)[1:(nrow(thres_img)), ]
    thres_img <- (thres_img + thres_d_1 + thres_u_1)
  }
  rows <- rep(0, nrow(thres_img))
  smear_laterally_by <- 1
  for (x in seq_len(smear_laterally_by)){
    thres_r_1 <- cbind(thres_img, rows)[, 2:(ncol(thres_img)+1)]
    thres_l_1 <- cbind(rows, thres_img)[, 1:ncol(thres_img)]
    thres_img <- (thres_img + thres_r_1 + thres_l_1)
  }
  # Keep original pixels where the smeared mask fired; paint the rest white.
  filter_matrix <- ifelse(thres_img == 0, -1, 1)
  filtered_image <- ifelse(filter_matrix == -1, 1, image1)
  # Average non-white intensity of the filtered image sets the final cutoff.
  white_space <- (filtered_image == 1)
  non_white_vec <- filtered_image[!white_space]
  avg_non_zero <- mean(non_white_vec)
  # Final simple threshold on the filtered image.
  final_img <- ifelse(filtered_image > avg_non_zero + 0.10, 0.99, filtered_image)
  # Compare against the ground-truth cleaned image.
  test_image <- paste0("train_cleaned/", strsplit(image, "/")[[1]][2])
  cleaned_img <- train_cleaned_images[[test_image]]
  img_rmse <- rmse(final_img, cleaned_img)
  print(img_rmse)
  avg_rmse <- avg_rmse + img_rmse
}
# Fix: divide by the actual number of images instead of the hard-coded 144.
avg_rmse <- avg_rmse / length(train_images)
print(avg_rmse)
#Filter with threshold model submission--------
# Apply the smeared-filter + threshold pipeline to all test images and
# build the second submission file.
cleaned_test <- list()
for (image in names(test_images)){
  print(image)
  image1 <- test_images[[image]]
  avg_pixel <- mean(image1)
  # Fraction of dark pixels adapts the initial strong threshold per image.
  dark_ratio <- length(which(image1 < 0.35))/length(image1)
  # Strong threshold: 1 marks candidate text pixels, 0 background.
  thres_img <- ifelse(image1 > (avg_pixel-(4*dark_ratio)), 0, 1)
  # Smear the text mask vertically and laterally by one pixel.
  cols <- rep(0, ncol(thres_img))
  smear_vertical_by <- 1
  for (x in seq_len(smear_vertical_by)){
    thres_d_1 <- rbind(thres_img, cols)[2:(nrow(thres_img)+1), ]
    thres_u_1 <- rbind(cols, thres_img)[1:(nrow(thres_img)), ]
    thres_img <- (thres_img + thres_d_1 + thres_u_1)
  }
  rows <- rep(0, nrow(thres_img))
  smear_laterally_by <- 1
  for (x in seq_len(smear_laterally_by)){
    thres_r_1 <- cbind(thres_img, rows)[, 2:(ncol(thres_img)+1)]
    thres_l_1 <- cbind(rows, thres_img)[, 1:ncol(thres_img)]
    thres_img <- (thres_img + thres_r_1 + thres_l_1)
  }
  # Keep original pixels where the smeared mask fired; paint the rest white.
  filter_matrix <- ifelse(thres_img == 0, -1, 1)
  filtered_image <- ifelse(filter_matrix == -1, 1, image1)
  # Average non-white intensity sets the final cutoff for this image.
  white_space <- (filtered_image == 1)
  non_white_vec <- filtered_image[!white_space]
  avg_non_zero <- mean(non_white_vec)
  final_img <- ifelse(filtered_image > avg_non_zero + 0.1, 0.99, filtered_image)
  cleaned_test[[image]] <- final_img
}
# Fix: flatten all images in one step instead of growing a vector with c()
# inside a loop (O(n^2)); element order is preserved.
solution_vector <- unlist(lapply(cleaned_test, as.vector), use.names = FALSE)
submission2 <- sample_sub
submission2$value <- solution_vector
write.csv(submission2, "denoising_sub2_thresh.csv", row.names=FALSE)
#Submission score: 0.08036 First place! (out of 5...)
#First threshold model submission--------
3840e850ea50eeb1952b8166e808e9fea57cb5e2 | 6150161144eb5fcd4bf9ca2752f11c56f5833226 | /PLR_IO/get_baseline_period_from_bins.R | 90d904699e20f6954efee71ed04f6e3cd632ca87 | [] | no_license | petteriTeikari/R-PLR | 1d8be9cd1b612942e4772735699c2a2cc9555dab | b1701c771ccb80bac7766135f67834abf9ea08a1 | refs/heads/master | 2020-03-21T02:55:44.329164 | 2019-02-13T07:29:52 | 2019-02-13T07:29:52 | 138,028,683 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 389 | r | get_baseline_period_from_bins.R | get.baseline.period.from.bins = function(bins) {
bin_names = bins$Name
index = match("Baseline", bin_names)
if (is.na(index)) {
warning('No variable (column) name of "Baseline" was not found from bins.csv, Using the default value!')
baseline_period = c(10, 15)
} else {
baseline_period = c(bins$Start[index], bins$End[index])
}
return(baseline_period)
} |
9529fc3c3bb1fac9971534ff76fbd1f9b7daea22 | 431681cebb0f2e5ff4ccd167aba2067a8c3f52c5 | /mc_yvar_appx.R | 2dc4093b081907018beca4101ed00e36fc553340 | [] | no_license | dcries/ebmodel | f2a09ac96635abfabf9bfa7cab976333d8c60bf4 | fc08a454e319501c210c7bcca99d3eae6e2b1fb6 | refs/heads/master | 2021-01-12T11:12:00.363741 | 2017-03-08T23:38:47 | 2017-03-08T23:38:47 | 72,864,760 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,414 | r | mc_yvar_appx.R | nrep <- 500
yeesd <- rep(0,nrep)
yessd <- rep(0,nrep)
for(k in 1:nrep){
n <- 300
#known covariates
zg <- rbinom(n,1,0.5) #gender indicator
zb <- rnorm(n,27,5) #bmi
za <- runif(n,20,40) #age
#ze <- rbinom(n,1,) #education
#zr <- #race
geg <- params[3]#200 #indicator level for male for ee
geb <- params[4]#3.5 #slope for bmi for ee
gea <- params[5]#-2.5 #slope for age for ee
gig <- params[6]#-200 #indicator level for male for ei
gib <-params[7]# 4 #slope for bmi for ei
gia <- params[8]#-3 #slope for age for ei
#slopes for true ee/ei
#note zig and gig are much different, zig refers to ei and gig to es, a mistake
zeg <- 400 #indicator level for male for ee
zeb <- 10 #slope for bmi for ee
zea <- -7 #slope for age for ee
zig <- 350 #indicator level for male for ei
zib <- 14 #slope for bmi for ei
zia <- -6 #slope for age for ei
#mean daily EI
#mean daily EI
mei1 <- 1500 + zig*zg + zib*zb + zia*za
mei2 <- 1900 + zig*zg + zib*zb + zia*za
mei3 <- 2100 + zig*zg + zib*zb + zia*za
mei4 <- 2900 + zig*zg + zib*zb + zia*za
mei5 <- 3200 + zig*zg + zib*zb + zia*za
#sd EI
sdei1 <- 50 +0#50
sdei2 <- 80 +0#50
sdei3 <- 60 +0#90
sdei4 <- 400 +0#40
sdei5 <- 220 +0#90
#mean daily EE = mean daily EI
mee1 <- 1500 + 130 + zeg*zg + zeb*zb + zea*za # 1500
mee2 <- 1900 + 130 + zeg*zg + zeb*zb + zea*za # 2000
mee3 <- 2100 + 80 + zeg*zg + zeb*zb + zea*za # 2300
mee4 <- 2900 + 130 + zeg*zg + zeb*zb + zea*za # 2600
mee5 <- 3200 + 130 + zeg*zg + zeb*zb + zea*za #- 3500
mee <- c(mee1,mee2,mee3,mee4,mee5)
#sd EE
sdee1 <- sdei1 + 20
sdee2 <- sdei2 + 15
sdee3 <- sdei3 + 30
sdee4 <- sdei4 + 10
sdee5 <- sdei5 + 30
#cor btwn the two
rho <- 0.4376205 #cor(eb$Energy_expenditure,eb$energy,use="complete.obs")
#cor btwn ee and es, calculate
rhos <- -(1-rho)
#sd es, calculated
#sdes <- sqrt(sdee^2+sdei^2-2*sdee*sdei*rho)
#cov matrix btwn the two
xcov1 <- matrix(c(sdei1^2, rho*sdei1*sdee1 , rho*sdei1*sdee1, sdee1^2),ncol=2,byrow=T)
xcov2 <- matrix(c(sdei2^2, rho*sdei2*sdee2 , rho*sdei2*sdee2, sdee2^2),ncol=2,byrow=T)
xcov3 <- matrix(c(sdei3^2, rho*sdei3*sdee3 , rho*sdei3*sdee3, sdee3^2),ncol=2,byrow=T)
xcov4 <- matrix(c(sdei4^2, rho*sdei4*sdee4 , rho*sdei4*sdee4, sdee4^2),ncol=2,byrow=T)
xcov5 <- matrix(c(sdei5^2, rho*sdei5*sdee5 , rho*sdei5*sdee5, sdee5^2),ncol=2,byrow=T)
#xcovs <- matrix(c(sdee^2, rhos*sdes*sdee , rhos*sdes*sdee, sdes^2),ncol=2,byrow=T)
#intercept of yee bias
be0 <- params[1]#100
#intercept of yes bias
bs0 <- params[2]#50
#slope of yee bias
#be1 <- 0.6
#slopt of yes bias
#bs1 <- 1.2
#simulate n observations of daily EI and EE, mixture
x1 <- matrix(c(rep(0,n/5),rep(0,n/5)),ncol=2)
x2 <- matrix(c(rep(0,n/5),rep(0,n/5)),ncol=2)
x3 <- matrix(c(rep(0,n/5),rep(0,n/5)),ncol=2)
x4 <- matrix(c(rep(0,n/5),rep(0,n/5)),ncol=2)
x5 <- matrix(c(rep(0,n/5),rep(0,n/5)),ncol=2)
# for(i in 1:(n/5)){
# x1[i,] <- mvrnorm(1,c(mei1[i],mee1[i]),xcov1)
# x2[i,] <- mvrnorm(1,c(mei2[(i+n/5)],mee2[(i+n/5)]),xcov2)
# x3[i,] <- mvrnorm(1,c(mei3[(i+2*n/5)],mee3[(i+2*n/5)]),xcov3)
# x4[i,] <- mvrnorm(1,c(mei4[(i+3*n/5)],mee4[(i+3*n/5)]),xcov4)
# x5[i,] <- mvrnorm(1,c(mei5[(i+4*n/5)],mee5[(i+4*n/5)]),xcov5)
# }
#
df <- 4
for(i in 1:(n/5)){
x1[i,] <- rmvt(1,delta=c(mei1[i],mee1[i]),sigma=xcov1*(df-2)/df,df=df)
x2[i,] <- rmvt(1,delta=c(mei2[(i+n/5)],mee2[(i+n/5)]),sigma=xcov2*(df-2)/df,df=df)
x3[i,] <- rmvt(1,delta=c(mei3[(i+2*n/5)],mee3[(i+2*n/5)]),sigma=xcov3*(df-2)/df,df=df)
x4[i,] <- rmvt(1,delta=c(mei4[(i+3*n/5)],mee4[(i+3*n/5)]),sigma=xcov4*(df-2)/df,df=df)
x5[i,] <- rmvt(1,delta=c(mei5[(i+4*n/5)],mee5[(i+4*n/5)]),sigma=xcov5*(df-2)/df,df=df)
}
# x1 <- mvrnorm(n/5,c(mei1,mee1),xcov1)
# x2 <- mvrnorm(n/5,c(mei2,mee2),xcov2)
# x3 <- mvrnorm(n/5,c(mei3,mee3),xcov3)
# x4 <- mvrnorm(n/5,c(mei4,mee4),xcov4)
# x5 <- mvrnorm(n/5,c(mei5,mee5),xcov5)
#for bimodal error model
mix <- rbinom(n,1,0.5)
xei <- c(x1[,1],x2[,1],x3[,1],x4[,1],x5[,1])
xee <- c(x1[,2],x2[,2],x3[,2],x4[,2],x5[,2])
#within person variability
dmatrix <- matrix(c(50^2,-2000,-2000,150^2),ncol=2,byrow=TRUE) #EI first row
#delta <- list()
#for(j in 1:nrep){
delta = mvrnorm(n,rep(0,2),dmatrix)
#}
#calculate delta ES, positive change => ei > ee
#xes <- xei - xee
xes <- sample(c(rnorm(n/5,-38,23),rnorm(n/5,38,23),rnorm(n/5,0,34),rnorm(n/5,-80,85),rnorm(n/5,80,85)),n,replace=FALSE)
#calculate true values T_ij of EE,EI, ES
# tei1 <- xei + delta1[,1]
# tei2 <- xei + delta2[,1]
# tee1 <- xee + delta1[,2]
# tee2 <- xee + delta2[,2]
# tes1 <- tei1 - tee1
# tes2 <- tei2 - tee2
#calculate DLW est of EE
#yee <- be0+eecurve(xee+ delta[,2],4000,2100,0.002)+geg*zg+geb*zb+gea*za + mix*rnorm(n,-350,sqrt(380^2-350^2)) + (1-mix)*rnorm(n,350,sqrt(380^2-350^2))
#calc cheap ES
#yes <- bs0+escurve(xes+ delta[,1],800,k=0.07)+gig*zg+gib*zb+gia*za + mix*rnorm(n,-190,sqrt(210^2-190^2)) + (1-mix)*rnorm(n,190,sqrt(210^2-190^2))
yee <- be0+eecurve(xee+ delta[,2])+geg*zg+geb*zb+gea*za + rsnorm(n,0,380,10)
#calc cheap ES
yes <- bs0+escurve(xes+ delta[,1],k=0.1)+gig*zg+gib*zb+gia*za + rsnorm(n,0,210,10)
# yee <- be0+eecurve(xee+ delta[,2],4000,2100,0.002)+geg*zg+geb*zb+gea*za + rnorm(n,0,380) #truth 408.534
# yes <- bs0+escurve(xes+ delta[,1],800,k=0.07)+gig*zg+gib*zb+gia*za+ rnorm(n,0,210) #truth 216.1748
yeesd[k] <- sd(yee-(be0+eecurve(xee,4000,2100,0.002)+geg*zg+geb*zb+gea*za))
yessd[k] <- sd(yes-(bs0+escurve(xes,k=0.1)+gig*zg+gib*zb+gia*za))
#print(k)
} |
60a7625181071d8a9a9b2d629c100cc1943be83c | a068c88c7c26a18b5d0da7e02f0cae6430417609 | /R/feather_file_creator-build_genes_files.R | 3fea9971cba33ab8bb09afab40bfd1e356b4fb84 | [] | no_license | CRI-iAtlas/iatlas-feather-files | 9d2e59216a6991ab444d0f64537b8c299a23afc9 | f305040f82406129269bee6df48797d992279c56 | refs/heads/master | 2023-07-16T03:45:12.682450 | 2021-08-23T15:58:45 | 2021-08-23T15:58:45 | 271,083,654 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 901 | r | feather_file_creator-build_genes_files.R | build_genes <- function(){
require(magrittr)
immunomodulators <- iatlas.data::synapse_feather_id_to_tbl("syn23518460")
io_targets <-
iatlas.data::synapse_feather_id_to_tbl("syn22151533") %>%
dplyr::select(
"entrez" = "Entrez ID",
"io_landscape_name" = "Friendly Name",
"pathway" = "Pathway",
"therapy_type" = "Therapy Type",
"description" = "Description"
) %>%
dplyr::group_by(.data$entrez) %>%
dplyr::mutate("entrez" = as.integer(.data$entrez))
hgnc_to_entrez <- iatlas.data::synapse_feather_id_to_tbl("syn22240716")
tbl <-
purrr::reduce(
list(immunomodulators, io_targets),
dplyr::full_join,
by = "entrez"
) %>%
dplyr::right_join(hgnc_to_entrez, by = "entrez") %>%
dplyr::select("entrez", "hgnc", dplyr::everything())
iatlas.data::synapse_store_feather_file(tbl, "genes.feather", "syn22125640")
}
|
5f6c70cba69deccaf37098fc1995a42c34d90233 | 5406025f4f757c544643c30728953f03c77e6b10 | /tests/testthat/test-11-last_tuesday.R | 745fdfa1b823d50b0bfa2103e71371f9c6f43fed | [
"MIT"
] | permissive | jrosen48/tidytuesdayR | 755dc2cedfffc9763a2f810283060616706df65a | c5a3f3c083ef53c8ca94b3cd771ccb640bc215d7 | refs/heads/master | 2022-11-29T04:24:58.338687 | 2020-08-11T16:57:04 | 2020-08-11T16:57:04 | 286,800,052 | 0 | 0 | NOASSERTION | 2020-08-11T16:57:05 | 2020-08-11T16:55:37 | null | UTF-8 | R | false | false | 1,109 | r | test-11-last_tuesday.R |
tt_ref_test_that("last_tuesday will give you the most recent tuesday", {
  check_api()
  ## Look backwards to the last tt
  date_1 <- as.Date("2020-01-01")
  last_tuesday_1 <- last_tuesday(date_1)
  ## Look forwards to the "next" tt (they can be posted on mondays)
  date_2 <- as.Date("2020-01-06")
  last_tuesday_2 <- last_tuesday(date_2)
  ## day of returns same day
  date_3 <- as.Date("2020-01-07")
  ## BUG FIX: this previously called last_tuesday(date_2), so the
  ## "same day" case defined just above was never actually exercised.
  last_tuesday_3 <- last_tuesday(date_3)
  expect_equal(
    last_tuesday_1,
    as.Date("2019-12-31")
  )
  expect_equal(
    last_tuesday_2,
    as.Date("2020-01-07")
  )
  expect_equal(
    last_tuesday_3,
    as.Date("2020-01-07")
  )
})
## tt_date() should resolve the calendar date of the TidyTuesday dataset
## for a given year and week number.
tt_ref_test_that("tt_date will give you the date of the tuesday", {
  check_api()
  ## Resolve the Tuesday for known year/week combinations
  refdate1 <- tt_date(2018, week = 1)
  refdate2 <- tt_date(2019, week = 1)
  refdate3 <- tt_date(2020, week = 2) # no data available for week 1!
  ## Each should match the historically posted TidyTuesday date
  expect_equal(refdate1,
               as.Date("2018-04-02"))
  expect_equal(refdate2,
               as.Date("2019-01-01"))
  expect_equal(refdate3,
               as.Date("2020-01-07"))
})
|
5a16cb6a46cedb4cb401eff39815ae28555bbc38 | f96537a7dbd2229905344dd33870ae0eaf7ed90b | /00_Plots/PlotLongitudinalData.R | 9407be9812e178ece322f5d8ae146445013f2215 | [] | no_license | Yaseswini/RScripts | ee44413fa8080f1dde1470191af09ddf0497f737 | 0200c2fc2e4b64b248f03f1570637c471839eec0 | refs/heads/main | 2023-05-12T07:06:54.868521 | 2023-05-02T14:46:58 | 2023-05-02T14:46:58 | 105,283,154 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,194 | r | PlotLongitudinalData.R | # / Script params
# Author : Yaseswini Neelamraju
# Script name : PlotLongitudinalData.R
# Description : The script is used to plot average expression/any values per gene across time points/other categorical groups
# /
#
# input data :
# The input data should like this :
# gene timePoint1 timePoint2 timePoint3 ... timePointn
# g1 value1 value2 value3 ... valuen
# g2
# g3
## Inputs from user :
inData = read_tsv( "<path_to_your_file>" )
# Plot per-gene values across ordered groups (e.g. time points), one facet
# per gene, points connected by a line within each facet.
#   dat          : wide table with a gene column plus one column per group
#   yaxisLabel   : y-axis label (optional)
#   plotTitle    : plot title (optional)
#   LogTransformY: when TRUE, values are log10-transformed before plotting
# Returns the ggplot object (caller decides when to print it).
plot_longitudinal <- function(dat, yaxisLabel = NULL, plotTitle = NULL, LogTransformY = FALSE) {
  # Reshape wide -> long: every non-gene column becomes (xaxis_group, value)
  long_dat <- dat %>% pivot_longer(cols = -contains("gene"), names_to = "xaxis_group")
  if (LogTransformY) {
    long_dat <- long_dat %>% mutate(value = log10(value))
  }
  ggplot(long_dat, aes(x = xaxis_group, y = value, color = gene)) +
    geom_point() +
    geom_line(aes(group = 1)) +   # connect the points within each facet
    facet_wrap(gene ~ .) +
    theme_bw() +
    theme(legend.position = "none") +
    labs(x = "", y = yaxisLabel, title = plotTitle)
}
pdf( outFile )
p = plot_longitudinal( inData )
print(p)
dev.off()
|
6336dc51fe118a44babc5796f9db15c4fc14907f | 067f36b3a6822ad19439f912781c741f7142e226 | /R/zzz.R | 20cf7d33d5fb8c56d4f3f1ac8fe9ea07bdd48b03 | [
"MIT"
] | permissive | guhjy/blorr | 1cbe692b2078ef6a5d066b4cebc77e4fb9c05353 | f938cae27ed4a0a2988c75b7ae222134598c6355 | refs/heads/master | 2020-04-09T10:54:58.022560 | 2018-10-18T11:48:31 | 2018-10-18T11:48:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 564 | r | zzz.R | .onAttach <- function(...) {
if (!interactive() || stats::runif(1) > 0.1) return()
tips <- c(
"Learn more about blorr at https://github.com/rsquaredacademy/blorr/.",
"Use suppressPackageStartupMessages() to eliminate package startup messages.",
"Need help getting started with logisitc regression models? Visit: https://www.rsquaredacademy.com",
"Check out our interactive apps for quick data exploration. Visit: https://apps.rsquaredacademy.com/."
)
tip <- sample(tips, 1)
packageStartupMessage(paste(strwrap(tip), collapse = "\n"))
}
|
f475a50844e78023ff155a874d32ac6d4d6d6e70 | 5fc672d84618a45c16542dc8680fa3d41937ce22 | /R/R2G2/man/Hist2GE.Rd | 62320fa9fa53d31d20aebba640fad71c19978bea | [] | no_license | arrigon/R2G2 | 4dccffe82d01660b13eee54598a21de561cc1928 | f49f292c903879295ddb84c676f02f80883a9db5 | refs/heads/master | 2021-01-11T00:14:27.612052 | 2016-10-11T11:44:57 | 2016-10-11T11:44:57 | 70,573,434 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,684 | rd | Hist2GE.Rd | \name{Hist2GE}
\alias{Hist2GE}
\title{
Producing 3D histograms in Google Earth
}
\description{
The current implementation produces Google Earth 3D histograms to i) count the number of distinct entities (e.g. species) per spatial unit (regional species diversity) or ii) count the number of occurrences of each entity per spatial unit (regional species abundance, detailled for each species).
}
\usage{
Hist2GE(coords, species = 0, grid, goo, nedges, orient, maxAlt = 1e+05, colors = "auto", ...)
}
\arguments{
\item{coords}{
An array of geographical positions of observations (lines = observation, columns = lon & lat in Decimal Degrees).
}
\item{species}{
A corresponding vector qualitatively describing each observation (typically the taxonomic species identity).
}
\item{grid}{
The precomputed spatial grid to be used. Choose among grid50, grid500, grid5000, grid10000 or grid20000 to get the needed number of points on the earth surface, see examples below. Technically, any set of equally spaced points can be used as long as the provided grid complies with the format used in ?grid50.
}
\item{goo}{
Name of the KML file to that will be saved into the working directory (use getwd() to find it).
}
\item{nedges}{
The number of desired edges (3 -> triangle, 4 -> square, etc) for drawing the histograms.
}
\item{orient}{
The rotation factor of histograms (in degrees).
}
\item{maxAlt}{
The maximum height (ceiling altitude) of the histograms.
}
\item{colors}{
Vector of colors corresponding to each species (one color per species), must be defined as hexadecimal values (as produced by usual R color palettes); leave to "auto" to get rainbow colors.
}
\item{...}{
Any additional arguments used internally.
}
}
\details{
The computations are based on a set of precomputed grids, where cells are equally spaced and cover equal earth areas. The cell locations were obtained using the EQSP Matlab library (Paul Leopardi). Each observation is first assigned to its closest neighbouring cell, then Hist2GE outputs cell-based statistics.
}
\value{
Two KML files (regional species richness and detailled species abundance) are produced in the current working directory. The function also outputs all these cell-based statistics as an array.
}
\author{
Nils Arrigo, nils.arrigo@gmail.com
2012 EEB, the University of Arizona, Tucson
}
\seealso{
\code{
\link{Shapes2GE}
\link{GetEdges}
\link{grid50}
}
}
\examples{
###Using Hist2GE: the easy way
#Produce fake species occurrences
coords = cbind(rnorm(210, 6.32, 5), rnorm(210, 46.75, 5))
coords = coords[order(coords[,1]), ]
species = rep(c("sp1", "sp2", "sp3"), each = 70)
#Choose grid
data(grid10000) # choose among grid50, grid500, grid5000, grid10000, grid20000
grid = grid10000
Hist2GE(coords = coords,
species = species,
grid = grid,
goo = "Jura",
nedges = 6,
orient = 45,
maxAlt = 1e5)
###Using Hist2GE: using custom grids, when working at local scale (not accounting for earth curvature)
#Produce fake species occurrences
coords = cbind(rnorm(210, -110.954795, 0.1), rnorm(210, 32.228724, 0.1))
coords = coords[order(coords[,1]), ]
species = rep(c("sp1", "sp2", "sp3"), 70)
#Define the resolution (cell width, decimal degrees)
cellwdth = 0.02
#And produce the desired grid automatically
lonrange = range(coords[, 1])
if(sum(sign(lonrange)) == -2){
lonwdth = -cellwdth
}else{
lonwdth = cellwdth
}
latrange = range(coords[, 2])
if(sum(sign(latrange)) == -2){
latwdth = -cellwdth
}else{
latwdth = cellwdth
}
lonLeft = lonrange[1] - 0.01 * lonrange[1]
lonRight = lonrange[2] + 0.01 * lonrange[2]
latBottom = latrange[1] - 0.01 * latrange[1]
latTop = latrange[2] + 0.01 * latrange[2]
#Produce cell coordinates along lon and lat axes
lonmarks = seq(lonLeft, lonRight, by = lonwdth)
latmarks = seq(latBottom, latTop, by = latwdth)
#Produce complete grid
lonDD = rep(lonmarks, length(latmarks))
latDD = rep(latmarks, each = length(lonmarks))
gridDD = cbind(lonDD, latDD)
#Convert it to radians centered and formated as in a "grid50" array
DD2Rad = function(lon, lat){
lonrad = (lon + 180) * pi/180
latrad = (lat + 90) * pi/180
cbind(lonrad, latrad)
}
MyGridDD = cbind(lonDD, latDD)
MyGridRad = DD2Rad(MyGridDD[, 1], MyGridDD[, 2])
MyGridRad = data.frame("Num" = 1:nrow(MyGridRad),
"lon" = MyGridRad[, 1],
"lat" = MyGridRad[, 2]) #this step is only cosmetic and needed for compatibility reasons.
#Run Hist2GE
Hist2GE(coords = coords,
species = species,
grid = MyGridRad,
goo = "Tucson",
nedges = 4,
orient = 45,
maxAlt = 5e3)
}
\keyword{ google earth }
\keyword{ histogram }
|
5bffb507509be2d71e1d49b93cbb075df0a3438b | 38efe5955099b359e6797de19fc54f8a1b06858f | /vaje1.R | 988675c3953f21a9edd674c1da8471bd74399821 | [] | no_license | tinarazic/casovne_vrste | ef0d8ab0a2704acad282382fc90a9f779748889d | 90b82d758cbe21038dd1595ece6bfe4908f98439 | refs/heads/master | 2020-08-16T09:45:04.638735 | 2019-12-11T20:26:57 | 2019-12-11T20:26:57 | 215,487,170 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,173 | r | vaje1.R | # PREPISANA 1. ura od Eve:
v <- c(100, 200, 300)
cumsum(v)
diff(v)
diff(v, lag = 2)
diff(v, differences = 2)
diff(v, differences = 1)
M <- matrix(1:15, 3, 5)
M <- matrix(1:15, 3, 5, byrow = TRUE)
M[2,3]
M[2,]
M[,3]
c(M)
t(M)
2 * M
M + 2
A <- rbind(c(2, -2), c(2, 2))
B <- rbind(c(2, 2), c(2, -2))
A * B
A %*% B
A %*% c(3, 4)
solve(A, c(-2, 14))
solve(A)
c(2, "bla")
v[1] + 2
list(2, "bla") -> l
l[1]
l[2]
nas_clovek <- list(ime = "Janez", starost = 35, zena = "Marija", starost_otrok = c(15, 13,2))
# $ime
nas_clovek[["ime"]]
nasi_ljudje <- data.frame()
nasi_ljudje <- data.frame(
ime = c("Janez", "Franc", "Tone"),
starost = c(32, 43,55),
st_otrok = c(3, 2,4),
zena = c("Marija", "Štefka", "Lojzka")
)
nasi_ljudje[1]
nasi_ljudje[c(1,3)]
as.vector(nasi_ljudje[,1])
############################################################################################################
# http://valjhun.fmf.uni-lj.si/~raicm/Poucevanje/CV/Datoteke/
#iz katere mape r bere
getwd()
setwd("U:/casovne_vrste")
scan("DM.txt") -> dm
# 1) movement of the exchange rate of the German mark against the US dollar
plot(dm)
plot(dm, type ="l")
ts.plot(dm) #samo x os spremeni v time
# 2) padavine LJ
dezLJ <- read.csv("Padavine_LJ_ARSO.csv", header = TRUE)
dezLJ
#kronolo?ko uredimo
as.matrix(dezLJ[,2:13]) -> dezLJ.matrika
dezLJ.chron <- c(t(dezLJ.matrika))
ts.plot(dezLJ.chron)
# tip ?asovne vrste
dezLJ.chron.ts <- ts(dezLJ.chron)
dezLJ.chron.ts
#frequency: how many observations there are per unit of time
dezLJ.matrika.ts <- ts(dezLJ.matrika)
plot(ts(dezLJ.matrika[,1:5], start=1991))
dezLJ.chron #navaden vektor
#naredimo ?asovno vrsto
ts(dezLJ.chron, start = 191, frequency = 12)
# 3) CO2
# ugrajena casovna vrsta
co2
plot(co2)
# TREND:
# RO?NO
# y = a + bx + e
# c_xy = sum_i^n (x_i - )
co2.v <- c(co2)
co2.t <- seq(1, length(co2))
co2.v.m <- mean(co2.v)
co2.t.m <- mean(co2.t)
b <-sum((co2.t - co2.t.m)*(co2.v -co2.v.m))/sum((co2.t -co2.t.m)^2)
a <- co2.v.m -b*co2.t.m
#AVTOMATICNO
co2.lm <- lm(co2.v ~ co2.t)
co2.lm
co2.lm <- lm(co2.v ~ co2.t - 1) #ZAČETNI koeficient odstrani
co2.lm
co2.lm <- lm(co2.v ~ co2.t)
co2.lm
plot(co2.v, type = "l")
abline(co2.lm)
co2.lm$coefficients["co2.t"]
summary(co2.lm)
#p test
#korelacijski test
cor.test(co2.t, co2.v)
#residuali
#ročno:
co2.v - a -b*co2.t
#avtomatično
co2.lm$residuals
co2.lm.res <- c(co2.lm$residuals)
plot(co2.lm.res, type = "l")
# 4) TEMPERATURE V LJUBLJANI
tempLJ <- read.table("Temp_LJ_NASA.txt", header = TRUE)
tempLJ.matrika <- as.matrix(tempLJ[,2:13])
tempLJ.chron <- c(t(tempLJ.matrika))
ts.plot(tempLJ.chron)
tempLJ.chron.t <- seq(1, length(tempLJ.chron))
#testiramo ali imajo tempretaure test:
cor.test(tempLJ.chron.t, tempLJ.chron)
#p vrednost 0.003 -> zelo značilno, tempreature rastejo
lm(tempLJ.chron ~ tempLJ.chron.t)
#funkcija APPLY
apply(tempLJ.matrika, 1, mean) #povprečja po letih
apply(tempLJ.matrika, 2, mean) #povprečja po
head(apply(tempLJ.matrika, 1, mean), -1) -> tempLJ.letne
plot(ts(tempLJ.letne, start =1929))
tempLJ.leta <- seq(1, length(tempLJ.letne))
cor.test(tempLJ.leta, tempLJ.letne)
#letna nihanja zmotijo test, zato imamo p vrednost še bolj značilno, če gledamo samo po letih
|
fb9a22f00e96bd6df8d3428a2ae8c984b2175c84 | 057d770e54614c2684a598f184c6ed16a2791be8 | /week10-session-Random Forest & Ensemble/random forest and ensemble/tree.R | 6d00ca8202aa11f9cb4fa1492783057c6f75f526 | [] | no_license | jeonyoonhoi/boaz_base | 9b59e470fe75daa8615abc07980f49fc3d8044e9 | 4f2f8b247105c76213f034d25f82230be1e4f713 | refs/heads/master | 2020-03-31T04:46:02.355960 | 2019-03-20T10:03:44 | 2019-03-20T10:03:44 | 151,918,993 | 0 | 0 | null | null | null | null | UHC | R | false | false | 1,928 | r | tree.R | # 17.09.28. A조
# 의사결정나무(Decision Tree)
# 필요한 라이브러리 설치 및 불러오기
install.packages('MASS')
install.packages('tree')
library(MASS)
library(tree)
# 만만한 iris data 이용...
data(iris)
# 입력변수에 따른 출력변수 plot 찍어보기
# 입력변수(Sepal.Lenghth, Sepal.Width, Petal.Length, Petal.Width)
# 출력변수(Species): Setosa(검), versicolor(빨), virginica(초)
# 산점도를 보면 tree모델을 적용하여 분류해볼만함
plot(iris[,1:4], col=as.integer(iris$Species), pch=substring((iris$Species),1,1))
# tree 모델 적용하기
ir.tr = tree(Species ~., iris) # iris에 species를 최종라벨으로 tree적용!
summary(ir.tr) # 만들어진 tree모델의 개요를 볼 수 있음
ir.tr # tree의 구체적인 내용(분류조건)을 줄글로 볼 수 있음... 눈이 어지러움
# 나무 모양으로 그림 그리기 ! 보기 편함
plot(ir.tr) # 가지를 만들고
text(ir.tr, all=T) # 글자를 채워넣기
# 그림을 다시보면 모든 입력변수 4가지를 전부 분류 기준으로 사용했다
# 조금 과하다는 생각이 듬 (overfitting)
# nod를 직접 정해주기
ir.tr1=snip.tree(ir.tr, nodes=c(12,7)) # 7번과 12번마디를 쳐내고 저장
plot(ir.tr1)
text(ir.tr1, all=T)
# 산점도에서 tree모델로 어떤식으로 구분했는지도 볼 수 있음
par(pty="s")
plot(iris[,3],iris[,4],type="n", xlab="petal length", ylab="petal width")
text(iris[,3],iris[,4],c("s","c","v")[iris[, 5]])
partition.tree(ir.tr1, add=TRUE, cex=1.5)
# 하나하나 줄글을 보고 수동으로 가지를 쳐내는 작업을 하기 귀찮음...
# tree 사이즈에따른 오분류 class의 갯수를 보자
ir.tr2=prune.misclass(ir.tr)
plot(ir.tr2) # 4개 이후론 misclass비슷함. 4로 선택!
fin.tr=prune.misclass(ir.tr, best=4) # best=4
plot(fin.tr)
text(fin.tr, all=T) |
d7ecbec42bdb658460e2af058813c12ed54bd7f6 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/cherry/examples/hommelFast.Rd.R | 1eecf3aeb4c475650fbf9ce0a9f157de6daf5de3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 569 | r | hommelFast.Rd.R | library(cherry)
### Name: hommelFast
### Title: Calculates adjusted p-values of Hommel's method efficiently.
### Aliases: hommelFast
### ** Examples
#Generate a vector of pvalues
set.seed(1)
n <- 1000
pvalues <- c(runif(0.50*n,0,0.001), runif(0.50*n,0,1))
#Create an hommel object in which the adjusted p-values are stored, based on a Simes'test
#(or Hommel's test, by choosing simes = FALSE):
hom <- hommelFast(pvalues, simes = TRUE)
#Retrieve the first 10 adjusted p-values by using the \code{pvalue} method.
pvalue(hom,1:10)
|
7be93d1ecb6833a539d76bb55253aa7fb3833599 | 386bd61637777c188de55c2035da0c1cf8e8578a | /helper_functions/exploratory data analysis.R | e4943767f8f4db1c919b3d6b3b775d2a30b72212 | [] | no_license | SzefKuchni/house_price | 42d1364974678c105a94b28e270154b6937f2519 | 7d27d965ee0f194a273bf694022dcfc9e97e2e5e | refs/heads/master | 2021-01-18T05:09:50.605979 | 2016-10-31T13:36:03 | 2016-10-31T13:36:03 | 68,593,325 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,278 | r | exploratory data analysis.R | source("C://Users/T540pDLEWYNBQ/Desktop/house_price/helper_functions/sample_ec.R")
source("helper_functions/explore_feature_div.R")
train_div<-feature_div(train, "Id")
train_div$class
source("helper_functions/explore_cor_with.R")
correlation<-cor_with(data = train_div$numeric, measure_to_check_cor_with = "SalePrice")
source("helper_functions/explore_isNA.R")
missing<-isNA(data = train)
source("helper_functions/explore_outliers_num.R")
outliers<-outliers_num(data = train_div$numeric)
test$SalePrice<-NA
combi<-rbind(train, test)
combi$origin<-ifelse(is.na(combi$SalePrice), "test", "train")
source("helper_functions/explore_plots_uni_num.R")
plot_uni_num(data_with_num_var = train_div$numeric, data_to_plot_with = combi)
source("helper_functions/explore_plots_uni_cat.R")
plot_uni_cat(data_with_cat_var = train_div$other, data_to_plot_with = combi)
source("helper_functions/explore_plots_bi_num_outliers_LM.R")
plots_bi_num(data_num = train_div$numeric, y_variable = "SalePrice")
source("helper_functions/explore_plots_bi_cat.R")
plots_bi_cat(data_cat = train_div$other, y_variable = "SalePrice")
#changes to be implemented
#-count of observations below the bivariate categorical chart
#-outliers in the bivvariate numerical plot based on the distance from cluster |
192859bfad2012800231fd03fea9a376fc5c7ce2 | e6b8fd1f12520370672e08a5837a0f42721e0932 | /test_img.R | 97ceb5a77b08636f07a02c1ad1c50e73a2465013 | [
"MIT"
] | permissive | tjv32/shinyVCT | a19d47369060bc559c1f081cd5de3ffed166b7d7 | 985f4dce57c29b3d2cdfe557c1ad4a8cd409ea63 | refs/heads/master | 2022-11-22T17:43:19.725149 | 2020-07-24T22:00:23 | 2020-07-24T22:00:23 | 278,509,285 | 0 | 0 | MIT | 2020-07-10T01:29:16 | 2020-07-10T01:29:16 | null | UTF-8 | R | false | false | 276 | r | test_img.R | setwd("/home/thomas/Documents/Project/finalVCT/clone/shinyVCT/")
# Manual smoke test for the image-generation pipeline.
source("img_functions.R")  # presumably defines generate_final_image() -- TODO confirm
library(magick)
library(ggplot2)
# Loads the saved 'params' object used to drive the image generation.
load("fname.RData")
print(params)
# Uncomment to exercise the logarithmic plot variant instead:
#params[['plot_type']] = 'logarithmic'
hold <- generate_final_image(params)
print(hold)
# Write the rendered image next to this script for visual inspection.
image_write(hold, 'test.png')
|
356d464ef793976f7d9d7ec38b2bfc00a50ce3e9 | cea73465520723f4e53bfd19e4706132c6f50083 | /datavizconfdraft.R | f05ea4779c2ab015d604842bec341d47223460be | [] | no_license | tmayhew/NBAdraft | 290b0060ebc227fade32055b079db31af5e8259e | 7b5e6181875df908316149c4faf0c310c31fd570 | refs/heads/master | 2022-04-27T20:26:35.861697 | 2020-04-20T19:50:50 | 2020-04-20T19:50:50 | 252,315,347 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,961 | r | datavizconfdraft.R | library(rvest)
library(ggplot2)
library(ggthemes)
library(tidyverse)
library(nbastatR)
# Negation of %in%: TRUE for each element of x that does NOT occur in y.
`%!in%` <- function(x, y) !(x %in% y)
draftpool1 = read.csv("data/drafts20152019.csv")[,-1]
draftpool2 = read.csv("data/drafts20082014.csv")[,-1]
draftpool3 = read.csv("data/drafts19952007.csv")[,-1]
sch.df = read.csv("data/schools.csv")[,-1]
sch.df$school = as.character(sch.df$school)
sch.df$conf = as.character(sch.df$conf)
df = rbind.data.frame(draftpool1, draftpool2, draftpool3) %>% select(pk, Player, Yr, Yrs, allstar, PC1, College)
# Bucket draft position into pick tiers, vectorized (replaces a row-by-row
# loop over 1:nrow(df)): pk < 4 -> "Top 3"; pk < 6 -> "Top 5";
# pk < 11 -> "Top 10"; pk < 15 -> "Lottery"; otherwise "Non-Lottery".
# Unlike the old loop, this also tolerates NA picks (yields NA, not an error).
df$rank = ifelse(df$pk < 4, "Top 3",
          ifelse(df$pk < 6, "Top 5",
          ifelse(df$pk < 11, "Top 10",
          ifelse(df$pk < 15, "Lottery", "Non-Lottery"))))
# Flag whether a college is recorded for the player: 1 when College is
# non-empty, 0 otherwise (vectorized replacement for the element-wise loop).
df$cbb = ifelse(df$College == "", 0, 1)
dfc = df %>% filter(College != "" & College != "University of West Florida" & College != "Butler County Community College" & College != "Augsburg College" & College != "Northeast Mississippi Community College" & College != "Shaw")
# 618/750 (82.4%) of players in the entire dataset played D1 basketball.
# Vectorized conference lookup via match() instead of a row-wise which()
# loop. match() returns NA (rather than erroring) when a school is missing
# and takes the first row when a school is duplicated; dfc was already
# filtered above so every College should have exactly one match.
dfc$conf = sch.df$conf[match(as.character(dfc$College), as.character(sch.df$school))]
#############################################################################
dfc$conf = factor(dfc$conf)
newtab = data.frame(count = summary(dfc$conf))
newtab[,2] = rownames(newtab)
names(newtab) = c("count", "conf")
rownames(newtab) = NULL
newtab1 = newtab %>% arrange(desc(count))
newtab1$conf = factor(as.character(newtab1$conf), levels = newtab1$conf)
newtab1 %>% ggplot(aes(x = conf, y = count)) +
geom_bar(stat = "identity", width = I(1/2)) + coord_flip() +
scale_y_continuous("Players") + theme_clean() + scale_x_discrete("")
dfc = dfc %>% arrange(desc(Yr))
dfc$Player = factor(as.character(dfc$Player), levels = dfc$Player)
plot = c("Ja Morant", "Pascal Siakam", "Karl-Anthony Towns", "Elfrid Payton", "CJ McCollum", "Kyle O'Quinn", "James Harden", "George Hill", "Jason Thompson", "Paul Millsap", "Kevin Martin", "Wally Szczerbiak", "Shawn Marion", "Anthony Parker", "Steve Nash")
xnudge = c(0, 0, 0.5, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0)
ynudge = c(2.93, 3.9, -0.2, 3.35, 4, 3.45, -.01, 3.15, 4.85, 3.60, 3.35, 3.7, 4.5, 4, 3.2)
nudges = cbind.data.frame(plot, xnudge, ynudge);nudges
dfc %>% filter(Player %in% plot) %>% arrange(desc(Yr))
dfc %>% ggplot(aes(x = conf, y = PC1)) + geom_hline(yintercept = 0, linetype = "dashed", color = "grey50") + geom_boxplot() + coord_flip() + geom_text(aes(label = Player), data = dfc[dfc$Player %in% plot,], position = position_nudge(x = xnudge, y = ynudge)) + theme_bw() +
scale_y_continuous("Relative Career Productivity (PC1)", breaks = seq(-12,20,2), limits = c(-12,20)) + scale_x_discrete("Conference") + ggtitle("NBA Productivity by Conference, 1995-2019")
|
79532ec1652487cf32b91a3133623bea6f522ec9 | 34192cc1bcd12310e7c24b171fd07583f3fbf88a | /tests/tests-num_dat_probs.R | bb89cddf714c75b222ce5c44df140926442736d6 | [] | no_license | n8thangreen/i-sense-model | f1da56a180b4cc5a294a54c664f7f21a32f0155a | b1034b6981e09cec67433fe59c3e6fa393de9b69 | refs/heads/master | 2022-03-26T17:03:38.088630 | 2020-01-15T13:42:27 | 2020-01-15T13:42:27 | 87,080,908 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 263 | r | tests-num_dat_probs.R |
# Interactive sanity checks on the num_dat_probs table (no assertions):
# sum of the symptomatic-pathway branch probabilities -- presumably these
# should sum to 1; TODO confirm against the model definitions.
with(num_dat_probs, Sx.notseekcare_H1N1 + Sx.notseekcare_notH1N1+ Sx.GP_H1N1 + Sx.NPFS_H1N1 + Sx.NPFS_notH1N1 + Sx.GP_notH1N1)
# Ratio of complete.hosp to ILI.hosp (semantics defined elsewhere in the model).
with(num_dat_probs, complete.hosp/ILI.hosp)
# Chained NPFS pathway: authorisation -> collection -> start -> completion
# -> hospitalisation -> death.
with(num_dat_probs, auth_NPFS.coll*coll.start*start.complete*complete.hosp*hosp.death)
|
c996a3710811bb383e51e583578b09f6a05b344e | a88c13f65f5e8a89ba16fc328b341b867a97610e | /Group6exploreData.R | ccd17bcd56091ff9374d7389981f8138df0dc724 | [] | no_license | briandannenmueller/UnitedWayProject | ec8a6275f1efceff2ff7fe95f326fc8deb873145 | 7a1e8a2badc1d575f92abaf507b2abad2aaded61 | refs/heads/main | 2023-05-20T14:34:03.113021 | 2021-06-05T03:48:06 | 2021-06-05T03:48:06 | 359,864,700 | 0 | 1 | null | 2021-04-28T17:56:42 | 2021-04-20T15:28:49 | R | UTF-8 | R | false | false | 12,267 | r | Group6exploreData.R | # Group 6 R Code
#Brian Dannenmueller, Zohaib Hussain, David Holmes, and Ryan Holt
# NOTE(review): rm(list = ls()) wipes the user's entire workspace as a side
# effect; prefer running the script in a fresh R session instead.
rm(list = ls())
#read in cps
cps = read.csv("cps_stat172.csv", stringsAsFactors = TRUE)
################################################
###-------PREPARATION---------------------------
################################################
# Call libraries needed for plotting & Color Palettes
library(ggplot2)
library(reshape2)
library(RColorBrewer)
# Inspect the raw data.
# BUG FIX: this previously called str(cps2) before cps2 was created below
# (and the workspace was just cleared, so cps2 could not exist).
str(cps)
summary(cps)
# THERE ARE 160 NA's in the data that need to be removed.
cps <- cps[!is.na(cps$fsecurity), ]
# THIS SHOULD REMOVE ALL NA's from fsecurity.
# WE ALSO NEED TO REMOVE THE X.1, X, and ID rows from the dataset
cps2 = subset(cps, select = -c(X.1, X, id))
# Recode disability and food security as two-level factors for classification.
cps2$disability_cat = as.factor(ifelse(cps2$disability > 0, "Disability", "No_Disability"))
cps2$fsecurity_cat = as.factor(ifelse(cps2$fsecurity > 0, "yes", "no"))
################################################
###-------VISUALIZATION-------------------------
################################################
# CREATE BARPLOTS OF THE CATEGORICAL VARIABLES TO OBSERVE DISTRIBUTION OF DATA AMONG CATEGORIES
ggplot(data = cps2, aes (x = fsecurity_cat))+geom_bar() + geom_text(stat = 'count',aes(label=..count..), vjust = -1)
ggplot(data = cps2, aes (x = disability_cat))+geom_bar() + geom_text(stat = 'count',aes(label=..count..), vjust = -1)
# CREATE HISTOGRAMS TO SHOW PROPORTIONALITY AMONG NUMERIC VARIABLES
ggplot(data = cps2) +
geom_histogram(aes(x = hhsize, fill = fsecurity_cat), position = 'fill', binwidth = 1) +
ggtitle("Food Security as Household Size Increase") +
labs(x = "Household Size", y = "Proportion") +
scale_fill_grey("Food Insecure") +
theme_bw()
ggplot(data = cps2) +
geom_histogram(aes(x = elderly, fill = fsecurity_cat), position = 'fill', binwidth = 1) +
ggtitle("Food Security as Elders Within Household Increase") +
labs(x = "Elderly", y = "Proportion") +
scale_fill_grey("Food Insecure") +
theme_bw()
# (Removed) The hhsize and elderly proportion histograms were accidentally
# pasted twice; the duplicated, byte-identical ggplot calls that used to
# live here were deleted since they only re-rendered the two plots above.
################################################
###-------CLUSTERING----------------------------
################################################
# Drop the outcome/label columns so clustering only sees predictors.
cps_X = subset(cps2, select = -c(fsecurity_cat,disability_cat, fsecurity, disability))
# WE ALSO NEED TO STANDARDIZE THE VARIABLES IN ORDER TO LIMIT CONTROL, KEEP IT
# EVEN BETWEEN ALL.
# z-score each column (mean 0, sd 1) so no variable dominates the distances.
cps_stand = apply(cps_X, 2, function(x){(x - mean(x))/sd(x)})
summary(cps_stand)
# Elbow plot: total within-cluster sum of squares for k = 1..15; wss[1] is
# the k = 1 case (total variance).
# NOTE(review): kmeans() is randomly initialized and no seed is set in this
# loop, so the elbow plot can change between runs -- confirm acceptable.
wss = (nrow(cps_stand)-1)*sum(apply(cps_stand,2,var))
for (i in 2:15) {
wss[i] = sum(kmeans(cps_stand, centers = i)$withinss)}
plot(1:15, wss, type = "b", xlab = "Number of clusters", main = "Elbow Plot")
# MAKES ME THINK THAT 4 WOULD BE A GOOD POINT
# WE WILL NOW DO HIERARCHICAL CLUSTERING
# Ward's method on Euclidean distances of the standardized data.
cps_dist = dist(cps_stand, method = "euclidean")
cps_clust = hclust(cps_dist, method = "ward.D")
plot(cps_clust)
# This creates red rectangles to create clusters for the dendrogram
rect.hclust(cps_clust, k = 4, border = "red")
# k-means with k = 4 (chosen from the elbow plot above).
cps_kmeans = kmeans(cps_stand, 4)
str(cps_kmeans)
cps_X$km_cluster = as.factor(cps_kmeans$cluster)
# Long format (one row per cluster/variable/value) for faceted boxplots.
cps_long = melt(cps_X, id.vars = c("km_cluster"))
View(cps_long)
# This will create boxplots for the clusters among the specified variables in cps_X
ggplot(data = cps_long) +
geom_boxplot(aes (x = km_cluster, y = value, fill = km_cluster)) +
facet_wrap(~variable, scales = "free") +
scale_fill_brewer(palette = "Blues")
display.brewer.all(colorblindFriendly = T)
# I WILL NOW CREATE A HEATMAP
# Label rows with the hierarchical-cluster group for the heatmap margin.
cps2$groups = as.factor(cutree(cps_clust, k = 4))
# NOTE(review): this references `cps` while everything above uses `cps2` --
# confirm the two data frames are row-aligned.
rownames(cps_stand) = paste(cps2$groups, ": ", cps$Country)
heatmap(as.matrix(cps_stand),
col = paste("grey", 1:99, sep = ""),
hclustfun = function(x){hclust(x, method = "ward.D")})
################################################
###-------Decision Tree-------------------------
################################################
library(rpart)
library(rpart.plot)
library(ggplot2)
library(pROC)
# Drop the raw numeric outcomes; keep only the categorical targets.
cps2 = subset(cps2, select = -c(fsecurity, disability))
#set seed to ensure reproducibility
RNGkind(sample.kind = "default") #sets the sampling algorithm
set.seed(27892789) #number is arbitrary, just as long as we all use the same one
#train.idx will contain a random sample of row indices
# 80/20 train/test split.
train.idx = sample(x = 1:nrow(cps2), size = floor(.8*nrow(cps2)))
#make training cps
train.df = cps2[train.idx,]
#the rest will be for testing
test.df = cps2[-train.idx,] #note the negative
View(train.df)
set.seed(27892789) #again, for reproducibility
# This takes some time to run
# Deliberately overgrown tree (tiny cp, minsplit = 1); intended to be pruned
# back using the cp table from printcp() below.
tree = rpart(fsecurity_cat ~ .,
data = train.df,
method = "class",
control = rpart.control(cp = 0.00001, minsplit = 1))
# this full tree visualization might take a while to run, FYI
rpart.plot(tree, box.palette = "Blues")
printcp(tree)
# Looks like the lowest xerror will be found after no split, which means decision trees cannot model
# this data.
################################################
###-------Random Forest-------------------------
################################################
library(randomForest)
library(caret)
#Next, we will try a random forest and see if we can get a useful model
set.seed(27892789)
forest = randomForest(fsecurity_cat ~ .,
data = train.df, #TRAINING DATA
ntree = 1000, #fit B = 1000 separate classification trees
mtry = 4, #choose m - sqrt(10) = 3.16 - i rounded up to 4
importance = FALSE) #importance can help us identify important predictors (later)
forest
#Confusion matrix:
# no yes class.error
#no 294 11 0.03606557
#yes 34 3 0.91891892
#Confusion matrix:
# no yes class.error
#no 295 10 0.03278689
#yes 34 3 0.91891892
# TUNING
# Tune mtry (variables tried at each split) over 1..10 by OOB error rate.
mtry <- c(1:10)
#make room for m, OOB error
keeps2 <- data.frame(m = rep(NA,length(mtry)),
OOB_err_rate = rep(NA, length(mtry)))
for (idx in 1:length(mtry)){
tempforest<- randomForest(fsecurity_cat ~ .,
data = train.df,
ntree = 1000,
mtry = mtry[idx]) #mtry is varying
#record iteration's m value
keeps2[idx, "m"] <- mtry[idx]
#record what our OOB error rate was
# predict() on a randomForest with no newdata returns OOB predictions.
keeps2[idx,"OOB_err_rate"] <- mean(predict(tempforest)!= train.df$fsecurity_cat)
}
# interestingly enough, OOB error rate is lowest on mtry = 1 and 2
# OOB shoots up at mtry = 3 ... why?
qplot(m, OOB_err_rate, geom = c("line", "point"), data = keeps2) +
theme_bw() + labs(x = "m (mtry) value", y = "OOB error rate")
set.seed(27892789)
# final forest uses mtry = 1
# NOTE(review): comment above says mtry = 1 but the call below uses
# mtry = 2 -- confirm which value the tuning selected.
final_forest<- randomForest(fsecurity_cat ~ .,
data = train.df,
ntree = 1000,
mtry = 2,#based on tuning
importance = TRUE) # now we can make variable importance plot
#make a column of predictions on the test set
test.df$forest_pred <- predict(final_forest, test.df, type = "class")
#confusion matrix where pi* = 0.5. Forest always guesses "no"!
table(test.df$forest_pred, test.df$fsecurity_cat)
# Construct a confusion matrix visualization
# code for this courtesy of Cybernetic at
# https://stackoverflow.com/questions/23891140/r-how-to-visualize-confusion-matrix-using-the-caret-package/42940553
cm <- confusionMatrix(data = test.df$forest_pred, reference = test.df$fsecurity_cat)
cm
# Draw a 2x2 confusion-matrix graphic plus summary statistics for a
# caret::confusionMatrix result (layout adapted from Cybernetic's
# StackOverflow answer linked above).
#
# Args:
#   cm: list with $table (2x2 counts, column-major predicted x actual),
#       $byClass (named statistics) and $overall (Accuracy, Kappa, ...).
# Returns: nothing useful; draws on the active graphics device.
draw_confusion_matrix <- function(cm) {
  layout(matrix(c(1,1,2)))
  par(mar=c(2,2,2,2))
  plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
  title('CONFUSION MATRIX', cex.main=2)
  # create the matrix
  rect(150, 430, 240, 370, col='#3F97D0')
  text(195, 435, 'Food Secure', cex=1.2)
  rect(250, 430, 340, 370, col='#F7AD50')
  text(295, 435, 'Not Food Secure', cex=1.2)
  text(125, 370, 'Predicted', cex=1.3, srt=90, font=2)
  text(245, 450, 'Actual', cex=1.3, font=2)
  rect(150, 305, 240, 365, col='#F7AD50')
  rect(250, 305, 340, 365, col='#3F97D0')
  text(140, 400, 'Food Secure', cex=1.2, srt=90)
  text(140, 335, 'Not Food Secure', cex=1.2, srt=90)
  # add in the cm results (column-major order of the 2x2 count table)
  res <- as.numeric(cm$table)
  text(195, 400, res[1], cex=1.6, font=2, col='white')
  text(195, 335, res[2], cex=1.6, font=2, col='white')
  text(295, 400, res[3], cex=1.6, font=2, col='white')
  text(295, 335, res[4], cex=1.6, font=2, col='white')
  # add in the specifics. Each statistic is printed under its own name;
  # the original code printed byClass[2]'s value under byClass[1]'s name
  # and vice versa (Sensitivity/Specificity swapped) -- fixed here.
  plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
  text(10, 85, names(cm$byClass[1]), cex=1.2, font=2)
  text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.2)
  text(30, 85, names(cm$byClass[2]), cex=1.2, font=2)
  text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.2)
  text(50, 85, names(cm$byClass[5]), cex=1.2, font=2)
  text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.2)
  text(70, 85, names(cm$byClass[6]), cex=1.2, font=2)
  text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.2)
  text(90, 85, names(cm$byClass[7]), cex=1.2, font=2)
  text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.2)
  # add in the accuracy information
  text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
  text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.4)
  text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
  text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.4)
}
draw_confusion_matrix(cm)
# Confusion matrix before pi-hat optimization:
# no yes
#no 79 7
#yes 0 0
# Predicted probability of the positive class ("yes") for each test row.
pi_hat <- predict(final_forest, test.df, type = "prob")[,"yes"]
View(test.df)
rocCurve <- roc(response = test.df$fsecurity_cat,
predictor = pi_hat,
levels = c("no", "yes"))
# AUC = 0.657. Optimized threshold is pi* = 0.008, very small!
# NOTE(review): comment says pi* = 0.008 but the threshold applied below is
# 0.085 -- confirm which value the ROC analysis actually produced.
plot(rocCurve,print.thres = TRUE, print.auc=TRUE)
# This adjusts the predicts based on the selected pi_hat, will probably experiment more.
# There is clearly a class imbalance problem
test.df$forest_pred = as.factor(ifelse(pi_hat > 0.085, "yes", "no"))
table(test.df$forest_pred, test.df$fsecurity_cat)
cm2 <- confusionMatrix(data = test.df$forest_pred, reference = test.df$fsecurity_cat)
# Confusion matrix validates ROC curve result and shows improvement
# However, if we wanted to minimize our false negative rate (guessing "food secure" when people are not)
# we may need to smaller, more aggresive pi_hat.
draw_confusion_matrix(cm2)
# most important variables are "employed", "married", "elderly", and "hhsize". "education" is the least important!
# type = 1 plots mean decrease in accuracy (permutation importance).
varImpPlot(final_forest, type = 1)
################################################
###-------GLM-----------------------------------
################################################
# NOTE(review): rm(list = ls()) wipes the whole workspace, including the
# `cps` data frame referenced just below -- confirm cps is reloaded before
# this section runs.
rm(list = ls())
library(boot)
library(rpart)
library(rpart.plot)
library(ggplot2)
library(pROC)
library(RColorBrewer)
# Drop the first three (identifier) columns.
fs <- subset(cps, select = -c(1,2,3))
# Making binary variables out of food security and disability
fs$fsecurity[fs$fsecurity > 0] <- 1
fs$disability_cat = ifelse(fs$disability > 0, "Disability", "No_Disability")
fs$disability_cat = as.factor(fs$disability_cat)
# First model with every variable
model1 <- glm(fs$fsecurity ~ fs$hhsize + fs$female + fs$kids + fs$elderly + fs$black + fs$hispanic + fs$education + fs$employed + fs$married + fs$disability_cat, family=binomial(link="logit"))
# summary(model1)
# BIC(model1)
# 296.5758
# Second model using employed, disability_cat, elderly, hhsize
# Reduced model: lower BIC (283.9 vs 296.6) than the full model above.
model2 <- glm(fs$fsecurity ~ fs$hhsize + fs$elderly + fs$employed + fs$disability_cat, family=binomial(link="logit"))
summary(model2)
confint(model2)
BIC(model2)
# 283.8745
# Sequential (Type I) likelihood-ratio tests for each added term.
anova(model2, test = "Chisq")
|
c5fb1cc8d1290564a2811955fbbc2ee3d56bfff8 | 907af44f17d7246e7fb2b967adddb937aa021efb | /man/fslcog.Rd | e5219209b03e372efaf310a63c4c474414e2c20a | [] | no_license | muschellij2/fslr | 7a011ee50cfda346f44ef0167a0cb52420f67e59 | 53276dfb7920de666b4846d9d8fb05f05aad4704 | refs/heads/master | 2022-09-21T07:20:18.002654 | 2022-08-25T14:45:12 | 2022-08-25T14:45:12 | 18,305,477 | 38 | 23 | null | 2019-01-10T20:57:47 | 2014-03-31T19:35:03 | R | UTF-8 | R | false | true | 1,064 | rd | fslcog.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fslhd.R
\name{fslcog}
\alias{fslcog}
\title{Image Center of Gravity (FSL)}
\usage{
fslcog(img, mm = TRUE, verbose = TRUE, ts = FALSE)
}
\arguments{
\item{img}{Object of class nifti, or path of file}
\item{mm}{Logical if the center of gravity (COG) would be in mm (default \code{TRUE})
or voxels (\code{FALSE})}
\item{verbose}{(logical) print out command before running}
\item{ts}{(logical) is the series a timeseries (4D), invoking \code{-t}
option}
}
\value{
Vector of length 3 unless ts option invoked
}
\description{
Find Center of Gravity of Image from FSL
}
\note{
FSL uses a 0-based indexing system, which will give you a different
answer compared to \code{cog}, but \code{fslcog(img, mm = FALSE) +1}
should be relatively close to \code{cog(img)}
}
\examples{
if (have.fsl()){
x = array(rnorm(1e6), dim = c(100, 100, 100))
img = nifti(x, dim= c(100, 100, 100),
datatype = convert.datatype()$FLOAT32, cal.min = min(x),
cal.max = max(x), pixdim = rep(1, 4))
fslcog(img)
}
}
|
cf65596f252f0ef6e7f2b73e1ff5f2ea3e3d2ec6 | 543df30ebc6bb7fd0404b62df30858b9416e3783 | /gdb_read.R | 6a2a5093eccdb498c411a89fb41fad73b748eeed | [] | no_license | ARMurray/WellsDashboard | 96ff84953022b106adfc09852fd73f41eee5e513 | 166a8fe1b51b2035d61af02a5acdc0d70a094cc7 | refs/heads/master | 2021-05-21T01:25:39.910529 | 2020-05-05T18:08:20 | 2020-05-05T18:08:20 | 252,485,317 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,252 | r | gdb_read.R | require(rgdal)
library(sf)
library(here)
library(leaflet)
# The input file geodatabase
fgdb <- "C:/Users/HP/OneDrive - University of North Carolina at Chapel Hill/EPA_12_13_2017/Groundwater Well Use/Final Well Estimates/subsets/subsets.gdb"
# List all feature classes in a file geodatabase
subset(ogrDrivers(), grepl("GDB", name))
fc_list <- ogrListLayers(fgdb)
print(fc_list)
# Read the feature class
NCBG <- st_read(fgdb, layer = "NorthCarolina")
# Determine the FC extent, projection, and attribute information
summary(NCBG)
# View the feature class
plot(NCBG)
# Project to new epsg
st_crs(NCBG)
NCBG17N <- st_transform(NCBG,crs=2958)
NCBG84 <- st_transform(NCBG,crs=4326)
# Save it
st_write(NCBG17N, here("data/NCBG17N.gpkg"))
st_write(NCBG84, here("data/NCBG84.gpkg"))
simple <- st_simplify(NCBG84)
# Leaflet
leaflet(data = NCBG84)%>%
addTiles()%>%
addPolygons(color = "#444444", weight = 1, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.5,
fillColor = ~colorQuantile("YlOrRd", Hybd_Tot_10)(Hybd_Tot_10),
highlightOptions = highlightOptions(color = "white", weight = 2,
bringToFront = TRUE))
|
95b0b986a5355b174ff53e70c7970305dfb578be | 990147d8da8fe93edd1f4d9af736d947108e64b6 | /plot3.R | 91279a26fded62a1e1e2076ef5a0ba83b01ffcb6 | [] | no_license | lamchoonho/Exploratory_Data_Analysis_Project_2 | c3e5021b5de73abdf5e27e839f7b744ffc1898be | 52882efb427d7e87b9f531d14ab30b75976eb4b7 | refs/heads/master | 2021-01-12T04:57:19.217619 | 2017-01-02T08:03:18 | 2017-01-02T08:03:18 | 77,811,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,199 | r | plot3.R | # Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad)
# variable, which of these four sources have seen decreases in emissions from 1990-2008
# for Baltimore City? Which have seen increases in emissions from 1999-2008? Use the
# ggplot2 plotting system to make a plot answer this question.
library(ggplot2)
# Reading source file
source("archiveFile.R")
# Load the NEI & SCC data frames.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset NEI data by Baltimore's fip.
baltimoreNEI <- NEI[NEI$fips=="24510",]
# Aggregate using sum the Baltimore emissions data by year
aggTotalsBaltimore <- aggregate(Emissions ~ year, baltimoreNEI,sum)
# Output plot file
png("plot3.png", bg="transparent")
# plotting bar chart
ggp <- ggplot(baltimoreNEI,aes(factor(year),Emissions,fill=type)) +
geom_bar(stat="identity") +
theme_bw() + guides(fill=FALSE)+
facet_grid(.~type,scales = "free",space="free") +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (Tons)")) +
labs(title=expression("PM"[2.5]*" Emissions, Baltimore City 1999-2008 by Source Type"))
print(ggp)
dev.off() |
71715ff08734fe3378df906797b55665074379ea | fbd719c38f707a192d06ab4de7a1b3fcb15cd539 | /R/wc_assignment.R | 22e4c39ce7d7769f6c56445aa72a68e34131e36f | [] | no_license | cran/whiboclustering | cd65d81c04cd4af9485a4ebb76f08c7b53add534 | dfb57913f4f0e0df5c9cfba75738792aea58ed2c | refs/heads/master | 2020-04-07T13:04:07.970688 | 2018-11-20T12:10:03 | 2018-11-20T12:10:03 | 158,392,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,882 | r | wc_assignment.R | #' General Component for Assignment of data points to Cluster Representatives.
#'
#' @param data A dataset for which data points needs to be assigned to Cluster Representatives.
#' @param centroids Cluster representatives.
#' @param assignment_type String which signal which assignment type to be used. Check \code{wc_assign_types} for possible values.
#' @return A vector of assignments.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assignment <- function(data, centroids, assignment_type)
{
# CHECKING FOR ERRORS
if (!(tolower(assignment_type) %in% tolower(wc_assign_types$Type)))
{
stop('Please enter assignment function that is available in wc_assign_types data frame')
}
if(!(class(data) %in% c('data.frame', 'matrix')))
{
stop('Data should be data.frame or matrix')
}
if(!(class(centroids) %in% c('data.frame', 'matrix')))
{
stop('Centroids should be data.frame or matrix')
}
if(ncol(data) != (ncol(centroids) - 1))
{
stop('Data and Centroids not compatible')
}
#ASSIGN EXAMPLES TO CENTROIDS
assignment <- eval(call(name = as.character(wc_assign_types$Method[tolower(wc_assign_types$Type) == tolower(assignment_type)]), data, centroids))
return(assignment)
}
#' Assign data points using Euclidean distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_euclidean <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  # Cross-distance block of the full distance matrix: rows = data points,
  # columns = centroids; each point goes to the nearest centroid.
  assignment <- apply(X = as.matrix(stats::dist(rbind(data, centroids[, !grepl("WCCluster", colnames(centroids))]), method = 'euclidean'))[1:nrow(data), (nrow(data) + 1):(nrow(data) + nrow(centroids))], MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using squared Euclidean distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_squared_euclidean <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  # NOTE(review): this computes the Euclidean distance between the
  # element-wise SQUARED vectors, not the classical squared Euclidean
  # distance (which would produce the same argmin as plain Euclidean).
  # Kept as-is -- presumably intentional in the WhiBo taxonomy; confirm.
  assignment <- apply(X = as.matrix(stats::dist(rbind(data^2, centroids[, !grepl("WCCluster", colnames(centroids))]^2), method = 'euclidean'))[1:nrow(data), (nrow(data) + 1):(nrow(data) + nrow(centroids))], MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Manhattan distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_manhattan <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  # Cross-distance block: rows = data points, columns = centroids.
  assignment <- apply(X = as.matrix(stats::dist(rbind(data, centroids[, !grepl("WCCluster", colnames(centroids))]), method = 'manhattan'))[1:nrow(data), (nrow(data) + 1):(nrow(data) + nrow(centroids))], MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Canberra distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_canberra <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  # stats::dist 'canberra' = sum(|x - y| / (|x| + |y|)) over coordinates.
  assignment <- apply(X = as.matrix(stats::dist(rbind(data, centroids[, !grepl("WCCluster", colnames(centroids))]), method = 'canberra'))[1:nrow(data), (nrow(data) + 1):(nrow(data) + nrow(centroids))], MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Chebyshev distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_chebyshev <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  # stats::dist 'maximum' is the Chebyshev (supremum) distance.
  assignment <- apply(X = as.matrix(stats::dist(rbind(data, centroids[, !grepl("WCCluster", colnames(centroids))]), method = 'maximum'))[1:nrow(data), (nrow(data) + 1):(nrow(data) + nrow(centroids))], MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Cosine distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_cosine <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  centers <- centroids[, !grepl("WCCluster", colnames(centroids))]
  # Cosine similarity between every data row and every centroid row.
  # Two fixes versus the original: (1) the centroid norms now exclude the
  # WCCluster bookkeeping column (the numerator always did); (2) the
  # similarity is converted to a distance (1 - similarity) before taking
  # which.min -- previously points were sent to the LEAST similar centroid.
  similarity <- (as.matrix(data) %*% as.matrix(t(centers))) / (as.matrix(apply(X = data, MARGIN = 1, FUN = function(x) {sqrt(sum(x^2))})) %*% as.matrix(t(apply(X = centers, MARGIN = 1, FUN = function(x) {sqrt(sum(x^2))}))))
  distances <- 1 - similarity

  assignment <- apply(X = distances, MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Correlation distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_correlation <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  # 1 - (centered inner products) / (products of element-wise absolute
  # centered values). Kept byte-identical to the original.
  # NOTE(review): the numerator centers centroids with 1/ncol(centroids)
  # (WCCluster column recycled back in) while the denominator uses
  # 1/ncol(centers), and the denominator is |A| %*% |B| rather than a true
  # Pearson normalization -- confirm against the intended formula.
  # Rows that are constant after centering yield 0/0 = NaN entries, which
  # which.min() skips.
  distances <- 1 - (as.matrix(data - 1/ncol(data) * rowSums(data)) %*% as.matrix(t(centroids[, !grepl("WCCluster", colnames(centroids))] - 1/ncol(centroids[, !grepl("WCCluster", colnames(centroids[, !grepl("WCCluster", colnames(centroids))]))]) * rowSums(centroids[, !grepl("WCCluster", colnames(centroids))])))) / (sqrt(as.matrix(data - 1/ncol(data) * rowSums(data))^2) %*% sqrt(as.matrix(t(centroids[, !grepl("WCCluster", colnames(centroids))] - 1/ncol(centroids[, !grepl("WCCluster", colnames(centroids))]) * rowSums(centroids[, !grepl("WCCluster", colnames(centroids))]))^2)))

  assignment <- apply(X = distances, MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Sorensen distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_sorensen <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  centers <- centroids[, !grepl("WCCluster", colnames(centroids))]

  # Numerator: sum of |x - c| per (data point, centroid) pair. The nested
  # apply pairs every data row with every centroid row explicitly; the
  # original vector-minus-data.frame arithmetic recycled the data row DOWN
  # the centroid rows, producing wrong differences for rows with unequal
  # coordinates.
  upper_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centers, MARGIN = 1, FUN = function(c) {sum(abs(r - c))})}))
  # Denominator: lower[i, j] = sum(data_i) + sum(centroid_j). outer() keeps
  # the centroid sums constant within a column; the original matrix(rep())
  # construction filled column-major and scrambled them.
  lower_matrix <- outer(rowSums(data), rowSums(centers), FUN = "+")

  assignment <- apply(X = upper_matrix / lower_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
# #' Assign data points using Soergel distance.
# #'
# #' @param data A dataset for which data points needs to be assigned to Cluster Representatives.
# #' @param centroids Cluster representatives.
# #' @return A vector of assignments.
# #' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
# wc_assign_soergel <- function(data, centroids)
# {
# if(!(class(data) %in% c('data.frame', 'matrix')))
# {
# stop('Data should be data.frame or matrix')
# }
#
# if(!(class(centroids) %in% c('data.frame', 'matrix')))
# {
# stop('Centroids should be data.frame or matrix')
# }
#
# if(ncol(data) != (ncol(centroids) - 1))
# {
# stop('Data and Centroids not compatible')
# }
#
# upper_matrix <- matrix(unlist(lapply(X = apply(X = data, MARGIN = 1, FUN = '-', centroids[, !grepl("WCCluster", colnames(centroids))]), FUN = function(x) {apply(X = x, MARGIN = 1, FUN = function(x) {sum(abs(x))})})), nrow = nrow(data), byrow = TRUE)
# lower_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centroids[, !grepl("WCCluster", colnames(centroids))], MARGIN = 1, FUN = function(c) {sum(pmax(r, c))})}))
#
# assignment <- apply(X = upper_matrix / lower_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
# assignment <- as.numeric(assignment)
# return(assignment)
# }
#' Assign data points using Kulczynski distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_kulczynski <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  centers <- centroids[, !grepl("WCCluster", colnames(centroids))]

  # Numerator: sum of |x - c| per (data point, centroid) pair. The nested
  # apply pairs every data row with every centroid row explicitly; the
  # original vector-minus-data.frame arithmetic recycled the data row DOWN
  # the centroid rows, producing wrong differences for rows with unequal
  # coordinates.
  upper_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centers, MARGIN = 1, FUN = function(c) {sum(abs(r - c))})}))
  # Denominator: sum of coordinate-wise minima (unchanged).
  lower_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centers, MARGIN = 1, FUN = function(c) {sum(pmin(r, c))})}))

  assignment <- apply(X = upper_matrix / lower_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Lorentzian distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_lorentzian <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  centers <- centroids[, !grepl("WCCluster", colnames(centroids))]

  # Lorentzian distance: sum(log(1 + |x - c|)) per (point, centroid) pair.
  # The nested apply pairs every data row with every centroid row; the
  # original vector-minus-data.frame arithmetic recycled the data row DOWN
  # the centroid rows, producing wrong differences for rows with unequal
  # coordinates.
  dist_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centers, MARGIN = 1, FUN = function(c) {sum(log(1 + abs(r - c)))})}))

  assignment <- apply(X = dist_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Gower distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_gower <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  centers <- centroids[, !grepl("WCCluster", colnames(centroids))]

  # Gower (numeric case): mean absolute difference, i.e. Manhattan / p.
  # The nested apply pairs every data row with every centroid row; the
  # original vector-minus-data.frame arithmetic recycled the data row DOWN
  # the centroid rows, producing wrong differences for rows with unequal
  # coordinates.
  dist_matrix <- 1/ncol(data) * t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centers, MARGIN = 1, FUN = function(c) {sum(abs(r - c))})}))

  assignment <- apply(X = dist_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using intersection distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_intersection <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  centers <- centroids[, !grepl("WCCluster", colnames(centroids))]

  # Intersection distance: half the Manhattan distance per pair.
  # The nested apply pairs every data row with every centroid row; the
  # original vector-minus-data.frame arithmetic recycled the data row DOWN
  # the centroid rows, producing wrong differences for rows with unequal
  # coordinates.
  dist_matrix <- 0.5 * t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centers, MARGIN = 1, FUN = function(c) {sum(abs(r - c))})}))

  assignment <- apply(X = dist_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
# #' Assign data points using Czekanowski distance.
# #'
# #' @param data A dataset for which data points needs to be assigned to Cluster Representatives.
# #' @param centroids Cluster representatives.
# #' @return A vector of assignments.
# #' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
# wc_assign_czekanowski <- function(data, centroids)
# {
# if(!(class(data) %in% c('data.frame', 'matrix')))
# {
# stop('Data should be data.frame or matrix')
# }
#
# if(!(class(centroids) %in% c('data.frame', 'matrix')))
# {
# stop('Centroids should be data.frame or matrix')
# }
#
# if(ncol(data) != (ncol(centroids) - 1))
# {
# stop('Data and Centroids not compatible')
# }
#
# upper_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centroids[, !grepl("WCCluster", colnames(centroids))], MARGIN = 1, FUN = function(c) {sum(pmin(r, c))})}))
# lower_matrix <- matrix(unlist(lapply(X = apply(X = data, MARGIN = 1, FUN = '+', centroids[, !grepl("WCCluster", colnames(centroids))]), FUN = function(x) {apply(X = x, MARGIN = 1, FUN = function(x) {sum(x)})})), nrow = nrow(data), byrow = TRUE)
#
# assignment <- apply(X = upper_matrix / lower_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
# assignment <- as.numeric(assignment)
# return(assignment)
# }
# #' Assign data points using Motika distance.
# #'
# #' @param data A dataset for which data points needs to be assigned to Cluster Representatives.
# #' @param centroids Cluster representatives.
# #' @return A vector of assignments.
# #' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
# wc_assign_motika <- function(data, centroids)
# {
# if(!(class(data) %in% c('data.frame', 'matrix')))
# {
# stop('Data should be data.frame or matrix')
# }
#
# if(!(class(centroids) %in% c('data.frame', 'matrix')))
# {
# stop('Centroids should be data.frame or matrix')
# }
#
# if(ncol(data) != (ncol(centroids) - 1))
# {
# stop('Data and Centroids not compatible')
# }
#
# upper_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centroids[, !grepl("WCCluster", colnames(centroids))], MARGIN = 1, FUN = function(c) {sum(pmax(r, c))})}))
# lower_matrix <- matrix(unlist(lapply(X = apply(X = data, MARGIN = 1, FUN = '+', centroids[, !grepl("WCCluster", colnames(centroids))]), FUN = function(x) {apply(X = x, MARGIN = 1, FUN = function(x) {sum(x)})})), nrow = nrow(data), byrow = TRUE)
#
# assignment <- apply(X = upper_matrix / lower_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
# assignment <- as.numeric(assignment)
# return(assignment)
# }
# #' Assign data points using Ruzicka distance.
# #'
# #' @param data A dataset for which data points needs to be assigned to Cluster Representatives.
# #' @param centroids Cluster representatives.
# #' @return A vector of assignments.
# #' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
# wc_assign_ruzicka <- function(data, centroids)
# {
# if(!(class(data) %in% c('data.frame', 'matrix')))
# {
# stop('Data should be data.frame or matrix')
# }
#
# if(!(class(centroids) %in% c('data.frame', 'matrix')))
# {
# stop('Centroids should be data.frame or matrix')
# }
#
# if(ncol(data) != (ncol(centroids) - 1))
# {
# stop('Data and Centroids not compatible')
# }
#
# upper_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centroids[, !grepl("WCCluster", colnames(centroids))], MARGIN = 1, FUN = function(c) {sum(pmin(r, c))})}))
# lower_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centroids[, !grepl("WCCluster", colnames(centroids))], MARGIN = 1, FUN = function(c) {sum(pmax(r, c))})}))
#
# assignment <- apply(X = upper_matrix / lower_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
# assignment <- as.numeric(assignment)
# return(assignment)
# }
#' Assign data points using Tanimoto distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to Cluster Representatives.
#' @param centroids Cluster representatives; all columns except \code{WCCluster} are coordinates.
#' @return A numeric vector with one cluster index (row of \code{centroids}) per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_tanimoto <- function(data, centroids)
{
  # inherits() replaces class(x) %in% ...: class() of a matrix is
  # c("matrix", "array") on R >= 4.0, so the original length-2 condition
  # errors inside if() on R >= 4.2 and rejects documented matrix input.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }

  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }

  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }

  # Tanimoto distance: sum(max - min) / sum(max) per (point, centroid) pair.
  upper_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centroids[, !grepl("WCCluster", colnames(centroids))], MARGIN = 1, FUN = function(c) {sum(pmax(r, c) - pmin(r, c))})}))
  lower_matrix <- t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centroids[, !grepl("WCCluster", colnames(centroids))], MARGIN = 1, FUN = function(c) {sum(pmax(r, c))})}))

  assignment <- apply(X = upper_matrix / lower_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Inner product distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to
#'   a cluster representative.
#' @param centroids Cluster representatives; all columns except the
#'   bookkeeping "WCCluster" column must match the columns of \code{data}.
#' @return A numeric vector with one centroid index per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_inner_product <- function(data, centroids)
{
  # inherits() replaces `class(x) %in% ...`: since R 4.0 class(matrix) is
  # c("matrix", "array"), so the old test produced a length-2 condition,
  # which warns on R < 4.2 and is an error on R >= 4.2.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }
  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }
  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }
  # Inner products of every data row with every centroid, then which.min.
  # NOTE(review): the inner product is a *similarity*, so which.min picks the
  # least similar centroid; kept as-is for behavioural compatibility, but
  # verify against the other wc_assign_* functions whether this is intended.
  assignment <- apply(X = as.matrix(data) %*% as.matrix(t(centroids[, !grepl("WCCluster", colnames(centroids))])), MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
# #' Assign data points using Jaccart (numerical version) distance.
# #'
# #' @param data A dataset for which data points needs to be assigned to Cluster Representatives.
# #' @param centroids Cluster representatives.
# #' @return A vector of assignments.
# #' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
# wc_assign_jaccard_numerical <- function(data, centroids)
# {
# if(!(class(data) %in% c('data.frame', 'matrix')))
# {
# stop('Data should be data.frame or matrix')
# }
#
# if(!(class(centroids) %in% c('data.frame', 'matrix')))
# {
# stop('Centroids should be data.frame or matrix')
# }
#
# if(ncol(data) != (ncol(centroids) - 1))
# {
# stop('Data and Centroids not compatible')
# }
#
# upper_matrix <- matrix(unlist(lapply(X = apply(X = data, MARGIN = 1, FUN = '-', centroids[, !grepl("WCCluster", colnames(centroids))]), FUN = function(x) {apply(X = x, MARGIN = 1, FUN = function(x) {sum(x^2)})})), nrow = nrow(data), byrow = TRUE)
# lower_matrix <- matrix(data = rep(x = rowSums(data^2), times = nrow(centroids)), nrow = nrow(data), byrow = FALSE) + matrix(data = rep(x = t(rowSums(centroids[, !grepl("WCCluster", colnames(centroids))]^2)), times = nrow(data)), nrow = nrow(data)) - as.matrix(data) %*% as.matrix(t(centroids[, !grepl("WCCluster", colnames(centroids))]))
#
# assignment <- apply(X = upper_matrix / lower_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
# assignment <- as.numeric(assignment)
# return(assignment)
# }
# #' Assign data points using Dice (numerical version) distance.
# #'
# #' @param data A dataset for which data points needs to be assigned to Cluster Representatives.
# #' @param centroids Cluster representatives.
# #' @return A vector of assignments.
# #' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
# wc_assign_dice_numerical <- function(data, centroids)
# {
# if(!(class(data) %in% c('data.frame', 'matrix')))
# {
# stop('Data should be data.frame or matrix')
# }
#
# if(!(class(centroids) %in% c('data.frame', 'matrix')))
# {
# stop('Centroids should be data.frame or matrix')
# }
#
# if(ncol(data) != (ncol(centroids) - 1))
# {
# stop('Data and Centroids not compatible')
# }
#
# upper_matrix <- matrix(unlist(lapply(X = apply(X = data, MARGIN = 1, FUN = '-', centroids[, !grepl("WCCluster", colnames(centroids))]), FUN = function(x) {apply(X = x, MARGIN = 1, FUN = function(x) {sum(x^2)})})), nrow = nrow(data), byrow = TRUE)
# lower_matrix <- matrix(data = rep(x = rowSums(data^2), times = nrow(centroids)), nrow = nrow(data), byrow = FALSE) + matrix(data = rep(x = t(rowSums(centroids[, !grepl("WCCluster", colnames(centroids))]^2)), times = nrow(data)), nrow = nrow(data))
#
# assignment <- apply(X = upper_matrix / lower_matrix, MARGIN = 1, FUN = function(x) {which.min(x)})
# assignment <- as.numeric(assignment)
# return(assignment)
# }
#' Assign data points using Fidelity (numerical version) distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to
#'   a cluster representative. Values must be non-negative (sqrt is taken).
#' @param centroids Cluster representatives; all columns except the
#'   bookkeeping "WCCluster" column must match the columns of \code{data}.
#' @return A numeric vector with one centroid index per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_fidelity_numerical <- function(data, centroids)
{
  # inherits() replaces `class(x) %in% ...`: since R 4.0 class(matrix) is
  # c("matrix", "array"), so the old test produced a length-2 condition,
  # which warns on R < 4.2 and is an error on R >= 4.2.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }
  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }
  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }
  # Fidelity coefficient sum(sqrt(r * c)) per (row, centroid) pair.
  # NOTE(review): fidelity is a *similarity*, so which.min assigns the least
  # similar centroid; kept as-is for behavioural compatibility — verify.
  assignment <- apply(X = t(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centroids[, !grepl("WCCluster", colnames(centroids))], MARGIN = 1, FUN = function(c) {sum(sqrt(r * c))})})), MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Bhattacharyya distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to
#'   a cluster representative. Values must be non-negative (sqrt is taken).
#' @param centroids Cluster representatives; all columns except the
#'   bookkeeping "WCCluster" column must match the columns of \code{data}.
#' @return A numeric vector with one centroid index per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_bhattacharyya_numerical <- function(data, centroids)
{
  # inherits() replaces `class(x) %in% ...`: since R 4.0 class(matrix) is
  # c("matrix", "array"), so the old test produced a length-2 condition,
  # which warns on R < 4.2 and is an error on R >= 4.2.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }
  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }
  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }
  # Bhattacharyya distance -log(sum(sqrt(r * c))) per (row, centroid) pair;
  # t() flips the apply() result so rows correspond to data points.
  # NOTE(review): with unnormalised rows the coefficient can exceed 1,
  # making the "distance" negative; which.min then favours the *most*
  # similar centroid, which appears to be the intent — verify upstream.
  assignment <- apply(X = t(-1 * log(apply(X = data, MARGIN = 1, FUN = function(r) {apply(X = centroids[, !grepl("WCCluster", colnames(centroids))], MARGIN = 1, FUN = function(c) {sum(sqrt(r * c))})}))), MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Hellinger (numerical version) distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to
#'   the closest cluster representative.
#' @param centroids Cluster representatives; all columns except the
#'   bookkeeping "WCCluster" column must match the columns of \code{data}.
#' @return A numeric vector with one centroid index per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_hellinger_numerical <- function(data, centroids)
{
  # inherits() replaces `class(x) %in% ...`: since R 4.0 class(matrix) is
  # c("matrix", "array"), so the old test produced a length-2 condition,
  # which warns on R < 4.2 and is an error on R >= 4.2.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }
  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }
  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }
  # 2 * sqrt(sum((r - c)^2)) per (row, centroid) pair, then which.min.
  # NOTE(review): `FUN = '-'` subtracts each data row from the centroid data
  # frame via column-major recycling; the per-column alignment only holds
  # when nrow(centroids) is compatible with ncol(data). Computation kept
  # verbatim for behavioural compatibility — verify against the (disabled)
  # jaccard/dice variants that use the same construct.
  assignment <- apply(X = 2 * sqrt(matrix(unlist(lapply(X = apply(X = data, MARGIN = 1, FUN = '-', centroids[, !grepl("WCCluster", colnames(centroids))]), FUN = function(x) {apply(X = x, MARGIN = 1, FUN = function(x) {sum(x^2)})})), nrow = nrow(data), byrow = TRUE)), MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Assign data points using Whittaker distance.
#'
#' @param data A dataset (data.frame or matrix) whose rows are assigned to
#'   the closest cluster representative. Column means must be non-zero
#'   (both data and centroids are scaled by the data column means).
#' @param centroids Cluster representatives; all columns except the
#'   bookkeeping "WCCluster" column must match the columns of \code{data}.
#' @return A numeric vector with one centroid index per row of \code{data}.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_whittaker <- function(data, centroids)
{
  # inherits() replaces `class(x) %in% ...`: since R 4.0 class(matrix) is
  # c("matrix", "array"), so the old test produced a length-2 condition,
  # which warns on R < 4.2 and is an error on R >= 4.2.
  if(!inherits(data, c('data.frame', 'matrix')))
  {
    stop('Data should be data.frame or matrix')
  }
  if(!inherits(centroids, c('data.frame', 'matrix')))
  {
    stop('Centroids should be data.frame or matrix')
  }
  if(ncol(data) != (ncol(centroids) - 1))
  {
    stop('Data and Centroids not compatible')
  }
  # Squared distance between mean-scaled rows and mean-scaled centroids.
  # NOTE(review): as in wc_assign_hellinger_numerical, the `FUN = '-'`
  # subtraction relies on column-major recycling of the row vector over the
  # centroid data frame; kept verbatim for behavioural compatibility.
  assignment <- apply(X = matrix(unlist(lapply(X = apply(X = data/apply(X = data, MARGIN = 2, FUN = mean), MARGIN = 1, FUN = '-', centroids[, !grepl("WCCluster", colnames(centroids))]/apply(X = data, MARGIN = 2, FUN = mean)), FUN = function(x) {apply(X = x, MARGIN = 1, FUN = function(x) {sum(x^2)})})), nrow = nrow(data), byrow = TRUE), MARGIN = 1, FUN = function(x) {which.min(x)})
  assignment <- as.numeric(assignment)
  return(assignment)
}
#' Data frame for possible values of assignment types.
#'
#' Maps a human-readable distance name (Type) to the corresponding
#' wc_assign_* function name (Method). Built in a single data.frame() call
#' instead of growing the frame with repeated rbind.data.frame() calls
#' (which copies the whole frame on every append). Row order and spellings
#' (including the historical "Inersection" label) are preserved because
#' callers may look entries up by Type or by position.
#' Entries disabled in the original remain disabled: Soergel, Czekanowski,
#' Motika, Ruzicka, Jaccard, Dice.
#' @author Sandro Radovanovic \email{sandro.radovanovic@@gmail.com}
wc_assign_types <- data.frame(
  'Type' = c('Euclidean', 'Squared Euclidean', 'Manhattan', 'Canberra',
             'Chebyshev', 'Cosine', 'Correlation', 'Sorensen', 'Kulczynski',
             'Lorentzian', 'Gower', 'Inersection', 'Tanimoto',
             'Inner Product', 'Fidelity', 'Bhattacharyya', 'Hellinger',
             'Whittaker'),
  'Method' = c('wc_assign_euclidean', 'wc_assign_squared_euclidean',
               'wc_assign_manhattan', 'wc_assign_canberra',
               'wc_assign_chebyshev', 'wc_assign_cosine',
               'wc_assign_correlation', 'wc_assign_sorensen',
               'wc_assign_kulczynski', 'wc_assign_lorentzian',
               'wc_assign_gower', 'wc_assign_intersection',
               'wc_assign_tanimoto', 'wc_assign_inner_product',
               'wc_assign_fidelity_numerical',
               'wc_assign_bhattacharyya_numerical',
               'wc_assign_hellinger_numerical', 'wc_assign_whittaker'))
|
96ac8f3512f27f9acc8b9fcda29e849056b978a6 | ffbad7c2d97a913c99f872611cc3e7a9493265cc | /R/missingdata.R | 1be1f7667a8703d6d6296477e632ac4e509acd1f | [] | no_license | cran/AGHmatrix | b98e25ddadeca16198d07615816462565c291140 | ec303d31f7f69b38ffc2d89e1650ecd7a2c87067 | refs/heads/master | 2023-04-16T01:44:45.265585 | 2023-04-03T19:32:38 | 2023-04-03T19:32:38 | 136,302,876 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,409 | r | missingdata.R | #########################################
#
# Package: AGHmatrix
#
# File: missingdata.R
# Contains: missingdata
#
# Written by Rodrigo Rampazo Amadeu
#
# First version: Feb-2014
# Last update: 14-Apr-2015
# License: GPL-3
#
#########################################
#' Survying on missing data
#'
#' This function verify which rows in a pedigree data has missing parental or conflictuos data
#'
#' @param data data name from a pedigree list
#' @param unk unknown value of your data
#'
#' @return list with $conflict: rows of the data which are at least one parental name equal to the individual. $missing.sire: rows of the data which arie missing data sire (Parental 1) information. $missing.dire: same as above for dire (Parental 2). $summary.missing: summary of the missing data. 2 columns, 1st for the name of the parental listed, 2nd for the how many times appeared in the data.
#'
#' @examples
#' data(ped.mrode)
#' missingdata(ped.mrode)
#'
#' @author Rodrigo R Amadeu, \email{rramadeu@@gmail.com}
#'
#' @export
missingdata <- function(data,
                        unk=0
                        ){
  # Keep the pedigree under its own name; `data` is cleared so the name is
  # free (it was historically reused by a draft molecular-data section).
  pedigree.data <- data
  data <- c()
  # A pedigree must have exactly 3 columns: individual, sire, dire.
  # (Message fixed: the old text claimed "more than 3 columns" although the
  # guard also fires when there are fewer than 3.)
  if( ncol(pedigree.data) != 3 ){
    print("Data should have exactly 3 columns (individual, sire, dire), please verify")
    return()
  }
  # Index every parental name against the individual column, with the `unk`
  # code prepended at position 1: known parents map to positions 2..n+1,
  # parents absent from the individual list map to NA.
  ind.data <- as.vector(c(unk,as.character(pedigree.data[,1])))
  sire.data <- as.vector(pedigree.data[,2])
  dire.data <- as.vector(pedigree.data[,3])
  sire <- match(sire.data, ind.data)
  dire <- match(dire.data, ind.data)
  ind <- seq_along(ind.data)   # was as.vector(c(1:length(ind.data)))
  missing <- c()
  # Conflicts: individual i sits at position i + 1 of ind.data, so a parent
  # index equal to ind[-1] means the individual is listed as its own parent.
  missing$conflict <- c(which( sire == ind[-1]),which( dire == ind[-1] ))
  # Rows with a missing (unmatched) sire (Parent 1) / dire (Parent 2).
  missing$sire.na <- c( which(is.na(sire)))
  missing$dire.na <- c( which(is.na(dire)))
  # Count how often each unmatched parent name appears on either side.
  missing$sire <- as.matrix(summary(as.factor(pedigree.data[which(is.na(sire)),2])))
  missing$dire <- as.matrix(summary(as.factor(pedigree.data[which(is.na(dire)),3])))
  names <- unique(c(rownames(missing$sire),rownames(missing$dire)))
  pos.sire <- match(rownames(missing$sire),names)
  pos.dire <- match(rownames(missing$dire),names)
  missing$parent <- rep(0,length(names))
  missing$parent[pos.dire] <- missing$dire
  missing$parent[pos.sire] <- missing$parent[pos.sire]+missing$sire
  missing$parent <- as.matrix(missing$parent)
  rownames(missing$parent) <- names
  # Final list. (A commented-out draft handling molecular/marker data used
  # to live here; consult the package history if it is ever revived.)
  missing <- list(conflict=missing$conflict,missing.sire=missing$sire.na,missing.dire=missing$dire.na,summary.missing=missing$parent)
  return(missing)
}
|
8c1ac463e600cdb5c7508ca18e89f4d378ae807e | f49fa5f89188ef24f06701fa5c30b160d88075d3 | /dataPreprocess_glassData.R | a9d47c84be183a14dad5c256f6ba980b79d8823d | [] | no_license | rsharma11/Machine-learning | bbf2b0251c73a4192a6bd73514613508308f22c5 | 89a459b73d1748212af196dc404b15e0e2a76545 | refs/heads/master | 2020-03-27T06:47:22.886872 | 2018-10-30T22:22:57 | 2018-10-30T22:22:57 | 146,135,458 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 878 | r | dataPreprocess_glassData.R | library(mlbench)
library(ggplot2)
library(corrplot)
library(corrgram)
library(reshape2)
library(e1071)
# Load the Glass data set from mlbench: 214 samples, 9 numeric predictors
# (RI, Na, Mg, Al, Si, K, Ca, Ba, Fe) plus the Type factor in column 10.
data(Glass)
str(Glass)
attach(Glass)  # NOTE(review): attach() is discouraged; kept for compatibility
glassData <- Glass
##### Distribution and relationship between predictors
names <- names(glassData)       # NOTE(review): shadows base::names
classes <- sapply(glassData, class)
# Faceted histograms of every predictor on a common value axis
ggplot(melt(glassData), aes(x = value)) + geom_histogram() + facet_wrap(~variable) + xlim(0, 20)
# Pairwise correlations between the 9 numeric predictors (drop Type, col 10)
corrMat <- cor(Glass[, -10])
corrplot(corr = corrMat, type = "lower")
##### Outliers detection
boxplot(glassData)
##### Skewness detection
apply(glassData[, -10], 2, skewness)
# Fix: set the 3x3 grid BEFORE drawing the nine histograms. The original
# set par(mfrow = c(1, 9)) here and c(3, 3) only after plotting, so the
# intended layout never applied.
par(mfrow = c(3, 3))
hist( Glass$RI )  # fixed: the column is "RI"; Glass$Ri was NULL and hist() errored
hist( Glass$Na )
hist( Glass$Mg ) # Looks multimodal
hist( Glass$Al )
hist( Glass$Si )
hist( Glass$K ) # Looks like a data error in that we have only two samples with a very large K value
hist( Glass$Ca )
hist( Glass$Ba ) # Looks like a skewed distribution
hist( Glass$Fe )
par(mfrow = c(1, 1))  # restore the default single-panel layout
|
b21a1748913e816d3f1c9b174e383af9717c3a34 | ab2749ed7092f3d79690a2495867b109daf9a48b | /R/app_server.R | 8fb30acd1db257e4e2e1ef03531e9d1ab2f20dbd | [
"MIT"
] | permissive | TileDB-Inc/gtexplorer | 49f93d47f055fd1498dd52ed25ee1df1a972b2d5 | df9a9814d095e53d8b93e97164f81ca42a8847d0 | refs/heads/master | 2023-08-26T22:16:00.507436 | 2021-09-30T16:00:45 | 2021-09-30T16:01:07 | 368,531,835 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,518 | r | app_server.R | #' Server-side logic
#'
#' You can override the default array URI with the environment variable,
#' `GTEXPLORER_URI`.
#'
#' @param input,output,session Internal parameters for {shiny}
#' @importFrom DT datatable renderDT JS
#' @import shiny
#' @importFrom dplyr inner_join
#' @noRd
app_server <- function(input, output, session) {
  # Array URI comes from the environment when set; otherwise the public
  # TileDB GTEx demo array is used.
  array_uri <- Sys.getenv(
    x = "GTEXPLORER_URI",
    unset = "tiledb://TileDB-Inc/gtex-analysis-rnaseqc-gene-tpm"
  )
  tdb_genes <- open_gtex_array(array_uri, attrs = "tpm")
  # Module server; returns a reactive with the user's selected genes
  # (rows are rendered in the table below, so it carries at least
  # gene_id and gene_name columns).
  selected_genes <- queryParamsServer("params")
  # Single-row-selectable table of the selected genes.
  output$table_genes <- DT::renderDT({
    req(selected_genes())
    message("Rendering table of selected genes")
    DT::datatable(
      selected_genes(),
      rownames = FALSE,
      style = "bootstrap4",
      selection = list(mode = "single", selected = 1, target = "row"),
      extensions = c("Responsive"),
      options = list(
        stateSave = TRUE,
        searching = FALSE,
        paging = TRUE,
        info = FALSE,
        lengthChange = FALSE
      )
    )
  })
  # gene_id of the table row currently highlighted by the user.
  selected_gene_id <- eventReactive(input$table_genes_rows_selected, {
    message(
      sprintf(
        "Selecting gene_id from row %i of table",
        input$table_genes_rows_selected
      )
    )
    selected_genes()$gene_id[input$table_genes_rows_selected]
  })
  # Copy-paste code snippets that mirror the current selection.
  output$r_snippet <- shiny::renderText({
    message("Updating R snippet")
    build_r_snippet(array_uri, selected_gene_id())
  })
  output$py_snippet <- shiny::renderText({
    message("Updating Python snippet")
    build_py_snippet(array_uri, selected_gene_id())
  })
  # Expression values for the selected gene, queried lazily from TileDB.
  tbl_results <- shiny::reactive({
    req(selected_gene_id())
    message(sprintf("Querying array for %s", selected_gene_id()))
    tdb_genes[selected_gene_id(),]
  })
  # Switch to the Results tab whenever a new gene selection arrives.
  shiny::observeEvent(selected_genes(), {
    req(input$`main-tabs` != "Results")
    message("Switching to results tab")
    shiny::updateTabsetPanel(session, "main-tabs",
      selected = "Results"
    )
  })
  # output$download_results <- shiny::downloadHandler(
  #   filename = "tiledb-quokka-export.csv",
  #   content = function(file) {
  #     readr::write_csv(tbl_results(), file)
  #   }
  # )
  # Boxplot of expression joined against the sample annotations
  # (tbl_samples is defined elsewhere in the package).
  output$plot_results <- plotly::renderPlotly({
    req(tbl_results())
    message("Rendering results plot\n")
    build_boxplot(
      dplyr::inner_join(tbl_results(), tbl_samples, by = "sample"),
      title = sprintf(
        "Gene expression for %s (%s)",
        selected_genes()$gene_name[1],
        selected_gene_id()
      )
    )
  })
}
|
8905758eeab161fd77769ce6c6fb0933d407236c | 0a906cf8b1b7da2aea87de958e3662870df49727 | /distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610036529-test.R | 08ec2cf8cd1c57b43961563c82210f400c6ded34 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 595 | r | 1610036529-test.R | testlist <- list(data = structure(c(0, 0, 1.82919901241135e-183, 1.61344522674997e-231, 3.13151306251402e-294), .Dim = c(1L, 5L)), x = structure(c(1.51067888575209e-314, 5.11785327037676e+269, 2.12276966337746e-313, 2.64301187028838e-260, 5.22892022591822e-307, Inf, 5.29946746816433e-169, 1.34719146781012e-309, NaN, 0, 1.51067888575209e-314, 0, 3.31561842338324e-316, 1.25197751666951e-312, 2.12199579047121e-314, 1.51067888575209e-314, 2.46691094279225e-308, 9.58807024836225e-92, 7.17720171462568e-304, 0), .Dim = 5:4))
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result) |
0a67ad4b325d030c5e71533897f2b3e08817b92e | 5003125ed199b027af3657b5b2b16f209966e019 | /cachematrix.R | d4935cf1fcd06f8284b067be86aa1d497511a4ea | [] | no_license | courspeter/ProgrammingAssignment2 | f310249397031d3e46ac4ec75efd56621148d585 | 7d53a5d1aa0d2dbf65123cfa1253ded46eb2e146 | refs/heads/master | 2021-01-11T05:54:06.543455 | 2016-09-24T12:16:03 | 2016-09-24T12:16:03 | 32,617,996 | 0 | 0 | null | 2015-03-21T03:09:39 | 2015-03-21T03:09:39 | null | UTF-8 | R | false | false | 1,453 | r | cachematrix.R | ## This file provides functions makeCacheMatrix and cacheSolve that can speed
## up computations involving matrix inversion by caching inversion results.
## makeCacheMatrix creates an object that can hold a matrix with its inverse
## lazily cached.
## makeCacheMatrix: build a special "matrix" object that can memoise its
## inverse. The returned accessors are closures over one shared environment
## holding the matrix `x` and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    list(
        # replace the stored matrix; any cached inverse becomes stale
        set = function(y) {
            x <<- y
            cached_inverse <<- NULL
        },
        # read back the stored matrix
        get = function() x,
        # store a freshly computed inverse
        setinv = function(newinv) cached_inverse <<- newinv,
        # retrieve the cached inverse (NULL when not yet computed)
        getinv = function() cached_inverse
    )
}
## cacheSolve returns the inverse of the matrix held by x. It uses the inverse
## matrix cached from a previous invocation if any, or computes newly using
## solve() and caches the result.
## cacheSolve: return the inverse of the matrix wrapped by `x` (an object
## produced by makeCacheMatrix). A previously cached inverse is reused;
## otherwise the inverse is computed with solve(), stored, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
    cached <- x$getinv()
    if (is.null(cached)) {
        # cache miss: compute, remember, return
        cached <- solve(x$get(), ...)
        x$setinv(cached)
        return(cached)
    }
    message("getting cached data")
    cached
}
|
2e5ac5aed3ade960a08932fe7dce490b9c45b234 | 1b9f39f73e01b8deaa2d23bbcb7908e9c7bd2700 | /src/d2q9_kuper/Dynamics.R | dc04123a1618e2ea06c50854f919f7e92c9c01e8 | [] | no_license | myousefi2016/TCLB | cc9d1fdf7158ebe5880a7478ca648e2edcc41670 | 5cc29ddb286094264892bdb3fa646088900cf836 | refs/heads/master | 2021-01-25T05:44:08.545930 | 2014-05-30T11:33:23 | 2014-05-30T11:33:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,694 | r | Dynamics.R |
# TCLB d2q9_kuper model definition. The Add* calls below are TCLB's R
# configuration DSL: they declare the model's density fields, exported
# quantities, runtime settings and global (reduction) values.

# The nine D2Q9 distribution functions f0..f8 with their lattice offsets.
AddDensity( name="f0", dx= 0, dy= 0, group="f")
AddDensity( name="f1", dx= 1, dy= 0, group="f")
AddDensity( name="f2", dx= 0, dy= 1, group="f")
AddDensity( name="f3", dx=-1, dy= 0, group="f")
AddDensity( name="f4", dx= 0, dy=-1, group="f")
AddDensity( name="f5", dx= 1, dy= 1, group="f")
AddDensity( name="f6", dx=-1, dy= 1, group="f")
AddDensity( name="f7", dx=-1, dy=-1, group="f")
AddDensity( name="f8", dx= 1, dy=-1, group="f")
# Nine companion fields fs0..fs8 declared without streaming offsets
# (dx = dy = dz = 0).
AddDensity(
	name = paste("fs",0:8,sep=""),
	dx   = 0,
	dy   = 0,
	dz   = 0,
	comment = paste("density F",0:8),
	group = "fs"
)
# Nine phi fields streamed with the standard D2Q9 offset stencil.
AddDensity(
	name = paste("phi",0:8,sep=""),
	dx   = c( 0, 1, 0,-1, 0, 1,-1,-1, 1),
	dy   = c( 0, 0, 1, 0,-1, 1, 1,-1,-1),
	dz   = c( 0, 0, 0, 0, 0, 0, 0, 0, 0),
	comment = paste("density F",0:8),
	group = "phi"
)
# Macroscopic quantities exported for output files.
AddQuantity(name="Rho", unit="kg/m3");
AddQuantity(name="U", unit="m/s", vector=T);
AddQuantity(name="P", unit="Pa");
AddQuantity(name="F", unit="N", vector=T);
# Runtime settings: relaxation, boundary values and multiphase parameters.
AddSetting(name="omega", comment='one over relaxation time')
AddSetting(name="nu", omega='1.0/(3*nu + 0.5)', default=1.6666666, comment='viscosity')
AddSetting(name="InletVelocity", default="0m/s", comment='inlet velocity')
AddSetting(name="InletPressure", InletDensity='1.0+InletPressure/3', default="0Pa", comment='inlet pressure')
AddSetting(name="InletDensity", default=1, comment='inlet density')
# NOTE(review): OutletDensity's comment string says 'inlet density' --
# looks like a copy/paste slip; it is runtime data, so left unchanged here.
AddSetting(name="OutletDensity", default=1, comment='inlet density')
AddSetting(name="InitDensity", comment='inlet density')
AddSetting(name="WallDensity", comment='vapor/liquid density of wall')
AddSetting(name="Temperature", comment='temperature of the liquid/gas')
AddSetting(name="FAcc", comment='Multiplier of potential')
AddSetting(name="Magic", comment='K')
AddSetting(name="MagicA", comment='A in force calculation')
AddSetting(name="MagicF", comment='Force multiplier')
AddSetting(name="GravitationY", comment='Gravitation in the direction of y')
AddSetting(name="GravitationX", comment='Gravitation in the direction of x')
AddSetting(name="MovingWallVelocity", comment='Velocity of the MovingWall')
AddSetting(name="WetDensity", comment='wet density')
AddSetting(name="DryDensity", comment='dry density')
AddSetting(name="Wetting", comment='wetting factor')
# Global reductions reported during the run (wall forces, probe values).
AddGlobal(name="MovingWallForceX", comment='force x')
AddGlobal(name="MovingWallForceY", comment='force y')
AddGlobal(name="Pressure1", comment='pressure at Obj1')
AddGlobal(name="Pressure2", comment='pressure at Obj2')
AddGlobal(name="Pressure3", comment='pressure at Obj3')
AddGlobal(name="Density1", comment='density at Obj1')
AddGlobal(name="Density2", comment='density at Obj2')
AddGlobal(name="Density3", comment='density at Obj3')
|
9cc9903c487054887559a7df649cfb97c1851715 | fb81bddc974906b65b84bb213435fa23f58b8368 | /CodiceEsercizi/03_Marzo/Esercizio_18_03_2019.r | 0a28db82d1dbaed7b5874a9d810bdea63658b9ac | [] | no_license | ChabbakiAymane/Statistica-e-Probabilita | c19cdabd8ebd8696fb923573c72521ba676b6793 | 734ec7dd4e6715e0955b34f1b424cf57ee5c70bb | refs/heads/master | 2020-05-26T14:28:14.281836 | 2019-06-03T16:07:22 | 2019-06-03T16:07:22 | 188,264,152 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,101 | r | Esercizio_18_03_2019.r | library(MASS);
# If not installed: install.packages("MASS") -- fractions() below needs it.
# NOTE: the combinat package is no longer required; committee counts are
# computed with base choose(n, k) instead of materialising combn(n, k) just
# to count its columns (combn(33, 8) is an 8 x 13,884,156 matrix).
# Data
matematici <- 21   # mathematicians in the group
fisici <- 12       # physicists in the group
# Committee size
comitati <- 8
# Computation
gruppo <- matematici + fisici
casiTotali <- choose(gruppo, comitati)
# 1: committees formed by 5 mathematicians and 3 physicists
nMatematici <- 5
nFisici <- 3
casiMatematici <- choose(matematici, nMatematici)
casiFisici <- choose(fisici, nFisici)
casiFavorevoli <- casiMatematici * casiFisici
casiFavorevoli
# 2: probability of a committee with 5 mathematicians and 3 physicists
Pr_comitati <- casiFavorevoli/casiTotali
fractions(Pr_comitati)
# 3: committees with at least 1 physicist and more mathematicians than physicists
# Possible compositions:
# 1: 7 mathematicians + 1 physicist
casoMat1 <- choose(matematici, 7)
casoFis1 <- choose(fisici, 1)
caso1 <- casoMat1 * casoFis1
# 2: 6 mathematicians + 2 physicists
casoMat2 <- choose(matematici, 6)
casoFis2 <- choose(fisici, 2)
caso2 <- casoMat2 * casoFis2
# 3: 5 mathematicians + 3 physicists
casoMat3 <- choose(matematici, 5)
casoFis3 <- choose(fisici, 3)
caso3 <- casoMat3 * casoFis3
comitatiTot <- caso1 + caso2 + caso3
comitatiTot
# Sanity check (disabled, as in the original): summing the counts over all
# mathematician/physicist splits of an 8-person committee must give casiTotali.
# NOTE(review): the original commented check also included a "caso9" term
# choose(21, 8) * choose(12, 1), i.e. a 9-person committee -- fix before use.
# sommaCasi <- sum(sapply(0:8, function(k) choose(matematici, 8 - k) * choose(fisici, k)))
# sommaCasi == casiTotali
f31dd6dfb49f3e2c0e29bdf239541479151383bf | ca4cc9c323fe000df7189a448dd59618f70b8c2f | /man/RomicsBatchCorrection.Rd | 224e357705cec466cacaca8348087a6e07e3e670 | [
"BSD-2-Clause"
] | permissive | PNNL-Comp-Mass-Spec/RomicsProcessor | 235c338d2192f385d408e55c302868e37ff9dc06 | 72d35c987900febc3e6c6ed416d4d72dc5820075 | refs/heads/master | 2023-03-18T08:14:48.098980 | 2023-03-15T16:50:14 | 2023-03-15T16:50:14 | 206,400,976 | 4 | 2 | BSD-2-Clause | 2022-12-05T21:55:36 | 2019-09-04T19:49:26 | HTML | UTF-8 | R | false | true | 1,230 | rd | RomicsBatchCorrection.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/11_sva_Batch_Correction.R
\name{romicsBatchCorrection}
\alias{romicsBatchCorrection}
\title{romicsBatchCorrection()}
\usage{
romicsBatchCorrection(
romics_object,
batch_factor = "factor",
method = "ComBat",
...
)
}
\arguments{
\item{romics_object}{A romics_object created using romicsCreateObject()}
\item{batch_factor}{A factor contained in the romics_object that will serve as the batch covariate.}
\item{method}{has to be either 'ComBat' or 'mean.only' to indicate how the ComBat function should be run.}
\item{...}{parameters can be passed to sva::ComBat(), see sva::ComBat() documentation for more details.}
}
\value{
This function returns a transformed romics_object.
}
\description{
Performs the sva::ComBat() batch correction on the data layer of the romics_object. The data layer must not contain missing values and the factor utilized will be the one used for the correction.
}
\details{
This function is used to perform a ComBat batch correction on a romics_object. It can be performed using the ComBat method or using a mean.only method. See the sva::ComBat() documentation for more details.
}
\author{
Geremy Clair
}
|
6582ca1aa7459dbd073cb88cdc628dd5b924b22d | 2b28a8ee68f2c749987185760ebcdf25fd1a0e16 | /man/is_multidim.Rd | dfc9f539ec26df7d039b415941ece3e24e24715e | [] | no_license | gastonstat/tester | 8becbb7ef1338bd37e6f3c7fe4cde80e3448756d | 863cb587b8d3d9a173f377062aa5313089a2302a | refs/heads/master | 2020-04-06T07:05:41.475677 | 2015-08-28T21:45:40 | 2015-08-28T21:45:40 | 11,073,788 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 717 | rd | is_multidim.Rd | \name{is_multidim}
\alias{is_multidim}
\title{Test if an object is multi-dimensional}
\usage{
is_multidim(x)
}
\arguments{
\item{x}{an R object}
}
\value{
whether x is multi-dimensional
}
\description{
Returns \code{TRUE} if an object is a matrix or data
frame with at least 2 rows and at least 2 columns,
\code{FALSE} otherwise
}
\examples{
# general matrix (nrow>1, ncol>1)
is_multidim(matrix(1:9, 3, 3)) # TRUE
# general data frame
is_multidim(iris) # TRUE
# vector
is_multidim(1:5) # FALSE
# factor
is_multidim(iris$Species) # FALSE
# one row matrix
is_multidim(matrix(1:5, 1, 5)) # FALSE
# one column matrix
is_multidim(matrix(1:5, 5, 1)) # FALSE
}
\seealso{
\code{\link{is_one_dim}}
}
|
dea245f2f9d9951e842e0c4f42c48e931eeb7372 | 036983f65dc517593f84d44cb14a712ea0687225 | /projects/project3/Bialonczyk_Kurowski_Vinichenko/load_libraries.R | 6d1f521c0faaf2fbaa24d1309b89c78e3cbd45f4 | [] | no_license | iketutg/2021Z-DataVisualizationTechniques | 027c0228ed95e37ddd73500909117449f6b87e25 | 125077c8b02426b779e351324c3c6d4e96ad64c1 | refs/heads/master | 2023-03-19T08:59:42.177496 | 2021-02-23T20:08:31 | 2021-02-23T20:08:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 208 | r | load_libraries.R | library("shinydashboard")
library("shiny")
library("shinydashboardPlus")
library("data.table")
library(DT)
library("shinycssloaders")
library("purrr")
library(plotly)
library(wordcloud2)
library(RColorBrewer) |
844fcb9c81ecfcafd436a385659ede69932946f1 | 0edac561faa684f45a3626ed5ce28b72ded4c547 | /R/predictive_models.r | 1277a8a7b3d72dab16f50d3a7cf1972a53b0ce46 | [] | no_license | rafaoncloud/iris-dataset | c105ed55d1324dd15ab1d8f49f18985103366c19 | f3fc2fa96121b350d0d3523d6bdcb9316e96674c | refs/heads/master | 2020-06-20T03:18:00.693282 | 2019-07-15T10:18:51 | 2019-07-15T10:18:51 | 196,972,872 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 336 | r | predictive_models.r | # Rafael Henriques
# 15-Jul-2019
# Read Dataset
# Read the iris data set; assumes the first line holds column names.
# NOTE(review): the classic UCI iris.data file has no header row -- confirm
# the file in ../dataset actually starts with one.
df <- read.table("../dataset/iris.data", header = TRUE, sep = ",")
summary(df)
head(df)
# Prepare the training and testing data:
# every 5th row goes to the test set (1/5 = 20%), the rest to training (80%).
# seq_len(nrow(df)) replaces the 1:length(df[, 1]) anti-pattern.
testIdx <- which(seq_len(nrow(df)) %% 5 == 0)
dfTrain <- df[-testIdx, ]
dfTest <- df[testIdx, ]
summary(dfTrain)
summary(dfTest)
|
75833ec4c39c69eaa730e2bdbc9e11f7a747c4cd | a2ac457f30f0690fc4328c3e5ca047617d72cc96 | /paper-plom-scatter.R | d28c855c3d675e539734c4a855ddf1e05714f52c | [] | no_license | AtrayeeNeog/Cardio-Classifier-HIWi | ce0d8f6acf1383d9aa7867cc1467b94218bd92b9 | e6e85eefc93588522fffa92ac4274c24eb91a9c2 | refs/heads/master | 2022-11-19T23:37:29.853748 | 2020-07-14T14:29:57 | 2020-07-14T14:29:57 | 259,164,483 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,242 | r | paper-plom-scatter.R | # scatterplots for PLOM lower triangle entries
# meant to be called from make_plom()
plom_scatter <- function(df, fr, fc, target) {
  # Plotting frame: the two selected feature columns plus the grouping
  # factor, with an extra "all" level reserved for the pooled group.
  dfp <- tibble(
    fr = df[[fr]],
    fc = df[[fc]],
    target = df[[target]]
  ) %>%
    mutate(target = fct_expand(target, "all"))
  # Per-group statistics: linear fit, Spearman correlation and the
  # position/text of the correlation label. The data is duplicated once
  # under target == "all" so the pooled group gets the same treatment.
  dfcor <-
    dfp %>%
    bind_rows(dfp %>% mutate(target = factor("all", levels = levels(dfp$target)))) %>%
    nest_by(target) %>%
    mutate(fit = list(lm(fr ~ fc, data = data))) %>%
    ungroup() %>%
    mutate(x_rng = map(data, ~ range(.x$fc))) %>%
    mutate(x_min = min(map_dbl(x_rng, 1))) %>%
    # mutate(x_max = max(map_dbl(x_rng, 2))) %>%
    mutate(x_max = map_dbl(x_rng, 2)) %>%
    mutate(cor = map_dbl(data, ~cor(.x$fr, .x$fc, method = "spearman"))) %>%
    # each label sits slightly right of its group's x range, on its fit line
    mutate(label_pos_x = x_max + 0.04 * (x_max - x_min)) %>%
    mutate(label_pos_y = map2_dbl(label_pos_x, fit, ~ predict(.y, newdata = tibble(fc = .x)))) %>%
    mutate(label_text = paste0("$\\rho = ", format(round(cor,2),2), "$"))
  # Scatter with one regression line per group (coloured), a pooled
  # regression line (black), and TeX-rendered correlation labels.
  ggplot(dfp, aes(x = fc, y = fr)) +
    scale_x_continuous(expand = c(0.02,0,0.02,0)) +
    geom_point(aes(color = target), size = 0.75, alpha = 0.75, pch = 16) +
    geom_smooth(method = "lm", formula = "y ~ x", color = "black", se = FALSE,
                size = 0.6, fullrange = TRUE) +
    geom_smooth(aes(color = target), method = "lm", formula = "y ~ x", se = FALSE,
                size = 0.4, fullrange = FALSE) +
    geom_label_repel(data = dfcor,
                     aes(x = label_pos_x, y = label_pos_y,
                         label = lapply(label_text, function(x){TeX(x, output = "character")}),
                         color = target),
                     size = 6/.pt, hjust = 0, direction = "y", #xlim = max(dfcor$x_max, Inf),
                     fill = "white", alpha = 0.85,
                     label.size = NA, box.padding = 0, label.padding = 0.05, label.r = 0,
                     force = 0.5, parse = TRUE) +
    scale_color_manual(values = c(.fig_opts$colors_target[[target]], "black"),
                       drop = FALSE) +
    guides(color = FALSE) +
    # labs(x = fc, y = fr)
    labs(x = NULL, y = NULL)
}
# plom_scatter(df, fr = f_grid$fr[[7]], fc = f_grid$fc[[7]], target = target)
|
0d379a618b0ddc57586aa56e0ed5d4b1e772b6a2 | 55f5928b477b2c63ff46e40c61b618d5d30000e1 | /countySummaries/callCorrectDataSummary.R | 0f5fd47821b9b79128cfb7e2f5828ee4c6a16902 | [] | no_license | dcarver1/covidNightLights | b581ec5212b590e6e87a0a9b538e05bcf06862c7 | 450229d1ed060d7fb5bd0cfe16259bd078343b86 | refs/heads/main | 2023-05-01T17:08:45.005209 | 2021-05-10T22:28:56 | 2021-05-10T22:28:56 | 302,479,805 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,251 | r | callCorrectDataSummary.R | ###
# render markdown summary for specific counties
# carverd@colostate.edu
# 20210426
###
#install.packages("psych")
library(raster)
library(dplyr)
library(sf)
library(tmap)
library(plotly)
library(psych)
library(DT)
# Interactive (leaflet-based) maps in the rendered reports.
tmap_mode("view")
# avoid scientific notation
options(scipen=999)
# Counties to summarize; one report is rendered per entry.
locations <- c("San Diego", "Brazoria", "Chambers", "Fort Bend", "Galveston",
               "Harris", "Liberty", "Montgomery", "Waller", "Robeson")
# Month labels, presumably consumed by the Rmd below.  NOTE(review):
# "janurary"/"feburary" are misspelled -- left as-is in case downstream code
# matches these exact strings; confirm before correcting.
months1 <<- c("janurary", "feburary", "march", "april", "may", "june", "july",
              "august", "september", "october")
# Render one summary report per county.  county/rads/counts (and months1) are
# assigned with <<- so they land in the global environment, where the Rmd
# presumably reads them when rendered.  NOTE(review): passing them through
# rmarkdown::render(params = ...) would make this dependency explicit.
for(i in locations){
  county <<- i
  # Monthly radiance rasters for this county.
  # NOTE(review): the "." in paste0(i, ".tif") is an unescaped regex dot.
  rads <<- list.files(path="F:/geoSpatialCentroid/covidNightLights/data/correct2020imagery",
                      pattern = paste0(i,".tif"),full.names = TRUE, recursive = TRUE)
  # Matching observation-count rasters ("<county>_obs.tif").
  counts <<- list.files(path="F:/geoSpatialCentroid/covidNightLights/data/correct2020imagery",
                        pattern = paste0(i,"_obs.tif"),full.names = TRUE, recursive = TRUE)
  rmarkdown::render(input = "F:/geoSpatialCentroid/covidNightLights/src/countySummaries/correctedDataCountsSummary.Rmd",
                    output_file = paste0("F:/geoSpatialCentroid/covidNightLights/data/correct2020imagery/compiledMonthlyValues/", county,"_summary"))
}
``` |
518ea4f6ec608b7b0e1da02aee47126e45629866 | e8caac731d5786a24ea1a4b4d2d8e84a4b8298a7 | /plot4.R | eebee6670bf4cf7c5981d77140c3d99f8780a753 | [] | no_license | tpolling/ExData_Plotting1 | 849398439aae8badd08b440c2b31e64a8b752de7 | fc4cb0adf5d2e52c039ee747afed22c120886ee1 | refs/heads/master | 2021-01-24T01:21:10.122599 | 2015-02-07T06:15:14 | 2015-02-07T06:15:14 | 30,444,956 | 0 | 0 | null | 2015-02-07T04:24:37 | 2015-02-07T04:24:37 | null | UTF-8 | R | false | false | 1,292 | r | plot4.R | # Plot 4: Combined plots
# Reading measurements file
# (file name suggests the data were pre-filtered to February 2007 --
# TODO confirm the subset was produced upstream)
consumptionFile <- "household_power_consumption__2007_02.txt"
consumption <- read.csv(consumptionFile, sep=";")
# Converting date and time
# NOTE(review): strptime() returns POSIXlt; storing it in a data frame column
# works for plotting here, but as.POSIXct() is the safer representation.
consumption$Time = strptime(paste(as.character(consumption$Date),
    as.character(consumption$Time)), format="%d/%m/%Y %H:%M:%S")
consumption$Date = as.Date(as.character(consumption$Date), format="%d/%m/%Y")
# Plots
# 2x2 grid with slightly tightened margins.
par(mfrow=c(2,2), mar=c(4,4,2,2))
# Plot 1: global active power over time
plot(consumption$Time, consumption$Global_active_power,
     xlab="", ylab="Global Active Power", type="l")
# Plot 2: voltage over time
plot(consumption$Time, consumption$Voltage,
     xlab="datetime", ylab="Voltage", type="l")
# Plot 3: the three sub-metering series overlaid on one axis
plot(consumption$Time, consumption$Sub_metering_1,
     xlab="", ylab="Energy sub metering", type="l")
points(consumption$Time, consumption$Sub_metering_2, col="red", type="l")
points(consumption$Time, consumption$Sub_metering_3, col="blue", type="l")
legend("topright", lty=1, col=c("black", "red", "blue"),
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Plot 4: global reactive power over time
plot(consumption$Time, consumption$Global_reactive_power,
     xlab="datetime", ylab="Global_reactive_power", type="l")
# Copy the finished screen plot to a 480x480 PNG.
# NOTE(review): dev.copy() can render fonts/sizes differently from the screen
# device; plotting directly into png() is more reproducible.
dev.copy(png, filename="plot4.png", width=480, height=480)
dev.off()
|
7d2c6c9aa35b1d313ad97b9e1ea0f38c4749caa0 | 890a0d15de75850f1ef7199d05b326e55494e47f | /cleaning_project_1/run_analysis.R | 7a826b5ff655522fe82ea572807a0d9bac26b2cf | [] | no_license | spekolator/datasciencecoursera | d9a404f582ea73370dc54501bba985f5ae975c27 | 024967dc164d47653ce7b988da99c112f0bf05f0 | refs/heads/master | 2021-01-10T19:25:31.649555 | 2015-02-22T22:11:25 | 2015-02-22T22:11:25 | 27,830,341 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,045 | r | run_analysis.R | # course project: getting and cleaning data
# 1. clean up,load requirements, set working directory
# NOTE(review): rm(list=ls()) and setwd() in a script are fragile -- they wipe
# the user's workspace and assume a machine-specific path; consider removing
# them and running the script from the dataset directory instead.
rm(list=ls())
# NOTE(review): require() returns FALSE instead of erroring on a missing
# package; library() would fail fast here.
require(dplyr)
require(stringr)
require(tidyr)
setwd("C:/Users/Speko2/Documents/Hopkins/cleaning/UCI HAR Dataset/")
# 2. load activity labels, feature name
activity_labels <-read.table('activity_labels.txt')
features<- read.table('features.txt')
# 3. load test data
test_data_x <- read.table('test/X_test.txt')
test_data_y <- read.table('test/y_test.txt')
test_subjects <- read.table('test/subject_test.txt')
# Column order: subject id, activity id, then the feature columns.
test_data <- cbind(test_subjects,test_data_y, test_data_x)
# 4.load train data
train_data_x <- read.table('train/X_train.txt')
train_data_y <- read.table('train/y_train.txt')
train_subjects <- read.table('train/subject_train.txt')
train_data <- cbind(train_subjects,train_data_y, train_data_x)
# 5. merge test & train data
all_data <- rbind(train_data, test_data)
# 6. select variables with 'mean()' or 'std()'
var_idx <- features[grep('mean\\(\\)|std\\(\\)', features$V2),]
# subset with found indeces
# +2 offsets the subject and activity columns prepended by cbind above.
all_data_mean_std <- all_data[,c(1,2,(2+var_idx$V1))]
# 7. format the feature names (remove paranthesis + hyphen to underscore)
feature_names <- str_replace(string = var_idx$V2 ,pattern = '\\(\\)',replacement = '')
feature_names <- as.character(str_replace_all(feature_names,pattern = '-',replacement = '_'))
# names the columns
names(all_data_mean_std)<-c('subject', 'activity', feature_names)
# 8. name Activities
# Map numeric activity codes to their text labels via activity_labels.txt.
all_data_mean_std$activity <- as.character(factor(all_data_mean_std$activity, levels = activity_labels$V1, labels = activity_labels$V2))
all_data_mean_std <- arrange(all_data_mean_std,subject, activity)
# 9. create tidy data set using tidyr+dplyr (to reshape and aggregate)
# NOTE(review): gather() is called with only the key/value names ('subject',
# 'activity') and no column selection, yet four column names are assigned
# right after -- verify this produces the intended long format;
# gather(all_data_mean_std, feature, measure, -subject, -activity) may have
# been the intent.
tidy <- gather(all_data_mean_std, subject, activity)
names(tidy) <- c('subject','activity','feature','measure')
# Mean of every feature per (subject, activity) pair.
tidy_set <- group_by(tidy, subject,activity, feature) %>%
  summarise(calculated_mean = mean(measure))
# save tidy data set
write.table(x = tidy_set, file = '../tidy_set.txt',row.names =FALSE)
|
9325f3d618625cf181dcbb53ab4b115840d24cff | 1ea9d1b9db9b6afc701d29a73b571bd7b04ecf8d | /man/combinep.Rd | e45134e1fa4bc6146f5a74b092914cb01e2656d8 | [] | no_license | sdateam/combinIT | f0af7b59fd43acdb6be07c9a90d85afb045491fc | 9c26ab060f60e603e0a015d7ab976ea3cee5e1df | refs/heads/master | 2020-04-26T04:05:41.199409 | 2019-03-30T21:04:52 | 2019-03-30T21:04:52 | 172,266,466 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,450 | rd | combinep.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test.R
\name{combinep}
\alias{combinep}
\title{Combine several interaction tests}
\usage{
combinep(x, nsim = 500, nc0 = 10000, ...)
}
\arguments{
\item{x}{A data matrix in two-factor analysis}
\item{nsim}{Number of simulations used for computing the exact p-value}
\item{dist}{If dist="sim", the exact p-value is estimated by Monte Carlo
simulation; if dist="asy", the p-value is
estimated from an asymptotic distribution. The default value is "sim".}
}
\value{
The p-value of the combined test for the input data matrix.
}
\description{
Reports the p-values of tests for non-additivity developed by Boik (1993a), Piepho (1994),
Kharrati-Kopaei and Sadooghi-Alvandi (2007), Franck et al. (2013), Malik et al. (2016)
and Kharrati-Kopaei and Miller (2016). In addition, the reported p-values are combined
using four combination methods: Bonferroni, Sidak, Jacobi expansion, and Gaussian copula.
}
\details{
If the number of rows (b) of the data matrix is less than its number of
columns (t), the data matrix is transposed. The data matrix must have more
than two rows or columns.
Requires the "mvtnorm" package.
}
\examples{
\dontrun{this is an example}
data(cnv6)
combinep(cnv6,nsim=500,nc0=10000)
}
\references{
Shenavari, Z., Kharrati-Kopaei, M. (2018). A Method for Testing Additivity
in Unreplicated Two-Way Layouts Based on Combining Multiple Interaction Tests. International
Statistical Review.
}
\author{
Zahra. Shenavari, ...
}
|
4be58330757824f7ae0157e7dac84389ec11d142 | f24edb31e7cbdf4e08cbe50b4a228e98d2ff5d13 | /R/print_conferences.R | 97e23bfb98dfec1964f13eaea53ca5a1520e837b | [] | no_license | fcampelo/ChocoLattes | b870f2edbb55d2742ef9f946882f727fd65a185b | 14df29beb5d32668d209c3d378d0a7e3898567d3 | refs/heads/master | 2021-09-09T13:54:10.879361 | 2018-03-09T13:14:55 | 2018-03-09T13:14:55 | 57,807,314 | 9 | 5 | null | 2018-03-09T13:14:56 | 2016-05-01T20:33:18 | R | UTF-8 | R | false | false | 1,480 | r | print_conferences.R | #' Print conference papers
#'
#' Prints published conference papers as a Markdown list: a section header
#' in the requested language followed by one numbered entry per paper
#' (authors, title, conference, optional page range, year, optional DOI link).
#'
#' @param x data frame containing information on published conference papers.
#'          Columns used: \code{Internac} (logical), \code{Authors} (list
#'          column), \code{Title}, \code{Conference}, \code{Pages},
#'          \code{Year} and optionally \code{DOI}.
#' @param isIntl logical flag, TRUE for international conferences, FALSE for
#'               national/regional
#' @param language Language to use in section headers ("EN" or "PT")
#'
print_conferences <- function(x,
                              isIntl   = TRUE,
                              language = c("EN", "PT")){
  # Fix: the default `language` is a length-2 vector; the original compared
  # it with `==` directly inside `if ()`, which errors on R >= 4.2.
  language <- match.arg(language)
  # Keep only the rows matching the requested conference scope.
  x    <- x[which(x$Internac == isIntl), ]
  npap <- nrow(x)
  if (npap > 0){
    # Section header in the requested language.
    if (language == "PT"){
      cat("### Artigos e Resumos em Confer\u00EAncias",
          ifelse(isIntl,
                 "Internacionais\n",
                 "Nacionais e Regionais\n"))
    }
    if (language == "EN"){
      cat("### Works in Conference Proceedings ",
          ifelse(isIntl,
                 "(International)\n",
                 "(National / Regional)\n"))
    }
    for (i in seq_len(npap)){
      # "1. Authors: _Title._ Conference"
      cat(i, ". ",
          x$Authors[[i]],
          ": _", x$Title[i], "._ ",
          x$Conference[i],
          sep = "")
      # Append the page range only when Pages looks like "123-456".
      # (Fix: the original printed a trailing ", " after the conference name
      # unconditionally, producing a double comma when no pages were given.)
      if (length(grep("[0-9]+-[0-9]+$", x$Pages[i])) > 0){
        cat(", pp. ", x$Pages[i],
            sep = "")
      }
      cat(", ", x$Year[i],
          sep = "")
      # Optional DOI rendered as a Markdown link.
      if (!is.null(x$DOI[i]) && !is.na(x$DOI[i]) && x$DOI[i] != ""){
        cat(".<br/>[[DOI: ",
            x$DOI[i],
            "](https://doi.org/",
            x$DOI[i], ")]",
            sep = "")
      }
      # Horizontal rule between entries.
      cat("\n\n<hr>",
          sep = "")
    }
  }
  invisible(NULL)
}
|
b80a24f19ac9b2b4741691877d5969f05acb6db1 | bb13d99730f782d619c82e661b2861f87572d23b | /Intro to Shiny/Example 2/server.R | b129df9d6113a8e7c56d9a2fcccf4996dc6bccab | [] | no_license | skeptycal/Women-in-Data | 1162dcf544b81b973c9f08bcace23b014cd0c936 | 00ec2a2f4ddd54ff319a73e090594d0a7a8bc61a | refs/heads/master | 2020-04-02T15:36:34.153245 | 2017-12-01T08:59:07 | 2017-12-01T08:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 313 | r | server.R | library(shiny)
# Shiny server function: echoes each input widget's current value back to a
# matching text output (text box, numeric input and select input).
function(input, output){
  output$textOutput <- renderText(paste("You entered the text:", input$myTextInput))
  output$numberOutput <- renderText(paste("You selected the number:", input$myNumberInput))
  output$selectOutput <- renderText(paste("You selected option:", input$mySelectInput))
} |
df9fe30364d701fa6e6bfb8d6cfc76f708723578 | f288cf82b0a96188fe6f6d8e3fedd89f24e49c17 | /code_jobs/jobs_1_mailing.r | ab2d38c1a4d8c49d54f3f8ddf4631d1dab3e46ae | [] | no_license | antievictionmappingproject/sf-llc-data-prep | cde006efba486e85de35db32580256b64ae1162b | 2d2fc42f60fc9dc0dea5208c500d2c75364935af | refs/heads/master | 2020-06-11T14:06:56.243058 | 2019-07-24T20:37:06 | 2019-07-24T20:37:06 | 193,992,452 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 48,961 | r | jobs_1_mailing.r | library(RPostgres)
library(dplyr)
library(janitor)
## Loading required package: DBI
# Database password for the local Postgres instance.
# NOTE(review): credentials are hard-coded in source; prefer
# Sys.getenv("PGPASSWORD") or a .pgpass/config file.
pw <- {
  "password"
}
# Connection to the Postgres database holding the assessor and LLC tables.
# bigint = "numeric" makes 64-bit integer columns come back as R numerics.
con <- dbConnect(RPostgres::Postgres(), dbname = "azaddb",
                 host = "localhost", port = 5432,
                 user = "azad", password = pw, bigint = "numeric")
# Negation of `%in%`: TRUE for each element of `x` that is absent from `y`.
'%!in%' <- function(x, y) !(x %in% y)
# Raw LLC filing records; source of LPMASTER.csv is presumably the
# Secretary of State LP/LLC master file -- TODO confirm.
data_llc <- readr::read_csv('/home/azad/data/LPMASTER.csv')
# Junk / fragmentary mailing-address values (found by manual inspection);
# rows carrying any of these are dropped below.
remove_1 <- c("ADDR", "SAME", ")", "SAME", ")", "EXEC", "70)", "46)", "20634)", "1)", "348)", "ES", "9282)", "EX", "0)", "E", "2)", "X", "181)", "2723)",
              "6)", "208)", "6", "RIEZ", "PIER", "9", "GP1", "GP", "OREXCO", "SAMECA", "476)", "EZ", "N", "11", "LEE" , "1348)", "278)", "9)", "EC", "RC )",
              "`", "1", "36)", "3665 H", "3X", "446)", "48)", "CEO", "WZ", ".", "% KTBS", "`EX", "170)", "2798)", "282)", "3 RIEZ", "EX" ,"FLOOR", "92199)", "TOWER")
remove_2 <- c("EEDDG", "POB 9", "GGGGG", "RR 2", "S", "AA", "2", "NONE" )
data_llc <- (data_llc %>% filter(mailing_address %!in% remove_1))
data_llc <- (data_llc %>% filter(mailing_address %!in% remove_2))
# Any address still shorter than 6 characters is treated as junk too.
# (Intentionally computed AFTER the two filters above, i.e. on the
# already-cleaned data -- statement order matters here.)
remove_others <- (data_llc %>% filter(nchar( mailing_address) < 6))$mailing_address
data_llc <- (data_llc %>% filter(mailing_address %!in% remove_others))
# Filing year, taken from the first four characters of file_date.
data_llc <- data_llc %>% mutate(year = substr(file_date, 1, 4))
## Match LLC mailing addresses (filing years 2018-2008) against the
## assessors_address table.
##
## Each filing year is split into two halves (keeping each normalized
## tsquery join tractable) and each half is matched separately, producing
## tables llc_mailingYYYY and llc_mailingYYYY_2.  This replaces ten
## copy-pasted per-year sections with one helper plus a loop; the table
## names and SQL are unchanged.

# Match one half-batch of LLC mailing addresses against assessors_address
# and store the matches in `out_table`.
#
# con       -- open DBI connection
# llc_half  -- data frame with file_number and mailing_* columns
# out_table -- name of the result table to create (e.g. "llc_mailing2018")
match_mailing_half <- function(con, llc_half, out_table) {
  # Stage the half-batch (only rows that have a mailing address).
  dbWriteTable(con, "llc_data_mail",
               llc_half %>%
                 select(file_number, mailing_address, mailing_city,
                        mailing_state, mailing_zip) %>%
                 filter(!is.na(mailing_address)),
               overwrite = TRUE, row.names = FALSE)
  # Rebuild llc_data_mail1 with one tsquery per normalized address variant
  # (postal_normalize can emit several normalizations per address).
  dbSendQuery(con, "DROP TABLE IF EXISTS llc_data_mail1;")
  dbSendQuery(con, "CREATE TABLE llc_data_mail1 AS
            SELECT
            file_number,
            mailing_address,
            mailing_city,
            mailing_state,
            mailing_zip,
            phraseto_tsquery('simple', unnest(postal_normalize(concat_ws(', ', mailing_address, mailing_city, mailing_state, mailing_zip )))) AS tsq
            FROM llc_data_mail;
            ")
  dbSendQuery(con, "CREATE INDEX llc_data_mail1_idx ON llc_data_mail1 USING GIST (tsq);")
  # Full-text match of assessor addresses against the LLC mailing addresses.
  # out_table is generated internally below (llc_mailingYYYY[_2]), never from
  # user input, so interpolating it into the SQL is safe here.
  dbSendQuery(con, sprintf("CREATE TABLE %s AS SELECT
             assessors_address.*,
             llc_data_mail1.file_number
             FROM assessors_address
             JOIN llc_data_mail1 ON (assessors_address.ts @@ llc_data_mail1.tsq);",
                           out_table))
  invisible(NULL)
}

# Split one filing year's records into two (nearly) equal halves.
# Fix: the original grouping vector was 0:nrow(d), which has nrow(d) + 1
# elements -- split() warned and silently truncated it.  seq_len(nrow(d)) - 1
# is the correctly sized equivalent and yields the same two groups.
split_year_in_half <- function(data_llc, yr) {
  d <- data_llc %>% filter(year == yr)
  split(d, (seq_len(nrow(d)) - 1) %/% (nrow(d) / 2))
}

# Same table-creation order as the original script: 2018, 2018_2, 2017, ...
for (yr in as.character(2018:2008)) {
  halves <- split_year_in_half(data_llc, yr)
  match_mailing_half(con, halves[[1]], paste0("llc_mailing", yr))
  match_mailing_half(con, halves[[2]], paste0("llc_mailing", yr, "_2"))
}
##### matches_mailing_07_07 ####
# Match 2007 LLC mailing addresses against assessor addresses via a
# full-text-search join; the year's filings are split into two halves so
# each join runs on a smaller table.
# NOTE(review): `0:nrow(df)` has one more element than the data frame has
# rows; kept unchanged to preserve the original split behavior.
llc_07 <- data_llc %>% filter(year == "2007")
data_ll_mail_07 <- split(llc_07, (0:nrow(llc_07) %/% (nrow(llc_07) / 2)))
# -- first half --
dbWriteTable(con, "llc_data_mail",
             data_ll_mail_07[[1]] %>%
               select(file_number, mailing_address, mailing_city,
                      mailing_state, mailing_zip) %>%
               filter(!is.na(mailing_address)),
             overwrite = TRUE, row.names = FALSE)
# dbExecute() (not dbSendQuery()) is the DBI call for statements that return
# no result set; it also releases the result object automatically.
dbExecute(con, "DROP TABLE IF EXISTS llc_data_mail1;")
# Normalize each full mailing address into a tsquery for matching.
dbExecute(con, "CREATE TABLE llc_data_mail1 AS
          SELECT
          file_number,
          mailing_address,
          mailing_city,
          mailing_state,
          mailing_zip,
          phraseto_tsquery('simple', unnest(postal_normalize(concat_ws(', ', mailing_address, mailing_city, mailing_state, mailing_zip )))) AS tsq
          FROM llc_data_mail;
          ")
dbExecute(con, "CREATE INDEX llc_data_mail1_idx ON llc_data_mail1 USING GIST (tsq);")
dbExecute(con, "CREATE TABLE llc_mailing2007 AS SELECT
          assessors_address.*,
          llc_data_mail1.file_number
          FROM assessors_address
          JOIN llc_data_mail1 ON (assessors_address.ts @@ llc_data_mail1.tsq);")
# -- second half --
dbWriteTable(con, "llc_data_mail",
             data_ll_mail_07[[2]] %>%
               select(file_number, mailing_address, mailing_city,
                      mailing_state, mailing_zip) %>%
               filter(!is.na(mailing_address)),
             overwrite = TRUE, row.names = FALSE)
dbExecute(con, "DROP TABLE IF EXISTS llc_data_mail1;")
dbExecute(con, "CREATE TABLE llc_data_mail1 AS
          SELECT
          file_number,
          mailing_address,
          mailing_city,
          mailing_state,
          mailing_zip,
          phraseto_tsquery('simple', unnest(postal_normalize(concat_ws(', ', mailing_address, mailing_city, mailing_state, mailing_zip )))) AS tsq
          FROM llc_data_mail;
          ")
dbExecute(con, "CREATE INDEX llc_data_mail1_idx ON llc_data_mail1 USING GIST (tsq);")
dbExecute(con, "CREATE TABLE llc_mailing2007_2 AS SELECT
          assessors_address.*,
          llc_data_mail1.file_number
          FROM assessors_address
          JOIN llc_data_mail1 ON (assessors_address.ts @@ llc_data_mail1.tsq);")
##### matches_mailing_06_06 ####
# Match 2006 LLC mailing addresses against assessor addresses via a
# full-text-search join; the year's filings are split into two halves so
# each join runs on a smaller table.
# NOTE(review): `0:nrow(df)` has one more element than the data frame has
# rows; kept unchanged to preserve the original split behavior.
llc_06 <- data_llc %>% filter(year == "2006")
data_ll_mail_06 <- split(llc_06, (0:nrow(llc_06) %/% (nrow(llc_06) / 2)))
# -- first half --
dbWriteTable(con, "llc_data_mail",
             data_ll_mail_06[[1]] %>%
               select(file_number, mailing_address, mailing_city,
                      mailing_state, mailing_zip) %>%
               filter(!is.na(mailing_address)),
             overwrite = TRUE, row.names = FALSE)
# dbExecute() (not dbSendQuery()) is the DBI call for statements that return
# no result set; it also releases the result object automatically.
dbExecute(con, "DROP TABLE IF EXISTS llc_data_mail1;")
# Normalize each full mailing address into a tsquery for matching.
dbExecute(con, "CREATE TABLE llc_data_mail1 AS
          SELECT
          file_number,
          mailing_address,
          mailing_city,
          mailing_state,
          mailing_zip,
          phraseto_tsquery('simple', unnest(postal_normalize(concat_ws(', ', mailing_address, mailing_city, mailing_state, mailing_zip )))) AS tsq
          FROM llc_data_mail;
          ")
dbExecute(con, "CREATE INDEX llc_data_mail1_idx ON llc_data_mail1 USING GIST (tsq);")
dbExecute(con, "CREATE TABLE llc_mailing2006 AS SELECT
          assessors_address.*,
          llc_data_mail1.file_number
          FROM assessors_address
          JOIN llc_data_mail1 ON (assessors_address.ts @@ llc_data_mail1.tsq);")
# -- second half --
dbWriteTable(con, "llc_data_mail",
             data_ll_mail_06[[2]] %>%
               select(file_number, mailing_address, mailing_city,
                      mailing_state, mailing_zip) %>%
               filter(!is.na(mailing_address)),
             overwrite = TRUE, row.names = FALSE)
dbExecute(con, "DROP TABLE IF EXISTS llc_data_mail1;")
dbExecute(con, "CREATE TABLE llc_data_mail1 AS
          SELECT
          file_number,
          mailing_address,
          mailing_city,
          mailing_state,
          mailing_zip,
          phraseto_tsquery('simple', unnest(postal_normalize(concat_ws(', ', mailing_address, mailing_city, mailing_state, mailing_zip )))) AS tsq
          FROM llc_data_mail;
          ")
dbExecute(con, "CREATE INDEX llc_data_mail1_idx ON llc_data_mail1 USING GIST (tsq);")
dbExecute(con, "CREATE TABLE llc_mailing2006_2 AS SELECT
          assessors_address.*,
          llc_data_mail1.file_number
          FROM assessors_address
          JOIN llc_data_mail1 ON (assessors_address.ts @@ llc_data_mail1.tsq);")
### pull all data
# Read every per-year match table back into the global environment, keeping
# the original llc_mailingYY_{1,2} variable names so the grep() below (and
# any downstream code) still finds them.  Years descend to mirror the
# original creation order (2018 first).
for (yr in 2018:2006) {
  suffix <- sprintf("%02d", yr %% 100)
  assign(paste0("llc_mailing", suffix, "_1"),
         dbGetQuery(con, paste0("SELECT * FROM llc_mailing", yr)))
  assign(paste0("llc_mailing", suffix, "_2"),
         dbGetQuery(con, paste0("SELECT * FROM llc_mailing", yr, "_2")))
}
# llc_mailing18_1a <- llc_mailing18_1 %>% distinct(document_number, file_number, .keep_all = TRUE)
# Collect every llc_mailing* object, stack them, and keep one row per
# (document_number, file_number) pair.
Pattern1 <- grep("llc_mailing", names(.GlobalEnv), value = TRUE)
Pattern1_list <- mget(Pattern1)
data_mailing <- bind_rows(Pattern1_list)
data_mailing1 <- data_mailing %>% distinct(document_number, file_number, .keep_all = TRUE)
readr::write_csv(data_mailing1, "~/R_Proj/data_out/var_mailing_2.csv")
# map2_df(Pattern1_list, names(Pattern1_list), ~ mutate(.x, ID = .y))
# args1 <- list("document_number", "file_number", ".keep_all = TRUE")
# Drop the intermediate per-year match tables now that everything has been
# combined and written to disk (same ascending order as the original calls).
for (yr in 2006:2018) {
  dbRemoveTable(con, paste0("llc_mailing", yr))
  dbRemoveTable(con, paste0("llc_mailing", yr, "_2"))
}
|
8b823696672e62983be691a8fc30347147c6b3dd | 364272276aa666c6b0c338070fda3bd88d047382 | /fecScrape/man/choose_cand.Rd | a00edae4a02dcd6e861afe343507bcb980bfebab | [] | no_license | PHP-2560/final-project-fecscrape | fe512de13bd2912722c7b694b3fbed23734d7f88 | fd35c254cf833454597228306c22e53a1bc3d849 | refs/heads/master | 2020-04-11T14:12:39.015798 | 2020-03-05T17:13:34 | 2020-03-05T17:13:34 | 161,845,727 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 524 | rd | choose_cand.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/choose_cand.R
\name{choose_cand}
\alias{choose_cand}
\title{Selects candidates from a list}
\usage{
choose_cand(df, firstCandidate = NULL, secondCandidate = NULL)
}
\arguments{
\item{df}{the list of candidates}
\item{firstCandidate}{The number of the first candidate in the list, if known (otherwise will prompt)}
\item{secondCandidate}{The number of the second candidate in the list, if known}
}
\description{
Selects candidates from a list
}
|
bcc103ab4a8073f6322834d74b456fa6d6431304 | 7f31b9d9740c1d938b0bd6a46428c9524ad13a44 | /man/indSample.iid.cA.cY_list.Rd | 05233c331e575ab9385619f556ae3bce04439481 | [] | no_license | chizhangucb/tmleCommunity | 9dfeff5d877d1e4e594a1674cc62a6b4f2e77572 | 7b637e507ed7f74190d1adeb9a714090729467cc | refs/heads/master | 2021-01-20T15:54:04.182971 | 2019-03-08T19:17:34 | 2019-03-08T19:17:34 | 90,799,969 | 8 | 3 | null | null | null | null | UTF-8 | R | false | true | 2,499 | rd | indSample.iid.cA.cY_list.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tmleCommunity-package.R
\docType{data}
\name{indSample.iid.cA.cY_list}
\alias{indSample.iid.cA.cY_list}
\title{An Example of a Non-Hierarchical Data Containing a Continuous Exposure with a Continuous Outcome.}
\format{A data frame with 10,000 independent observations (rows) and 6 variables:
\describe{
\item{W1}{binary baseline covariate with \eqn{P(W1=1) = 0.5}}
\item{W2}{binary baseline covariate with \eqn{P(W2=1) = 0.3}}
\item{W3}{continuous normal baseline covariate with \eqn{\mu} = 0 and \eqn{\sigma} = 0.25}
\item{W4}{continuous uniform baseline covariate with \code{min=0} and \code{max=1}}
\item{A}{continuous normal exposure where its mean depends on individual's baseline covariate values in \code{(W1, W2, W3, W4)}}
\item{Y}{continuous normal outcome where its mean depends on individual's baseline covariate and exposure values in (\code{W1},
\code{W2}, \code{W3}, \code{W4}, \code{A})}
}}
\source{
\url{https://github.com/chizhangucb/tmleCommunity/blob/master/tests/dataGeneration/get.iid.dat.Acont.R}
}
\usage{
data(indSample.iid.cA.cY_list)
}
\description{
Simulated (non-hierarchical) dataset containing 10,000 i.i.d. observations, with each row \code{i} consisting of measured baseline
covariates (\code{W1}, \code{W2}, \code{W3}, \code{W4}, \code{A}). The following section provides more details regarding individual variables in simulated
The baseline covariates \code{W1}, \code{W2}, \code{W3} and \code{W4} were sampled as i.i.d., while the value of exposure \code{A}
for each observation \code{i} was drawn conditionally on the value of \code{i}'s four baseline covariates. Besides, the continuous
outcome \code{Y} for each observation depends on \code{i}'s baseline covariates and exposure values in (\code{W1[i]},\code{W2[i]},
\code{W3[i]}, \code{W4[i]}, \code{A[i]}). The following section provides more details regarding individual variables in simulated
data.
}
\examples{
data(indSample.iid.cA.cY_list)
indSample.iid.cA.cY <- indSample.iid.cA.cY_list$indSample.iid.cA.cY
# True mean of outcome under intervention g0
psi0.Y <- indSample.iid.cA.cY_list$psi0.Y
# True mean of outcome under stochastic intervention gstar
psi0.Ygstar <- indSample.iid.cA.cY_list$psi0.Ygstar
# truncated bound used in sampling A* under gstar (in data generating mechanism)
indSample.iid.cA.cY_list$truncBD
# shift value used in sampling A* under gstar
indSample.iid.cA.cY_list$shift.val
}
\keyword{datasets}
|
2df31f56f7945c573c47a3ec34c46668e8b23947 | 0b3ce6db60b460275e9c4e2c2383e24bc992ff8d | /man/MyDimPlot.Rd | 7401b03b9ba49a335c06d0508dac25bc9cb246ba | [
"MIT"
] | permissive | RachelQueen1/SCFunctionsV3 | 5f078e9ef6655aa36ff20bdb8c1fba13025b2466 | 5dfb6419d5338d6ad7a524bd1b8134df60e4c1ef | refs/heads/master | 2020-12-06T20:06:59.540359 | 2020-02-24T13:17:43 | 2020-02-24T13:17:43 | 232,541,308 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,059 | rd | MyDimPlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myDimPlot.R
\name{MyDimPlot}
\alias{MyDimPlot}
\title{My Dim Plot function for dimension reduction plots calculated by Seurat.
More options for plotting different cluster names}
\usage{
MyDimPlot(SeuratObj, Clusters, Reduction.Type = c("umap", "tsne", "pca"),
Label.Size = 3, Point.Size = 3, Point.Colours = NULL,
Show.Legend = FALSE)
}
\arguments{
\item{SeuratObj}{seurat object}
\item{Clusters}{Vector of cluster identities, with length equal to the number of cells in the Seurat object}
\item{Reduction.Type}{Which dimensionality reduction to use (umap, tsne, pca)}
\item{Label.Size}{size for cluster label}
\item{Point.Colours}{vector of colours with length of number of clusters.}
\item{Show.Legend}{Show plot legend. Default is False}
}
\description{
My Dim Plot function for dimension reduction plots calculated by Seurat.
More options for plotting different cluster names
}
\examples{
clusters = seuratObj@active.ident
MyDimPlot(seuratObj, clusters, Reduction.Type = "tsne")
}
|
ac29ba7f4758186288eb51e835c4f45611e731a4 | 5a490bbd9415322708ca24f26feace43c7360634 | /man/qa_write_data.Rd | 35fd2b98c2618d1339dd54d9ca45c3244e119c0f | [] | no_license | takewiki/nsclpkg | fcc1d4111497cdf06f891fa284f705f5aeb3c69a | 53ce22a4b35f5c9fcf21c2c6cf69299c6d893219 | refs/heads/master | 2021-07-15T15:31:29.634080 | 2020-10-02T07:28:39 | 2020-10-02T07:28:39 | 212,937,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 298 | rd | qa_write_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qalist.R
\name{qa_write_data}
\alias{qa_write_data}
\title{保存QA数据}
\usage{
qa_write_data(data)
}
\arguments{
\item{data}{数据}
}
\value{
返回值
}
\description{
保存QA数据
}
\examples{
qa_write_data();
}
|
aa3d44e244b9cb99b1a731c3da03a1ad6b929cb3 | 586377a389df457cfc6f0bf272dda891a538fcb1 | /R/RdistanceControls.R | 29b8e3b8d7605c73723bb165811d945c50055d66 | [] | no_license | wmcdonald1/Rdistance | 572f0f032ef397d99eac8c5a312b393ad1890dd7 | 856e7d25ae9d59056cfb7e4e46831dbe2fdf125a | refs/heads/master | 2022-11-06T11:06:24.931869 | 2020-05-25T20:56:24 | 2020-05-25T20:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,935 | r | RdistanceControls.R | #' @title Control parameters for \code{Rdistance} optimization.
#'
#' @description Returns a list of optimization controls used in
#' \code{Rdistance} and provides a way to change them if needed.
#'
#' @param maxIters The maximum number of optimization
#' iterations allowed.
#'
#' @param evalMax The maximum number of objective function
#' evaluations allowed.
#'
#' @param likeTol The maximum change in the likelihood
#' (the objective) between
#' iterations that is tolerated during optimization.
#' If the likelihood changes by less than this amount,
#' optimization stops and a solution is declared.
#'
#' @param coefTol The maximum change in the model coefficients
#' between
#' iterations that is tolerated during optimization.
#' If the sum of squared coefficient differences changes
#' by less than this amount between iterations,
#' optimization stops and a solution is declared.
#'
#'
#' @param optimizer A string specifying the optimizer
#' to use. Results
#' vary between optimizers, so switching algorithms sometimes
#' makes a poorly behaved distance function converge. The valid
#' values are "optim" which uses \code{optim::optim},
#' and "nlminb" which uses \code{stats:nlminb}. The authors
#' have had better luck with "nlminb" than "optim" and "nlminb"
#' runs noticeably faster. Problems with solutions near parameter
#' boundaries may require use of "optim".
#'
#' @param hessEps A vector of parameter distances used during
#' computation of numeric second derivatives. Should have length
#' 1 or the number of parameters in the model. See function
#' \code{\link{secondDeriv}}.
#'
#' @param maxBSFailPropForWarning The proportion of bootstrap
#' iterations that can fail without a warning. If the proportion
#' of bootstrap iterations that did not converge exceeds this
#' parameter, a warning about the validity of CI's is issued in
#' the print method for
#' abundance objects.
#'
#' @return A list containing named components for each of the
#' controls. This list has the same components as this function
#' has input parameters.
#'
#' @author Trent McDonald \email{tmcdonald@west-inc.com}
#'
#' @examples
#' # increase number of iterations
#' RdistanceControls(maxIters=2000)
#'
#' # change optimizer and decrease tolerance
#' RdistanceControls(optimizer="optim", likeTol=1e-6)
#'
#' @export
RdistanceControls <- function(optimizer="nlminb",
evalMax=2000,
maxIters=1000,
likeTol=1e-8,
coefTol=1.5e-8,
hessEps=1e-8,
maxBSFailPropForWarning = 0.2){
list(optimizer=optimizer,
evalMax=evalMax,
maxIters=maxIters,
likeTol=likeTol,
coefTol=coefTol,
hessEps=hessEps,
maxBSFailPropForWarning = maxBSFailPropForWarning
)
}
|
9228eb05fff2c10046c3040a3d7f639407f57e57 | 35fac53e593ad39a3c092f84fa0d88fa11f46fa7 | /man/get_table_reverse.Rd | c2031bb4c310747eedc53bd739e5d66d7bc00132 | [] | no_license | gastonstat/cranium | a916a61595e7432385a410e5a90fe21e6a41cae4 | 1ba7a60fd7b3ef62c2d2f38d66151494f382f837 | refs/heads/master | 2020-05-24T13:24:13.206253 | 2015-08-26T00:03:01 | 2015-08-26T00:03:01 | 23,559,996 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 301 | rd | get_table_reverse.Rd | \name{get_table_reverse}
\alias{get_table_reverse}
\title{Get Table of Reverse Dependencies}
\usage{
get_table_reverse(pkg_doc)
}
\arguments{
\item{pkg_doc}{an object of class
\code{"HTMLInternalDocument"}}
}
\description{
Extracts the html table of reverse dependencies
}
\keyword{internal}
|
f8c3b9f6253c6821e0a5686d25ec958b7ba25871 | 74d75a48fbb88fb254f02fec1c4f0ba354ecd639 | /app_answers/1-7 Answer.R | 0bd552f0e584404cc4a607730f9fc67630470194 | [] | no_license | QFCatMSU/R-Class-Material | 2f00172444150a12a4b33dc050be12b04896f2af | c7a41975a4ab9e10f1267b80e3500506b41cf606 | refs/heads/master | 2021-07-12T19:27:27.471478 | 2021-03-02T15:05:04 | 2021-03-02T15:05:04 | 238,800,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 934 | r | 1-7 Answer.R | {
rm(list=ls()); options(show.error.locations = TRUE);
# Part A: get high temperature and weather condition from the user
highTemp = readline("What was the high temperature today? ");
highTemp = as.numeric(highTemp);
weatherCond = readline("What was the weather like (cloudy, sunny, or rainy)? ");
# Part B: give message if temperatures was less than 30
if(highTemp < 30)
{
cat("The high temperature,", highTemp, ", was less than 30.\n");
}
# Part C: give message if temperatures is greater than or equal to 80
if(highTemp >= 80)
{
cat("The high temperature,", highTemp, ", was greater than or equal to 80.\n");
}
# Part D: give message if weather is cloudy
if(weatherCond == "cloudy")
{
cat("It was a cloudy day.\n");
}
# Part E: give message if weather was not rainy
if(weatherCond != "rainy")
{
cat("It did not rain today.\n");
}
} |
5620c7d56ad523feb6e42a6cc5befb2bf7b06266 | fc19cbc296e0d3c27b796069279abbfef795d4b0 | /man/odometer.Rd | 449e2dc823a0c629a66699f11c720e3fdb02e57d | [] | no_license | stla/expansions | 3514c85d52c7570b23ce646948225cead8b3c32a | 29c77754a46d647864554e4f39f30249ee10e877 | refs/heads/master | 2020-05-21T23:48:56.615754 | 2017-01-08T22:10:52 | 2017-01-08T22:10:52 | 65,611,986 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,086 | rd | odometer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/odometers.R
\name{odometer}
\alias{odometer}
\alias{odometerBW}
\alias{odometerBW_iterated}
\alias{odometer_iterated}
\title{Odometer}
\usage{
odometer(x, base = 2L)
odometer_iterated(x, n, base = 2L)
odometerBW(x, base = 2L)
odometerBW_iterated(x, n, base = 2L)
}
\arguments{
\item{x}{a sequence of digits in the given base}
\item{base}{integer, the base of the expansion}
\item{n}{integer, power of the iteration}
}
\value{
\code{odometer} returns the transformation of \code{x} by the odometer; \code{odometer_iterated} returns the \code{n}-th iterate of the odometer; \code{odometerBW} returns the transformation of \code{x} by the backward odometer; \code{odometerBW_iterated} returns the \code{n}-th iterate of the backward odometer.
}
\description{
The odometer in a given integer base
}
\examples{
odometer(c(0,1))
odometer(c(1,1))
odometerBW(odometer(c(0,1))) == c(0,1)
odometer_iterated(c(0,1), n=2)
odometer_iterated(0, n=13) == intAtBase(13)
odometerBW_iterated(intAtBase(13), n=13) == 0
}
|
9b11db58252d1e6abec598564c9ef952df0c09f5 | a5ad9b63672831ca5c4e728f639bd50f87b9f410 | /GFforecastLessthan50.R | e6952d2eeb36b0746b021c574d47a794186d0630 | [] | no_license | royxss/GrocerySalesForecasting | 51dc17dbf7075b967fdaadac01ff1d33d8d7b998 | 87104a9fb378e5e4a80bd407be27bc675a357c2c | refs/heads/master | 2021-05-12T15:36:27.276256 | 2018-01-10T17:29:17 | 2018-01-10T17:29:17 | 116,989,099 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,590 | r | GFforecastLessthan50.R | setwd("C:\\Users\\SROY\\Documents\\CodeBase\\Datasets\\GrocerySalesForecast")
rm(list=ls())
seedVal = 17869
options(warn=-1)
options(scipen=999)
# Libraries
library(dplyr)
library(missForest)
library(VIM)
library(lubridate)
library(ggplot2)
library(reshape2)
library(zoo)
library(dummies)
library(forecastHybrid)
library(tsoutliers)
library(caret)
# holidays_events <- read.csv2("holidays_events.csv", header = TRUE, sep = ',')
# items <- read.csv2("items.csv", header = TRUE, sep = ',')
# oil <- read.csv2("oil.csv", header = TRUE, sep = ',')
# stores <- read.csv2("stores.csv", header = TRUE, sep = ',')
# transactions <- read.csv2("transactions.csv", header = TRUE, sep = ',')
# test <- read.csv2("test.csv", header = TRUE, sep = ',')
# train <- read.csv2("train.csv", header = TRUE, sep = ',')
#save.image('GFDataDump.RData')
# Count blanks / NAs / literal "NA" strings per column of the oil data.
apply(oil, 2, function(x) length(which(x == "" | is.na(x) | x == "NA")))
# 43 dcoilwtico
# apply(train[len], 2, function(x) length(which(x == "" | is.na(x) | x == "NA")))
# apply(train, 2, function(x) length(which(x == "" | is.na(x) | x == "NA")))
# not working
# --- holidays_events: parse dates, check for missing values ---
str(holidays_events)
holidays_events$date <- as.Date(holidays_events$date)
apply(holidays_events, 2, function(x) length(which(x == "" | is.na(x) | x == "NA")))
# --- items: categorical columns to factors ---
str(items)
items$class <- as.factor(items$class)
items$perishable <- as.factor(items$perishable)
items$item_nbr <- as.factor(items$item_nbr)
apply(items, 2, function(x) length(which(x == "" | is.na(x) | x == "NA")))
# --- oil: coerce price to numeric, then impute the 43 missing prices ---
str(oil)
oil$date <- as.character(oil$date)
oil$dcoilwtico <- as.numeric(as.character(oil$dcoilwtico))
apply(oil, 2, function(x) length(which(x == "" | is.na(x) | x == "NA")))
# Impute using KNN
# VIM::kNN appends an indicator column; it is dropped on the next line.
oil <- kNN(oil[,c('date','dcoilwtico')], variable = c('dcoilwtico'), k=5)
oil <- oil[,-3]
oil$date <- as.Date(oil$date)
# --- stores: cluster id is categorical ---
str(stores)
unique(stores$cluster)
stores$cluster <- as.factor(stores$cluster)
apply(stores, 2, function(x) length(which(x == "" | is.na(x) | x == "NA")))
# --- transactions: parse dates, store id to factor ---
str(transactions)
transactions$date <- as.Date(transactions$date)
unique(transactions$store_nbr)
transactions$store_nbr <- as.factor(transactions$store_nbr)
apply(transactions, 2, function(x) length(which(x == "" | is.na(x) | x == "NA")))
# --- test set: parse dates, ids to factors ---
str(test)
test$date <- as.Date(test$date)
test$store_nbr <- as.factor(test$store_nbr)
test$item_nbr <- as.factor(test$item_nbr)
# --- train set: parse dates/ids, numeric sales; empty onpromotion -> NA ---
str(train)
train$date <- ymd(train$date)
train$store_nbr <- as.factor(train$store_nbr)
train$item_nbr <- as.factor(train$item_nbr)
train$unit_sales <- as.numeric(as.character(train$unit_sales))
train$onpromotion <- as.character(train$onpromotion)
train$onpromotion <- ifelse(train$onpromotion == "", NA, train$onpromotion)
train$onpromotion <- as.factor(train$onpromotion)
# Share of training rows with unknown promotion status.
trainPromoMiss <- train %>% filter(is.na(onpromotion))
nrow(trainPromoMiss)*100/nrow(train)
# 17.25% missing Promo values
#save.image('GFDataDumpFormatted.RData')
############################## <= 50 mine #######################################
# Forecast unit sales for rarely-sold items: every item with at most 50
# sale records (among rows where onpromotion is known) gets a per-store
# ARIMA forecast over the 16-day test horizon.
train1 <- train %>% filter(!is.na(onpromotion))
freqitemstore <- train1 %>% group_by(item_nbr) %>% count()
freqitemstoreReduced <- freqitemstore %>% filter(n <= 50)
# Item numbers (as integers) with <= 50 observed sales.  Kept in its own
# variable so the `items` data frame loaded earlier is not clobbered
# (the original overwrote `items` here).
item_ids <- as.integer(as.character(freqitemstoreReduced$item_nbr))
# Training rows for those items, keyed by "store|item" and ordered so each
# per-store series comes out in date order.
dftrain <- train %>% filter(item_nbr %in% item_ids)
dftrain$itemstr <- paste0(dftrain$store_nbr, '|', dftrain$item_nbr)
dftrain <- dftrain %>% arrange(item_nbr, store_nbr, date)
row.names(dftrain) <- NULL
# Matching test rows; keep only store/item pairs that actually appear in
# the training data (otherwise there is no series to fit).
dftest <- test %>% filter(item_nbr %in% item_ids)
dftest$itemstr <- paste0(dftest$store_nbr, '|', dftest$item_nbr)
dftest <- dftest[dftest$itemstr %in% dftrain$itemstr, ]
dftest <- dftest %>% arrange(item_nbr, store_nbr, date)
row.names(dftest) <- NULL
dftest$tunitsales <- NA   # filled in one store/item pair at a time below
it <- 0
for (itm in item_ids) {
  it <- it + 1
  start_time <- Sys.time()
  print(paste0("Iteration ", it, ": Starting Item ", itm))
  df <- dftrain %>% filter(item_nbr == itm)
  # Loop variable renamed from `str` to `stn` so base::str() is not shadowed.
  for (stn in unique(as.integer(as.character(df$store_nbr)))) {
    df1 <- df %>% filter(store_nbr == stn)
    # Clean the series: non-negative integers, zeros floored to 1
    # (pmax is safe because round(abs(.)) is >= 0), outliers smoothed
    # by tsclean(), then re-floored to non-negative integers.
    tseries <- round(abs(df1$unit_sales))
    tseries <- pmax(tseries, 1)
    tseries <- round(abs(tsclean(tseries)))
    model.2 <- auto.arima(tseries)
    #model.2 <- ets(tseries)
    #model.2 <- nnetar(tseries)
    frcst <- forecast(model.2, h = 16)
    dftest[dftest$item_nbr == itm & dftest$store_nbr == stn, 'tunitsales'] <- round(abs(frcst$mean))
  }
  end_time <- Sys.time()
  # format() keeps the difftime units (secs/mins) in the log message;
  # the original paste0() printed a bare number with ambiguous units.
  print(paste0("Time Elapsed: ", format(difftime(end_time, start_time))))
}
apply(dftest, 2, function(x) length(which(is.na(x))))
# Floor any zero forecasts to 1, matching the training-series treatment.
dftest$tunitsales <- ifelse(dftest$tunitsales == 0, 1, dftest$tunitsales)
write.table(dftest, file = "resultForLessitem50ARIMAmine.csv", quote = FALSE, row.names = FALSE, sep = ",")
# ############################### < 20 mishra #######################################
# freqitemstore <- train %>% group_by(item_nbr, store_nbr) %>% count()
# freqitemstoreReduced <- freqitemstore %>% filter(n < 20)
#
# dftrain <- inner_join(train, freqitemstoreReduced)
# dftrain$itemstr <- paste0(dftrain$store_nbr,'|',dftrain$item_nbr)
# dftrain <- dftrain %>% arrange(item_nbr,store_nbr,date)
# row.names(dftrain) <- NULL
#
# dftest <- inner_join(test, freqitemstoreReduced)
# dftest$itemstr <- paste0(dftest$store_nbr,'|',dftest$item_nbr)
# dftest <- dftest[dftest$itemstr %in% dftrain$itemstr,]
# dftest <- dftest %>% arrange(item_nbr,store_nbr,date)
# row.names(dftest) <- NULL
#
# items <- unique(as.integer(as.character(dftrain$item_nbr)))
#
# dftest$tunitsales <- NA
# it=0
# for (itm in items){
# it<-it+1
# #itm=1005458
# start_time <- Sys.time()
# print(paste0("Iteration ",it,": Starting Item ",itm))
# df <- dftrain %>% filter(item_nbr == itm)
# stores <- unique(as.integer(as.character(df$store_nbr)))
# for (str in stores){
# #str=37
# df1 <- df %>% filter(store_nbr == str)
# tseries <- round(abs(df1$unit_sales))
# tseries <- ifelse(tseries == 0, 1, tseries)
# tseries <- round(abs(tsclean(tseries)))
# model.2 <- auto.arima(tseries)
# #model.2 <- ets(tseries)
# #model.2 <- nnetar(tseries)
# frcst <- forecast(model.2, h = 16)
#
# #model.2 <- auto.arima(tseries[1:10]);frcst <- forecast(model.2, h = 7);frcst$mean
# #ensbl.1 <- hybridModel(tseries[1:10], models="aent"); frcst <- forecast(ensbl.1, h = 7);frcst$pointForecasts
# dftest[dftest$item_nbr == itm & dftest$store_nbr == str, 'tunitsales'] <- round(abs(frcst$mean))
# }
# end_time <- Sys.time()
# print(paste0("Time Elapsed: ",end_time - start_time))
# }
# apply(dftest, 2, function(x) length(which(is.na(x))))
# dftest$tunitsales <- ifelse(dftest$tunitsales == 0, 1, dftest$tunitsales)
# write.table(dftest, file = "resultForLess20ARIMA.csv", quote = FALSE, row.names=FALSE, sep=",")
############################### mishra check Inf #######################################
#
# tt <- train %>% filter(item_nbr==2054300)
#
# pred21 <- read.csv2("inf_after_123456.csv", header = TRUE, sep = ',')
# pred21$itemstr <- paste0(pred21$store_nbr,'|',pred21$item_nbr)
# pred21$item_nbr <- as.factor(pred21$item_nbr)
# pred21$store_nbr <- as.factor(pred21$store_nbr)
#
# dftrain <- inner_join(train, pred21)
# dftrain <- dftrain %>% arrange(item_nbr,store_nbr,date)
# row.names(dftrain) <- NULL
# unique(dftrain$item_nbr)
#
# # dftest <- inner_join(test, pred21)
# # dftest <- dftest[dftest$itemstr %in% dftrain$itemstr,]
# # dftest <- dftest %>% arrange(item_nbr,store_nbr,date)
# # row.names(dftest) <- NULL
# dftest <- dftrain[0,c(2,3,4,5)]
# names(dftest) <- c('date','item_nbr','store_nbr','unit_sales')
# items <- unique(as.integer(as.character(dftrain$item_nbr)))
# it=0
# testdates <- as.character(test[test$item_nbr==96995 & test$store_nbr==1, 'date'])
# for (itm in items){
# it<-it+1
# #itm=1001305
# start_time <- Sys.time()
# print(paste0("Iteration ",it,": Starting Item ",itm))
# df <- dftrain %>% filter(item_nbr == itm)
# stores <- unique(as.integer(as.character(df$store_nbr)))
# for (str in stores){
# #str=22
# df1 <- df %>% filter(store_nbr == str)
# tseries <- round(abs(df1$unit_sales))
# tseries <- ifelse(tseries == 0, 1, tseries)
# tseries <- round(abs(tsclean(tseries)))
# #model.2 <- hybridModel(tseries, models="ae") #frcst$pointForecasts
# model.2 <- auto.arima(tseries)
# #model.2 <- ets(tseries)
# #model.2 <- nnetar(tseries)
# frcst <- forecast(model.2, h = 16)
#
# #model.2 <- auto.arima(tseries[1:10]);frcst <- forecast(model.2, h = 7);frcst$mean
# #ensbl.1 <- hybridModel(tseries, models="aent"); frcst <- forecast(ensbl.1, h = 16); frcst$pointForecasts
# #temp <- data.frame(cbind(rep(itm, 16), rep(str, 16), round(abs(frcst$mean))))
# temp <- data.frame(cbind(testdates,rep(itm, 16), rep(str, 16), round(abs(frcst$mean))))
# names(temp) <- c('date','item_nbr','store_nbr','unit_sales')
# dftest <- rbind(dftest, temp)
# #dftest[dftest$item_nbr == itm & dftest$store_nbr == str, 'tunitsales'] <- round(abs(frcst$mean))
# temp <- temp[0,]
# }
# end_time <- Sys.time()
# print(paste0("Time Elapsed: ",end_time - start_time))
# }
# names(dftest) <- c('date','item_nbr','store_nbr','unit_sales')
# apply(dftest, 2, function(x) length(which(is.na(x))))
# dftest$unit_sales <- ifelse(dftest$unit_sales == 0, 1, dftest$unit_sales)
# write.table(dftest, file = "resultInfExtraARIMA.csv", quote = FALSE, row.names=FALSE, sep=",")
#
# ############################### mishra check CV #######################################
#
# pred2 <- read.csv2("prediction2_left.csv", header = TRUE, sep = ',')
# pred21 <- pred2[,c(1,2)]
# pred21$itemstr <- paste0(pred21$store,'|',pred21$item)
# names(pred21) <- c('item_nbr','store_nbr','itemstr')
# pred21$item_nbr <- as.factor(pred21$item_nbr)
# pred21$store_nbr <- as.factor(pred21$store_nbr)
#
# pred3 <- read.csv2("prediction3_left.csv", header = TRUE, sep = ',')
# pred31 <- pred3[,c(1,2)]
# pred31$itemstr <- paste0(pred31$store,'|',pred31$item)
# names(pred31) <- c('item_nbr','store_nbr','itemstr')
# pred31$item_nbr <- as.factor(pred31$item_nbr)
# pred31$store_nbr <- as.factor(pred31$store_nbr)
#
# dftrain <- inner_join(train, pred21)
# dftrain <- dftrain %>% arrange(item_nbr,store_nbr,date)
# row.names(dftrain) <- NULL
#
# # dftest <- inner_join(test, pred21)
# # dftest <- dftest[dftest$itemstr %in% dftrain$itemstr,]
# # dftest <- dftest %>% arrange(item_nbr,store_nbr,date)
# # row.names(dftest) <- NULL
# dftest <- dftrain[0,c(2,3,4,5)]
# names(dftest) <- c('date','item_nbr','store_nbr','unit_sales')
# items <- unique(as.integer(as.character(dftrain$item_nbr)))
# it=0
# testdates <- as.character(test[test$item_nbr==96995 & test$store_nbr==1, 'date'])
# for (itm in items){
# it<-it+1
# #itm=2013931
# start_time <- Sys.time()
# print(paste0("Iteration ",it,": Starting Item ",itm))
# df <- dftrain %>% filter(item_nbr == itm)
# stores <- unique(as.integer(as.character(df$store_nbr)))
# for (str in stores){
# #str=45
# df1 <- df %>% filter(store_nbr == str)
# tseries <- round(abs(df1$unit_sales))
# tseries <- ifelse(tseries == 0, 1, tseries)
# tseries <- round(abs(tsclean(tseries)))
# #model.2 <- auto.arima(tseries)
# model.2 <- ets(tseries)
# #model.2 <- nnetar(tseries)
# frcst <- forecast(model.2, h = 16)
#
# #model.2 <- auto.arima(tseries[1:10]);frcst <- forecast(model.2, h = 7);frcst$mean
# #ensbl.1 <- hybridModel(tseries[1:10], models="aent"); frcst <- forecast(ensbl.1, h = 7);frcst$pointForecasts
# #temp <- data.frame(cbind(rep(itm, 16), rep(str, 16), round(abs(frcst$mean))))
# temp <- data.frame(cbind(testdates,rep(itm, 16), rep(str, 16), round(abs(frcst$mean))))
# names(temp) <- c('date','item_nbr','store_nbr','unit_sales')
# dftest <- rbind(dftest, temp)
# #dftest[dftest$item_nbr == itm & dftest$store_nbr == str, 'tunitsales'] <- round(abs(frcst$mean))
# temp <- temp[0,]
# }
# end_time <- Sys.time()
# print(paste0("Time Elapsed: ",end_time - start_time))
# }
# names(dftest) <- c('date','item_nbr','store_nbr','unit_sales')
# apply(dftest, 2, function(x) length(which(is.na(x))))
# dftest$unit_sales <- ifelse(dftest$unit_sales == 0, 1, dftest$unit_sales)
# write.table(dftest, file = "resultFormorethan50ETS.csv", quote = FALSE, row.names=FALSE, sep=",")
|
7f007098cf3568fe6f0495eacea74de0d09427c6 | aed2396d1ebc7f4177c6f676600c8ea808032dad | /R/utils-functions.R | 14889c13421485b4efcfc7c34c31db18bdaf58dc | [] | no_license | juliangehring/SomaticCancerAlterations | 56c0981804bb29fca9f0db09fcf5b88df63154a2 | 6c98820c9d4dd06556b2c5c938926f30c2d79b87 | refs/heads/master | 2021-01-24T19:52:20.552312 | 2020-11-28T18:58:54 | 2020-11-28T18:58:54 | 316,799,631 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,294 | r | utils-functions.R |
## List the names of the alteration data sets shipped with the package,
## excluding the "meta" object (which holds metadata, not alterations).
scaListDatasets <- function() {
    items <- data(package = "SomaticCancerAlterations")$results[ , "Item"]
    items[items != "meta"]
}
## Load one or more of the package's data sets by name.
##
## 'names' defaults to every available data set; unknown names raise an
## error listing the offending entries.  With merge = TRUE the per-dataset
## ranges are concatenated into a single GRanges whose 'Dataset' column
## records the originating data set.
scaLoadDatasets <- function(names, merge = FALSE) {
    all_datasets = scaListDatasets()
    if(missing(names))
        names = all_datasets
    if(!all(idx <- names %in% all_datasets)) {
        msg = sprintf("Data set %s not found.",
                      paste(names[!idx], collapse = ", "))
        stop(msg)
    }
    ## load each requested data set into a named list
    x = sapply(names, .load_dataset, simplify = FALSE, USE.NAMES = TRUE)
    res = GRangesList(unlist(x), compress=FALSE)
    if(merge) {
        ## one dataset label per range, repeating each name once per record
        ## (the redundant outer rep() wrapper of the original was dropped:
        ## rep(x) with no other arguments is the identity)
        datasets = factor(rep(names(res), lengths(res)))
        res = unlist(res)
        res$Dataset = datasets
    }
    return(res)
}
## Return the metadata table bundled with the package (the "meta" data object).
scaMetadata <- function() {
    .load_dataset("meta")
}
## Load a single data object from a package into a throwaway environment
## (so the caller's workspace is never touched) and return it.
.load_dataset <- function(name, package = "SomaticCancerAlterations") {
    sandbox <- new.env()
    data(list = name, package = package, envir = sandbox)
    get(name, envir = sandbox)
}
## Convert sequence naming from NCBI to UCSC ("hg") conventions.
ncbi2hg <- function(x) {
    ## switch chromosome naming to the UCSC style
    seqnameStyle(x) <- "ucsc"
    ## clear the genome annotation; the previous build label no longer applies
    genome(x) <- NA
    x
}
## Convert sequence naming from UCSC ("hg") to NCBI conventions.
hg2ncbi <- function(x) {
    ## switch chromosome naming to the NCBI style
    seqnameStyle(x) <- "ncbi"
    ## clear the genome annotation; the previous build label no longer applies
    genome(x) <- NA
    x
}
## Sequence (chromosome) names of 'x' as a plain character vector.
seqchr <- function(x) {
    as.character(seqnames(x))
}
|
28ffc07378135ac4f471ff3fbeda4ceb1aafebe1 | 1f32096af05da776c59a11b74a424637aa718113 | /dsp_runner/summarization.R | 31cf76a746f472b79b155b75d2af89d81767221e | [] | no_license | ohsu-comp-bio/compbio-galaxy-wrappers | a222dbef5d4d4101f1705c6101f2e212435d1ea8 | 6162bc6d8ee37401de8dffec545935953028bed7 | refs/heads/master | 2023-08-31T05:32:22.305366 | 2023-08-29T18:24:59 | 2023-08-29T18:24:59 | 32,424,561 | 6 | 13 | null | 2023-09-14T19:28:18 | 2015-03-17T22:40:39 | Python | UTF-8 | R | false | false | 12,988 | r | summarization.R | #Summarization Functions
#' Generate quantile interpolation functions from a reference set
#'
#' For every feature (row) of the reference matrix an interpolating function
#' is built that maps a normalized value onto its quantile within that
#' feature's reference distribution.
#'
#' @param mat A numeric matrix of features (e.g. antibodies) x samples, expected to be normalized.
#' Should be from a reference set
#' @return A named list per feature containing an \code{approxfun} function
#' @import data.table
#' @import stats
#' @export
quant_func <- function(mat){
    probs <- seq(0, 1, by=.01)
    features <- rownames(mat)
    lapply(setNames(features, features), function(feat){
        ## reference value observed at each percentile of this feature
        ref.vals <- quantile(mat[feat,], probs)
        ## rule = 2: values outside the reference range clamp to quantile 0/1
        approxfun(x=ref.vals, y=probs, rule=2)
    })
}
#' Interpolate corresponding quantiles from a reference distribution
#'
#' Evaluates, for every feature/sample pair in \code{mat}, the quantile of the
#' observed normalized value under the reference distribution captured by the
#' interpolation functions produced by \code{quant_func}.
#'
#' @param interp.list A list of \code{approxfun}'s as derived from \code{quant_func}
#' @param mat A numeric matrix of features (e.g. antibodies) x samples, expected to be normalized.
#' @return A \code{data.table} containing the corresponding normalized data and quantile for each feature and barcode.
#' @import data.table
#' @export
get_quants <- function(interp.list, mat){
    Var1=Var2=value=`.`=NULL # due to NSE notes in R CMD check
    # coerce e.g. a data.frame input so the row indexing below is uniform
    if (is.matrix(mat)==F){
        mat <- as.matrix(mat)
    }
    # apply each feature's interpolation function to that feature's values;
    # data.frame(t(mat)) yields one column per feature, which mapply() walks
    # in parallel with rownames(mat)
    tmp.quants <- mapply(function(x,y){
        interp.list[[x]](y)
    }, rownames(mat), data.frame(t(mat)))
    # with a single sample mapply() collapses to a plain vector, so rebuild
    # a feature x sample matrix in either case
    if (is.matrix(tmp.quants)==F){
        ref.quants <- as.matrix(tmp.quants)
    }else{
        ref.quants <- t(tmp.quants)
    }
    colnames(ref.quants) <- colnames(mat)
    # long format of the input values, joined with the matching quantiles
    ref.melt <- data.table(reshape2::melt(mat, as.is=T))
    ref.melt <- merge(ref.melt, data.table(reshape2::melt(ref.quants, as.is=T))[,.(Var1, Var2, quant=value)], by=c("Var1", "Var2"))
    names(ref.melt) <- c("ProbeName", "barcode", "norm", "quant")
    ref.melt
}
#' Compute mean and standard deviation for a reference set
#'
#' @param mat A numeric matrix of features (e.g. antibodies) x samples, expected to be normalized.
#' Should be from a reference set
#' @return A \code{data.table} containing the estimated mean and standard deviation from the matrix for each feature
#' @import data.table
#' @import matrixStats
#' @export
learn_mean_sd <- function(mat){
    feature.means <- matrixStats::rowMeans2(mat)
    feature.sds <- matrixStats::rowSds(mat)
    data.table(ProbeName=rownames(mat), ref_mean=feature.means, ref_sd=feature.sds)
}
#' Compute maximum correlation amongst ROIs
#'
#' @param mat A numeric matrix features (e.g. antibodies) x samples, expected to be un-normalized and log2 transformed
#' @param meta A \code{data.table} containing metadata as returned by \code{process_batches}
#' @param roi.thresh The lowest correlation value before considering the ROI(s) an outlier
#' @return A new copy of 'meta' containing the maximum correlation amongst ROIs per sample and 'rm_croi'
#' a flag indicating if the max value was less than the specified threshold
#' @import data.table
#' @import matrixStats
#' @import stats
#' @export
flag_roi <- function(mat, meta, roi.thresh=.9){
    tmp.meta <- copy(meta)
    rm_croi=max_cor=`Segment (Name/ Label)`=NULL # due to NSE notes in R CMD check
    # long-format expression values joined to the metadata by sample barcode
    melt.exp <- data.table(reshape2::melt(mat, as.is=T))
    melt.exp.m <- merge(tmp.meta, melt.exp, by.x="barcode", by.y="Var2")
    # for every segment/sample/batch group, correlate the ROIs against each
    # other (features x ROI matrix) and keep each ROI's best correlation with
    # any sibling ROI; the diagonal (self-correlation) is masked out first
    rep.cors <- rbindlist(lapply(split(melt.exp.m, by=c("Segment (Name/ Label)","sample_id", "batch")), function(x){
        tmp.cor <- cor(reshape2::acast(Var1~croi,value.var="value", data=x))
        diag(tmp.cor) <- NA
        data.table(`Segment (Name/ Label)`=x[1,`Segment (Name/ Label)`],sample_id=x$sample_id[1], batch=x$batch[1],croi=colnames(tmp.cor), max_cor=matrixStats::colMaxs(tmp.cor, na.rm=T))
    }))
    # a group with a single ROI has no sibling, so colMaxs() over an all-NA
    # column yields -Inf; treat such ROIs as perfectly concordant
    rep.cors[is.infinite(max_cor), max_cor:=1]
    # every metadata row must have received exactly one max_cor value
    if (rep.cors[,.N] != tmp.meta[,.N]){
        stop("ERROR: A size mismatch has occured, please check metadata to ensure 'Segment (Name/ Label)', 'sample_id' and 'batch' look sane")
    }
    tmp.meta <- merge(tmp.meta, rep.cors, by=c("Segment (Name/ Label)", "sample_id", "batch", "croi"))
    # flag ROIs whose best correlation falls below the outlier threshold
    tmp.meta[,rm_croi:=max_cor < roi.thresh]
    tmp.meta
}
## Average the first `num.roi.avg` ROIs (in ascending numeric order) of each
## sample/batch group within every segment.  Returns, per segment, a list with
## summarized metadata (one row per averaged barcode, with its ROI count) and
## the ROI-averaged feature x sample abundance matrix.
.summarize_roi <- function(norm.mat, meta, num.roi.avg=3){
    # due to NSE notes in R CMD check
    num_batch=sample_id=`Segment (Name/ Label)`=croi=use_roi=avg_barcode=`.`=NULL
    ##we will first order by increasing ROI
    ord.meta <- meta[order(num_batch, sample_id, `Segment (Name/ Label)`, as.numeric(croi))]
    # the first num.roi.avg ROIs of each group are pooled under the common
    # label "avg"; any remaining ROIs keep their own croi label
    ord.meta[,use_roi:=ifelse(seq_len(.N) %in% seq_len(num.roi.avg), "avg", croi),by=.(num_batch, sample_id, `Segment (Name/ Label)`)]
    segment.proc <- lapply(split(ord.meta, by="Segment (Name/ Label)"), function(x){
        #average over the ROIs
        avg.abund <- sapply(split(x, by=c("sample_id", "num_batch", "use_roi")), function(y){
            rowMeans(norm.mat[,y$barcode,drop=F])
        })
        new.x <- copy(x)
        # new barcode uniquely identifies the sample/batch/(avg-or-ROI) column
        new.x[,avg_barcode:=paste(sample_id, num_batch, use_roi, sep=".")]
        list(meta=new.x[,.(num_ROI=.N),by=.(avg_barcode, sample_id=sample_id, num_batch)], avg_abund=avg.abund)
    })
    segment.proc
}
#' High-level procedure for pre-processing DSP data using TMAs
#'
#' This function first computes RUVIII normalization factors after background correction
#' for the tumor microarrays (TMAs). These factors are then applied to the experimental
#' data after background correction. The resulting matrix is then averaged over the ROIs
#' to produce normalized antibody x sample matrices for each segment.
#'
#' @param tma.meta A \code{data.table} containing the following columns: \describe{
#'  \item{num_batch}{Numeric batch identifier or other ordered run signifier such as date}
#'  \item{barcode}{The unique sample identifier for the TMA sample/run}
#'  \item{name}{Harmonized TMA sample name, consistent across batches}
#'  \item{type}{The 'Type' label of TMA sample, only sample's with values in
#'  \code{use.type}will be used}
#'  }
#' @param tma.mat A numeric matrix (log2 scale) with dimensions: features (e.g. antibodies) x TMA sample barcodes
#' @param exp.meta A \code{data.table} needs to have the following columns: \describe{
#'  \item{Segment (Name/ Label)}{Segment Label}
#'  \item{sample_id}{Sample Identifier}
#'  \item{barcode}{The unique sample identifier for the sample/run}
#'  \item{num_batch}{Numeric batch identifier or other ordered run signifier such as date}
#'  \item{croi}{Corrected region of interest typically as generated in a previous step}
#' }
#' @param exp.mat A numeric matrix (log2 scale) containing experimental data with
#'  dimensions: features (e.g. antibodies) x (# segments x # rois x # samples)
#' @param igg.map A \code{data.table} containing the mapping between 'ProbeName'
#'  and corresponding 'igg'.
#' @param bg.method, choice of background correction method (defaults to no correction)
#' @param controls The specific features used for the correction, defaults to all features
#' @param use.type A character vector of values in the \code{tma.meta} \code{type} column to be used.
#'  Defaults to 'quant'.
#' @param k The number of PC's used as part of the correction
#' @param num.roi.avg The number of ROIs to average, assumes they are numbered as the first 1:num.roi.avg
#' @return A list with one element per segment each of which contains: \describe{
#'  \item{meta}{A summarized meta \code{data.table} containing the number of ROIs, the new sample barcodes ('avg_barcode'), sample ID and averaged abundance }
#'  \item{avg_abund}{A features (e.g. antibodies) x samples numeric matrix}}
#'
#' @import data.table
#' @export
preprocess_dsp_tma <- function(tma.meta, tma.mat, exp.meta, exp.mat, igg.map=NULL, bg.method=c('none', 'log2_ratio', 'diff'), controls=NULL, use.type='quant', k=2, num.roi.avg=3){
    tmpval=type=sample_id=num_batch=NULL # due to NSE notes in R CMD check
    #compute normalization factors from tma
    meta.cp <- copy(tma.meta)
    meta.cp[,tmpval:=1]
    # barcode x TMA-sample 0/1 replicate-indicator matrix, restricted to the
    # requested sample types
    cl.repmat <- reshape2::acast(barcode~name, value.var="tmpval", data=meta.cp[type %in% use.type], fun.aggregate = function(x) as.integer(length(x) > 0))
    # each retained TMA sample must appear once per batch to define the RUV
    # replicate structure; NOTE(review): all(... != ...) only triggers when
    # *every* column count mismatches -- confirm whether any() was intended
    if (all(colSums(cl.repmat) != meta.cp[,length(unique(num_batch))])){
        stop("ERROR: Samples are not found for every batch")
    }
    # NOTE(review): bg.method is forwarded without match.arg() here;
    # presumably bg_correct() resolves/validates the choice -- confirm
    bg.tma.mat <- bg_correct(tma.mat, igg.map, bg.method)
    cl.pcs <- compute_factors(bg.tma.mat, cl.repmat)
    #apply bg correction to experimental and apply normalization
    bg.exp.mat <- bg_correct(exp.mat, igg.map, bg.method)
    #apply RUV normalization, note that it is technically applied sample-by-sample so ok to mix tumor/stroma segments here
    norm.igg.abund <- apply_norm(bg.exp.mat, cl.pcs$pcs, k, controls)
    # collapse replicate ROIs into per-sample averages, split by segment
    .summarize_roi(norm.igg.abund, exp.meta, num.roi.avg)
}
#' Cohort-level scoring of abundance data
#'
#' Scores antibody abundance data with respect to a reference cohort returning either the estimated reference quantile ('quant') or
#' a robust version of the Zscore ('rscore') indicating deviation from the reference cohort median. If the set of reference samples
#' isn't supplied, uses the entire set of input samples as the reference cohort therefore producing an intra-cohort scoring.
#'
#' @param norm.list A list per segment as generated by `preprocess_dsp_tma` with each element
#' containing a list with two elements: \describe{
#'  \item{meta}{A summarized meta \code{data.table} containing the number of ROIs, the new sample barcodes ('avg_barcode'), sample ID and averaged abundance }
#'  \item{avg_abund}{A features (e.g. antibodies) x samples numeric matrix}
#'  }
#'  When \code{stroma = FALSE} a single such two-element list (one segment) is expected instead.
#' @param ref.samples A character vector of sample IDs (matched against 'sample_id') containing the samples to use as the reference cohort.
#' If missing or NULL will treat every sample as part of the reference.
#' @param stroma Logical; if TRUE (the default) \code{norm.list} is treated as a per-segment
#'  list and every segment is scored, otherwise \code{norm.list} is scored as a single segment.
#' @param score.type One of either reference quantile ('quant') or robust Zscore ('rscore')
#' @return A list with one element per segment: \describe{
#'  \item{scores}{A \code{data.table} containing the antibody name (ProbeName), barcode, normalized abundance and a corresponding 'quant' or 'rscore' column }
#'  \item{ref_abund}{A \code{data.table} containing the antibody name (ProbeName), barcode and normalized abundance for the reference cohort}
#'  }
#'  For \code{stroma = FALSE} the two-element list is returned directly (no per-segment nesting).
#' @import matrixStats
#' @import data.table
#' @export
score_abs <- function(norm.list, ref.samples=NULL, stroma=TRUE, score.type=c("quant", "rscore")){
    score.type <- match.arg(score.type)
    no.ref <- missing(ref.samples) || is.null(ref.samples)
    if (stroma){
        ## score every segment through the shared helper (replaces the
        ## copy-pasted duplication of the two original branches)
        lapply(norm.list, .score_one_segment,
               ref.samples=ref.samples, no.ref=no.ref, score.type=score.type)
    }else{
        .score_one_segment(norm.list, ref.samples, no.ref, score.type)
    }
}

## Score a single segment (a list with 'meta' and 'avg_abund') against the
## reference cohort; factored out of score_abs() where the identical code
## previously appeared twice.
.score_one_segment <- function(nl, ref.samples, no.ref, score.type){
    mads=sample_id=avg_barcode=rscore=medians=NULL # due to NSE notes in R CMD check
    if (no.ref){
        ## intra-cohort scoring: every sample is both reference and target
        tmp.ref <- nl$avg_abund
        tmp.exp <- nl$avg_abund
    }else{
        ## reference columns versus the remaining (experimental) columns
        tmp.ref <- nl$avg_abund[,nl$meta[sample_id %in% ref.samples,avg_barcode],drop=F]
        tmp.exp <- nl$avg_abund[,nl$meta[!(sample_id %in% ref.samples),avg_barcode],drop=F]
    }
    if (score.type == "quant"){
        ## estimated reference quantile per antibody/sample
        tmp.interp <- quant_func(tmp.ref)
        tmp.quants <- get_quants(tmp.interp, tmp.exp)
        names(tmp.quants)[2] <- "avg_barcode"
    }else{
        ## robust zscore (median/MAD based):
        ## https://stats.stackexchange.com/questions/523865/calculating-robust-z-scores-with-median-and-mad
        ref.mstats <- data.table(ProbeName=rownames(tmp.ref), mads=rowMads(tmp.ref, constant=1), medians=rowMedians(tmp.ref))
        tmp.quants <- setNames(data.table(reshape2::melt(tmp.exp, as.is=T)), c("ProbeName", "avg_barcode","norm"))
        tmp.quants <- merge(tmp.quants, ref.mstats, by="ProbeName")
        tmp.quants[,rscore:=(norm-medians)/mads]
        tmp.quants[,`:=`( mads=NULL, medians=NULL)]
    }
    list(
        scores=tmp.quants,
        ref_abund=setNames(data.table(reshape2::melt(tmp.ref, as.is=T)), c("ProbeName", "avg_barcode","norm"))
    )
}
|
c67a96ab256bf41838c3bbcd7d42b336a21335a5 | bd13b6a7b1565163100ebba110e550224a78fb7d | /plot2.R | e8467e50d5f6f272fd10fc84e6ae4c40d1e5676e | [] | no_license | kikachica/Exploratory | e2df05cad49fd6062d6bf439ff89a1c1518a73c7 | 51cfdf180aa55792592a6ec288171e9afd028cf0 | refs/heads/master | 2021-01-25T12:19:45.495300 | 2015-05-06T15:20:36 | 2015-05-06T15:20:36 | 35,110,970 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 952 | r | plot2.R | #======setting my local WD and changing system time to English======
#====== set local working directory and use English date/time formatting ======
# NOTE: hard-coded personal path kept from the original script; adjust as needed.
setwd("~/Coursera/Exploratory Data Analysis")
Sys.setlocale("LC_TIME", "English")
#====== install "sqldf" only when missing (the original re-installed it on every run) ======
if (!requireNamespace("sqldf", quietly = TRUE)) {
  install.packages("sqldf")
}
library(sqldf)
#====== read the data and subset to the two requested dates via SQL ======
file<-read.table("power.txt", header=TRUE, sep=";", stringsAsFactors=FALSE)
Mysql<-"select * from file where Date = '1/2/2007' or Date='2/2/2007'"
myData<-sqldf(Mysql, row.names=FALSE)
rm(file)  # drop the full table; only the two-day subset is needed
#====== parse date and time into a single date-time vector ======
datetime <- strptime(paste(myData$Date, myData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#====== convert measurement column to numeric (non-numeric entries become NA) ======
myData$Global_active_power<-as.numeric(myData$Global_active_power)
#====== plot2: line chart of global active power, written to a 480x480 PNG ======
png("plot2.png", width=480, height=480)
with(myData, plot(datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
|
4fe8b58ca01071abbf560c69712d5a4b074dad16 | cb960fb7e833bb7d41047beec12beff3c31871b0 | /man/CalcTrapezoidalAUC.Rd | 9b9ffe42cdd061ef259b53840d6bedd4e7f538d9 | [] | no_license | wlktan/surrogateSampling | e539514f65742cd5cabb96d99ba622f7b53e3900 | e15ca05d1bfe2c96f446ff2902100784185f1c1e | refs/heads/master | 2021-09-10T02:35:37.868690 | 2018-03-20T18:41:44 | 2018-03-20T18:41:44 | 103,601,101 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 577 | rd | CalcTrapezoidalAUC.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CalcTrapezoidalAUC.R
\name{CalcTrapezoidalAUC}
\alias{CalcTrapezoidalAUC}
\title{CalcTrapezoidalAUC}
\usage{
CalcTrapezoidalAUC(sens, spec)
}
\arguments{
\item{sens}{numeric; sensitivity of the binary test}
\item{spec}{numeric; specificity of the binary test}
}
\value{
the area under the ROC curve of the binary test for the binary outcome
}
\description{
This function returns the AUC (area under the ROC curve) of a binary test,
computed according to the trapezoidal rule.
}
\examples{
CalcTrapezoidalAUC(0.2,0.8)
}
\keyword{auc}
|
ca1e13f10646af6bca4212281716469d1468b04c | 4d4c59511f4d62528d2c6510d9fe81d3ec957455 | /Day 2_Class Practice.R | b15cef6fd3a55276a09d9a07c311dfc211666559 | [] | no_license | ayushishrivastava16/AMMA_MICA_PGP2 | 6a551789af1c85dbc2523e1e3638c70ec6db2194 | 58e89f1ba6145c23ebbf0741487a493e9100ea52 | refs/heads/master | 2021-01-23T16:25:17.779389 | 2017-09-07T20:26:04 | 2017-09-07T20:26:04 | 102,741,777 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,484 | r | Day 2_Class Practice.R | ##Day 2
##data frame: build a small student table two ways
student <- data.frame(Name=c(LETTERS[1:5]),
                      Age = c(23,22,21,25,20),
                      Maths_marks= c(87,86,83,88,89),
                      Science_marks= c(65,81,78,55,93))
# the same table assembled from standalone vectors
Name1 <- c(LETTERS[1:5])
Age1 <- c(23,22,21,25,20)
Maths_marks1 <- c(87,86,83,88,89)
Science_marks1 <- c(65,81,78,55,93)
student1 <- data.frame(Name1,Age1,Maths_marks1,Science_marks1)
##add column
student$total_marks <- student$Maths_marks + student$Science_marks
##check
# FIX: the original read `student$total_marks.2` (no such column); the
# intended rounding digits argument `, 2` had been typed as `.2`.
student$pct_maths_marks <- round(100*student$Maths_marks/student$total_marks, 2)
##drop or remove columns
student2 <- student[,c(2:6)]
student2 <- student[,-1]
student2 <- student1[,-c(2,3)]
rm(student1)
student1 <- student
student1[1,2] <- 70
student1 <- student[,"Name"]
# inspect and (trivially) reassign column names
names(student)
names_student <- names(student)
names(student) <- names_student
# vectors: build a sequence and overwrite one element
vect1 <- seq(1,10,by=2)
vect1[3] <- 100
v_col <- names(student)
v_col[3] <- 'New_maths'
student3 <- student
names(student3) <- v_col
##Check if column name with Value and then change it
names(student3[4])
##transformation: derived columns from Age
student3$log_age <- log(student3$Age)
student3$exp_age <- exp(student3$Age)
student3$inv_age <- 1/(student3$Age)
student3$sqrt_age <- sqrt(student3$Age)
student3$sqr_age <- student3$Age*student3$Age
student3$exp_age <- exp(student3$Age/mean(student$Age))  # overwrites exp_age above
# coercion: non-numeric strings become NA (with a warning)
class(student$Name)
student$Name <- as.character(student$Name)
nn <- c("xx","23",24,"78.6")
nn.num <- as.numeric(nn)
is.na(nn.num)
table(is.na(nn.num))
## Read Data From Facebook- 232
##readHTMLTable
##Conditional selection
##excluding based on criteria
View(student)
student$Age>=23
s1 <- student$Age>=23
student4 <- student[s1,]
student4 <- student[!s1,]
student5 <- student[student$Science_marks>80 & student$Maths_marks>80, c(1:5)]
?sample
# FIX: spell out FALSE (the shorthand F can be reassigned)
sample_index <- sample(1:nrow(student),3,replace = FALSE)
student[sample_index,]
## select both observations and variables
## 2 more data frames
##Combining vertically - appending & combining
##and horizontally - merging
## Class 6
# NOTE(review): missing_value_tbl is never created in this script --
# presumably it comes from an earlier session; these lines error without it.
missing_value_tbl
missing_value_tbl$missing_age <- is.na(missing_value_tbl$Age)
missing_value_tbl$missing_sex <- is.na(missing_value_tbl$Sex)
missing_value_tbl$missing_amt <- is.na(missing_value_tbl$Spend_Amt)
# FIX: the original indexed with a single logical vector, which selects
# *columns* of a data frame; the trailing comma makes it the intended row filter.
missing_non <- missing_value_tbl[!missing_value_tbl$missing_age | !missing_value_tbl$missing_sex, ]
##class 8
windows()
|
e52983e8f0d9cfb53c304ba1fb17bf07de82e89b | 7ac89d07d43cc2de73f02805bf6df1bf07484478 | /2-3-2 differences from data frames.R | 131f8925a913346932d3b1d7d458979660e66593 | [] | no_license | timn32/edx-course-Linear-Regression | 1f6b467912bbe8b959899f51ca31d65da465b15e | b8cb775d9bbc33d2d52b32bc1a06906c29c78b4b | refs/heads/master | 2022-08-01T08:00:13.344758 | 2020-05-27T10:42:35 | 2020-05-27T10:42:35 | 265,506,966 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 446 | r | 2-3-2 differences from data frames.R | # inspect data frame and tibble
# NOTE(review): assumes Teams (Lahman package) plus tibble/dplyr are already
# attached by the surrounding course material.
Teams
# FIX: as.tibble() was deprecated (and later removed) from the tibble
# package; as_tibble() is the supported spelling.
as_tibble(Teams)
# subsetting a data frame sometimes generates vectors
class(Teams[,20])
# subsetting a tibble always generates tibbles
# FIX: subset the tibble itself -- the original wrapped the already-extracted
# vector, which does not demonstrate tibble subsetting.
class(as_tibble(Teams)[,20])
# pulling a vector out of a tibble
class(as_tibble(Teams)$HR)
# access a non-existing column in a data frame or a tibble
Teams$hr              # data frame: silently returns NULL
as_tibble(Teams)$hr   # tibble: returns NULL with a warning about the unknown column
# create a tibble with complex objects (a list-column holding functions)
tibble(id = c(1,2,3), func = c(mean, median, sd))
894a6fe1f921e54bc3619d510bccb7540c7219e8 | 06fced22dfcab99a69344f4961a0b76d2a5ceb35 | /man/phrase_extract.Rd | cacdabcb8fe5fbbd702c38b60aa831a5c0de328e | [] | no_license | ktargows/sentimental | 969836c4413fe0e68321ef7d4712d4c98e98f23b | 678a65952ec63cb593ce13f257fbcbe5ee2d3968 | refs/heads/master | 2021-01-21T16:52:56.645913 | 2016-07-27T14:09:17 | 2016-07-27T14:09:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 952 | rd | phrase_extract.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phrase.r
\name{phrase_extract}
\alias{phrase_extract}
\title{Phrase extraction & named entity recognition on text}
\usage{
phrase_extract(text, language = c("english", "dutch", "portuguese",
"spanish"))
}
\arguments{
\item{text}{one element character vector containing the text to analyze (part of
speech). Must be <= 21000 characters.}
\item{language}{the language \code{text} is in. One of "\code{english}",
"\code{dutch}", "\code{portuguese}" or "\code{spanish}". See:
\url{http://text-processing.com/docs/phrases.html}.}
}
\value{
\code{list} with parsed \code{text}
}
\description{
Phrase extraction & named entity recognition on text
}
\note{
The public API is for non-commercial purposes, and each method is throttled to
1,000 calls per day per IP
}
\examples{
phrase_extract("Maine is nice")
}
\references{
\url{http://text-processing.com/docs/phrases.html}
}
|
b2ab144bc140aaee6a6f38cbfab36d0730c8608d | 5cbccd29702fc65dde52f5d0b3a5e8d880edb9fb | /functions/fingerprinting_results.R | 9dd810d75312f4ece8f5dfa3a10f5f7b3a68edc7 | [
"MIT"
] | permissive | edickie/wynR | 100ae3a0195dd9660d28c6253b8ac59f5a4b16c2 | 6a4a4e8fcfb8e37094827381c43ca147fea8cca4 | refs/heads/master | 2021-01-19T22:16:39.944482 | 2017-04-21T18:46:05 | 2017-04-21T18:46:05 | 88,784,200 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,819 | r | fingerprinting_results.R | #' Calculates "fingerprinting" style stats from a subject to subject correlation matrix
#'
#' Summarises within- versus cross-subject correlation strength using
#' dplyr/tidyr (the original note claiming igraph is used was inaccurate).
#'
#' @param sub2sub_cormat correlation matrix of M X N subjects with subject ID as the row and column names
#'
#' @return dataframe of within and between subject values.
#'
#' Each row representing stats calculated from one subject's row of the input matrix.
#' Results include:
#' subid the subject ID, taken from the row.name of the input matrix
#' within_popZ The Z score for the within subject correlation within the row.
#' within_rank The percentile rank of the within subject correlation within the row.
#' within_Zrho The within subject correlation (Z-transformed)
#' cross_meanZ The mean of the cross subject (Z-transformed) correlation (within that row)
#' cross_sdZ The standard deviation of the cross subject (Z-transformed) correlation (within that row)
#'
#' @details
#' Currently only works when there is one subject ID in the row.names that matches one subject ID in the column names.
#' Requires dplyr and tidyr
#'
#' @export
fingerprint_results <- function(sub2sub_cormat) {
  ## FIX: library() fails loudly when a dependency is absent, whereas the
  ## original require() calls silently returned FALSE and failed later.
  library(dplyr)
  library(tidyr)
  tmpdf <- as.data.frame(sub2sub_cormat) # convert to dataframe
  tmpdf$subid1 <- row.names(tmpdf) # make subid1 column from row.names
  # melt the dataframe, Z-transform the correlations,
  # and label within and cross connections based on if original row and column names match
  result <- tmpdf %>%
    gather(subid2, rho, -subid1) %>%
    mutate(Z = atanh(rho),
           within_cross = if_else(subid1==subid2, "within", "cross")) %>%
    ## calculate population Z score and rank within each row
    group_by(subid1) %>%
    ## FIX: ties.method was previously passed to as.numeric() (where it was
    ## silently ignored) instead of rank(); it now reaches rank() as intended.
    mutate(popZ = as.numeric(scale(Z)),
           rankZ = as.numeric(rank(Z, ties.method = "max"))/n()) %>%
    ## calculate summary statistics for both within and cross participant values
    ungroup() %>%
    group_by(subid1,within_cross) %>%
    summarise(n = n(),
              meanZ = mean(Z),
              sdZ = sd(Z),
              med_popZ = median(popZ),
              med_rankZ = median(rankZ)) %>%
    ungroup() %>%
    ## rearrange the output to a one row per participant format
    gather(measure, value, -subid1, -within_cross) %>%
    unite(mycolnames, within_cross, measure, sep = "_") %>%
    spread(mycolnames, value) %>%
    ## remove participants whose ID had no matching column name
    filter(within_n == 1, cross_n == (ncol(sub2sub_cormat)-1)) %>%
    select(subid1,
           within_med_popZ, within_med_rankZ, within_meanZ,
           cross_meanZ, cross_sdZ)
  ## rename the columns to the documented output names
  names(result) <- c("subid", "within_popZ", "within_rank", "within_Zrho",
                     "cross_meanZ", "cross_sdZ")
  return(result)
}
872194f017c3f63271cecff04c314bc6579db5ca | 2ea5154f274190849584ab7194e1866f44287ef6 | /classifications.R | 9d5a3bbbd5b7c7cd6a0ba5a8bf77dfbca48fb72f | [] | no_license | markozeman/machine_learning_diabetes | 79fe6f4baad3dcd6899e0f2f72f6bb6b2dda55c3 | 1ca6e1acc7f3c2bae4fefeb87ea09ce26aeac3de | refs/heads/master | 2021-09-01T04:29:37.687675 | 2017-12-24T20:10:27 | 2017-12-24T20:10:27 | 108,756,254 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,227 | r | classifications.R | source("helpers.R");
source("preprocessing.R");
diabetes <- preprocess();
### Majority classifier
majority.class <- names(which.max(table(diabetes$readmitted)));
default.accuracy <- sum(diabetes$readmitted == majority.class) / length(diabetes$readmitted);
# install.packages("pROC");
# install.packages(c("pROC", "adabag", ipred", "prodlim", "CORElearn", "e1071", "randomForest", "kernlab", "nnet"));
library(pROC);
library(rpart);
library(CORElearn);
library(e1071);
library(adabag);
library(ipred);
library(randomForest);
library(kernlab);
library(nnet);
learn <- diabetes[1:16000, ];
test <- diabetes[16001:20000, ];
true_class <- test$readmitted;
diabetes_sample <- diabetes[sample(1:nrow(diabetes), 5000, replace=FALSE), ];
obsMat <- model.matrix(~readmitted-1, test);
##### DECISION TREE - rpart
# #dt <- rpart(readmitted ~ ., data = learn, method="class", control=rpart.control(minsplit=2, minbucket=1, cp=0.001));
# dt <- rpart(readmitted ~ ., data = learn, method="class");
#
# ######### Zakaj ne naredi drevesa, ampak samo koren ???
#
# predicted <- predict(dt, test, type = "class");
#
# confusion_matrix <- table(observed, predicted);
#
# ca <- CA(observed, predicted);
##### DECISION TREE - CoreModel
# cm.dt <- CoreModel(readmitted ~ ., data = learn, model="tree");
# # plot(cm.dt, learn);
#
# predicted <- predict(cm.dt, test, type="class");
#
# confusion_matrix <- table(true_class, predicted);
#
# ca <- CA(true_class, predicted);
# sens <- Sensitivity (true_class, predicted, "YES");
# spec <- Specificity (true_class, predicted, "YES");
#
# err <- errorest(readmitted ~ ., data = diabetes, model = mymodel.coremodel, predict = mypredict.coremodel, target.model = "tree");
# CA <- 1 - err$error;
# write(c("CA: ", CA), file = "DT.txt", sep = "\n");
##### ROC curve
# predicted_prob <- predict(cm.dt, test, type = "prob");
#
# rocobj <- roc(true_class, predicted_prob[, "YES"]);
# # plot(rocobj);
#
# ### make specificity and sensitivity more equal
# cutoffs <- rocobj$thresholds;
# tp = rocobj$sensitivities;
# fp = 1 - rocobj$specificities;
#
# dist <- (1-tp)^2 + fp^2;
# best.cutoff <- cutoffs[which.min(dist)];
#
# predicted.label <- factor(ifelse(predicted_prob[,"YES"] >= best.cutoff, "YES", "NO"));
#
# confusion_matrix_2 <- table(true_class, predicted.label);
# ca_2 <- CA(true_class, predicted.label);
# sens_2 <- Sensitivity (true_class, predicted.label, "YES");
# spec_2 <- Specificity (true_class, predicted.label, "YES");
##### NAIVE BAYES
### No.1
# nb <- naiveBayes(readmitted ~ ., data = learn);
# predicted <- predict(nb, test, type="class");
#
# confusion_matrix <- table(true_class, predicted);
# ca <- CA(true_class, predicted);
# sens <- Sensitivity (true_class, predicted, "YES");
# spec <- Specificity (true_class, predicted, "YES");
#
# predMat <- predict(nb, test, type = "raw");
# bs <- brier.score(obsMat, predMat);
#
# err <- errorest(readmitted ~ ., data = diabetes, model = naiveBayes, predict = mypredict.generic);
# CA <- 1 - err$error;
# write(c("CA: ", CA), file = "NB.txt", sep = "\n");
### No.2
# cm.nb <- CoreModel(readmitted ~ ., data = learn, model="bayes");
# predicted <- predict(cm.nb, test, type="class");
# ca <- CA(true_class, predicted);
# sens <- Sensitivity (true_class, predicted, "YES");
# spec <- Specificity (true_class, predicted, "YES");
#
# predMat <- predict(cm.nb, test, type = "probability");
# bs <- brier.score(obsMat, predMat);
#
# err <- errorest(readmitted ~ ., data = diabetes, model = mymodel.coremodel, predict = mypredict.coremodel, target.model="bayes");
# CA <- 1 - err$error;
# write(c("CA: ", CA), file = "NB_core.txt", sep = "\n");
##### KNN
# knn <- CoreModel(readmitted ~ ., data = learn, model="knn", kInNN = 5);
# predicted <- predict(knn, test, type="class");
#
# ca <- CA(true_class, predicted);
# sens <- Sensitivity (true_class, predicted, "YES");
# spec <- Specificity (true_class, predicted, "YES");
#
# predMat <- predict(knn, test, type = "probability");
# bs <- brier.score(obsMat, predMat);
#
# err <- errorest(readmitted ~ ., data = diabetes_sample, model = mymodel.coremodel, predict = mypredict.coremodel, target.model="knn");
# CA <- 1 - err$error;
# write(c("CA: ", CA), file = "kNN.txt", sep = "\n");
### POŽENI IN SI SHRANI !!!
##### RANDOM FOREST
### No.1
# rf <- randomForest(readmitted ~ ., data = learn);
# predicted <- predict(rf, test, type="class");
#
# ca <- CA(true_class, predicted);
# sens <- Sensitivity (true_class, predicted, "YES");
# spec <- Specificity (true_class, predicted, "YES");
#
# predMat <- predict(rf, test, type = "prob");
# bs <- brier.score(obsMat, predMat);
#
# err <- errorest(readmitted ~ ., data = diabetes_sample, model = randomForest, predict = mypredict.generic);
# CA <- 1 - err$error;
# write(c("CA: ", CA), file = "RF_20000_samples.txt", sep = "\n");
### POŽENI IN SI SHRANI !!!
### No.2
# cm.rf <- CoreModel(readmitted ~ ., data = learn, model="rf");
# predicted <- predict(cm.rf, test, type="class");
#
# ca <- CA(true_class, predicted);
# sens <- Sensitivity (true_class, predicted, "YES");
# spec <- Specificity (true_class, predicted, "YES");
#
# predMat <- predict(cm.rf, test, type = "probability");
# bs <- brier.score(obsMat, predMat);
#
# err <- errorest(readmitted ~ ., data = diabetes_sample, model = mymodel.coremodel, predict = mypredict.coremodel, target.model="rf");
# CA <- 1 - err$error;
# write(c("CA: ", CA), file = "RF_core_20000_samples.txt", sep = "\n");
### RUN THIS AND SAVE THE RESULT !!!
##### SVM
### No.1
# sm <- svm(readmitted ~ ., data = learn);
# predicted <- predict(sm, test, type="class");
# ca <- CA(true_class, predicted);
#
# sm <- svm(readmitted ~ ., learn, probability = T);
# pred <- predict(sm, test, probability = T);
# predMat <- attr(pred, "probabilities");
# # in this particular case, the columns of predMat are in reverse order, so we need to invert them
# bs <- brier.score(obsMat, predMat[, c(2,1)]);
#
# err <- errorest(readmitted ~ ., data = diabetes_sample, model = svm, predict = mypredict.generic);
# CA <- 1 - err$error;
# write(c("CA: ", CA), file = "SVM_20000_samples.txt", sep = "\n");
### No.2
# model.svm <- ksvm(readmitted ~ ., data = learn, kernel = "rbfdot");
# predicted <- predict(model.svm, test, type = "response");
# ca <- CA(true_class, predicted);
#
# model.svm <- ksvm(readmitted ~ ., data = learn, kernel = "rbfdot", prob.model = T);
# predMat <- predict(model.svm, test, type = "prob");
# bs <- brier.score(obsMat, predMat);
#
# err <- errorest(readmitted ~ ., data = diabetes_sample, model = ksvm, predict = mypredict.ksvm);
# CA <- 1 - err$error;
# write(c("CA: ", CA), file = "kSVM_20000_samples.txt", sep = "\n");
##### NEURAL NETWORKS
# # the algorithm is more robust when normed data is used
# norm.data <- scale.data(rbind(learn, test));
# norm.learn <- norm.data[1:nrow(learn), ];
# norm.test <- norm.data[-(1:nrow(learn)), ];
# norm.diabetes <- scale.data(diabetes);
# norm.diabetes_sample = scale.data(diabetes_sample);
# nn <- nnet(readmitted ~ ., data = norm.learn, size = 5, decay = 0.0001, maxit = 10000);
# predicted <- predict(nn, norm.test, type = "class");
# ca <- CA(true_class, predicted);
#
# # in the case of a binary classification task the method returns probabilities just for one class so we have to reconstruct the complete matrix on our own
# pm <- predict(nn, norm.test, type = "raw");
# predMat <- cbind(1-pm, pm);
# bs <- brier.score(obsMat, predMat);
# err <- errorest(readmitted ~ ., data = norm.diabetes, model = nnet, predict = mypredict.nnet, size = 5, decay = 0.0001, maxit = 10000);
# CA <- 1 - err$error;
# write(c("CA: ", CA), file = "NN.txt", sep = "\n");
##### LOGISTIC REGRESSION
### not working!
# log_reg <- glm(readmitted ~ . , family = binomial(link='logit'), data = learn);
# predicted <- predict(log_reg, test, type="response");
# ca <- CA(true_class, predicted);
##### Combining machine learning algorithms
# modelDT <- CoreModel(readmitted ~ ., learn, model="tree");
# modelNB <- CoreModel(readmitted ~ ., learn, model="bayes");
# modelKNN <- CoreModel(readmitted ~ ., learn, model="knn", kInNN = 5);
#
# predDT <- predict(modelDT, test, type="class");
# caDT <- CA(true_class, predDT);
#
# predNB <- predict(modelNB, test, type="class");
# caNB <- CA(true_class, predNB);
#
# predKNN <- predict(modelKNN, test, type="class");
# caKNN <- CA(true_class, predKNN);
### Voting
# # combine predictions into a data frame
# pred <- data.frame(predDT, predNB, predKNN);
#
# predicted <- voting(pred);
# ca_voting <- CA(true_class, predicted);
### Weighted voting
# predDT.prob <- predict(modelDT, test, type="probability");
# predNB.prob <- predict(modelNB, test, type="probability");
# predKNN.prob <- predict(modelKNN, test, type="probability");
#
# # combine predictions into a data frame
# pred.prob <- caDT * predDT.prob + caNB * predNB.prob + caKNN * predKNN.prob;
#
# # pick the class with the highest score
# highest <- apply(pred.prob, 1, which.max);
# classes <- levels(learn$readmitted);
# predicted <- classes[highest];
#
# ca_weighted_voting <- CA(true_class, predicted);
### Stacking
# # divide the learning set into two sets
# sel <- sample(1:nrow(learn), size=1000, replace=F);
# base.train <- learn[-sel, ];
# base.valid <- learn[sel, ];
#
# # get predictions from the base models
# predM1 <- predict(modelDT, base.valid, type="class");
# predM2 <- predict(modelNB, base.valid, type="class");
# predM3 <- predict(modelKNN, base.valid, type="class");
#
# # combine predictions into a data frame
# combiner.train <- data.frame(M1=predM1, M2=predM2, M3=predM3, readmitted=base.valid$readmitted);
#
# # train a combiner model
# combiner.M <- multinom(readmitted ~ ., combiner.train, maxit=1000);
#
#
# ## testing the stacked model
#
# # get predictions from the base models
# test.M1 <- predict(modelDT, test, type="class");
# test.M2 <- predict(modelNB, test, type="class");
# test.M3 <- predict(modelKNN, test, type="class");
#
# # combine predictions into a data frame
# combiner.test <- data.frame(M1=test.M1, M2=test.M2, M3=test.M3);
#
# # get the final predictions from the combiner model
# predicted <- predict(combiner.M, combiner.test, type="class");
#
# ca_stacking <- CA(true_class, predicted);
### Bagging
# n <- nrow(learn);
# m <- 15;
#
# models <- list();
# for (i in 1:m)
# {
# sel <- sample(1:n, n, T);
# train <- learn[sel, ];
# models[[i]] <- CoreModel(readmitted ~ ., train, model="tree", minNodeWeightTree=2);
# }
#
# tmp <- NULL;
# for (i in 1:m)
# tmp <- cbind(tmp, as.character(predict(models[[i]], test, type="class")));
#
# highest <- apply(tmp, 1, function(x){which.max(table(factor(x, levels=classes)))});
# classes <- levels(learn$readmitted);
# predicted <- classes[highest];
# ca_our_bagging <- CA(true_class, predicted);
#
#
# # bagging is implemented in the package "ipred"
# bag <- bagging(readmitted ~ ., learn, nbagg=15);
# bag.pred <- predict(bag, test, type="class");
# ca_bagging <- CA(true_class, bag.pred);
### Boosting
# bm <- boosting(readmitted ~ ., learn);
# predictions <- predict(bm, test);
#
# predicted <- predictions$class;
# ca_boosting <- CA(true_class, predicted);
|
6f623c2d6e009195d33e069df67cb8c200db5479 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/AquaEnv/examples/K_HPO4.Rd.R | 04ee23159d36864030957f0edf99a869ac9de999 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 239 | r | K_HPO4.Rd.R | library(AquaEnv)
### Name: K_HPO4
### Title: K\_HPO4
### Aliases: K_HPO4
### Keywords: misc

### ** Examples

# Positional call: S = 35 (salinity), t = 15 (temperature, degC) — argument
# names confirmed by the fully named call below.
K_HPO4(35, 15)
# Same conditions with a third positional argument p = 10 (pressure).
K_HPO4(35, 15, 10)
# Fully named call; additionally overrides SumH2SO4 (total sulfate,
# presumably mol/kg — confirm against the AquaEnv manual).
K_HPO4(S=35, t=15, p=10, SumH2SO4=0.03)
# Temperature dependence of the constant over 1..25 degC.
plot(K_HPO4(35, 1:25), xlab="temperature / degC")
c5edcba7549f838ba75af015425ff810ef7e9ab4 | 1f3454240dee5264aa7227255a9166f9663db36d | /Popgen_analysis/snp/pca.R | bf705d2b26220e1bd476e586de1d0f7929034ba4 | [] | no_license | harrisonlab/neonectria_ditissima | 1163df5cc7ef83682174f193dfdad0b0052a3421 | 41d88d1f18b66362829fb871406c37653df4dfae | refs/heads/master | 2022-05-30T07:53:56.568612 | 2022-04-14T13:08:42 | 2022-04-14T13:08:42 | 39,201,858 | 3 | 1 | null | 2017-08-24T16:51:36 | 2015-07-16T14:32:34 | null | UTF-8 | R | false | false | 1,972 | r | pca.R | #!/usr/bin/env Rscript
# Print a PCA plot calculated from a SNP matrix.
# First argument (required): path to the input VCF file.
# Output: a PDF file with the figure, suffix: _pca.pdf (written next to input).
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  # was: "...(input file).n" — the newline escape had lost its backslash
  stop("At least one argument must be supplied (input file).\n", call. = FALSE)
}
library("SNPRelate")
library("gdsfmt")
library("ggplot2")
library("ggrepel")
library(tools)
# The SNPRelate package also offers IBD and IBS calculations
vcf.fn <- args[1]
# Convert the VCF to GDS format (biallelic SNPs only), alongside the input file
file_gds <- paste0(file_path_sans_ext(vcf.fn), ".gds")
snpgdsVCF2GDS(vcf.fn, file_gds, method = "biallelic.only")
snpgdsSummary(file_gds)
genofile <- snpgdsOpen(file_gds)
# PCA; autosome.only = FALSE so haploid / non-autosomal data are included
pca <- snpgdsPCA(genofile, autosome.only = FALSE)
# Close the GDS handle as soon as it is no longer needed (was left open before)
snpgdsClose(genofile)
# Percentage of variation explained by each axis
pc.percent <- pca$varprop * 100
tab <- data.frame(sample.id = pca$sample.id,
                  EV1 = pca$eigenvect[, 1],  # the first eigenvector
                  EV2 = pca$eigenvect[, 2],  # the second eigenvector
                  stringsAsFactors = FALSE)
## Optionally assign population identifiers to each individual, e.g.:
# tab$pop <- c("1", "1", "2", "1", "2", "2", "2", "1", "2", "2")
# Quick base-graphics preview of the first two eigenvectors
plot(tab$EV2, tab$EV1, xlab = "eigenvector 2", ylab = "eigenvector 1")
# Axis labels carry the % variance explained (rounded to 2 decimal places)
variance <- round(pc.percent, 2)
xlabel <- paste0("PC2 (", variance[2], "%)")
ylabel <- paste0("PC1 (", variance[1], "%)")
# Labelled scatter plot; sample.id resolves from tab via ggplot data masking.
# (The original passed fill = "red" to ggplot(), which is silently ignored,
# and read sample.id from the GDS file into an unused variable — both removed.)
pca_plot <- ggplot(tab, aes(EV2, EV1)) +
  geom_point(size = 2) +
  geom_text_repel(aes(label = sample.id)) +
  xlab(xlabel) +
  ylab(ylabel)
# Variant colouring points by population, if tab$pop was assigned above:
# pca_plot <- ggplot(tab, aes(EV2, EV1)) + geom_point(colour = factor(tab$pop), size = 2) + geom_text_repel(aes(label = sample.id)) + xlab(xlabel) + ylab(ylabel)
pca_plot2 <- pca_plot +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
file_pca <- paste0(file_path_sans_ext(vcf.fn), "_pca.pdf")
ggsave(file_pca, pca_plot2, dpi = 300, height = 5, width = 5)
|
ec9b169c6931b5afc03310c339114c889160e189 | 547d9593d2808563e9a3abb970cf39d40d0cf024 | /man/metricmaker.Rd | cc458c722ffe4195272943e1528889aa6c5d7ce7 | [] | no_license | dncnbrn/EmsiR | c5176d65ea2661c69c50b755f4e5c530638b25b1 | 2d643be0af6b9b8838af65cb57a763dd209f46f8 | refs/heads/master | 2021-03-24T12:54:31.031726 | 2018-02-23T13:30:42 | 2018-02-23T13:30:42 | 75,728,416 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,528 | rd | metricmaker.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metrics.R
\name{metricmaker}
\alias{metricmaker}
\title{Specify metrics for an Episteme API query}
\usage{
metricmaker(metricdf, geoparent, along)
}
\arguments{
\item{metricdf}{at minimum, a data frame with two columns: \code{name} sets out the desired names for the metrics and
\code{as} sets out the titles of the codes on Emsi Episteme. Where using derivative metrics (Openings, Location Quotients and
Shift-Share), additional columns are required in the form of \code{metrics} to specify if they are \emph{"OP"}, \emph{"LQ"} or
\emph{"SS"} and, for Openings and Shift-Share, a \code{base} column identifies the comparison metric for the year.}
\item{geoparent}{is required for derivative metrics, and is a geographical code identifing the parent geographical unit for analysis.}
\item{along}{is required for derivative metrics, and reflects the intended domain for analysis (e.g. "Industry" or "Occupation").}
}
\value{
A prepared data frame which will be ready for inclusion in a data pull query.
}
\description{
Takes a data frame of required metrics and necessary supporting criteria and specifies them ready for an Emsi Episteme data pull.
}
\examples{
met1 <- data.frame(names=c("Jobs.2016","Jobs.2022"), as=c("Jobs.2016","Jobs.2022"))
metricmaker(met1)
met2 <- data.frame(name=c("Jobs.2016","Jobs.2016","Jobs.2016"),as=c("Jobs16","LQ16","SS16"),metrics=c(NA,"LQ","SS"),base=c(NA,NA,"Jobs.2003"))
metricmaker(met2, "GB", "Occupation")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.