blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5cb698a9705b19261b424445b0f069e582702300 | 4da1b4f9a30d813fdfc68cc6a1a3387da187b432 | /support/inout/fs_rmd_knitall.R | bce320587ad548eede1a919b0e49ba809e4000e3 | [
"MIT"
] | permissive | FanWangEcon/R4Econ | 50b21f0f53fd73722ab49f3383102df001415fda | 7c7c4af32721817b2bc18944cfa17ecd8fb1f848 | refs/heads/master | 2023-04-03T18:41:31.038769 | 2023-03-23T05:06:53 | 2023-03-23T05:06:53 | 173,583,807 | 19 | 14 | null | null | null | null | UTF-8 | R | false | false | 5,108 | r | fs_rmd_knitall.R | # source('C:/Users/fan/R4Econ/support/inout/fs_rmd_knitall.R')
#
# library(tidyverse)
# library(tidyr)
# library(knitr)
# library(kableExtra)
# Knit all Rmd Files stored in folders
# I maintain both bookdown as well as individually compiled PDFs for each page
# This file finds all Rmd files in R4Eon and knits them all to pdf and html.
# Root of the R4Econ repository; all group paths below are built from this.
spt_root <- 'C:/Users/fan/R4Econ/'
# if the path contains these skip
spn_skip <- c('summarize', 'panel', 'support')
# NOTE(review): the line below immediately overwrites the skip list with a
# single empty string, so in practice no folders are skipped.
spn_skip <- c('')
# Group A
spt_amto <- paste0(spt_root, 'amto')
spt_summ <- paste0(spt_root, 'summarize')
spt_func <- paste0(spt_root, 'function')
ls_path_group_a <- c(spt_amto, spt_summ, spt_func)
# Group B
spt_math <- paste0(spt_root, 'math')
spt_gent <- paste0(spt_root, 'generate')
spt_panl <- paste0(spt_root, 'panel')
# NOTE(review): spt_gent ('generate') is defined but not included in group B.
ls_path_group_b <- c(spt_math, spt_panl)
# Group C
spt_linr <- paste0(spt_root, 'linreg')
spt_regn <- paste0(spt_root, 'regnonlin')
spt_opti <- paste0(spt_root, 'optimization')
spt_dyna <- paste0(spt_root, 'dynamic')
# NOTE(review): spt_dyna ('dynamic') is defined but not included in group C.
ls_path_group_c <- c(spt_opti, spt_regn, spt_linr)
# Group TEMP
spt_one <- paste0(spt_root, 'amto')
spt_two <- paste0(spt_root, 'summarize/dist')
spt_thr <- paste0(spt_root, 'summarize/aggregate')
spt_fou <- paste0(spt_root, 'function/noloop')
spt_fiv <- paste0(spt_root, 'math')
spt_six <- paste0(spt_root, 'panel')
ls_path_group_temp <- c(spt_one, spt_two, spt_thr)
ls_path_group_temp <- c(ls_path_group_temp, spt_fou, spt_fiv, spt_six)
# All ls_path_group_use
ls_path_group <- c(ls_path_group_a, ls_path_group_b, ls_path_group_c)
# Group To Use
ls_path_group_use <- ls_path_group
# NOTE(review): this overwrite makes the search start at the repository root,
# so the group lists assembled above are effectively ignored.
ls_path_group_use <- paste0(spt_root, '')
# ls_path_group_use <- ls_path_group_temp
# Get Path
# Recursively list all files matching ".Rmd". Note that `pattern` is a regex:
# the dot matches any character and the match is not anchored to the file end.
ls_sfls <- list.files(path=ls_path_group_use, recursive=T, pattern=".Rmd", full.names=T)
# Excludes elements of path that have exclusion strings
# NOTE(review): `if` on a vector condition only uses its first element (and is
# an error in R >= 4.2 when length > 1); safe here only because spn_skip was
# reset to a single empty string above.
if (spn_skip != '') {
ls_sfls <- ls_sfls[!grepl(paste(spn_skip, collapse = "|"), ls_sfls)]
}
# print
# For each Rmd found: query its git status, and (when the gate below is
# enabled) render PDF/HTML/R versions into an htmlpdfr/ subfolder, then
# delete intermediate by-products next to the Rmd.
for (spt_file in ls_sfls) {
# 1. Check if the RMD file has been modified or is new, if neither, do not generate pdf html
# 2. store pdf and html files in a subfolder
# 3. main folder keeps only Rmd file
# 4. delete tex and other files
st_fullpath_noname <- dirname(spt_file)
st_fullpath_nosufx <- sub('\\.Rmd$', '', spt_file)
st_file_wno_suffix <- sub('\\.Rmd$', '', basename(spt_file))
setwd(st_fullpath_noname)
# Check if the RMD file has been modified or is new, if neither, do not generate pdf html
# Ask git for the short status of this single file; empty output = unchanged.
spg_check_git_status <- paste0('git status -s ', spt_file)
st_git_status <- toString(system(spg_check_git_status, intern=TRUE))
# ' M ' flags a tracked-and-modified file, '?? ' an untracked (new) file.
bl_modified <- grepl(' M ', st_git_status, fixed=TRUE)
bl_anewfile <- grepl('?? ', st_git_status, fixed=TRUE)
bl_nochange <- (st_git_status == "")
if (bl_modified == 1) {
print(paste0('MODIFIED: ', spt_file))
} else if (bl_anewfile == 1) {
print(paste0('A NEW FL: ', spt_file))
} else {
print(paste0('NO CHNGE: ', spt_file))
}
# NOTE(review): bl_modified + bl_anewfile is at most 2, so `== 10` can never
# be TRUE and the render/cleanup section below never executes. Presumably a
# deliberate kill switch, or a typo for `>= 1` -- confirm intent before use.
if (bl_modified + bl_anewfile == 10) {
print(paste0('spt_file:',spt_file))
print(paste0('st_fullpath_noname:', st_fullpath_noname))
print(paste0('st_fullpath_nosufx:', st_fullpath_nosufx))
print(paste0('st_file_wno_suffix:', st_file_wno_suffix))
# All rendered outputs go into an htmlpdfr/ subfolder beside the Rmd.
spth_pdf_html <- paste0(st_fullpath_noname, '/htmlpdfr/')
sfle_pdf_html <- paste0(st_fullpath_noname, '/htmlpdfr/', st_file_wno_suffix)
print(spth_pdf_html)
# By-products slated for deletion, in the main folder and in the subfolder.
sfl_nht <- paste0(st_fullpath_nosufx, '.nb.html')
sfl_tex <- paste0(st_fullpath_nosufx, '.tex')
sfl_pdf <- paste0(st_fullpath_nosufx, '.pdf')
sfl_htm <- paste0(st_fullpath_nosufx, '.html')
sfl_Rla <- paste0(st_fullpath_nosufx, '.R')
sfl_log <- paste0(st_fullpath_nosufx, '.log')
sfl_sub_nht <- paste0(sfle_pdf_html, '.nb.html')
sfl_sub_tex <- paste0(sfle_pdf_html, '.tex')
# Bookdown '_main' files are not rendered here (cleanup below still runs).
if (grepl('_main', spt_file)) {
# try(file.remove(paste0(st_fullpath_nosufx, '.pdf')))
# try(file.remove(paste0(st_fullpath_nosufx, '.html')))
} else {
# rmarkdown::render(spt_file, output_format='pdf_document(includes = includes(in_header = "C:/Users/fan/R4Econ/preamble.tex"))', output_dir = spth_pdf_html)
# rmarkdown::render(spt_file, output_format='pdf_document(includes = includes(in_header))', output_dir = spth_pdf_html)
print(paste0('spt_file:',spth_pdf_html, ', PDF started'))
rmarkdown::render(spt_file, output_format='pdf_document', output_dir = spth_pdf_html)
print(paste0('spt_file:',spth_pdf_html, ', PDF finished'))
print(paste0('spt_file:',spth_pdf_html, ', HTML started.'))
rmarkdown::render(spt_file, output_format='html_document', output_dir = spth_pdf_html)
print(paste0('spt_file:',spth_pdf_html, ', HTML finished.'))
print(paste0('purl_to:', paste0(sfle_pdf_html, ".R")))
# Extract the pure R code from the Rmd into the subfolder as well.
knitr::purl(spt_file, output=paste0(sfle_pdf_html, ".R"), documentation = 1)
}
# Remove intermediates; try() swallows errors for files that were never made.
try(file.remove(sfl_nht))
try(file.remove(sfl_tex))
try(file.remove(sfl_pdf))
try(file.remove(sfl_htm))
try(file.remove(sfl_Rla))
try(file.remove(sfl_log))
try(file.remove(sfl_sub_nht))
try(file.remove(sfl_sub_tex))
}
}
|
02786eb097c2069bccc9f7c61ad62957bec5cfc1 | e709f97fc10a761c5f6dbfba1036e52c863792d4 | /R/initial_plots.R | d1d87e6fe3cfb3a8368b1176b2c2361cc7fc0264 | [] | no_license | rstats-tartu/dmitry_plant | eff1efe6b136754635419a1db2a0ad4bd646592f | 7358cd73b3db60d7da97860429869fdeb5f5606e | refs/heads/master | 2020-03-30T16:59:04.035529 | 2018-10-03T15:59:15 | 2018-10-03T15:59:15 | 151,436,189 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,026 | r | initial_plots.R |
# Load libraries ----
library(tidyverse)
library(readxl)
library(lubridate)

# Helpers ----

# Read one plant workbook and split the underscore-coded plant_name into
# its four treatment columns (co2, temp, water, light).
read_plant <- function(path) {
  read_xlsx(path) %>%
    separate(plant_name, c("co2", "temp", "water", "light"), sep = "_")
}

# Turn the datetime stamp into a Date column `day`: split off the clock part,
# drop it together with the redundant `time` column, and parse with ymd().
add_day <- function(df) {
  df %>%
    mutate(date = as.character(date)) %>%
    separate(date, c("day", "hours"), sep = " ") %>%
    select(-hours, -time) %>%
    mutate(day = ymd(day))
}

# Import data ----
fc   <- read_plant("data/Fc_Plant.xlsx") %>% add_day()
ir   <- read_plant("data/Ir_Plant.xlsx") %>% add_day()
drgb <- read_plant("data/Rgb_Morpho_Plant.xlsx") %>% add_day()
sc   <- read_plant("data/ScalesMeasure.xlsx")
irb  <- read_plant("data/Ir_Plant_before.xlsx") %>% add_day()

# Scales measurements carry a plain date (no clock part): derive `day`
# directly and drop the raw date/time columns.
sc <- sc %>%
  mutate(day = ymd(as.character(date))) %>%
  select(-date, -time)

# Plots ----

# Shared layers: per-day mean line, mean +/- SE pointrange, water x light facets.
mean_se_layers <- list(
  stat_summary(fun.y = mean, geom = "line"),
  stat_summary(fun.data = mean_se, geom = "pointrange"),
  facet_grid(water ~ light)
)

# Chlorophyll fluorescence parameters over time, by genotype.
fc %>%
  filter(day > "2018-08-04") %>%
  ggplot(aes(day, QY_max, color = genotype)) +
  mean_se_layers

fc %>%
  filter(day > "2018-08-04") %>%
  ggplot(aes(day, QY_Lss, color = genotype)) +
  mean_se_layers

fc %>%
  filter(day > "2018-08-04") %>%
  ggplot(aes(day, NPQ_Lss, color = genotype)) +
  mean_se_layers

fc %>%
  filter(day > "2018-08-04") %>%
  ggplot(aes(day, Rfd_Lss, color = genotype)) +
  mean_se_layers

# Thermal imaging: average leaf temperature (after and before watering runs).
ir %>%
  filter(day != "2018-08-06") %>%
  ggplot(aes(day, `Temp-avg`, color = genotype)) +
  mean_se_layers

irb %>%
  filter(day != "2018-08-06") %>%
  ggplot(aes(day, `Temp-avg`, color = genotype)) +
  mean_se_layers

# RGB morphometrics: projected leaf area.
drgb %>%
  ggplot(aes(day, AREA_MM, color = genotype)) +
  mean_se_layers

# Pot weight trajectories per plant (legend suppressed; one line per plant).
sc %>%
  filter(genotype != "empty") %>%
  ggplot(aes(day, weight, color = plant_id)) +
  geom_line() +
  facet_grid(water ~ light) +
  guides(color = FALSE)
|
a319df3e777f85bc1b827c1a57947d4b705802ca | a6c7846fe22127c0d463c7837d09d85401d172f4 | /R/zzz.R | 3557c551a6bdeb6ce9e48574c243855d50886ad5 | [] | no_license | rplzzz/stayinalive | ec6bcd10ecdef21977d115045441d64c578dd544 | 99b75b5cbb536d2fbeab210e560319d638d1d61e | refs/heads/master | 2021-03-13T12:56:02.232065 | 2020-02-27T23:01:33 | 2020-02-27T23:01:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 126 | r | zzz.R | .onLoad <- function(libname, pkgname)
{
  # Locate the python3 interpreter on the PATH for reticulate.
  # NOTE(review): `which` is POSIX-only; this hook assumes a Unix-alike host.
  # BUG FIX: system2(..., stdout = TRUE) warns (command found, nonzero exit)
  # or errors (`which` itself missing) when python3 is absent, and
  # reticulate::use_python(character(0)) then errors, aborting package load.
  # Guard the lookup and only configure reticulate when a path was found.
  py3 <- tryCatch(
    system2('which', 'python3', stdout = TRUE),
    warning = function(w) character(0),
    error = function(e) character(0)
  )
  if (length(py3) > 0 && nzchar(py3[1])) {
    reticulate::use_python(py3[1])
  }
}
|
fc4b870828ef29b8b6ab3a937305e6b620093d46 | d58151abb81dba81e8ac2804d89a0e66de26af0d | /Functions/compareClustersAcrossGroups.R | 23728c3702db067692299d28081f131dfd1902f6 | [] | no_license | vanrooij-lab/scRNAseq-HCM-human--old | c7fa3ac20a85aa77c26bde9a4b9731d236206df2 | 8f24236128e9cc89225e0e6d9c70b6c6eb0e88f1 | refs/heads/master | 2022-12-06T03:21:56.520140 | 2020-08-18T15:04:58 | 2020-08-18T15:04:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,515 | r | compareClustersAcrossGroups.R | ### compareClustersAcrossGroups()
###
### TODO Upgrade this comment
### TODO Consider renaming this function
### TODO Consider splitting this function into 3 functions
###
### Authors
### - Joep Eding
### Return:
### --- Outputs 3 graphs:
### --- clusterOriginBarplot_clusterResolution: Shows which sample clusters the clusters in referenceGroup originate from.
### --- clusterDestinationBarplot: Shows which referenceGroup clusters each sampleCluster contributes to.
### --- clusterOriginBarplot_sampleResolution: Similar to the clusterResolution version, now shows only from which samples the clusters originate.
### --- Outputs 1 excel file: (only when outputMode is not 'show')
### --- clusterComparison.xlsx: contains the raw data the graphs are based upon.
compareClustersAcrossGroups <- function(config, groupedSCS, referenceGroup=NULL, groupNames=NULL, orientation='vertical', includeOutliers=TRUE, overrideDir=F, outputMode='pdf') {
    # Produces three bar charts (cluster origin at cluster resolution, cluster
    # destination, cluster origin at sample resolution) and, when outputMode is
    # not 'show', an Excel workbook holding the underlying overlap counts.
    # NOTE(review): this function reads two objects from the calling
    # environment that are not parameters: `clusterDifferentialExpression`
    # (cluster names per group) and `outputDir` (root of the output tree).
    # Confirm both exist before calling.
    # Sanitize input groupNames & input referenceGroup
    if(is.null(groupNames)) {
        groupNames = names(groupedSCS)
    } else {
        for(groupName in groupNames) {
            if(!(groupName %in% names(groupedSCS))) {stop(paste0("You want to compare clusters across groups for '",groupName,"' that is not in groupedSCS."))}
        }
    }
    if(is.null(referenceGroup)) {stop(paste0("You want to compare clusters accross groups without setting a reference group."))}
    if(!(referenceGroup %in% groupNames)) {stop(paste0("You want to compare clusters across groups for reference group '", referenceGroup,"' that is not in groupNames."))}
    if(length(referenceGroup) > 1) {stop("You have selected more than 1 referenceGroup.")}
    if(all(groupNames %in% referenceGroup)) {stop("You have not selected any other groups than the referenceGroup.")}
    # Sanitize input orientation
    if(!(orientation %in% c('horizontal','vertical'))) {stop("Parameter 'orientation' needs to be one of: 'horizontal', 'vertical'")}
    # Sanitize input includeOutliers
    if(!is.logical(includeOutliers)) {stop("Parameter 'includeOutliers' needs to be either TRUE or FALSE. Pay attention: no quotes!")}
    # Sanitize input outputMode
    if(!(outputMode %in% c('show','png','pdf','eps','ps','tiff','svg'))) {
        stop("'outputMode' needs to be one of 'show','png','pdf','eps','ps','tiff','svg'. Pay attention to capitalisation.")
    }
    # Generate list of all clusters: one entry per '<group>_cl.<n>' holding the
    # group name and the original cluster label.
    clusterList = list()
    for(groupName in groupNames) {
        for(clusterName in names(clusterDifferentialExpression[[groupName]])) {
            # Skip clusters outside the valid range: @cpart includes outlier
            # clusters, @cluster$kpart contains only the main clustering.
            if(includeOutliers) {
                if(!(as.integer(strsplit(clusterName, 'cl.')[[1]][2]) %in% 1:max(groupedSCS[[groupName]]@cpart))) { next }
            } else {
                if(!(as.integer(strsplit(clusterName, 'cl.')[[1]][2]) %in% 1:max(groupedSCS[[groupName]]@cluster$kpart))) { next }
            }
            newClusterName = paste0(groupName,'_',clusterName)
            clusterList[[newClusterName]] = list()
            clusterList[[newClusterName]]['groupName'] = groupName
            clusterList[[newClusterName]]['clusterNum'] = clusterName
        }
    }
    # Calculate comparisonTable: contains the number of overlapping cells between all of the clusters (compares individual samples vs groups too)
    # Rows/columns are cluster names; cell [A,B] = number of cell IDs shared
    # between cluster A and cluster B.
    comparisonTable <- data.frame()
    for(clusterAName in names(clusterList)) {
        groupAName = clusterList[[clusterAName]]$groupName
        clusterANum = clusterList[[clusterAName]]$clusterNum
        if(includeOutliers) {
            clusterAcells <- names(groupedSCS[[groupAName]]@cpart[groupedSCS[[groupAName]]@cpart==as.integer(strsplit(clusterANum, 'cl.')[[1]][2])])
        } else {
            clusterAcells <- names(groupedSCS[[groupAName]]@cluster$kpart[groupedSCS[[groupAName]]@cluster$kpart==as.integer(strsplit(clusterANum, 'cl.')[[1]][2])])
        }
        for(clusterBName in names(clusterList)) {
            groupBName = clusterList[[clusterBName]]$groupName
            clusterBNum = clusterList[[clusterBName]]$clusterNum
            if(includeOutliers) {
                clusterBcells <- names(groupedSCS[[groupBName]]@cpart[groupedSCS[[groupBName]]@cpart==as.integer(strsplit(clusterBNum, 'cl.')[[1]][2])])
            } else {
                clusterBcells <- names(groupedSCS[[groupBName]]@cluster$kpart[groupedSCS[[groupBName]]@cluster$kpart==as.integer(strsplit(clusterBNum, 'cl.')[[1]][2])])
            }
            comparisonTable[clusterAName,clusterBName] = length(which(clusterAcells %in% clusterBcells))
        }
    }
    # Determine (and if needed create) the output directory for files.
    if(outputMode != 'show') {
        if(overrideDir == FALSE) {
            saveDir <- paste0(outputDir,.Platform$file.sep,referenceGroup,.Platform$file.sep,'Clusters')
        } else {
            saveDir <- paste0(outputDir,.Platform$file.sep,overrideDir,.Platform$file.sep,groupName)
        }
        # Create directory if necessary
        if(!dir.exists(saveDir)) {dir.create(saveDir, showWarnings = TRUE, recursive = TRUE, mode = "0777")}
    }
    # Generate source data for the stacked bar graph by melting the comparisonTable
    stackedBarSource = melt(as.matrix(comparisonTable)) #Uses as.matrix to keep the rowNames from the comparisonTable
    colnames(stackedBarSource) <- c('destinationCluster','sourceCluster','number')
    # Remove rows that don't have a cluster in the referenceGroup as destinationCluster
    stackedBarSource = stackedBarSource[which(grepl(referenceGroup,stackedBarSource$destinationCluster)),]
    # Remove rows that have a cluster in the referenceGroup as sourceCluster
    stackedBarSource = stackedBarSource[which(!grepl(referenceGroup,stackedBarSource$sourceCluster)),]
    # Generate lists of all cluster names per sample
    # NOTE(review): sourceGroupNames is assigned but never used below.
    sourceGroupNames = names(config$groups)[which(names(config$groups) != referenceGroup)]
    sourceGroups = list()
    for(sourceName in unique(stackedBarSource$sourceCluster)) {
        sourceGroup = strsplit(sourceName, '_cl.')[[1]][1]
        sourceNum = strsplit(sourceName, '_cl.')[[1]][2]
        sourceGroups[[sourceGroup]] = append(sourceName, sourceGroups[[sourceGroup]])
    }
    # Generate different colors for each sample, and then variations (lighter/darker) on that color for cluster in that sample
    colorList = list()
    for(sampleNum in 1:length(sourceGroups)) {
        #Count number of clusters for this sample
        numColors = length(sourceGroups[[sampleNum]])
        #Test whether a colour is specified for this group
        # NOTE(review): nchar(NA) is 2, so this check only catches empty
        # strings, not samples whose colour entry is missing (NA) -- confirm
        # config$colors always has an entry per sample.
        if(!(nchar(config$colors[names(sourceGroups)[sampleNum]]) > 0)) {
            stop("No colour is provided for sample '",names(sourceGroups)[sampleNum],"'")
        }
        #Generate colors: a ramp from the sample's base colour towards black,
        #one shade per cluster of that sample.
        colorList[[sampleNum]] = colorRampPalette(c(
            config$colors[names(sourceGroups)[sampleNum]],
            #rainbow(length(sourceGroups))[sampleNum], #Generate distinct colors for each sample
            '#000000'))(numColors*2)[1:numColors] #Generate a colorRampPalette ranging from the sample color to white
    }
    colorList = unlist(colorList)
    # Name colors so they match the clusters
    names(colorList) <- unlist(sourceGroups)
    #Draw a plot that shows which clusters the cells in each cluster of referenceGroup came from.
    #Contributions of 5 cells or fewer are filtered out.
    clusterOriginGraph = ggplot(
        data=stackedBarSource[which(stackedBarSource$number>5),],
        aes(x=destinationCluster, y=number, fill=sourceCluster)
    ) + geom_bar(
        stat='identity'
    ) + labs(
        x="Cluster",
        y="Cells (#)",
        title="Cluster origin",
        subtitle="Shows from which sample-specific clusters the final clusters derive.\nExcludes contributions <5 cells per sample cluster"
    ) + scale_fill_manual( #Define coloring per sample based on colors from config$colors
        values=colorList
    ) + geom_text(
        aes(label=lapply(sourceCluster, function(x){strsplit(as.character(x),'_')[[1]][2]})),
        position = position_stack(vjust = 0.5),
        angle=if(orientation=='horizontal') 90 else 0, #Tilt x-axis labels only when numbers go above 9
        size=5/.pt
    ) + scale_x_discrete(
        breaks=unique(stackedBarSource$destinationCluster),
        labels=lapply(unique(stackedBarSource$destinationCluster), function(x){as.character(strsplit(as.character(x),'_cl.')[[1]][2])})
    ) + theme_classic(
    ) + theme(
        axis.text.x = element_text(angle = 45, hjust = 1, vjust=1),
        panel.border = element_rect(colour = "black", fill=NA, size=1),
        axis.line = element_blank(),
        legend.title = element_text(size=rel(0.75)),
        legend.text = element_text(size=rel(0.5)),
        plot.subtitle = element_text(size=rel(0.5))
    )
    if(orientation == 'horizontal') {
        clusterOriginGraph = clusterOriginGraph+coord_flip()
    }
    # NOTE(review): ggsave() is called without plot=, so it saves ggplot2's
    # notion of the "last plot"; consider passing plot=clusterOriginGraph
    # explicitly to be safe -- confirm this saves the intended figure.
    if(outputMode != 'show') {
        ggsave(paste0('clusterOriginBarplot_clusterResolution.',outputMode), path=saveDir)
    } else {
        print(clusterOriginGraph)
    }
    #Draw a plot that shows which cluster in referenceGroup each of the cells from all group-specific cluster go to.
    clusterDestinationGraph = ggplot(
        data=stackedBarSource[which(stackedBarSource$number>5),],
        aes(x=sourceCluster, y=number, fill=as.character(lapply(destinationCluster, function(x){strsplit(as.character(x),'_cl.')[[1]][2]})))
    ) + geom_bar(
        stat='identity'
    ) + labs(
        x="Sample clusters",
        y="Cells (#)",
        fill="Destination cluster",
        title="Cluster destination",
        subtitle="Shows which final clusters the sample-specific clusters contribute to.\nExcludes contributions <5 cells"
    ) + geom_text(
        aes(label=lapply(destinationCluster, function(x){strsplit(as.character(x),'_cl.')[[1]][2]})),
        position = position_stack(vjust = 0.5)
    ) + theme_classic(
    ) + theme(
        plot.subtitle = element_text(size=rel(0.5)),
        axis.text.x = element_text(
            angle = 90,
            vjust = 0.5,
            hjust = 0
        )
    )
    if(orientation == 'horizontal') {
        clusterDestinationGraph = clusterDestinationGraph+coord_flip()
    }
    if(outputMode != 'show') {
        ggsave(paste0('clusterDestinationBarplot.',outputMode), path=saveDir)
    } else {
        print(clusterDestinationGraph)
    }
    #Generate new dataframe for a sample-resolution clusterOriginBarPlot:
    #per reference cluster, sum the overlaps over all clusters of each sample.
    clusterOriginSource = data.frame(
        destinationCluster = grep(referenceGroup,names(clusterList),value=T),
        stringsAsFactors = F
    )
    rownames(clusterOriginSource) <- grep(referenceGroup,names(clusterList),value=T)
    for(destinationCluster in clusterOriginSource$destinationCluster) {
        for(sourceSample in names(sourceGroups)) {
            clusterOriginSource[destinationCluster,sourceSample] = sum(comparisonTable[destinationCluster,grepl(sourceSample,colnames(comparisonTable))])
        }
    }
    clusterOriginSource <- melt(clusterOriginSource,id.vars='destinationCluster')
    colnames(clusterOriginSource) <- c('destinationCluster','sourceSample','number')
    # Left-pad the cluster numbers to width 2 so they sort naturally on the axis.
    clusterOriginSource$destinationCluster <- unlist(lapply(clusterOriginSource$destinationCluster, function(x){as.character(str_pad(strsplit(x,'_cl.')[[1]][2],2,side='left',pad=' '))}))
    #clusterOriginSource$destinationCluster <- as.integer(clusterOriginSource$destinationCluster)
    #Draw a plot that shows which samples the cells in each cluster of referenceGroup came from.
    clusterOriginGraph = ggplot(
        data=clusterOriginSource,
        aes(x=destinationCluster, y=number, fill=sourceSample)
    ) + geom_bar(
        stat='identity'
    ) + labs(
        x="Cluster",
        y="Cells (#)",
        fill="Sample",
        title="Cluster origin",
        subtitle="Shows from which samples the final clusters derive."
    ) + scale_fill_manual( #Define coloring per sample based on colors from config$colors
        values=unlist(config$colors)
    ) + theme_classic(
    ) + theme(
        axis.text.x = element_text(angle = 45, hjust = 1, vjust=1),
        panel.border = element_rect(colour = "black", fill=NA, size=1),
        axis.line = element_blank(),
        plot.subtitle = element_text(size=rel(0.5))
    )
    if(orientation == 'horizontal') {
        clusterOriginGraph = clusterOriginGraph+coord_flip()
    }
    if(outputMode != 'show') {
        ggsave(paste0('clusterOriginBarplot_sampleResolution.',outputMode), path=saveDir)
    } else {
        print(clusterOriginGraph)
    }
    # Assemble an Excel workbook with the raw data behind the plots (openxlsx).
    clusterComparisonWB = createWorkbook()
    #Write comparisonTable to the new sheet
    addWorksheet(clusterComparisonWB, sheetName='comparisonTable')
    writeDataTable(
        clusterComparisonWB,
        sheet = 'comparisonTable',
        comparisonTable
    )
    #Write stackedBarSource to the new sheet
    addWorksheet(clusterComparisonWB, sheetName = 'stackedBarSource')
    writeDataTable(
        clusterComparisonWB,
        sheet='stackedBarSource',
        stackedBarSource
    )
    #Save workbook
    if(outputMode != 'show') {
        saveWorkbook(clusterComparisonWB, paste0(saveDir,.Platform$file.sep,'clusterComparison.xlsx'))
    }
}
7eae230f74929e1c68567446f1f559d52c78d652 | 2975fba6bf359214c55e7d936f896a5a4be3d8f5 | /man/print.IPA.Rd | 0bb0d1117c8c984de68b9dadada7c0674e4fc266 | [] | no_license | tagteam/riskRegression | 6bf6166f098bbdc25135f77de60122e75e54e103 | fde7de8ca8d4224d3a92dffeccf590a786b16941 | refs/heads/master | 2023-08-08T03:11:29.465567 | 2023-07-26T12:58:04 | 2023-07-26T12:58:04 | 36,596,081 | 38 | 14 | null | 2023-05-17T13:36:27 | 2015-05-31T09:22:16 | R | UTF-8 | R | false | true | 402 | rd | print.IPA.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.IPA.R
\name{print.IPA}
\alias{print.IPA}
\title{Print IPA object}
\usage{
\method{print}{IPA}(x, digits = 2, ...)
}
\arguments{
\item{x}{Object obtained with \code{IPA}}
\item{digits}{Number of digits}
\item{...}{passed to print}
}
\description{
Print method for IPA
}
\author{
Thomas A. Gerds <tag@biostat.ku.dk>
}
|
7cc1c425e48cff1cd79a1df253085fe36fdbd285 | dabcee30ccdc3f67bff6dc7defa57b1199b6ed1e | /man/get_html_meta_from_article.Rd | 504f363cb2aff5b4cd92b98a2ec34bc7cc9d5842 | [] | no_license | cran/ojsr | c58bac603ecaaa3e767f8f2de2fdc6720850bfea | 009476091d5ec8556dd69b1e9435844bbe21503f | refs/heads/master | 2022-12-25T07:14:46.902875 | 2020-09-23T22:50:03 | 2020-09-23T22:50:03 | 258,766,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,064 | rd | get_html_meta_from_article.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrapers.R
\name{get_html_meta_from_article}
\alias{get_html_meta_from_article}
\title{Scraping metadata from the OJS articles HTML}
\usage{
get_html_meta_from_article(input_url, verbose = FALSE)
}
\arguments{
\item{input_url}{Character vector.}
\item{verbose}{Logical.}
}
\value{
A long-format dataframe with the url you provided (input_url), the name of the metadata (meta_data_name),
the content of the metadata (meta_data_content), the standard in which the content is annotated (meta_data_scheme),
and the language in which the metadata was entered (meta_data_xmllang)
}
\description{
Takes a vector of OJS URLs and scrapes all metadata written in HTML from the article view
}
\examples{
\donttest{
articles <- c(
'https://publicaciones.sociales.uba.ar/index.php/psicologiasocial/article/view/2137', # article
'https://dspace.palermo.edu/ojs/index.php/psicodebate/article/view/516/311' # xml galley
)
metadata <- ojsr::get_html_meta_from_article(articles, verbose = TRUE)
}
}
|
da2041fe0cb7b0661d0485f7cdffaa6b12159d31 | eb4782952de8f1a5f5c716f0485411101aaeafc5 | /R/zzz.R | e18bd03daadaebcdcff53470c44edd508d431347 | [] | no_license | quanteda/readtext | 0518b1746b496b77017d504f491d88529e4e0aea | 647c46510fbc09b605cb46e38873f68ff157b858 | refs/heads/master | 2023-06-09T01:24:38.597751 | 2023-06-03T16:42:07 | 2023-06-03T16:42:07 | 71,951,499 | 71 | 25 | null | 2022-12-01T12:42:45 | 2016-10-26T00:47:47 | R | UTF-8 | R | false | false | 94 | r | zzz.R | .onAttach <- function(...) {
# set user options
readtext_options(initialize = TRUE)
}
|
9b47a8ab1153e68ae6a54baedc9f016041e0f3b8 | 77157987168fc6a0827df2ecdd55104813be77b1 | /MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612726666-test.R | 191833d1e0ce2f084838ffe127719c2294af639f | [] | no_license | akhikolla/updatedatatype-list2 | e8758b374f9a18fd3ef07664f1150e14a2e4c3d8 | a3a519440e02d89640c75207c73c1456cf86487d | refs/heads/master | 2023-03-21T13:17:13.762823 | 2021-03-20T15:46:49 | 2021-03-20T15:46:49 | 349,766,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 338 | r | 1612726666-test.R | testlist <- list(latLongs = structure(c(2.46513676350942e-307, 4.55676310951313e-120, 4.65715466191341e-10, 1.87655515406974e+179, 8444249301319680, 9.35990297866088e-307, 2.48104025832519e-265, 8444249301319680, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 3L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
str(result) |
b9dea54490f97eefdb14ced75e131f50ed004369 | 8e74dea03fddfe4fd77b92473c7dade31809ce10 | /R/find_place.R | 6a8bf7518d4aa4415f7790e40c8f9b1025b58c4e | [] | no_license | SophieSt/Lesson6 | 4a695561817da3e2526726c067149c4190e26a9e | 3388035c2f9dfa24fd2b9961452c06bd8ae1b6fc | refs/heads/master | 2021-01-11T20:32:00.604882 | 2017-01-16T16:47:13 | 2017-01-16T16:47:13 | 79,136,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 337 | r | find_place.R | # Find the place (i.e. a city) that intersects with this buffer.
# Find the place(s) (e.g. a city) whose geometry intersects the first feature
# of `buffer`.
#
# points: SpatialPointsDataFrame of candidate places
# buffer: Spatial* object; only its first feature ([1, ]) is tested
# Returns the subset of `points` that intersects the buffer; a logical
# `in_buf` column with the per-point test result is attached to the data slot.
find_place <- function(points, buffer) {
  # gIntersects(..., byid = TRUE) yields one logical per point; flatten the
  # single-column matrix to a plain logical vector.
  hits <- as.vector(gIntersects(buffer[1, ], points, byid = TRUE))
  points@data$in_buf <- hits
  # BUG FIX: subset on the logical directly instead of comparing it to the
  # string "TRUE" (which relied on implicit logical -> character coercion).
  points[hits, ]
}
|
80a8121aaebd056fbc3ca8c645d9f7ee596a695a | 387262d2c0dea8a553bf04e3ff263e14683ea404 | /R&S_app_v3/compareNewToPreviousIRdataFormats/compareNewSedimentmetals.R | 327224c50d1c232f7a0e78de301da01d94f95f9a | [] | no_license | EmmaVJones/Rivers-StreamsAssessment | 3432c33d7b53714d3288e1e3fee335dd6fb2af1c | 580cfaa7edbd7077a2627a128a02c3c6ee195f4d | refs/heads/master | 2020-04-24T09:40:03.613469 | 2019-10-18T13:44:07 | 2019-10-18T13:44:07 | 171,486,365 | 0 | 0 | null | 2019-02-19T20:18:56 | 2019-02-19T14:17:14 | R | UTF-8 | R | false | false | 532 | r | compareNewSedimentmetals.R | SmetalsOld <- read_excel('data/SEDIMENT_20170712.xlsx') %>% #fix column name duplications
dplyr::select(FDT_STA_ID,`ACENAPHTHENE..194`:`COMMENT..227`)
names(SmetalsOld)[2:35] <- gsub( "[..].*", "", names(Smetals)[2:35] )
Smetals <- read_excel('data/draft2020data/CEDSWQM_2020_IR_DATA-CEDSWQM_SEDIMENT_20190213.xlsx') %>%
dplyr::select(FDT_STA_ID:ZINC..70, COMMENT..89)
names(Smetals) <- gsub( "[..].*", "", names(Smetals)) # remove anything after .. in name
FDT_STA_ID:ENDRINT; FDT_STA_ID:FDT_COMMENT, ARSENIC..54:COMMENT..89 |
a5bdeaa84ba5923a3cf909aef0fad1748412250c | 73ab02fcd0565ed63aeec6797822842086c49628 | /Rlabkey/man/labkey.getLookupDetails.Rd | 285f89e6a115e91d8080b3f4a032e2fd789c0113 | [
"Apache-2.0"
] | permissive | LabKey/labkey-api-r | 8ff35a24fccc53e67b3f94f8625adef250b22546 | fef750e9db30f21a0a9ccc311c495b2462cb2b8d | refs/heads/develop | 2023-08-31T22:09:17.263783 | 2023-08-18T21:35:14 | 2023-08-18T21:35:14 | 54,238,866 | 4 | 10 | Apache-2.0 | 2023-08-18T21:35:16 | 2016-03-19T00:15:14 | R | UTF-8 | R | false | false | 2,980 | rd | labkey.getLookupDetails.Rd | \name{labkey.getLookupDetails}
\alias{labkey.getLookupDetails}
\title{Retrieve detailed information on a LabKey query}
\description{
Fetch a list of output columns and their attributes from the query referenced by a lookup field}
\usage{
labkey.getLookupDetails(baseUrl, folderPath,
schemaName, queryName, lookupKey)
}
\arguments{
\item{baseUrl}{a string specifying the address of the LabKey Server, including the context root}
\item{folderPath}{a string specifying the hierarchy of folders to the current folder (container) for the operation, starting with the project folder }
\item{schemaName}{a string specifying the schema name in which the query object is defined}
\item{queryName}{a string specifying the name the query}
\item{lookupKey}{a string specifying the qualified name of a lookup field (foreign key) relative to the query specified by queryName }
}
\details{
When \code{getQueryDetails} returns non-NA values for the lookupQueryName, the \code{getLookupDetails} function can be called
to enumerate the fields from the query referenced by the lookup. These lookup fields can be added to the \code{colSelect} list of \code{selectRows}.
}
\value{
The available schemas are returned as a data frame, with the same columns
as detailed in \code{\link{labkey.getQueryDetails}}
}
\author{Peter Hussey, peter@labkey.com}
\seealso{
{Retrieve data: } \code{\link{labkey.selectRows}},\code{\link{makeFilter}}, \code{\link{labkey.executeSql}} \cr
{Modify data: } \code{\link{labkey.updateRows}}, \code{\link{labkey.insertRows}}, \code{\link{labkey.importRows}}, \code{\link{labkey.deleteRows}}\cr
{List available data: } \code{\link{labkey.getSchemas}}, \code{\link{labkey.getQueries}}, \code{\link{labkey.getQueryViews}},
\code{\link{labkey.getQueryDetails}}, \code{\link{labkey.getDefaultViewDetails}}
}
\examples{
\dontrun{
## Details of fields of a query referenced by a lookup field
# library(Rlabkey)
lu1 <- labkey.getLookupDetails(
baseUrl="http://localhost:8080/labkey",
folderPath="/apisamples",
schemaName="lists",
queryName="AllTypes",
lookupKey="Category"
)
lu1
## When a lookup field points to a query object that itself has a lookup
## field, use a compound fieldkey consisting of the lookup fields from
## the base query object to the target lookupDetails, separated by
## forward slashes
lu2<- labkey.getLookupDetails(
baseUrl="http://localhost:8080/labkey",
folderPath="/apisamples",
schemaName="lists",
queryName="AllTypes",
lookupKey="Category/Group"
)
lu2
## Now select a result set containing a field from the base query, a
## field from the 1st level of lookup, and one from the 2nd
rows<- labkey.selectRows(
baseUrl="http://localhost:8080/labkey",
folderPath="/apisamples",
schemaName="lists",
queryName="AllTypes",
colSelect=c("DisplayFld","Category/Category","Category/Group/GroupName"),
colFilter = makeFilter(c("Category/Group/GroupName",
"NOT_EQUALS","TypeRange")), maxRows=20
)
rows
}
}
\keyword{IO}
|
f19142f08a8a77b774c2649d9b91c0c18b3675b7 | 3cc5e4163beab8c7e71e8f6463717195c7d2f103 | /AssociationFunctions/PathwayClustering.R | fc918cba32371d761dac3c645a09736433d08b83 | [
"MIT"
] | permissive | JEstabrook/Omics-QC-pipeline | fc7c514b8df1e559b56b518b619e56c78214d0f8 | e165bd1b1df0b09c268fc5e562c99104344e1df8 | refs/heads/master | 2021-06-23T18:50:12.274069 | 2018-06-29T23:09:52 | 2018-06-29T23:09:52 | 118,011,848 | 0 | 1 | null | 2019-05-06T17:52:31 | 2018-01-18T16:59:15 | Jupyter Notebook | UTF-8 | R | false | false | 23,642 | r | PathwayClustering.R | #*******************************************************************************************
# Functions to summarize AssociationFunction results
#
# Implemented
# clusterGO = clusters GO hierarchy by direct parent/child relationships
# coldClusters Ranks pathway clusters from coldMap
# makeColdMap = Creates a coldmap from an occurrence matrix (OM)
# mergeOM = merges occurrence matrices. Could also be used to trim OM
#
# To be implemented...
# gsHeatMap = Creates a heatmap from ColdMap clusters - may not be necessary
#
#*******************************************************************************************
#******* makeColdMap ***********************************************************************
# Makes a "cold map" from a true/false occurrence matrix
# Assumes that the OM has been trimmed to an appropriate size prior to the function call
#
# Arguments
# OM: occurrence matrix returned by ags.wrapper.
# Rows = gene names
# Columns = pathway names
# plotdata: list of info relevant to labeling and saving the plot
# plotdir: plot destination directory
# plotbase: base filename for the plot
# plottitle: title for all plots
# pngres: resolution for png files; defaults here to 600 dpi
# pngsize: c(width, length); defaults to c(15, 10)
#' Render a "cold map" — a blue-scale heatmap of a TRUE/FALSE occurrence
#' matrix (rows = genes, columns = pathways) — and save it as a PNG.
#' The OM is assumed to be pre-trimmed to a plottable size by the caller.
#'
#' @param OM logical occurrence matrix from ags.wrapper().
#' @param plotdata list: plotdir, plotbase, plottitle, pngres (dpi, default
#'   600) and pngsize (c(width, height) inches, default c(15, 10)).
#' @return the object returned by makeHeatmap().
makeColdMap = function (OM, plotdata = list(plotdir = NULL, plotbase = NULL, plottitle = NULL, pngres = NULL, pngsize= NULL)) {
  # Fill in defaults for any unspecified output settings
  if (is.null(plotdata$pngres)) plotdata$pngres = 600
  if (is.null(plotdata$pngsize)) plotdata$pngsize = c(15, 10)
  # Recast the logical OM as a numeric matrix, carrying the dimnames along
  numOM = matrix(as.numeric(OM), nrow = nrow(OM), ncol = ncol(OM),
                 dimnames = list(rownames(OM), colnames(OM)))
  message(sprintf("OM is a matrix with %i rows and %i Columns", nrow(OM), ncol(OM)))
  # Sanity check: the conversion must preserve every row and column total
  if (sum(colSums(OM) == colSums(numOM)) != ncol(OM)) {
    message("Column data does not match in OM conversion from logical to numeric")
  }
  if (sum(rowSums(OM) == rowSums(numOM)) != nrow(OM)) {
    message("Row data does not match in OM conversion from logical to numeric")
  }
  # One blue shade per distinct cell value (normally 2: absent / present)
  nShades = length(unique(as.vector(numOM)))
  colorspec = paste("Blues", nShades, sep = ":")
  message(sprintf("pngres = %i, width = %i, height = %i", plotdata$pngres, plotdata$pngsize[1], plotdata$pngsize[2]))
  message(sprintf("plotting to %s/%s", plotdata$plotdir, plotdata$plotbase))
  png(filename = sprintf('%s/%s_coldmap.png', plotdata$plotdir, plotdata$plotbase),
      width = plotdata$pngsize[1], height = plotdata$pngsize[2], units = "in", res = plotdata$pngres)
  coldMap = makeHeatmap(ratiomat = numOM, attribs = NA, plottitle = plotdata$plottitle, clim.pct = 1, cexRow = 1,
                        colorbrew = colorspec, labcoltype = "colnames", labRowType = 'genelabels')
  closeDevices()
  return(coldMap)
}
#******* clusterGO *******************************************************************
# Converts a GO gene signature (GS) and occurrence matrix (OM) into a clustered GS/OM (CGS/COM)
# Two criteria must be met for each level of clustering
# 1. There must be a direct Child "is_a" or "part_of" Parent relationship between terms
# 2. All Child genes must be present in the Parent term (this may be true by default, but check)
# The Cluster name will be identified by the "eldest" term, with the addition of
# (C-n) indicating that n offspring terms have been Clustered in that term
# An additional CGOxx term will be returned detailing the clustered GS/OM
#
# Arguments
# GS - gene signature data.table returned by ags.wrapper(); must be GO annotation!
# OM - occurrence matrix returned by ags.wrapper
# ontology - limited to CC, MF, and BP. Other selections will fail.
# plotdata: list of info relevant to saving the clustering information
# Default = NULL - don't write a file
# plotdir: plot destination directory; unspecified will write to getwd()
# plotbase: base filename for the plot
# AS = NULL; optional - specify a version of the annotation set to use.
# Note: annotation version number selection is not yet supported...
#
# Returns
# allCGS - clustered gene signature data.table of all parent:offspring relationships
# CGS - clustered gene signature data.table of parent terms
# COM - clustered occurrence matrix
# CAS - clustering association settings
#' Cluster a GO gene signature (GS) and its occurrence matrix (OM) by direct
#' parent/offspring relationships within a single GO ontology.
#'
#' Two criteria define a cluster: (1) a direct "is_a"/"part_of" child->parent
#' relationship between terms, and (2) every offspring gene is contained in
#' the parent term's gene set (checked below; violations only emit a message).
#'
#' @param GS gene signature data.table returned by ags.wrapper(); GO only.
#' @param OM occurrence matrix returned by ags.wrapper (genes x pathways).
#' @param ontology one of "CC", "MF", "BP" (one ontology per call).
#' @param clusterannot naming style for clusters — NOT yet implemented.
#' @param plotdata optional output-location list — NOT yet implemented.
#' @param AS optional annotation-set version — NOT yet supported.
#' @return list(allCGS = all parent:offspring rows, COM = clustered OM,
#'   CGS = parent-term rows only); an error string on bad arguments.
clusterGO = function (GS, OM, ontology, clusterannot = c("parent", "eval", "enrichment"), plotdata = NULL, AS = NULL) {
  ## ---- Argument checks ----
  exitErrors = FALSE
  if (!is.data.table(GS)) {
    message("GS argument should be data.table returned by ags.wrapper()")
    if (is.list(GS)) {message("List argument should be broken up to individual data.table")}
    exitErrors = TRUE
  }
  if(!is.matrix(OM)) {
    # NOTE(review): this branch only warns and does NOT set exitErrors, so a
    # non-matrix OM (e.g. a data.frame) is allowed through — confirm intended.
    message("OM argument should be a matrix, as returned by ags.wrapper(). Note that this is an option, not default.")
    if (is.list(OM)) {message("List argument should be broken up to individual data.table")}
  }
  if(!(ontology %chin% c("CC", "BP", "MF"))) {
    message("ontology argument must be CC, BP, or MF")
    exitErrors = TRUE
  }
  if (exitErrors) {return("ERROR: check arguments")}

  ## ---- Load the GO relationship tables ----
  # TODO: hard-coded local path and snapshot date; should eventually come from
  # a single bundled annotation data set (see AS argument).
  GOdirectory = "/Users/lusardi/Documents/association_resources/"
  GOfile = "GO.2017.02.20_all.RData"
  load(paste(GOdirectory, GOfile, sep = '/'))
  go.Offspring = GOrelations[[paste("GO", ontology, sep = '')]]$Offspring
  go.Parents = GOrelations[[paste("GO", ontology, sep = '')]]$Parents
  go.Children = GOrelations[[paste("GO", ontology, sep = '')]]$Children

  ## ---- Unpack Set.Name into ontology letter / GOID / description ----
  # Set.Name is fixed-format (e.g. "P GO:0001234 description"), so the GOID is
  # rebuilt from character positions.
  set.Names = as.data.table(GS[,Set.Name])
  colnames(set.Names) = "Set.Name"
  set.Names[, "Ontol" := substr(Set.Name, 1,1)]
  set.Names[, "GOID" := paste(substr(Set.Name, 3,4), substr(Set.Name, 6,12), sep = ":")]
  set.Names[, "Description" := substring(Set.Name, 14)]
  # Merge set.Names with GS for easy reference
  expandedGS = merge(x = GS, y = set.Names, by = "Set.Name")

  # All sets must belong to the single requested ontology
  if (sum(set.Names$Ontol == substr(ontology,2,2)) != nrow(set.Names)) {
    message("ERROR: clusterGO only handles one ontology at a time currently (MF, CC, or BP)")
    message(sprintf("GS contains %i different ontologies %s, %s specified in arguments",
                    length(unique(set.Names$Ontol)), paste(unique(set.Names$Ontol), collapse = ","),
                    ontology))
    return("ERROR: Ontology mismatch - no clustering performed")
  }

  ## ---- Restrict the relationship tables to terms present in GS ----
  my.Parents = go.Parents[set.Names$GOID]
  my.Offspring = go.Offspring[set.Names$GOID]
  my.Children = go.Children[set.Names$GOID]   # currently unused; kept for symmetry

  ## ---- Identify "senior" terms (no parent terms within this data set) ----
  # Terms entirely absent from the parent table (empty if none)...
  Seniors = set.Names$GOID[!(set.Names$GOID %chin% names(my.Parents))]
  # ...plus terms with no "is_a"/"part_of" parent that is itself in the GS.
  nsig.Parents = lapply(my.Parents, function(x) sum((x %chin% set.Names$GOID) & (names(x) %chin% c("is_a", "part_of"))))
  Seniors = c(Seniors, names(nsig.Parents[nsig.Parents == 0]))

  ## ---- Build one cluster per senior term (senior + its in-set offspring) ----
  # Note: OFFSPRING relations can be indirect and carry no relationship names.
  clusterList = list()
  parentOffspring_dt = data.table(ParentGO = character(), OffspringGO = character(),
                                  minEflag = logical(), maxEnrichFlag = logical(),
                                  clustern = integer())
  for (mySenior in Seniors) {
    # Terms with no offspring come back as a logical NA; treat as a singleton
    if(!is.logical(my.Offspring[[mySenior]])) {
      clusterList[[mySenior]] = c(mySenior, my.Offspring[[mySenior]][(my.Offspring[[mySenior]] %chin% set.Names$GOID)])
      # Flag the cluster member(s) with the best E-value / fold enrichment
      minEVal = expandedGS[GOID %chin% clusterList[[mySenior]], E.Value] ==
        min(expandedGS[GOID %chin% clusterList[[mySenior]], E.Value])
      maxEnrich = expandedGS[GOID %chin% clusterList[[mySenior]], FoldEnrich] ==
        max(expandedGS[GOID %chin% clusterList[[mySenior]], FoldEnrich])
    } else {
      clusterList[[mySenior]] = mySenior
      minEVal = TRUE
      maxEnrich = TRUE
    }
    # Append this cluster's rows to the parent/offspring summary table
    parentOffspring_dt = rbind(parentOffspring_dt, cbind(ParentGO = rep(mySenior, length(clusterList[[mySenior]])),
                                                         OffspringGO = clusterList[[mySenior]],
                                                         minEflag = minEVal, maxEnrichFlag = maxEnrich,
                                                         clustern = rep(length(minEVal), length(minEVal))))
  }

  ## ---- Annotate the summary table and join back to the full GS ----
  parentOffspring_dt[, ':=' (Ontology = rep(ontology, nrow(parentOffspring_dt)),
                             Level = ifelse(ParentGO == OffspringGO, "Parent", "Offspring"),
                             OffspringDescription = sapply(OffspringGO, function(x) set.Names[GOID == x, Description]),
                             Set.Name = sapply(OffspringGO, function(x) set.Names[GOID == x, Set.Name]))]
  allCGS = setorder(merge(parentOffspring_dt, GS, by = "Set.Name"), ParentGO)

  ## ---- Sanity check: every pathway lands in at least one cluster ----
  allpways = unique(as.character(unlist(clusterList)))
  if(sum(allpways %chin% set.Names$GOID) != length(set.Names$GOID)) {
    message(sprintf("Pathway clustering is off... %i pathways considered, %i pathways clustered",
                    nrow(set.Names), length(allpways)))
    message("returning all data for evaluation")
    # BUGFIX: was return(allCGS = allCGS) — a named argument to return() is an
    # R error ("multi-argument returns are not permitted").
    return(allCGS)
  }

  ## ---- Sanity check: offspring genes must all be in the parent term ----
  for (mySenior in Seniors) {
    seniorGenes = OM[, set.Names[GOID == mySenior, Set.Name]]
    for (mypway in clusterList[[mySenior]]) {
      # BUGFIX: was myOM, which is undefined in this function's scope.
      offspringGenes = OM[, set.Names[GOID == mypway, Set.Name]]
      if(sum(offspringGenes & (seniorGenes == offspringGenes)) != sum(offspringGenes)) {
        message(sprintf("Genes in Pathway %s are not all contained in Pathway %s", mypway, mySenior))
      }
    }
  }

  # Renaming of CGS/COM columns per clusterannot is not yet implemented;
  # cluster.Names is computed in anticipation of that feature.
  cluster.Names = set.Names[GOID %chin% Seniors,]
  # The clustered OM keeps only the parent terms' columns
  COM = OM[, allCGS[Level == "Parent", Set.Name]]
  return(list(allCGS = allCGS, COM = COM, CGS = allCGS[Level == "Parent",]))
}
#*******************************************************************************************
#******* coldClusters *******************************************************************
# coldClusters returns a table consisting of cluster sizes and scores based on dendrogram heights
# used to identify clusters of pathways regulated by the same genes for heatmapping
# Height = 0 will return pathways with identical gene signatures, at the max value, all pathways clustered
#
# Arguments
# coldmap = structure returned by makeheatmap run on an OM
# OM = Occurrence Matrix used to generate the coldmap
# minCllustSize = minimum number of pathways in a cluster
# maxPctGenes = excludes clusters representing > % of genes
# quantCut = sets the max distance quantile for clustering of clusters
#
# Returns
# coldCluster_dt = data table with cluster scoring information
#
#' Enumerate and score pathway clusters cut from a coldmap's column dendrogram.
#'
#' For every unique merge height, clusters of pathways are extracted and scored
#' by the fraction of OM genes they cover. Diagnostic dendrogram plots are
#' written to ./k.clusters (always) and ./h.clusters (when exploreClust).
#'
#' @param coldmap structure returned by makeHeatmap() run on an OM
#'   (its $Colv member is the pathway dendrogram).
#' @param OM occurrence matrix used to generate the coldmap (genes x pathways).
#' @param minClustSize minimum number of pathways for a cluster to be scored.
#' @param maxPctGenes exclude clusters covering more than this fraction of genes.
#' @param exploreClust also write one colored dendrogram PNG per height cut.
#' @return list(cluster_dt, clusterInclude_dt, clusterOM, OMdist).
coldClusters = function(coldmap, OM, minClustSize = 1, maxPctGenes = 1, exploreClust = FALSE) {
  # Pathway (column) dendrogram from the coldmap, plus its hclust form
  pathway_hcd = coldmap$Colv
  pathway_hc = as.hclust(pathway_hcd)
  heights = unique(pathway_hc$height)

  # Easily distinguished 12-color palette, recycled as needed.
  # BUGFIX: previously defined only inside the exploreClust branch, so the
  # always-run k-cut plots below failed with "object 'colors' not found"
  # whenever exploreClust = FALSE.
  colors = c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#ffff99','#b15928')

  # Helper: build the dendrapply callback that colors each leaf label by its
  # cluster assignment (adapted from https://rpubs.com/gaston/dendrograms).
  # Extracted here because the same closure was duplicated in both plot loops.
  makeColLab = function(clustAssign, labelColors) {
    function(n) {
      if (is.leaf(n)) {
        a <- attributes(n)
        labCol <- labelColors[clustAssign[which(names(clustAssign) == a$label)]]
        attr(n, "nodePar") <- c(a$nodePar, lab.col = labCol)
      }
      n
    }
  }

  # Optional diagnostic: one colored dendrogram per unique merge height
  if (exploreClust) {
    h.dir = paste(getwd(), "h.clusters", sep = "/")
    if (!dir.exists(h.dir)) {
      dir.create(h.dir)
    }
    index = 1
    for (myheight in heights) {
      clustAssign = cutree(pathway_hc, h = myheight)
      ncolors = length(unique(clustAssign))
      labelColors = rep(colors, ncolors %/% length(colors) + 1)[1:ncolors]
      clusDendro = dendrapply(pathway_hcd, makeColLab(clustAssign, labelColors))
      png_res = 450
      png(filename = sprintf('%s/%i.%s_%s.png', h.dir, index, "h.cluster", myheight), width=15,height=10,units="in",res=png_res)
      par(mar = c(30, 4, 4, 2))
      plot(clusDendro, cex = 0.3, main = sprintf("h = %1.3f", myheight), horiz = F)
      abline(a = myheight, b = 0, col = "red")   # mark the cut height
      dev.off()
      index = index + 1
    }
  }

  # Always: one colored dendrogram per k-cut (k = 1 .. number of merges)
  k.dir = paste(getwd(), "k.clusters", sep = "/")
  if (!dir.exists(k.dir)) {
    dir.create(k.dir)
  }
  index = 1
  for (mykcut in seq_along(pathway_hc$height)) {
    clustAssign = cutree(pathway_hc, k = mykcut)
    ncolors = length(unique(clustAssign))
    labelColors = rep(colors, ncolors %/% length(colors) + 1)[1:ncolors]
    clusDendro = dendrapply(pathway_hcd, makeColLab(clustAssign, labelColors))
    png_res = 144
    png(filename = sprintf('%s/%i.%s_%s.png', k.dir, index, "k.cluster", mykcut), width=15,height=10,units="in",res=png_res)
    par(mar = c(30, 4, 4, 2))
    plot(clusDendro, cex = 0.3, main = sprintf("k = %i", mykcut))
    dev.off()
    index = index + 1
  }
  # BUGFIX: removed a stray "return(pathway_dend)" here — pathway_dend was
  # never defined (the function always errored at this point) and the early
  # return made all of the scoring code below unreachable.

  # Score every distinct cluster produced by every height cut.
  # BUGFIX: Pathways/Genes were initialized with vector() (logical(0)); they
  # hold semicolon-joined strings, so character(0) is the correct type.
  cluster_dt = data.table(heightCut = numeric(0), cluster = integer(0), nPways = integer(0),
                          nGenes = integer(), ClusterScore = numeric(0), CS_pway = numeric(0),
                          Pathways = character(0), Genes = character(0))
  for (myheight in heights) {
    # BUGFIX: cutree was called on the undefined pathway_dend; use pathway_hc.
    clusters = cutree(pathway_hc, h = myheight)
    nclusters = unique(clusters)
    message(sprintf("myheight = %1.3f, nclusters = %i", myheight, length(nclusters)))
    for (mycluster in nclusters) {
      nPways = sum(clusters == mycluster)
      if (nPways >= minClustSize) {
        # Reuse the assignment computed above instead of re-running cutree
        pways = pathway_hc$labels[clusters == mycluster]
        pway_str = paste(pways, sep = '', collapse = ';')
        # Skip clusters already recorded at a previous height
        if (is.na(match(pway_str, cluster_dt$Pathways))) {
          ngenes = 0
          # BUGFIX: was logical(length(nrow(OM))) == logical(1); the code only
          # worked by accidental recycling in the "|" below.
          geneIDs = logical(nrow(OM))
          for (mypw in pways) {
            ngenes = ngenes + sum(OM[,mypw])
            geneIDs = geneIDs | (OM[, mypw] == 1)
          }
          geneList = rownames(OM)[geneIDs]
          gene_str = paste(geneList, sep = '', collapse = ';')
          ClusterScore = ngenes/nrow(OM)   # gene coverage (with multiplicity)
          row2add = data.table(heightCut = myheight, cluster = mycluster, nPways = nPways,
                               nGenes = sum(geneIDs), ClusterScore = ClusterScore, CS_pway = ClusterScore/nPways,
                               Pathways = pway_str, Genes = gene_str)
          cluster_dt = rbindlist(list(cluster_dt, row2add))
        }
      }
    }
  }
  cluster_dt[, clustID := .I]

  # Distance matrix between cluster gene sets, to visualize cluster hierarchy
  clusterInclude_dt = cluster_dt[nGenes <= nrow(OM)*maxPctGenes,]
  rows2eval = nrow(clusterInclude_dt)
  clusterOM = matrix(data = logical(), nrow = nrow(OM), ncol = rows2eval)
  rownames(clusterOM) = rownames(OM)
  message("clusterInclude_dt has ", nrow(clusterInclude_dt), " rows\n")
  for (cluster in seq_len(rows2eval)) {   # seq_len: safe when rows2eval == 0
    clustGenes = unlist(strsplit(clusterInclude_dt[cluster, Genes], split = ';'))
    clusterOM[, cluster] = rownames(clusterOM) %chin% clustGenes
  }
  OMdist = as.matrix(dist(t(clusterOM), diag = TRUE))
  # NOTE(review): levelplot() comes from the lattice package — confirm it is
  # loaded by the calling script.
  levelplot(OMdist[1:ncol(OMdist), ncol(OMdist):1])
  # Clustering of clusters by a quantile cutoff (quantCut) is not implemented.
  return(list(cluster_dt = cluster_dt, clusterInclude_dt = clusterInclude_dt, clusterOM = t(clusterOM), OMdist = OMdist))
}
#******* mergeOM ***********************************************************************
# Merges occurrence matrices (OM). Genes/pathways are included according to geneRules, minPways, minGenes
#
# Arguments
# OM_ls - list of OMs. List elements must be named
# mergeOnt - vector OMs to merge. Vector elements must be named, name must match the OM_ls element names
# - element value must be the idtype from agswrapper - in Settings[[Ontology]]$idtype
# - for example: mergeOnt = c("kegg" = "Entrez.ID", "GOBP" = "Uniprot")
# geneRules - Defaults to "intersection", includes only genes in all OMs specified in mergeOnt
# - Also accepts "union", which includes all genes in any OM specified in mergeOnt
# - Also accepts a vector of gene names to inlcude in the merged OM
# background - this should be the same list used for pway analysis correlating assay targets, Uniprot, Entrez, etc
# geneAnno - the annotation source that should be used for the mergedOM; must correspond to one of the columns in background
#
# Not implemented....
# minPways - minimum number of pathways that a gene must be present in for inclusion in merged OM
# - defaults to 1
# minGenes - minimum number of genes that a pathway must have to include in the merged OM
# - defaults to 1
#
# Returns
# mergedOM - logical matrix with colnames = pathway names, rownames = genenames
#' Merge occurrence matrices (OMs) onto a common gene annotation.
#'
#' Each OM's rownames are translated from its native gene-ID type (given by
#' mergeOnt) to the target annotation geneAnno via the background table, then
#' the matrices are column-bound over the gene set selected by geneRules.
#'
#' @param OM_ls named list of OMs (genes x pathways).
#' @param mergeOnt named character vector: names = OM_ls elements to merge,
#'   values = that OM's idtype column in background,
#'   e.g. c("kegg" = "Entrez.ID", "GOBP" = "Uniprot").
#' @param geneRules "intersection" (default, genes in ALL OMs), "union"
#'   (genes in ANY OM), or an explicit vector of gene names in geneAnno terms.
#' @param background data.table of gene-ID translations (same list used for
#'   the pathway analysis).
#' @param geneAnno background column to use as the merged OM's row annotation.
#' @param minPways,minGenes filters — NOT yet implemented.
#' @return logical matrix (rownames = genes, colnames = pathway names), or an
#'   error string on bad arguments.
mergeOM = function(OM_ls, mergeOnt, geneRules = "intersection", background, geneAnno, minPways = 1, minGenes = 1) {
  ## ---- Argument checks ----
  if (sum(names(mergeOnt) %chin% names(OM_ls)) != length(mergeOnt)) {
    message("ERROR (mergeOM): elements of mergeOnt MUST match names of OM_ls")
    return("mergeOM ERROR")
  }
  if (!(geneAnno %chin% colnames(background))) {
    message("ERROR (mergeOM): geneAnno must match a column in background")
    return("mergeOM ERROR")
  }
  # Every idtype in mergeOnt must be a column of background. FIX: the exit
  # check used to sit inside the loop, so only the first missing idtype was
  # ever reported; now all failures are messaged before exiting.
  idtypes = unique(mergeOnt)
  idok = TRUE
  for (myid in idtypes) {
    if (!(myid %chin% colnames(background))) {
      message("ERROR (mergeOM): mergeOnt element names must match a column in background")
      idok = FALSE
    }
  }
  if (!idok) {
    message("ERROR (mergeOM): Exiting due to missing idtypes")
    return("mergeOM ERROR")
  }

  # Start from all genes known to background (narrowed/extended per geneRules)
  incGenes = unique(background[, get(geneAnno)])
  ## ---- Re-annotate each OM's rownames to geneAnno ----
  OM2Merge = list()
  if (geneRules %chin% c("intersection", "union")) {
    for (myOnt in names(mergeOnt)) {
      myOM = OM_ls[[myOnt]]
      ontAnno = mergeOnt[myOnt]
      if (!(ontAnno %chin% colnames(background))) {
        # BUGFIX: this message referenced the undefined variable myanno.
        message(sprintf("ERROR (mergeOM): idtype %s for %s not present in background", ontAnno, myOnt))
        return("mergeOM ERROR")
      }
      if (ontAnno != geneAnno) {
        # Limit background to just the two annotations so duplicates driven by
        # other columns are excluded, then enforce one geneAnno per ontAnno id.
        trimBkgnd = unique(background[, mget(c(ontAnno, geneAnno))])
        if (length(trimBkgnd[get(ontAnno) %in% rownames(myOM), get(ontAnno)]) > nrow(myOM)) {
          # One ontAnno id maps to several geneAnno ids: collapse them "a|b"
          extras = trimBkgnd[, .N, by = get(ontAnno)][N > 1, get]
          geneAnno_term = unlist(lapply(extras, function(X) paste(trimBkgnd[get(ontAnno) == X, get(geneAnno)], collapse = '|')))
          trimBkgnd = trimBkgnd[!(get(ontAnno) %in% extras), ]
          clumped = data.table(extras, geneAnno_term)
          colnames(clumped) = c(ontAnno, geneAnno)
          trimBkgnd = rbind(trimBkgnd, clumped)
        }
        # NOTE(review): assumes the filtered trimBkgnd rows line up 1:1 and in
        # order with rownames(myOM) — verify against real data.
        rownames(myOM) = trimBkgnd[get(ontAnno) %in% rownames(myOM), get(geneAnno)]
      }
      OM2Merge[[myOnt]] = myOM
      # Update the running gene set under the selected rule
      incGenes = switch (geneRules,
                         intersection = intersect(incGenes, rownames(myOM)),
                         union = union(incGenes, rownames(myOM)) )
      message(sprintf("%s has %i rows, incGenes now %i long", myOnt, nrow(myOM), length(incGenes)))
    }
  } else if (is.vector(geneRules) & length(geneRules) > 1) {
    # BUGFIX: was sum(geneRules %chin% bg == length(geneRules)) — the length
    # comparison sat inside sum(), so any explicit gene list longer than one
    # element always fell into the error branch.
    if (sum(geneRules %chin% background[, get(geneAnno)]) == length(geneRules)) {
      incGenes = geneRules   # use the caller-specified gene list as-is
    } else {
      message(sprintf("ERROR (mergeOM): gene list specfied in geneRules does not match %s in background", geneAnno))
      return("mergeOM ERROR")
    }
    # Re-annotate each OM to geneAnno rownames
    for (myOnt in names(mergeOnt)) {
      myOM = OM_ls[[myOnt]]
      ontAnno = mergeOnt[myOnt]
      if (!(ontAnno %chin% colnames(background))) {
        # BUGFIX: this message referenced the undefined variable myanno.
        message(sprintf("ERROR (mergeOM): idtype %s for %s not present in background", ontAnno, myOnt))
        return("mergeOM ERROR")
      }
      rownames(myOM) = background[get(ontAnno) %chin% rownames(myOM), get(geneAnno)]
      OM2Merge[[myOnt]] = myOM
    }
  } else {
    message("ERROR (mergeOM): geneRules should be intersection, union, or a vector of genes")
    return("mergeOM ERROR")
  }

  ## ---- Column-bind the re-annotated OMs over incGenes ----
  mergedOM = matrix(nrow = length(incGenes), ncol = 0)
  rownames(mergedOM) = incGenes
  for (myOnt in names(mergeOnt)) {
    myOM = OM2Merge[[myOnt]]
    myOM = myOM[rownames(myOM) %chin% incGenes,]
    # match() aligns each OM's rows to the merged row order
    mergedOM = cbind(mergedOM, myOM[match(rownames(mergedOM), rownames(myOM)),])
  }
  # minPways / minGenes filtering is documented but not yet implemented
  return(mergedOM)
}
#******* End mergeOM *********************************************************************** |
65788973a1ae0bf3835a1fca9ffa80a0c67f7f6f | 7528b4b7f9c6101ccfac84aa791893e1f81896a4 | /man/build_all_posts.Rd | 49ec2582e4143e05e743dbe415bc7833d22ba40d | [] | no_license | naman159/blogbuilder | bacc8e58966895e5c847d32997446ea2f8cd73e3 | 6491cb1889c09a07d6e669bca04c9d4326340c71 | refs/heads/main | 2023-06-06T07:28:23.147192 | 2021-06-19T09:19:10 | 2021-06-19T09:19:10 | 375,204,975 | 0 | 0 | null | 2021-06-09T02:42:13 | 2021-06-09T02:42:12 | null | UTF-8 | R | false | true | 385 | rd | build_all_posts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build.R
\name{build_all_posts}
\alias{build_all_posts}
\title{Builds all blog posts}
\usage{
build_all_posts()
}
\description{
Builds all drafted blog posts for a DACSS course site.
The index page listing posts will be updated once
the Distill blog is built. Assumes a DACSS blog R project
is being used.
}
|
cbc192da160eb46f9db24bfcdd0f6a8d499d5d31 | 09bccab5fd88f77234fe5ad22d16cfcca10fdddf | /MLB_Prediction.R | a9a21f583fea9305eb9adbc6561cd9306c84ed75 | [] | no_license | kloudKlown/MLB_DK | c5eec15810b75dd4f08673ccd06e58d586d74d76 | 29d892d1e29651cd8a5d2946f47a49cb9d7b34e9 | refs/heads/master | 2020-03-19T03:48:58.224583 | 2018-06-01T21:33:13 | 2018-06-01T21:33:13 | 135,765,419 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 53,028 | r | MLB_Prediction.R | # ############ Load Library
# install.packages("corrplot")
# install.packages("brnn")
# install.packages("h2o")
# install.packages("randomForest")
# install.packages("Matrix")
# install.packages("xgboost")
# install.packages("stringdist")
# install.packages("varhandle")
# install.packages("mxnet")
library(Hmisc)
library(corrplot)
library(brnn)
library(h2o)
library(randomForest)
library(Matrix)
library(xgboost)
library(stringdist)
library(varhandle)
library(tidyr)
require(devtools)
#install_version("DiagrammeR", version = "0.9.0", repos = "http://cran.us.r-project.org")
require(DiagrammeR)
library(mxnet)
###########################################################
localH2O <- h2o.init()
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
# Read the 2016-17 batter file. In some rows the Salary and Pos fields are
# swapped; detect those rows (Salary not parseable as a number — the
# as.numeric() call emits a coercion warning for them) and swap the values.
Batters2016 = read.csv('Batters_16-17.csv')
for(each in 1:nrow(Batters2016)){
  if (is.na(as.numeric(as.character(Batters2016$Salary[each])))){
    # NOTE(review): Salary and Pos are factor columns here, so this
    # element-wise swap depends on matching level labels across the two
    # columns — verify no NAs are introduced by the assignment.
    p = Batters2016$Salary[each]
    Batters2016$Salary[each] = Batters2016$Pos[each]
    Batters2016$Pos[each] = p
  }
}
# Convert the factor columns to their underlying values: numeric salary and
# character position (standard factor-to-value idioms).
Batters2016$Salary = as.numeric(levels(Batters2016$Salary))[Batters2016$Salary]
Batters2016$Pos = levels(Batters2016$Pos)[Batters2016$Pos]
#' Compute weighted per-period scores for a player stat frame.
#'
#' Multiplies each period's points-per-streak column (PPerreak1..PPerreak9 —
#' spelling matches the source CSV headers) by the matching batting-average
#' column (BAvg1..BAvg9), producing weighted columns C1..C9.
#'
#' @param df data frame with PlayerName, Date, PLayerID plus the paired
#'   PPerreak1..9 and BAvg1..9 columns.
#' @return data frame with columns PlayerName, Date, PLayerID, C1..C9.
weightedAVG = function (df){
  # One loop replaces nine copy-pasted statements; i indexes the period.
  for (i in seq_len(9)) {
    df[[paste0("C", i)]] = df[[paste0("PPerreak", i)]] * df[[paste0("BAvg", i)]]
  }
  newDF = df[, c("PlayerName", "Date", "PLayerID", paste0("C", seq_len(9)))]
  return (newDF);
}
# Read the per-season batter/pitcher streak files, collapse each to the
# weighted C1..C9 columns, and rename the join key so they merge with the
# DFS frames below.
BBBatters_2017 = read.csv('BBatters_2017.csv')
BBBatters_2018 = read.csv('BBatters_2018.csv')
BBPitchers_2017 = read.csv('BPitchers_2017.csv')
BBPitchers_2018 = read.csv('BPitchers_2018.csv')
BBBatters_2017 = weightedAVG(BBBatters_2017)
BBBatters_2018 = weightedAVG(BBBatters_2018)
BBPitchers_2017 = weightedAVG(BBPitchers_2017)
BBPitchers_2018 = weightedAVG(BBPitchers_2018)
names(BBBatters_2017)[names(BBBatters_2017) == 'PlayerName'] <- 'Player'
names(BBBatters_2018)[names(BBBatters_2018) == 'PlayerName'] <- 'Player'
names(BBPitchers_2017)[names(BBPitchers_2017) == 'PlayerName'] <- 'Player'
names(BBPitchers_2018)[names(BBPitchers_2018) == 'PlayerName'] <- 'Player'
# Next season's DFS frames
Batters2018 = read.csv('Batters_17-18.csv')
Pitchers2018 = read.csv('Pitchers_17-18.csv')
# Interactive check: columns present in 2018 but not 2016 (result not stored)
names(Batters2018)[names(Batters2018) %nin% names(Batters2016)]
# The Player column is "Name (Hand)"; split it at "(" into Player and Hand,
# then strip the trailing ")" and surrounding whitespace.
Batters2016 = separate(data = Batters2016, col = Player, into = c("Player", "Hand"), sep = "\\(")
Batters2018 = separate(data = Batters2018, col = Player, into = c("Player", "Hand"), sep = "\\(")
Pitchers2016 = separate(data = Pitchers2016, col = Player, into = c("Player", "Hand"), sep = "\\(")
Pitchers2018 = separate(data = Pitchers2018, col = Player, into = c("Player", "Hand"), sep = "\\(")
Batters2016$Hand =(gsub(')', '', Batters2016$Hand ))
Batters2018$Hand =(gsub(')', '', Batters2018$Hand ))
Pitchers2016$Hand =(gsub(')', '', Pitchers2016$Hand ))
Pitchers2018$Hand =(gsub(')', '', Pitchers2018$Hand ))
Batters2016$Player = trim(Batters2016$Player)
Batters2018$Player = trim(Batters2018$Player)
Pitchers2016$Player = trim(Pitchers2016$Player)
Pitchers2018$Player = trim(Pitchers2018$Player)
Batters2016 = Batters2016[ order(Batters2016$Date, Batters2016$Player), ]
# Name aliases disabled; re-enable if the merge below drops these players.
# Batters2016$Player[Batters2016$Player == "Albert Almora"] = "Albert Almora Jr."
# Batters2016$Player[Batters2016$Player == "Nick Castellanos"] = "Nicholas Castellanos"
# Left-join the weighted streak stats onto each DFS frame by player name.
# NOTE(review): merging by Player only (not Player+Date) fans out one streak
# row per game row — confirm that is intended.
Batters2016_merged = merge(Batters2016, BBBatters_2017, by.x = "Player", by.y = "Player", all.x=TRUE)
Batters2016_merged = Batters2016_merged[ order(Batters2016_merged$Date.x, Batters2016_merged$Player), ]
# for(each in 1:nrow(Batters2016_merged)){
# if
# }
Batters2018_merged = merge(Batters2018, BBBatters_2018,by.x = "Player", by.y = "Player", all.x=TRUE)
Pitchers2016_merged = merge(Pitchers2016, BBPitchers_2017, by.x = "Player", by.y = "Player", all.x=TRUE)
# TODO: Remove this comment
Pitchers2018_merged = merge(Pitchers2018, BBPitchers_2018, by.x = "Player", by.y = "Player", all.x=TRUE)
# Interactive check: column mismatch before the rbind (result not stored)
names(Batters2018_merged)[names(Batters2018_merged) %nin% names(Batters2016_merged)]
# Stack both seasons into single batter/pitcher frames
Batters2016 = rbind(Batters2016_merged, Batters2018_merged)
# TODO: Remove this comment
Pitchers2016 = rbind(Pitchers2016_merged, Pitchers2018_merged)
# Pitchers2016 = Pitchers2016_merged
# Drop rows that lost their date in the merge, then restore the Date name
# (merge suffixed it to Date.x because both inputs had a Date column).
Batters2016 = Batters2016[!is.na(Batters2016$Date.x), ]
Pitchers2016 = Pitchers2016[!is.na(Pitchers2016$Date.x), ]
names(Pitchers2016)[names(Pitchers2016) == 'Date.x'] <- 'Date'
names(Batters2016)[names(Batters2016) == 'Date.x'] <- 'Date'
#### Merge directional stats for each players
# Data Prep
# Replace scraper artifacts ("nbsp;" left over from HTML and "-" placeholder
# cells) with 0 across the whole frame.
Batters2016[Batters2016 == 'nbsp;'] = 0
Pitchers2016[Pitchers2016 == 'nbsp;'] = 0
Batters2016[Batters2016 == "-"] = 0
Pitchers2016[Pitchers2016 == "-"] = 0
# Parse the m/d/Y date strings into Date objects
Batters2016$Date = as.Date(Batters2016$Date, "%m/%d/%Y")
Pitchers2016$Date = as.Date(Pitchers2016$Date, "%m/%d/%Y")
# Zero out any NAs produced by the merges / replacements above
Batters2016[is.na(Batters2016)] = 0
Pitchers2016[is.na(Pitchers2016)] = 0
# unfactor() (varhandle) converts the scraped factor columns to numeric;
# unparseable values become NA and are zeroed again just below.
Batters2016$FB = unfactor(Batters2016$FB)
Batters2016$GB = unfactor(Batters2016$GB)
Batters2016$LD = unfactor(Batters2016$LD)
Batters2016$HH = unfactor(Batters2016$HH)
Pitchers2016$FB = unfactor(Pitchers2016$FB)
Pitchers2016$GB = unfactor(Pitchers2016$GB)
Pitchers2016$LD = unfactor(Pitchers2016$LD)
Pitchers2016$HH = unfactor(Pitchers2016$HH)
Batters2016[is.na(Batters2016)] = 0
Pitchers2016[is.na(Pitchers2016)] = 0
#'
#' All Column names
("Date","Like","Lock","Rating","Player","Salary",
"Pos","Order","Team","Opp","Min","Max",
"Proj","Ceiling","Floor","ProjplusMinus","Pts_Sal","LowProjOwn",
"HighProjOwn,","Imp.Pts","Act.Pts","wOBA","wOBADiff","ISO",
"ISODiff","SLG","SO_AB","HR_AB","SB_G","OppBP",
"Pro","My","Ump","Bargain","ParkF","Runs",
"OppRuns","ST","ML","O_U","MLPer","Rating_M",
"Temp","WindSpd","WindDir","Humidity","Precip","Cnt",
"Dist","EV","FB","GB","LD","HH",
"DistST","EVST","HHST","Rec.BBL","Air","CntPer",
"DistPer","EVM","FBM","GBM","LDM","HH.",
"GB_FB","AirPer","OppwOBA","OppISO","OppwOBAMonth","OppISOMonth",
"PPG","Change","plusMinus","Consistency","Upside","Duds",
"Count","PPGYear","ChangeYear","ConsistencyYear","UpsideYear","DudsYear",
"CountYear","ACountYear")
#Pitchers
# All pitcher column names (kept as a reference vector).  The original
# listed these as a bare parenthesized sequence, which is a parse error
# in R; wrapping in c(...) makes the file parseable while preserving the
# reference list verbatim.
pitcher_col_names <- c(
  "Date", "Like", "Lock", "Rating", "Player", "Salary", "Team",
  "Opp", "Min", "Max", "Proj", "Ceiling", "Floor", "Proj_plusMinus",
  "Pts_Sal", "LowProjOwn", "HighProjOwn", "ImpPts", "ActPts", "WHIP", "HR_9",
  "SO_9", "IP", "QualS_S", "SO_AB", "wOBA", "Pro", "My",
  "Ump", "Bargain", "KPred", "ParkF", "Runs", "OppRuns", "delta",
  "ML", "O_U", "MLYear", "RatingYear", "Temp", "WindSpd", "WindDir",
  "Humidity", "Precip", "Cnt", "Dist", "EV", "FB", "GB",
  "LD", "HH", "Speed", "S", "Dist_delta", "EV_delta", "PCnt",
  "HH_delta", "Spd_delta", "RecBBL", "Air", "CntYear", "DistYear", "EVYear",
  "FBYear", "GBYear", "LDYear", "HHYear", "GB_FB", "SpeedYear", "SYear",
  "PCntYear", "AirYear", "PPG", "Change", "plusMinusYear", "Consistency", "Upside",
  "Duds", "Count", "PPGYear", "ChangeYear", "ConsistencyYear", "UpsideYear", "DudsYear",
  "CountYear")
#'
## varclus Batters
# Hierarchical clustering of batter features (Hmisc::varclus) using
# Spearman rank correlation as the similarity metric, to spot redundant
# predictors before model fitting.
spearmanP = varclus(as.matrix(Batters2016[,c("Rating","Salary",
"ProjplusMinus","Pts_Sal","LowProjOwn",
"Imp.Pts","Act.Pts","wOBA","wOBADiff","ISO",
"ISODiff","SO_AB","HR_AB","SB_G","OppBP",
"Pro","Bargain","ParkF","Runs",
"OppRuns","ST","ML","O_U","MLPer","Rating_M",
"Temp","WindSpd","Humidity","Precip","Cnt",
"Dist","EV",
"DistST","EVST","HHST","Rec.BBL","Air",
"DistPer","EVM","AirPer","OppwOBA","OppISO","OppwOBAMonth","OppISOMonth",
"PPG","Change","plusMinus",
"Count","PPGYear","ChangeYear",
"CountYear")] ), similarity = "spearman")
# Dendrogram; features joining above similarity 0.3 are strongly
# rank-correlated and are candidates for pruning.
plot(spearmanP)
abline(h=0.3)
## Varclus Pitchers
# Same Spearman-similarity variable clustering as above, for the pitcher
# feature set.
spearmanP = varclus(as.matrix(Pitchers2016[,c("Rating","Salary",
"Proj","Ceiling","Floor","Proj_plusMinus",
"Pts_Sal","LowProjOwn","HighProjOwn","ImpPts","ActPts","WHIP","HR_9",
"SO_9","IP","QualS_S","SO_AB","wOBA","Pro",
"Bargain","KPred","ParkF","Runs","OppRuns","delta",
"ML","O_U","MLYear","RatingYear","Temp","WindSpd",
"Humidity","Precip","Cnt","Dist","EV","FB","GB",
"LD","HH","Speed","S","Dist_delta","EV_delta","PCnt",
"HH_delta","Spd_delta","RecBBL","Air","CntYear","DistYear","EVYear",
"SpeedYear","SYear",
"PCntYear","AirYear","PPG","Change","plusMinusYear","Count",
"PPGYear","ChangeYear","CountYear")] ), similarity = "spearman")
# Dendrogram with the same 0.3 pruning threshold as the batter plot.
plot(spearmanP)
abline(h=0.3)
### Merge Batters info with Pitching data
# Attach the opposing starting pitcher's stats to each batter row: the
# pitcher's own Team is renamed to Opp so it joins against the batter's
# opponent column.
# All_16_Combined = merge(All_16, Dvoa, by = c("Player","Date","Opp"))
# NOTE(review): "C1".."C9" are assumed present on Pitchers2016; they are
# not created in the visible code -- confirm they come from upstream.
Pitchers2016_Sub = Pitchers2016[,c("Date","Rating","Player","Salary","Team",
"WHIP","HR_9",
"SO_9","IP","QualS_S","SO_AB","wOBA","Pro",
"Bargain","KPred","ParkF","Runs","OppRuns","delta",
"ML","O_U","MLYear","RatingYear","Temp","WindSpd",
"Humidity","Precip","Cnt","Dist","EV","GB",
"LD","HH","Speed","S","Dist_delta","EV_delta","PCnt",
"Spd_delta","RecBBL","Air","CntYear","DistYear","EVYear",
"SpeedYear","SYear",
"PCntYear","AirYear","PPG","Change","plusMinusYear","Count",
"PPGYear","ChangeYear","CountYear",
"C1","C2","C3","C4","C5","C6","C7","C8","C9")]
# Pitchers2016_Sub = Pitchers2016_Sub[1:50,]
names(Pitchers2016_Sub)[names(Pitchers2016_Sub) == 'Team'] <- 'Opp'
Batters2016_Sub = Batters2016
# Get first 3 chars of the opponent code.  NOTE(review): substr() start
# index 0 behaves like 1 in R, so this is effectively substr(x, 1, 3).
Batters2016_Sub$Opp = substr(Batters2016_Sub$Opp, 0, 3)
# remove white space
Batters2016_Sub$Opp = gsub('\\s+', '', Batters2016_Sub$Opp)
# Left join: keep every batter row even when no pitcher row matches on
# (Date, Opp); pitcher columns come through NA in that case.
Batters2016_Sub_Combined = merge(Batters2016_Sub, Pitchers2016_Sub, by = c("Date","Opp"), all.x = TRUE)
## Prediction setup: choose the slate date and the modeling columns.
# Target date for out-of-sample prediction; all earlier rows are training.
DateCheck = "2018-4-17"
# Suffix convention from the merge: .x = batter-side, .y = opposing
# pitcher-side columns.
# NOTE(review): "PLayerID" capitalization looks like a typo -- confirm the
# actual column name, otherwise this subscript errors with "undefined
# columns selected".
Batters2016Cleaned = Batters2016_Sub_Combined[,c("Date","Rating.x","Player.x","Salary.x","PLayerID","Player.y",
"Pos","Order","Team","Opp",
"ProjplusMinus","Pts_Sal","LowProjOwn",
"Imp.Pts","Act.Pts","wOBA.x","wOBADiff","ISO",
"ISODiff","SO_AB.x","HR_AB","SB_G","OppBP",
"Pro.x","Bargain.x","ParkF.x","Runs.x",
"OppRuns.x","ST","ML.x","O_U.x","MLPer","Rating_M",
"Temp.x","WindSpd.x","Humidity.x","Precip.x","Cnt.x",
"Dist.x","EV.x","FB","GB.x","LD.x","HH.x",
"DistST","EVST","HHST","Rec.BBL","Air.x",
"DistPer","EVM","AirPer","OppwOBA","OppISO","OppwOBAMonth","OppISOMonth",
"PPG.x","Change.x","plusMinus",
"Count.x","PPGYear.x","ChangeYear.x",
"CountYear.x", "Rating.y","WHIP","Salary.y","SO_9","IP","SO_AB.y",
"C1.x","C2.x","C3.x","C4.x","C5.x","C6.x","C7.x","C8.x","C9.x",
"C1.y","C2.y","C3.y","C4.y","C5.y","C6.y","C7.y","C8.y","C9.y")]
# De-duplicate rows produced by the left join.
Batters2016Cleaned = unique(Batters2016Cleaned)
names(Batters2016Cleaned)[names(Batters2016Cleaned) == 'Player.x'] <- 'Player'
# These whole-slate splits are recomputed per player inside the loop
# below; here they only seed playerNames for the target date.
Batters2016Cleaned_Test = subset(Batters2016Cleaned, (as.Date(Batters2016Cleaned$Date) == as.Date(DateCheck)))
Batters2016Cleaned_Train = subset(Batters2016Cleaned, as.Date(Batters2016Cleaned$Date) < as.Date(DateCheck))
playerNames = unique(Batters2016Cleaned_Test$Player)
# Accumulator for per-player predictions; column set fixes the output
# schema (disabled models are zero-filled by the loop).
ResultsBatters = data.frame( RFPred = numeric(), Xgb = numeric(), Name = factor(), Pos = factor() ,
                      Salary = numeric(), Actual = numeric() , HTeam = factor(), OTeam = factor(),
                      Pts = numeric(), DNNPer = numeric(), DNN = numeric(),xgbPLUSMINUS = numeric(),
                      RFPLUSMINUS = numeric(),
                      C1B = numeric(),C2B = numeric(),C3B = numeric(),C4B = numeric(),
                      C5B = numeric(),C6B = numeric(),C7B = numeric(),C8B = numeric(),C9B = numeric(),
                      C1P = numeric(),C2P = numeric(),C3P = numeric(),C4P = numeric(),
                      C5P = numeric(),C6P = numeric(),C7P = numeric(),C8P = numeric(),C9P = numeric()
)
## Prediction loop (batters).
# For each batter on the DateCheck slate: fit a random forest on that
# player's history (falling back to whole-team history when the player has
# fewer than 20 rows), predict target-date fantasy points, and estimate a
# +/- band from a bootstrapped holdout.  XGBoost and H2O deep-learning
# branches were prototyped here but are disabled; their output columns are
# zero-filled so the ResultsBatters schema stays stable.

# Feature columns shared by every fit/predict call below (.x = batter-side,
# .y = opposing-pitcher-side after the merge).  Hoisted out of the loop --
# the original repeated this list verbatim four times.
batterFeatureCols = c(
  "wOBA.x", "wOBADiff", "ISO", "ISODiff", "SO_AB.x", "HR_AB", "SB_G",
  "OppBP", "Pro.x", "Bargain.x", "ParkF.x", "Runs.x", "OppRuns.x", "ST",
  "ML.x", "O_U.x", "MLPer", "Rating_M", "Temp.x", "WindSpd.x",
  "Humidity.x", "Precip.x", "Cnt.x", "Dist.x", "EV.x", "FB", "GB.x",
  "LD.x", "HH.x", "DistST", "EVST", "HHST", "Rec.BBL", "Air.x",
  "DistPer", "EVM", "AirPer", "OppwOBA", "OppISO", "OppwOBAMonth",
  "OppISOMonth", "PPG.x", "Change.x", "plusMinus", "Count.x",
  "PPGYear.x", "ChangeYear.x", "CountYear.x", "Rating.y", "WHIP",
  "Salary.y", "SO_9", "IP", "SO_AB.y",
  "C1.x", "C2.x", "C3.x", "C4.x", "C5.x", "C6.x", "C7.x", "C8.x", "C9.x",
  "C1.y", "C2.y", "C3.y", "C4.y", "C5.y", "C6.y", "C7.y", "C8.y", "C9.y")

for (each in 1:length(playerNames)){
  # Per-player slices: target-date row(s) vs. this player's prior history.
  Batters2016Cleaned_Test = subset(Batters2016Cleaned, Batters2016Cleaned$Date == DateCheck
                                   & Batters2016Cleaned$Player == as.character(playerNames[each]) )
  Batters2016Cleaned_Train = subset(Batters2016Cleaned, Batters2016Cleaned$Date < DateCheck
                                    & Batters2016Cleaned$Player == as.character(playerNames[each]) )
  # Progress trace.
  print (playerNames[each])
  print (each)
  ### This ensures at least 1 row of data exists for prediction
  if (nrow(Batters2016Cleaned_Test) < 1 ){
    next
  }
  #### Fewer than 20 player rows: fall back to the player's whole-team data
  #### (the original comment said 15, but the code checks 20).
  if (nrow(Batters2016Cleaned_Train) < 20){
    Batters2016Cleaned_Train = subset(Batters2016Cleaned, Batters2016Cleaned$Date != DateCheck
                                      & Batters2016Cleaned$Team
                                      == as.character( unique ( subset(Batters2016Cleaned,
                                          Batters2016Cleaned$Player == as.character(playerNames[each]) )$Team ) )
    )
  }
  # Still too thin even with team data: skip this player entirely.
  if (nrow(Batters2016Cleaned_Train) < 15){
    next
  }
  ######### Construct Models
  Batters2016Cleaned_Train[is.na(Batters2016Cleaned_Train)] = 0
  Batters2016Cleaned_Test[is.na(Batters2016Cleaned_Test)] = 0
  # Bootstrap 7 row indices (with replacement) as a pseudo-holdout, used
  # only to estimate the +/- band of the forest's residuals.
  indices = sample(1:nrow(Batters2016Cleaned_Train), 7, replace = TRUE)
  Batters2016Cleaned_Train_PlusMinusTrain = Batters2016Cleaned_Train[-indices, ]
  Batters2016Cleaned_Train_PlusMinusTest = Batters2016Cleaned_Train[indices, ]
  ######### RF: main forest on the full history -> point prediction.
  rf = randomForest( Batters2016Cleaned_Train[, batterFeatureCols],
                     y = Batters2016Cleaned_Train[,c("Act.Pts")], ntree=300
                     ,type='regression')
  # Second forest trained without the held-out rows -> residual band.
  rf_PlusMinus = randomForest( Batters2016Cleaned_Train_PlusMinusTrain[, batterFeatureCols],
                               y = Batters2016Cleaned_Train_PlusMinusTrain[,c("Act.Pts")], ntree=300
                               ,type='regression')
  #### (Disabled) XGBoost sparse-matrix branch and H2O deep-learning branch
  #### were prototyped here; Xgb/DNN outputs below are zero-filled.
  ################################# Predictions
  RFPred = predict( rf, Batters2016Cleaned_Test[, batterFeatureCols]
                    ,type = c("response") )
  RFPred_PlusMinus = predict( rf_PlusMinus,
                              Batters2016Cleaned_Train_PlusMinusTest[, batterFeatureCols]
                              ,type = c("response") )
  # Residuals on the bootstrap holdout give a crude prediction interval.
  plusMinus = Batters2016Cleaned_Train_PlusMinusTest$`Act.Pts` - RFPred_PlusMinus
  # ceiling() is the base-R function; the original called ceil(), which only
  # exists in add-on packages (e.g. pracma) and errors in plain R.
  RFPLUSMINUS_M = ceiling(min(plusMinus))
  RFPLUSMINUS_P = ceiling(max(plusMinus))
  ##################################
  # Assemble this player's result row(s) in the ResultsBatters schema.
  Prediction2 = as.data.frame(RFPred)
  Prediction2["RFPer"] = 0
  Prediction2["RF_M"] = as.data.frame(RFPLUSMINUS_M)
  Prediction2["RF_P"] = as.data.frame(RFPLUSMINUS_P)
  Prediction2["Actual"] = as.data.frame(Batters2016Cleaned_Test$`Act.Pts`)
  Prediction2["Salary"] = as.data.frame(Batters2016Cleaned_Test$`Salary.x`)
  Prediction2["Name"] = as.data.frame(Batters2016Cleaned_Test$Player)
  Prediction2["HTeam"] = as.data.frame(Batters2016Cleaned_Test$Team)
  Prediction2["Opp"] = as.data.frame(Batters2016Cleaned_Test$Opp)
  # NOTE(review): Batters2016Cleaned has no "Pts" column; `$Pts` relies on
  # R's partial matching on data frames (likely resolving to "Pts_Sal") --
  # confirm the intended source column.
  Prediction2["Pts"] = as.data.frame(Batters2016Cleaned_Test$Pts)
  Prediction2["Pos"] = as.data.frame(Batters2016Cleaned_Test$Pos)
  # Batter-side (C*B) and opposing-pitcher-side (C*P) cluster columns.
  Prediction2$C1B = Batters2016Cleaned_Test$C1.x
  Prediction2$C2B = Batters2016Cleaned_Test$C2.x
  Prediction2$C3B = Batters2016Cleaned_Test$C3.x
  Prediction2$C4B = Batters2016Cleaned_Test$C4.x
  Prediction2$C5B = Batters2016Cleaned_Test$C5.x
  Prediction2$C6B = Batters2016Cleaned_Test$C6.x
  Prediction2$C7B = Batters2016Cleaned_Test$C7.x
  Prediction2$C8B = Batters2016Cleaned_Test$C8.x
  Prediction2$C9B = Batters2016Cleaned_Test$C9.x
  Prediction2$C1P = Batters2016Cleaned_Test$C1.y
  Prediction2$C2P = Batters2016Cleaned_Test$C2.y
  Prediction2$C3P = Batters2016Cleaned_Test$C3.y
  Prediction2$C4P = Batters2016Cleaned_Test$C4.y
  Prediction2$C5P = Batters2016Cleaned_Test$C5.y
  Prediction2$C6P = Batters2016Cleaned_Test$C6.y
  Prediction2$C7P = Batters2016Cleaned_Test$C7.y
  Prediction2$C8P = Batters2016Cleaned_Test$C8.y
  Prediction2$C9P = Batters2016Cleaned_Test$C9.y
  # Disabled-model placeholders keep the output schema stable.
  Prediction2["Xgb"] = 0 #as.data.frame(predict(xgbO, testSparseMatrix))
  Prediction2["XgbPer"] = 0 #as.data.frame( Prediction2["Xgb"]*100/(Batters2016Cleaned_Test$`Salary`) )
  Prediction2["xgb_M"] = 0 #as.data.frame(xgbPLUSMINUS_M)
  Prediction2["xgb_P"] = 0 #as.data.frame(xgbPLUSMINUS_P)
  Prediction2["DNN"] = 0
  Prediction2["DNNPer"] = 0# as.data.frame( Prediction2["DNN"]*100/(Batters2016Cleaned_Test$`Salary`) )
  ResultsBatters = rbind(ResultsBatters, Prediction2)
}
# Persist the batter predictions.  NOTE(review): the filename hardcodes
# the slate date and must be kept in sync with DateCheck ("2018-4-17").
write.csv(ResultsBatters, file = "MLB_4_17_2018.csv")
#### Pitchers
# Pitcher modeling frame.  NOTE(review): the actual-points column here is
# "ActPts" (no dot), unlike the batter frame's "Act.Pts" -- any later
# `$Act.Pts` access on this frame will not resolve.
Pitchers2016Cleaned = Pitchers2016[,c("Date","Rating","Player","Salary",
"Ceiling","Floor","Proj_plusMinus",
"Pts_Sal","LowProjOwn","HighProjOwn","ActPts","WHIP","HR_9",
"SO_9","IP","QualS_S","SO_AB","wOBA","Pro",
"Bargain","KPred","ParkF","Runs","OppRuns","delta",
"ML","O_U","MLYear","RatingYear","Temp","WindSpd",
"Humidity","Precip","Cnt","Dist","EV","GB",
"LD","HH","Speed","S","Dist_delta","EV_delta","PCnt",
"Spd_delta","RecBBL","Air","CntYear","DistYear","EVYear",
"SpeedYear","SYear",
"PCntYear","AirYear","PPG","Change","plusMinusYear","Count",
"PPGYear","ChangeYear","CountYear")]
Pitchers2016Cleaned = unique(Pitchers2016Cleaned)
# Slate-date split; these are recomputed per pitcher inside the loop and
# only seed playerNames here.
Pitchers2016Cleaned_Test = subset(Pitchers2016Cleaned, (as.Date(Pitchers2016Cleaned$Date) == as.Date(DateCheck)))
Pitchers2016Cleaned_Train = subset(Pitchers2016Cleaned, as.Date(Pitchers2016Cleaned$Date) < as.Date(DateCheck))
playerNames = unique(Pitchers2016Cleaned_Test$Player)
# Accumulator fixing the pitcher output schema (no C1..C9 columns here).
ResultsPitchers = data.frame( RFPred = numeric(), Xgb = numeric(), Name = factor(), Pos = factor() ,
                             Salary = numeric(), Actual = numeric() , HTeam = factor(), OTeam = factor(),
                             Pts = numeric(), DNNPer = numeric(), DNN = numeric(),xgbPLUSMINUS = numeric(),
                             RFPLUSMINUS = numeric())
## Prediction
for (each in 16:length(playerNames)){
Pitchers2016Cleaned_Test = subset(Pitchers2016Cleaned, Pitchers2016Cleaned$Date == DateCheck
& Pitchers2016Cleaned$Player == as.character(playerNames[each]) )
Pitchers2016Cleaned_Train = subset(Pitchers2016Cleaned, as.Date(Pitchers2016Cleaned$Date) != as.Date(DateCheck)
& Pitchers2016Cleaned$Player == as.character(playerNames[each]) )
print (playerNames[each])
print (each)
### This ensures atleast 1 row of data exists for prediction
if (nrow(Pitchers2016Cleaned_Test) < 1 ){
next
}
######### Construct Models
indices = sample(1:nrow(Pitchers2016Cleaned_Train), 7, replace = TRUE)
Pitchers2016Cleaned_Train_PlusMinusTrain = Pitchers2016Cleaned_Train[-indices, ]
Pitchers2016Cleaned_Train_PlusMinusTest = Pitchers2016Cleaned_Train[indices, ]
#########RF
rf = randomForest( Pitchers2016Cleaned_Train[,c(
"Proj_plusMinus",
"Pts_Sal","LowProjOwn","HighProjOwn","WHIP","HR_9",
"SO_9","IP","QualS_S","SO_AB","wOBA","Pro",
"Bargain","KPred","ParkF","Runs","OppRuns","delta",
"ML","O_U","MLYear","RatingYear","Temp","WindSpd",
"Humidity","Precip","Cnt","Dist","EV","GB",
"LD","HH","Speed","S","Dist_delta","EV_delta","PCnt",
"Spd_delta","RecBBL","Air","CntYear","DistYear","EVYear",
"SpeedYear","SYear",
"PCntYear","AirYear","PPG","Change","plusMinusYear","Count",
"PPGYear","ChangeYear","CountYear")],
y = Pitchers2016Cleaned_Train[,c("ActPts")], ntree=300
,type='regression')
rf_PlusMinus = randomForest( Pitchers2016Cleaned_Train_PlusMinusTrain[,c(
"Proj_plusMinus",
"Pts_Sal","LowProjOwn","HighProjOwn","WHIP","HR_9",
"SO_9","IP","QualS_S","SO_AB","wOBA","Pro",
"Bargain","KPred","ParkF","Runs","OppRuns","delta",
"ML","O_U","MLYear","RatingYear","Temp","WindSpd",
"Humidity","Precip","Cnt","Dist","EV","GB",
"LD","HH","Speed","S","Dist_delta","EV_delta","PCnt",
"Spd_delta","RecBBL","Air","CntYear","DistYear","EVYear",
"SpeedYear","SYear",
"PCntYear","AirYear","PPG","Change","plusMinusYear","Count",
"PPGYear","ChangeYear","CountYear")],
y = Pitchers2016Cleaned_Train_PlusMinusTrain[,c("ActPts")], ntree=300
,type='regression')
####XGB
# trainSparceMatrix = sparse.model.matrix( Pitchers2016Cleaned_Train$`Act.Pts` ~
# ( Pitchers2016Cleaned_Train$Rating + Pitchers2016Cleaned_Train$`Usg.Proj` + Pitchers2016Cleaned_Train$Pts_Sal
# + Pitchers2016Cleaned_Train$`Min.Proj` + Pitchers2016Cleaned_Train$Pro + Pitchers2016Cleaned_Train$Bargain
# + Pitchers2016Cleaned_Train$Own1 + Pitchers2016Cleaned_Train$PER + Pitchers2016Cleaned_Train$Pts + Pitchers2016Cleaned_Train$Usage +
# Pitchers2016Cleaned_Train$Opp_Plus_Minus + Pitchers2016Cleaned_Train$PaceD + Pitchers2016Cleaned_Train$TS_Per + Pitchers2016Cleaned_Train$Fouls_36 +
# Pitchers2016Cleaned_Train$Points_Touch + Pitchers2016Cleaned_Train$Touches + Pitchers2016Cleaned_Train$Rest + Pitchers2016Cleaned_Train$Pts +
# Pitchers2016Cleaned_Train$`Opp.Pts` + Pitchers2016Cleaned_Train$delta + Pitchers2016Cleaned_Train$Spread + Pitchers2016Cleaned_Train$O_U +
# Pitchers2016Cleaned_Train$Spread_per + Pitchers2016Cleaned_Train$Upside + Pitchers2016Cleaned_Train$Duds + Pitchers2016Cleaned_Train$Count +
# Pitchers2016Cleaned_Train$YPlus_Minus + Pitchers2016Cleaned_Train$YDuds + Pitchers2016Cleaned_Train$YCount+ Pitchers2016Cleaned_Train$PTS +
# Pitchers2016Cleaned_Train$REB + Pitchers2016Cleaned_Train$STL + Pitchers2016Cleaned_Train$BLK ))
#
# Labels = Matrix(Pitchers2016Cleaned_Train$`Act.Pts`, sparse = TRUE)
#
# dtrain <- xgb.DMatrix(data = trainSparceMatrix, label=Labels)
#
# trainSparceMatrix_PlusMinus = sparse.model.matrix( Pitchers2016Cleaned_Train_PlusMinusTrain$`Act.Pts` ~
# (Pitchers2016Cleaned_Train_PlusMinusTrain$Rating + Pitchers2016Cleaned_Train_PlusMinusTrain$`Usg.Proj` + Pitchers2016Cleaned_Train_PlusMinusTrain$Pts_Sal
# + Pitchers2016Cleaned_Train_PlusMinusTrain$`Min.Proj` + Pitchers2016Cleaned_Train_PlusMinusTrain$Pro + Pitchers2016Cleaned_Train_PlusMinusTrain$Bargain
# + Pitchers2016Cleaned_Train_PlusMinusTrain$Own1 + Pitchers2016Cleaned_Train_PlusMinusTrain$PER + Pitchers2016Cleaned_Train_PlusMinusTrain$Pts + Pitchers2016Cleaned_Train_PlusMinusTrain$Usage +
# Pitchers2016Cleaned_Train_PlusMinusTrain$Opp_Plus_Minus + Pitchers2016Cleaned_Train_PlusMinusTrain$PaceD + Pitchers2016Cleaned_Train_PlusMinusTrain$TS_Per + Pitchers2016Cleaned_Train_PlusMinusTrain$Fouls_36 +
# Pitchers2016Cleaned_Train_PlusMinusTrain$Points_Touch + Pitchers2016Cleaned_Train_PlusMinusTrain$Touches + Pitchers2016Cleaned_Train_PlusMinusTrain$Rest + Pitchers2016Cleaned_Train_PlusMinusTrain$Pts +
# Pitchers2016Cleaned_Train_PlusMinusTrain$`Opp.Pts` + Pitchers2016Cleaned_Train_PlusMinusTrain$delta + Pitchers2016Cleaned_Train_PlusMinusTrain$Spread + Pitchers2016Cleaned_Train_PlusMinusTrain$O_U +
# Pitchers2016Cleaned_Train_PlusMinusTrain$Spread_per + Pitchers2016Cleaned_Train_PlusMinusTrain$Upside + Pitchers2016Cleaned_Train_PlusMinusTrain$Duds + Pitchers2016Cleaned_Train_PlusMinusTrain$Count +
# Pitchers2016Cleaned_Train_PlusMinusTrain$YPlus_Minus + Pitchers2016Cleaned_Train_PlusMinusTrain$YDuds + Pitchers2016Cleaned_Train_PlusMinusTrain$YCount+ Pitchers2016Cleaned_Train_PlusMinusTrain$PTS +
# Pitchers2016Cleaned_Train_PlusMinusTrain$REB + Pitchers2016Cleaned_Train_PlusMinusTrain$STL + Pitchers2016Cleaned_Train_PlusMinusTrain$BLK ))
# Labels_PlusMinus = Matrix(Pitchers2016Cleaned_Train_PlusMinusTrain$`Act.Pts`, sparse = TRUE)
#
# dtrain_PlusMinus <- xgb.DMatrix(data = trainSparceMatrix_PlusMinus, label=Labels_PlusMinus)
###H20
# TrainingH20= as.h2o(Pitchers2016Cleaned_Train)
# splits <- h2o.splitFrame(TrainingH20, c(0.9), seed=1234)
#
#
# trainDNN <- h2o.assign(splits[[1]], "train.hex") # 60%
# validDNN <- h2o.assign(splits[[2]], "valid.hex") # 60%
# TrainingH202= as.h2o(Pitchers2016Cleaned_Test)
# splits2 <- h2o.splitFrame(TrainingH202, seed=0)
# testDNN <- h2o.assign(splits2[[1]], "test.hex") # 20%
#
# response <- "Act.Pts"
#
# predictors <- c("Rating", "Salary","Proj_Plus_Minus","Pts_Sal","Usg.Proj","Min.Proj",
# "Own1","Own2","Imp.Pts","FP_Min","PER",
# "Usage","Pro","Bargain","Opp_Plus_Minus","PaceD","TS_Per","Fouls_36",
# "Points_Touch","Touches","Rest","Pts","Opp.Pts","delta","Spread",
# "O_U","Spread_per","PPG","Consistency",
# "Upside","Duds","Count","YPPG","YPlus_Minus","YConsistency","YUpside","YDuds","YCount","PTS","REB","STL","BLK" )
#
# m1 = tryCatch(
# {expr = h2o.deeplearning(
# model_id="dl_model_first",
# training_frame=trainDNN,
# validation_frame=validDNN, ## validation dataset: used for scoring and early stopping
# x=predictors,
# y=response,
# nfold = 5,
# #activation="Rectifier", ## default
# hidden=c(300,100), ## default: 2 hidden layers with 200 neurons each
# variable_importances=T,
# epochs = 5,
# categorical_encoding = "OneHotInternal"
# )},
# error = function(i){return (0)}
#
# )
#
#################################Predictions
RFPred = predict( rf, Pitchers2016Cleaned_Test[,c(
"Proj_plusMinus",
"Pts_Sal","LowProjOwn","HighProjOwn","WHIP","HR_9",
"SO_9","IP","QualS_S","SO_AB","wOBA","Pro",
"Bargain","KPred","ParkF","Runs","OppRuns","delta",
"ML","O_U","MLYear","RatingYear","Temp","WindSpd",
"Humidity","Precip","Cnt","Dist","EV","GB",
"LD","HH","Speed","S","Dist_delta","EV_delta","PCnt",
"Spd_delta","RecBBL","Air","CntYear","DistYear","EVYear",
"SpeedYear","SYear",
"PCntYear","AirYear","PPG","Change","plusMinusYear","Count",
"PPGYear","ChangeYear","CountYear")]
,type = c("response") )
RFPred_PlusMinus = predict( rf_PlusMinus,
Pitchers2016Cleaned_Train_PlusMinusTest[,c("Rating",
"Proj_plusMinus",
"Pts_Sal","LowProjOwn","HighProjOwn","WHIP","HR_9",
"SO_9","IP","QualS_S","SO_AB","wOBA","Pro",
"Bargain","KPred","ParkF","Runs","OppRuns","delta",
"ML","O_U","MLYear","RatingYear","Temp","WindSpd",
"Humidity","Precip","Cnt","Dist","EV","GB",
"LD","HH","Speed","S","Dist_delta","EV_delta","PCnt",
"Spd_delta","RecBBL","Air","CntYear","DistYear","EVYear",
"SpeedYear","SYear",
"PCntYear","AirYear","PPG","Change","plusMinusYear","Count",
"PPGYear","ChangeYear","CountYear")]
,type = c("response") )
plusMinus = Pitchers2016Cleaned_Train_PlusMinusTest$`ActPts` - RFPred_PlusMinus
RFPLUSMINUS_M = ceil(min(plusMinus))
RFPLUSMINUS_P = ceil(max(plusMinus))
#
# testSparseMatrix = sparse.model.matrix(
# Pitchers2016Cleaned_Test$`Act.Pts` ~
# (Pitchers2016Cleaned_Test$Rating + Pitchers2016Cleaned_Test$`Usg.Proj` + Pitchers2016Cleaned_Test$Pts_Sal
# + Pitchers2016Cleaned_Test$`Min.Proj` + Pitchers2016Cleaned_Test$Pro + Pitchers2016Cleaned_Test$Bargain
# + Pitchers2016Cleaned_Test$Own1 + Pitchers2016Cleaned_Test$PER + Pitchers2016Cleaned_Test$Pts + Pitchers2016Cleaned_Test$Usage +
# Pitchers2016Cleaned_Test$Opp_Plus_Minus + Pitchers2016Cleaned_Test$PaceD + Pitchers2016Cleaned_Test$TS_Per + Pitchers2016Cleaned_Test$Fouls_36 +
# Pitchers2016Cleaned_Test$Points_Touch + Pitchers2016Cleaned_Test$Touches + Pitchers2016Cleaned_Test$Rest + Pitchers2016Cleaned_Test$Pts +
# Pitchers2016Cleaned_Test$`Opp.Pts` + Pitchers2016Cleaned_Test$delta + Pitchers2016Cleaned_Test$Spread + Pitchers2016Cleaned_Test$O_U +
# Pitchers2016Cleaned_Test$Spread_per + Pitchers2016Cleaned_Test$Upside + Pitchers2016Cleaned_Test$Duds + Pitchers2016Cleaned_Test$Count +
# Pitchers2016Cleaned_Test$YPlus_Minus + Pitchers2016Cleaned_Test$YDuds + Pitchers2016Cleaned_Test$YCount+
# Pitchers2016Cleaned_Test$REB + Pitchers2016Cleaned_Test$STL + Pitchers2016Cleaned_Test$BLK
# ))
#
# xgbO = xgboost(data = dtrain ,booster = "gblinear" , eta = 0.1 , max_depth=50, nthread = 4,
# nrounds=2000,objective = "reg:linear" , verbose = 0 )
#
# predict(xgbO,testSparseMatrix )
#
#
# testSparseMatrix_PlusMinus = sparse.model.matrix(
# Pitchers2016Cleaned_Train_PlusMinusTest$`Act.Pts` ~
# (Pitchers2016Cleaned_Train_PlusMinusTest$Rating + Pitchers2016Cleaned_Train_PlusMinusTest$`Usg.Proj` + Pitchers2016Cleaned_Train_PlusMinusTest$Pts_Sal
# + Pitchers2016Cleaned_Train_PlusMinusTest$`Min.Proj` + Pitchers2016Cleaned_Train_PlusMinusTest$Pro + Pitchers2016Cleaned_Train_PlusMinusTest$Bargain
# + Pitchers2016Cleaned_Train_PlusMinusTest$Own1 + Pitchers2016Cleaned_Train_PlusMinusTest$PER + Pitchers2016Cleaned_Train_PlusMinusTest$Pts + Pitchers2016Cleaned_Train_PlusMinusTest$Usage +
# Pitchers2016Cleaned_Train_PlusMinusTest$Opp_Plus_Minus + Pitchers2016Cleaned_Train_PlusMinusTest$PaceD + Pitchers2016Cleaned_Train_PlusMinusTest$TS_Per + Pitchers2016Cleaned_Train_PlusMinusTest$Fouls_36 +
# Pitchers2016Cleaned_Train_PlusMinusTest$Points_Touch + Pitchers2016Cleaned_Train_PlusMinusTest$Touches + Pitchers2016Cleaned_Train_PlusMinusTest$Rest + Pitchers2016Cleaned_Train_PlusMinusTest$Pts +
# Pitchers2016Cleaned_Train_PlusMinusTest$`Opp.Pts` + Pitchers2016Cleaned_Train_PlusMinusTest$delta + Pitchers2016Cleaned_Train_PlusMinusTest$Spread + Pitchers2016Cleaned_Train_PlusMinusTest$O_U +
# Pitchers2016Cleaned_Train_PlusMinusTest$Spread_per + Pitchers2016Cleaned_Train_PlusMinusTest$Upside + Pitchers2016Cleaned_Train_PlusMinusTest$Duds + Pitchers2016Cleaned_Train_PlusMinusTest$Count +
# Pitchers2016Cleaned_Train_PlusMinusTest$YPlus_Minus + Pitchers2016Cleaned_Train_PlusMinusTest$YDuds + Pitchers2016Cleaned_Train_PlusMinusTest$YCount+ Pitchers2016Cleaned_Train_PlusMinusTest$PTS +
# Pitchers2016Cleaned_Train_PlusMinusTest$REB + Pitchers2016Cleaned_Train_PlusMinusTest$STL + Pitchers2016Cleaned_Train_PlusMinusTest$BLK
# ))
#
# xgbO_PlusMinus = xgboost(data = dtrain_PlusMinus ,booster = "gblinear" , eta = 0.1 , max_depth=50, nthread = 4,
# nrounds=2000,objective = "reg:linear" , verbose = 0 )
#
# plusMinus = Pitchers2016Cleaned_Train_PlusMinusTest$`Act.Pts` - predict(xgbO_PlusMinus,testSparseMatrix_PlusMinus )
# xgbPLUSMINUS_M = ceil(min(plusMinus))
# xgbPLUSMINUS_P = ceil(max(plusMinus))
##################################
# --- Assemble the results block for this run ---------------------------------
# Builds a data frame of random-forest predictions for the test slate plus
# identifying columns, then appends it to the accumulating ResultsPitchers
# table. NOTE(review): this code appears to run inside a loop whose opening
# brace is above this excerpt -- confirm against the full file.
Prediction2 = as.data.frame(RFPred)
# Prediction scaled by 100 and divided by salary (a value-per-salary metric).
Prediction2["RFPer"] = as.data.frame( Prediction2["RFPred"]*100/(Pitchers2016Cleaned_Test$`Salary`) )
# RF_M / RF_P: presumably the lower/upper bounds of the RF residual range,
# mirroring the min/max "plusMinus" logic in the commented-out xgboost code
# above -- TODO confirm where RFPLUSMINUS_M/_P are computed.
Prediction2["RF_M"] = as.data.frame(RFPLUSMINUS_M)
Prediction2["RF_P"] = as.data.frame(RFPLUSMINUS_P)
# Carry the observed outcome and player metadata from the test set.
Prediction2["Actual"] = as.data.frame(Pitchers2016Cleaned_Test$`Act.Pts`)
Prediction2["Salary"] = as.data.frame(Pitchers2016Cleaned_Test$`Salary`)
Prediction2["Name"] = as.data.frame(Pitchers2016Cleaned_Test$Player)
Prediction2["HTeam"] = as.data.frame(Pitchers2016Cleaned_Test$Team)
Prediction2["Opp"] = as.data.frame(Pitchers2016Cleaned_Test$Opp)
Prediction2["Pts"] = as.data.frame(Pitchers2016Cleaned_Test$Pts)
Prediction2["Pos"] = as.data.frame(Pitchers2016Cleaned_Test$Pos)
# xgboost columns are stubbed to 0; the live model calls are kept in the
# trailing comments (and in the disabled sparse-matrix block above).
Prediction2["Xgb"] = 0 #as.data.frame(predict(xgbO, testSparseMatrix))
Prediction2["XgbPer"] = 0 #as.data.frame( Prediction2["Xgb"]*100/(Pitchers2016Cleaned_Test$`Salary`) )
Prediction2["xgb_M"] = 0 #as.data.frame(xgbPLUSMINUS_M)
Prediction2["xgb_P"] = 0 #as.data.frame(xgbPLUSMINUS_P)
# Deep-neural-net column likewise stubbed (h2o call preserved below).
Prediction2["DNN"] = 0
# if (typeof(m1) == "S4"){
#   Prediction2["DNN"] = 0 #as.data.frame(h2o.predict(m1,newdata=testDNN))
# }
# else{
#   Prediction2["DNN"] = 0
# }
#
Prediction2["DNNPer"] = 0# as.data.frame( Prediction2["DNN"]*100/(Pitchers2016Cleaned_Test$`Salary`) )
# Append this run's rows to the running results table.
# NOTE(review): rbind inside a loop grows the frame quadratically; if this
# becomes slow, collect the blocks in a list and bind once at the end.
ResultsPitchers = rbind(ResultsPitchers, Prediction2)
}
|
6322a82d52645f3ed798c03616c19e2f63429905 | 887dc03efc71b10900e0fcab0d56e85a877098f8 | /man/pours_etud_perte_reprise.Rd | 9c99de1edfd5441f572a62a801c4e046789fcfba | [
"MIT"
] | permissive | ove-ut3/survey.admin | 36a445459c532bf307dfb5f0f4747b6288494d2c | 98225711492f50931d868a9277d7fba039bd1efc | refs/heads/master | 2021-01-07T22:48:29.207281 | 2020-05-20T12:57:42 | 2020-05-20T12:57:42 | 241,842,349 | 0 | 0 | NOASSERTION | 2020-05-20T12:57:43 | 2020-02-20T09:23:39 | R | UTF-8 | R | false | true | 315 | rd | pours_etud_perte_reprise.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{pours_etud_perte_reprise}
\alias{pours_etud_perte_reprise}
\title{pours_etud_perte_reprise}
\usage{
pours_etud_perte_reprise(sqlite_base)
}
\arguments{
\item{sqlite_base}{\dots}
}
\description{
pours_etud_perte_reprise
}
|
a1da31ffc8b59d5fee34d1d4278366da469131ce | 969e1c4b5ba41f2a79beac71e0e842adb514f6df | /man/exampleProteinTraces.Rd | cf9450e345e5b87413e68cddca063c7b383bee8b | [
"Apache-2.0"
] | permissive | kiahalespractice/CCprofiler | 8f45fdef5593ef1b5d23454c27661fbce9655338 | 60e24828349b7c03bf82d9f178353c0e41a95565 | refs/heads/master | 2023-07-12T10:09:56.710306 | 2021-05-14T11:28:03 | 2021-05-14T11:28:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 560 | rd | exampleProteinTraces.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{exampleProteinTraces}
\alias{exampleProteinTraces}
\title{Exemplary protein level traces object}
\format{An object of class \code{traces} of length 4.}
\usage{
exampleProteinTraces
}
\description{
Protein level traces as, for example, generated by \code{proteinQuantification}.
}
\examples{
## protein quantification
proteinTraces <- proteinQuantification(examplePeptideTracesFiltered)
all.equal(proteinTraces,exampleProteinTraces)
}
\keyword{datasets}
|
c4f4d38720a2c18347b0ca3c55707bad781b1dc7 | f6677135d62d8aeac44d206b12be66f5e9123091 | /R/RcppExports.R | 02789a83b48fd8e3c87e0054f0f03e6e36a0b58c | [] | no_license | cran/TSDFGS | 2c49d329e5b4c8f02d169bbe3dc6b1682c50037f | 595580483c921b33b3d37f5e84c962ff6d024fca | refs/heads/master | 2023-03-16T14:31:09.001807 | 2022-06-07T13:00:11 | 2022-06-07T13:00:11 | 174,551,977 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,504 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' CD-score
#'
#' This function calculates the CD-score <doi:10.1186/1297-9686-28-4-359> for a given training set and test set.
#'
#' @author Jen-Hsiang Ou
#'
#' @param X A numeric matrix. The training set genotypic information matrix can be given as a genotype matrix (coded as -1, 0, 1) or a principal component matrix (row: sample; column: marker).
#' @param X0 A numeric matrix. The test set genotypic information matrix can be given as a genotype matrix (coded as -1, 0, 1) or a principal component matrix (row: sample; column: marker).
#' @return A floating-point number, the CD score.
#'
#' @import Rcpp
#'
#' @export
#'
#' @examples
#' data(geno)
#' \dontrun{cd_score(geno[1:50, ], geno[51:100, ])}
#'
cd_score <- function(X, X0) {
    # Thin wrapper: delegates to the compiled C++ routine registered by Rcpp.
    .Call('_TSDFGS_cd_score', PACKAGE = 'TSDFGS', X, X0)
}
#' PEV score
#'
#' This function calculates the prediction error variance (PEV) score <doi:10.1186/s12711-015-0116-6> for a given training set and test set.
#'
#' @param X A numeric matrix. The training set genotypic information matrix can be given as a genotype matrix (coded as -1, 0, 1) or a principal component matrix (row: sample; column: marker).
#' @param X0 A numeric matrix. The test set genotypic information matrix can be given as a genotype matrix (coded as -1, 0, 1) or a principal component matrix (row: sample; column: marker).
#'
#' @return A floating-point number, the PEV score.
#'
#' @author Jen-Hsiang Ou
#'
#' @import Rcpp
#' @export
#' @examples
#' data(geno)
#' \dontrun{pev_score(geno[1:50, ], geno[51:100, ])}
#'
pev_score <- function(X, X0) {
    # Thin wrapper: delegates to the compiled C++ routine registered by Rcpp.
    .Call('_TSDFGS_pev_score', PACKAGE = 'TSDFGS', X, X0)
}
#' r-score
#'
#' This function calculates the r-score <doi:10.1007/s00122-019-03387-0> for a given training set and test set.
#'
#' @author Jen-Hsiang Ou
#'
#' @param X A numeric matrix. The training set genotypic information matrix can be given as a genotype matrix (coded as -1, 0, 1) or a principal component matrix (row: sample; column: marker).
#' @param X0 A numeric matrix. The test set genotypic information matrix can be given as a genotype matrix (coded as -1, 0, 1) or a principal component matrix (row: sample; column: marker).
#' @return A floating-point number, the r-score.
#'
#' @import Rcpp
#'
#' @export
#' @examples
#' data(geno)
#' \dontrun{r_score(geno[1:50, ], geno[51:100, ])}
#'
r_score <- function(X, X0) {
    # Thin wrapper: delegates to the compiled C++ routine registered by Rcpp.
    .Call('_TSDFGS_r_score', PACKAGE = 'TSDFGS', X, X0)
}
|
a193e52ffc31301e1d33983f45d3f404acc0efe8 | 7f7dbcf6c8d7c3146955638ac581a1b8d8a27d84 | /man/summary.nlsgrid.Rd | 40e1e8967f6309f0d70fa6b1f3ba8d951c4895d8 | [] | no_license | cran/nlsrk | 4d7e6332b6f4ff2c97874fe9752ff61a194da3d0 | cf5e0d26224bd42b04b58df2c69ec5afdc6b7845 | refs/heads/master | 2020-12-07T17:07:59.740964 | 2017-06-23T22:01:24 | 2017-06-23T22:01:24 | 95,274,750 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 732 | rd | summary.nlsgrid.Rd | \name{summary.nlsgrid}
\alias{summary.nlsgrid}
\title{ Summary method for objects of class nlsgrid (package nlsrk) }
\description{
Prints the characteristics of an nlsgrid
}
\usage{
\method{summary}{nlsgrid}(object, ...)
}
\arguments{
\item{object}{ An object of class nlsgrid }
\item{\dots}{ Any parameters for summary }
}
\value{
NULL
}
\references{ none }
\author{ Jean-Sebastien Pierre\cr
\email{Jean-sebastien.pierre@univ-rennes1.fr}\cr
}
\seealso{ \code{\link{nlsrk}}, \code{\link{nlsrk-package}}, \code{\link{nls}}, \code{\link{nlscontour}}}
\examples{
data(logis)
m1<-nls(y~k/(1+c*exp(-r*time)),data=logis,start=list(k=100,r=0.1,c=45))
gr12<-nlscontour(m1)
summary(gr12)
}
\keyword{ nonlinear }
|
c6cc78ee86683315323493b83dab7d862393979f | 972938c1df8b58ca1cb8e8d72d38c89c78d1d9d5 | /man/ObpiStrat.Rd | 9ad1d8c692cc11325bb0f1b0613331d066b30677 | [] | no_license | spignatelli/hedgeR | 07ac3e4a640b5b0a52507996cbe09df394f35603 | 5bd7cf3e8cdc782665821fb726310d2bc47fdf1c | refs/heads/master | 2021-01-12T21:35:56.454896 | 2016-03-09T11:23:41 | 2016-03-09T11:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 829 | rd | ObpiStrat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ObpiStrat.R
\name{ObpiStrat}
\alias{ObpiStrat}
\title{Option Based Portfolio Insurance (OBPI)}
\usage{
ObpiStrat(Q, P, K = P[1], vol, R = 0, tdays = 250, tcost = 0, min = Q,
int = TRUE)
}
\arguments{
\item{Q}{numeric value for quantity to be hedged}
\item{P}{numeric futures price vector}
\item{K}{numeric strike price}
\item{vol}{numeric volatility}
\item{R}{numeric interest rate}
\item{tdays}{integer assumed number of trading days per year}
\item{tcost}{numeric transaction costs per unit}
\item{min}{numeric minimum quantity to be hedged at delivery}
\item{int}{TRUE/ FALSE integer restriction on tradable volume}
}
\value{
Data frame with strategy results
}
\description{
Implements OBPI strategy for commodity price risk management
}
|
cf9c16332f4c7f4d9bf8adfa075381e953b138d1 | 3487f9041213e7416fa1cdc3d3aef3bd942dd4a2 | /man/PlotControlPercentagesPerSample.Rd | 0274088869c7c34ba7f4cbb056d191be810b5c85 | [] | no_license | quanaibn/ascend | 8ad1d10a5051c6362b2133199a2d38e27653adf6 | 93dae316ec328732764adfc6056382a8eac702a1 | refs/heads/master | 2021-05-01T14:29:35.310976 | 2018-01-31T02:00:04 | 2018-01-31T02:00:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 447 | rd | PlotControlPercentagesPerSample.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ascend_plots.R
\name{PlotControlPercentagesPerSample}
\alias{PlotControlPercentagesPerSample}
\title{PlotControlPercentagesPerSample}
\usage{
PlotControlPercentagesPerSample(object, control.name)
}
\arguments{
\item{object}{A \linkS4class{EMSet} object}
\item{control.name}{Name of the control you would like to plot}
}
\description{
PlotControlPercentagesPerSample
}
|
012542e326256e48174fa73bcc31781da291431e | 399cb1248cacbe34a7567a27f75f0e729ffe068e | /R/count_data.r | 754726cfe5adf7e09aa8f9a6445e74e84d5c2470 | [
"MIT"
] | permissive | ikbentimkramer/tidystats-v0.3 | c62b04ae13a4e760f926323a206fd2fc1dbab2af | 03b08a96c1cb4617a3c90daab3ae88d51d1f5fcc | refs/heads/master | 2020-06-28T01:34:45.599446 | 2019-08-12T12:49:55 | 2019-08-12T12:49:55 | 200,107,970 | 0 | 0 | null | 2019-08-01T19:27:09 | 2019-08-01T19:27:08 | null | UTF-8 | R | false | false | 2,170 | r | count_data.r | #' Count the total of observations
#'
#' \code{count_data} returns the number of observations for categorical
#' variables.
#'
#' @param data A data frame.
#' @param ... One or more unquoted (categorical) column names from the data
#' frame, separated by commas.
#' @param na.rm Logical. Should missing values (including NaN) be removed?
#'   NOTE(review): this argument is currently never applied in the body --
#'   missing values are counted as their own group. It is kept for backward
#'   compatibility; confirm the intended behavior before relying on it.
#'
#' @details The data frame can be grouped using \strong{dplyr}'s \code{group_by}
#' so that the total of observations will be calculated for each group level.
#'
#' @examples
#' library(dplyr)
#'
#' # 1 variable
#' count_data(cox, sex)
#'
#' # 2 variables
#' count_data(cox, condition, sex)
#'
#' # 1 variable, 1 group
#' cox %>%
#'   group_by(condition) %>%
#'   count_data(sex)
#'
#' @export
count_data <- function(data, ..., na.rm = TRUE) {
  # Capture the requested columns as quosures (tidy evaluation).
  vars <- quos(...)

  if (length(vars) > 0) {
    # Validate that every requested column is categorical.
    # The original compared class() via sapply(), which breaks on ordered
    # factors (class() returns c("ordered", "factor")) and on base data
    # frames when a single column is selected (drop-to-vector);
    # is.factor()/is.character() handle both correctly.
    requested <- dplyr::select(dplyr::ungroup(data), !!! vars)
    categorical <- vapply(
      requested,
      function(col) is.factor(col) || is.character(col),
      logical(1)
    )
    if (any(!categorical)) {
      stop("Variables contain unsupported variable type")
    }

    # Remember the caller's grouping so it can be restored at the end.
    grouping <- dplyr::group_vars(data)
    # Keep only the requested columns plus the grouping columns;
    # all_of() makes the character-vector selection explicit.
    output <- dplyr::select(data, !!! vars, dplyr::all_of(grouping))
    # Convert all factors to characters so gather() can stack columns with
    # different level sets.
    output <- dplyr::mutate_if(output, is.factor, as.character)
    # Stack the requested variables into (var, group) pairs.
    # gather() is superseded by pivot_longer() but is retained here to keep
    # the output's column order and types identical.
    output <- tidyr::gather(output, "var", "group", !!! vars)
    # Re-order columns: var and group first.
    output <- dplyr::select(output, var, group, dplyr::everything())
    # Count observations for every var/group/original-group combination.
    output <- output %>%
      dplyr::ungroup() %>%
      dplyr::group_by_all()
    output <- dplyr::summarize(output, n = n())
    # Percentage of each group within its variable.
    output <- output %>%
      dplyr::group_by(var) %>%
      dplyr::mutate(pct = n / sum(n) * 100)
    # Restore the caller's original grouping.
    output <- dplyr::group_by_at(output, vars(grouping))
  } else {
    # No columns requested: report the total row count only.
    # TODO: When the data is grouped, return N for each group.
    # data_frame() is deprecated in dplyr; tibble() is the drop-in
    # replacement.
    output <- dplyr::tibble(N = nrow(data))
  }
  return(output)
} |
580dfb32a294ac63eb1e1be8e511f079df004019 | b5284b2916d786e9d76c6b0624ea81f8a5b7df27 | /tests/testthat/test_ungroup_survey.r | 814be077800f02beacbd81103fbbd73e725cb0b7 | [] | no_license | babeheim/yamlform | 435c3d1c8f3c8c95abc843d243b6d4b6b87c5768 | 771d0b8b9a8b59365d0a1977d7e2b9651f7d7162 | refs/heads/master | 2020-06-16T12:57:14.762144 | 2019-07-17T12:25:01 | 2019-07-17T12:25:01 | 195,582,468 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,434 | r | test_ungroup_survey.r |
# Fixture: a single question nested inside two "begin group" sections.
# local() keeps the intermediate objects out of the test environment.
form1 <- local({
  age_q <- list(name = "patient_age", label = "patient age", type = "integer")
  inner_grp <- list(
    name = "second_group", label = "second group", type = "begin group",
    survey = list(age_q)
  )
  outer_grp <- list(
    name = "basic_group", label = "basic group", type = "begin group",
    survey = list(inner_grp)
  )
  list(survey = list(outer_grp))
})
# Fixture: three nested "begin group" sections holding two questions.
form2 <- local({
  questions <- list(
    list(name = "patient_age", label = "patient age", type = "integer"),
    list(name = "patient_sex", label = "patient sex", type = "integer")
  )
  third <- list(
    name = "third_group", label = "third group", type = "begin group",
    survey = questions
  )
  second <- list(
    name = "second_group", label = "second group", type = "begin group",
    survey = list(third)
  )
  basic <- list(
    name = "basic_group", label = "basic group", type = "begin group",
    survey = list(second)
  )
  list(survey = list(basic))
})
# Fixture: same shape as form2 but every section is a "begin repeat".
form3 <- local({
  repeat_section <- function(name, label, inner) {
    list(name = name, label = label, type = "begin repeat", survey = inner)
  }
  questions <- list(
    list(name = "patient_age", label = "patient age", type = "integer"),
    list(name = "patient_sex", label = "patient sex", type = "integer")
  )
  list(survey = list(
    repeat_section("basic_group", "basic group", list(
      repeat_section("second_group", "second group", list(
        repeat_section("third_group", "third group", questions)
      ))
    ))
  ))
})
# Fixture: mixed nesting -- repeat > group > repeat -- with two questions.
form4 <- local({
  section <- function(name, label, type, inner) {
    list(name = name, label = label, type = type, survey = inner)
  }
  questions <- list(
    list(name = "patient_age", label = "patient age", type = "integer"),
    list(name = "patient_sex", label = "patient sex", type = "integer")
  )
  list(survey = list(
    section("basic_group", "basic group", "begin repeat", list(
      section("second_group", "second group", "begin group", list(
        section("third_group", "third group", "begin repeat", questions)
      ))
    ))
  ))
})
# Combined fixture: one survey list holding the sections of all four forms.
form <- list(
  survey = c(form1$survey, form2$survey, form3$survey, form4$survey)
)
# Smoke tests for ungroup_survey() (from the package under test): a whole
# form object is rejected, while each form's $survey list -- at any nesting
# depth of "begin group"/"begin repeat" sections -- is processed without
# errors, warnings, or printed output.
test_that("ungroup_survey works", {
  # Passing the full form (the list wrapping $survey) must raise an error.
  expect_error(form1 %>% ungroup_survey)
  # Fixtures of increasing nesting depth and mixed group/repeat types.
  expect_silent(form1$survey %>% ungroup_survey)
  expect_silent(form2$survey %>% ungroup_survey)
  expect_silent(form3$survey %>% ungroup_survey)
  expect_silent(form4$survey %>% ungroup_survey)
  # Concatenation of all four survey lists.
  expect_silent(form$survey %>% ungroup_survey)
  # The result must also row-bind cleanly into one data frame.
  expect_silent(form$survey %>% ungroup_survey %>% bind_rows)
})
|
ecdcfa99abdfa03c9d1222420692ccbf930b3314 | cc154449ade2f529f6eef2bf326e7b6d79062fea | /Assignment4/modeltestattempt.R | 0f19d881f816df86671c82dc2a9e39975eeef4df | [] | no_license | rnmaconeghy/assignment4 | c2cb489d5012620f619fa5a92719a541cef1f075 | 227f05b78820ee9278246d59442b283c495b2b82 | refs/heads/master | 2021-01-10T20:31:49.479807 | 2014-03-27T21:28:55 | 2014-03-27T21:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,681 | r | modeltestattempt.R | install.packages("ape")
# Phylogenetic analysis of mustelid cytochrome-oxidase I (COI) sequences.
# Cleaned up from a saved console transcript:
#   * removed duplicated/typo'd commands (e.g. "cophentic", repeated plots),
#   * fixed a fused statement that was a syntax error
#     ('orig<-evol.distinct(...)orig'),
#   * defined tr.bionj before it is plotted (it was never assigned),
#   * replaced T/F with TRUE/FALSE and require() with library().
#
# One-time setup (left commented out; a script should not install packages):
# install.packages("phangorn")
library(ape)       # read.dna(), dist.dna(), nj(), bionj(), root(), fastme.bal()
library(phangorn)  # upgma(), pml(), optim.pml(), bootstrap.pml(), modelTest()

# --- Data and pairwise distances --------------------------------------------
x <- read.dna("mustelid_coI.fasta", format = "fasta")
d <- dist.dna(x)                      # default (K80) distances
write.table(as.matrix(d), "distances.csv")
d
d2 <- dist.dna(x, model = "GTR")      # computed but unused below (kept from original)

# --- UPGMA tree -------------------------------------------------------------
tr.upgma <- upgma(d)
plot(tr.upgma)
# Re-root on the outgroup taxon (label exactly as it appears in the alignment).
tr.upgmar <- root(tr.upgma, outgroup = "Melursus/1-601 urinus")
plot(tr.upgmar); nodelabels(); add.scale.bar(length = 0.001)

# --- Neighbour-joining / BIONJ trees ----------------------------------------
tr.nj <- nj(d)
tr.bionj <- bionj(d)   # was plotted but never created in the original script
par(mfrow = c(1, 2))
plot(tr.upgma, "f")    # fan layout
plot(tr.nj, "p")       # phylogram
par(mfrow = c(1, 1))

# --- UPGMA fit diagnostics: cophenetic vs observed distances ----------------
dt.upgma <- cophenetic(tr.upgma)
dmat <- as.matrix(d)
nms <- rownames(dmat)
dt.upgma <- as.dist(dt.upgma[nms, nms])  # align taxon order with d before subtracting
plot(dt.upgma - d, ylab = "residuals", cex = 0.5, main = "UPGMA")
abline(h = 0, lty = 3)

# --- Minimum-evolution tree (balanced FastME with NNI/SPR/TBR moves) --------
tr.fast <- fastme.bal(d, nni = TRUE, spr = TRUE, tbr = TRUE)
plot(tr.fast)

# --- Maximum likelihood fit and bootstrap -----------------------------------
fit <- pml(tr.upgma, as.phyDat(x))
fit <- optim.pml(fit, TRUE)   # TRUE => also optimize the tree topology
plot(fit)

set.seed(8)                   # reproducible bootstrap
bs <- bootstrap.pml(fit, bs = 100, optNni = TRUE)
treeBS <- plotBS(fit$tree, type = "p", bs)

# --- Substitution-model selection -------------------------------------------
mt <- modelTest(as.phyDat(x), G = FALSE, I = FALSE)
mt
dhky <- dist.dna(x, model = "GTR")  # kept from original; name suggests HKY was intended -- confirm

# --- Evolutionary distinctiveness -------------------------------------------
# evol.distinct() lives in the 'picante' package, which the original never
# attached; qualify the call and confirm picante is installed before running.
orig <- picante::evol.distinct(tr.upgma, type = "fair.proportion")
orig

# --- Summary panel of all trees ---------------------------------------------
par(mfrow = c(2, 2))
par(mar = rep(1, 4))   # original's rep(1, 1) was invalid: par(mar=) needs 4 values
plot(tr.upgma, "f")
plot(dt.upgma - d, ylab = "residuals", cex = 0.5, main = "UPGMA")
plot(tr.fast)
plot(tr.bionj)
par(mfrow = c(1, 1))
par(mar = c(5, 4, 4, 2) + 0.1)  # restore default margins
|
3276899fa8218a67bbae6efa65d9208a5abb6ed3 | a2362ef7c93007c69c56f4f6887b49250d911bfd | /R/examples.R | 1649dff955a07a7ce6feb67af3da667488c7e7b1 | [] | no_license | lionel-/gsim | 146e991caad094a22fc9f5bff6cf86af90707015 | a8034d69d6b9bd4f16a4396107050a30f53c1f6b | refs/heads/master | 2020-05-20T05:06:00.238868 | 2015-09-27T21:03:34 | 2015-09-28T12:00:27 | 24,929,896 | 13 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,564 | r | examples.R | #' Example datasets and models
#'
#' Obtain and tidy ARM's wells and radon datasets from
#' http://github.com/stan-dev/example-models/
#'
#' @name examples
NULL
#' @rdname examples
#' @export
wells_data <- function() {
check_packages("httr", "dplyr")
url <- "https://raw.githubusercontent.com/stan-dev/example-models/master/ARM/Ch.7/wells.data.R"
wells <- new.env()
raw <- httr::GET(url) %>% httr::content(as = "text")
eval(expr = parse(text = raw), wells)
as.data.frame(as.list(wells)) %>%
dplyr::tbl_df() %>%
dplyr::select(-N) %>%
dplyr::rename(switched = switc) %>%
dplyr::mutate(
c_dist100 = (dist - mean(dist, na.rm = TRUE)) / 100,
c_arsenic = arsenic - mean(arsenic, na.rm = TRUE)
)
}
#' @rdname examples
#' @export
radon_data <- function() {
check_packages("RCurl", "dplyr")
url <- "https://raw.githubusercontent.com/stan-dev/example-models/master/ARM/Ch.12/radon.data.R"
radon <- new.env()
raw <- httr::GET(url) %>% httr::content(as = "text")
eval(expr = parse(text = raw), radon)
as.data.frame(as.list(radon)) %>%
dplyr::tbl_df() %>%
dplyr::select(-N, -J)
}
#' @rdname examples
#' @export
radon_model <- function(language = "stan", variant = "centered",
ref = FALSE) {
file <- paste0("radon-", variant, ".", language)
file <- system.file(paste0("extdata/models/", file), package = "gsim")
if (file == "") {
stop("Unsupported language/variant pair", call. = FALSE)
}
if (ref) {
file
} else {
readChar(file, file.info(file)$size)
}
}
|
bbaa44742fb6e95e3810d69ca1c5b25c07678edb | 8a483632aada1fea716ed7ddab9ef42b113c413e | /code/functions/helper.R | e0c99b60b2cf9c60786475d7b34ae195df591721 | [] | no_license | ben-williams/parallel_diverge | ea54ca6caee59d321412e088ae57f920850d4464 | 9a0fd91a8e2418bbb0b1f0e7f37ca9b8c66acd7c | refs/heads/master | 2020-07-06T12:56:27.404297 | 2018-08-06T18:44:12 | 2018-08-06T18:44:12 | 66,984,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,390 | r | helper.R | # Code for producing agent-based model simulations of
# GOA walleye pollock fleet behavior
# ben.williams@alaska.gov
# bcwilliams2@alaska.edu
# Notes: This code will be pulled in for each scenario examined
# loads libraries, brings in data, provides a few functions for examinations
# load ----
library(extrafont)
#font_import() only do this one time - it takes a while
loadfonts(device="win")
windowsFonts(Times=windowsFont("TT Times New Roman"))
library(tidyverse)
theme_set(theme_bw(base_size=12,base_family='Times New Roman')+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()))
library(lubridate)
library(truncnorm)
library(NMOF)
library(scales)
library(splitstackshape)
library(MASS)
library(EnvStats)
library(gridExtra)
library(rlist)
options(digits=4)
options(scipen=999)
# data ----
# Catch <- read_csv('chapter_3/data/Catch.csv') # Simulated TAC data - no longer used
# pol <- read_csv('data/pol.csv') # CFEC data
# app <- read_csv('data/apportion.csv')
# Calculate distributions by area and season
app %>%
filter(area!=640) %>%
group_by(area, season) %>%
summarise(app = mean(app)) -> app # for alternative model
# create a data.frame of vessel sizes, ports, and season for simulating complete data sets
all.boat <- expand.grid(p_fshy = 1:4, port = 1:4, season = 1:4, area = 1:3) # dataframe for making a complete grid
# cleanup ----
#cleanup function for after simulation is run
f.docall <- function(x){do.call(bind_rows, x)}
# check results plots
f.check <- function(x){
x %>%
group_by(season, area, sim) %>%
summarise(c1 = sum(c1), C1=mean(C1),
c2 = sum(c2), C2=mean(C2),
c3 = sum(c3), C3=mean(C3)) %>%
mutate(c1 = ifelse(area==1, c1, NA),
c2 = ifelse(area==2, c2, NA),
c3 = ifelse(area==3, c3, NA)) %>%
gather(key, catch, -season, -area, -sim, -C1, -C2, -C3) %>%
gather(key2, TAC, -season, -area, -key, -catch, -sim) %>%
mutate(catch = ifelse(key=='c1' & key2=='C1', catch,
ifelse(key=="c2" & key2=="C2", catch,
ifelse(key=='c3' & key2=='C3', catch, NA))),
Area = factor(area)) %>%
ggplot(aes(TAC, catch, color=Area)) + geom_point() + geom_abline(slope=1, lty=4)
}
f.check.port <- function(x){
x %>%
group_by(season, d, sim) %>%
summarise(c1 = sum(c1), C1=mean(C1),
c2 = sum(c2), C2=mean(C2),
c3 = sum(c3), C3=mean(C3)) %>%
# mutate(c1 = ifelse(area==1, c1, NA),
# c2 = ifelse(area==2, c2, NA),
# c3 = ifelse(area==3, c3, NA)) %>%
gather(key, catch, -season, -d, -sim, -C1, -C2, -C3) %>%
gather(key2, TAC, -season, -d, -key, -catch, -sim) %>%
mutate(catch = ifelse(key=='c1' & key2=='C1', catch,
ifelse(key=="c2" & key2=="C2", catch,
ifelse(key=='c3' & key2=='C3', catch, NA))),
Port = factor(d)) %>%
ggplot(aes(TAC, catch, color = Port)) + geom_point() +
geom_abline(slope = 1, lty = 4)
}
f.check.individual <- function(x){
x %>%
group_by(season, p_holder, sim, d) %>%
summarise(c1 = sum(c1), C1 = mean(C1),
c2 = sum(c2), C2 = mean(C2),
c3 = sum(c3), C3 = mean(C3)) %>%
# mutate(c1 = ifelse(area==1, c1, NA),
# c2 = ifelse(area==2, c2, NA),
# c3 = ifelse(area==3, c3, NA)) %>%
gather(key, catch, -season, -p_holder, -sim, -C1, -C2, -C3, -d) %>%
gather(key2, TAC, -season, -p_holder, -key, -catch, -sim, -d) %>%
mutate(#catch = ifelse(key=='c1' & key2=='C1', catch,
# ifelse(key=="c2" & key2=="C2", catch,
# ifelse(key=='c3' & key2=='C3', catch, NA))),
Port = factor(d)) %>%
ggplot(aes(TAC, catch, color = Port)) + geom_jitter(width = 10) +
geom_abline(slope = 1, lty = 4)
}
f.simcheck <- function(x, a){
x %>%
group_by(sim, season, d) %>%
summarise(catch = sum(catch)) %>%
dplyr::select(sim, season, port = d, catch) -> temp
pol %>%
filter(g_pounds>2200, year==a) %>%
group_by(port, season) %>%
summarise(t = sum(ton)) %>%
left_join(temp) %>%
mutate(Port = factor(port)) %>%
ggplot(aes(t, catch, color = Port)) + geom_point() +
geom_abline(slope = 1, lty = 4)
}
|
8e1619f025ea3590ad9d61a6a39f163a9d486857 | ce0f2ab6661f55e5b31f48dd966e96f99c4d8914 | /assignment_3.R | 2abc1d0b94259a76e12418098ff819f3aa733998 | [
"Apache-2.0"
] | permissive | jinsonxu/ke_logistics_analytics | 0d48370e8ffc2a54bb980bd3a83bc3394b8c9c49 | 1a17bbed8cfdefb0baf66790da8a7d71b4d4ce1e | refs/heads/master | 2016-08-04T20:27:28.057617 | 2014-11-01T07:15:56 | 2014-11-01T07:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,979 | r | assignment_3.R | # Institute of Systems Science
# Master of Technology in Knowledge Engineering
# Logistics Analytics
# Assignment 3
# By: Jinson Xu
# Date: 18th October 2014
# clear workspace
rm(list=ls())
library(jsonlite)
library(httr)
# load custom functions
source('funcs.R')
source('config.R')
# read in our data files
plainRoutingInfo <- read.csv('data/1_vrp_8_plain_routing.csv')
fleetInfo <- read.csv('data/fleet.csv')
plainRoutingInfo[1] <- cleanPostalCode(plainRoutingInfo[,c(1)]) # clean our network 'id' first
depot <- plainRoutingInfo[1,1] # assumption: First postal code is the depot.
#shiftStart <- '8:00';
#shiftEnd <- '17:30';
# create network list
names(plainRoutingInfo) <- c('name', 'lat', 'lng')
plainRoutingList <- split(plainRoutingInfo, plainRoutingInfo$name)
# create fleet list
names(fleetInfo) <- c('vehicle', 'capacity', 'shift-start', 'shift-end')
fleetInfo['start-location'] <- depot # assumption: all trucks start and end at depot
fleetInfo['end-location'] <- depot # assumption: all trucks start and end at depot
fleetInfoList <- split(fleetInfo, fleetInfo$vehicle)
# create visits list
visitsInfo <- plainRoutingInfo
visitsInfo$duration <- 5
visitsInfo <- visitsInfo[,c(1,4)]
visitsInfoList <- split(visitsInfo, visitsInfo$name)
# generate JSON object
networkJSON <- toJSON(list(network=plainRoutingList,
fleet=fleetInfoList,
visits=visitsInfoList))
networkJSON <- gsub('\\[|\\]','',networkJSON, perl=TRUE)
writeLines(networkJSON,'q1_json.txt') # take a look
postBody <- list(network=plainRoutingList,
fleet=fleetInfoList,
visits=visitsInfoList)
# send request to routific
r <- POST("https://routific.com/api/vrp-long",
add_headers(Authorization = token), # the token variable is set in config.R which is not uploaded to GitHub for obvious reasons :P
body = postBody,
encode=c('json'))
#str(content(r))
content(r, "parsed") |
19576b909858362c726e1ede6a484448a4acf795 | 375bc6c6ec97d3f7d973aa967c55eec7cbb2b15c | /tableSMY.Rcheck/tableSMY-Ex.R | 0e7b9dc70eb431a28e2726a0a330ffd1f27da4fe | [] | no_license | peterwu19881230/Microbial_phenotype_data_mining | de9ce76ad6717d0d95df4d9972b399ddc5ae37dc | 0877af44fd66244f4ff927480acc5f524cb132fb | refs/heads/master | 2020-08-19T11:07:16.888840 | 2019-10-18T01:39:48 | 2019-10-18T01:39:48 | 215,914,207 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,008 | r | tableSMY-Ex.R | pkgname <- "tableSMY"
source(file.path(R.home("share"), "R", "examples-header.R"))
options(warn = 1)
base::assign(".ExTimings", "tableSMY-Ex.timings", pos = 'CheckExEnv')
base::cat("name\tuser\tsystem\telapsed\n", file=base::get(".ExTimings", pos = 'CheckExEnv'))
base::assign(".format_ptime",
function(x) {
if(!is.na(x[4L])) x[1L] <- x[1L] + x[4L]
if(!is.na(x[5L])) x[2L] <- x[2L] + x[5L]
options(OutDec = '.')
format(x[1L:3L], digits = 7L)
},
pos = 'CheckExEnv')
### * </HEADER>
library('tableSMY')
base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv')
cleanEx()
nameEx("anyIncomplete")
### * anyIncomplete
flush(stderr()); flush(stdout())
base::assign(".ptime", proc.time(), pos = "CheckExEnv")
### Name: anyIncomplete
### Title: Check Incompletion
### Aliases: anyIncomplete
### ** Examples
set.seed(101)
random.matrix=matrix(runif(500, min = -1, max = 1), nrow = 50)
graphTable(random.matrix)
set.seed(101)
random.matrix[sample(1:50,10),sample(1:10,2)]=NA
graphTable(random.matrix)
anyIncomplete(random.matrix)
base::assign(".dptime", (proc.time() - get(".ptime", pos = "CheckExEnv")), pos = "CheckExEnv")
base::cat("anyIncomplete", base::get(".format_ptime", pos = 'CheckExEnv')(get(".dptime", pos = "CheckExEnv")), "\n", file=base::get(".ExTimings", pos = 'CheckExEnv'), append=TRUE, sep="\t")
cleanEx()
nameEx("changeNames")
### * changeNames
flush(stderr()); flush(stdout())
base::assign(".ptime", proc.time(), pos = "CheckExEnv")
### Name: changeNames
### Title: Change rol/col names
### Aliases: changeNames
### ** Examples
Table=matrix(rnorm(2*3),ncol=2,nrow=3)
rownames(Table)=c("one","two","three")
colnames(Table)=c("col_one","col_two")
Table
rowNameForTable=matrix(c("two","one","three","TWO","ONE","THREE"),ncol=2,byrow=FALSE)
colNameForTable=matrix(c("col_two","col_one","COL_TWO","COL_ONE"),ncol=2,byrow=FALSE)
#newTable=changeNames(rowOrCol="test",Table,nameForTable) #test the error message of the function
newTable=changeNames(rowOrCol="row",Table,rowNameForTable) #test rownames
newTable=changeNames(rowOrCol="col",Table,colNameForTable) #test colnames
base::assign(".dptime", (proc.time() - get(".ptime", pos = "CheckExEnv")), pos = "CheckExEnv")
base::cat("changeNames", base::get(".format_ptime", pos = 'CheckExEnv')(get(".dptime", pos = "CheckExEnv")), "\n", file=base::get(".ExTimings", pos = 'CheckExEnv'), append=TRUE, sep="\t")
cleanEx()
nameEx("checkDuplicates_vect")
### * checkDuplicates_vect
flush(stderr()); flush(stdout())
base::assign(".ptime", proc.time(), pos = "CheckExEnv")
### Name: checkDuplicates_vect
### Title: Check items that occur more than once
### Aliases: checkDuplicates_vect
### ** Examples
checkDuplicates_vect(c(1,1,2,3,4,4,4,5,6,7,8,9,10))
base::assign(".dptime", (proc.time() - get(".ptime", pos = "CheckExEnv")), pos = "CheckExEnv")
base::cat("checkDuplicates_vect", base::get(".format_ptime", pos = 'CheckExEnv')(get(".dptime", pos = "CheckExEnv")), "\n", file=base::get(".ExTimings", pos = 'CheckExEnv'), append=TRUE, sep="\t")
cleanEx()
nameEx("filterTable")
### * filterTable
flush(stderr()); flush(stdout())
base::assign(".ptime", proc.time(), pos = "CheckExEnv")
### Name: filterTable
### Title: Generate quick visualization of your matrix/dataframe and filter
### any NA/NULL/""
### Aliases: filterTable
### ** Examples
set.seed(101)
random.matrix=matrix(runif(500, min = -1, max = 1), nrow = 50)
set.seed(101)
random.matrix[sample(1:50,10),sample(1:10,2)]=NA
filtered_random.matrix=filterTable(random.matrix)
str(filtered_random.matrix)
base::assign(".dptime", (proc.time() - get(".ptime", pos = "CheckExEnv")), pos = "CheckExEnv")
base::cat("filterTable", base::get(".format_ptime", pos = 'CheckExEnv')(get(".dptime", pos = "CheckExEnv")), "\n", file=base::get(".ExTimings", pos = 'CheckExEnv'), append=TRUE, sep="\t")
cleanEx()
nameEx("graphTable")
### * graphTable
flush(stderr()); flush(stdout())
base::assign(".ptime", proc.time(), pos = "CheckExEnv")
### Name: graphTable
### Title: Draw a heat map of your matrix/dataframe/datatable
### Aliases: graphTable
### ** Examples
mat=matrix(c(1,2,3,4,5,6),ncol=2)
graphTable(mat)
set.seed(101)
random.matrix=matrix(runif(500, min = -1, max = 1), nrow = 50)
graphTable(random.matrix)
set.seed(101)
random.matrix[sample(1:50,10),sample(1:10,2)]=NA
graphTable(random.matrix)
base::assign(".dptime", (proc.time() - get(".ptime", pos = "CheckExEnv")), pos = "CheckExEnv")
base::cat("graphTable", base::get(".format_ptime", pos = 'CheckExEnv')(get(".dptime", pos = "CheckExEnv")), "\n", file=base::get(".ExTimings", pos = 'CheckExEnv'), append=TRUE, sep="\t")
### * <FOOTER>
###
cleanEx()
options(digits = 7L)
base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
grDevices::dev.off()
###
### Local variables: ***
### mode: outline-minor ***
### outline-regexp: "\\(> \\)?### [*]+" ***
### End: ***
quit('no')
|
d15b38836278a6933b5158ccd6eeb6eda83932c3 | 3f277f9f4e034d6d47984bdc70a24ba8952e5a1d | /man/waldTest.Rd | 8b3b742598bde96c8b78e1313589c6b7d0f2adae | [] | no_license | kkholst/mets | 42c1827c8ab939b38e68d965c27ffe849bcd6350 | c1b37b8885d5a9b34688cb4019170a424b7bec70 | refs/heads/master | 2023-08-11T17:09:18.762625 | 2023-06-16T07:54:16 | 2023-06-16T07:54:16 | 28,029,335 | 16 | 4 | null | null | null | null | UTF-8 | R | false | true | 449 | rd | waldTest.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dutils.R
\name{waldTest}
\alias{waldTest}
\title{Wald test for model (type III test)}
\usage{
waldTest(object, ...)
}
\arguments{
\item{object, }{for example glm object that can be used with estimate}
\item{...}{arguments for estimate of lava for example id=data$id for cluster correction}
}
\description{
Wald test for model (type III test)
}
\author{
Thomas Scheike
}
|
6fc8fbe930e4af531fb7754d7d9aea6c0029464e | de08a4fd9a437dde251507354751179e97a316c1 | /module.R | ab8ae0a4ac29d9d6f2d0ef28fb5a89c1cb9cd222 | [] | no_license | Juanroalvarado/queue_simulator | 4046b9069e1cf689b766cc6f98cce5b0c4f42e28 | 0ee7914e30b267d9813b974104956f0003461be0 | refs/heads/master | 2021-05-07T03:52:16.807283 | 2017-11-16T21:58:27 | 2017-11-16T21:58:27 | 111,026,597 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,074 | r | module.R |
# UI row showing the queue parameters (lambda, mu, mu2) for one module.
#
# id: module namespace id. Returns a fluidRow of three equal-width boxes
# whose text outputs are filled in server-side by generateMu().
displayMu <- function(id) {
  ns <- NS(id)
  # All three boxes share the same shape; build them with one helper.
  param_box <- function(title, output_id) {
    box(
      h3(title),
      textOutput(ns(output_id)),
      width = 4
    )
  }
  fluidRow(
    param_box("Lambda", "origin_lambda"),
    param_box("Mu", "origin_mu"),
    param_box("Mu2", "origin_mu2")
  )
}
# UI row with the computed queue statistics for one module.
#
# id: module namespace id. Each box pairs two (Spanish) labels with the
# text outputs that formulae() fills in on the server side.
displayInput <- function(id) {
  ns <- NS(id)
  # Every box is "title + (label, output) x 2"; factor the layout out.
  stat_box <- function(title, label_a, out_a, label_b, out_b) {
    box(
      h3(title),
      h5(label_a),
      textOutput(ns(out_a)),
      h5(label_b),
      textOutput(ns(out_b)),
      width = 4
    )
  }
  fluidRow(
    stat_box("Promedio",
             "Unidades en linea de espera", "waiting_units",
             "Unidades en el sistema", "avg_units"),
    stat_box("Tiempos",
             "Tiempo promedio que la unidad pasa en la linea de espera", "avg_waiting_time",
             "Tiempo que la unidad pasa en el sistema", "time_in_system"),
    stat_box("Probabilidad",
             "Que no tenga que esperar", "no_wait",
             "Probabilidad que no haya unidades", "no_units")
  )
}
# Server half of displayMu(): whenever the `go` trigger fires, echo the
# current lambda / mu / mu2 values into the parameter boxes.
#
# input, output, session: standard Shiny module arguments.
# go, mu, lambda, mu2: reactive expressions supplied by the caller.
generateMu <- function(input, output, session, go, mu, lambda, mu2){
  observeEvent(go(), {
    # Each render simply evaluates the corresponding reactive.
    output$origin_lambda <- renderText(lambda())
    output$origin_mu     <- renderText(mu())
    output$origin_mu2    <- renderText(mu2())
  })
}
# Server logic computing single-server (M/M/1) queue statistics from the
# reactive arrival rate (lambda) and service rate (mu).
#
# input, output, session: standard Shiny module arguments.
# go: reactive trigger (e.g. a submit button).
# mu, lambda: reactive expressions returning the service / arrival rates.
#
# Fills the text outputs consumed by displayInput():
#   no_units, waiting_units, avg_units, avg_waiting_time, time_in_system,
#   no_wait.
formulae <- function(input, output, session, go, mu, lambda){
  observeEvent(go(), {
    # P0 = 1 - rho: probability the system is empty.
    output$no_units <- renderText({
      1 - (lambda()/mu())
    })
    # Lq: average number of units waiting in the queue.
    unit_wait <- reactive({
      (lambda()^2)/(mu() * (mu()-lambda()))
    })
    # BUG FIX: output slots must be assigned render functions, not bare
    # reactives; the original used reactive() here, which Shiny rejects
    # at runtime. Use renderText() like every sibling output.
    output$waiting_units <- renderText({
      unit_wait()
    })
    # L = Lq + lambda/mu: average number of units in the system.
    output$avg_units <- renderText({
      unit_wait() + (lambda()/mu())
    })
    # Wq = Lq / lambda: average time a unit spends waiting in the queue.
    avg_time_wait <- reactive({
      unit_wait()/lambda()
    })
    output$avg_waiting_time <- renderText({
      avg_time_wait()
    })
    # W = Wq + 1/mu: average time a unit spends in the system.
    output$time_in_system <- renderText({
      avg_time_wait() + (1/mu())
    })
    # rho = lambda/mu. NOTE(review): the UI labels this box "Que no tenga
    # que esperar" (prob. of NOT waiting), but lambda/mu is the probability
    # the server is busy, i.e. that an arrival DOES wait — confirm intent.
    output$no_wait <- renderText({
      lambda()/mu()
    })
  })
}
dbeb772851d8db494d5d41c9091177a35cb57e0e | 311c0efbcb7886a48f356ccffd730a89ce15fee3 | /plot4.R | 8efb50b25c0af72e260410f586d77a3c802f94a9 | [] | no_license | cg80/ExData_Plotting1 | 21d6db347ba1c15bf673721a19313399f8a559ed | 7b054123ce9efcb3320badf5e586a0088a3a89b5 | refs/heads/master | 2021-01-18T05:30:15.323414 | 2014-08-22T14:03:23 | 2014-08-22T14:03:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,320 | r | plot4.R | ## import the data to R
## Read the full household power consumption file ("?" marks missing values).
## (Renamed from `data` to avoid shadowing base::data.)
raw <- read.csv("household_power_consumption.txt", sep = ";",
                stringsAsFactors = FALSE, na.strings = "?")

## Keep only the two days of interest (dates are still character here).
pwr <- raw[raw$Date == "1/2/2007" | raw$Date == "2/2/2007", ]

## Convert Date to Date class and build a combined POSIXct timestamp.
pwr$Date <- as.Date(pwr$Date, format = "%d/%m/%Y")
pwr$DateTime <- as.POSIXct(paste(pwr$Date, pwr$Time))
str(pwr)

## Open the PNG device and lay out a 2x2 panel.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))

## Panel [1,1]: global active power over time.
plot(pwr$DateTime, pwr$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")

## Panel [1,2]: voltage over time.
plot(pwr$DateTime, pwr$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")

## Panel [2,1]: the three sub-metering series overlaid, with a legend.
plot(pwr$DateTime, pwr$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(pwr$DateTime, pwr$Sub_metering_2, type = "l", col = "red")
lines(pwr$DateTime, pwr$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"), bty = "n")

## Panel [2,2]: global reactive power over time.
plot(pwr$DateTime, pwr$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")

dev.off()
e0edf936ec1ec968488478efdf269ae02863f7f7 | 2101587528caa6a48e0e8b219f2b7f5ce2870d1a | /ui.R | 98b871e7d1e0c66aad7cbcbb285e8d1a7d8c7284 | [] | no_license | marcfinot/Data_Products | e395089db04971ba435d27dbea43e0650a4a4631 | 4ea000ed1b0b2801c6e9bec680c625795a8c50ce | refs/heads/master | 2020-05-30T14:42:05.659727 | 2015-02-22T19:16:17 | 2015-02-22T19:16:17 | 30,503,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,242 | r | ui.R | library(shiny)
# URL of NREL's TMY3 station metadata (USAF id, site name, state, ...).
# NOTE(review): fetched over HTTP at app start-up; the app fails to launch
# if rredc.nrel.gov is unreachable — confirm whether caching is acceptable.
TMYlocationfile <- "http://rredc.nrel.gov/solar/old_data/nsrdb/1991-2005/tmy3/TMY3_StationsMeta.csv"
location_table <- read.csv(TMYlocationfile, header=TRUE)
# Human-readable "STATE Site Name" label used by the drop-down below.
location_table$location <- paste(location_table$State,location_table$Site.Name)
index <- order(location_table$location)
location_sorted <- location_table[index,]
shinyUI(
  pageWithSidebar(
    # Application title
    headerPanel("Solar insolation summary based on TMY data"),
    sidebarPanel(
      # Earlier variant selected stations by USAF id rather than by name:
      # selectInput('locationID','Please choose location',location_sorted$USAF,selected = 704540),
      selectInput('locationID','location name',location_sorted$location,selected = "CA MOUNTAIN VIEW MOFFETT FLD NAS"),
      h5('data downloaded from http://rredc.nrel.gov/'),
      numericInput('size','define solar system size (kW)',5, min = 1, max = 200),
      submitButton('Submit')
    ),
    mainPanel(
      # Outputs below are filled in by the matching server.R.
      h3('Results of solar radiation info for the following location'),
      verbatimTextOutput("inputValue"),
      h4('typical daily horiz. insolation in kWh/m2 (equiv. number of hours of sun)'),
      verbatimTextOutput("prediction"),
      h4('Annual energy production (MWh)'),
      verbatimTextOutput("production"),
      plotOutput('newHist')
    )
  )
)
42c736acfa402f489105e7f6ce82e7b7ac57682a | af65bddcc7a59060c7f787a88d626947f786c315 | /2019-1494.R | 7a4371a69ff7ef073ecf272e2356b268be3b46fe | [] | no_license | duanxiaoqian/data-mining-scripts | ad5b9fad5062a6ebc450e8b66b79ccec8ec1a276 | 1f06b24e67bb8a735f5e2645e4ce90946eeb8f8f | refs/heads/main | 2023-08-14T03:57:46.253457 | 2021-09-22T07:59:07 | 2021-09-22T07:59:07 | 409,114,031 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 816 | r | 2019-1494.R | # 售后LM2019-1494
setwd("Z:/项目数据/LCMS/2020/LM2019-1494/售后/售后1")
library(dplyr)
#veen
d3 <- RMETA2::readxlsx("veen.xlsx", sheet = 1)
d3_filtered <- d3[apply(d3[,-1],1, FUN = function(x){return(sum(x==0)<3)}),] # 比较时,必须全为numeric,不然全是false
d6<- RMETA2::readxlsx("veen.xlsx", sheet = 2)
d6_filtered <- d6[apply(d6[,-1], 1, FUN = function(x){return(sum(x==0)<3)}),]
RMETA2::savexlsx1(d3_filtered, "data_filtered.xlsx",sheet="d3_filtered")
RMETA2::savexlsx1(d6_filtered, "data_filtered.xlsx",sheet="d6_filtered")
RMETA2::auto_alldraw(name="data_filtered.xlsx",needgroup=list(c(1,2)),drawwhat="venny",needlist="Metabolites")
RMETA2::auto_alldraw(name = 'heatmap.xlsx',
drawwhat = "heatmap",row = F,col =F) # 画行 列都聚类的图
|
5cca6042678869c334a0f83668e1cf5aa768ca80 | 6d3fb21b34d50c70c0525bba1bcf40b0d4008c21 | /R/data_student_equity.R | 9a67b7cc593d8bbaaa0704d7bba269bdf830d882 | [] | no_license | vinhdizzo/DisImpact | 2df6051da147dbf8d3bd7292ac7a5439e1d6d269 | 9d0cc5ccd79e06acd3854365b7cf8edfb6991c9c | refs/heads/master | 2022-10-14T10:13:45.066043 | 2022-10-10T17:37:21 | 2022-10-10T17:37:21 | 134,330,630 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,616 | r | data_student_equity.R | #' Fake data on student equity
#'
#' Data randomly generated to illustrate the use of the package.
#' @format A data frame with 20,000 rows:
#' \describe{
#' \item{Ethnicity}{ethnicity (one of: \code{Asian}, \code{Black}, \code{Hispanic}, \code{Multi-Ethnicity}, \code{Native American}, \code{White}).}
#' \item{Gender}{gender (one of: \code{Male}, \code{Female}, \code{Other}).}
#' \item{Cohort}{year student first enrolled in any credit course at the institution (one of: \code{2017}, \code{2018}).}
#' \item{Transfer}{1 or 0 indicating whether or not a student transferred within 2 years of first enrollment (\code{Cohort}).}
#' \item{Cohort_Math}{year student first enrolled in a math course at the institution; could be \code{NA} if the student have not attempted math.}
#' \item{Math}{1 or 0 indicating whether or not a student completed transfer-level math within 1 year of their first math attempt (\code{Cohort_Math}); could be \code{NA} if the student have not attempted math.}
#' \item{Cohort_English}{year student first enrolled in a math course at the institution; could be \code{NA} if the student have not attempted math.}
#' \item{English}{1 or 0 indicating whether or not a student completed transfer-level English within 1 year of their first math attempt (\code{Cohort_English}); could be \code{NA} if the student have not attempted English.}
#' \item{Ed_Goal}{student's educational goal (one of: \code{Deg/Transfer}, \code{Other}).}
#' \item{College_Status}{student's educational status (one of: \code{First-time College}, \code{Other}).}
#' \item{Student_ID}{student's unique identifier.}
#' \item{EthnicityFlag_Asian}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Asian.}
#' \item{EthnicityFlag_Black}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Black.}
#' \item{EthnicityFlag_Hispanic}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Hispanic.}
#' \item{EthnicityFlag_NativeAmerican}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Native American.}
#' \item{EthnicityFlag_PacificIslander}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Pacific Islander.}
#' \item{EthnicityFlag_White}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as White.}
#' \item{EthnicityFlag_Carribean}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Carribean.}
#' \item{EthnicityFlag_EastAsian}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as East Asian.}
#' \item{EthnicityFlag_SouthEastAsian}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Southeast Asian.}
#' \item{EthnicityFlag_SouthWestAsianNorthAfrican}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Southwest Asian / North African (SWANA).}
#' \item{EthnicityFlag_AANAPI}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Asian-American or Native American Pacific Islander (AANAPI).}
#' \item{EthnicityFlag_Unknown}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Unknown.}
#' \item{EthnicityFlag_TwoorMoreRaces}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as two or more races.}
#' }
#' @docType data
#'
#' @usage data(student_equity)
#'
#' @keywords datasets
#'
#' @examples
#' data(student_equity)
"student_equity"
## # Data parameters
## true.p <- c(0.4, 0.5, 0.3, 0.2, 0.7, 0.6)
## nPerGroup <- c(100, 500, 1000, 2000, 3000, 3400)
## nGroups <- length(nPerGroup); nGroups
## nEachCohort <- sum(nPerGroup); nEachCohort
## nCohorts <- 2
## # Generate toy data
## library(devtools)
## library(dplyr)
## set.seed(1)
## student_equity <- tibble(Cohort=rep(2017:2018, each=nEachCohort)
## , Ethnicity=rep(rep(c('Native American', 'Multi-Ethnicity', 'Black', 'Hispanic', 'Asian', 'White'), times=nPerGroup), 2)
## , Transfer=c(lapply(1:nGroups, function(i) sample(0:1, size=nPerGroup[i], replace=TRUE, prob=c(1-true.p[i], true.p[i]))) %>% unlist
## , lapply(1:nGroups, function(i) sample(0:1, size=nPerGroup[i], replace=TRUE, prob=c(1-true.p[i]*1.05, true.p[i]*1.05))) %>% unlist
## )
## , Math=ifelse(Transfer==1, 1, sample(0:1, size=length(Transfer), replace=TRUE, prob=c(0.5, 0.5)))
## , English=ifelse(Transfer==1, 1, sample(0:1, size=length(Transfer), replace=TRUE, prob=c(0.6, 0.4)))
## , Gender=sample(x=c('Female', 'Male', 'Other'), size=nCohorts*sum(nPerGroup), replace=TRUE, prob=c(0.49, 0.49, 0.02))
## , Ed_Goal=sample(x=c('Deg/Transfer', 'Other'), size=nCohorts*sum(nPerGroup), replace=TRUE, prob=c(0.7, 0.3))
## , College_Status=sample(x=c('First-time College', 'Other'), size=nCohorts*sum(nPerGroup), replace=TRUE, prob=c(0.8, 0.2))
## ) %>%
## mutate(
## Math=ifelse(Math==0, sample(c(NA, 0), size=length(Transfer), replace=TRUE, prob=c(0.3, 0.7)), Math)
## , English=ifelse(English==0, sample(c(NA, 0), size=length(Transfer), replace=TRUE, prob=c(0.2, 0.8)), English)
## , Cohort_Math=ifelse(is.na(Math), NA, Cohort + sample(c(0, 1, 2), size=length(Transfer), replace=TRUE, prob=c(0.5, 0.3, 0.2)))
## , Cohort_English=ifelse(is.na(English), NA, Cohort + sample(c(0, 1, 2), size=length(Transfer), replace=TRUE, prob=c(0.6, 0.3, 0.1)))
## , Student_ID=100000 + row_number()
## ) %>%
## select(Ethnicity, Gender, Cohort, Transfer, Cohort_Math, Math, Cohort_English, English, everything()) %>%
## as.data.frame
## # Import some sample multi-ethnicity data
## library(readr)
## d_multi_eth <- read_csv('../Multi-Ethnicity Data/Results/Multi-Ethnicity.csv')
## # Append this multi-ethnicity data
## set.seed(1000)
## student_equity <- student_equity %>%
## group_by(Ethnicity) %>%
## mutate(random_id=sample(n())) %>%
## ungroup %>%
## left_join(
## d_multi_eth %>%
## group_by(Ethnicity) %>%
## mutate(random_id=sample(n())) %>%
## ungroup
## ) %>%
## select(-random_id) %>%
## group_by(Ethnicity) %>%
## mutate_at(.vars=vars(starts_with('EthnicityFlag')), .funs=function(x) ifelse(is.na(x), sample(x[!is.na(x)], size=n(), replace=TRUE), x)) %>%
## ungroup %>%
## # Fudge math success data to illustrate multi-ethnicity
## mutate(Math=ifelse(EthnicityFlag_PacificIslander==0, Math, sample(x=1:0, size=n(), replace=TRUE, prob=c(0.30, 0.70)))
## ## , Math=ifelse(EthnicityFlag_SouthEastAsian==0, Math, sample(x=1:0, size=n(), replace=TRUE, prob=c(0.50, 0.50)))
## ## , Math=ifelse(EthnicityFlag_Carribean==0, Math, sample(x=1:0, size=n(), replace=TRUE, prob=c(0.20, 0.80)))
## , Math=ifelse(Math==0, sample(c(NA, 0), size=length(Transfer), replace=TRUE, prob=c(0.2, 0.8)), Math)
## , Cohort_Math=ifelse(is.na(Math), NA, Cohort + sample(c(0, 1, 2), size=length(Transfer), replace=TRUE, prob=c(0.5, 0.3, 0.2)))
## ) %>%
## as.data.frame
## # Export data set to ./data
## ##devtools::use_data(student_equity, overwrite=TRUE) ## deprecated
## usethis::use_data(student_equity, overwrite=TRUE)
## openxlsx::write.xlsx(x=student_equity, file='~/Downloads/student_equity.xlsx')
## # Parquet files: external data in ./inst/extdata
## # File used by tinytest
## library(arrow)
## write_parquet(x=student_equity, sink='./inst/extdata/student_equity.parquet')
|
27a9a35ee70514d47606e31a86ccdbff567a2e04 | b65595f9f4ad5e2695acdb860eda3598bd7fba2c | /Annotation.R | 5dba698db45a2b469e9336a90cafbf293011ec3a | [] | no_license | SherryH1229/CS_RNA | 20f057462c9176baf160e242870ff3444a0c29f4 | 87524f9856af203ffa8f8128551480eddb7c41fc | refs/heads/master | 2020-05-22T13:55:32.672379 | 2019-09-05T19:50:57 | 2019-09-05T19:50:57 | 186,370,692 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,071 | r | Annotation.R | library(org.Mm.eg.db)
library(org.Hs.eg.db)
library(dplyr)
#DataBases
cols <- c("GENENAME","SYMBOL")
#Hs
Biotype_df_Hs <- read.csv("~/dataOS/CS_RNA/surface_click/NK92/20190408/20190408_FPKMs.txt",sep="\t") %>%
as.data.frame(.) %>% .[,c(1,7)]
# RNA type
RNA_annotation <- as.character(unique(Biotype_df_Hs$Biotype))
RNA_mainType <- grep("RNA",RNA_annotation,value=TRUE)
colnames(Biotype_df_Hs)[1] <- "tracking_id"
anno_info_Hs <- AnnotationDbi::select(org.Hs.eg.db, keys=as.vector(Biotype_df_Hs$tracking_id), columns=cols, keytype="ENSEMBL")
anno_info_Hs <- merge(anno_info_Hs,Biotype_df_Hs,by.x = "ENSEMBL",by.y ="tracking_id")
remove(Biotype_df_Hs)
#Mm
Biotype_df_Mm <- read.csv("/home/xcao3/membraneRNA/pooledResult201901/cufflink_out/fpkmsWithClickNoDup.txt",sep="\t") %>%
as.data.frame(.) %>% .[,c(1,3)]
anno_info_Mm <- AnnotationDbi::select(org.Mm.eg.db, keys=as.vector(Biotype_df_Mm$tracking_id), columns=cols, keytype="ENSEMBL")
anno_info_Mm <- merge(anno_info_Mm,Biotype_df_Mm,by.x = "ENSEMBL",by.y ="tracking_id")
remove(Biotype_df_Mm)
remove(cols)
|
7106038ffd1ba95388915d280fd3c355a651c9d6 | 9ea6e8002beed284ebb0e7d33e788ce64812cd93 | /shiny_app/server.R | 170c4b8aa5cf750f9f00a8d2c9ab03ebd1774079 | [] | no_license | AimeeRose/conflicts_analysis_project | 97985d644a8d7689dfe09fa015665cbce00faa31 | 29a707fe482535b2eec6d89eacb79062dbb46108 | refs/heads/master | 2021-01-24T23:41:28.205850 | 2015-11-17T14:34:10 | 2015-11-17T14:34:10 | 46,353,040 | 0 | 0 | null | 2015-11-17T14:41:25 | 2015-11-17T14:41:25 | null | UTF-8 | R | false | false | 1,276 | r | server.R | library(shiny)
library(RMySQL)
library(ggmap)
library(grid)
# Shiny server: renders a satellite map of GDELT events, sized by (log)
# mention volume. The MySQL connection is opened once per R process and
# shared across sessions; the password comes from the DB_PASSWORD env var.
shinyServer(function(input, output) {
  con <- dbConnect(RMySQL::MySQL(),
                   dbname = "gdelt",
                   host = "bgseds-group8-rds.cgwo8rgbvpyh.eu-west-1.rds.amazonaws.com",
                   user = "group8",
                   password = Sys.getenv("DB_PASSWORD"))

  output$significanceMap <- renderPlot({
    # input$eventType names a table. Quote it as an SQL identifier so
    # user-controlled text cannot be spliced into the statement verbatim
    # (the original pasted it in raw).
    res <- dbSendQuery(con, paste0("select * from ",
                                   dbQuoteIdentifier(con, input$eventType)))
    data <- dbFetch(res, n = -1)
    # Free the result set; the original leaked it on every render.
    dbClearResult(res)

    # Keep only the top 1% most-mentioned events. The 0.01 quantile grid
    # was chosen after trying 0.1 / 0.05 / 0.01 for a reasonable subset.
    qs <- quantile(data$NumMentions, probs = seq(0, 1, 0.01))
    data_subset <- subset(data, NumMentions > qs['99%'])
    # Log scale keeps the point-size range reasonable.
    data_subset$logNumMentions <- log(data_subset$NumMentions)

    # (Removed dead code: an unused world-borders layer, a map <- NULL
    # placeholder, and an unused mean of the log mention counts.)
    map <- get_map(input$centerMapLocation, zoom = input$mapZoom, maptype = 'satellite')
    ggmap(map, extent = 'device') +
      geom_point(data = data_subset,
                 aes(x = ActionGeo_Long, y = ActionGeo_Lat, size = logNumMentions),
                 colour = 'orange')
  },
  width = 800, height = 500)
})
1f34268e7c5fe1fd4eaddc2aba56ad784e9ebd5f | 1f5c6b962db46c457ecb60e2cd03a9e862519c85 | /day3/5 - logistic_regression/logistic_regression.R | 4a28c4a64059f74420320abd507a1fa7ed511c3d | [] | no_license | lschwetlick/SMLP_Bayes | df7387d457dcbc4de6a1184e80cda826eb6d42c6 | 0f5fff915cbb3f72cb8083a16395d49c68f9204a | refs/heads/master | 2020-03-28T10:35:48.182646 | 2018-09-15T11:19:21 | 2018-09-15T11:19:21 | 148,124,423 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,446 | r | logistic_regression.R | setwd('/Users/lisa/Documents/SMLP/material/day3/5 - logistic_regression')
############################################################
# Initial setup
############################################################
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
util <- new.env()
source('stan_utility.R', local=util)
c_light <- c("#DCBCBC")
c_light_highlight <- c("#C79999")
c_mid <- c("#B97C7C")
c_mid_highlight <- c("#A25050")
c_dark <- c("#8F2727")
c_dark_highlight <- c("#7C0000")
############################################################
# Ungrouped logistic regression 1
############################################################
input_data <- read_rdump('logistic_regression.data.R')
fit <- stan(file='logistic_regression1.stan', data=input_data, seed=4938483)
# Check diagnostics
util$check_all_diagnostics(fit)
# Plot marginal posteriors
params = extract(fit)
par(mfrow=c(2, 3))
hist(params$beta[,1], main="", xlab="beta[1]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,2], main="", xlab="beta[2]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,3], main="", xlab="beta[3]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,4], main="", xlab="beta[4]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,5], main="", xlab="beta[5]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$alpha, main="", xlab="alpha", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
# Now let's look at the PPCs
par(mfrow=c(1, 3))
# Aggegrate
breaks_delta <- 1.0 / length(input_data$y)
breaks <- seq(- 0.5 * breaks_delta, 1 + 0.5 * breaks_delta, breaks_delta)
hist(params$p_hat_ppc, breaks=breaks, main="", xlab="p_hat_ppc", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
abline(v=sum(input_data$y) / input_data$N, col=c_light, lty=1, lw=2)
# Left-Handed Group
breaks_delta <- 1.0 / length(input_data$y[input_data$h == 1])
breaks <- seq(- 0.5 * breaks_delta, 1 + 0.5 * breaks_delta, breaks_delta)
hist(params$p_hat_left_ppc, breaks=breaks,
col=c_dark, border=c_dark_highlight,
main="", xlab="p_hat_left_ppc")
abline(v=sum(input_data$y * (input_data$h == 1)) / sum(input_data$h == 1),
col=c_light, lty=1, lw=2)
# Right-handed Group
breaks_delta <- 1.0 / length(input_data$y[input_data$h == 2])
breaks <- seq(- 0.5 * breaks_delta, 1 + 0.5 * breaks_delta, breaks_delta)
hist(params$p_hat_right_ppc, breaks=breaks,
col=c_dark, border=c_dark_highlight,
main="", xlab="p_hat_right_ppc")
abline(v=sum(input_data$y * (input_data$h == 2)) / sum(input_data$h == 2),
col=c_light, lty=1, lw=2)
# Aggregate fit looks okay, but individual groups
# are very different from aggregrate response
############################################################
# Ungrouped logistic regression 2
############################################################
# Now let's run again with the more efficient implentation
# of the logistic regression and confirm that we get an
# equivalent fit
fit <- stan(file='logistic_regression2.stan', data=input_data, seed=4938483)
# Check diagnostics
util$check_all_diagnostics(fit)
# Plot marginal posteriors
params = extract(fit)
par(mfrow=c(2, 3))
hist(params$beta[,1], main="", xlab="beta[1]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,2], main="", xlab="beta[2]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,3], main="", xlab="beta[3]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,4], main="", xlab="beta[4]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,5], main="", xlab="beta[5]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$alpha, main="", xlab="alpha", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
# Now let's look at the PPCs
par(mfrow=c(1, 3))
# Aggegrate
breaks_delta <- 1.0 / length(input_data$y)
breaks <- seq(- 0.5 * breaks_delta, 1 + 0.5 * breaks_delta, breaks_delta)
hist(params$p_hat_ppc, breaks=breaks, main="", xlab="p_hat_ppc", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
abline(v=sum(input_data$y) / input_data$N, col=c_light, lty=1, lw=2)
# Left-Handed Group
breaks_delta <- 1.0 / length(input_data$y[input_data$h == 1])
breaks <- seq(- 0.5 * breaks_delta, 1 + 0.5 * breaks_delta, breaks_delta)
hist(params$p_hat_left_ppc, breaks=breaks,
col=c_dark, border=c_dark_highlight,
main="", xlab="p_hat_left_ppc")
abline(v=sum(input_data$y * (input_data$h == 1)) / sum(input_data$h == 1),
col=c_light, lty=1, lw=2)
# Right-handed Group
breaks_delta <- 1.0 / length(input_data$y[input_data$h == 2])
breaks <- seq(- 0.5 * breaks_delta, 1 + 0.5 * breaks_delta, breaks_delta)
hist(params$p_hat_right_ppc, breaks=breaks,
col=c_dark, border=c_dark_highlight,
main="", xlab="p_hat_right_ppc")
abline(v=sum(input_data$y * (input_data$h == 2)) / sum(input_data$h == 2),
col=c_light, lty=1, lw=2)
############################################################
# Grouped logistic regression
############################################################
fit <- stan(file='grouped_logistic_regression.stan', data=input_data, seed=4938483)
# Check diagnostics
util$check_all_diagnostics(fit)
# Plot marginal posteriors
params = extract(fit)
par(mfrow=c(2, 4))
hist(params$beta[,1], main="", xlab="beta[1]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,2], main="", xlab="beta[2]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,3], main="", xlab="beta[3]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,4], main="", xlab="beta[4]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$beta[,5], main="", xlab="beta[5]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$alpha[,1], main="", xlab="alpha[1]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
hist(params$alpha[,2], main="", xlab="alpha[2]", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
# Now let's look at the PPCs
par(mfrow=c(1, 3))
# Aggegrate
breaks_delta <- 1.0 / length(input_data$y)
breaks <- seq(- 0.5 * breaks_delta, 1 + 0.5 * breaks_delta, breaks_delta)
hist(params$p_hat_ppc, breaks=breaks, main="", xlab="p_hat_ppc", yaxt='n', ylab="",
col=c_dark, border=c_dark_highlight)
abline(v=sum(input_data$y) / input_data$N, col=c_light, lty=1, lw=2)
# Left-Handed Group
breaks_delta <- 1.0 / length(input_data$y[input_data$h == 1])
breaks <- seq(- 0.5 * breaks_delta, 1 + 0.5 * breaks_delta, breaks_delta)
hist(params$p_hat_left_ppc, breaks=breaks,
col=c_dark, border=c_dark_highlight,
main="", xlab="p_hat_left_ppc")
abline(v=sum(input_data$y * (input_data$h == 1)) / sum(input_data$h == 1),
col=c_light, lty=1, lw=2)
# Right-handed Group
breaks_delta <- 1.0 / length(input_data$y[input_data$h == 2])
breaks <- seq(- 0.5 * breaks_delta, 1 + 0.5 * breaks_delta, breaks_delta)
hist(params$p_hat_right_ppc, breaks=breaks,
col=c_dark, border=c_dark_highlight,
main="", xlab="p_hat_right_ppc")
abline(v=sum(input_data$y * (input_data$h == 2)) / sum(input_data$h == 2),
col=c_light, lty=1, lw=2)
|
48d9bb61c0cc3750485550f85dd5cb0d10a765be | 82f22590405a457c63cb9f9baeafee96ac9eb431 | /fimo2fishers.R | 14e47aece592c9ab4c6ea4207c649cf77d2d14de | [] | no_license | zm-git-dev/apa_bin | df5026b74fe02f4f6d6f948dad8d12867bd3afad | 73816825db72a1f7203b625a7651507afd25142f | refs/heads/master | 2022-09-13T07:04:40.922889 | 2018-07-24T20:37:08 | 2018-07-24T20:37:08 | 191,887,492 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,706 | r | fimo2fishers.R | source("~/uapa/R/apa_tools.R")
load("/n/data1/biobase/transfac/current/R/matrix.dat.RData")
vert <- indexes$acc[grep("^V",indexes$ID)]
urls <- rbind(read.delim("transfac.url", sep="\t", header=F, as.is=T),read.delim("jaspar.url", sep="\t", header=F, as.is=T))
ids <- rbind(read.delim("transfac.ids", sep="\t", header=F, as.is=T)[,2:3],read.delim("jaspar.ids", sep="\t", header=F, as.is=T)[,2:3])
### REGULAR MOTIF MEME, TSS-1KB
ff <- read.delim("fimo2fishers.f2f.txt", sep="\t", header=T, as.is=T)
ff.js <- ff[ff[,3] == "jaspar",]
ff.tf <- ff[ff[,3] == "transfac",]
ff.me <- ff[grep("meme",ff[,3]),]
ff.tf <- ff.tf[ff.tf[,4] %in% vert,] # subset on vertebrate motifs
ff2.tf <- cbind(ff.tf[,c(1:5,10)],fisherize(ff.tf[,6:9]))
ff2.js <- cbind(ff.js[,c(1:5,10)],fisherize(ff.js[,6:9]))
ff2.me <- cbind(ff.me[,c(1:5,10)],fisherize(ff.me[,6:9]))
ff3 <- rbind(ff2.tf, ff2.js, ff2.me)[,c(1:5,7:16,6)]
udb <- unique(ff3[,3]); udb
for (x in udb) { ff3[ff3[,3]==x,15] <- p.adjust(ff3[ff3[,3]==x,14], method="BH") }
min(ff3[,15])
min(ff3[,14])
sum(ff3[,15] <= 0.05)
sum(ff3[,14] <= 0.05)
sum(ff3[,14] <= 0.01)
sig <- which(ff3[,14] <= 0.05)
length(sig)
luniq(ff3[sig,4])
sort(unique(ff3[sig,4]))
ff3[sig,]
WriteXLS2(list(ff3[sig,]), "sig.f2f.motifs.xls")
### REGULAR MOTIF MEME, TSS+-1KB
ff <- read.delim("fimo2fishers.TSS.txt", sep="\t", header=T, as.is=T)
ff[grep("1k$",ff[,3]),3] <- paste(ff[grep("1k$",ff[,3]),3],"meme",sep=".")
ff.js <- ff[ff[,3] == "jaspar",]
ff.tf <- ff[ff[,3] == "transfac",]
ff.me <- ff[grep("meme",ff[,3]),]
ff.tf <- ff.tf[ff.tf[,4] %in% vert,] # subset on vertebrate motifs
ff2.tf <- cbind(ff.tf[,c(1:5,10)],fisherize(ff.tf[,6:9]))
ff2.js <- cbind(ff.js[,c(1:5,10)],fisherize(ff.js[,6:9]))
ff2.me <- cbind(ff.me[,c(1:5,10)],fisherize(ff.me[,6:9]))
ff3 <- rbind(ff2.tf, ff2.js, ff2.me)[,c(1:5,7:16,6)]
udb <- unique(ff3[,3]); udb
for (x in udb) { ff3[ff3[,3]==x,15] <- p.adjust(ff3[ff3[,3]==x,14], method="BH") }
min(ff3[,15])
min(ff3[,14])
sum(ff3[,15] <= 0.05)
sum(ff3[,14] <= 0.05)
sum(ff3[,14] <= 0.01)
sig <- which(ff3[,14] <= 0.05)
length(sig)
luniq(ff3[sig,4])
sort(unique(ff3[sig,4]))
ff3[sig,]
WriteXLS2(list(ff3[sig,]), "sig.TSS.motifs.xls")
### UTR MIRNA-SITE MEME
ff <- read.delim("fimo2fishers.UTR.txt", sep="\t", header=T, as.is=T)
ff3 <- cbind(ff[,c(1:5,10)],fisherize(ff[,6:9]))[,c(1:5,7:16,6)]
udb <- unique(ff3[,3]); udb
for (x in udb) { ff3[ff3[,3]==x,15] <- p.adjust(ff3[ff3[,3]==x,14], method="BH") }
min(ff3[,15])
min(ff3[,14])
sum(ff3[,15] <= 0.05)
sum(ff3[,14] <= 0.05)
sum(ff3[,14] <= 0.01)
sig <- which(ff3[,14] <= 0.05)
length(sig)
luniq(ff3[sig,4])
sort(unique(ff3[sig,4]))
ff3[sig,]
WriteXLS2(list(ff3[sig,]), "sig.UTR.motifs.xls")
|
27dcf6c1b7e74354341ee987e8d43d53599a536f | 77fa704a3d3cbaddf8c94ba841ed3da988fea64c | /data-raw/texas_income.R | 012913db5589b260267feaa5878643939f70b20a | [
"MIT"
] | permissive | wilkelab/practicalgg | a80a257b3c57b746110923604875e9217ea293e2 | 5718aad4322ba86967efd6c5d2b31eace0319248 | refs/heads/master | 2021-06-26T22:34:30.561706 | 2021-02-01T23:06:05 | 2021-02-01T23:06:05 | 209,400,266 | 78 | 8 | NOASSERTION | 2019-10-14T19:24:47 | 2019-09-18T20:41:08 | R | UTF-8 | R | false | false | 324 | r | texas_income.R | library(tidycensus)
library(tidyverse)
# Cache downloaded TIGER/Line shapefiles between runs.
options(tigris_use_cache = TRUE)
##****************
## Median income
##****************
# get median income
# ACS variable B19013_001 (median household income) per Texas county, 2015;
# geometry = TRUE attaches the county polygons.  get_acs() comes from
# tidycensus, loaded just above this chunk.
texas_income <- get_acs(
  state = "TX", geography = "county", year = 2015,
  variables = "B19013_001", geometry = TRUE
)
# Store the result as package data (data/texas_income.rda).
usethis::use_data(texas_income, overwrite = TRUE)
|
880d6a5abd99c48d6fbc233031412cffb099705c | a8e8000b370d54c2f6a097ee59876827f4daafbe | /9.6/ggplot2.R | 13d66c9c6afca66f95ff82bfb078ddf74825de22 | [] | no_license | weidaoming/R | 142be073ebdf097740ae5a02a7e75308a06e30d1 | 5048ca1d46025ba41d03b00049a17b309e8dfedc | refs/heads/master | 2021-07-12T10:47:27.552074 | 2017-10-18T07:09:09 | 2017-10-18T07:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,099 | r | ggplot2.R | library(ggplot2)
# Scatterplots of wind speed vs. temperature (airquality), built up from a
# basic layered plot to per-month regression lines with a custom palette.
ggplot(airquality, aes(Wind, Temp)) +
  geom_point(aes(color = factor(Month)), alpha = 0.4, size = 5)
ggplot(airquality, aes(Wind, Temp)) +
  geom_point() +
  geom_smooth()
ggplot(airquality, aes(Wind, Temp)) +
  stat_smooth(method = "lm", se = FALSE)
ggplot(airquality, aes(Wind, Temp)) +
  stat_smooth(method = "lm", se = FALSE, aes(color = factor(Month)))
# FIX: 'grop' was a typo for 'group'; group = 1 overrides the colour-based
# grouping so a single regression line is fitted through all months.
ggplot(airquality, aes(Wind, Temp, col = factor(Month), group = 1)) +
  geom_point() +
  stat_smooth(method = "lm", se = FALSE)
library(RColorBrewer)
# Five qualitative colours plus black.
mycolors <- c(brewer.pal(5, "Dark2"), "black")
display.brewer.pal(5, "Dark2")
ggplot(airquality, aes(Wind, Temp, col = factor(Month), group = 1)) +
  geom_point() +
  stat_smooth(method = "lm", se = FALSE) +
  scale_color_manual("Month", values = mycolors)
# FIX: 'MOnth' was a typo; facet_grid needs the actual column name 'Month'.
ggplot(airquality, aes(Wind, Temp, col = factor(Month))) +
  geom_point() +
  stat_smooth(method = "lm", se = FALSE) +
  scale_color_manual("Month", values = mycolors) +
  facet_grid(. ~ Month)
ggplot(airquality, aes(Wind, Temp, col = factor(Month), group = 1)) +
  geom_point() +
  stat_smooth(method = "lm", se = FALSE) +
  scale_color_manual("Month", values = mycolors) +
  theme_classic()
?theme  # open the theme help page (interactive use)
|
960e67a8479ee0c835bff1c9e91dadf67f0c3347 | 5450042e2cdca616a2c4b07cdeb8ff41fe534f29 | /man/focal.function.Rd | f11c7e49a2716fea28a1ae2e0b060ef9f8966c7b | [] | no_license | GertS/RSAGA | 78f4b0aefe2e2deed447452f1600be47139d15dc | e2f6b296c45adcb23c364e32eea3cf1baea59251 | refs/heads/master | 2021-01-16T19:57:34.833107 | 2013-07-23T00:00:00 | 2013-07-23T00:00:00 | 31,158,444 | 1 | 0 | null | 2015-02-22T09:32:46 | 2015-02-22T09:32:45 | null | UTF-8 | R | false | false | 9,588 | rd | focal.function.Rd | \name{focal.function}
\alias{focal.function}
\alias{gapply}
\alias{local.function}
\title{Local and Focal Grid Functions}
\usage{
focal.function(in.grid, in.factor.grid, out.grid.prefix,
path = NULL, in.path = path, out.path = path, fun,
varnames, radius = 0, is.pixel.radius = TRUE,
na.strings = "NA", valid.range = c(-Inf, Inf),
nodata.values = c(), out.nodata.value,
search.mode = c("circle", "square"), digits = 4,
hdr.digits = 10, dec = ".", quiet = TRUE, nlines = Inf,
mw.to.vector = FALSE, mw.na.rm = FALSE, ...)
gapply(in.grid, fun, varnames, mw.to.vector = TRUE,
mw.na.rm = TRUE, ...)
local.function(...)
}
\arguments{
\item{in.grid}{file name of input ASCII grid, relative to
\code{in.path}}
\item{in.factor.grid}{optional file name giving a gridded
categorical variables defining zones; zone boundaries are
used as breaklines for the moving window (see Details)}
\item{out.grid.prefix}{character string (optional),
defining a file name prefix to be used for the output
file names; a dash (\code{-}) will separate the prefix
and the \code{varnames}}
\item{path}{path in which to look for \code{in.grid} and
write output grid files; see also \code{in.path} and
\code{out.path}, which overwrite \code{path} if they are
specified}
\item{in.path}{path in which to look for \code{in.grid}
(defaults to \code{path})}
\item{out.path}{path in which to write output grid files;
defaults to \code{path}}
\item{fun}{a function, or name of a function, to be
applied on the moving window; see Details}
\item{varnames}{character vector specifying the names of
the variable(s) returned by \code{fun}; if missing,
\code{focal.function} will try to determine the varnames
from \code{fun} itself, or from a call to \code{fun} if
this is a function (see Details)}
\item{radius}{numeric value specifying the (circular or
square) radius of the moving window; see
\code{is.pixel.radius} and \code{search.mode}; note that
all data within distance \code{<=radius} will be included
in the moving window, not \code{<radius}.}
\item{is.pixel.radius}{logical: if \code{TRUE} (default),
the \code{radius} will be interpreted as a (possibly
non-integer) number of pixels; if \code{FALSE}, it is
interpreted as a radius measured in the grid (map)
units.}
  \item{valid.range}{numeric vector of length 2, specifying
  minimum and maximum valid values read from input file;
  all values \code{<valid.range[1]} or
  \code{>valid.range[2]} will be converted to \code{NA}.}
\item{nodata.values}{numeric vector: any values from the
input grid file that should be converted to \code{NA}, in
addition to the nodata value specified in the grid
header}
\item{out.nodata.value}{numeric: value used for storing
\code{NA}s in the output file(s); if missing, use the
same nodata value as specified in the header of the input
grid file}
\item{na.strings}{passed on to \code{\link{scan}}}
  \item{search.mode}{character, either \code{"circle"}
  (default) for a circular search window, or
  \code{"square"} for a square one.}
\item{digits}{numeric, specifying the number of digits to
be used for output grid file.}
\item{hdr.digits}{numeric, specifying the number of
digits to be used for the header of the output grid file
(default: 10; see
\code{\link{write.ascii.grid.header}}).}
\item{dec}{character, specifying the decimal mark to be
used for input and output.}
\item{quiet}{If \code{TRUE}, gives some output
(\code{"*"}) after every 10th line of the grid file and
when the job is done.}
\item{nlines}{Number of lines to be processed; useful for
testing purposes.}
\item{mw.to.vector}{logical: Should the content of the
moving window be coerced (from a matrix) to a vector?}
\item{mw.na.rm}{logical: Should \code{NA}s be removed
from moving window prior to passing the data to
\code{fun}? Only applicable when
\code{mw.to.vector=TRUE}.}
\item{\dots}{Arguments to be passed to \code{fun};
\code{local.function}: arguments to be passed to
\code{focal.function}.}
}
\value{
\code{focal.function} and \code{local.function} return
the character vector of output file names.
}
\description{
\code{focal.function} cuts out square or circular moving
windows from a grid (matrix) and applies a user-defined
matrix function to calculate e.g. a terrain attribute or
filter the grid. The function is suitable for large grid
files as it can process them row by row.
\code{local.function} represents the special case of a
moving window of radius 1. Users can define their own
functions operating on moving windows, or use simple
functions such as \code{median} to define filters.
}
\details{
\code{focal.function} passes a square matrix of size
\code{2*radius+1} to the function \code{fun} if
\code{mw.to.vector=FALSE} (default), or a vector of
length \code{<=(2*radius+1)^2} if
\code{mw.to.vector=TRUE}. This matrix or vector will
contain the content of the moving window, which may
possibly contain \code{NA}s even if the \code{in.grid}
has no nodata values, e.g. due to edge effects. If
\code{search.mode="circle"}, values more than
\code{radius} units (pixels or grid units, depending on
\code{is.pixel.radius}) away from the center pixel /
matrix entry will be set to \code{NA}. In addition,
\code{valid.range}, \code{nodata.values}, and the nodata
values specified in the \code{in.grid} are checked to
assign further \code{NA}s to pixels in the moving window.
Finally, if \code{in.factor.grid} specifies zones, all
pixels in the moving window that belong to a different
zone than the center pixel are set to \code{NA}, or, in
other words, zone boundaries are used as breaklines.
The function \code{fun} should return a single numeric
value or a numeric vector. As an example, the function
\code{\link{resid.minmedmax}} returns the minimum, median
and maximum of the difference between the values in the
moving window and the value in the center grid cell. In
addition to the (first) argument receiving the moving
window data, \code{fun} may have additional arguments;
the \code{...} argument of \code{focal.function} is
passed on to \code{fun}. \code{\link{resid.quantile}} is
a function that uses this feature.
Optionally, \code{fun} should support the following
feature: If no argument is passed to it, then it should
return a character vector giving variable names to be
used for naming the output grids. The call
\code{\link{resid.minmedmax}()}, for example, returns
\code{c("rmin","rmed","rmax")}; this vector must have the
same length as the numeric vector returned when moving
window data is passed to the function. This feature is
only used if no \code{varnames} argument is provided.
Note that the result is currently being
\code{\link{abbreviate}}d to a length of 6 characters.
Input and output file names are built according to the
following schemes:
Input: \code{[<in.path>/]<in.grid>}
Zones: \code{[<in.path>/]<in.factor.grid>} (if specified)
Output:
\code{[<out.path>/][<out.grid.prefix>-]<varnames>.asc}
For the input files, \code{.asc} is used as the default
file extension, if it is not specified by the user.
}
\note{
These functions are not very efficient ways of
calculating e.g. (focal) terrain attributes compared to
for example the SAGA modules, but the idea is that you
can easily specify your own functions without starting to
mess around with C code. For example try implementing a
median filter as a SAGA module... or just use the code
shown in the example!
}
\examples{
\dontrun{
# A simple median filter applied to dem.asc:
gapply("dem","median",radius=3)
# Same:
#focal.function("dem",fun="median",radius=3,mw.to.vector=TRUE,mw.na.rm=TRUE)
# See how the filter has changed the elevation data:
d1 = as.vector(read.ascii.grid("dem")$data)
d2 = as.vector(read.ascii.grid("median")$data)
hist(d1-d2,br=50)
}
# Wind shelter index used by Plattner et al. (2004):
\dontrun{
ctrl = wind.shelter.prep(6,-pi/4,pi/12,10)
focal.function("dem",fun=wind.shelter,control=ctrl,
radius=6,search.mode="circle")
}
# Or how about this, if "aspect" is local terrain exposure:
\dontrun{
gapply("aspect","cos") # how "northerly-exposed" is a pixel?
gapply("aspect","sin") # how "easterly-exposed" is a pixel?
# Same result, but faster:
focal.function("aspect",fun=function(x) c(cos(x),sin(x)), varnames=c("cos","sin"))
}
}
\author{
Alexander Brenning
}
\references{
Brenning, A. (2008): Statistical geocomputing combining R
and SAGA: The example of landslide susceptibility
analysis with generalized additive models. In: J.
Boehner, T. Blaschke, L. Montanarella (eds.), SAGA -
Seconds Out (= Hamburger Beitraege zur Physischen
Geographie und Landschaftsoekologie, 19), 23-32.
\url{http://www.environment.uwaterloo.ca/u/brenning/Brenning-2008-RSAGA.pdf}
}
\seealso{
\code{\link{multi.focal.function}},
\code{\link{multi.local.function}},
\code{\link{resid.median}},
\code{\link{resid.minmedmax}},
\code{\link{relative.position}},
\code{\link{resid.quantile}},
\code{\link{resid.quartiles}},
\code{\link{relative.rank}}, \code{\link{wind.shelter}},
\code{\link{create.variable.name}}
}
\keyword{spatial}
|
0e20da1b4e00abed27871db6f323b69a53a22607 | f2d3a834eb614c444e4c4d2f863577e804d9fb70 | /man/fix_dates.Rd | 1e44778d3d5e1b64ad50703df026c34810d61bb9 | [] | no_license | David-Hervas/clickR | 150669cc67575659258e2bb44f429544e52e809c | cb738e505375376d91ac37eb01813ac3fb0e1432 | refs/heads/master | 2023-08-14T05:06:15.095067 | 2023-08-07T17:01:53 | 2023-08-07T17:01:53 | 90,495,146 | 3 | 3 | null | null | null | null | UTF-8 | R | false | true | 1,785 | rd | fix_dates.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fix_functions.R
\name{fix_dates}
\alias{fix_dates}
\title{Fix dates}
\usage{
fix_dates(
x,
max.NA = 0.8,
min.obs = nrow(x) * 0.05,
use.probs = TRUE,
select = 1:ncol(x),
track = TRUE,
parallel = TRUE
)
}
\arguments{
\item{x}{A data.frame}
\item{max.NA}{Maximum allowed proportion of NA values created by coercion. If the
coercion to date creates more NA values than those specified in \code{max.NA}, then all
changes will be reverted and the variable will remain unchanged.}
\item{min.obs}{Minimum number of non-NA observations allowed per variable. If the variable
has fewer non-NA observations, then it will be ignored by \code{fix_dates}.}
\item{use.probs}{When there are multiple date formats in the same column, there can
be ambiguities. For example, 04-06-2015 can be interpreted as 2015-06-04 or as 2015-04-06.
If \code{use.probs=TRUE}, ambiguities will be solved by assigning to the most frequent
date format in the column.}
\item{select}{Numeric vector with the positions (all by default) to be affected by the function}
\item{track}{Track changes?}
\item{parallel}{Should the computations be performed in parallel? Set up strategy first with future::plan()}
}
\description{
Fixes dates. Dates can be recorded in numerous formats depending on the
country, the traditions, and the field of knowledge. \code{fix_dates} tries to detect
all possible date formats and transforms all of them into the ISO standard favored by
R (yyyy-mm-dd).
}
\examples{
mydata<-data.frame(Dates1=c("25/06/1983", "25-08/2014", "2001/11/01", "2008-10-01"),
Dates2=c("01/01/85", "04/04/1982", "07/12-2016", "September 24, 2020"),
Numeric1=rnorm(4))
fix_dates(mydata)
}
|
caa837aa1d2b8c0c61ac12c63070f0a70b45ccd8 | 35b9530d0bd20b69e2508954183d6e52db45826f | /R/qrCodeSpec.R | 69176324f99c3cfd74048c79920d07d8d562fbfe | [] | no_license | victorteh/qrcode | fd67c312d0465ddf70fc6ef35d70c443dd5eb496 | 7d3305d3fa134afced8f48d13a5b4c4540ddb2e9 | refs/heads/master | 2021-01-01T05:38:18.649874 | 2015-08-24T00:12:36 | 2015-08-24T00:12:36 | 41,254,512 | 3 | 3 | null | 2018-06-19T12:33:44 | 2015-08-23T15:12:48 | R | UTF-8 | R | false | false | 1,012 | r | qrCodeSpec.R | #' QRcode specifications and requirements.
#'
#' List of the different QRcode versions with their specifications and
#' requirements.  For more details, refer to the QRcode standard.
#'
#' \itemize{
#'  \item Version. QRcode version.
#'  \item ECL. Error Correction Level. Consists of 4 levels: L, M, Q and H.
#'  \item Numeric. Number of numeric characters supported by the given version and ECL.
#'  \item Alphanumeric. Number of alphanumeric characters
#'  supported by the given version and ECL.
#'  \item Byte. Number of bytes supported by the given version and ECL.
#'  \item Dcword. Data code word count.
#'  \item ECwordPerBlock. Error correction word count per block.
#'  \item Grp1. Number of blocks in group 1.
#'  \item DCinGrp1. Number of data code words in each group 1 block.
#'  \item Grp2. Number of blocks in group 2.
#'  \item DCinGrp2. Number of data code words in each group 2 block.
#' }
#'
#' @name qrCodeSpec
#' @docType data
#' @keywords dataset
#' @usage data(qrCodeSpec)
"qrCodeSpec"
|
1826184be566de1325c3e5b735acba593aa6204d | 7a1c1dd167794190e0aa0f763b4549abdac5ead4 | /plot4.r | 401b2bd5336e640e33a5d1857e552835870d574b | [] | no_license | mattyhoo/ExData_Plotting1 | 4d367e305885066b90bb46f8a2293220278ced7c | d5b77c0cfc347541c7582c5375a94b45ea67fca6 | refs/heads/master | 2022-06-10T01:28:36.551019 | 2020-05-09T21:33:33 | 2020-05-09T21:33:33 | 262,483,024 | 0 | 0 | null | 2020-05-09T03:44:57 | 2020-05-09T03:44:56 | null | UTF-8 | R | false | false | 1,305 | r | plot4.r | pc<-read.table("household_power_consumption.txt",sep=";",header=TRUE)
# Plot 4: 2x2 panel of household power measurements for 2007-02-01/02.
# Assumes `pc` was read from household_power_consumption.txt just above.
pc$Date <- as.Date(pc$Date, "%d/%m/%Y")
# Keep only the two target days.
pc1 <- subset(pc, Date >= as.Date('2007-02-01') & Date <= as.Date('2007-02-02'))
# Measurement columns arrive as character/factor (the file marks missing
# values with a non-numeric token), so coerce every series used below.
pc1$Global_active_power <- as.numeric(as.character(pc1$Global_active_power))
pc1$Global_reactive_power <- as.numeric(as.character(pc1$Global_reactive_power))
pc1$Voltage <- as.numeric(as.character(pc1$Voltage))
# FIX: the three sub-metering columns were plotted WITHOUT this coercion,
# unlike every other measurement -- lines() on non-numeric data fails or
# draws factor codes.
pc1$Sub_metering_1 <- as.numeric(as.character(pc1$Sub_metering_1))
pc1$Sub_metering_2 <- as.numeric(as.character(pc1$Sub_metering_2))
pc1$Sub_metering_3 <- as.numeric(as.character(pc1$Sub_metering_3))
# Build a POSIXlt timestamp from the (already ISO-formatted) Date plus Time.
pc1$Time <- paste(pc1$Date, pc1$Time, sep = " ")
pc1$Time <- strptime(pc1$Time, "%Y-%m-%d %H:%M:%S")
png("plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
# Top-left: global active power.  FIX: "killowatts" -> "kilowatts".
plot(pc1$Time, pc1$Global_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "n")
lines(pc1$Time, pc1$Global_active_power, type = "l")
# Top-right: voltage.
plot(pc1$Time, pc1$Voltage, xlab = "datetime", ylab = "Voltage", type = "n")
lines(pc1$Time, pc1$Voltage, type = "l")
# Bottom-left: the three sub-metering series overlaid.
plot(pc1$Time, pc1$Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "n")
lines(pc1$Time, pc1$Sub_metering_1, type = "l")
lines(pc1$Time, pc1$Sub_metering_2, type = "l", col = "red")
lines(pc1$Time, pc1$Sub_metering_3, type = "l", col = "blue")
legend(x = "topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"))
# Bottom-right: global reactive power.
plot(pc1$Time, pc1$Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "n")
lines(pc1$Time, pc1$Global_reactive_power, type = "l")
dev.off()
7ef186c264f61d58cd5354fa74c0bddff496e599 | 9fdd567c9389cbff4d4e332e2264e6807e7bc97a | /BernSetting/MAPBeta.R | 56ebbb9dc7c15566cc9f287108a1ca078a5884f1 | [] | no_license | JINhuaqing/UIP | 91ed3e2b2b9568a802817ce49be33675365edc07 | 645581f544df409e9a3bf980c4af710e537a692d | refs/heads/master | 2023-06-19T16:27:57.773039 | 2021-07-13T07:18:37 | 2021-07-13T07:18:37 | 227,052,732 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 339 | r | MAPBeta.R | library(RBesT)
getrMAPBeta <- function(Ds, w, mean){
  # Build a robust meta-analytic-predictive (MAP) prior from historical
  # binomial data using RBesT.
  #
  # Ds:   2 x nStudies matrix; row 1 = responders (r), row 2 = totals (n).
  # w:    weight of the vague robustification component.
  # mean: mean of the robustification component.
  # Returns the robustified mixture prior.
  n_studies <- ncol(Ds)
  hist_data <- data.frame(study = seq_len(n_studies),
                          r = Ds[1, ],
                          n = Ds[2, ])
  # Hierarchical MAP fit with a fixed between-study prior (tau.prior = 1),
  # then approximate the MCMC posterior by a parametric mixture.
  map_fit <- gMAP(cbind(r, n-r)~1|study, family = binomial,
                  data = hist_data, tau.prior = 1)
  best_mix <- automixfit(map_fit)
  robustify(best_mix, weight = w, mean = mean)
}
|
5693bf1be7fab41f4eae6c92586b96812f3f0e10 | eb5cc0a95d4ec2877f7605dda0e3461993301e4d | /plot2.R | 0891674ab66ba7673e95a9a1e59bfb29a355d6fa | [] | no_license | MaxDQlikDev/ExData_Plotting1 | 15e27382041ebfe51a045fbed46a6e2a34fdc842 | db683e83b5a01b2a0fc5c7fc68c5e42184b9858f | refs/heads/master | 2020-04-05T08:19:29.873884 | 2018-11-12T16:25:57 | 2018-11-12T16:25:57 | 156,710,219 | 0 | 0 | null | 2018-11-08T13:21:00 | 2018-11-08T13:20:59 | null | UTF-8 | R | false | false | 1,253 | r | plot2.R | # Plot 2
# Plot 2: global active power over time for 2007-02-01/02.
# loading and unzipping data (note: re-downloads on every run)
pathTozipFile <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
pathTotxtFile <- file.path(getwd(), 'Temp')
download.file(pathTozipFile, pathTotxtFile, mode = "wb")
unzip(pathTotxtFile, "household_power_consumption.txt")
# FIX: declare "?" as the missing-value marker so numeric columns parse as
# numeric directly, instead of relying on as.numeric() coercion warnings.
dfSecondPlot <- read.csv("household_power_consumption.txt", sep = ";",
                         stringsAsFactors = FALSE, na.strings = "?")
# creating DateTime var (before Date is converted, so the raw d/m/Y string
# matches the strptime format below)
dfSecondPlot$DateTime <- with(dfSecondPlot, paste(Date, Time))
# converting Date var
dfSecondPlot$Date <- as.Date(dfSecondPlot$Date, format = "%d/%m/%Y")
# filtering only for 1st and 2nd February 2007
dfSecondPlotFiltered <- subset(dfSecondPlot, Date == "2007-02-01" |
                                 Date == "2007-02-02")
# converting y var into numeric (a no-op now that na.strings is set; kept as
# a guard)
dfSecondPlotFiltered$Global_active_power <- as.numeric(dfSecondPlotFiltered$Global_active_power)
# converting DateTime
dfSecondPlotFiltered$DateTime <- strptime(dfSecondPlotFiltered$DateTime, format = "%d/%m/%Y %H:%M:%S")
# creating file to write plot in
png("plot2.png", width = 480, height = 480)
with(dfSecondPlotFiltered, plot(DateTime, Global_active_power,
                                type = "l", ylab = "Global Active Power (kilowatts)", xlab = " "))
dev.off()
|
ba6ede4a3a6e2187d5098b58b1ada92029b8eeee | 8516a1b12744c52a8775250fea9be7f2bf535f4b | /shiny_infoBox_brushed/ui.R | dca22869c1a28a093e59e7dce58107ed086817e4 | [] | no_license | Ronlee12355/ShinyTrials | 5696482712d87fce6349bfbb5f8fc3915b32154c | c00691d7a1cdbdd0f608bbfe2c09dbe3878fe590 | refs/heads/master | 2021-03-22T22:29:47.263599 | 2020-04-12T06:04:46 | 2020-04-12T06:04:46 | 247,402,563 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,769 | r | ui.R | library(shiny)
library(shinydashboard)
shinyUI(
  # Dashboard layout: header with demo dropdown menus, sidebar with two tabs
  # plus external links, body rendering each tab's content.
  dashboardPage(
    # FIX: user-facing title typo "Dashborad" -> "Dashboard" (both occurrences).
    title = 'Shiny Dashboard With InfoBox and Brushed Plot',
    skin = "yellow",
    dashboardHeader(
      title = 'Shiny Dashboard With InfoBox and Brushed Plot',
      titleWidth = 600,
      # Dropdown menu for messages
      dropdownMenu(type = "messages", badgeStatus = "success",
                   messageItem("Support Team",
                               "This is the content of a message.",
                               time = "5 mins"
                   ),
                   messageItem("Support Team",
                               "This is the content of another message.",
                               time = "2 hours"
                   )
      ),
      # Dropdown menu for notifications
      dropdownMenu(type = "notifications", badgeStatus = "warning",
                   notificationItem(icon = icon("users"), status = "info",
                                    "5 new members joined today"
                   ),
                   notificationItem(icon = icon("warning"), status = "danger",
                                    "Resource usage near limit."
                   ),
                   notificationItem(icon = icon("shopping-cart", lib = "glyphicon"),
                                    status = "success", "25 sales made"
                   )
      ),
      # Dropdown menu for tasks, with progress bar
      dropdownMenu(type = "tasks", badgeStatus = "danger",
                   taskItem(value = 20, color = "aqua",
                            "Refactor code"
                   ),
                   taskItem(value = 40, color = "green",
                            "Design new layout"
                   ),
                   taskItem(value = 60, color = "yellow",
                            "Another task"
                   )
      )
    ),
    dashboardSidebar(
      sidebarMenu(
        menuItem(text = 'InfoBox & ValueBox', tabName = 'info_box', icon = icon('info-circle')),
        menuItem(text = 'Brushed Plot', tabName = 'brushed_box', icon = icon('paint-brush')),
        menuItem(text = 'External Links', icon = icon('external-link'),
                 menuSubItem('GitHub Repo', icon = icon('github'), href = 'https://github.com/Ronlee12355/ShinyTrials/'),
                 menuSubItem('LinkedIn Profile', icon = icon('linkedin'), href = 'http://www.linkedin.com/in/ronlee12355/'),
                 menuSubItem('Quora Info', icon = icon('quora'), href = '#'))
      ),
      sidebarSearchForm(textId = 'ipt_text', buttonId = 'sub_btn', 'Input a number')
    ),
    dashboardBody(
      tabItems(
        # Tab 1: dataset summary with info boxes.
        tabItem(tabName = 'info_box',
                fluidPage(
                  h2(strong('InfoBox & ValueBox Demo')),
                  selectInput('dst', label = 'Choose a dataset: ', choices = c('trees','iris','mtcars')),
                  br(),
                  p(strong('Basic Information of Selected Dataset: ')),
                  verbatimTextOutput('data_str'),
                  fluidRow(infoBoxOutput('cols', width = 3), infoBoxOutput('rows', width = 3))
                )),
        # Tab 2: brushable / clickable iris plots.
        tabItem(tabName = 'brushed_box',
                fluidRow(
                  box(fluidRow(column(width = 6, selectInput('point_x', 'X Variable: ', choices = setdiff(colnames(iris), 'Species'))),
                               column(width = 6, selectInput('point_y', 'Y Variable: ', choices = setdiff(colnames(iris), 'Species')))),
                      plotOutput('iris_point', brush = brushOpts(id = 'point_brush')),
                      br(),
                      column(p(strong('Selected Data Points')),
                             tableOutput(outputId = 'iris_brush'),
                             width = 12),
                      title = 'Brushed Point Plot of Iris',
                      status = 'success',
                      footer = 'Powered by ggplot2',
                      solidHeader = TRUE,  # was T; TRUE is the safe spelling
                      width = 6
                  ),
                  box(fluidRow(column(width = 6, selectInput('bar_y', 'Y Variable: ', choices = setdiff(colnames(iris), 'Species'))),
                               column(width = 6, radioButtons('reversed', 'Reverse: ', choices = c('Yes' = TRUE, 'No' = FALSE), inline = TRUE))),
                      plotOutput('iris_bar', click = clickOpts(id = 'point_click')),
                      br(),
                      p(strong('Clicked Outlier Data Points')),
                      tableOutput('iris_bar_brush'),
                      title = 'Brushed Bar Plot of Iris',
                      status = 'danger',
                      footer = 'Powered by ggplot2',
                      solidHeader = TRUE,
                      width = 6
                  )
                  # FIX: removed the trailing comma that followed this box; a
                  # trailing empty argument errors on htmltools/shiny versions
                  # that collect ... with base list().
                )
        )
      )
    )
  )
)
67487312af93c03f0f5d1481fdf1e031bf2fc3ee | 0a906cf8b1b7da2aea87de958e3662870df49727 | /blorr/inst/testfiles/blr_pairs_cpp/libFuzzer_blr_pairs_cpp/blr_pairs_cpp_valgrind_files/1609956893-test.R | 9f2548c9a32e7b7762ea444235d74b065e134374 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 784 | r | 1609956893-test.R | testlist <- list(x = c(-6.95715257111252e+306, NaN, -1.65652687672654e-181, NaN, -1.06817373190919e+297, 3.53369545915248e+72, 7.21902962651992e-304, 4.99006302299659e-322, 0, NaN, 3.53369546676705e+72, NaN, -2.0535003523974e-155, 2.67122792029349e+161, 5.41108926822131e-312, 1.390671161567e-308, -4.73574659118105e+305, NaN, 5.04530594319667e+182, 3.91107221590192e-274, -1.34765550943377e+28, 9.11136727638145e-311, -6.67113314602392e+306, -5.82852024984172e+303, 8.85449539944218e-159, 2.14327978499502e-312, 128978.107570072, 1.59875023337884e-112, 8.98637298518713e-243, 1.6690200483343e-308, -7.98824203857711e-280, -3.31318983418522e+304 ), y = c(1.62597497544489e-260, 2.10538544574893e-314, 0, 0, 0, 0))
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) |
acd0a59fbe48604fa245c71cf20a745b8239d3ef | 6858ce9b40f7f242af8200921cf3731bd0bb5c64 | /FWL_EtherLipids_3.2.0.R | 095d059f44353dd1bd2b25ef16fa33193fcf2de9 | [] | no_license | zl1002/Farese_and_Walther_lab | fea1b62bfcb7ac92029174b12ce03cbd0055fcdc | f03cde0aa1853374c1cd29476a76665c6b5a9be4 | refs/heads/master | 2021-04-08T14:09:46.247690 | 2020-03-19T17:59:05 | 2020-03-19T17:59:05 | 248,782,314 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,886 | r | FWL_EtherLipids_3.2.0.R | ####################################################################################
# Script: FWL_EtherLipids_3.2.0.R
# Author: Wenting Lyu
# Notes: This script assist executing for main script FWL_lipidomics_3.1.R which
# helps generating the graph and data for the workflow of lipidomics.
# it also calculate help visualize the ether saturation information
# First, Make sure that your R and Rstudio are newly enough for installing the
# packages needed in the script. Otherwise the script will pop out warnings
# for packages and won't run.
# Second, typing command in the console-----> source("FWL_lipidomics_3.1.R")
# or press the source button.
# Third, users can independently implement this analysis by running
# "fattyAcidsaturation_analysis2.1.r" in directory fattyAcids_saturation_analysis.
# This script is derived from Laura's project
#####################################################################################
# ether lipid
# --- Ether-lipid share of each lipid class -----------------------------------
# group_info and group_names come from the main script (FWL_lipidomics_3.1.R).
saturation_data <- read_csv("data/count_lipid_filtered_lipidomics.csv", col_types = cols())
# Flag ether species: an "e" or "p" after "(" in the molecule string
# (LipidSearch ether/plasmalogen annotation -- confirm against the input file).
ether_lipids <- saturation_data %>% rowwise() %>% mutate(ether = ifelse(str_detect(LipidMolec, "\\(.*[ep]"), "YES", "NO"))
ether_dt <- ether_lipids %>% filter(ether == "YES")
write_csv(ether_dt,"data/ether_lipids_saturation.csv")
# Total AUC per class, restricted to classes that contain ether species; the
# control column "MainArea[c]" is excluded.
all_lipids <- ether_lipids %>%
  filter(Class %in% unique(ether_dt$Class)) %>%
  select(Class, contains("MainArea"), -"MainArea[c]") %>%
  group_by(Class) %>%
  summarise_all(sum) %>%
  gather(SAMPLES, all_AUC, -Class)
# Ether-only AUC per class, same long shape.
ether_all <- ether_dt %>% select(Class, contains("MainArea"), -"MainArea[c]") %>% group_by(Class) %>% summarise_all(sum) %>%
  gather(SAMPLES, ether_AUC, -Class)
ether_percent <- left_join(all_lipids, ether_all)
# Long table of ether vs. remaining ("rest") AUC per class and sample.
ether_percent <- ether_percent %>%
  rowwise() %>%
  mutate(rest_AUC = all_AUC - ether_AUC) %>%
  select(-all_AUC) %>%
  mutate(GROUPS = ifelse(SAMPLES %in% group_info$samples,
                         unlist(group_info[group_info$samples==SAMPLES, 2]), "NA")) %>%
  mutate(SAMPLES = str_remove_all(SAMPLES, "MainArea\\[") %>% str_remove_all(., "\\]")) %>%
  gather(type, value, -c("Class", "SAMPLES", "GROUPS"))
ether_percent <- ether_percent %>% mutate_if(is.character, as.factor)
# FIX: the original did `levels(ether_percent$SAMPLES) <- <sorted names>`,
# which positionally RENAMES the alphabetically ordered levels and therefore
# scrambles sample identities once there are 10+ samples (s10 became s2, ...).
# factor(..., levels = ...) reorders the levels without relabelling the data.
sample_levels <- ether_percent$SAMPLES %>%
  as.character() %>%
  unique() %>%
  str_remove_all("s") %>%
  as.numeric() %>%
  sort() %>%
  paste0("s", .)
ether_percent$SAMPLES <- factor(ether_percent$SAMPLES, levels = sample_levels)
# NOTE(review): this still relabels GROUPS positionally -- confirm group_names
# is ordered to match the alphabetical factor levels.
levels(ether_percent$GROUPS) <- group_names
# Aesthetic columns passed to the plot_all() helper (defined in main script,
# along with set_theme() and add_scales()).
params1 <- c("SAMPLES", "value", "type")
# p1: stacked absolute AUC of ether vs. remaining lipids, per sample and class.
p1 <- plot_all(data = ether_percent, params1) +
  geom_bar(stat = "identity") +
  scale_fill_simpsons(labels = c("ether", "rest_lipid")) +
  facet_wrap(~Class, scales = "free") +
  theme_bw() +
  set_theme(theme_params = list(axis.text.x = element_text(angle = 45, size = 8, hjust = 1))) +
  add_scales() +
  labs(x = "experiment samples", y = "value", title = "ether in lipid class for each sample", fill = "", caption = "This visualization is only for ether lipids here")
print(p1)
ggsave("plot/ether.png", device = "png")
ggsave("plot/ether.svg", device = "svg")
# p2: same data as a 100% stacked bar (relative ether share per class).
p2 <- plot_all(data = ether_percent, params1) +
  geom_bar(stat = "identity", position = "fill") +
  scale_fill_simpsons(labels = c("ether", "rest_lipid")) +
  facet_wrap(~Class, scales = "free") +
  theme_bw() +
  set_theme(theme_params = list(axis.text.x = element_text(angle = 45, size = 8, hjust = 1))) +
  scale_y_continuous( expand = c(0, 0, 0.1, 0), labels = scales::percent_format()) +
  labs(x = "experiment samples", y = "value", title = "ether in lipid class for each sample", fill = "", caption = "This visualization is only for ether lipids here")
print(p2)
ggsave("plot/ether_percentage.png", device = "png")
ggsave("plot/ether_percentage.svg", device = "svg")
# Group-level means of the per-sample ether/rest values.
ether_percent_group <- ether_percent %>% group_by(Class, GROUPS, type) %>% select(-SAMPLES) %>% summarise_all(mean)
params2 <- c("GROUPS", "value", "type")
# p3: absolute stacked bars per experimental group.
p3 <- plot_all(data = ether_percent_group, params2) +
  geom_bar(stat = "identity") +
  scale_fill_simpsons(labels = c("ether", "rest_lipid")) +
  facet_wrap(~Class, scales = "free") +
  theme_bw() +
  set_theme(theme_params = list(axis.text.x = element_text(angle = 45, size = 8, hjust = 1))) +
  add_scales() +
  labs(x = "experiment Groups", y = "value", title = "ether in lipid class for each group", fill = "", caption = "This visualization is only for ether lipids here")
print(p3)
# NOTE(review): the per-sample plots above are written to plot/ but these go
# to data/ -- confirm the intended output folder.
ggsave("data/ether_group.png", device = "png")
ggsave("data/ether_group.svg", device = "svg")
# p4: 100% stacked version of p3.
p4 <- plot_all(data = ether_percent_group, params2) +
  geom_bar(stat = "identity", position = "fill") +
  scale_fill_simpsons(labels = c("ether", "rest_lipid")) +
  facet_wrap(~Class, scales = "free") +
  theme_bw() +
  set_theme(theme_params = list(axis.text.x = element_text(angle = 45, size = 8, hjust = 1))) +
  scale_y_continuous( expand = c(0, 0, 0.1, 0), labels = scales::percent_format()) +
  labs(x = "experiment samples", y = "value", title = "ether in lipid class for each group", fill = "", caption = "This visualization is only for ether lipids here")
print(p4)
ggsave("data/ether_group_percent.png", device = "png")
ggsave("data/ether_group_percent.svg", device = "svg")
# --- Fatty-acid saturation breakdown of the ether lipids ---------------------
# SFA/MUFA/PUFA count columns and sample_raw_list come from the main script.
ether_saturation <- ether_dt %>% filter(!FA_types == "None")
# Per-class total AUC across the raw sample columns (long format).
ether_long <- ether_saturation %>%
  group_by(Class) %>%
  summarise_at(vars(sample_raw_list), list(~sum(.))) %>%
  gather(SAMPLES, total_value, -Class)
# Fraction of each molecule's fatty-acid chains that are poly-unsaturated.
pufa_percent <- ether_saturation %>%
  mutate("PUFA%" = PUFA/(SFA + MUFA + PUFA))
# NOTE(review): "individuleMolec" is a typo in the output name, kept so any
# downstream readers of this file keep working.
write_csv(pufa_percent, "pufa_individuleMolec.csv")
sfa_percent <- ether_saturation %>%
  mutate("SFA%" = SFA/(SFA + MUFA + PUFA))
# FIX: the MUFA fraction was computed from the PUFA count (copy/paste error);
# the numerator must be MUFA.
mufa_percent <- ether_saturation %>%
  mutate("MUFA%" = MUFA/(SFA + MUFA + PUFA))
# Weight each sample's AUC by the PUFA fraction, keep the class label.
pufa_value <- pufa_percent %>% transmute_at(vars(sample_raw_list), list(~.*`PUFA%`)) %>% bind_cols(Class=pufa_percent$Class, .)
pufa_samples <- bind_cols(LipidMolec = pufa_percent$LipidMolec, LipidIon = pufa_percent$LipidIon, pufa_value)
write_csv(pufa_samples, "pufa_in_each_sample.csv")
# Per-class PUFA-weighted AUC, long format.
pufa_ether <- pufa_value %>% group_by(Class) %>% summarise_at(vars(sample_raw_list), list(~sum(.))) %>% arrange(Class)
pufa_long <- pufa_ether %>% gather(SAMPLES, PUFA_value, -Class)
# Same weighting and per-class aggregation for the SFA and MUFA fractions.
sfa_value <- sfa_percent %>%
  transmute_at(vars(sample_raw_list), list(~.*`SFA%`)) %>%
  bind_cols(Class=sfa_percent$Class, .)
sfa_long <- sfa_value %>%
  group_by(Class) %>%
  summarise_at(vars(sample_raw_list), list(~sum(.))) %>%
  arrange(Class) %>%
  gather(SAMPLES, SFA_value, -Class)
mufa_value <- mufa_percent %>%
  transmute_at(vars(sample_raw_list), list(~.*`MUFA%`)) %>%
  bind_cols(Class=mufa_percent$Class, .)
mufa_long <- mufa_value %>%
  group_by(Class) %>%
  summarise_at(vars(sample_raw_list), list(~sum(.))) %>%
  arrange(Class) %>%
  gather(SAMPLES, MUFA_value, -Class)
#ether_fa <- left_join( pufa_long, sfa_long, by = c("Class", "SAMPLES")) %>% left_join(., mufa_long, by = c("Class", "SAMPLES"))
ether_fa <- left_join(pufa_long, ether_long)
# ether_info <- ether_fa %>%
# rowwise() %>%
# mutate(GROUPS = ifelse(SAMPLES %in% group_info$samples,
# unlist(group_info[group_info$samples==SAMPLES, 2]), "NA")) %>%
# mutate("SFA_MUFA" = SFA_value + MUFA_value) %>%
# mutate("total_value" = PUFA_value + SFA_MUFA) %>%
# mutate(SAMPLES = str_remove_all(SAMPLES, "MainArea\\[") %>% str_remove_all(., "\\]")) %>%
# select(c("Class", "SAMPLES", "PUFA_value", "SFA_value", "MUFA_value", "SFA_MUFA", "total_value", "GROUPS" ))
# write_csv(ether_info, "ether_pufa.csv")
ether_info <- ether_fa %>%
rowwise() %>%
mutate(GROUPS = ifelse(SAMPLES %in% group_info$samples,
unlist(group_info[group_info$samples==SAMPLES, 2]), "NA")) %>%
mutate("SFA_MUFA" = total_value - PUFA_value) %>%
mutate(SAMPLES = str_remove_all(SAMPLES, "MainArea\\[") %>% str_remove_all(., "\\]")) %>%
select(Class, SAMPLES, PUFA_value, SFA_MUFA, total_value, GROUPS)
ether1 <- ether_info %>%
select(c("Class", "SAMPLES", "PUFA_value", "SFA_MUFA", "GROUPS")) %>%
gather(type, value, -c("Class", "SAMPLES", "GROUPS"))
# name_factor <- paste0("s", 1:12)
name_factor <- paste0("s", 1:18)
ether1$SAMPLES <- factor(ether1$SAMPLES, levels = name_factor)
#params1 <- c("SAMPLES", "value", "type")
p5 <- plot_all(data = ether1, params1) +
geom_bar(stat = "identity") +
scale_fill_simpsons(labels = c("PUFA", "SFA_MUFA")) +
facet_wrap(~Class, scales = "free") +
theme_bw() +
set_theme(theme_params = list(axis.text.x = element_text(angle = 45, size = 8, hjust = 1))) +
add_scales() +
labs(x = "experiment samples", y = "value", title = "PUFA in ehter lipids for each sample", fill = "", caption = "This visualization is only for ether lipids here")
print(p5)
ggsave("data/pufa_ether.png", device = "png")
p6 <- ggplot(data = ether1, aes(x=SAMPLES, y = value, fill = type)) +
geom_bar(stat = "identity", position = "fill") +
scale_y_continuous( expand = c(0, 0, 0.1, 0), labels = scales::percent_format()) +
scale_fill_jco(labels = c("PUFA", "SFA_MUFA")) +
facet_wrap(~Class, scales = "free") +
theme_bw() +
set_theme(theme_params = list(axis.text.x = element_text(angle = 45, size = 8, hjust = 1))) +
labs(x = "experiment samples", y = "value", title = "PUFA in ehter lipids for each sample", color = "", caption = "This visualization is only for ether lipids here")
print(p6)
ggsave("data/pufa_ether_percentage.png", device = "png")
aggregated_ether <- ether_saturation %>% group_by(Class) %>% summarise_at(vars(sample_raw_list), list(~sum(.))) %>% arrange(Class)
ether_mean <- cal_lipid_statistics(aggregated_ether, group_info, "mean", "Class")
ether_mean_long <- ether_mean[[2]] %>% select(-TYPE)
pufa_mean <- cal_lipid_statistics(pufa_ether, group_info, "mean", "Class")
pufa_mean_long <- pufa_mean[[2]] %>% select(-TYPE)
colnames(pufa_mean_long)[2] <- "PUFA_mean"
ether_pufa_mean <- left_join(ether_mean_long, pufa_mean_long, by = c("Class", "Groups")) %>% mutate("SFA/MUFA_mean" = mean - PUFA_mean)
ether2 <- ether_pufa_mean %>% select(-mean) %>% gather(type, value, -c("Class", "Groups"))
ether2 <- ether_info %>%
group_by(Class, GROUPS) %>%
summarise_at(vars(c(PUFA_value, SFA_MUFA)), list(~mean(.))) %>%
gather(type, value, -c(Class, GROUPS))
ether2$GROUPS <- factor(ether2$GROUPS, levels = group_names)
p7 <- plot_all(ether2, c("GROUPS", "value", "type")) +
geom_bar(stat = "identity") +
scale_fill_d3(labels = c("PUFA", "SFA_MUFA")) +
facet_wrap(~Class, scales = "free") +
theme_bw() +
set_theme() +
add_scales() +
labs(x = "Experiment Groups", y = "value", title = "PUFA (mean) in ether lipids for each group in each lipid class", color = "", caption = "This visualization is only for ether lipids here")
print(p7)
ggsave("data/pufa_ether_group.png", device = "png")
ggsave("data/pufa_ether_group.svg", device = "svg")
|
a36d096079e3657d0b1b93eeb5980a0665d64c06 | a7150b302e67445f75e351b8a8f5d10395b7abae | /knn_KOOS.R | ecfb1c1b9ae084cfad5e8560e7d0fb1eb328ff7e | [] | no_license | muttaja/test | bc16b0b18a0e81a6aa7f9b6a807052533d3917dc | 82ee1413e43e5537c33ce09ff17e5fe59e1f6427 | refs/heads/master | 2020-04-04T20:51:52.686836 | 2019-04-26T14:15:31 | 2019-04-26T14:15:31 | 156,263,771 | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 18,839 | r | knn_KOOS.R | #hinnangud koos andmestikul
#dk andmestik failist landsat_to_sentinel
dk[,-1] = scale(dk[,-1])
muld1 = data.frame(muld = koos[koos$aproovitykk_id %in% mets_id,]$muld)
muld1$aproovitykk_id = koos$aproovitykk_id[koos$aproovitykk_id %in% mets_id]
muld1[is.na(muld1$muld),]$muld = 999
#midagi kokku võtta?
table(muld1$muld) #alla 10 kindlasti kokku; 2 puuduvat väärtust
#10, 11, 209, 999
muld1[muld1$muld %in% c(10,11,200,999), "muld"] = 999
#muld 1-0 tüüpi andmestikuks
muld2 = dcast(muld1,aproovitykk_id~muld,fun.aggregate = function(x){as.integer(length(x) > 0)})
dkm = merge(dk, muld2, by = "aproovitykk_id")
#et siin oleks õige "sat"
sat0 = read.csv("smi_prt_13_17_pixphv-heledused.csv")
sat0[sat0 == 0] = NA
stid = unique(sat0$aproovitykk_id); stid = stid[stid %in% mets_id] #mets
kid = unique(koos$aproovitykk_id);
length(stid[!(stid %in% kid)])
load("ID_OK.RData", verbose = T)# saab midagi kohe valja võtta
sidx = id_ok
not_ok = stid[!(stid %in% id_ok)]
mets_id = na.omit(mets_id)
sidxx = mets_id[!(mets_id %in% not_ok)]
#####################################################
#vaata "ekspert_ puhas vajaminevate funktsioonide lugemiseks!
vars = names(dkm[-1])
data10 = dkm[dkm$aproovitykk_id %in% sidxx,]
names(data10)[1] = "SID"
require(FNN)
names_taks = names(koos)[c(2,19,25:31)]
taks_uus = koos[koos$aproovitykk_id %in% sidxx,names_taks]
#taks_uus = na.omit(taks_uus)
taks_uus$ARV_VMA = taks_uus$arv_maht_es*taks_uus$MA / 100
taks_uus$ARV_VKU = taks_uus$arv_maht_es*taks_uus$KU / 100
taks_uus$ARV_VKS = taks_uus$arv_maht_es*taks_uus$KS / 100
taks_uus$ARV_VHB = taks_uus$arv_maht_es*taks_uus$HB / 100
taks_uus$ARV_VLM = taks_uus$arv_maht_es*taks_uus$LM / 100
taks_uus$ARV_VLV = taks_uus$arv_maht_es*taks_uus$LV / 100
taks_uus$ARV_VKX = taks_uus$arv_maht_es*taks_uus$KX / 100
puud = c("ARV_VMA", "ARV_VKU", "ARV_VKS", "ARV_VHB", "ARV_VLM", "ARV_VLV", "ARV_VKX")
data_puud = taks_uus[taks_uus$aproovitykk_id %in% sidxx, puud]
data_puud_raie_props = data_puud / rowSums(data_puud)
sid_puudu = taks_uus$aproovitykk_id[!(complete.cases(data_puud_raie_props))]
sidxx = sidxx[!(sidxx %in% sid_puudu)]
data_puud = taks_uus[taks_uus$aproovitykk_id %in% sidxx, puud]
data_puud_raie_props = data_puud / rowSums(data_puud)
##############################
#funktsioonid
vars = names(dkm[-1])
fun_liik = function(liik){
for(k in 1:15){
print(k); print(Sys.time())
vars0 = vars
vars1 = c()
mx = 696
vahe = 969
while(vahe > 0){
mx0 = mx
print(vahe)
vars00 = c()
rss = c()
for(j in 1:length(vars0)){
vars00 = c(vars1,vars0[j])
dex = data10[,c("SID",vars00)]
dex = dex[dex$SID %in% sidxx,]
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = k, liik = liik)
rsdls = H_puu - data_puud_raie_props[,liik]
rss[j] = sum(rsdls**2)
}
rss[is.na(rss)] = 1000
mx0 = min(rss)
varmin = vars0[which.min(rss)]
vars0 = vars0[!(vars0 %in% varmin)]
vars1 = c(vars1,varmin)
vahe = mx-mx0
mx = mx0
}
assign(paste("var_naabreid", k, sep="_"),vars1)
}
lst_return = list(var_naabreid_1,var_naabreid_2,var_naabreid_3,var_naabreid_4,var_naabreid_5,var_naabreid_6,var_naabreid_7,var_naabreid_8,
var_naabreid_9,var_naabreid_10,var_naabreid_11,var_naabreid_12,var_naabreid_13,var_naabreid_14,var_naabreid_15)
}
for(l in 1:7){
print(Sys.time()); print(l)
vars_liik = fun_liik(l)
assign(paste("puuliik", l, sep="_"),vars_liik)
}
puuliiks = list(puuliik_1,puuliik_2,puuliik_3,puuliik_4,puuliik_5,puuliik_6,puuliik_7)
#save(puuliiks, file = "puuliiks0_mullaga.RData")
nr_neigh_liik = function(k, vars, liik){
dex = data10[,c("SID",vars)]
dex = dex[dex$SID %in% sidxx,]
dex[,-1] = t((t(as.matrix(dex[,-1]))))#*bw - ta
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = k, liik = liik)
rsdls = H_puu - data_puud_raie_props[,liik]
rss = sum(rsdls**2)
rss
}
for(l in 1:7){
puuliik = puuliiks[[l]]
rss= c()
for(k in 1:15){
rss[k] = nr_neigh_liik(k, vars = puuliik[[k]], liik = l)
}
assign(paste("rss_liik", l, sep="_"),rss)
}
par(mfrow = c(3,3))
plot(rss_liik_1, type = "o")
plot(rss_liik_2, type = "o")
plot(rss_liik_3, type = "o")
plot(rss_liik_4, type = "o")
plot(rss_liik_5, type = "o")
plot(rss_liik_6, type = "o")
plot(rss_liik_7, type = "o")
#mänd, kuusk, kask 10+; edasi 11,9,7 ja 4,5
k_list = list(c(10,11,12,13,14,15), c(10,11,12,13,14,15), c(10,11,12,13,14,15), c(11), c(9), c(7), c(4,5))
fun_opti_liik = function(w,k,vars,liik){
dex = data10[,c("SID",vars)]
dex = dex[dex$SID %in% sidxx,]
dex[,-1] = t((t(as.matrix(dex[,-1])))*w)
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = k, liik = liik)
rsdls = H_puu - data_puud_raie_props[,liik]
rss = sum(rsdls**2)
rss
}
for(j in 7:7){
for(k in 2:2){
print(c(j,k)); print(Sys.time())
kk = k_list[[j]][k]
varsx = puuliiks[[j]][[kk]]
w = rep(1, length(varsx))
opti_liik1 = optim(par = w, fn = fun_opti_liik, k = kk, vars = varsx, liik = j) #
print(j);print(k);print(opti_liik1$value)
assign(paste("optirss_muld", j, k, sep="_"),opti_liik1$value)
assign(paste("optiweights_muld", j, k, sep="_"),opti_liik1$par)
}
}
#4, 3, 5, 1, 1, 1, 1
liik_mins = c(13,12,14,11,9,7,4)
optiweights = list(optiweights_muld_1_4,optiweights_muld_2_3, optiweights_muld_3_5, optiweights_muld_4_1, optiweights_muld_5_1, optiweights_muld_6_1, optiweights_muld_7_1)
save(liik_mins, file = "liik_mins0_mullaga.RData")
save(optiweights, file = "optiweights0_muld.RData")
for(i in 1:7){
liik = i
wws = optiweights[[liik]]
varsx = puuliiks[[liik]][[liik_mins[liik]]]
dex = data10[,c("SID",varsx)]
dex = dex[dex$SID %in% sidxx,]
dex[,-1] = t((t(as.matrix(dex[,-1])))*wws)#*bw - ta
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = liik_mins[liik], liik = liik)
HP = data.frame(H = H_puu, T = data_puud_raie_props[,liik])
assign(paste("HP", i, sep=""),HP)
}
par(mfrow = c(3,3))
plot(HP1$T, HP1$H)
plot(HP2$T, HP2$H)
plot(HP3$T, HP3$H)
plot(HP4$T, HP4$H)
plot(HP5$T, HP5$H)
plot(HP6$T, HP6$H)
plot(HP7$T, HP7$H)
fun_rss = function(df){
pred = df[,1]; true = df[,2]
rsdls = (pred - true)
sqrt(sum(rsdls)**2) / dim(df)[1]
}
w_res = c(fun_rss(HP1),fun_rss(HP2),fun_rss(HP4),fun_rss(HP4),fun_rss(HP5),fun_rss(HP6),fun_rss(HP7))
#w_res = w_res / sum(w_res)
w_res
#[1] 0.0188098568 0.0111026192 0.0022136951 0.0022136951 0.0003588392 0.0013074733 0.0052367201
#see ei saa ju õige olla!?
#1 - mänd. 2 - kuusk, 3 - kask, 4 - haab, 5 - mustlepp, 6 - halllepp, 7 - muu
data_hp = data.frame(MA = HP1$H, KU = HP2$H, KS = HP3$H, HB = HP4$H, LM = HP5$H, LV = HP6$H, KX = HP7$H)
HP0 = data_hp / rowSums(data_hp)
WRSS = function(df){
true = data_puud_raie_props
#print(dim(df));print(dim(true))
w = colSums(true) / dim(df)[1] #kaalud vastavalt kui levinud on puuliik
rsdls = (df - true)
cols = colSums(rsdls**2)
RSS_col = cols*w
sum(RSS_col)
}
wrss = WRSS(data_hp0)
wrss
par(mfrow = c(3,3))
plot(HP1$T, HP0$MA)
plot(HP2$T, HP0$KU)
plot(HP3$T, HP0$KS)
plot(HP4$T, HP0$HB)
plot(HP5$T, HP0$LM)
plot(HP6$T, HP0$LV)
plot(HP7$T, HP0$KX)
#Kuule, aga kui nüüd ära on kaalutud kõik tunnised, siis võiks ju uuesti läbi jooksutada ja parimaid tunnuseid otsida???
#aga nüüd: võta "lihtsa kõrgusemudeliga" vigased välja ja proovi uuesti naabreid 5-15
koos_mets = koos[(koos$maakatsgrp == "M" & koos$maakatgrp == "ME"),] #miks NA-d sisse jäävad kui panna aint 1. tingimus?
koos_mets$aasta_erinevus = 2018 - koos_mets$aasta
load(file = "korgus_valja.RData", verbose = T)
k1 = koos_mets[!(koos_mets$aproovitykk_id %in% korgus_valja),]
km1 = lm(inv_korgus ~ K_Elev_P60 + H_Elev_P60 + K_Elev_P90 + H_Elev_P90 + aasta_erinevus, data = k1)
summary(km1) #aasta_erinevus negatiivse kordajaga, sest praegu näitaks lidar muidu liiga kõrget metsa. kordaja 3.3, ehk 33cm aastas juurdekasv? palju natuke!
pk1 = predict(km1, data = k1)
res1 = pk1 - k1$inv_korgus
resdf1 = data.frame(SID = k1$aproovitykk_id, res = res1)
plot(k1$inv_korgus, pk1)
########## need kõrgusesest saadud välja
sidxx = mets_id[!(mets_id %in% not_ok)] #769
data_puud = taks_uus[taks_uus$aproovitykk_id %in% sidxx, puud]
data_puud_raie_props = data_puud / rowSums(data_puud)
sid_puudu = taks_uus$aproovitykk_id[!(complete.cases(data_puud_raie_props))]
sidxx = sidxx[!(sidxx %in% sid_puudu)] #765
sum(1*((sidxx %in% korgus_valja))) #16 OK!
sum(1*((korgus_valja %in% sidxx))) #29, kumb nüüd õige on? korgus_valja pole unikaalsed!!!
sidxx = sidxx[!(sidxx %in% korgus_valja)] #749
data_puud = taks_uus[taks_uus$aproovitykk_id %in% sidxx, puud]
data_puud_raie_props = data_puud / rowSums(data_puud)
data10 = dkm[dkm$aproovitykk_id %in% sidxx,]
names(data10)[1] = "SID"
for(i in 1:7){
liik = i
wws = optiweights[[liik]]
varsx = puuliiks[[liik]][[liik_mins[liik]]]
dex = data10[,c("SID",varsx)]
dex = dex[dex$SID %in% sidxx,]
dex[,-1] = t((t(as.matrix(dex[,-1])))*wws)#*bw - ta
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = liik_mins[liik], liik = liik)
HP = data.frame(H = H_puu, T = data_puud_raie_props[,liik])
assign(paste("HP", i, sep=""),HP)
}
par(mfrow = c(3,3))
plot(HP1$T, HP1$H)
plot(HP2$T, HP2$H)
plot(HP3$T, HP3$H)
plot(HP4$T, HP4$H)
plot(HP5$T, HP5$H)
plot(HP6$T, HP6$H)
plot(HP7$T, HP7$H)
fun_rss = function(df){
pred = df[,1]; true = df[,2]
rsdls = (pred - true)
sum(abs(rsdls)) / dim(df)[1]
}
w_res = c(fun_rss(HP1),fun_rss(HP2),fun_rss(HP4),fun_rss(HP4),fun_rss(HP5),fun_rss(HP6),fun_rss(HP7))
#w_res = w_res / sum(w_res)
w_res
#749 juhul, kui ei jooksutanud uuesti läbi uute tunnuste ja kaalude leidmiseks
#[1] [1] 0.12734026 0.13474512 0.06289597 0.06289597 0.01798898 0.06396007 0.03488666
####uued hinnangud, kui need 15 väljas
#k 4 ... 15
sidxx = sidx1
fun_liik = function(liik, k1, k2){
lst_return = vector("list", length = 1+k2-k1)
for(k in k1:k2){
print(k); print(Sys.time())
vars0 = vars
vars1 = c()
mx = 696
vahe = 969
while(vahe > 0){
mx0 = mx
print(vahe)
vars00 = c()
rss = c()
for(j in 1:length(vars0)){
vars00 = c(vars1,vars0[j])
#print(vars00)
dex = data10[,c("SID",vars00)]
dex = dex[dex$SID %in% sidxx,]
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = k, liik = liik)
rsdls = H_puu - data_puud_raie_props[,liik]
rss[j] = sum(rsdls**2)
}
rss[is.na(rss)] = 1000
mx0 = min(rss)
varmin = vars0[which.min(rss)]
vars0 = vars0[!(vars0 %in% varmin)]
vars1 = c(vars1,varmin)
vahe = mx-mx0
mx = mx0
}
lst_return[[1+k-k1]] = assign(paste("var_naabreid", k, sep="_"),vars1)
}
lst_return
}
for(l in 1:7){
print(Sys.time()); print(l)
vars_liik = fun_liik(l, 8, 20)
assign(paste("puuliik_749", l, sep="_"),vars_liik)
}
puuliiks749 = list(puuliik_749_1,puuliik_749_2,puuliik_749_3,puuliik_749_4,puuliik_749_5,puuliik_749_6,puuliik_749_7)
save(puuliiks749, file = "puuliiks749_mullaga.RData")
nr_neigh_liik = function(k, vars, liik){
dex = data10[,c("SID",vars)]
dex = dex[dex$SID %in% sidxx,] ####### NB milliseid SID kasutad!
dex[,-1] = t((t(as.matrix(dex[,-1]))))#*bw - ta
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = k, liik = liik)
rsdls = H_puu - data_puud_raie_props[,liik]
rss = sum(rsdls**2) / dim(dex)[1]
rss
}
for(l in 1:7){
puuliik = puuliiks749[[l]]
rss= c()
for(k in 1:12){
rss[k] = nr_neigh_liik(k, vars = puuliik[[k]], liik = l)
}
assign(paste("rss_749", l, sep="_"),rss)
}
rss749 = list(rss_749_1, rss_749_2, rss_749_3, rss_749_4, rss_749_5, rss_749_6, rss_749_7)
save(rss749, file = "rss749_0.RData") #ehk see, mis on kehvem kui 765 korral
par(mfrow = c(3,3))
plot(rss_749_1, type = "o")
plot(rss_749_2, type = "o")
plot(rss_749_3, type = "o")
plot(rss_749_4, type = "o")
plot(rss_749_5, type = "o")
plot(rss_749_6, type = "o")
plot(rss_749_7, type = "o")
#võtame 15 kõik praegu lihtsuse huvides
for(j in 1:7){
for(k in 12:12){
print(c(j,k)); print(Sys.time())
kk = k #k_list[[j]][k]
varsx = puuliiks749[[j]][[kk]]
w = rep(1, length(varsx))
opti_liik1 = optim(par = w, fn = fun_opti_liik, k = kk, vars = varsx, liik = j) #
print(j);print(k);print(opti_liik1$value)
assign(paste("optirss749_muld", j, k, sep="_"),opti_liik1$value)
assign(paste("optiweights749_muld", j, k, sep="_"),opti_liik1$par)
}
}
optiweights749 = list(optiweights749_muld_1_12,optiweights749_muld_2_12, optiweights749_muld_3_12, optiweights749_muld_4_12, optiweights749_muld_5_12, optiweights749_muld_6_12, optiweights749_muld_7_12)
save(optiweights749, file = "optiweights749_muld.RData")
for(i in 1:7){
liik = i
wws = optiweights749[[liik]]
varsx = puuliiks749[[liik]][[12]]
dex = data10[,c("SID",varsx)]
dex = dex[dex$SID %in% sidxx,]
dex[,-1] = t((t(as.matrix(dex[,-1])))*wws)#*bw - ta
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = liik_mins[liik], liik = liik)
HP = data.frame(H = H_puu, T = data_puud_raie_props[,liik])
assign(paste("HP", i, sep=""),HP)
}
par(mfrow = c(3,3))
plot(HP1$T, HP1$H)
plot(HP2$T, HP2$H)
plot(HP3$T, HP3$H)
plot(HP4$T, HP4$H)
plot(HP5$T, HP5$H)
plot(HP6$T, HP6$H)
plot(HP7$T, HP7$H)
fun_rss = function(df){
pred = df[,1]; true = df[,2]
rsdls = (pred - true)
sum(abs(rsdls)) / dim(df)[1]
}
w_res = c(fun_rss(HP1),fun_rss(HP2),fun_rss(HP4),fun_rss(HP4),fun_rss(HP5),fun_rss(HP6),fun_rss(HP7))
#w_res = w_res / sum(w_res)
w_res
#[1] 0.16378694 0.14633420 0.07979455 0.07979455 0.02553548 0.08091982 0.03627051
#oli: [1] 0.12734026 0.13474512 0.06289597 0.06289597 0.01798898 0.06396007 0.03488666
#wtf, need, mis ma välja võtsin, ei olnud siis välja võtmist väärt? äkki nende sat-pilt vastas siis veel reaalsusele!?
#kas mul ikka andmed/dimensioonid klapivad omavahel???
s1 = data10$SID
data_puud1 = taks_uus[taks_uus$aproovitykk_id %in% sidxx, c("aproovitykk_id",puud)]
s2 = data_puud1$aproovitykk_id
################ uuesti 749
#18 oli kõigil kuuel esimesel seni kõige madalam
for(l in 1:6){
print(Sys.time()); print(l)
vars_liik = fun_liik(l, 5, 20)
assign(paste("puuliik_749_ver2_", l, sep="_"),vars_liik)
}
#NB, "muu" lase eraldi! 7. aint 7 hetkel
puuliiks749_ver1 = list(puuliik_749_ver1__1,puuliik_749_ver1__2,puuliik_749_ver1__3,puuliik_749_ver1__4,puuliik_749_ver1__5,puuliik_749_ver1__6,puuliik_749_ver1__7)
puuliiks749_ver2 = list(puuliik_749_ver2__1,puuliik_749_ver2__2,puuliik_749_ver2__3,puuliik_749_ver2__4,puuliik_749_ver2__5,puuliik_749_ver2__6)
for(l in 1:6){
puuliik = puuliiks749_ver2[[l]]
rss= c()
for(k in 1:16){
rss[k] = nr_neigh_liik(k+4, vars = puuliik[[k]], liik = l)
}
assign(paste("rss_749_ver2", l, sep="_"),rss)
}
rss749_v2 = list(rss_749_ver2_1, rss_749_ver2_2, rss_749_ver2_3, rss_749_ver2_4, rss_749_ver2_5, rss_749_ver2_6)
par(mfrow = c(3,3))
plot(rss_749_ver2_1, type = "o")
plot(rss_749_ver2_2, type = "o")
plot(rss_749_ver2_3, type = "o")
plot(rss_749_ver2_4, type = "o")
plot(rss_749_ver2_5, type = "o")
plot(rss_749_ver2_6, type = "o")
lkm = c(17,18,20,15,9,6)
for(j in 1:6){
print(j); print(Sys.time())
kk = lkm[j]
varsx = puuliiks749_ver2[[j]][[kk-4]]
w = rep(1, length(varsx))
opti_liik1 = optim(par = w, fn = fun_opti_liik, k = kk, vars = varsx, liik = j) #
print(j);print(k);print(opti_liik1$value)
assign(paste("optirss749_ver2", j, k, sep="_"),opti_liik1$value)
assign(paste("optiweights749_ver2", j, kk, sep="_"),opti_liik1$par)
}
for(i in 1:6){
liik = i
kk = lkm[liik]
wws = optiweights749_v2[[liik]]
varsx = puuliiks749_ver2[[liik]][[kk-4]]
dex = data10[,c("SID",varsx)]
dex = dex[dex$SID %in% sidxx,]
dex[,-1] = t((t(as.matrix(dex[,-1])))*wws)#*bw - ta
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = kk, liik = liik)
HP = data.frame(H = H_puu, T = data_puud_raie_props[,liik])
assign(paste("HP", i, sep=""),HP)
}
optiweights749_v2 = list(optiweights749_ver2_1_16,optiweights749_ver2_2_16, optiweights749_ver2_3_16, optiweights749_ver2_4_16, optiweights749_ver2_5_16, optiweights749_ver2_6_16)
save(optiweights749_v2, file = "optiweights749_v2.RData")
for(i in 1:6){
liik = i
kk = lkm[liik]
wws = optiweights749_v2[[liik]]
varsx = puuliiks749_ver2[[liik]][[kk-4]]
dex = data10[,c("SID",varsx)]
dex = dex[dex$SID %in% sidxx,]
dex[,-1] = t((t(as.matrix(dex[,-1])))*wws)#*bw - ta
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = kk, liik = liik)
HP = data.frame(H = H_puu, T = data_puud_raie_props[,liik])
assign(paste("HP", i, sep=""),HP)
}
par(mfrow = c(2,3))
plot(HP1$T, HP1$H)
plot(HP2$T, HP2$H)
plot(HP3$T, HP3$H)
plot(HP4$T, HP4$H)
plot(HP5$T, HP5$H)
plot(HP6$T, HP6$H)
fun_rss = function(df){
pred = df[,1]; true = df[,2]
rsdls = (pred - true)
sum(abs(rsdls)) / dim(df)[1]
}
w_res = c(fun_rss(HP1),fun_rss(HP2),fun_rss(HP3),fun_rss(HP4),fun_rss(HP5),fun_rss(HP6),fun_rss(HP7))
#w_res = w_res / sum(w_res)
w_res
#keskmised vead: 0.12888961 0.13565126 0.16840761 0.06443989 0.01791119 0.06321045 0.03488666
#kaalume läbi ja otsime uusi tunnuseid!
fun_liik = function(liik, k1, k2, wi, vi){
lst_return = vector("list", length = 1+k2-k1)
data10[,vi] = t((t(as.matrix(data10[,vi])))*wi) #korrutame eelmisel sammul leitud kaaludega läbi
for(k in k1:k2){
print(k); print(Sys.time())
vars0 = vars
vars1 = c()
mx = 696
vahe = 969
while(vahe > 0){
mx0 = mx
print(vahe)
vars00 = c()
rss = c()
for(j in 1:length(vars0)){
vars00 = c(vars1,vars0[j])
#print(vars00)
dex = data10[,c("SID",vars00)]
dex = dex[dex$SID %in% sidxx,]
dex$cl = "cl"
H_puu = fun_agre_liik(dex, data_puud, k = k, liik = liik)
rsdls = H_puu - data_puud_raie_props[,liik]
rss[j] = sum(rsdls**2)
}
rss[is.na(rss)] = 1000
mx0 = min(rss)
varmin = vars0[which.min(rss)]
vars0 = vars0[!(vars0 %in% varmin)]
vars1 = c(vars1,varmin)
vahe = mx-mx0
mx = mx0
}
lst_return[[1+k-k1]] = assign(paste("var_naabreid", k, sep="_"),vars1)
}
lst_return
}
for(l in 1:3){
print(Sys.time()); print(l)
wi = optiweights749_v2[[l]]
kk = lkm[l]
vi = puuliiks749_ver2[[l]][[kk-4]]
vars_liik = fun_liik(l, 17, 20, wi = wi, vi = vi)
assign(paste("puuliik_749_ver3_", l, sep="_"),vars_liik)
}
liiksv3 = list(puuliik_749_ver3__1, puuliik_749_ver3__2, puuliik_749_ver3__3)
#liigid 1-3 naabreid 17-20 jooksutatud juba kaalutud andmete peal:
#võrdle tavalsii 749 vigu:
##keskmised vead: 0.12888961 0.13565126 0.16840761 0.06443989 0.01791119 0.06321045 0.03488666
save(liiksv3, file = "liiksv3.RData")
|
f5ed093722a6cdca2d7eabe82150943499e066e9 | b6258c79242265936a92284096f4ad53faee34f2 | /K-means and CART.tree .R | 0293165523122540961ac249cae6e4573707c8e5 | [] | no_license | lialee99/Taiwan-Food-Forum | 873148f3e0832a8b767f7c8cfaae29a75903f9b1 | e86744261f3517fcf848434fbc4d841c726ce7ff | refs/heads/master | 2020-06-06T22:50:42.695993 | 2019-07-03T03:39:10 | 2019-07-03T03:39:10 | 192,869,303 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,380 | r | K-means and CART.tree .R | library(jsonlite)
library(jiebaR)
# get dcard_area from github
dcard_area <- dcard_area[,-1]
dim(dcard_area)
# select content, id, gender, area
dcard_content <- dcard_area[,c(1,8,12,15)]
# delete na
dcard_content <- dcard_content %>%
filter(area %in% c(1:5))
sapply(dcard_content, class)
library(dplyr)
# repeat 8 round (north, middle, south, east X female, male)
# F_E = female plus east
M_S <- dcard_content %>%
filter(gender == "M") %>%
filter(area == 3)
# 檢查是否重複
M_M <- M_M %>%
distinct(content, id, .keep_all = TRUE)
# 指定停用詞和分詞詞庫
stop_word <- '/Users/lialee/Desktop/Programming/TextMining/Data/stop.txt'
user_dic <- "/Users/lialee/Desktop/III Final Project/foodclean.csv"
# 設定斷詞器
mixseg <- worker(stop_word = stop_word, user = user_dic, type = "tag")
# - - - 開始跑斷詞
df_FN <- data.frame()
df_FM <- data.frame()
df_FE <- data.frame()
df_FS <- data.frame()
df_MN <- data.frame()
df_MM <- data.frame()
df_ME <- data.frame()
df_MS <- data.frame()
seq_doc <- NULL # Word Segmentation Results
seq_tag <- NULL # POS Tagging Results
k <- M_S$content
k <- gsub('[0-9]+', "", k)
k <- gsub('[[:space:]]', "", k)
k <- gsub('[a-zA-Z]', "", k)
k <- gsub('#', "", k)
k <- gsub('[ ️ ︎ ﹏ ︵ ︶ ︿ ﹃ ꒳]',"",k)
k <- gsub('[︴ ︹ ︺ ꒦ ꒪ ꒫"]' ,"",k)
k <- gsub('[a-zA-Z]', "", k)
k <- gsub('[-+/.─◆○~=,「」▲:~※_★$、?│【】()()]' ,"", k)
# 中文分詞
w <- segment(as.vector(k), mixseg)
seq_doc <- c(seq_doc, w)
# 詞性標注
t <- names(tagging(as.vector(k), mixseg))
seq_tag <- c(seq_tag , t)
seq <- data.frame(seq_doc, seq_tag)
seq <- seq[seq$seq_tag %in% c('n','nr','nrt','ns','nt','nz'),]
seq_doc <- table(as.character(seq$seq_doc))
# give area tags
seq_doc <- data.frame(seq_doc, clas = 'Male South')
df_MS <- rbind(df_MS, seq_doc)
# combine eight data frames to df_area
df_area <- rbind(df_area, df_MS)
table(df_area$clas)
names(df_area)[1] <- 'Keywords'
names(df_area)[2] <- 'Frequency'
names(df_area)[3] <- 'Type'
DF <- c(table(df_area$Keywords))
FM <- unique(df_area$Type)
df_area <- df_school
library(reshape2)
TCM <- acast(df_area, Keywords ~ Type, value.var='Frequency', fill = 0, drop = FALSE, sum)
TCB <- ifelse(TCM > 0, 1, 0)
# 共出現超過5次才選
selectedKW <- rowSums(TCM) > 5
TCM <- as.data.frame(TCM[selectedKW,])
TCB <- as.data.frame(TCB[selectedKW,])
DF <- DF[selectedKW]
#文章總篇數
counter <- 31038
IDF <- log10(counter / DF)
cbind(rownames(TCM), IDF)
TTF <- colSums(TCM)
TCM_IDF <- t(t(TCM) / TTF) * IDF
TCM <- data.frame(Keywords = rownames(TCM), TCM)
rownames(TCM) <- NULL
TCM_IDF <- data.frame(Keywords = rownames(TCM_IDF), TCM_IDF)
rownames(TCM_IDF) <- NULL
TCB <- data.frame(Keywords = rownames(TCB), TCB)
rownames(TCB) <- NULL
colnam <- TCM$Keywords
TCM$Keywords <- NULL
# 轉向
t_TCM <- as.data.frame(t(TCM))
colnames(t_TCM) <- colnam
rownames(t_TCM) <- FM
# 這個步驟是為了決策樹跟分類無關
cart_TCM <- t_TCM
cart_TCM$Type <- FM
# K-means
library(cluster)
# Decide K
result <- list()
for (i in 2:6){
kmd <- kmeans(t_TCM, centers=i)
sil <- silhouette(kmd$cluster, dist(t_TCM))
result[[paste('k=',i,sep='')]] <- mean(sil[,'sil_width'])
}
result
# K = 3
kmd <- kmeans(t_TCM, centers=3)
kmd$cluster
# 看輪廓係數
sil <- silhouette(kmd$cluster, dist(t_TCM))
mean(sil[,'sil_width'])
# Cluster Descriptions
kmd$centers
# Display Clustering Results
# 以下都是畫文字雲用的
library(wordcloud2)
install.packages("webshot")
library(webshot)
webshot::install_phantomjs()
library("htmlwidgets")
for(i in 1:3) {
Clus_i <- t_TCM[kmd$cluster==i,]
Clus_n <- colnames(t_TCM)
Clus_f <- colSums(Clus_i)
Word_Tab <- data.frame(Clus_n, Clus_f)
rownames(Word_Tab) <- NULL
Word_Tab <- Word_Tab[Word_Tab$Clus_f!=0,]
my_graph <- wordcloud2(Word_Tab, size = 0.5, minSize = 0, gridSize = 3,color = "random-light", backgroundColor = "white")
saveWidget(my_graph,paste0("/Users/lialee/Desktop/",i,".html"),selfcontained = F)
webshot(paste0("/Users/lialee/Desktop/",i,".html"),
paste0("/Users/lialee/Desktop/",i,".png"),
vwidth = 600, vheight=350)
}
# - - - - - cart_TCM 畫決策樹
library(rpart)
library(rpart.plot)
CART.tree <- rpart(Type ~ ., data=cart_TCM,
control=rpart.control(minsplit=2, cp=0))
rpart.plot(CART.tree)
|
4f3a2ecdc66a66f77eff1e45343ca2827d152ecb | 599c2cf0ad1b158138c78b5c6c4c2804bbeb45d0 | /man/recycle.Rd | 32eca8960d48d2866ba4ec3a4b7768b6083d10c8 | [] | no_license | tlarzg/rtemis | b12efae30483c52440cc2402383e58b66fdd9229 | ffd00189f6b703fe8ebbd161db209d8b0f9f6ab4 | refs/heads/master | 2023-07-07T20:53:43.066319 | 2021-08-27T03:42:19 | 2021-08-27T03:42:19 | 400,347,657 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 299 | rd | recycle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recycle.R
\name{recycle}
\alias{recycle}
\title{Recycle values of vector to match length of target}
\usage{
recycle(x, target)
}
\description{
Recycle values of vector to match length of target
}
\author{
E.D. Gennatas
}
|
db298ec7aa72332f96dd70a21578b92ee8cbc260 | cb39c24643cdd4a9fd4ed20980ee675edd1cdbf3 | /man/FamiliasLocus.Rd | e3abade8a7ba7c80d02c29adc66a2f2eca8cce08 | [] | no_license | cran/Familias | b11d2abc1dc53219b79a9907220a90aec72aa20d | b8ed14226c255b2b791d6e08a6cb231d761846a3 | refs/heads/master | 2021-01-22T03:39:35.304468 | 2016-02-18T15:13:56 | 2016-02-18T15:13:56 | 17,679,206 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,586 | rd | FamiliasLocus.Rd | \name{FamiliasLocus}
\alias{FamiliasLocus}
\title{
Creates an object with information on a locus, including its mutation matrices.
}
\description{
The user provides input needed to define a locus (also called system or forensic marker) to be used for pedigree calculations.
The input is checked and if no errors are found a list with class \code{FamiliasLocus} is returned containing
the information.
}
\usage{
FamiliasLocus(frequencies, allelenames, name,
MutationModel = "Stepwise",
MutationRate = 0,
MutationRange = 0.5,
MutationRate2 = 0,
MutationMatrix,
Stabilization = "None",
MaxStabilizedMutrate = 1,
femaleMutationModel,
femaleMutationRate,
femaleMutationRange,
femaleMutationRate2,
femaleMutationMatrix,
maleMutationModel,
maleMutationRate,
maleMutationRange,
maleMutationRate2,
maleMutationMatrix)
}
\arguments{
\item{frequencies}{
The first input of FamiliasLocus may be either a vector containing allele frequencies, or a previously created FamiliasLocus object.
In the first case, the vector may include a possible silent allele; that it is silent is indicated in the allelenames.
The frequencies must sum to 1.
In the second case, the new object will be identical to the old object in terms of frequencies, names of alleles, and name of locus,
so the 'allelenames' and 'name' parameters must be missing. However, at least one Mutation parameter or the Stabilization parameter must
be non-missing, and new mutation matrices will be constructed based on these. If all Mutation parameters are missing, stabilized
mutation matrices will be produced based on the mutation matrices of the old object.
}
\item{allelenames}{
Names of the alleles, like \code{15} or 'A'. Note that thel last allele may be called 'Silent' (or 'silent').
It is then treated as a silent allele in subsequent likelihood calculations. The default is to use the names attribute of
the frequencies, if it exists; otherwise the default is to use consecutive integers, starting at 1.
Note that if the 'Stepwise' mutation model is used, allele names (except for a silent allele) must be integers,
with microvariants named as for example 15.2.
}
\item{name}{
Characters like 'D3S1358', used to identify the locus (marker). The default is to use the name of the frequencies argument to this function.
}
\item{MutationModel}{
The mutation model, used to create the mutation matrix. It may be 'Equal', 'Proportional', 'Stepwise', or 'Custom', see Details.
}
\item{MutationRate}{
The mutation rate; for the 'Stepwise' model the rate of integer-step mutations. It is not used when the MutationModel is 'Custom'.
}
\item{MutationRange}{
Only used when the MutationModel is 'Stepwise'. It then indicates the relative probability of mutating n+1 steps versus
mutating n steps.
}
\item{MutationRate2}{
Only used when the MutationModel is 'Stepwise'. It then indicates the rate of non-integer-step mutations, e.g., mutations
from an allele with an integer name to alleles with decimal names indicating microvariants.
}
\item{MutationMatrix}{
Only used when the MutationModel is 'Custom'. It then directly specifies the mutation matrix.
}
\item{Stabilization}{
The possible values are 'None', 'DP', 'RM', and 'PM', with 'None' being the default.
The other values adjust the mutation matrices so that allele frequencies after one or
more generations of mutations will be equal to the original allele frequencies. See Details.
}
\item{MaxStabilizedMutrate}{
Not used when stabilization is 'None'. Otherwise it indicates an upper bound for the specific mutation rate
for each allele allowed in the mutation matrices after stabilization.
}
\item{femaleMutationModel}{
Specifies a separate female value for MutationModel; defaults to MutationModel.
}
\item{femaleMutationRate}{
Specifies a separate female value for MutationRate; defaults to MutationRate.
}
\item{femaleMutationRange}{
Specifies a separate female value for MutationRange; defaults to MutationRange.
}
\item{femaleMutationRate2}{
Specifies a separate female value for MutationRate2; defaults to MutationRate2.
}
\item{femaleMutationMatrix}{
Specifies a separate female value for MutationMatrix; defaults to MutationMatrix.
}
\item{maleMutationModel}{
Specifies a separate male value for MutationModel; defaults to MutationModel.
}
\item{maleMutationRate}{
Specifies a separate male value for MutationRate; defaults to MutationRate.
}
\item{maleMutationRange}{
Specifies a separate male value for MutationRange; defaults to MutationRange.
}
\item{maleMutationRate2}{
Specifies a separate male value for MutationRate2; defaults to MutationRate2.
}
\item{maleMutationMatrix}{
Specifies a separate male value for MutationMatrix; defaults to MutationMatrix.
}
}
\details{
The probabilities for when and how mutations happen can be specified in mutation matrices, where
the row corresponding to an allele indicates the probabilities that the allele is transferred as
the allele indicated by the column. Mutation matrices may be specified directly
in the MutationMatrix parameters by setting the
value of the MutationModel parameter to 'Custom'. Otherwise they are computed based on the values
of the MutationModel, MutationRate, MutationRate2, and MutationRange parameters.
If MutationModel is 'Equal', there is an equal probability of mutating to any non-silent allele, given that a mutation happens.
This model is referred to as 'Equal probability (simple and fast)' in Familias 2.0.
If MutationModel is 'Proportional', the probability of mutating to any non-silent allele is proportional to its frequency.
It is referred to as 'Probability proportional to frequency (stable)' in Familias 2.0.
If MutationModel is 'Stepwise', it is required that the names of all non-silent alleles are positive integers,
indicating the number of sequence repetitions of an STR marker, or decimal numbers with a single decimal, such as '15.2',
indicating a microvariant. Mutations are then divided into two types: Those that
add or subtract an integer to the allele, and those that add or subtract some fractional amount.
The rate of these two types of mutations are given separately as MutationRate and MutationRate2, respectively.
Relative probabilities of different mutations of the first type are specified using the MutationRange parameter.
The model with only integer alleles is referred to as 'Probability decreasing with range (equal)' in Familias 2.0,
while the more general model is called 'Extended stepwise' in Familias 3.0.
Note that the probability of mutations to or from silent alleles is set to zero in all models except the 'Custom' model.
The 'Stabilization' parameter may be used to change the mutation matrices so that they become stationary relative
to the frequencies vector. See the references.
When the 'PM' setting is used together with the 'Stepwise' MutationModel
and all allele names are integers, the resulting model is referred to as
'Probability decreasing with range (stable)' in Familias 2.0.
}
\value{
A list of class \code{FamiliasLocus} containing
\item{locusname}{The name of the locus}
\item{alleles}{The frequencies of the alleles. The names of the alleles are included as the vector names.}
\item{femaleMutationType}{A string specifying the type of the female mutations.}
\item{femaleMutationMatrix}{The mutation matrix used for female transfer.}
\item{maleMutationType}{A string specifying the type of the male mutations.}
\item{maleMutationMatrix}{The mutation matrix used for male transfer.}
\item{simpleMutationMatrices}{Indicates whether the probability of mutating to an
allele is always independent of which allele the mutation happens from.
If this is true, some likelihood computations can be done faster.}
\item{Stabilization}{The stabilization method used.}
}
\author{
Petter Mostad mostad@chalmers.se, Thore Egeland Thore.Egeland@gmail.com, and Ivar Simonsson simonssi@chalmers.se
}
\references{
Egeland, Kling, Mostad: Relationship Inference with Familias and R. 2016.
Simonsson, Mostad: Stationary Mutation models. (Submitted)
}
\examples{
#Simple examples
FamiliasLocus(1:4/10)
FamiliasLocus(frequencies=c(0.1, 0.2, 0.3, 0.4),
allelenames= c("A", "B", "C", "D"), name="locus1")
#Modified to include a silent frequency
FamiliasLocus(frequencies=c(0.1, 0.2, 0.3, 0.3, 0.1),
allelenames= c("8", "9", "10", "11", "silent"), name="locus1")
#Mutation rates added
FamiliasLocus(frequencies=c(0.1, 0.2, 0.3, 0.4),
allelenames= c("8", "9", "10", "11"), name="locus1",
femaleMutationRate=0.001, maleMutationRate=0.005)
#Mutation matrices specified directly
MM <- matrix(c(0.99, 0.005, 0.003, 0.002, 0.005, 0.99, 0.005, 0,
0, 0.005, 0.99, 0.005, 0.002, 0.003, 0.005, 0.99), 4, 4, byrow=TRUE)
FamiliasLocus(frequencies=c(0.1, 0.2, 0.3, 0.4),
allelenames= c("08", "09", "10", "11"), name="locus1",
MutationModel = "Custom", MutationMatrix = MM)
#A locus is first created, and then edited
loc <- FamiliasLocus(c(0.2, 0.5, 0.3))
loc2 <- FamiliasLocus(loc, maleMutationRate = 0.001)
FamiliasLocus(loc2, Stabilization = "PM")
#A locus using standard Norwegian frequencies is created
data(NorwegianFrequencies)
FamiliasLocus(NorwegianFrequencies$TH01)
} |
7c1ffd142798e790dd7a1304a5fe4da9dc91c30d | a169dda12f7f03c68cb8a86a68236774990d50da | /man/iNEXTbeta.link.Rd | c782ba0425254afa981512310e0ea4d4eedbb472 | [] | no_license | Chunger-Lo/iNEXT.link | 393e889f2fd8f24d89285ce84b6b646de0085930 | a7d4ac08958297ceacab5bdc32d876a81b1712b1 | refs/heads/master | 2023-07-08T17:23:09.387560 | 2021-08-12T01:49:03 | 2021-08-12T01:49:03 | 351,671,773 | 0 | 0 | null | 2021-05-04T12:56:25 | 2021-03-26T05:26:13 | R | UTF-8 | R | false | true | 3,790 | rd | iNEXTbeta.link.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iNEXTlink.R
\name{iNEXTbeta.link}
\alias{iNEXTbeta.link}
\title{Interpolation (rarefaction) and extrapolation of Chao et al.’s (2021) network diversity and mean network diversity
Function \code{iNEXTbeta.link} Interpolation and extrapolation of Beta diversity with order q}
\usage{
iNEXTbeta.link(
data,
diversity = "TD",
level = seq(0.5, 1, 0.5),
datatype = c("abundance", "incidence_raw"),
q = c(0, 1, 2),
nboot = 20,
conf = 0.95,
row.tree = NULL,
col.tree = NULL
)
}
\arguments{
\item{data}{a matrix/data.frame of species abundances (for abundance data) or species-by-site incidence raw matrix/data.frame (for incidence data).\cr
Abundance data: a species-by-site matrix/data.frame of species abundances. The row (species) names of
data must match the species names in the phylogenetic tree and thus cannot be missing.\cr
Incidence raw data: species-by-site raw incidence matrix/data.frame. When there are N assemblages
and thus N matrices, users must first merge the N matrices by species identity to obtain a large
merged incidence matrix, where the rows of the matrix refer to all species presented in the merged
data. The row (species) names of data must match the species names in the phylogenetic tree and
thus cannot be missing.}
\item{diversity}{a choice of three-level diversity: 'TD' = 'Taxonomic', 'PD' = 'Phylogenetic',
and 'FD' = 'Functional' under certain threshold.}
\item{datatype}{data type of input data: individual-based abundance data (\code{datatype = "abundance"}) or species by sampling-units incidence matrix (\code{datatype = "incidence_raw"}).}
\item{q}{a nonnegative value or sequence specifying the diversity order. Default is \code{c(0,1,2)}.}
\item{nboot}{a positive integer specifying the number of bootstrap replications when assessing
sampling uncertainty and constructing confidence intervals. Enter 0 to skip the bootstrap procedures. Default is 20.}
\item{conf}{a positive number < 1 specifying the level of confidence interval. Default is 0.95.}
\item{type}{desired diversity type: \code{type = "PD"} for Chao et al. (2010) phylogenetic diversity
and \code{type = "meanPD"} for mean phylogenetic diversity (phylogenetic Hill number). Default is \code{"PD"}.}
}
\value{
A list of seven lists with three-diversity and four-dissimilarity.
}
\description{
Interpolation (rarefaction) and extrapolation of Chao et al.’s (2021) network diversity and mean network diversity
Function \code{iNEXTbeta.link} Interpolation and extrapolation of Beta diversity with order q
}
\examples{
\dontrun{
# example
data(puerto.rico)
beta1 = iNEXTbeta.link(data = puerto.rico$data, level = seq(0.5, 0.9, 0.4), datatype='abundance',q = c(0, 1, 2),
diversity = 'TD', nboot = 10, conf = 0.95)
beta2 = iNEXTbeta.link(data = puerto.rico$data, level = seq(0.5, 0.9, 0.4), datatype='abundance',q = c(0, 1, 2),
                       diversity = 'PD', nboot = 10, conf = 0.95,
row.tree = puerto.rico$row.tree, col.tree = puerto.rico$col.tree)
}
}
\references{
Chao, A., Chazdon, R. L., Colwell, R. K. and Shen, T.-J.(2005). A new statistical approach for assessing similarity of species composition with incidence and abundance data. Ecology Letters 8, 148-159. (pdf file) Spanish translation in pp. 85-96 of Halffter, G. Soberon, J., Koleff, P. and Melic, A. (eds) 2005 Sobre Diversidad Biologica: el Sognificado de las Diversidades Alfa, Beta y Gamma. m3m-Monografias 3ercer Milenio, vol. 4, SEA, CONABIO, Grupo DIVERSITAS & CONACYT, Zaragoza. IV +242 pp.
Chiu, C.-H., Jost, L. and Chao*, A. (2014). Phylogenetic beta diversity, similarity, and differentiation measures based on Hill numbers. Ecological Monographs 84, 21-44.
}
|
bdda01fdbeac2e611adeaa91ac8c19af16c94f60 | 4ef4f79e190d70e79c13e342919beb1cdc667742 | /add_opp_info.R | e269ed8e9526f8fccca185a7498a6b18b5acf716 | [] | no_license | capstat/NBA-back-to-backs | 65ce519497bb9c183baaf14fc714edf18b67fd67 | e15d5453da6e138fedd6bc8e80e640c5128f3078 | refs/heads/master | 2021-05-04T11:16:32.273560 | 2017-05-17T12:57:25 | 2017-05-17T12:57:25 | 47,477,254 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,317 | r | add_opp_info.R | library(dplyr)
all_schedules = read.csv(paste0("schedules/allNBA_gameresults_",
min(YEARS-1), "-", max(YEARS), ".csv"),
stringsAsFactors=FALSE)
#add team age
team_info = read.csv("nba_team_info.csv", stringsAsFactors=FALSE)
colnames(team_info)[1] = c("Team")
#season ex 2013-2014
team_info$Season = paste(team_info$year-1, team_info$year, sep="-")
all_schedules = left_join(all_schedules, team_info, by=c("Team","Season"))
#some variables represent data that happened
#after the game or we need from the game before
#if the game before went into overtime
all_schedules = all_schedules %>% group_by(link) %>%
mutate(Result_GB4=lag(Result),
OT_GB4=lag(OT),
Point_Diff_GB4=lag(Point_Diff),
Last10_GB4=lag(Last10))
#all points and win% inc the game
all_schedules = all_schedules %>% group_by(link, H_A) %>%
mutate(A_pytWpct_GB4=lag(A_pytWpct),
H_pytWpct_GB4=lag(H_pytWpct))
#add opponent info
all_schedules = left_join(all_schedules, all_schedules,
by=c("Date","Opp_Abbr"="Team"),
suffix=c(".",".Opp"))
write.csv(all_schedules,
paste0("schedules/allNBA_gameresults_",
min(YEARS-1), "-", max(YEARS), ".csv"),
row.names=FALSE)
|
0f9938e67f1c7e28660859b706277109cf649fcd | 0a9e9a4c2e194d70e9269db2b3490472efa21ffd | /man/censor_beh.Rd | f12a0228eda8219000dd23c6eb28db5266afe073 | [] | no_license | williamcioffi/sattagutils | 390424e9fb23fb0f99574c4eba844d7633288da1 | e42256c679756aef5f7f9a286eceb95d64c4d11f | refs/heads/main | 2022-06-06T13:11:14.049968 | 2022-05-29T03:35:08 | 2022-05-29T03:35:08 | 153,984,008 | 0 | 0 | null | 2021-11-27T08:36:14 | 2018-10-21T07:32:31 | R | UTF-8 | R | false | true | 633 | rd | censor_beh.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/censor_beh.R
\name{censor_beh}
\alias{censor_beh}
\title{censor a behavior stream}
\usage{
censor_beh(b1, depth = 50, duration = 33 * 60)
}
\arguments{
\item{b1}{a behavior data stream or a dataframe approximating one. requires columns \code{DurationMin}, \code{DurationMax}, \code{What}, \code{Start}, \code{End}, \code{DepthMax}, \code{DepthMin}.}
\item{depth}{the minimum depth required to qualify as a dive}
\item{duration}{the minimum duration required to qualify as a dive in seconds.}
}
\description{
based on depth and duration qualifications
}
|
30c925f0f6f9672a9e5c810204ef2cc530c02333 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ezplot/examples/roc_plot.Rd.R | 8f820cc8e491d1c79a986c3400189259581233f2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 818 | r | roc_plot.Rd.R | library(ezplot)
### Name: roc_plot
### Title: roc_plot
### Aliases: roc_plot
### ** Examples
library(ggplot2)
n = 10000
df = data.frame(actual = sample(c(FALSE, TRUE), n, replace = TRUE),
runif = runif(n))
df[["fitted"]] = runif(n) ^ ifelse(df[["actual"]] == 1, 0.5, 2)
ggplot(df) +
geom_density(aes(fitted, fill = actual), alpha = 0.5)
roc_plot(df, "actual", "actual")
roc_plot(df, "actual", "fitted")
roc_plot(df, "actual", "runif")
## No test:
roc_plot(df, "actual", "fitted", "sample(c(1, 2), n(), TRUE)")
roc_plot(df, "actual", "fitted",
"sample(c(1, 2), n(), TRUE)",
"sample(c(3, 4), n(), TRUE)")
roc_plot(df, "actual", "fitted",
"sample(c(1, 2), n(), TRUE)",
"sample(c(3, 4), n(), TRUE)",
"sample(c(5, 6), n(), TRUE)")
## End(No test)
|
1edf131182abe881d0ebf077ed6d27e37020bb2e | cac416597b20497c8521d6ddf84d4c38235228d4 | /Homework 9/Powers/man/square.Rd | 3076bca4e0511dafa9bd95d814e8589000b94f7d | [] | no_license | vanflad/STAT547-hw-fladmark-vanessa | 6da4f34cb2922503c96991e75cef85354db0b5ae | edf7ef8c685bad5123a72536f3ca19114a74cfd1 | refs/heads/master | 2021-08-24T04:11:28.981437 | 2017-12-08T01:41:43 | 2017-12-08T01:41:43 | 104,009,193 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 630 | rd | square.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/square.R
\name{square}
\alias{square}
\title{Square a vector}
\usage{
square(x)
}
\arguments{
\item{x}{The vector to be squared.}
}
\value{
A vector that is the square of \code{x}.
}
\description{
That's it -- this function just squares a vector!
}
\details{
A number multiplied by itself is called "squared" or "to the power of 2."
This function utilizes the pow() function in the same package to calculate squares of numbers.
This function also returns an informative error when a non-numeric input is given.
}
\examples{
square(1:10)
square(-5)
}
|
3fd008b72b4cd8f7459a223b18eecf4989063b74 | 79063458d54822e88b87a9cc1b930fbbeaaa1147 | /protocols/compare-cegs/plot_hits.R | c07558b05d93eb209e0a7c9c2632580e416a4c54 | [] | no_license | DataZou/haemonchus-comparison | d6aef0fe4a1057757dd1ae333f7e320a7a835370 | 302dbd23189bc3aaec0b7690c610d181d368ede6 | refs/heads/master | 2020-12-25T01:17:07.260717 | 2014-03-03T14:41:12 | 2014-03-03T14:41:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,372 | r | plot_hits.R | library(Vennerable)
v <- Venn(
SetNames = c('MHco3 complete', 'MHco3 partial', 'McMaster complete', 'McMaster partial'),
Weight = c(
'0110' = 2,
'0111' = 0,
'0000' = 0,
'0001' = 5,
'0011' = 0,
'0010' = 5,
'0101' = 8,
'0100' = 6,
'1111' = 0,
'1110' = 0,
'1100' = 0,
'1101' = 0,
'1010' = 166,
'1011' = 0,
'1001' = 33,
'1000' = 17))
svg('mhco3_vs_mcmaster_cegs.svg')
plot(v, doWeights = FALSE, type='ellipses', show = list(SetLabels=TRUE))
dev.off()
v <- Venn(SetNames=c('MHco3', 'McMaster'), Weight=c('01'=7, '10'=50, '11'=166))
svg('present_complete.svg')
plot(v, doWeights = FALSE, type='circles', show = list(SetLabels=TRUE))
dev.off()
v <- Venn(SetNames=c('MHco3', 'McMaster'), Weight=c('01'=10, '10'=23, '11'=209))
svg('present_comp+part.svg')
plot(v, doWeights = FALSE, type='circles', show = list(SetLabels=TRUE))
dev.off()
v <- Venn(SetNames=c('MHco3', 'McMaster'), Weight=c('01'=50, '10'=7, '11'=25))
svg('missing_complete.svg')
plot(v, doWeights = FALSE, type='circles', show = list(SetLabels=TRUE))
dev.off()
v <- Venn(SetNames=c('MHco3', 'McMaster'), Weight=c('01'=23, '10'=10, '11'=6))
svg('missing_comp+part.svg')
plot(v, doWeights = FALSE, type='circles', show = list(SetLabels=TRUE))
dev.off()
|
ce9a8900b614eb286dba38417d22129693de78cb | f81d187ba79cc6a5d999737c54dd1f086445fd87 | /test/units/testpkg/man/square_it.Rd | e19a4b462ee7030202100eef87386a0006b84320 | [
"MIT"
] | permissive | virtualstaticvoid/heroku-docker-r | 2897e14f7299dd3b9e1a69bb4b19520988fe4e09 | 39933ac9046d923db885c36e9fd35cdbf7c1f693 | refs/heads/main | 2022-12-20T07:10:34.445598 | 2022-12-15T18:04:41 | 2022-12-15T18:04:41 | 48,537,247 | 35 | 17 | MIT | 2022-07-10T11:17:22 | 2015-12-24T10:00:22 | R | UTF-8 | R | false | true | 245 | rd | square_it.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testpkg.R
\name{square_it}
\alias{square_it}
\title{Square it up}
\usage{
square_it(n)
}
\arguments{
\item{n}{Integer}
}
\value{
Square
}
\description{
Square it up
}
|
e711025bf782850d256e60cd378f776e06c3d3f2 | ad3e0dbb4eb75a4b03c4c853447790fb0681874b | /man/ds.pull_URL.Rd | 7d7934f71b888b1376d34a78ac134126c5309d3f | [] | no_license | deepfriar/dryscrape | 06b4a73a4b9deb379328f82c9870f579b93cd1b0 | 7e3f09f362ab34726766ee9c81ed0dcdbf26904e | refs/heads/master | 2022-06-14T22:46:28.893834 | 2019-11-03T01:48:37 | 2019-11-03T01:48:37 | 173,660,862 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 569 | rd | ds.pull_URL.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/5_pull.R
\name{ds.pull_URL}
\alias{ds.pull_URL}
\title{Don't Repeat Yourself}
\usage{
ds.pull_URL(url, agents, ref = "nhl.com")
}
\arguments{
\item{url}{character. A URL.}
\item{agents}{character vector. One or more HTTP user agent strings.}
\item{ref}{character. A referer. Default \code{"nhl.com"}.}
}
\value{
output of \code{RCurl::\link[RCurl]{getURL}}.
}
\description{
\code{ds.pull_URL()} is a one-line wrapper around \code{RCurl::getURL()} with repeatedly used options
}
|
11bc80b9ccd0a6dd44111e5dc0be0df70e31e1a1 | a2af327f39a760b55ec9f7c185a60f23d0ce4da1 | /R/tab-panel-histogram.R | 023959ac2b509808d4f7c9418ae028e33dba4e52 | [] | no_license | agberg/gogoplot | 3f9bfeea3433c9aa193a62cf5765bde7a1696672 | 3ce0722423b9fd9df670139627b6774437316f13 | refs/heads/master | 2021-07-11T20:48:23.297217 | 2017-10-14T05:06:24 | 2017-10-14T05:06:24 | 106,940,952 | 0 | 0 | null | 2017-10-14T15:53:20 | 2017-10-14T15:53:20 | null | UTF-8 | R | false | false | 141 | r | tab-panel-histogram.R | histogramTabPanel <- miniUI::miniTabPanel(
'histogram',
icon = shiny::icon('bar-chart'),
shiny::helpText('...under-construction...')
)
|
2f30523b5ad3b7328efd7e68b60b3d08cbebd17b | 90bb1dabe91ac66076eefee72e59f8bc75d3315d | /man/cross_prob_search.Rd | e7fa5694d18c454b57611ef614be7439b4ed7172 | [
"MIT"
] | permissive | shinjaehyeok/SGLRT_paper | 31b1dfaac5fdae07c8a106ed86802559b4ac3808 | cbca2c5d9cfc6a2a5fbc8af6a3183fa133b9c377 | refs/heads/master | 2022-12-30T23:09:42.248401 | 2020-10-24T07:21:30 | 2020-10-24T07:21:30 | 299,136,203 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 875 | rd | cross_prob_search.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cross_prob.R
\name{cross_prob_search}
\alias{cross_prob_search}
\title{Grid-searching the upper bound on the boundary crossing probability for the sequential GLR-like test.}
\usage{
cross_prob_search(g, d, nmin = 1L, m_upper = 1000L)
}
\arguments{
\item{g}{Constant threshold (positive numeric).}
\item{d}{Bregman divergence between the null and alternative spaces (positive numeric).}
\item{nmin}{Minimum sample size of the test (default = 1L).}
\item{m_upper}{Upper bound on the grid-search for m (default = 1e+3L).}
}
\value{
Grid-search result of the upper bound on the boundary crossing probability based on Theorem 1.
}
\description{
\code{cross_prob_search} is used to grid-search the upper bound on the boundary crossing probability of the sequential GLR-like test with the well-separated alternative space case, for a given constant threshold.
}
|
78bf6da09ea5444b7b0db913166084319218a238 | 1307cddff3051fc017330260d0dcc908f54e9167 | /man/ihme_cols.Rd | 4807fdd48761c352025c91af4e156cfa66402c72 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | mcooper/mattr | 20673ffbd20f4b61a19d90989337ea6e330867d4 | 0bddcc79210ed866adb5e7238dfd1051cf471142 | refs/heads/master | 2022-11-08T05:36:02.816456 | 2020-06-24T16:06:56 | 2020-06-24T16:06:56 | 261,308,979 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 324 | rd | ihme_cols.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ihme_cols.R
\docType{data}
\name{ihme_cols}
\alias{ihme_cols}
\title{Vector of colors that I like}
\format{An object of class \code{character} of length 10.}
\usage{
ihme_cols
}
\description{
Taken from an IHME visualization
}
\keyword{datasets}
|
c97fb8a9d6f8d1333e7d363df18f7ba5ded02791 | c8bce529daccc22533607fd83eeced0509b044c8 | /R/theme_din.R | c9b89f67556e4dbd40b9b006226d3c0b202db36b | [
"MIT"
] | permissive | camille-s/camiller | 4621954dac2954ed1d6ef60cc8b273ef533ab78e | 544ee2879a1c4f6bc5a75b854d1d3c57a99bae84 | refs/heads/main | 2022-03-05T22:57:39.198220 | 2022-01-21T21:52:56 | 2022-01-21T21:52:56 | 134,476,995 | 2 | 0 | NOASSERTION | 2022-01-21T20:43:39 | 2018-05-22T21:18:03 | R | UTF-8 | R | false | false | 3,573 | r | theme_din.R | #' Theme Din
#'
#' @description A clean theme especially suited for labeled bar charts or Cleveland dot plots. Designed based on the DIN family of fonts, but now defaults to font "Roboto Condensed".
#' This defaults to `base_family = "roboto"`, which depends on a) using `showtext` and `sysfonts` to get a font called "roboto", or b) already having a font loaded with this name. Use `sysfonts::font_add_google` or `sysfonts::font_add` to set a different font.
#' @param base_size Base font size
#' @param base_family Base font family; defaults to "roboto", as set by `showtext`
#' @param xgrid A logical for turning x-grid on or off, or "dotted", for a light dotted grid
#' @param ygrid A logical for turning y-grid on or off, or "dotted", for a light dotted grid
#' @param fallback_google Logical: if `TRUE` and `base_family` not currently loaded, will load Roboto Condensed from Google. If `FALSE`, will load system sans font. Defaults `TRUE`.
#' @seealso [sysfonts::font_add()], [sysfonts::font_add_google()]
#' @export
theme_din <- function(base_size = 14, base_family = "roboto", xgrid = FALSE, ygrid = TRUE, fallback_google = TRUE) {
showtext::showtext_auto(enable = TRUE)
loaded_fonts <- sysfonts::font_families()
if (!base_family %in% loaded_fonts) {
if (fallback_google) {
message(sprintf("Font %s not found. ", base_family), "Substituting with font Roboto Condensed as family 'roboto'")
sysfonts::font_add_google(name = "Roboto Condensed", family = "roboto")
base_family <- "roboto"
} else {
message(sprintf("Font %s not found. ", base_family), "Substituting with system sans font.")
base_family <- "sans"
}
}
out <- ggplot2::theme_light(base_size = base_size, base_family = base_family) +
ggplot2::theme(
plot.caption = ggplot2::element_text(vjust = 1, size = ggplot2::rel(0.7), color = "gray30", margin = ggplot2::margin(12, 0, 0, 0)),
axis.ticks = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_line(colour = "gray85"),
panel.grid.minor = ggplot2::element_blank(),
axis.title = ggplot2::element_text(face = "bold", colour = "gray20", size = ggplot2::rel(0.8)),
axis.text = ggplot2::element_text(color = "gray30", size = ggplot2::rel(0.8)),
plot.title = ggplot2::element_text(face = "bold", colour = "gray10", size = ggplot2::rel(1.1)),
plot.subtitle = ggplot2::element_text(color = "gray20"),
plot.title.position = "plot",
plot.caption.position = "panel",
legend.title = ggplot2::element_text(size = ggplot2::rel(0.9)),
legend.text = ggplot2::element_text(size = ggplot2::rel(0.75)),
legend.key.width = grid::unit(1.1, "lines"),
legend.key.height = grid::unit(0.8, "lines"),
panel.background = ggplot2::element_rect(fill = "gray100"),
panel.border = ggplot2::element_blank(),
strip.background = ggplot2::element_rect(fill = "gray95"),
strip.text = ggplot2::element_text(color = "gray20"))
if(is.logical(xgrid)) {
if(!xgrid) {
out <- out + ggplot2::theme(panel.grid.major.x = ggplot2::element_blank())
}
} else if(xgrid == "dotted") {
out <- out + ggplot2::theme(panel.grid.major.x = ggplot2::element_line(color = "gray92", size = 1, linetype = "22"))
}
if(is.logical(ygrid)) {
if(!ygrid) {
out <- out + ggplot2::theme(panel.grid.major.y = ggplot2::element_blank())
}
} else if(ygrid == "dotted") {
out <- out + ggplot2::theme(panel.grid.major.y = ggplot2::element_line(color = "gray92", size = 1, linetype = "22"))
}
return(out)
}
|
9921d06f0fced713ad62429d6cafca3ef24a6c6d | 1dcfea8d5cdc1c7c5d0a96d89e639102da0dbbd4 | /man/CreateTimeunits.Rd | 8fec7faef11ab01c8dfaea65919f3623e6095cf3 | [] | no_license | aukkola/FluxnetLSM | 707295d0dd4ccf1f5b43b09896b947e5f10b5e84 | 2716bc87bcc2ba148de7896bfad7fe6631639431 | refs/heads/master | 2023-06-24T20:21:45.371934 | 2023-06-20T05:27:53 | 2023-06-20T05:27:53 | 73,448,414 | 29 | 15 | null | 2022-10-04T23:53:08 | 2016-11-11T05:24:51 | R | UTF-8 | R | false | true | 239 | rd | CreateTimeunits.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Timing_general.R
\name{CreateTimeunits}
\alias{CreateTimeunits}
\title{Creates time unit}
\usage{
CreateTimeunits(starttime)
}
\description{
Creates time unit
}
|
b03ee42096cc38e716ebcdaa8db8e12663f6da3c | fd12705603e3ba863935d6de229303763acb2748 | /src/homeworks/onlinedatasales/campaign_5_1_cv_gbm.r | 51c9a1a060483615fb1bff6999253ffcb104a107 | [] | no_license | jsrawan-mobo/mrbigdata | 59ffbaca07ba5f605387c96140016d1529086398 | 179165e38228d17e9994287160ba77c9600771ad | refs/heads/master | 2021-01-17T22:04:16.968566 | 2014-09-17T07:24:08 | 2014-09-17T07:24:08 | 1,648,356 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,459 | r | campaign_5_1_cv_gbm.r | #
#
# Submitted at 5.1 RMLSE =
# Same as campaign 4_2 but with cross-validation loop
#
#
rm(list=ls())
require(gbm)
cleanInputDataForGBM <- function(X) {
names(X);
for(i in 1:length(X)) {
name = names(X)[i]
print (name)
col = X[,i]
index = which(is.na(col))
if ( substr(name,1,3) == 'Cat' ) {
col[index] = "Unknown"
X[,i] <- as.factor(col)
}
if ( substr(name,1,4) == 'Quan' ) {
column_mean = mean(col, na.rm = TRUE)
col[index] = column_mean
X[,i] <- as.numeric(col)
}
if ( substr(name,1,4) == 'Date' ) {
column_mean = mean(col, na.rm = TRUE)
col[index] = column_mean
X[,i] <- as.numeric(col)
}
result = is.factor(X[,i])
print(result);
}
return (X)
}
computeRMSLE <- function(Ysimulated, Yreal) {
#pick every 3rd row
#Ysimulated <- Ysimulated[seq(1, nrow(Ysimulated), 3), ]
#Yreal <- Yreal[seq(1, nrow(Yreal), 3), ]
#zero out negative elements
Ysimulated <- ifelse(Ysimulated<0,0,Ysimulated)
Yreal <- ifelse(Yreal<0,0,Yreal)
#initialize values
rmsle <- 0.0
n <- 0
#perform calculations
Ysimulated <- log(Ysimulated + 1)
Yreal <- log(Yreal + 1)
#n <- nrow(Yreal) * ncol(Yreal)
#for vectors, n is the length of the vector
n <- length(Yreal)
rmsle <- sqrt(sum((Ysimulated - Yreal)^2)/n)
return (rmsle)
}
#setup data
#setwd("C:\\Projects\\R")
data <- read.table(file="TrainingDataset.csv",header=TRUE, sep=",")
testData <- read.table(file="TestDataset.csv",header=TRUE, sep=",")
X <- data[,13:29]
#add the difference between two dates as another variable
daysbetweencampaigns <- as.numeric(data[,14]-data[,19])
X <- cbind(X, daysbetweencampaigns)
# --- Fragment of a monthly sales forecasting pipeline. The upstream part of
# --- this script (not visible here) defines `data`, `X`,
# --- cleanInputDataForGBM() and computeRMSLE().
# NOTE(review): YMonthlyFractions is initialized but never used below.
YMonthlyFractions <- matrix(nrow = 1, ncol = 12)
# Per-month targets: log of each of the 12 monthly sales columns, with the
# entries for missing months set to 0 after the log.
YMonthlySales <- as.matrix(data[,1:12])
YMonthlySales <- log(YMonthlySales)
YMonthlySales[is.na(YMonthlySales)] <- 0.0
YTotalSales <- as.numeric(data[,1])
#Y - labels as sum of all month sales
# Annual target: log of the row-wise sum over the 12 monthly columns.
for(i in 1:nrow(data)){
YTotalSales[i] <- log(sum(data[i,1:12],na.rm=TRUE))
}
#cleanup data - factor variables are still problematic on prediction
X <- cleanInputDataForGBM(X)
#train and cross-validate the model using 10-fold cv
numberOfRows <- nrow(X)
numberOfXColumns <- ncol(X)
numFolds = 10
rmsles <- rep(0, numFolds)
# NOTE(review): folds are assigned deterministically by row position (every
# numFolds-th row), not by random sampling.
for(cvFold in 1:numFolds){
#split the input space into training and test data
testRows <- seq(from=cvFold, to=numberOfRows, by=numFolds)
XCVTest <- X[testRows, 1:numberOfXColumns]
XCVTrain <- X[-testRows, 1:numberOfXColumns]
YCVTestExpected <- YTotalSales[testRows]
YCVTrain <- YTotalSales[-testRows]
#estimate and predict total sales for data in the training set
# GBM hyperparameters. NOTE(review): depth and minObs are defined but never
# passed to gbm(), so gbm()'s defaults for interaction.depth/n.minobsinnode
# apply -- confirm intent.
gdata <- cbind(YCVTrain,XCVTrain)
ntrees <- 500
depth <- 5
minObs <- 10
shrink <- 0.001
folds <- 10
mo1gbm <- gbm(YCVTrain~. ,data=gdata,
distribution = "gaussian",
n.trees = ntrees,
shrinkage = shrink,
cv.folds = folds)
YCVTestPredictedAnnual <- predict.gbm(mo1gbm, newdata=XCVTest, n.trees = ntrees)
numberOfRowsToTest <- length(YCVTestPredictedAnnual)
YCVTestPredictedMonthly <- matrix(nrow = numberOfRowsToTest, ncol = 12)
YCVTrainMonthly <- YMonthlySales[-testRows,]
#now estimate and predict individual months' sales
for( i in 1:12 ) {
#get the fraction of total sales for a given month
# (ratio of log monthly sales to log annual sales; NaN from 0/0 becomes 0)
YCVTrainMonthly[,i] <- YCVTrainMonthly[,i]/YCVTrain
YCVTrainMonthly[is.na(YCVTrainMonthly)] <- 0.0
#store that fraction in YCVTrainThisMonth
YCVTrainThisMonth <- YCVTrainMonthly[,i]
gdata <- cbind(YCVTrainThisMonth, XCVTrain)
#fit the model on training data
mo2gbm <- gbm(YCVTrainThisMonth~. ,
data=gdata,
distribution = "gaussian",
n.trees = ntrees,
shrinkage = shrink,
cv.folds = folds)
#apply the model to test data
monthlySalesOfTotal <- predict.gbm(mo2gbm, newdata=XCVTest, n.trees = ntrees)
#save monthly sales predictions
# predicted monthly log-sales = predicted fraction * predicted annual log-sales
YCVTestPredictedMonthly[,i] <- monthlySalesOfTotal * YCVTestPredictedAnnual
}
# CV folds to actual.
# Score this fold: RMSLE on the back-transformed (exp) sales.
YCVTestExpectedMonthlySales <- YMonthlySales[testRows,]
YCVTestExpectedMonthlySales[is.na(YCVTestExpectedMonthlySales)] <- 0.0
rmsles[cvFold] <- computeRMSLE(exp(YCVTestPredictedMonthly), exp(YCVTestExpectedMonthlySales))
}
# Average RMSLE across the folds.
cvError <- sum(rmsles)/numFolds
|
ae54e683e41483f4b72f4066243aa64fe71133da | 713234ecbf874b802377c9df7159ff4fa7219f6a | /run_analysis.R | c4963278537be3544769345de289391424c30ce9 | [] | no_license | rtsuther/Cleaning_Data_Project | 199468f53e0b8c487b6b24f0888508a423b14a3e | aea5088a8aae0078992175b75b868975479c50c7 | refs/heads/master | 2016-09-05T15:02:04.109866 | 2014-12-21T01:53:10 | 2014-12-21T01:53:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,640 | r | run_analysis.R | # run_analysis.R
# R script to take UCI HAR data and turn it into a tidy data set.
#
# Reads the raw "UCI HAR Dataset" from disk, keeps only the mean()/std()
# feature columns, labels activities with descriptive names, merges the train
# and test partitions, and writes one averaged observation per
# subject/activity pair to "tidy_averaged_set.txt".
library(plyr)
library(dplyr)

# Lookup tables: activity code -> name, and the 561 raw feature names.
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt", header = FALSE, sep = "")
features <- read.table("./UCI HAR Dataset/features.txt", header = FALSE, sep = "")

# Make the feature labels more meaningful.
features$V2 <- gsub("^t", "time", features$V2)
features$V2 <- gsub("^f", "frequency", features$V2)
features$V2 <- gsub("BodyBody", "Body", features$V2)

# Fix duplicated feature names: these index ranges lost their -X/-Y/-Z suffix
# in the raw file.
axis_fix <- data.frame(
  start = c(303, 317, 331, 382, 396, 410, 461, 475, 489),
  end   = c(316, 330, 344, 395, 409, 423, 474, 488, 502),
  axis  = rep(c("-X", "-Y", "-Z"), times = 3),
  stringsAsFactors = FALSE
)
for (i in seq_len(nrow(axis_fix))) {
  idx <- axis_fix$start[i]:axis_fix$end[i]
  features$V2[idx] <- paste0(features$V2[idx], axis_fix$axis[i])
}

# Columns that are true means ("mean(" literally, which excludes columns that
# merely contain the word mean), plus all standard-deviation columns.
mean_col <- grep("mean(", features$V2, fixed = TRUE)
mean_std_cols <- c(mean_col, grep("std", features$V2))

# Map numeric activity codes to their descriptive labels.
# FIX: replaces the original O(labels x rows) nested loops with a vectorized
# lookup; the result is identical.
label_activities <- function(codes, labels) {
  as.character(labels$V2[match(codes, labels$V1)])
}

# --- Training partition ----------------------------------------------------
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt", header = FALSE, sep = "")
names(subject_train) <- "Subject"
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt", header = FALSE, sep = "")
names(x_train) <- features$V2
x_train <- x_train[, mean_std_cols]
y_train <- read.table("./UCI HAR Dataset/train/Y_train.txt", header = FALSE, sep = "")
y_train$V1 <- label_activities(y_train$V1, activity_labels)
names(y_train) <- "Activity"
combined_train <- cbind(subject_train, y_train, x_train)

# --- Test partition --------------------------------------------------------
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt", header = FALSE, sep = "")
names(subject_test) <- "Subject"
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE, sep = "")
names(x_test) <- features$V2
x_test <- x_test[, mean_std_cols]
y_test <- read.table("./UCI HAR Dataset/test/Y_test.txt", header = FALSE, sep = "")
y_test$V1 <- label_activities(y_test$V1, activity_labels)
names(y_test) <- "Activity"
combined_test <- cbind(subject_test, y_test, x_test)

# Combine the test and train data sets into one tidy set.
tidy_combined_set <- rbind(combined_test, combined_train)

# Average every retained measurement per participant per activity.
tidy_averaged_set <- tbl_df(tidy_combined_set)
tidy_averaged_set <- tidy_averaged_set %>%
  group_by(Subject, Activity) %>%
  summarise_each(funs(mean))

# Write the result.
# FIX: the original used the partially matched argument name 'row.name';
# the documented write.table() argument is 'row.names'.
write.table(tidy_averaged_set, "tidy_averaged_set.txt", row.names = FALSE)
|
ce289978447f5e794f176a829e3248be9a82fecd | aeaa9ac30428b8df7e88d980da1d727925938d3e | /R/pit.R | df15660164b0a27acc4c1d658876498c389bb03c | [] | no_license | cran/tscount | 826b5c77cf3940cf075968146f8e064a3ac3bf2d | e804ef82017773f515570a19de424919d3e44797 | refs/heads/master | 2021-01-18T22:05:17.334697 | 2020-09-08T06:00:03 | 2020-09-08T06:00:03 | 30,640,940 | 6 | 6 | null | null | null | null | UTF-8 | R | false | false | 1,545 | r | pit.R | pit <- function(...) UseMethod("pit")
pit.default <- function(response, pred, distr=c("poisson", "nbinom"), distrcoefs, bins=10, ...){
  # Non-randomized probability integral transform (PIT) histogram for count
  # data. Under a correctly specified model the bars fluctuate around the
  # dashed reference line at height 1.
  #
  # Args:
  #   response:   observed counts.
  #   pred:       fitted/predicted mean values, same length as response.
  #   distr:      conditional distribution ("poisson" or "nbinom").
  #   distrcoefs: additional distribution coefficients passed to pdistr().
  #   bins:       number of histogram bins on [0, 1].
  #   ...:        further arguments forwarded to plot(); they override the
  #               defaults set below.
  #
  # Side effect: draws the PIT histogram on the current graphics device.
  n <- length(pred)
  u <- seq(0, 1, length = bins + 1)
  pit_cdf <- numeric(length(u))
  # FIX: seq_len(n) instead of 1:n so an empty input does not iterate c(1, 0).
  for (t in seq_len(n)) {
    # Conditional CDF at the observation and just below it; the PIT mass of
    # observation t is spread uniformly over [F(y_t - 1), F(y_t)].
    P_x <- pdistr(response[t], meanvalue = pred[t], distr = distr, distrcoefs = distrcoefs)
    if (response[t] != 0) {
      P_x_1 <- pdistr(response[t] - 1, meanvalue = pred[t], distr = distr, distrcoefs = distrcoefs)
    } else {
      P_x_1 <- 0
    }
    pit_cdf <- pit_cdf + punif(u, P_x_1, P_x) / n
  }
  # Package the binned PIT as a "histogram" object so plot.histogram() works.
  # FIX: the component was misspelled "equidits"; plot.histogram() looks for
  # "equidist" and silently recomputed it from the (equidistant) breaks.
  histo <- list(breaks = u, counts = diff(pit_cdf) * n, density = diff(pit_cdf) * bins,
                mids = (u[-(bins + 1)] + u[-1]) / 2, xname = "PIT", equidist = TRUE)
  class(histo) <- "histogram"
  # Default plot settings; anything supplied via ... takes precedence.
  plot_args <- modifyList(list(main = "Non-randomized PIT histogram",
                               xlab = "Probability integral transform",
                               ylab = "Density", freq = FALSE,
                               ylim = range(0, histo$density)), list(...))
  do.call("plot", args = c(list(x = histo), plot_args))
  # (A simultaneous confidence band for the bars could be drawn here; see the
  # commented-out code in earlier revisions of this function.)
  abline(h = 1, lty = "dashed", col = "blue")
}
pit.tsglm <- function(object, bins=10, ...){
  # PIT histogram method for fitted "tsglm" models: pulls the response,
  # fitted means and distribution settings out of the model object and
  # delegates to pit.default().
  pit.default(
    response   = object$response,
    pred       = fitted(object),
    distr      = object$distr,
    distrcoefs = object$distrcoefs,
    bins       = bins,
    ...
  )
}
|
bbf3da46c69388692c21852f5f928c998bf2fe72 | 13f518e49e5908210db53c5b68df3d669de7bb93 | /man/refLengths.Rd | c994d82aa19681882d50ee8b3a57d304b1176a60 | [] | no_license | PerHapsH/MutationTimeR | 7930a7356cfdbde9859e3ed9d749f937b7f56214 | e4e266a494482face03e831322a9df00b1b0ff93 | refs/heads/master | 2023-03-18T22:22:28.801501 | 2020-12-12T19:14:42 | 2020-12-12T19:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 263 | rd | refLengths.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{data}
\name{refLengths}
\alias{refLengths}
\title{Reference lengths for hg37d5}
\description{
Reference lengths for hg37d5
}
\examples{
data(refLengths)
refLengths
}
|
d09a337e636879b4c9ab1187e2d64f7b1832f3bb | 7e276072d233e583e975b63024503689f4149619 | /man/csvFileInput.Rd | ae616840f7372e52aee879f5a6068bb6915466fe | [] | no_license | Davide-bll/Shinymod | 924203095ddc536bdaead56b39b77593f1ce4bdb | 8c117e56cebdee184f6f76491dd0c5bf6460b382 | refs/heads/master | 2023-04-06T05:59:03.448747 | 2021-04-13T18:22:24 | 2021-04-13T18:22:24 | 260,253,887 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 329 | rd | csvFileInput.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loaddata_module.R
\name{csvFileInput}
\alias{csvFileInput}
\title{UI function for loading CSV data}
\usage{
csvFileInput(id, label = "CSV file")
}
\arguments{
\item{id}{Id module}
}
\value{
UI module
}
\description{
UI function for loading CSV data
}
|
f68128d27b584b741b09d671c700ae864088a125 | aaad6e135dc094b25a172a4ac2d9f2538af24fe7 | /lab_lc.R | ef35c9cc5b40347986af67ae68769d8bf644cb7f | [] | no_license | linxy29/HW_casual-inference | ac50316b0e54846e52b9c7dbafb52022d33b998f | e5f10cbf50b97d0e37a0df2486caac5c7e39c26d | refs/heads/master | 2020-07-26T02:33:58.169349 | 2020-02-16T15:13:36 | 2020-02-16T15:13:36 | 208,506,256 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 631 | r | lab_lc.R | ######MEDIATION ANALYSIS LAB#######
###### MEDIATION ANALYSIS LAB ######
# Weighted logistic regressions for a case-control mediation analysis: cases
# and controls are re-weighted so the pseudo-population matches the known
# disease prevalence, then mediation::mediate() decomposes the SNP effect on
# case status into direct and smoking-mediated components.
x <- read.csv("./sim_data_lung.csv")

p <- mean(x$case)       # case proportion in the case-control sample
prevalence <- 0.082642  # assumed population disease prevalence
# FIX: the original stored the prevalence in a variable named 'pi', masking
# the base constant base::pi; renamed to 'prevalence'.
# Inverse-probability weight for cases (case == 1) and controls (case == 0).
x$w <- ifelse(x$case == 1, prevalence / p, (1 - prevalence) / (1 - p))

library(mediation)
library(survey)

# Outcome model: case status on the SNP-by-smoking interaction plus covariates.
yreg <- svyglm(case ~ snp * smoking + sex + colgrad + age,
               family = binomial(link = "logit"),
               design = svydesign(~1, weights = ~w, data = x))
summary(yreg)

# Mediator model: smoking on the SNP plus covariates.
mreg <- svyglm(smoking ~ snp + sex + colgrad + age,
               family = binomial(link = "logit"),
               design = svydesign(~1, weights = ~w, data = x))
summary(mreg)

# FIX: removed the stray interactive help call '?mediate', which opened a
# help page every time the script was sourced.
output.mediation <- mediate(mreg, yreg, treat = "snp", mediator = "smoking",
                            control.value = 0, treat.value = 1)
summary(output.mediation)
|
88773f9236358eff2abb090870831130cabcc9ea | 326e19400e7e5955584aaac458cfb22efe98cbd4 | /src/createTIC/tokenisers/discrete/text-word2vec-cosinesim/tokeniser.R | 9907b3b8e829e6212134dca4c5b469b75f6103a3 | [
"Apache-2.0"
] | permissive | vuw-c2lab/transcendental-information-cascades | 75c943da5b04d31c62f8f5813eaf8367576df108 | 7c650b751df8e00bf105cccffbd85e98beeca9c1 | refs/heads/master | 2020-08-07T01:46:58.006733 | 2020-06-08T20:00:33 | 2020-06-08T20:00:33 | 213,244,838 | 1 | 2 | Apache-2.0 | 2020-04-14T10:24:51 | 2019-10-06T21:13:41 | HTML | UTF-8 | R | false | false | 1,724 | r | tokeniser.R | if (!require(wordVectors)) devtools::install_github("bmschmidt/wordVectors")
# MAIN FUNCTION -----------------------------------------------------------
# vector init
# Pre-trained word2vec embeddings used for the cosine-similarity expansion
# performed inside tokenise().
wordVec = read.vectors("data/w2v-novel450.bin") #english
# POS model download
#model <- udpipe::udpipe_download_model(language = "english")
#model <- udpipe::udpipe_load_model(file = model$file_model)
# or load from file
# udpipe part-of-speech model; alternative language models are kept below,
# commented out, for reference.
model <- udpipe::udpipe_load_model(file = "data/english-ewt-ud-2.4-190531.udpipe") #english
#model <- udpipe::udpipe_load_model(file = "/Users/MLR/indonesian-gsd-ud-2.4-190531.udpipe") #bahasa
#model <- udpipe::udpipe_load_model(file = "/Users/MLR/portuguese-bosque-ud-2.4-190531.udpipe") #portuguese
tokenise <- function(dataSource, no_cores = 1) {
  # Turn each line of a CSV source file into a comma-separated token string.
  #
  # For every document (row of column X1): annotate with the udpipe POS model,
  # keep only verbs, nouns and adjectives, and expand each kept token into its
  # word2vec neighbours (top 10 candidates) whose cosine similarity exceeds
  # 0.9. The unique, sorted neighbour set per document is collapsed into one
  # comma-separated string.
  #
  # Args:
  #   dataSource: file name under <getwd()>/data/, read with readr::read_csv.
  #   no_cores:   accepted for interface compatibility; not used here.
  #
  # Returns: data.frame with columns x (document index) and y (token string).
  docs <- readr::read_csv(paste0(getwd(), "/data/", dataSource), col_names = FALSE)
  docs$X1 <- tolower(docs$X1)

  # Nearest embedding neighbours of one token, filtered by similarity > 0.9.
  expand_token <- function(tok) {
    neighbours <- closest_to(wordVec, tok, n = 10)
    neighbours <- neighbours[which(neighbours[, 2] > 0.9), ]
    neighbours$word
  }

  doc_keywords <- lapply(docs$X1, function(doc_text) {
    annotated <- as.data.frame(udpipe::udpipe_annotate(model, x = doc_text))
    kept <- annotated[which(annotated$upos == "VERB" |
                            annotated$upos == "NOUN" |
                            annotated$upos == "ADJ"), ]
    unique(unlist(lapply(kept$token, expand_token)))
  })

  token_strings <- vapply(
    doc_keywords,
    function(kw) paste(sort(unique(kw)), collapse = ", "),
    character(1)
  )

  data.frame(x = seq_along(token_strings),
             y = token_strings,
             stringsAsFactors = FALSE)
}
# SUPPORT FUNCTIONS -------------------------------------------------------
|
6e3322c5282dee07682b82abddd69b65b8951268 | 44598c891266cd295188326f2bb8d7755481e66b | /DbtTools/GraphAlgorithms/man/dbt.GraphAlgorithms-package.Rd | 5c50356834a9dcb1fd4341d0536714a86294d982 | [] | no_license | markus-flicke/KD_Projekt_1 | 09a66f5e2ef06447d4b0408f54487b146d21f1e9 | 1958c81a92711fb9cd4ccb0ea16ffc6b02a50fe4 | refs/heads/master | 2020-03-13T23:12:31.501130 | 2018-05-21T22:25:37 | 2018-05-21T22:25:37 | 131,330,787 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 262 | rd | dbt.GraphAlgorithms-package.Rd | \name{dbt.GraphAlgorithms-package}
\alias{dbt.GraphAlgorithms-package}
\alias{dbt.GraphAlgorithms}
\docType{package}
\title{
dbt.GraphAlgorithms-package
}
\description{
....
}
\details{
....
}
\author{
Michael Thrun, Florian Lerch, Alfred Ultsch
} |
b9cac0db6a2e7542a151d90fcb6df4c833d3456c | fa60f8262586afbf25096cfb954e5a9d391addf7 | /R_Machine_Learning/r_16_3(k_Fold).R | d06327ef646de6a443a147dfdbad7c1307ce2771 | [] | no_license | pprasad14/Data_Science_with_R | ce5a2526dad5f6fa2c1fdff9f97c71d2655b2570 | 3b61e54b7b4b0c6a6ed0a5cc8243519481bb11b9 | refs/heads/master | 2020-05-05T08:56:39.708519 | 2019-04-06T20:42:11 | 2019-04-06T20:42:11 | 179,884,402 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,024 | r | r_16_3(k_Fold).R | # load Social Networks Ads dataset
# Kernel SVM on the Social Network Ads data with k-fold cross-validation and
# a caret grid search for the RBF hyper-parameters.

# Load the data set; keep Age, EstimatedSalary and the Purchased outcome.
dataset <- read.csv("social_network_ads.csv")
dataset <- dataset[3:5]
# Encode the outcome as a factor so svm() performs classification.
dataset$Purchased <- as.factor(dataset$Purchased)

# Stratified 75/25 train/test split.
library(caTools)
set.seed(123)
split <- sample.split(dataset$Purchased, 0.75)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)

# Feature scaling (column 3 is the factor outcome and is left alone).
training_set[-3] <- scale(training_set[-3])
test_set[-3] <- scale(test_set[-3])

# Baseline SVM.
# FIX: the original used type = "class", which is not a valid e1071::svm()
# type and made the call stop with "wrong type specification!".
library(e1071)
classifier <- svm(Purchased ~ ., data = training_set, type = "C-classification")

# Predictions and confusion matrix on the held-out test set.
pred <- predict(classifier, newdata = test_set)
cm <- table(test_set$Purchased, pred)
cm
1 - sum(diag(cm)) / sum(cm)  # misclassification (error) rate

#####
# 10-fold cross-validation of a radial-kernel SVM on the training set.
library(caret)
set.seed(123)
folds <- createFolds(training_set$Purchased, k = 10)
folds

# Train and evaluate one SVM per fold; returns one accuracy per fold.
cv <- lapply(folds, function(x) {
  training_fold <- training_set[-x, ]
  test_fold <- training_set[x, ]
  classifier <- svm(Purchased ~ .,
                    data = training_fold,
                    type = "C-classification",
                    kernel = "radial")
  pred <- predict(classifier, newdata = test_fold[, -3])
  cm <- table(test_fold[, 3], pred)
  accuracy <- (cm[1, 1] + cm[2, 2]) / (cm[1, 1] + cm[1, 2] + cm[2, 1] + cm[2, 2])
  return(accuracy)
})
cv  # the 10 per-fold accuracies
accuracy <- mean(as.numeric(cv))
accuracy

########
# Grid search over the RBF hyper-parameters with caret.
library(caret)
set.seed(123)
classifier <- train(Purchased ~ ., data = training_set, method = 'svmRadial')
classifier
classifier$bestTune

########
# Refit with the tuned hyper-parameters.
# FIX: e1071::svm() has no 'sigma' argument (that is kernlab/caret's name);
# the original 'sigma = 1.2' was silently swallowed by '...' so the default
# gamma was used. e1071's 'gamma' is the same RBF parameter as kernlab's
# 'sigma' (both multiply the squared distance in the kernel).
classifier <- svm(Purchased ~ ., data = training_set,
                  type = "C-classification",
                  kernel = "radial",
                  cost = 1,
                  gamma = 1.2)

# Predictions and error rate of the tuned model.
pred <- predict(classifier, newdata = test_set)
cm <- table(test_set$Purchased, pred)
cm
1 - sum(diag(cm)) / sum(cm)  # misclassification (error) rate of the model
|
9fc3eee942681c0488669df3371754392bbe975e | dfe1acc0ce4df57a8526de721ecba3cc79946708 | /tests/batch/R.R | 579a7930713aa7f41669899f602b89f1dcacd575 | [
"Apache-2.0"
] | permissive | WydD/astral | 924d5c4bf79243d3eec53d281a44ae2e7dd381a4 | 950d630b432bf4ffd6275e6eec7bf32f8d0efcc6 | refs/heads/master | 2020-06-30T03:59:30.242861 | 2016-11-21T18:28:36 | 2016-11-21T18:28:36 | 74,392,753 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 44 | r | R.R | v,T
t=1
1,1
t=2
2,2
3,2
t=2
4,2
5,2
t=4
6,4
|
98ccbe65d1a4c2e79087bc60c352ffeda9ff6bc8 | 95ca4f9d0d2680b5d94a001c523830a057fe42df | /knit.R | e27665b2fcacc58ba7e831f8f00e3253abf03ca0 | [] | no_license | jruusu/RepData_PeerAssessment1 | f47796578603d55a53e82e44bef9b07aa5c0a186 | b29c45fa02d4ea53bf190c7bdeb1b6c4dac5b671 | refs/heads/master | 2020-12-25T12:28:45.945098 | 2015-02-08T10:01:16 | 2015-02-08T10:01:16 | 30,460,093 | 0 | 0 | null | 2015-02-07T15:35:59 | 2015-02-07T15:35:58 | null | UTF-8 | R | false | false | 151 | r | knit.R | # A little utility script for building Rmd into a html document
# Build the assignment report: knit PA1_template.Rmd to Markdown and convert
# the result to PA1_template.html in the working directory.
library(knitr)
# options = "" overrides knit2html's default markdown-to-HTML options
# (presumably to get plain HTML output) -- TODO confirm intent.
knit2html("PA1_template.Rmd", options="")
browseURL("PA1_template.html") |
dd467e4d35d01d790740df4e26920ecdd50cc1c0 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/FastSF/R/fsf.ada.R | 6667e8833b2171abdf36d205ae765b13d3ab9c6e | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 776 | r | fsf.ada.R | fsf.ada <- function(y, D, tau=1, s.max=20, eps=0.1, ddinv=NULL){
# Body of fsf.ada(): adaptive wrapper around fsf() that increases the
# sparsity level s in steps of `tau` until the mean squared error of the
# fitted signal drops below `eps` or s reaches `s.max`.
n <- length(y)
m <- dim(D)[1]
# Precompute (D D')^{-1} unless supplied by the caller; a small ridge (1e-6
# on the diagonal) is added when D D' is numerically ill-conditioned.
if(is.null(ddinv)){
ddt=D%*%t(D)
if(rcond(ddt)<1e-12){
ddinv <- Solve.banded(ddt+diag(rep(1e-6,m)),1,1, B = diag(1,dim(D)[1]))
}else{
ddinv <- Solve.banded(ddt,1,1, B = diag(1,dim(D)[1]))
}
}
kk <- 1
beta.all <- NULL
while(kk * tau < s.max){
# cat(kk, "\n")
s <- tau*kk
# Solve at the current sparsity level; each solution is appended as one
# column of the solution path beta.all.
re <- fsf(y = y, D = D, s = s, ddinv=ddinv)
beta <- re$beta
beta.all <- cbind(beta.all, beta)
z <- re$z
# NOTE(review): `u` is extracted but never used afterwards.
u <- re$u
mse <- mean((y-beta)^2)
# cat(mse, "\n")
# Stop once the fit is good enough or the sparsity budget is exhausted.
# NOTE(review): scalar condition written with `|`; `||` would be conventional.
if(mse < eps | s > s.max){
break
}else{
kk <- kk + 1
}
}
df <- 1:s
# Return the data, the last fit, its auxiliary variable z (as `v`), the whole
# solution path, and the degrees-of-freedom sequence 1..s.
return(list(y = y, beta = beta, v = z, beta.all = beta.all, df = df))
|
1fddcc9c7a8eafa49ef608e4fa1ac8f4ab999ec2 | ce08a0b0f7c32e5e7b7de81d9a1ed6dec4950cba | /toRData.R | 693c426100bbc4fa267b7ca444a3bdda66274f9e | [] | no_license | kuhana/Thesis-Code | 1b8a7e47e71d04fdc66264da7c5c9ae5db39dd6e | b056d3e2e93cac2745d6a539a93bcbce0cbb4a71 | refs/heads/master | 2021-01-11T18:23:25.932697 | 2017-01-20T05:49:36 | 2017-01-20T05:49:36 | 79,529,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 432 | r | toRData.R | loc = '/Users/hanaku/Documents/Thesis/Data/'
list = list.files(loc)
#f01001_1 = read.csv(file = "/Users/hanaku/Documents/Thesis/Data/NewRecoveredOriginPixel01001_1.csv", header = TRUE, sep = ',')
for(f in 1:length(list)){
#for(f in 1:1){
file = read.csv(paste(loc, list[f],sep=""))
name = paste0('f',substr(list[f],24,30))
save(file, file = paste0('/Users/hanaku/Documents/Thesis/RData/',substr(list[f],24,30),'.RData'))
} |
69e72f8b9bc4f46b7852414129b897777c780af9 | 0a04c131b8d20e7fef14dd4780cadd9aff3fe673 | /STAT4201 Advanced Data Analysis/Homework2/Ch3Qs32.R | 7ebf4a04c9afc239f9445bc6db91d397d7c97498 | [] | no_license | parth0708/Coursework | d49fb62cfcce71e754f3b2fb881a7b7c014757ce | 749ec8703999706cdd6954fe765c9215628e5b00 | refs/heads/master | 2021-01-19T04:27:56.185484 | 2016-06-12T19:51:59 | 2016-06-12T19:51:59 | 53,802,301 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 436 | r | Ch3Qs32.R | library(Sleuth3)
attach(ex0332)
group_Public <- subset(ex0332,Type == "Public")
group_Private <- subset(ex0332,Type == "Private")
boxplot(log(group_Public$InState),log(group_Private$InState))
t.test((log(group_Public$OutOfState)-log(group_Public$InState)), var.equal=TRUE)
t.test(log(group_Private$InState),log(group_Public$InState), var.equal=TRUE)
t.test(log(group_Private$OutOfState),log(group_Public$OutOfState), var.equal=TRUE) |
bd554800b36d9842ac5b91e81c890708dc212f3b | 1f9b135708835e4c542f9c0c743dfb49620f6685 | /man/crop_circ_bop.Rd | a80256c47912026ea4663f8db2028e2637510129 | [
"MIT"
] | permissive | kafetzakid/morphotype | 71c337e1b237df3d4a39f829f224e0f2f04e7f05 | 2e44431232701ce0186f93793dcd1b3caaa7a731 | refs/heads/main | 2023-08-11T14:54:53.943013 | 2023-07-30T14:06:55 | 2023-07-30T14:06:55 | 485,943,302 | 1 | 0 | MIT | 2023-07-30T14:06:56 | 2022-04-26T20:49:18 | R | UTF-8 | R | false | true | 1,068 | rd | crop_circ_bop.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crop_circ_bop.R
\name{crop_circ_bop}
\alias{crop_circ_bop}
\title{Crops circumference at bottom outer point}
\usage{
crop_circ_bop(outer.line, inner.line, marSc)
}
\arguments{
\item{outer.line}{a numeric 1D vector. The outer profile line.}
\item{inner.line}{a numeric 1D vector. The inner profile line.}
\item{marSc}{a numeric scalar. The number of pixels vertically after the outer wall of the base that will be included in the result.}
}
\value{
A list with the following items:
\itemize{
\item mati_scaled - a dataframe with numeric attributes. The x and y position of the points belonging to the circumference.
\item thrP - a numeric scalar. The x index value of the custom cut of the profile base.
\item indx - a numeric 1D vector. The y indexes with a value in the custom cut of the profile base.
}
}
\description{
Crops circumference at bottom outer point
}
\examples{
crop_circ_bop(outer.line = m19$outer.line, inner.line = m19$inner.line, marSc = 5)
}
\author{
Danai Kafetzaki
}
|
f35f85223ec2f561dfdb7255b51dbc2272c3dba1 | 7212c1fea0fd1e286ca10e978d24c9cc4ef05af7 | /EstadisticaLicFisica/ProgramasClase/Programa04Montoya.R | 7e4bdc8edf7cad71993ecbf4c22426a011762276 | [] | no_license | maps16/Estadistica | 92635e96f254946572dd4b8c0d82dcb4c028bd3a | c1bfd6c4123378903fc8cb8e83824c85e906f1e2 | refs/heads/master | 2021-01-10T07:11:55.266656 | 2016-04-04T17:02:46 | 2016-04-04T17:02:46 | 52,023,966 | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 2,176 | r | Programa04Montoya.R | ###############################################################################
#PROGRAMA: VALIDACION DE MODELOS #
#MONTOYA: 2016-02-15 #
###############################################################################
#ESCENARIO DE SIMULACION: DATOS EXPONENCIALES - SE AJUSTA MODELO EXPONENCIAL
Lambda0<-10
n<-50
#DATOS SIMULADOS
x<-rexp(n,1/Lambda0)
#DENSIDAD ESTIMADA Y EL HISTOGRAMA
EMVlambda<-mean(x)
hist(x, freq = FALSE)
curve(dexp(x,1/EMVlambda), col = 2, lty = 2, lwd = 2, add = TRUE)
#FUNCION DE DISTRIBUCION ESTIMADA Y LA DISTRIBUCIÓN EMPIRICA
z<-seq(0,max(x)+5,0.1)
Festimada<-pexp(z,1/EMVlambda)
Fempirica<-ecdf(x)
plot(Fempirica,main="Funciones de probabilidad acumuladas")
points(z,Festimada,type="l",lwd=2,lty=1,col=4)
#ESCENARIO DE SIMULACION: DATOS NORMALES - SE AJUSTA MODELO EXPONENCIAL
Media0<-2
Sd0<-0.5
n<-100
#DATOS SIMULADOS
x<-rnorm(n,Media0,Sd0)
#DENSIDAD ESTIMADA Y EL HISTOGRAMA
EMVlambda<-mean(x)
hist(x, freq = FALSE)
curve(dexp(x,1/EMVlambda), col = 2, lty = 2, lwd = 2, add = TRUE)
#FUNCION DE DISTRIBUCION ESTIMADA Y LA DISTRIBUCIÓN EMPIRICA
z<-seq(0,max(x)+5,0.1)
Festimada<-pexp(z,1/EMVlambda)
Fempirica<-ecdf(x)
plot(Fempirica,main="Funciones de probabilidad acumuladas")
points(z,Festimada,type="l",lwd=2,lty=1,col=4)
#ESCENARIO DE SIMULACION: DATOS EXPONENCIALES - SE AJUSTA MODELO EXPONENCIAL
Lambda0<-10
n<-50
#DATOS SIMULADOS
x<-rexp(n,1/Lambda0)
#GRAFICA Q-Q-PLOT
EMVlambda<-mean(x)
valpha<-(seq(1,n,1)-0.5)/n
QTE<-qexp(valpha,1/EMVlambda)
QE<-sort(x)
plot(QTE,QE,type="p",pch=19,cex=1.25,xlim=c(0,60),ylim=c(0,60))
segments(0,0,100,100,lwd=2,col=4)
#ESCENARIO DE SIMULACION: DATOS NORMALES - SE AJUSTA MODELO EXPONENCIAL
Media0<-2
Sd0<-0.5
n<-100
#DATOS SIMULADOS
x<-rnorm(n,Media0,Sd0)
#GRAFICA Q-Q-PLOT
EMVlambda<-mean(x)
valpha<-(seq(1,n,1)-0.5)/n
QTE<-qexp(valpha,1/EMVlambda)
QE<-sort(x)
plot(QTE,QE,type="p",pch=19,cex=1.25)
segments(0,0,100,100,lwd=2,col=4)
#DATOS TERREMOTOS
DatosTerremotos<-read.csv("all_month.csv")
Xobs<-DatosTerremotos$mag
FEmpTERREMOTOS<-ecdf(Xobs)
plot(FEmpTERREMOTOS)
|
ed7e4c9714a411f8386a033937993ebb20378c69 | 207fe6741c507222fe3efb446d8b4b70d52f454c | /Phillies/average_estimator_functions.R | 8c9ee65feee845e62893334d44ffd412d500a761 | [] | no_license | dlernz/Phillies_RD | b3b28151d5f630ea664c168997345eee711618eb | 815fda74dfa134c92abf70aa8635f68d28783d39 | refs/heads/master | 2021-01-18T22:47:51.751263 | 2016-11-18T19:02:43 | 2016-11-18T19:02:43 | 74,097,849 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,604 | r | average_estimator_functions.R | library("dplyr")
library("ggplot2")
### Download files
### Move downloaded folder to directory you wish to run script in
### Open RStudio, R Application
### In Console: Set working directory to filepath leading to downloaded folder was placed
### setwd(filePath)
### In Console: Verify working directory points to downloaded folder
### getwd()
### In Console: Load Script holding functions to perform analysis of player performance data
### source("average_estimator_functions.R")
### In Console: Run Script to output analysis of player performance data
### main()
main <- function(){
  # Entry point: estimate full-season batting averages from March/April data.
  #
  # Reads the two input CSVs, merges them on player name, estimates stabilized
  # K%/BB% and season batting averages for players with plate appearances, and
  # assigns the overall mean estimate to players with no March/April PAs.
  # Returns a table of Name, observed averages, estimate and percent error.
  batting <- read.csv("batting.csv")
  fangraphsData <- read.csv("Fangraphs_Player_Performance_March_April.csv")
  playerData <- merge(batting, fangraphsData, by = "Name", all.x = TRUE)

  # Players with at least one March/April plate appearance get an individual
  # estimate based on their stabilized rates.
  cleansed <- filter(playerData, MarApr_PA > 0)
  # FIX: the original called performRegression(), which is not defined in this
  # file; findEstimates() is the function that produces the *_stable rate
  # columns estimateBAvg() consumes (see its documentation).
  regressionData <- findEstimates(cleansed)
  estimatedAverages <- estimateBAvg(regressionData)

  # Players with no March/April plate appearances get the mean estimate.
  avgEstimatedBA <- mean(estimatedAverages$avgEstimate)
  noPA <- filter(playerData, MarApr_PA == 0)
  noPA$avgEstimate <- avgEstimatedBA
  noPA$percentError <- abs(100 * (noPA$FullSeason_AVG - noPA$avgEstimate)) / noPA$FullSeason_AVG

  # Combine both groups into a single output table.
  estimatesPA <- select(estimatedAverages, Name, MarApr_AVG, FullSeason_AVG, avgEstimate, percentError)
  noPAEstimates <- select(noPA, Name, MarApr_AVG, FullSeason_AVG, avgEstimate, percentError)
  rbind(estimatesPA, noPAEstimates)
}
### Utilizes input data table from findEstimates function call and a max/min for league BABIP, and
### a player's batting average for games in March/April to estimate a player's batting average.
### Input: regressionData - Table containing player performance data for March/April games and estimates for each player's season long strikeout rate and walk rate
### Output: battingAverages <- Data table containing estimates for a player's season long batting average under "estAverage" column and percent error from actual season average
estimateBAvg <- function(regressionData) {
# Estimate each player's full-season batting average from stabilized rates.
#
# For each candidate BABIP value this computes
#   AVG = ((PA - BB*_PA - K*_PA) * BABIP + HR) / (PA - BB_PA)
# where the stabilized K%/BB% feed the numerator and the raw BB% feeds the
# denominator. The final avgEstimate averages the observed March/April AVG
# with the BABIP = 0.340 and 0.270 columns (positions 6 and 7 of
# battingAverages after the cbind below).
battingAverages <- select(regressionData, Name, MarApr_AVG, FullSeason_AVG)
# Candidate BABIP values, high to low; one estimate column per value.
BABIPVals <- c(0.380, 0.360, 0.340, 0.270, 0.250, 0.230)
avgContainer <- data.frame(matrix(NA, nrow = nrow(regressionData), ncol = length(BABIPVals)))
colnames(avgContainer) <- BABIPVals
for (index in 1: length(BABIPVals)){
BABIPVal <- BABIPVals[index]
# Rates as proportions; *_stable are the shrunk season-long estimates.
strikeOutRate <- regressionData$K_percent_stable/100.
BBRate_stable <- regressionData$BB_percent_stable/100.
BBRate_orig <- regressionData$BB_percent/100.
plateAppears <- regressionData$MarApr_PA
homers <- regressionData$HR
# Estimated hits: (PA net of stabilized walks and strikeouts) * BABIP + HR.
numerator <- (((plateAppears - (BBRate_stable*plateAppears) - (strikeOutRate*plateAppears))*BABIPVal) + homers)
# At-bat proxy: plate appearances minus (raw-rate) walks.
denominator <- (plateAppears - (BBRate_orig * plateAppears))
estAvg <- numerator/denominator
avgContainer[,index] <- estAvg
}
battingAverages <- cbind(battingAverages, avgContainer)
# Positional select: columns 6 and 7 are the BABIP 0.340 and 0.270 estimates.
targetAverages <- select(battingAverages, MarApr_AVG,6,7)
battingAverages$avgEstimate <- rowMeans(targetAverages)
battingAverages <- mutate(battingAverages, "percentError" = abs((FullSeason_AVG - avgEstimate)/FullSeason_AVG * 100))
battingAverages
}
### Finds estiamte for season long strikeout rate and walk rate depending on input parameter for each player if player plate appearances < 100.
### Season long estimates depending on average league strikeout rate (18.73%) and walk rate averages (8.28%) from 2007-2014.
### Data to find season long averages acquired from Fangraphs - https://goo.gl/Kmf1po
### Minimum plate appearances threshold and estimate methodology based on Jeff Zimmerman's 'Predicting Batting Average Article - https://goo.gl/9Aiw6d
### Example Estimating Seasong Long Strikeout Rate: K%_Est = (Player K% * Player PA + League K% * 100 PA) / (Player PA + 100 PA)
### Input: playerData - Data table consisting player performance data from March/April games
### Input: param - either "BB_percent" or "K_percent" to mark whether function walk rate or strikeout rate should be estimated
### Input: stabilizeFactor - either 168 or 100 depending if walk rate or strikeout rate is being estimated respectively
### Output: playerData - original playerData table with added columns for estimated season long strikeout and walk rate
estimator <- function(playerData, param, stabilizeFactor) {
# Stabilize a per-player rate statistic toward a league average.
#
# For players below the plate-appearance stabilization threshold the observed
# rate is shrunk toward the league average via
#   stat* = (stat * PA + leagueAvg * threshold) / (PA + threshold);
# players at or above the threshold keep their observed rate. The result is
# appended as a new "<param>_stable" column.
#
# Args:
#   playerData:      data frame with March/April stats (MarApr_PA plus the
#                    column named by `param`).
#   param:           "K_percent" or "BB_percent" -- the column to stabilize.
#   stabilizeFactor: PA threshold (100 for K%, 168 for BB%).
if (param == "BB_percent"){
leagueAvg <- 8.28
}
else if (param == "K_percent") {
leagueAvg <- 18.73
}
# NOTE(review): this line overwrites the hard-coded 2007-2014 league averages
# above with the sample mean of the column, making the if/else dead code --
# confirm which behavior is intended.
leagueAvg <- mean(as.numeric(unlist(playerData[eval(param)])))
# NOTE(review): eval() on a character scalar is a no-op here; plain
# playerData[param] would be equivalent.
stabilizedStat <- c()
# (Grows the vector in a loop; kept as-is to preserve behavior exactly.)
for (index in 1: nrow(playerData)) {
curRow <- playerData[index,]
targetStat <- curRow[eval(param)]
plateAppear <- curRow$MarApr_PA
if (plateAppear < stabilizeFactor) {
# Below the threshold: shrink toward the league average.
stat_Star <- (targetStat * plateAppear + leagueAvg * stabilizeFactor) / (plateAppear + stabilizeFactor)
stabilizedStat <- c(stabilizedStat, stat_Star)
}
else {
# At/above the threshold: keep the observed rate.
stabilizedStat <- c(stabilizedStat, targetStat)
}
}
# Store the stabilized rates under "<param>_stable".
stableStatColName <- paste(eval(param), "_stable", sep = "")
playerData[eval(stableStatColName)] <- unlist(stabilizedStat)
playerData
}
### Utilizes input player performance data to estimate a player's season long strikeout rate and walk rate
### Input: originalPlayerData - Data table containing various statistics critical to player performance for March-April games
### Output: estimateBB - Data table consiting of original statistics and estimated season long strikeout rate and walk rate
findEstimates <- function(originalPlayerData){
  # Pipeline: normalize the percent columns, then stabilize the strikeout
  # rate (100-PA threshold) and the walk rate (168-PA threshold) in turn.
  # Returns the input data plus K_percent_stable and BB_percent_stable.
  with_rates <- convertPercents(originalPlayerData)
  with_k <- estimator(with_rates, "K_percent", 100)
  estimator(with_k, "BB_percent", 168)
}
### Convert formatting for input player data for strike out rates and walks rate to numeric type
### Input: playerData - Data table consisting player performance data from March/April games
### Output: playerData - Original data table, except strikeout percentages and walk percentages have been converted to numeric types
convertPercents <- function(playerData){
  # Strip the " %" suffix from the strikeout- and walk-rate columns (K., BB.)
  # and store them as numeric K_percent / BB_percent, dropping the originals.
  to_numeric_pct <- function(col) as.numeric(sub(" %", "", col))
  playerData <- mutate(playerData, "K_percent" = to_numeric_pct(K.))
  playerData$K. <- NULL
  playerData <- mutate(playerData, "BB_percent" = to_numeric_pct(BB.))
  playerData$BB. <- NULL
  playerData
}
|
78671e2e56c16d06b9df944fe594e0f686d9a91e | da1dc5317dd8723f6fc549492cae6f6f1a6e57fd | /loaddata/manipData.R | bbb9d50c9eabbaabc29b86c9b489374eca6cb528 | [] | no_license | quace/BusinessIntelligence | cfe402ad31ebf094ea0f524db0b2a5e052290e9a | f170becfdc5ff6479872da661d08287dda806322 | refs/heads/master | 2021-08-29T19:26:46.327734 | 2017-12-14T18:53:14 | 2017-12-14T18:53:14 | 108,522,007 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,080 | r | manipData.R | #PACE
# FIFA-style composite player ratings: each composite is a fixed weighted
# average of raw attribute columns (weights sum to 1), truncated to an
# integer by as.integer() (truncation toward zero, not rounding).
# Pace: blend of acceleration and top speed.
MergeCompleteData <- MergeCompleteData %>% mutate(Pace = as.integer(0.45*Acceleration + 0.55*Speed))
#Shooting
MergeCompleteData <- MergeCompleteData %>% mutate(Shooting = as.integer(0.05*Attacking_Position + 0.45*Finishing +0.20*Shot_Power +0.20*Long_Shots + 0.05*Volleys + 0.05*Penalties))
# Passing: vision, crossing and set pieces plus short and long passing.
MergeCompleteData <- MergeCompleteData %>% mutate(Passing = as.integer(0.2*Vision + 0.2*Crossing + 0.05*Freekick_Accuracy + 0.35*Short_Pass + 0.15*Long_Pass + 0.05*Curve))
# Dribblingx: "x" suffix avoids clashing with the raw Dribbling column.
MergeCompleteData <- MergeCompleteData %>% mutate(Dribblingx = as.integer(0.1*Agility + 0.05*Balance + 0.05*Reactions + 0.3*Ball_Control + 0.5*Dribbling))
# Defending: interception, marking and tackling skills.
MergeCompleteData <- MergeCompleteData %>% mutate(Defending = as.integer(0.2*Interceptions + 0.1*Heading + 0.3*Marking + 0.3*Standing_Tackle + 0.1*Sliding_Tackle))
# Physicality: jumping, stamina, strength and aggression.
MergeCompleteData <- MergeCompleteData %>% mutate(Physicality = as.integer(0.05*Jumping + 0.25*Stamina + 0.5*Strength + 0.2*Aggression))
# GK_Score: unweighted mean of the five goalkeeper attributes.
MergeCompleteData <- MergeCompleteData %>% mutate(GK_Score = as.integer((GK_Positioning + GK_Diving + GK_Kicking + GK_Handling + GK_Reflexes )/5))
|
25ee0eca97d577914d8d7652a8de8416c05031e5 | cdc8e477a0b8392984ac7a63f9d608a6cc48d8fd | /man/gauss_binaryTree.Rd | 7ecb9d6111f66e38163df4754b0baae31c8130c7 | [] | no_license | vrunge/fpopTree | e192fcc796b9519a8ef1e41f42d5c2c325c19831 | 3b0da013f236330d380179d512b3ba27030684a5 | refs/heads/main | 2023-03-05T14:37:04.733741 | 2021-02-05T17:00:30 | 2021-02-05T17:00:30 | 330,942,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 553 | rd | gauss_binaryTree.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataGenerator.R
\name{gauss_binaryTree}
\alias{gauss_binaryTree}
\title{Generating a binary tree with changes and gaussian cost}
\usage{
gauss_binaryTree(l_tree, k, roots = NULL)
}
\arguments{
\item{l_tree}{a list encoding a binary tree}
\item{k}{the number of changes}
\item{roots}{the position of the changes (roots of the subtrees)}
}
\value{
a list associated with the binary tree, with changes and edge means
}
\description{
Generate a list associated with a binary tree
}
|
78ac9a13db05562ca3124776214139480622818e | cd9cd8e93507bc17b2f08a2184e5fedfff55fbf7 | /04_series_temporais/101 TURISMO/Fertilizantes.R | ff72a58992c85603467e4ed0500d5e820771e8f7 | [] | no_license | Matheusvfvitor/MBA | b7641dc995d98afb38101cf045cce8fc155817ca | d9b348f8f180629e6b2049a34bceee17a4a12886 | refs/heads/main | 2023-01-29T16:48:41.982857 | 2020-12-05T21:06:53 | 2020-12-05T21:06:53 | 318,882,844 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 552 | r | Fertilizantes.R | # install.packages(c("fpp","fpp2", "forecast", "readxl"))
# Load forecasting packages (fpp/fpp2 bundle example data; forecast
# provides autoplot for ts objects) and readxl for spreadsheet import.
library(fpp)
library(fpp2)
library(forecast)
library(readxl)
# Monthly fertilizer demand read from the local spreadsheet.
df <- read_excel("Fertilizantes.xlsx")
# Quick sanity checks on the imported data.
head(df)
tail(df)
class(df$consumo)
## 2. Converting df to a monthly ts (time series) object
## (translated from Portuguese: "Transformando df em classe ts")
fertilizantes.ts <- ts(df$consumo , frequency = 12,
                       start = c(1998,1), end = c(2019,9))
fertilizantes.ts
# Plot the series; labels are in Portuguese (months / monthly demand in
# thousands of tonnes) and are left untouched as user-facing strings.
autoplot(fertilizantes.ts)+
  xlab("meses")+
  ylab("Demanda Mensal em Mil Toneladas")+
  ggtitle("Fertilizantes Entregues ao Mercado em Milhares de Toneladas")
|
227e9966311560f810e5ff831092f03036752476 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/GRS.test/examples/GRS.test.Rd.R | 95b6e3009a21f06090d6cc1dff612bf82785aef4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 379 | r | GRS.test.Rd.R | library(GRS.test)
### Name: GRS.test
### Title: GRS test and Model Estimation Results
### Aliases: GRS.test
### Keywords: htest
### ** Examples
# Auto-extracted package example: Gibbons-Ross-Shanken test of portfolio
# efficiency using the dataset bundled with the GRS.test package.
data(data)
factor.mat = data[1:342,2:4] # Fama-French 3-factor model
ret.mat = data[1:342,8:ncol(data)] # 25 size-BM portfolio returns
# Keep only the GRS test statistic from the returned list.
GRS.test(ret.mat,factor.mat)$GRS.stat # See Table 9C of Fama-French (1993)
|
7ae95335359a0f30b80f8e70f372b3df16f85002 | 6e3c2de27a09be5965b79dc4c3f0ab1081d9aa6b | /exam/convert_obs_pvalue.R | 146c5aaa9ebdbba8858d8ad13a851236f4b62a7b | [] | no_license | ArmandasRokas/statistics | 308234d513516a09f6984a94af3dbafbf0c0a56d | 9f184e238d90d5c9d1a1c36ba0531beec1b699c0 | refs/heads/master | 2023-04-03T20:41:53.331250 | 2021-04-25T15:24:13 | 2021-04-25T15:24:13 | 257,019,215 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,016 | r | convert_obs_pvalue.R | ######## T-distribution ########
# Two-sided p-value from an observed t-statistic (sample size n).
# Uses lower.tail = FALSE instead of 1 - p*(x), which is the numerically
# preferred form for upper-tail probabilities.
tobs <- 2.38
n <- 20
p_value <- 2 * pt(abs(tobs), df = n - 1, lower.tail = FALSE)
p_value
# Observed t-statistic back from a two-sided p-value.
p_value <- 0.001
n <- 20
tobs <- qt(p_value / 2, df = n - 1, lower.tail = FALSE)
tobs
######## Z-distribution ########
# Two-sided p-value from an observed z-statistic.
zobs <- -3.47
p_value <- 2 * pnorm(abs(zobs), lower.tail = FALSE)
p_value
# Observed z-statistic back from a two-sided p-value.
p_value <- 0.00052
zobs <- qnorm(p_value / 2, lower.tail = FALSE)
zobs
######## F-distribution ########
# p-value from an observed F-statistic (one-way ANOVA: k groups, n obs).
fobs <- 4.30
k <- 5   # number of groups
n <- 30  # total number of observations
p_value <- pf(abs(fobs), df1 = k - 1, df2 = n - k, lower.tail = FALSE)
p_value
# Critical F-value from a significance level.
p_value <- 0.05
k <- 6    # number of groups
n <- 357  # total number of observations
fobs <- qf(p_value, df1 = k - 1, df2 = n - k, lower.tail = FALSE)
fobs
######## ChiSq-distribution ########
# p-value from an observed chi-squared statistic (contingency table).
# n_rows / n_cols replace the originals nrow / ncol, which shadowed the
# base functions of the same names.
chiobs <- 29
n_rows <- 5
n_cols <- 4
p_value <- pchisq(abs(chiobs), df = (n_rows - 1) * (n_cols - 1), lower.tail = FALSE)
p_value
# Critical chi-squared value from a significance level.
p_value <- 0.01
n_rows <- 2
n_cols <- 4
chiobs <- qchisq(p_value, df = (n_rows - 1) * (n_cols - 1), lower.tail = FALSE)
chiobs
|
a3501d6ec75224f3e7bd22521536ec169faa5f87 | 0952bd0b89027cae306ed34010f87021d0a09534 | /run_analysis.R | 62f81f9edaa52bceeb588f61516554f637753eec | [] | no_license | wnpChancellor/Coursera-Clean-Data | fbd3d8253fcc67dabe3c8705672dab0b8aa242c0 | f46ad9dc73ba06135e2498d07572835a47d1712b | refs/heads/master | 2021-01-20T12:03:58.592428 | 2015-08-23T14:30:19 | 2015-08-23T14:30:19 | 41,251,796 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,938 | r | run_analysis.R |
# (0) Load features and activity
features <- read.table("/Users/jingshi/Desktop/coursera/UCI HAR Dataset/features.txt")[,2]
activity <- read.table("/Users/jingshi/Desktop/coursera/UCI HAR Dataset/activity_labels.txt")
colnames(activity)=c('Code', 'Activity.Name')
######################################################################
# (1) Merge training and test datasets to create one dataset;
# Load train datasets;
x.train<-read.table("/Users/jingshi/Desktop/coursera/UCI HAR dataset/train/X_train.txt")
colnames(x.train)<-as.character(features)
y.train<-read.table("/Users/jingshi/Desktop/coursera/UCI HAR dataset/train/y_train.txt")
colnames(y.train)<-c("Activity.ID")
sub.train<-read.table("/Users/jingshi/Desktop/coursera/UCI HAR dataset/train/subject_train.txt")
colnames(sub.train)<-c("Subject.ID")
train<-cbind(sub.train, y.train, x.train)
#load test datsets;
x.test<-read.table("/Users/jingshi/Desktop/coursera/UCI HAR dataset/test/X_test.txt")
colnames(x.test)<-as.character(features)
y.test<-read.table("/Users/jingshi/Desktop/coursera/UCI HAR dataset/test/y_test.txt")
colnames(y.test)<-c("Activity.ID")
sub.test<-read.table("/Users/jingshi/Desktop/coursera/UCI HAR dataset/test/subject_test.txt")
colnames(sub.test)<-c("Subject.ID")
test<-cbind(sub.test, y.test, x.test)
# merge train and test datasets;
merged.data<-rbind(train,test)
###################################################################################
# (2) Keep only the measurements on the mean and std;
select.feature<-grepl("std[(][)]|mean[(][)]",features)
mean.std<-merged.data[,c(select.feature)]
###################################################################################
# (3) Uses descriptive activity names to name the activities in the data set
data.final<-merge(mean.std,activity, by.x="Activity.ID", by.y="Code")
###################################################################################
# (4) Appropriately labels the data set with descriptive variable names.
colNames <- names(data.final)
colNames <-gsub("Acc", ".Accelerometer", colNames)
colNames <-gsub("Gyro", ".Gyroscope", colNames)
colNames <-gsub("Mag", ".Magnitude", colNames)
colNames <-gsub("Jerk", ".Jerk", colNames)
colNames <-gsub("^t", "Time.", colNames)
colNames <-gsub("^f", "Frequency.", colNames)
colNames <-gsub("mean..", "Mean", colNames)
colNames <-gsub("std..", "Std", colNames)
names(data.final) <- make.names(colNames)
#############################################
# (5) From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
library('reshape2')
data.melt <- melt(data.final,id.vars=c("Subject.ID",'Activity.Name'))
tidy.data <- dcast(data.melt,Subject.ID + Activity.Name ~ variable, mean)
# output a tidy dataset
write.table(tidy.data, "/Users/jingshi/Desktop/coursera/UCI HAR Dataset/Tidy_Data.txt", row.names=FALSE)
|
c36cec3dd99475d3245d032ee2bbb19cd9f5d9db | 865a88c27b921e488e11a1fa96f4f0e8d6dc36eb | /functions.R | a4c0c04ddef0cd4d92038993746b3bbedb7ed41d | [] | no_license | benoit-marion/R_training | b06f20224cbd210f1214caa0eeb989792f9d8793 | 98b66aaa42d1e18e2010bdc6e6a1f84b070f330d | refs/heads/master | 2016-09-12T17:08:02.633606 | 2016-05-02T12:10:46 | 2016-05-02T12:10:46 | 57,886,200 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,256 | r | functions.R | # Create Triple function
triple <- function(x) {
  # Return three times the input.
  x * 3
}
triple(6)
# Create Triple function, with default function and condition with 0
# Computes a*b + a/b with b defaulting to 1; b == 0 short-circuits to 0
# so the division never produces Inf.
math_magic <- function(a, b = 1) {
  if (b == 0) {
    return(0)
  }
  a * b + a / b
}
math_magic(4,0)
# Function to return NULL if negative
# Non-negative values pass through unchanged; negative input yields NULL.
my_filter <- function(x) {
  if (x < 0) NULL else x
}
# If in a function
# Square a number, optionally printing a human-readable explanation first.
pow_two <- function(x, print_info = TRUE) {
  squared <- x ^ 2
  if (print_info) {
    print(paste(x, "to the power two equals", squared))
  }
  squared
}
pow_two(3, TRUE)
# Interpret function
# Classifies a day's view count: counts above 15 are "popular" and are
# returned unchanged; anything else prints an encouragement and yields 0.
interpret <- function(num_views) {
  if (num_views <= 15) {
    print("Try to be more visible!")
    return(0)
  }
  print("You're popular!")
  num_views
}
# The linkedin and facebook vectors have already been created for you
linkedin <- c(16, 9, 13, 5, 2, 17, 14)
facebook <- c(17, 7, 5, 16, 8, 13, 14)
# The interpret() can be used inside interpret_all()
# NOTE(review): this re-defines interpret() identically to the earlier
# definition in this file; kept as-is because the training material
# repeats it for context before the interpret_all() exercise.
interpret <- function(num_views) {
  # Popular day: print and return the raw count.
  if (num_views > 15) {
    print("You're popular!")
    return(num_views)
  } else {
    # Quiet day: print encouragement and contribute 0 to any sum.
    print("Try to be more visible!")
    return(0)
  }
}
# Define the interpret_all() function and sum only popular days
# Applies interpret() to each day's view count and accumulates the values
# it returns (the count for popular days, 0 otherwise).
# FIX: the original printed the running total on every loop iteration and
# returned nothing (the for loop's invisible NULL); it now returns the
# accumulated sum when vsum is TRUE and NULL otherwise, matching the
# "sum only popular days" intent stated above.
interpret_all <- function(views, vsum = TRUE) {
  count <- 0
  for (v in views) {
    count <- count + interpret(v)
  }
  if (vsum) {
    return(count)
  }
  NULL
}
# Call the interpret_all() function on both linkedin and facebook
interpret_all(linkedin)
interpret_all(facebook)
# apply functions
## lapply for applying function over list or vector, output = list
## sapply try to simplify list to array
## vapply similar to sapply but define the output
# lapply basic
nyc <- list(pop = 56789,
            cities = c("New York", "Chicago", "Los Angeles", "Miami"),
            capital = FALSE)
lapply(nyc, class)
# lapply with strsplit
pioneers <- c("GAUSS:1777", "BAYES:1702", "PASCAL:1623", "PEARSON:1857")
split_math <- lapply(pioneers, strsplit, ":")
# strsplit() is itself vectorised, so this direct call replaces the lapply above.
split_math <- strsplit(pioneers, ":")
# NOTE(review): lapply() below is missing its FUN argument, so this line
# errors as written; presumably lapply(split_math, tolower) was intended.
tolower(lapply(split_math))
# generate list with sample and repeat 5 times
temp <- list(sample(-10:20))[rep(1,5)]
# NOTE(review): data.table() requires library(data.table), which is not
# loaded anywhere in this file -- confirm before running this line.
data.table(A=rep(letters[2:1], each=4L), B=rep(1:4, each=2L), C=sample(8))
# rep can use the amount of time to be repeated (time = 2) or if each vector should be repeated (each = 2)
# sapply allow to generate vectors (when possible) instead of list - simplified lapply
# subet data using logical operation
below_zero <- function(x) { subset(x, x < 0) }
freezing_s <- sapply(temp, below_zero)
freezing_l <- lapply(temp, below_zero)
# TRUE only when sapply could not simplify (ragged results stay a list).
identical(freezing_s, freezing_l)
# Concatenate
print_info <- function(x) { cat("The average temperature is", mean(x), "\n") }
sapply(unlist(temp[[1]]), print_info)
# vapply and naming vectors
basics <- function(x) { c(min(x), mean(x), max(x)) }
vapply(temp, basics, c("min" = 0, "mean" = 0, "max" = 0))
# alternative, naming vectors in the function
basics <- function(x) { c(min = min(x), mean = mean(x), max = max(x)) }
vapply(temp, basics, numeric(3)) # character(3) or logical(3) for other value type
# Rank - takes a group of values and calculates the rank of each value within the group, e.g.
rank(c(21, 22, 24, 23))
# has output [1] 1 2 4 3
|
234c263cba0763a802dd1654438652e94cd3bf19 | 43eae47269ee5a073218dcda8eed82e3e18c6312 | /man/real_mcmc.Rd | f878ac1c63adfe7d79c9e8e78b9debef713bc1a0 | [] | no_license | wlandau/fbseqStudies | 4e494d25165130f95a983ee64d751ba3fb24bd3d | 0169ac5a00d457a261401f926b37e08587eace64 | refs/heads/main | 2021-01-19T04:39:24.890203 | 2017-10-21T02:36:07 | 2017-10-21T02:36:07 | 45,758,088 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 358 | rd | real_mcmc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/study-real_mcmc.R
\name{real_mcmc}
\alias{real_mcmc}
\title{Function \code{real_mcmc}}
\usage{
real_mcmc(path = newdir())
}
\arguments{
\item{path}{to directory to save simulations and results}
}
\value{
path to simulated objects
}
\description{
MCMC of the real data analysis
}
|
bb198175811c6fcabffb964b5545bff5f1778c5d | 7e89bf38271b35e6c2fdb9be7bffc119c9e65e7e | /Assignment/ProgrammingAssignment3/function_rankall.R | 9041058da670f5c24f5261b749c1211b232ac48d | [] | no_license | simrat-singh/R | a6d146a30140fa2042b7417e7ef531950c7f1c5a | 6c3fe41a3110cacc9ff8696841f368da5b5343d9 | refs/heads/master | 2021-05-03T11:23:19.446811 | 2016-10-16T02:39:07 | 2016-10-16T02:39:07 | 68,988,139 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,977 | r | function_rankall.R | #Function to get list of hospitals on each state for an specific outcome and rank
#PROBLEM STATEMENT:- The function takes outcome and num as parameters and returns a 2-column data frame
#containing the hospital in each state that has the ranking specified in num.
#For detailed problem statement refer to "4 Ranking hospitals in all states" in ProgAssignment3.pdf in repo
#Logic applied:-
#1.Read the data and get list of unique states and sort it alphbatecially
#2.Iterate the list and for each state call findhospital method of function_rankhospital to get hospital
# in each state that has ranking specified in num
#3.rbind the state name and hospital to create the final data frame
source('helperFunctions.R')
# Return a 2-column data frame (hospital, state) giving, for every state,
# the hospital ranked `num` ("best", "worst", or an integer) for `outcome`.
rankall<-function(outcome, num='best'){
  # Load and subset the relevant data (helper sourced from helperFunctions.R)
  hospitals <- readAndCleanData('outcome-of-care-measures.csv')
  # Unique states (column 2), sorted alphabetically
  state_list <- sort(unique(hospitals[, 2]))
  # Suppress coercion warnings locally; on.exit() restores the previous
  # setting even on error (the original set warn = -1 globally and leaked it).
  old_warn <- options(warn = -1)
  on.exit(options(old_warn), add = TRUE)
  # Build one row per state, then bind once -- avoids the O(n^2)
  # rbind-inside-a-loop pattern of the original.
  rows <- lapply(state_list, function(state) {
    # findhospital() comes from function_rankhospital.R
    hospital <- findhospital(as.character(state), outcome, num, hospitals)
    data.frame(hospital = hospital, state = as.character(state))
  })
  do.call(rbind, rows)
}
#Test cases
#head(rankall("heart attack", 20), 10)
#Expected outcome
#hospital state
# <NA> AK
#D W MCMILLAN MEMORIAL HOSPITAL AL
#ARKANSAS METHODIST MEDICAL CENTER AR
#JOHN C LINCOLN DEER VALLEY HOSPITAL AZ
#SHERMAN OAKS HOSPITAL CA
#SKY RIDGE MEDICAL CENTER CO
#MIDSTATE MEDICAL CENTER CT
# <NA> DC
# <NA> DE
#SOUTH FLORIDA BAPTIST HOSPITAL FL |
110d00f265143a69cfdebc9d21e95cd064e2fe05 | 548333edfb4493b63a4cf0a2542851178bb1a557 | /Models/5_Unit_tests/old/Model_test.R | ec491430eba7cefd15506cc0f166144fa97e31f2 | [] | no_license | Allisterh/Climate_change_financial-IPR | b3137e270cacd35b08ebb7c2a612a9d1047a68bd | 1b304b9d0ec2b362d9cfa0d46bb6ebc1d49fe623 | refs/heads/master | 2023-03-26T07:44:54.148687 | 2019-07-16T16:02:27 | 2019-07-16T16:02:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,549 | r | Model_test.R | ##### Project code: Net-Zero Toolkit for modelling the financial impacts of low-carbon transition scenarios
##### Date of last edit: 25/06/2019
##### Model author: Robert Ritz
##### Code author: Shyamal Patel
##### Dependencies: N/A
##### Notes: This file defines the Robert Ritz cost & competition model applied over model scenarios, regions and sectors
##### The function is defined below, but no data is called in by this script
##### Called by: Run_model_test.R
#--------------------------------------------------------------------------------------------------
##### SECTION 1 - Define function to run the Robert Ritz model ----
# Note that value chain element currently does nothing
# Run the Ritz cost-and-competition model recursively over `model_years`.
# Arguments:
#   rr_data                 - company/sector panel with 2017 base-year columns
#   rr_carbon_cost_pwh_data - per-scenario/region power-sector carbon costs;
#                             NULL on the first (power-sector) call
#   rr_parameter_data       - model switches (abatement_opportunities,
#                             product_differentiation, elasticity, num_firms,
#                             cost_pass_through, sales_impact, firm_closure,
#                             quantity_reallocation, competition)
#   rr_value_chain_element  - currently unused (see file header note)
# Returns the panel with year-indexed model columns appended for each year.
run_rr_model <- function(rr_data, rr_carbon_cost_pwh_data = NULL, rr_parameter_data, rr_value_chain_element) {
  # Combine carbon cost dataset with RR dataset (defining new variables as carbon_cost_pwh_i) if not NULL
  if(!is.null(rr_carbon_cost_pwh_data)) # on second call follow this step
    # Note that carbon costs in the PWH sector are 0 in 2017 so merging using this variable does not cause issues
    {rr_panel_model_run <- rr_data %>% left_join(rr_carbon_cost_pwh_data, by = c("scenario", "region", "carbon_cost_pwh_2017"))
  } else {rr_panel_model_run <- rr_data # first call this is the case, so runs power sector model
    # Power-sector run: no upstream electricity carbon cost, so zero-fill
    # carbon_cost_pwh_<year> for every model year.
    for(j in model_years) {
      carbon_cost_pwh = rlang::sym(paste0("carbon_cost_pwh_", j))
      rr_panel_model_run %<>%
        mutate(!!carbon_cost_pwh := 0)
    }
  }
  # User input on abatement options: if "OFF", replace co2 cost with co2 price in all years, otherwise, change nothing
  for(i in model_years) {
    carbon_price = rlang::sym(paste0("carbon_price_", i))
    carbon_cost = rlang::sym(paste0("carbon_cost_", i))
    carbon_cost_pwh = rlang::sym(paste0("carbon_cost_pwh_", i))
    rr_panel_model_run %<>%
      # Adjust carbon costs for abatement opporunities switch (if OFF, carbon costs = carbon price)
      mutate(!!carbon_cost := if(rr_parameter_data$abatement_opportunities == "ON") (!!carbon_cost) else (!!carbon_price),
             !!carbon_cost_pwh := if(rr_parameter_data$abatement_opportunities == "ON") (!!carbon_cost_pwh) else (!!carbon_price))
  }
  # User input on product differentiation ("B.0" means keep the data's own values)
  if(rr_parameter_data$product_differentiation != "B.0") {
    rr_panel_model_run %<>%
      mutate(product_differentiation = rr_parameter_data$product_differentiation)
  }
  #JC: New text, sets elasticities
  if(rr_parameter_data$elasticity != "B.0") {
    rr_panel_model_run %<>%
      mutate(elasticity = rr_parameter_data$elasticity)
  }
  # Main recursion: each year's columns are built from the previous year's
  # (t_old) columns via tidy-eval symbols assigned below.
  for(i in model_years) {
    t_new <- i
    t_old <- i - 1
    # NOTE(review): discount_yrs is computed but never used in this loop.
    discount_yrs <- i - 2018
    # Create variable names for use within loop
    prefix_variables_new <- c("carbon_cost_", "carbon_cost_pwh_", "a_term_", "b_term_", "number_firms_", "c_term_", "industry_cost_pass_through_", "d_relative_co2_intensity_",
                              "rho_cost_pass_through_", "e_sales_impact_", "f_margin_impact_", "profit_pre_closure_pre_tax_", "quantity_pre_closure_",
                              "sector_quantity_pre_closure_", "quantity_reallocated_", "sector_quantity_reallocated_", "quantity_post_closure_",
                              "sector_quantity_post_closure_", "market_share_post_closure_", "profit_post_closure_pre_tax_",
                              "unit_cost_", "delta_unit_cost_", "price_", "revenue_actual_quantity_", "sector_revenue_actual_quantity_", "profit_margin_",
                              "sector_profit_post_closure_pre_tax_", "sector_profit_margin_", "product_of_share_and_co2_intensity_scope_1_", "product_of_share_and_co2_intensity_scope_2_",
                              "product_of_share_and_co2_intensity_scope_3_", "sector_average_co2_intensity_scope_1_", "sector_average_co2_intensity_scope_2_",
                              "sector_average_co2_intensity_scope_3_", "delta_sector_average_unit_cost_", "sector_average_unit_cost_", "revenue_", "sector_revenue_")
    prefix_variables_old <- c("quantity_post_closure_", "carbon_cost_", "carbon_cost_pwh_", "sector_profit_margin_", "profit_margin_", "profit_pre_closure_pre_tax_", "price_",
                              "sector_quantity_post_closure_", "profit_post_closure_pre_tax_", "market_share_post_closure_", "unit_cost_", "sector_average_co2_intensity_scope_1_",
                              "sector_average_co2_intensity_scope_2_", "delta_sector_average_unit_cost_", "sector_average_unit_cost_")
    # Bind e.g. `carbon_cost_t_new` to the symbol `carbon_cost_<year>` so the
    # grouped mutate below can reference year-specific columns via !!.
    for (var in prefix_variables_new) {
      assign(paste0(var, "t_new"), rlang::sym(paste0(var, t_new)))
    }
    for (var in prefix_variables_old) {
      assign(paste0(var, "t_old"), rlang::sym(paste0(var, t_old)))
    }
    # All year-t columns are created in a single grouped mutate chain.
    rr_panel_model_run %<>%
      # Remove companies with profit margins over 100%
      mutate(indicator = ifelse(net_income_2017 / revenue_2017 > 1, 1, 0)) %>%
      filter(indicator != 1) %>%
      select(-indicator) %>%
      group_by(scenario, market, region) %>%
      mutate(!!delta_unit_cost_t_new := co2_intensity_scope_1 * ((!!carbon_cost_t_new) - (!!carbon_cost_t_old))
             + co2_intensity_scope_2 * ((!!carbon_cost_pwh_t_new) - (!!carbon_cost_pwh_t_old)),
             !!delta_sector_average_unit_cost_t_new := (!!sector_average_co2_intensity_scope_1_t_old) * ((!!carbon_cost_t_new) - (!!carbon_cost_t_old))
             + (!!sector_average_co2_intensity_scope_2_t_old) * ((!!carbon_cost_pwh_t_new) - (!!carbon_cost_pwh_t_old)),
             !!unit_cost_t_new := (!!unit_cost_t_old) + (!!delta_unit_cost_t_new),
             !!sector_average_unit_cost_t_new := (!!sector_average_unit_cost_t_old) + (!!delta_sector_average_unit_cost_t_new),
             ## Terms A - D in the profit equation (see annotated verison of Robert Ritz industry note)
             ## JC: cost pass through can replace this section: rho_i = rho_cpt + AC(emissions_intensity- 1)
             !!a_term_t_new := 1 / (1 + rr_parameter_data$competition - product_differentiation),
             !!b_term_t_new := 1 - product_differentiation,
             !!number_firms_t_new := if(rr_parameter_data$num_firms == "ON") (1 / sum((!!market_share_post_closure_t_old)^2))
             else if(rr_parameter_data$num_firms == "OFF") 1
             else if(rr_parameter_data$num_firms == "PC") 10^9
             else NA,
             !!c_term_t_new := (rr_parameter_data$competition * product_differentiation * (!!number_firms_t_new)) / (1 + rr_parameter_data$competition + product_differentiation * ((!!number_firms_t_new) - 1)),
             !!industry_cost_pass_through_t_new := if(rr_parameter_data$cost_pass_through == "OFF") 0
             else (!!a_term_t_new) * ((!!b_term_t_new) + (!!c_term_t_new)),
             !!d_relative_co2_intensity_t_new := if(rr_parameter_data$num_firms == "OFF") 1
             # Fix to 1 when there is no cost impact (industry average unit cost change is 0)
             else { ifelse(rep(mean(!!delta_sector_average_unit_cost_t_new, na.rm = TRUE) == 0, n()), 1,
                           (!!delta_sector_average_unit_cost_t_new) / (!!delta_unit_cost_t_new)) },
             !!rho_cost_pass_through_t_new := if(rr_parameter_data$cost_pass_through == "ON") { (!!a_term_t_new) * ((!!b_term_t_new) + (!!c_term_t_new) * (!!d_relative_co2_intensity_t_new)) }
             else {0},
             ## Term E - sales impact
             !!e_sales_impact_t_new := if(rr_parameter_data$sales_impact != "ON") {0}
             else if(rr_parameter_data$num_firms != "OFF")
             {(- elasticity * (!!industry_cost_pass_through_t_new) * ((!!delta_sector_average_unit_cost_t_new) / (!!sector_average_unit_cost_t_old))
               * (1 - (!!sector_profit_margin_t_old)))}
             else {(- elasticity * (!!industry_cost_pass_through_t_new) * ((!!delta_unit_cost_t_new) / (!!unit_cost_t_old))
                   * (1 - (!!profit_margin_t_old)))},
             ## Term F - margin impact
             !!f_margin_impact_t_new := (- (1 - (!!rho_cost_pass_through_t_new)) * ((!!delta_unit_cost_t_new) / (!!unit_cost_t_old))
                                         * (1 - (!!profit_margin_t_old)) / (!!profit_margin_t_old)),
             ## Profit - before firm closure, before corporation tax adjustment
             !!profit_pre_closure_pre_tax_t_new := ifelse((!!quantity_post_closure_t_old) == 0, 0,
                                                          (!!profit_post_closure_pre_tax_t_old) * gamma_factor * (1 + (!!f_margin_impact_t_new) + (!!e_sales_impact_t_new) + (!!f_margin_impact_t_new) * (!!e_sales_impact_t_new))),
             ## Quantity reallocated due to firm closure (if profit pre-closure and pre-tax is negative)
             !!quantity_pre_closure_t_new := ifelse((!!quantity_post_closure_t_old) == 0, 0,
                                                    (!!quantity_post_closure_t_old) * (1 + (!!e_sales_impact_t_new)) * gamma_factor),
             !!sector_quantity_pre_closure_t_new := sum((!!quantity_pre_closure_t_new)),
             # TRY REPLACING BELOW WITH IF PROFIT MARGIN < 0 ...
             !!quantity_reallocated_t_new := if(rr_parameter_data$firm_closure == "ON")
             {ifelse((!!profit_pre_closure_pre_tax_t_new) < 0, (!!quantity_pre_closure_t_new), 0)}
             else {0},
             !!sector_quantity_reallocated_t_new := sum((!!quantity_reallocated_t_new), na.rm = TRUE),
             # Firms receive reallocated quantity units in proportion to their initial shares of the market
             !!quantity_post_closure_t_new := if(rr_parameter_data$firm_closure != "ON")
             (!!quantity_pre_closure_t_new)
             else ifelse((!!profit_pre_closure_pre_tax_t_new) <= 0, 0,
                         (!!quantity_pre_closure_t_new) + rr_parameter_data$quantity_reallocation * (!!sector_quantity_reallocated_t_new) *
                           ((!!quantity_pre_closure_t_new) / ((!!sector_quantity_pre_closure_t_new) - (!!sector_quantity_reallocated_t_new)))),
             !!sector_quantity_post_closure_t_new := sum((!!quantity_post_closure_t_new), na.rm = TRUE),
             !!market_share_post_closure_t_new := ifelse((!!quantity_pre_closure_t_new) == 0, 0,
                                                         (!!quantity_post_closure_t_new) / (!!sector_quantity_post_closure_t_new)),
             ## Profit - after firm closure, before corporation tax adjustment
             !!profit_post_closure_pre_tax_t_new := ifelse((!!quantity_pre_closure_t_new) == 0, 0,
                                                           (!!profit_pre_closure_pre_tax_t_new) * ((!!quantity_post_closure_t_new) / (!!quantity_pre_closure_t_new))),
             # TRY CHANGING THESE FORMULA - IT ISN'T REALLY 'ACTUAL' COST, SO VERIFY THAT IT IS APPROPRIATE FOR THIS SITUATION - ARE THE QUANTITIES RIGHT?
             !!price_t_new := if(rr_parameter_data$cost_pass_through == "ON")
             ((!!price_t_old) + (!!delta_sector_average_unit_cost_t_new) * (1 / (1 + rr_parameter_data$competition - product_differentiation))
              * ((1 - product_differentiation) + (rr_parameter_data$competition * product_differentiation * (!!number_firms_t_new)) /
                   (1 + rr_parameter_data$competition + product_differentiation * ((!!number_firms_t_new) - 1))))
             else (!!price_t_old),
             !!revenue_t_new := (!!quantity_post_closure_t_new) * (!!price_t_new),
             !!sector_revenue_t_new := sum((!!revenue_t_new), na.rm = TRUE),
             !!profit_margin_t_new := (!!profit_post_closure_pre_tax_t_new) / (!!revenue_t_new),
             !!sector_profit_post_closure_pre_tax_t_new := sum((!!profit_post_closure_pre_tax_t_new), na.rm = TRUE),
             !!sector_profit_margin_t_new := (!!sector_profit_post_closure_pre_tax_t_new) / (!!sector_revenue_t_new),
             # Sector average co2 intensity by scope
             !!product_of_share_and_co2_intensity_scope_1_t_new := (!!market_share_post_closure_t_new) * co2_intensity_scope_1,
             !!product_of_share_and_co2_intensity_scope_2_t_new := (!!market_share_post_closure_t_new) * co2_intensity_scope_2,
             !!product_of_share_and_co2_intensity_scope_3_t_new := (!!market_share_post_closure_t_new) * co2_intensity_scope_3,
             !!sector_average_co2_intensity_scope_1_t_new := sum((!!product_of_share_and_co2_intensity_scope_1_t_new), na.rm = TRUE),
             !!sector_average_co2_intensity_scope_2_t_new := sum((!!product_of_share_and_co2_intensity_scope_2_t_new), na.rm = TRUE),
             !!sector_average_co2_intensity_scope_3_t_new := sum((!!product_of_share_and_co2_intensity_scope_3_t_new), na.rm = TRUE)
      ) %>%
      ungroup()
  }
  return(rr_panel_model_run)
}
#--------------------------------------------------------------------------------------------------
##### SECTION 2 - Define wrapper function to run model over power sector first and use other switches ----
model_years <- c(2018:2050)
run_model <- function(data, parameter_data, value_chain_element, variables) {
### SECTION 2a - Data cleaning which is parameter choice contingent
print("Cleaning data")
# Adjust market cap for asset stranding (if selected by user)
panel_model_data <- data %>%
mutate(profit_impact_pct = case_when(parameter_data$stranding != "ON" & substring(market, 1, 3) != "GR_" ~ NA_real_,
parameter_data$green_upside != "ON" & substring(market, 1, 3) == "GR_" ~ NA_real_,
TRUE ~ profit_impact_pct),
market_cap_model = case_when(!is.na(profit_impact_pct) ~ market_cap_2017 * (1 + profit_impact_pct),
TRUE ~ market_cap_2017)) %>%
select(scenario:market_cap_2017, market_cap_model, everything())
# Create 2017 model variables for use as the first year in the recursive model (2018 - 50)
panel_model_data2 <- panel_model_data %>%
# Scale down quantities in proportion to ratio of stranding market cap to pre-stranding market cap (assumed to have no effect on margin or co2 intensity of output)
mutate(revenue_2017 = revenue_2017 * (market_cap_model / market_cap_2017),
net_income_2017 = net_income_2017 * (market_cap_model / market_cap_2017),
# Save unadjusted CO2 emissions (scope 1 + 2) data for later
co2_emissions = co2_scope_1_2017 + co2_scope_2_2017,
co2_scope_1_2017 = co2_scope_1_2017 * (market_cap_model / market_cap_2017),
co2_scope_2_2017 = co2_scope_2_2017 * (market_cap_model / market_cap_2017),
co2_scope_3_2017 = co2_scope_3_2017 * (market_cap_model / market_cap_2017)) %>%
# Adjust up cost and profit to be pre-tax using corporation tax rate
mutate(total_cost_2017 = ifelse(net_income_2017 >= 0 & revenue_2017 - net_income_2017 / (1 - corporation_tax_rate) >= 0,
revenue_2017 - net_income_2017 / (1 - corporation_tax_rate),
revenue_2017 - net_income_2017),
profit_pre_closure_pre_tax_2017 = ifelse(revenue_2017 - net_income_2017 / (1 - corporation_tax_rate) >= 0, 1 / (1 - corporation_tax_rate), 1) *
net_income_margin * revenue_2017,
profit_post_closure_pre_tax_2017 = profit_pre_closure_pre_tax_2017,
revenue_BAU_quantity_2017 = revenue_2017) %>%
group_by(scenario, region, market) %>%
mutate(sector_revenue_actual_quantity_2017 = sum(revenue_BAU_quantity_2017),
profit_margin_2017 = profit_pre_closure_pre_tax_2017 / revenue_2017, #Exception written for companies not active in particular markets (at this stage no company should have zero revenue at the global level)
sector_profit_post_closure_pre_tax_2017 = sum(profit_pre_closure_pre_tax_2017),
sector_profit_margin_2017 = sector_profit_post_closure_pre_tax_2017 / sector_revenue_actual_quantity_2017)
### Adjust emissions down based on Winsorisation procedure [if applicable]
# Store initial emissions as base values
# Variation in CO2 intensity is set to 0.49 - 0.51 percentiles for iron & steel
# Variation in CO2 intensity is set to 0.2 - 0.8 percentiles for concrete and cement
if(parameter_data$winsorise_scope_1 == "ON") {
panel_model_data3 <- panel_model_data2 %>%
mutate(co2_base_scope_1_2017 = co2_scope_1_2017,
co2_base_scope_2_2017 = co2_scope_2_2017,
co2_base_scope_3_2017 = co2_scope_3_2017) %>%
# Revenue 2017 case is for companies which are wiped out in the DD / CM analysis #JC: we can remove the iron and steel and concrete cement lines here.
mutate(co2_intensity_scope_1 = ifelse(revenue_2017 == 0, 0, co2_scope_1_2017 / revenue_2017)) %>%
group_by(scenario, market) %>%
mutate(low_co2_intensity_scope_1 = case_when(market == "Iron & Steel" ~ quantile(co2_intensity_scope_1, probs = 0.49, na.rm = TRUE),
market == "Concrete and cement" ~ quantile(co2_intensity_scope_1, probs = 0.2, na.rm = TRUE),
TRUE ~ quantile(co2_intensity_scope_1, probs = parameter_data$winsorise_qlow, na.rm = TRUE)),
high_co2_intensity_scope_1 = case_when(market == "Iron & Steel" ~ quantile(co2_intensity_scope_1, probs = 0.51, na.rm = TRUE),
market == "Concrete and cement" ~ quantile(co2_intensity_scope_1, probs = 0.8, na.rm = TRUE),
TRUE ~ quantile(co2_intensity_scope_1, probs = parameter_data$winsorise_qhigh, na.rm = TRUE))) %>%
ungroup() %>%
mutate(co2_intensity_scope_1 = case_when(co2_intensity_scope_1 <= low_co2_intensity_scope_1 ~ low_co2_intensity_scope_1,
co2_intensity_scope_1 >= high_co2_intensity_scope_1 ~ high_co2_intensity_scope_1,
TRUE ~ co2_intensity_scope_1)) %>%
mutate(co2_scope_1_2017 = revenue_2017 * co2_intensity_scope_1) %>%
select(-co2_intensity_scope_1, -low_co2_intensity_scope_1, -high_co2_intensity_scope_1)
} else {panel_model_data3 <- panel_model_data2}
# Change units of CO2 emissions from tonnes to Mtonnes (all monetary values are in million US$ (2016))
# Give zero emissions companies an arbitrary amount of emissions so the model can be run for them (1 tonne)
panel_model_data4 <- panel_model_data3 %>%
mutate(co2_scope_1_2017 = case_when(co2_scope_1_2017 == 0 & revenue_2017 >= 0 ~ 1 / 10^6,
TRUE ~ co2_scope_1_2017 / 10^6),
co2_scope_2_2017 = co2_scope_2_2017 / 10^6,
co2_scope_3_2017 = co2_scope_3_2017 / 10^6)
# Preserve starting emissions variables and calculate market shares
# within each scenario/market/region group (group-level sums give the
# sector totals used for the shares).
panel_model_data5 <- panel_model_data4 %>%
group_by(scenario, market, region) %>%
mutate(co2_scope_1_BAU_quantity_2017 = co2_scope_1_2017,
co2_scope_2_BAU_quantity_2017 = co2_scope_2_2017,
co2_scope_3_BAU_quantity_2017 = co2_scope_3_2017,
sector_revenue_BAU_quantity_2017 = sum(revenue_BAU_quantity_2017),
total_cost_BAU_quantity_2017 = total_cost_2017,
sector_total_cost_BAU_quantity_2017 = sum(total_cost_BAU_quantity_2017),
total_cost_actual_quantity_2017 = total_cost_BAU_quantity_2017,
sector_total_cost_actual_quantity_2017 = sum(total_cost_actual_quantity_2017),
quantity_post_closure_2017 = revenue_BAU_quantity_2017,
sector_quantity_post_closure_2017 = sum(quantity_post_closure_2017),
market_share_post_closure_2017 = quantity_post_closure_2017 / sector_quantity_post_closure_2017,
# WLOG p2017 = 1, so 2017 quantity equals 2017 revenue.
price_2017 = 1,
quantity_2017 = revenue_2017 / price_2017,
# Emissions intensity by scope variables (emissions per unit of output)
co2_intensity_scope_1 = co2_scope_1_2017 / quantity_2017,
co2_intensity_scope_2 = co2_scope_2_2017 / quantity_2017,
co2_intensity_scope_3 = co2_scope_3_2017 / quantity_2017) %>%
ungroup()
# Create unit cost and sector average variables.
# Sector averages are market-share-weighted means, computed as
# sum(share * intensity) within each scenario/market/region group.
panel_model_data6 <- panel_model_data5 %>%
group_by(scenario, market, region) %>%
mutate(unit_cost_2017 = total_cost_2017 / quantity_2017,
sector_average_unit_cost_2017 = sector_total_cost_actual_quantity_2017 / sector_quantity_post_closure_2017,
# Sectoral average emissions intensity by scope variables
product_of_share_and_co2_intensity_scope_1_2017 = co2_intensity_scope_1 * market_share_post_closure_2017,
product_of_share_and_co2_intensity_scope_2_2017 = co2_intensity_scope_2 * market_share_post_closure_2017,
product_of_share_and_co2_intensity_scope_3_2017 = co2_intensity_scope_3 * market_share_post_closure_2017,
sector_average_co2_intensity_scope_1_2017 = sum(product_of_share_and_co2_intensity_scope_1_2017, na.rm = TRUE),
sector_average_co2_intensity_scope_2_2017 = sum(product_of_share_and_co2_intensity_scope_2_2017, na.rm = TRUE),
sector_average_co2_intensity_scope_3_2017 = sum(product_of_share_and_co2_intensity_scope_3_2017, na.rm = TRUE)) %>%
ungroup()
# Set carbon price and cost to be 0 in 2017 (setting up the baseline)
panel_model_data7 <- panel_model_data6 %>%
mutate(carbon_price_2017 = 0,
carbon_cost_2017 = 0,
carbon_cost_pwh_2017 = 0)
# Define the final panel run dataset before running the model
panel_run <- panel_model_data7
### SECTION 2b - Run recursive Robert Ritz model for power sector
# The power sector is modelled first because its carbon costs are passed
# through to every other sector as an indirect (electricity) carbon cost.
print("Power sector model")
# Separate out the power sector
pwh_panel_run <- panel_run %>%
filter(market == "Power generation")
# Run the Robert Ritz model for the power sector
pwh_results <- run_rr_model(rr_data = pwh_panel_run, rr_parameter_data = parameter_data, rr_value_chain_element = value_chain_element)
### SECTION 3c - Calculate power sector indirect carbon costs imposed on other sectors
# Find carbon costs from the power sector model run
pwh_carbon_cost_results <- pwh_results %>%
select(scenario, region, starts_with("carbon_cost_"), starts_with("industry_cost_pass_through_")) %>%
unique()
# For each model year, the indirect power carbon cost is the power sector's
# carbon cost scaled by its pass-through rate. Column names are built per
# year, converted to symbols with rlang::sym(), and spliced into mutate()
# via !! / := (tidy evaluation). %<>% is magrittr's compound-assignment pipe.
for (j in model_years) {
carbon_cost_pwh = rlang::sym(paste0("carbon_cost_pwh_", j))
carbon_cost = rlang::sym(paste0("carbon_cost_", j))
pwh_cost_pass_through = rlang::sym(paste0("industry_cost_pass_through_", j))
pwh_carbon_cost_results %<>%
mutate(!!carbon_cost_pwh := !!carbon_cost * !!pwh_cost_pass_through)
}
# Keep only the per-year indirect ("_pwh") cost columns plus the join keys.
pwh_carbon_cost_results %<>%
select(scenario, region, contains("_pwh"))
### SECTION 3d - Run recursive Robert Ritz model for all other sectors
print("All other sectors model")
all_other_panel_model_run <- panel_run %>%
filter(market != "Power generation")
all_other_results <- run_rr_model(rr_data = all_other_panel_model_run, rr_carbon_cost_pwh_data = pwh_carbon_cost_results,
rr_parameter_data = parameter_data, rr_value_chain_element = value_chain_element)
### SECTION 3e - Combine results
run_results <- pwh_results %>%
bind_rows(all_other_results)
### SECTION 3f - Calculate terminal value of profits, post tax profits and net present values
print("NPV results and aggregation")
# Terminal values, post-tax profits and profit summary
run_results2 <- run_results %>%
# Post-tax profits: tax is only applied where 2017 revenue exceeds grossed-up
# net income (i.e. the company is profitable before tax).
mutate_at(vars(starts_with("profit_post_closure_pre_tax")),
funs(post_tax = ifelse(revenue_2017 - net_income_2017 / (1 - corporation_tax_rate) > 0,
. * (1 - corporation_tax_rate), .))) %>%
# Rename the generated "*_post_tax" columns, carrying over the 4-digit year.
rename_at(vars(ends_with("post_tax")), funs(paste0("profit_post_closure_post_tax_", stri_extract_all_regex(., "[0-9]+")))) %>%
# Post-tax NPV profits. NOTE(review): deparse(substitute(.)) recovers the
# text of the column expression being processed so the year can be pulled
# out of the column name for discounting back to 2018 — this relies on
# funs()'s lazy-evaluation behaviour; confirm before upgrading dplyr.
mutate_at(vars(starts_with("profit_post_closure_post_tax")),
funs(npv = . / (1 + parameter_data$discount_rate) ^ (as.numeric(stri_extract_all_regex(deparse(substitute(.)), "[0-9]+")) - 2018))) %>%
rename_at(vars(ends_with("npv")), funs(paste0("profit_npv_post_closure_post_tax_", stri_extract_all_regex(., "[0-9]+")))) %>%
# Terminal value: a growing perpetuity on the 2050 NPV profit.
mutate(profit_npv_post_closure_post_tax_terminal = profit_npv_post_closure_post_tax_2050 * (gamma_factor / ( 1 + parameter_data$discount_rate - gamma_factor)))
print("aggregation")
subsidiary_results <- run_results2 %>%
select(scenario, company_id, company, market, region, market_cap_2017, market_cap_model, !!(variables)) %>%
# Calculate model company division level 'value impairment' index:
# total NPV of post-2017 profits relative to 2017 market cap, minus 1.
# The capped version blends large upside gains toward profit_cap, except
# for "GR_" (growth) markets which are left uncapped.
mutate(index = (rowSums(.[grep("profit_npv_post_closure_post_tax", names(.))]) - profit_npv_post_closure_post_tax_2017) / market_cap_2017 - 1,
index_cap = case_when(grepl("GR_", market) ~ index,
index >= (1 + parameter_data$profit_cap) ~ parameter_data$profit_cap * parameter_data$profit_cap_weight + index * (1 - parameter_data$profit_cap_weight),
TRUE ~ index))
print("aggregation2")
# Summarise by user-defined categories.
# Aggregates `subsidiary_results` (read from the enclosing environment) to
# the grouping given by `...` (bare column names, e.g. scenario, region):
# market caps and all profit columns are summed within each group, then the
# value-impairment indices are recomputed at the aggregated level. The
# capped index is rebuilt from the summed capped profit so that capping is
# applied at subsidiary level before aggregation.
# Returns an ungrouped tibble/data frame of group-level results.
summarise_results <- function(...) {
group_vars <- enquos(...)
temp <- subsidiary_results %>%
group_by(!!!group_vars) %>%
# Capped profit implied by the capped index, so it can be summed and the
# capped index re-derived after aggregation.
mutate(profit_capped = (index_cap + 1) * market_cap_2017) %>%
# dplyr's funs() is deprecated; a purrr-style lambda is equivalent here
# and keeps the original column names (single, unnamed function).
summarise_at(vars(market_cap_2017, market_cap_model, starts_with("profit_post_closure_pre_tax"),
starts_with("profit_post_closure_post_tax"), starts_with("profit_npv_post_closure_post_tax"),
profit_capped),
~ sum(.x, na.rm = TRUE)) %>%
ungroup() %>%
# Group-level value-impairment index and its capped counterpart.
mutate(index = (rowSums(.[grep("profit_npv_post_closure_post_tax", names(.))]) - profit_npv_post_closure_post_tax_2017) / market_cap_2017 - 1,
index_cap = profit_capped / market_cap_2017 - 1) %>%
select(-profit_capped)
return(temp)
}
# Aggregate subsidiary results to the three reporting levels.
region_market_results <- summarise_results(scenario, region, market)
market_results <- summarise_results(scenario, market)
region_results <- summarise_results(scenario, region)
# Set attributes so that parameter values can be recalled for run results when needed
attr(subsidiary_results, "parameters") <- parameter_data
attr(region_market_results, "parameters") <- parameter_data
attr(market_results, "parameters") <- parameter_data
attr(region_results, "parameters") <- parameter_data
# Return all four result sets (subsidiary level plus the three aggregates).
return(list(subsidiary_results, region_market_results, market_results, region_results))
} |
39a7f17d5e3faa467d9e41e6a65995156bd8fe84 | 1c5f7b31c775e1e2de918813233d46ff08c49a05 | /man/robustArchetypes.Rd | a332b0c369d92ea3ef6337a1113db39120f8e5a1 | [] | no_license | cran/archetypes | 3f4f251bf97ef5b0e2887debbc5f19aa8c4c3e60 | c4b904b4b83392724ab1bb68dad2046150aba448 | refs/heads/master | 2021-06-04T18:10:36.846001 | 2019-04-22T05:55:33 | 2019-04-22T05:55:33 | 17,694,433 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 670 | rd | robustArchetypes.Rd | \name{robustArchetypes}
\alias{robustArchetypes}
\title{Robust archetypes}
\usage{
robustArchetypes(data, k, familyBlocks = list(), ...)
}
\arguments{
\item{familyBlocks}{Exchange predefined family blocks;
see \code{\link{archetypesFamily}}.}
\item{data}{A numeric \eqn{n \times m} data matrix.}
\item{k}{The number of archetypes.}
\item{...}{Additional arguments for family blocks.}
}
\value{
An object of class \code{robustArchetypes} and
\code{\link{as.archetypes}}.
}
\description{
Robust archetypes
}
\seealso{
Other archetypes: \code{\link{archetypesFamily}};
\code{\link{archetypes}}; \code{\link{as.archetypes}};
\code{\link{weightedArchetypes}}
}
|
20144399aa668c15caa6b8dc446f8c3ed2a2aaf7 | b0251a873cda6b236dc46a71c0d7ac8d403dda28 | /DTR/man/CHRdata.Rd | 2d1a40d90c173a3f2252a929a453056fd5206331 | [] | no_license | yhy188/rosur | 161a836f477ca85d091da3974fb640b372ddddb0 | 4e8d5ddd3e4102a187173232c040d53241621636 | refs/heads/master | 2021-01-16T10:58:23.970230 | 2020-03-18T02:18:06 | 2020-03-18T02:18:06 | 243,092,781 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,152 | rd | CHRdata.Rd | \name{CHRdata}
\docType{data}
\alias{CHRdata}
\title{
CHR data set
}
\description{
This data set was generated from sequentially randomized clinical trials as described in the simulation study of Tang and Wahed (2013) [Epub ahead of print]. It contains the following variables: "X" is the first-stage indicator, X=0 if assigned to A1, and X=1 if assigned to A2; "R" is the response status, R=1 for responders, and R=0 for non-responders; "Z" is the second-stage indicator among responders (R=1), Z=0 if assigned to B1, and Z=1 if assigned to B2; "U" is the observed survival time, U is death time if delta=1, and U is censoring time if delta=0; "delta" is the censoring indicator, delta=1 for event, and delta=0 for censored; and "V1" and "V2" are covariates.
}
\usage{
CHRdata
}
\format{
A data frame with rows corresponding to patients.
}
\references{
Tang X, Wahed AS: Cumulative hazard ratio estimation for treatment regimes in sequentially randomized clinical trials. Statistics in Biosciences, 2013 [Epub ahead of print]
}
\source{
Generated by Xinyu Tang in R
}
\examples{
\dontrun{
data("CHRdata")}
}
\keyword{datasets} |
52c7f42c9a284a62d3cb3b3fd9b9dd9fe5a26f2f | dc31b9a98c191cc38a822c05d7eec77c76ba923c | /descriptive_stats_fp.R | 0c53942dfb872955d2ed1d38983bfbfb2e1c879b | [] | no_license | saitouvic/Geog-418-Final-Project | 8fb368660de021f66ed93868972d34f30a64e5c3 | 751e82a9780a1f051cd39db452aba26c7e2f5e4e | refs/heads/main | 2023-02-04T20:27:35.334482 | 2020-12-08T04:17:27 | 2020-12-08T04:17:27 | 318,067,795 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,136 | r | descriptive_stats_fp.R | ##Descriptive stats for income and pollution data
# Package setup for the descriptive-statistics / Moran's I analysis.
# Install any missing dependencies once, instead of unconditionally calling
# install.packages() on every run (which needs network access, is slow, and
# fails for "grid", a base package that is not on CRAN).
cran_pkgs <- c("lubridate", "gtable", "gridExtra", "ggplot2",
               "dplyr", "bcmaps", "maps")
for (pkg in cran_pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
# bcmapsdata is distributed from the bcgov drat repository, not CRAN.
if (!requireNamespace("bcmapsdata", quietly = TRUE)) {
  install.packages("bcmapsdata", repos = "https://bcgov.github.io/drat/")
}
library(lubridate)
library(gtable)
library(gridExtra)
library(grid)
library(ggplot2)
library(dplyr)
library(bcmaps)
library(bcmapsdata)
# NOTE(review): raster was never installed by the original script; it is
# assumed to be present already — confirm.
library(raster)
library(maps)
# Descriptive statistics for median income (income.tracts$Income) and PM2.5
# (pm2.5$PM25). Both objects are assumed to be created by an earlier script
# in this project — TODO confirm they are in the workspace before sourcing.
#Mean
meanIncome <- mean(income.tracts$Income, na.rm = TRUE)
meanPollution<- mean(pm2.5$PM25, na.rm = TRUE)
#Standard Deviation
sdIncome <- sd(income.tracts$Income, na.rm = TRUE)
sdPollution<- sd(pm2.5$PM25, na.rm = TRUE)
#Mode: most frequent value (first name of the frequency table sorted
#descending). Ties are broken arbitrarily by sort order.
modeIncome <- as.numeric(names(sort(table(income.tracts$Income), decreasing = TRUE))[1])
modePollution <- as.numeric(names(sort(table(pm2.5$PM25), decreasing = TRUE))[1])
#Median
medIncome <- median(income.tracts$Income, na.rm = TRUE)
medPollution <- median(pm2.5$PM25, na.rm = TRUE)
#Skewness
#NOTE(review): skewness()/kurtosis() are not provided by any package loaded
#above — presumably e1071 or moments is attached elsewhere; verify.
skewIncome <- skewness(income.tracts$Income, na.rm = TRUE)[1]
skewPollution <- skewness(pm2.5$PM25, na.rm = TRUE)[1]
#Kurtosis
kurtIncome <- kurtosis(income.tracts$Income, na.rm = TRUE)[1]
kurtPollution <- kurtosis(pm2.5$PM25, na.rm = TRUE)[1]
#Coefficient of variation (as a percentage)
CoVIncome <- (sdIncome / meanIncome) * 100
CoVPollution <- (sdPollution / meanPollution) * 100
#Normal distribution test (Shapiro-Wilk p-value; small p = non-normal)
normIncome_PVAL <- shapiro.test(income.tracts$Income)$p.value
normPollution_PVAL <- shapiro.test(pm2.5$PM25)$p.value
#####
#Create a table of descriptive stats.
#NOTE(review): several of these names (sd, median, mode, skewness, kurtosis)
#shadow base/package functions for the rest of the session — intentional
#here but worth renaming if this script grows.
samples = c("Income", "Pollution") #Row labels
means = c(meanIncome, meanPollution) #Means
sd = c(sdIncome, sdPollution) #Standard deviations
median = c(medIncome, medPollution) #Medians
mode <- c(modeIncome, modePollution) #Modes
skewness <- c(skewIncome, skewPollution) #Skewness
kurtosis <- c(kurtIncome, kurtPollution) #Kurtosis
CoV <- c(CoVIncome, CoVPollution) #Coefficients of variation
normality <- c(normIncome_PVAL, normPollution_PVAL) #Shapiro-Wilk p-values
#Round all statistics to 3 decimal places for presentation.
means <- round(means, 3)
sd <- round(sd, 3)
median <- round(median, 3)
mode <- round(mode, 3)
skewness <- round(skewness, 3)
kurtosis<- round(kurtosis, 3)
CoV <- round(CoV, 3)
data.for.table1 = data.frame(samples, means, sd, median, mode)
data.for.table2 = data.frame(samples, skewness, kurtosis, CoV, normality)
#Make table 1
table1 <- tableGrob(data.for.table1, c("","")) #make a table "Graphical Object" (GrOb)
t1Caption <- textGrob("Table 1: Measure of Dispersion for Income and Pollution data", gp = gpar(fontsize = 09))
padding <- unit(5, "mm")
#Add a caption row above the table grob.
table1 <- gtable_add_rows(table1,
heights = grobHeight(t1Caption) + padding,
pos = 0)
table1 <- gtable_add_grob(table1,
t1Caption, t = 1, l = 2, r = ncol(data.for.table1) + 1)
#Make table 2 the same way.
table2 <- tableGrob(data.for.table2, c("",""))
t2Caption <- textGrob("Table 2: Relative Position for Income and Pollution data", gp = gpar(fontsize = 09))
padding <- unit(5, "mm")
table2 <- gtable_add_rows(table2,
heights = grobHeight(t2Caption) + padding,
pos = 0)
table2 <- gtable_add_grob(table2,
t2Caption, t = 1, l = 2, r = ncol(data.for.table2) + 1)
#Draw both tables on the active graphics device.
grid.arrange(table1, newpage = TRUE)
grid.arrange(table2, newpage = TRUE)
#Write each table to a PNG file in the working directory.
png("Measureofdispersion_data.png") #Open a PNG device
grid.arrange(table1, newpage = TRUE)
dev.off() #Close the device, flushing the file
png("Relative_position_data.png") #Open a PNG device
grid.arrange(table2, newpage = TRUE) #Draw the table
dev.off()
# NOTE(review): this stray total area looks like an interactive check —
# confirm it is still needed.
sum(census.tracts$Shape_Area)
##Global Moran's I and local Moran's I
#######################
# income.tracts is assumed to be a SpatialPolygonsDataFrame (sp); poly2nb /
# nb2lines come from spdep and tm_* from tmap — presumably loaded elsewhere,
# since neither package is attached above; verify.
head(income.tracts@data)
incomeCleanCols <- c("DAUID","CDNAME","CDTYPE", "CSDUID", "CSDNAME",
"CSDTYPE", "CCSUID", "ERUID","ERNAME", "CMAPUID", "CMAUID",
"CMANAME", "SACTYPE", "CTUID","CTNAME", "PRUID",
"Shape_Leng", "Shape_Area", "Income")
incomeClean <- income.tracts[incomeCleanCols]
# Queen-contiguity neighbours (default), rendered as a line network for mapping.
income.nb <- poly2nb(income.tracts)
income.net <- nb2lines(income.nb, coords=coordinates(income.tracts))
crs(income.net) <- crs(income.tracts)
tm_shape(income.tracts) + tm_borders(col='lightgrey') +
tm_shape(income.net) + tm_lines(col='red')
# Rook-contiguity neighbours (queen = FALSE) overlaid for comparison.
income.nb2 <- poly2nb(income.tracts, queen = FALSE)
income.net2 <- nb2lines(income.nb2, coords=coordinates(income.tracts))
crs(income.net2) <- crs(income.tracts)
tm_shape(income.tracts) + tm_borders(col='lightgrey') +
tm_shape(income.net) + tm_lines(col='blue', lwd = 2) +
tm_shape(income.net2) + tm_lines(col='yellow', lwd = 2)
########################
# Row-standardised ("W") spatial weights from the queen neighbours;
# zero.policy allows polygons with no neighbours.
income.lw <- nb2listw(income.nb, zero.policy = TRUE, style = "W")
print.listw(income.lw, zero.policy = TRUE)
########################
########################
# Global Moran's I for median income under the queen-contiguity weights.
# (A stray `numeric_version(income.tracts, strict = TRUE)` call was removed
# here: numeric_version() expects a character vector and errors on a
# SpatialPolygonsDataFrame.)
mi <- moran.test(income.tracts$Income, income.lw, zero.policy = TRUE)
mi
# Theoretical range of Moran's I for a weights matrix: the extreme
# eigenvalues of the symmetrised weights matrix.
moran.range <- function(lw) {
  wmat <- listw2mat(lw)
  # eigen() stores its eigenvalues in the `values` component; the original
  # `$Income` component does not exist, so range() received NULL and failed.
  range(eigen((wmat + t(wmat)) / 2)$values)
}
moran.range(income.lw)
# z-score for the observed statistic: (observed - expected) / standard
# deviation. moran.test()'s estimate holds the statistic, its expectation
# and its variance, in that order.
mI <- mi$estimate[[1]]
eI <- mi$estimate[[2]]
var <- mi$estimate[[3]]   # shadows base::var within this script
z <- (mI - eI) / sqrt(var)
########################
# Local Moran's I (LISA) per census tract: statistic, expectation, variance,
# z-score, and p-value are stored as new columns on income.tracts.
lisa.test <- localmoran(income.tracts$Income, income.lw)
income.tracts$Ii <- lisa.test[,1]
income.tracts$E.Ii<- lisa.test[,2]
income.tracts$Var.Ii<- lisa.test[,3]
income.tracts$Z.Ii<- lisa.test[,4]
income.tracts$P<- lisa.test[,5]
########################
# Choropleth of the local Moran's I values (Fisher-Jenks class breaks).
map_LISA <- tm_shape(income.tracts) +
tm_polygons(col = "Ii",
title = "Local Moran's I",
style = "fisher",
palette = "Greens", n = 10)
map_LISA
########################
# Moran scatterplot: income against its spatially lagged value; the slope
# of the fitted line corresponds to global Moran's I.
moran.plot(income.tracts$Income, income.lw, zero.policy=TRUE, spChk=NULL, labels=NULL, xlab="Income",
ylab="Spatially Lagged Income", quiet=NULL)
########################
|
8c0b163c9389568179a65d7503a3a9a3d5cf8434 | 7a7375245bc738fae50df9e8a950ee28e0e6ec00 | /man/SA3__Year_SexemploymentToPopulation.Rd | c18df0fec5a20b9ea229ac8f322d9b77272e38d3 | [] | no_license | HughParsonage/Census2016.DataPack.TimeSeries | 63e6d35c15c20b881d5b337da2f756a86a0153b5 | 171d9911e405b914987a1ebe4ed5bd5e5422481f | refs/heads/master | 2021-09-02T11:42:27.015587 | 2018-01-02T09:01:39 | 2018-01-02T09:02:17 | 112,477,214 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 474 | rd | SA3__Year_SexemploymentToPopulation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SA3__Year_SexemploymentToPopulation.R
\docType{data}
\name{SA3__Year_SexemploymentToPopulation}
\alias{SA3__Year_SexemploymentToPopulation}
\title{Sex by SA3, Year}
\format{2,148 observations and 4 variables.}
\usage{
SA3__Year_SexemploymentToPopulation
}
\description{
Employment-to-population ratio, by Sex, SA3, and Census Year.
}
\keyword{datasets}
|
bfa6767f6a8b645f49cad847d3c76f74dd198fef | 2b88d8b269b336ac3112c84675950a0d3877ced0 | /tests/testthat/test-run_auto_mount.R | f19abb2d3e94067a628b2c4e32cdff208f2046b2 | [
"MIT"
] | permissive | dynverse/babelwhale | 71187be3b87cba91710b277d9bc3e3cce2c1db74 | 61e5ebfef09b6159c0a9524adeae9fe2da9215be | refs/heads/master | 2023-07-26T00:34:25.120015 | 2023-07-14T19:28:32 | 2023-07-14T19:28:32 | 147,789,735 | 24 | 6 | NOASSERTION | 2022-08-18T09:14:38 | 2018-09-07T07:48:17 | R | UTF-8 | R | false | false | 1,527 | r | test-run_auto_mount.R | configs <- list(
docker = create_docker_config(),
singularity = create_singularity_config(cache_dir = tempdir())
)
# Run the same test suite against every configured backend (Docker and
# Singularity). NOTE(review): a leftover `config <- configs[[1]]` from
# interactive development was removed here — it was immediately shadowed by
# the loop variable below and never used.
for (config in configs) {
  context(paste0("Testing ", config$backend))

  # Make the backend under test the default used by run()/run_auto_mount().
  set_default_config(config, permanent = FALSE)

  skip_on_cran()
  skip_on_github_actions()

  test_that(paste0("run_auto_mount can mount files on ", config$backend), {
    # warm up: pull the image / start the backend before timing-sensitive calls
    output <- run("alpine", "echo", "hello")

    # A host path passed as an argument should be mounted into the container.
    output <-
      run_auto_mount(
        container_id = "alpine",
        command = "cat",
        args = c(file = system.file("DESCRIPTION", package = "babelwhale")
        )
      )

    expect_equal(
      strsplit(output$stdout, "\n", fixed = TRUE)[[1]][[1]],
      "Package: babelwhale" # first line of DESCRIPTION
    )
    expect_equal(output$status, 0)
  })

  test_that(paste0("run_auto_mount wd arg works on ", config$backend), {
    # `wd` mounts a host directory and makes it the working directory.
    output <-
      run_auto_mount(
        container_id = "alpine",
        command = "ls",
        wd = system.file(package = "babelwhale")
      )

    expect_match(
      output$stdout,
      "DESCRIPTION" # should be a DESCRIPTION file in babelwhale package dir
    )
    expect_equal(output$status, 0)
  })

  test_that(paste0("run_auto_mount wd_in_container arg works on ", config$backend), {
    # `wd_in_container` sets a container-side working directory (no mount).
    output <-
      run_auto_mount(
        container_id = "alpine",
        command = "pwd",
        wd_in_container = "/bin"
      )

    expect_equal(
      output$stdout,
      "/bin\n"
    )
    expect_equal(output$status, 0)
  })
}
5daecbe6df0cf43d69443c23eb42d353b774233b | ec13b2a8368d7fd7749552ef0123476a5d802058 | /Read instancia_ejemplo.R | 34e81c91a1f9588e4e1b1a8a6af1adbe2654ea7d | [] | no_license | HLiNaKRbCsFr/Trabajos | 190331073ffe18acfb78d28ec9551ba457645124 | 8db2309aff28913024abd79acce98ee9e05920e2 | refs/heads/master | 2020-03-28T19:08:28.143419 | 2018-12-07T01:41:19 | 2018-12-07T01:41:19 | 148,948,479 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 70 | r | Read instancia_ejemplo.R | read.table(file = "C:/users/lenovo/downloads/instancia_ejemplo.txt")
|
ad059f6fa0d40e31760f89db636d5f950fe43c96 | bc5aa2493a04fab4ab76a54c135b6c91bb50a921 | /ppr_test.R | b117ef4cc4a5334803a9cc98f31c8ad98d9a2c98 | [] | no_license | reking/stat852 | b3caba3d5ecfbb1e61874e955cb3e43b2fbdcd1d | e704cfe9f4f49b43fda64211a78188bf9cfabc97 | refs/heads/master | 2021-01-10T05:04:34.782068 | 2016-01-08T06:08:18 | 2016-01-08T06:08:18 | 44,140,619 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 913 | r | ppr_test.R | prostate <- read.table("~/stat852/data/Prostate.csv", header=TRUE, sep=",", na.strings=" ")
head(prostate)
# Standardise the eight predictor columns (cols 2:9) to unit standard
# deviation. "column.de" holds the per-column sds used for scaling.
column.de <- apply(prostate[,2:9],2,sd)
prostate[,2:9] <- scale(prostate[,2:9], center = TRUE, scale = column.de)
# Build training (set == 1) and validation (set == 2) subsets; column 10 is
# the response.
# NOTE(review): these subsets read prostate$set BEFORE the random split two
# lines below (re)assigns it — confirm the ordering is intentional, e.g.
# that the CSV already ships a `set` column.
y.1 <- prostate[which(prostate$set==1),10]
x.1 <- as.matrix(prostate[which(prostate$set==1),c(2:9)])
y.2 <- prostate[which(prostate$set==2),10]
x.2 <- as.matrix(prostate[which(prostate$set==2),c(2:9)])
# Random ~50/50 train/validation split (fixed seed for reproducibility).
set.seed(120401002)
prostate$set <- ifelse(runif(n=nrow(prostate))>0.5, yes=2, no=1)
####################################################################
## One terms
####################################################################
# Projection pursuit regression with a single term on the airquality data
# (not the prostate subsets above).
# Using default smoother, increasing quality of optimizer.
ppr1 <- ppr(data=airquality, Ozone~scale(Wind)+scale(Temp), nterms=1, optlevel=3)
summary(ppr1)
# The term weights and the coefficients on the terms.
ppr1$alpha
ppr1$beta
|
73b4541f07c70c74fb55f1dbe7934c0ce30b2b4b | b10c971ed284df735e2bbc17213c3ae0902cc139 | /CA2/part1.r | bfaba66496e022687a4edc6092700051c4778679 | [] | no_license | daniel-saeedi/EngineeringProbabilityStatistics | 320e33b2576548b66cbf3a1ca852c72ab5b73d0e | 63ee22e63568cec9599bcbb734f32b053dde3773 | refs/heads/master | 2023-03-07T11:07:50.517598 | 2021-02-20T22:13:23 | 2021-02-20T22:13:23 | 307,182,842 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 612 | r | part1.r | library(corrplot)
# Load the countries data set. Arguments are spelled out: the original
# `head = TRUE` only worked through R's partial argument matching of
# `header`, and the separator was passed positionally.
elements <- read.csv("./countries.csv", header = TRUE, sep = ",")
df <- data.frame(elements)

# Treat empty strings as missing values.
df[df==""] <- NA

# Replace NA values with the mean of their column.
# NOTE(review): mean() of a non-numeric column is NA (with a warning), so
# this imputation only has an effect on numeric columns — confirm that all
# columns of countries.csv are numeric.
for (i in seq_len(ncol(df))) {
  df[is.na(df[, i]), i] <- mean(df[, i], na.rm = TRUE)
}

# Pairwise correlations between all variables, shown numerically and as a
# corrplot. ("matrix" shadows base::matrix for the rest of the session.)
matrix <- data.matrix(df)
correlation_matrix <- cor(matrix)
correlation_matrix
corrplot(correlation_matrix)

# Scatter plot of Agriculture (x) against Birthrate (y) with a lowess trend.
plot(df$Agriculture, df$Birthrate, main = "Agriculture vs Birthrate",
     xlab = "Agriculture", ylab = "Birthrate",
     pch = 19, frame = FALSE)
lines(lowess(df$Agriculture, df$Birthrate), col = "blue")

# Simple linear regression.
# NOTE(review): the formula models Agriculture as the response and Birthrate
# as the predictor — the reverse of the plot's axes above; confirm intent.
relation <- lm(Agriculture ~ Birthrate, df)
relation
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.