blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a25fe9b4881ed4f22029b712648abb1d51e8704a | e2970f3553ae0de707709b7e2667a970c983b9c8 | /Rcurl/2017.11.19组织行为学小论文练手/组织行为.R | 152571c0a0b28c28fb1542668a733f99e006afcf | [] | no_license | ZuoRX/Rcurl | 96021e7584235d5f6aae33e4c9b9b7f20755b419 | a443177a1eb024d8faadf393276120809e426957 | refs/heads/master | 2020-05-09T13:41:21.986214 | 2019-04-13T11:53:55 | 2019-04-13T11:53:55 | 181,163,569 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,986 | r | 组织行为.R | rm(list=ls())
###packages################
library(NLP)
library(tm)
library(rJava)
library(Rwordseg)
library(SnowballC)
library(MASS)
library(RColorBrewer)
library(wordcloud)
library(pcaPP)
library(rainbow)
library(Rcpp)
library(cluster)
library(mvtnorm)
library(hdrcde)
library(locfit)
library(ash)
library(KernSmooth)
library(misc3d)
library(rgl)
library(ks)
library(ggplot2)
library(ggmap)
library(maps)
library(mapdata)
library(sp)
library(maptools)
library(grid)
library(vcd)
library(topicmodels)
library(randomForest)
library(rFerns)
library(ranger)
library(Boruta)
library(lattice)
library(caret)
library(slam)
library(Matrix)
library(foreach)
library(glmnet)
library(xlsx)
library(igraph)
library(wordcloud2)
library(e1071)
# Step 1: read the data (paper titles) from the local Excel workbook.
jour<- read.xlsx("C:/Users/lenovo/Desktop/behavir.xlsx",1,encoding="UTF-8")
# Step 2: install domain dictionaries (Sogou .scel lexicons) so the Chinese
# word segmenter recognizes discipline-specific vocabulary.
# NOTE(review): the three positional installDict() calls below are repeated
# with named arguments further down -- the duplicates look unintentional;
# confirm whether both forms are needed.
installDict("D:\\library\\words\\心理学.scel","sougou")
installDict("D:\\library\\words\\项目管理词汇.scel","sougou")
installDict("D:\\library\\words\\社会学专业词库.scel","sougou")
listDict()# list the dictionaries that are currently installed
installDict(dictpath = 'D:\\library\\words\\社会学专业词库.scel',
dictname = "社会学专业词库", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\项目管理词汇.scel',
dictname = "项目管理词汇", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\心理学.scel',
dictname = "心理学", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\社会工作专业词库.scel',
dictname = "社会工作专业词库", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\中国职业名称大全.scel',
dictname = "中国职业名称大全", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\心理学词库.scel',
dictname = "心理学词库", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\教育教学综合词库.scel',
dictname = "教育教学综合词库", dicttype = "scel", load = TRUE)
# Step 3: word segmentation. When working with Chinese text, convert the
# column to character first; prefer English variable names to avoid
# encoding problems.
title <- as.character(jour$标题)
titlewords <- segmentCN(title)  # segment each title into words (was called twice; one call suffices)
# sink("D:\\library\\words\\titlewords.xlsx", append = TRUE, split = TRUE)  # optional: also dump output to a file
# Step 4: compute word frequencies and build a data frame.
term <- lapply(X = titlewords, FUN = strsplit, ' ')
term <- unlist(term)
df <- table(term)                   # frequency table over all words
df1 <- sort(df, decreasing = TRUE)  # sort by frequency, descending (was decreasing = T)
seg3 <- names(df1)                  # the words themselves, most frequent first
df2 <- as.data.frame(df1)
# BUG FIX: original was write.csv(df2, "path") -- "path" was a placeholder
# that produced a file literally named "path".
write.csv(df2, "title_word_freq.csv")
# Drop terms shorter than 3 characters before plotting.
# BUG FIX: original line was
#   wordsFreq <- wordsFreq[-which(nchar(wordsFreq[,1]) < 3), ]
# 'wordsFreq' was never defined (runtime error), and -which(...) empties the
# data frame whenever nothing matches; a logical subset is safe.
df2 <- df2[nchar(as.character(df2$term)) >= 3, ]
wordcloud(df2$term, df2$Freq, min.freq = 10)
|
e58afc1628424f7a9e64980c1f9a06482835015d | 90a3878cc96f72ff2402cbcdfa7e241e891d3053 | /R/IntervalTree-utils.R | 948f1c300cd06bc24c9d1eb65aa2f1707a07500f | [] | no_license | agstudy/IRanges | 108cb82787ea92838e7bbd15c204d2fd7bb3737b | 6381ac0826b3dcb68183a420ea11050dbd531daf | refs/heads/master | 2021-01-20T11:09:58.441445 | 2013-09-26T02:48:46 | 2013-09-26T02:48:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,801 | r | IntervalTree-utils.R | ### =========================================================================
### IntervalTree utilities
### -------------------------------------------------------------------------
## internal generic
## Internal generic (not exported): post-process a self-matching Hits-like
## object. 'select' picks how multiple hits per query are reduced;
## 'ignoreSelf' drops hits of an element to itself; 'ignoreRedundant'
## drops one member of each (i,j)/(j,i) pair.
setGeneric("processSelfMatching", # not exported
function(x, select = c("all", "first", "last", "arbitrary"),
ignoreSelf = FALSE, ignoreRedundant = FALSE)
standardGeneric("processSelfMatching"))
setMethod("processSelfMatching", "Hits",
    function(x, select = c("all", "first", "last", "arbitrary"),
             ignoreSelf = FALSE, ignoreRedundant = FALSE)
    {
        ## FIX: resolve 'select' to a single value up front, as the
        ## "HitsList" and "CompressedHitsList" methods already do. Without
        ## this, calling the method with the default left 'select' as a
        ## length-4 character vector, and `if (select != "all")` below is an
        ## error on R >= 4.2 (and relied on first-element semantics before).
        select <- match.arg(select)
        mat <- as.matrix(x)
        if (ignoreSelf)
            mat <- mat[mat[,1L] != mat[,2L],,drop=FALSE]
        if (ignoreRedundant) {
            ## Normalize each pair so (i,j) and (j,i) collapse to one row.
            norm_mat <- cbind(pmin.int(mat[,1L], mat[,2L]),
                              pmax.int(mat[,1L], mat[,2L]))
            mat <- mat[!duplicated(norm_mat),,drop=FALSE]
        }
        if (select != "all") { # relies on 'mat' sorted by subject
            ## NOTE(review): when 'mat' has 0 rows, seq(nrow(mat), 1) yields
            ## c(0, 1) rather than an empty sequence -- confirm empty Hits
            ## cannot reach this branch with select = "last".
            if (select == "last")
                mat <- mat[seq(nrow(mat), 1),,drop=FALSE]
            .hitsMatrixToVector(mat, queryLength(x))
        } else {
            ## unname() required because in case 'mat' has only 1 row
            ## 'mat[ , 1L]' and 'mat[ , 2L]' will return a named atomic vector
            x@queryHits <- unname(mat[ , 1L])
            x@subjectHits <- unname(mat[ , 2L])
            x
        }
    })
setMethod("processSelfMatching", "HitsList",
    function(x, select = c("all", "first", "last", "arbitrary"),
             ignoreSelf = FALSE, ignoreRedundant = FALSE)
    {
        ## Apply the element-wise method to every Hits in the list, then
        ## wrap the results back up in an object of the appropriate class.
        select <- match.arg(select)
        per_elt <- lapply(x, processSelfMatching, select,
                          ignoreSelf, ignoreRedundant)
        if (select == "all") {
            ## Full Hits structure kept for every list element.
            newList("HitsList", per_elt, subjectOffsets = x@subjectOffsets)
        } else {
            ## Each element reduced to an integer vector of selected hits.
            IntegerList(per_elt)
        }
    })
setMethod("processSelfMatching", "CompressedHitsList",
    function(x, select = c("all", "first", "last", "arbitrary"),
             ignoreSelf = FALSE, ignoreRedundant = FALSE)
    {
        ## Process the unlisted data in one shot, then re-compress the
        ## result with the original partitioning.
        select <- match.arg(select)
        flat <- processSelfMatching(x@unlistData, select = select,
                                    ignoreSelf = ignoreSelf,
                                    ignoreRedundant = ignoreRedundant)
        out_class <- if (select == "all") "CompressedHitsList"
                     else "CompressedIntegerList"
        new2(out_class, unlistData = flat, partitioning = x@partitioning)
    })
## not for exporting, just a debugging utility
IntervalTreeDump <- function(object) {
.IntervalTreeCall(object, "dump")
}
|
507e48bbc8d059f79ed47f1a6806dc5b3d252e45 | a8cf4321e8025d73512718eb680b16ecebfbed26 | /LWR.R | 7a4cd02740249c04c79cc78fad246130ca0644ae | [] | no_license | XileiDai/BuildingEnergyPred | a36ffb84a2b4c5db515567dace3d8c8426027fa6 | ea427008d459840f8631f15cca53fc0b6f7db656 | refs/heads/master | 2022-12-18T16:13:13.385167 | 2020-08-26T13:45:22 | 2020-08-26T13:45:22 | 290,159,491 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,511 | r | LWR.R | # setwd()
library(chron)
library(zoo)
library(stringr)
library(readxl)
library(timeDate)
library(grDevices);
ramp <- colorRamp(c("red", "white","blue"));
Sys.setlocale("LC_ALL","English")
Sys.setenv(TZ="Asia/Shanghai")
file_name = list.files('Hour-record/')
all_data = data.frame(row.names = NULL)
holiday = read_excel('holiday.xlsx')
holiday$holiday = as.character(holiday$holiday)
type_total = c('W','Q')
is_holiday_total = c(TRUE, FALSE)
ratio_median = matrix(data = NA, nrow = 4, ncol = 24)
ratio_attir = matrix(data = NA, nrow = 4, ncol = 2)
d = 1
for(type in type_total){
for(is_holiday in is_holiday_total){
k=1
ratio = matrix(nrow = length(file_name) * 4, ncol = 24)
for(i in file_name){
if(str_detect(i, paste(type,'.csv',sep = ''))){
data_i = read.csv(paste('Hour-record/',i,sep=''))
data_i = data_i[!is.na(data_i$Record),]
data_i$holiday = data_i$day %in% holiday$holiday
weekend = !isWeekday(data_i$day)
data_i$holiday = data_i$holiday | weekend
data_i$Time = as.POSIXlt(data_i$Time, format = '%Y-%m-%d %H:%M:%S')
data_i$hour = data_i$Time$hour
data_i$month = months(data_i$Time)
data_i$season = 'winter'
data_i$season[data_i$month == 'February' |data_i$month == 'March'|data_i$month == 'April'] = 'spring'
data_i$season[data_i$month == 'May' |data_i$month == 'June'|data_i$month == 'July'] = 'summer'
data_i$season[data_i$month == 'August' |data_i$month == 'September'|data_i$month == 'October'] = 'autumn'
for(j in c('winter', 'spring', 'summer', 'autumn')){
data_j = data_i[data_i$season == j & data_i$holiday == is_holiday,]
ratio[k,] = tapply(data_j$Record, data_j$hour, mean, na.rm = T)
k = k+1
}
}
}
for(i in 1:nrow(ratio)){
ratio[i,] = ratio[i,]/max(ratio[i,])
}
ratio_median[d,] = apply(ratio, 2, median, na.rm=T)
ratio_attir[d,] = c(type, is_holiday)
d = d+1
for(i in 1:4){
season = c('winter.jpg', 'spring.jpg', 'summer.jpg', 'autumn.jpg')
jpeg(paste('Result_figure/Result',type,is_holiday, season[i], sep = '-'))
plot(ratio[i,], ylim = c(0,1), col = rgb(23,45,255, maxColorValue = 255))
k = i+4
j=2
while(k<=nrow(ratio)){
lines(ratio[k,])
k = k+4
j=j+1
}
dev.off()
}
}
}
result = cbind(ratio_attir, ratio_median)
write.csv(result, 'hourly_median.csv', row.names = FALSE,col.names = FALSE)
|
23c5528df59b92a02f0d7bc9ffbdd6bd74d76097 | e23fad4d8d1d673a3a846d064e0814ef775a16fd | /final.R | b99d878897ee8172c1ead3a265163b348107bf6a | [] | no_license | juangarciaa/final | e021a3a41b960343a60e8081829cfaf119607cf9 | 29646b51288736f3098fcdc46d6cd4be2efd40c4 | refs/heads/master | 2021-07-08T14:08:21.653887 | 2017-10-07T18:30:00 | 2017-10-07T18:30:00 | 106,119,779 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 45 | r | final.R | library(slidify)
author("nombre",use_git = F) |
6d6ee5547867808bd17caecb6d01efab7221caf1 | 38b58e85b243ce4de7410257af66c6b5144136d3 | /R/cg.R | 0ffd67c5b201487ba9ce3f8cc61c8c3757c8c126 | [] | no_license | meerapatelmd/skyscraper | 47c444947f3278a174ae46dc97c6b011380d6d2f | f40c8a745f9f1efadb551bab400d22a2b304c9b4 | refs/heads/master | 2023-02-03T05:25:34.036567 | 2020-12-22T15:31:35 | 2020-12-22T15:31:35 | 296,966,513 | 2 | 0 | null | 2020-12-07T15:49:16 | 2020-09-19T23:12:45 | R | UTF-8 | R | false | false | 8,758 | r | cg.R | #' @title
#' Run CancerGov Scrape and Store
#'
#' @description
#' Run the full sequence that scrapes, parses, and stores the NCI Drug Dictionary found at CancerGov.org and any correlates to the NCI Thesaurus in a Postgres Database.
#'
#' @section
#' Web Source Types:
#' The NCI Drug Dictionary has 2 data sources that run in parallel. The first source is the Drug Dictionary itself at \href{https://www.cancer.gov/publications/dictionaries/cancer-drug}{https://www.cancer.gov/publications/dictionaries/cancer-drug}. The other source are the individual drug pages, called Drug Detail Links in skyscraper, that contain tables of synonyms, including investigational names.
#'
#' @section
#' Drug Dictionary:
#' The listed drug names and their definitions are scraped from the Drug Dictionary HTML and updated to a Drug Dictionary Table in a `cancergov` schema.
#'
#' @section
#' Drug Detail Links:
#' The links to Drug Pages are scraped from the Data Dictionary URL over the maximum page number and are saved to a Drug Link Table in the `cancergov` schema. The URLs in the Drug Link Table are then scraped for any HTML Tables of synonyms and the results are written to a Drug Link Synonym Table. The links to active clinical trials and NCIt mappings are also derived and stored in their respective tables.
#'
#'
#' @param conn Postgres connection object.
#' @param conn_fun (optional) An expr as a string that can be parsed and evaluated into a connection object. If present, it is used in lieu of the `conn` argument and disconnects the connection on exit.
#' @param steps The sequence of steps, labeled by the internal function that is called. A step can be skipped if it is removed from the list, but the order in which they are called does not change. Adding any erroneous values to this list does not have an effect. Default: c("log_drug_count", "get_dictionary_and_links", "process_drug_link_synonym", "process_drug_link_url", "process_drug_link_ncit", "get_ncit_synonym", "update_cancergov_drugs").
#' @param max_page maximum page number to iterate the scrape over in the "https://www.cancer.gov/publications/dictionaries/cancer-drug?expand=ALL&page=" path, Default: 50
#'
#' @inheritParams scrape
#' @inheritParams xml2::read_html
#' @inheritParams pg13::query
#'
#' @export
#' @rdname cg_run
#' @family run functions
#' @importFrom cli cat_line cat_rule
cg_run <-
        function(conn,
                 conn_fun,
                 steps = c("log_drug_count",
                           "get_dictionary_and_links",
                           "process_drug_link_synonym",
                           "process_drug_link_url",
                           "process_drug_link_ncit",
                           "get_ncit_synonym",
                           "update_cancergov_drugs"),
                 max_page = 50,
                 sleep_time = 5,
                 encoding = "",
                 options = c("RECOVER", "NOERROR", "NOBLANKS"),
                 expiration_days = 30,
                 verbose = TRUE,
                 render_sql = TRUE) {
                ## NOTE(review): closeAllConnections() closes *every* open R
                ## connection, not only those opened here -- confirm this
                ## blanket cleanup is intended.
                on.exit(expr = closeAllConnections(),
                        add = TRUE)
                if (!missing(conn_fun)) {
                        ## 'conn_fun' is an expression string; evaluate it to
                        ## obtain the connection and schedule its disconnect
                        ## after the other exit handlers.
                        conn <- eval(rlang::parse_expr(conn_fun))
                        on.exit(pg13::dc(conn = conn, verbose = verbose),
                                add = TRUE,
                                after = TRUE)
                }
                ## Always ensure the cancergov schema tables exist first.
                cli::cat_line()
                cli::cat_rule("Creating Tables")
                start_cg(conn = conn,
                         verbose = verbose,
                         render_sql = render_sql)
                ## Each step below runs only if it is named in 'steps'; the
                ## order of execution is fixed regardless of the order given.
                if ("log_drug_count" %in% steps) {
                        cli::cat_line()
                        cli::cat_rule("Logging Drug Count")
                        log_drug_count(conn = conn,
                                       verbose = verbose,
                                       render_sql = render_sql)
                }
                if ("get_dictionary_and_links" %in% steps) {
                        ## Scrape dictionary pages 1..max_page for
                        ## definitions and drug-detail links.
                        cli::cat_line()
                        cli::cat_rule("Scraping the Drug Dictionary for Definitions and Links")
                        get_dictionary_and_links(conn = conn,
                                                 max_page = max_page,
                                                 sleep_time = sleep_time,
                                                 verbose = verbose,
                                                 render_sql = render_sql)
                }
                if ("process_drug_link_synonym" %in% steps) {
                        ## Visit each stored drug link and harvest synonym
                        ## tables; links fresher than 'expiration_days' are
                        ## handled by the callee.
                        cli::cat_line()
                        cli::cat_rule("Scraping Each Drug Page for Synonyms")
                        process_drug_link_synonym(conn = conn,
                                                  sleep_time = sleep_time,
                                                  expiration_days = expiration_days,
                                                  encoding = encoding,
                                                  options = options,
                                                  verbose = verbose,
                                                  render_sql = render_sql)
                }
                if ("process_drug_link_url" %in% steps) {
                        cli::cat_line()
                        cli::cat_rule("Scraping Each Drug Page for URLs to Other Related Resources")
                        process_drug_link_url(conn = conn,
                                              sleep_time = sleep_time,
                                              expiration_days = expiration_days,
                                              encoding = encoding,
                                              options = options,
                                              verbose = verbose,
                                              render_sql = render_sql)
                }
                if ("process_drug_link_ncit" %in% steps) {
                        ## Derive NCI Thesaurus codes from the harvested URLs.
                        cli::cat_line()
                        cli::cat_rule("Extracting URLs for any NCIt Codes")
                        process_drug_link_ncit(conn = conn,
                                               verbose = verbose,
                                               render_sql = render_sql,
                                               expiration_days = expiration_days)
                }
                if ("get_ncit_synonym" %in% steps) {
                        cli::cat_line()
                        cli::cat_rule("Scraping NCIt for Synonyms Using NCIt Codes")
                        get_ncit_synonym(conn = conn,
                                         sleep_time = sleep_time,
                                         expiration_days = expiration_days,
                                         verbose = verbose,
                                         render_sql = render_sql)
                }
                if ("update_cancergov_drugs" %in% steps) {
                        ## Final step: append newly-found drugs to the
                        ## CANCERGOV_DRUGS table.
                        cli::cat_line()
                        cli::cat_rule("Appending CANCERGOV_DRUGS Table with New Diffs")
                        update_cancergov_drugs(conn = conn,
                                               verbose = verbose,
                                               render_sql = render_sql)
                }
        }
#' @title
#' Search Cancer.gov
#'
#' @description
#' Search the NCI Drug Dictionary using the API. Note that, unlike ChemiDPlus
#' (`cdp_search`), the results are not stored in the database in this package
#' version.
#'
#' @param search_term Term to search the NCI Drug Dictionary for.
#' @param size Maximum number of results to return, Default: 1000
#' @param matchType Type of match the API should perform against the search term, Default: 'Begins'
#' @param crawl_delay Number of seconds to wait before issuing the request, Default: 5
#' @seealso
#' \code{\link[httr]{GET}},\code{\link[httr]{content}}
#' \code{\link[jsonlite]{toJSON, fromJSON}}
#' @rdname cg_search
#' @export
#' @importFrom httr GET content
#' @importFrom jsonlite fromJSON
cg_search <-
        function(search_term,
                 size = 1000,
                 matchType = "Begins",
                 crawl_delay = 5) {
                ## Be polite to the API: always wait before the request.
                Sys.sleep(crawl_delay)
                endpoint <- "https://webapis.cancer.gov/drugdictionary/v1/Drugs/search"
                query_params <- list(query = search_term,
                                     matchType = matchType,
                                     size = size)
                resp <- httr::GET(url = endpoint,
                                  query = query_params)
                ## Extract the body as UTF-8 text and parse the JSON payload.
                body_text <- httr::content(x = resp,
                                           as = "text",
                                           encoding = "UTF-8")
                jsonlite::fromJSON(txt = body_text)
        }
|
6d4f6017b4614cd531565f0ccbe86b7c1574dcbf | 137140fc25db7904c6924b054f0239e5b1e98fea | /Final.R | cabbdd32713d001f579a422ac5b035fa206afb68 | [] | no_license | whryan/psych_205_final | 459061396767bebec64c0bd4f8bdedde2f2cbb2a | e7ba60ab9fd6781009d39e012c0a99d8d9aa37c8 | refs/heads/master | 2020-03-15T15:28:28.026491 | 2018-05-07T07:06:56 | 2018-05-07T07:06:56 | 132,212,922 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,920 | r | Final.R | # Psych 205. Final Exam. Spring 2017. Name: Ulysses
# Theunissen
set.seed(7777) # Used to ensure replicability
# Make sure you have these libraries
library(car)
library(nlme)
library(lme4)
library(MASS)
#optional
library(Hmisc) #I use this for the describe() function
################################################################
# Problem 1. Personality Problems?
################################################################
# Load the data set
# Change the statement below to go to the working directory where you used to save the data file
person <- read.csv("C:/Users/User/Dropbox/Drive Sync/Berkeley Post Bac/Spring 2018 Classes/Psych 205 - Data Analysis/Final Exam Code/ft_personality.csv")
# View the data
View(person)
# This is a classic dataset in personality psychology. It has:
# bfi1 - bfi44: forty-four questions measuring five personality traits
# BFIE, BFIA, BFIC, BFIN, BFIO: the average 'scale scores' for the five traits
# BFIE - extraversion
# BFIA - agreeableness
# BFIC - conscientiousness
# BFIN - neuroticism
# BFIO - openness
# sexF: gender (M = male; F = female)
# ethF: ethnicity (AS = asian; W = white; L = latino; M = middle east; AA = african american; O = other)
# incF: family household income (different levels)
# ed.dadF: father's highest level of education (different levels)
# ed.momF: mother's highest level of education (different levels)
# Question 1.1
# Visualize the data. Generate 2 separate box plots (boxplot()) and 2 separated
# interaction plots (interaction.plot()) to show
# how average scale scores for extraversion and openness vary as a function
# of the interaction between ethnicity and gender.
# Comment on what you observe.
boxplot(person$BFIO ~ person$sexF*person$ethF)
boxplot(person$BFIE ~ person$sexF*person$ethF)
interaction.plot(person$sexF,person$ethF, person$BFIO)
interaction.plot(person$sexF,person$ethF, person$BFIE)
# Question 1.2
# For each personality trait, fit a model and perform the appropriate test to determine whether gender, ethnicity and their interaction affect a participant's scale score on the trait.
# Note: For this question you should fit 5 separate models rather than grouping the DVs into a single variable.
# Make sure to include the following as comments in your R code:
# a. Specify the name of the statistical test as you would report in a paper.
# b. Summarize the results in a couple of sentences.
#These are two way ANOVAs (they are comparing a continuous DV
# vs the interaction between two categorical, inependant groups)
lm_e = lm(BFIE ~ sexF*ethF, data=person)
summary(lm_e)
lm_a = lm(BFIA ~ sexF*ethF, data=person)
summary(lm_a)
lm_ic = lm(BFIC ~ sexF*ethF, data=person)
summary(lm_ic)
lm_in = lm(BFIN ~ sexF*ethF, data=person)
summary(lm_in)
lm_io = lm(BFIO ~ sexF*ethF, data=person)
summary(lm_io)
#For most of these none appear to bbe significant - none of relationships, with the exception of the intercept
#have a significant term on them
#this implies that there is not a statistically signiicany interaction here
# Question 1.3.
# Are there correlations between the scale scores for the five traits in the personality test? In theory, these should be small.
# Calculate the correlation table for all of the scale measures (traits), display it, and comment on the results.
# Note: You will have to deal with missing datavalues!. Use the help for the "cor" function to determine good options.
# Write a couple of concluding sentences, describing the results and stating what might be an alternative to the 5 tests.
#Subset to just the scale scores
scale_only = (person[,47:51])
cor(scale_only, use="na.or.complete")
# BFIE BFIA BFIC BFIO BFIN
#BFIE 1.00000000 0.03851062 0.1379402 0.14129299 -0.25842476
#BFIA 0.03851062 1.00000000 0.3306293 0.30089556 -0.32864909
#BFIC 0.13794023 0.33062931 1.0000000 0.21728708 -0.30927625
#BFIO 0.14129299 0.30089556 0.2172871 1.00000000 -0.00524156
#BFIN -0.25842476 -0.32864909 -0.3092762 -0.00524156 1.00000000
# There are correlations here, but relatively few
#Some of the strongest correlations are between agreeablenes and openness, for example
#but even here, there is only a .3 coefficient, which is not a very high correlation
#one alternative to running these different correlation tests is to just do a PCA
#Analysis and see that there are not principle components which explain a lot of this data
################################################################
# Problem 2. Exercise, Anarexia and Age
################################################################
# Visualize a longitidunal study that examined whether anorexic patients
# are not only eating less but exercising more.
View(Blackmore) # Warning on old versions of car this data set is Blackmoor - change if it does not work for you
bmore = Blackmore
describe(bmore)
# The Blackmore dataframe has the the number of hours per week of exercise (exercise)
# for different subjects (subject), taken repeatidly at different ages (age).
# The subjects are divided into 'control' and 'patient' (group).
# Patient are the eating-disordered patients.
# Question 2.1.
# Use a scatter plot to display exercise as a function of age and using different symbols for control and patient.
#WRONG
#scatterplot(exercise ~ age | group, data=bmore)
scatterplot(exercise ~ age*group, data = Blackmore)
# Question 2.2.
# Specify the right model and perform the correct test to determine whether
# exercise is different between the two groups (control vs patient) after taking
# into account the effect of age.
# In a short sentence state your conclusion
# Because it is a within-subject longitidunal test we need to use a mixed-effect modeling
mod.exercise <- lme(exercise ~ age*group, random = ~ 1|subject, data = Blackmore)
summary(mod.exercise)
#BEing a patient has a significant negative coefficient, which seems to suggest that
#patients exercise less than healthy controls do
#age has a positive coefficient, suggesting that older people generally exercise more
#this coeff is also significant, though less so than the patient one
#finally, the interaction between patient and age is significant and positive, suggesting that
# the effet of age for patients is even more pronounced, with a higher positive slow
#So older people exercise more, anorexixs exercise less all else equal, but as they age the effects of ageon exrcise are more pronounced
# Question 2.3.
# On the same plot from 2.1, draw the two curves obtained from your
# model.
scatterplot(exercise ~ age*group, data = Blackmore)
abline( a = mod.exercise$coefficients$fixed[1], b=mod.exercise$coefficients$fixed[2], lty='dashed')
abline( a= mod.exercise$coefficients$fixed[1] + mod.exercise$coefficients$fixed[3], b = mod.exercise$coefficients$fixed[2] + mod.exercise$coefficients$fixed[4], lty='dashed', col = 'red' )
#add for interactionis!!!
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#(Intercept) 0.14751 0.71296 0.207 0.8361
#grouppatient -4.14647 0.90140 -4.600 4.80e-06 ***
# age 0.13263 0.06159 2.153 0.0315 *
# grouppatient:age 0.47968 0.07711 6.221 7.44e-10 ***
################################################################
# Problem 3
################################################################
# Fertility in transition.
# view the data set.
View(swiss)
describe(swiss)
# This data contains fertility rates (Fertility) in 47 counties in Switzerland in 1888 at
# a time of transition (to a more developped and industrialized economy country). The variable Agriculture contains the
# percentage % of Swiss males (no female data was collected for this dataset :-( ) involved in agriculture, and Examination and Education are two measures of
# the education level. Catholic is the % of the population
# that is Catholic (as opposed to Protestant). Infant.Mortality is Infant Mortality. (type ?swiss for more information)
# Question 3.1.
# Use a scatter plot matrix to visualize how Fertility is
# affected by all the other variables.
scatterplotMatrix(~ Fertility + Agriculture + Examination + Education + Catholic + Infant.Mortality, data=swiss)
# Question 3.2
# You may have noticed that counties are divided into mostly
# Catholic and mostly Protestant. To deal with this bi-modal distribution
# make a new categorical variable "Religion" that is 'C' when
# Catholic is greater than 50% and 'P' otherwise. Generate a new scatter plot
# matrix that separates the data between Catholic and Protestants (different slopes for different folks!)
swiss$Religion = ifelse(swiss$Catholic>50, 'C','P')
scatterplotMatrix(~ Fertility + Agriculture + Examination + Education + Catholic + Infant.Mortality | Religion, by.group=TRUE, data=swiss)
# Question 3.3
# Is the fertility rate different in Catholic vs. Protestant counties?
# Perform the modeling and statistical analysis to answer this question WITHOUT
# taking into account any of the other variables.
# Notice from the distribution in the diagonal of your scatter plot matrix
# that the probability of Fertility gvien C or P is well approximated by a
# normal distribution
# What is the difference in Fertility rate between the Catholic vs. Protestant
# counties? You should calculate this difference directly from the data.
# Summarize your results in a single sentence.
swiss_m1 = lm(Fertility ~ Religion, data=swiss)
summary(swiss_m1)
#lm(formula = Fertility ~ Religion, data = swiss)
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#(Intercept) 76.461 2.725 28.063 <2e-16 ***
# ReligionP -10.240 3.469 -2.952 0.005 **
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Residual standard error: 11.56 on 45 degrees of freedom
#Multiple R-squared: 0.1623, Adjusted R-squared: 0.1436
#F-statistic: 8.716 on 1 and 45 DF, p-value: 0.004998
actual_mean_diff = mean(swiss$Fertility[swiss$Religion=="C"]) - mean(swiss$Fertility[swiss$Religion=="P"])
#Catholic countries have a mean 10 higher than protestant countries.
#This difference is confirmed significant by an lm with fertility as DV and
#religion as DV (ie an Anova)
#And, this difference is significant as shown by the significant p value of the
#Coefficient for religion
# Question 3.4
# Perform a permutation analysis to calculate obtain a p-value for the average
# difference in Fertility between Protestant and Catholic counties using 1000 permutated subsamples of the original data.
# Compare the p-value calculated for your permutation test to the p-value you obtained in 3.3
# Permutation test: shuffle the Religion labels and recompute the C - P
# mean difference under the null of no group difference.
f <- swiss$Fertility
n_perm <- 1000
results <- numeric(n_perm)
# BUG FIX: the original loop was `for (i in range(1, 1000))`. range() returns
# c(1, 1000), so only iterations 1 and 1000 ever ran and `results`
# (initialized as 1:1000) kept 998 untouched placeholder values, which
# invalidated the p-value below.
for (i in seq_len(n_perm)) {
  fake_religion <- sample(swiss$Religion, replace = FALSE)  # shuffled labels
  results[i] <- mean(f[fake_religion == "C"]) - mean(f[fake_religion == "P"])
}
# Proportion of permuted differences exceeding the observed one
# (one-sided permutation p-value).
prop.table(table(results > actual_mean_diff))
#FREDERIC WAY
nBoot <- 9999 # 10000 including the observed statistic gives stable p-values
nP <- sum(swiss$Religion == 'P')
n <- nrow(swiss)
diff.Boot <- array(0, dim = nBoot)
for (i in 1:nBoot) {
  ind.G1 <- sample(n, nP)  # a random "Protestant-sized" set of counties
  diff.Boot[i] <- mean(swiss$Fertility[ind.G1]) - mean(swiss$Fertility[-ind.G1])
}
# BUG FIX: the original compared against abs(diff.Fertility.CP), a variable
# never defined in this script; the observed C - P difference computed in
# Q3.3 is actual_mean_diff.
pval <- sum(abs(diff.Boot) > abs(actual_mean_diff)) / (nBoot + 1)
# Question 3.5
# Test again for the effect of Religion on Fertility but this time
# after taking into account all the other regressors as well.
# From the summary of the model, what is the new difference in Fertility between the two groups?
# Explain the difference with the analysis performed in 3.4
# Multiple regression: Religion effect adjusted for all other predictors.
swiss_mfull = lm(Fertility ~ Agriculture + Examination + Education + Infant.Mortality + Religion, data=swiss)
summary(swiss_mfull)
#Coefficients:
#                   Estimate Std. Error t value Pr(>|t|)
# (Intercept)      73.57310   11.62576   6.328 1.48e-07 ***
# Agriculture      -0.15718    0.07447  -2.111 0.040953 *
# Examination      -0.37581    0.27934  -1.345 0.185915
# Education        -0.79949    0.19813  -4.035 0.000232 ***
# Infant.Mortality  1.16404    0.40361   2.884 0.006229 **
# ReligionP        -6.00889    3.30560  -1.818 0.076408 .
#---
# Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#Residual standard error: 7.591 on 41 degrees of freedom
#Multiple R-squared:  0.6709, Adjusted R-squared:  0.6308
#F-statistic: 16.72 on 5 and 41 DF,  p-value: 5.531e-09
Anova(swiss_mfull)
#FRED EXPLANATION
# The difference in Fertility is now only 6% and it is not significant at the 5% level (p=0.07)
# Confounding factors such as Education and Mortality explained some of the differences between groups.
#MY EXPLANATION
# Religion is now only marginally significant. This makes sense
# if we think that there was an interaction and a third variable actually
# explained things better.
# Question 3.6
# Perform a second permutation analysis to test the average difference in Fertility
# between Catholic and Protestant counties where you now take into account all the
# other regressors to test the difference between Catholic and Protestant counties.
# Hint: Generate a model for one group to obtain a prediction for the other group and test
# whether that difference in prediction could have been obtained with random groups of similar sizes.
# First with the real data: fit on Catholic counties, predict Protestant ones.
swiss.mod.C <- lm(Fertility ~ Agriculture + Examination + Education + Infant.Mortality, data=swiss, subset = Religion=='C')
sel <- swiss$Religion == 'P'
predicted.Fertility.P <- predict(swiss.mod.C, newdata=swiss[sel,])
diff.Actual.Predicted <- mean(swiss$Fertility[sel]) - mean(predicted.Fertility.P)
# The difference in prediction is about -11 fertility points.
nBoot <- 9999 # let's do 10000 to get good values of p
nP <- sum(swiss$Religion == 'P')
n <- nrow(swiss)
diff.BootFull <- array(0, dim = nBoot)
for (i in 1:nBoot) {
ind.G1 <- sample(n, nP)
# Make a model with the data that excludes the rows in group 1.
swiss.mod.Boot <- lm(Fertility ~ Agriculture + Examination + Education + Infant.Mortality, data=swiss, subset = -ind.G1)
# Predict data in group 1
predicted.Fertility.G1 <- predict(swiss.mod.Boot, newdata=swiss[ind.G1,])
# Save the difference
diff.BootFull[i] <- mean(swiss$Fertility[ind.G1]) - mean(predicted.Fertility.G1)
}
# Two-sided permutation p-value, counting the observed statistic once.
pval <- sum(abs(diff.BootFull)>abs(diff.Actual.Predicted))/(nBoot+1)
# Here I get a pval of 0.0003 suggesting that there is a
# statistical difference in Fertility rates between counties that are Catholic and Protestant.
# We have a situation where the Normal assumption is too conservative.
# Question 3.7
# Compare your full model from 3.5 to a model that excludes the two
# regressors that capture the educational level: Education and Examination.
# Perform a significance test for the model comparison in R, then replicate
# this result by calculating the F value and p value "by hand" (i.e.,
# meaning from the correct sum of square errors and degrees of freedom)
# NOTE(review): `swiss_med` below actually *keeps* Examination and Education
# and drops Agriculture/Infant.Mortality relative to the full model -- the
# opposite of what the question asks; the "FREDERIC WAY" section further
# down fits the intended reduced model.
swiss_med = lm(Fertility ~ Religion + Examination + Education, data=swiss)
summary(swiss_med)
#Coefficients:
#             Estimate Std. Error t value Pr(>|t|)
#(Intercept)  84.8357     3.0311  27.988  < 2e-16 ***
#ReligionP    -6.3011     3.7038  -1.701  0.09612 .
#Examination  -0.1861     0.3149  -0.591  0.55766
#Education    -0.7047     0.2120  -3.324  0.00182 **
#Residual standard error: 8.794 on 43 degrees of freedom
#Multiple R-squared:  0.5367,	Adjusted R-squared:  0.5043
#F-statistic:  16.6 on 3 and 43 DF,  p-value: 2.605e-07
# Nested-model F test; `swiss_mfull` is the full model fit earlier (Q3.5,
# outside this chunk).
(anova(swiss_mfull, swiss_med))
#Analysis of Variance Table
# Model 1: Fertility ~ Agriculture + Examination + Education + Infant.Mortality +
#    Religion
#Model 2: Fertility ~ Religion + Examination + Education
#Res.Df    RSS Df Sum of Sq      F    Pr(>F)
#1     41 2362.4
#2     43 3325.8 -2    -963.4 8.3602 0.0009012 ***
# F value and p value "by hand" for the comparison of swiss_mfull (full
# model, k2 coefficients) against swiss_med (reduced model, k1 coefficients),
# using the residual sums of squares of the two fits.
k1 = length(swiss_med$coefficients)     # parameters in the reduced model
k2 = length(swiss_mfull$coefficients)   # parameters in the full model
n = nrow(swiss_mfull$model)             # observations used in the fit
# F = ((SS1-SS2)/(k2-k1))/(SS2/(n-k2))
# (BUG FIX: removed `k = length(mod3$coefficients)` -- `mod3` is never
# defined in this script, so that line errored at run time, and `k` was
# never used afterwards.)
preds2 = predict(swiss_mfull)   # fitted values, full model
preds1 = predict(swiss_med)     # fitted values, reduced model
SS2 = sum((swiss$Fertility - preds2)^2)   # RSS, full model
SS1 = sum((swiss$Fertility - preds1)^2)   # RSS, reduced model
F_val = ((SS1-SS2)/(k2-k1))/(SS2/(n-k2))
# Upper-tail F probability; lower.tail = FALSE is the numerically stable
# equivalent of 1 - pf(...).  (The old `require(stats)` was dropped: stats
# is attached by default in every R session.)
pfval = pf(F_val, df1 = k2-k1, df2 = n-k2, lower.tail = FALSE)
#FREDERIC WAY
# We had this above.
swiss.modFull <- lm(Fertility ~ Agriculture + Examination + Education + Infant.Mortality + Religion, data=swiss)
# The reduced model
# (drops both education-level regressors, Examination and Education, as the
# question asked)
swiss.mod.NoEd <- lm(Fertility ~ Agriculture + Infant.Mortality + Religion, data=swiss)
# The statistical comparison
anova(swiss.mod.NoEd, swiss.modFull)
# Education has a highly significant effect on Fertility.
# F value by hand
# ($coeff works via partial matching of the lm component `coefficients`)
k1 <- length(swiss.mod.NoEd$coeff)    # parameters in the reduced model
k2 <- length(swiss.modFull$coeff)     # parameters in the full model
n <- nrow(swiss)
SS1 <- sum(swiss.mod.NoEd$residuals^2)   # RSS, reduced model
SS2 <- sum(swiss.modFull$residuals^2)    # RSS, full model
Fval <- ((SS1-SS2)/(k2-k1))/(SS2/(n-k2))
# This will return the probability
# (upper-tail p-value of the nested-model F test)
pf(Fval, k2-k1, n-k2, lower.tail = FALSE)
#################################################################.
# Problem 4. Zebra finch call types.
#################################################################
# Load the data -
finch <- read.csv("C:/Users/User/Dropbox/Drive Sync/Berkeley Post Bac/Spring 2018 Classes/Psych 205 - Data Analysis/Final Exam Code/zebraFinchCalls.csv")# The first column of this data frame has the call type:
# DC : Distance Call
# LT : Long Tonal Call - the disctance call produced by Juveniles
# Be : Begging Call
# The 21 other columns are acoustical parameters extracted from the calls
# Question 4.1.
# Perform an analysis to determine whether the three types of calls
# can be distinguished based on their acoustical parameters. How many dimensions are significant?
#TODO: This should be an LDA analysis
require(MASS)
require(car)
# Linear discriminant analysis: predict CallType from every other column.
finch_lda <- lda(CallType ~ ., finch)
finch_lda
#Proportion of trace:
#   LD1    LD2
#0.9387 0.0613
#There are two significant dimensions
#It would appear that they can be
# Question 4.2.
# Make a standard scatter plot showing yourthe results from 4.1
# Hint - use the standard plot for the model that you used and for nice colors
# give the optional color argument as : col = as.integer(ZFCalls$CallType)
# plot.lda scatters the observations on the two discriminant axes, colored
# by call type.
plot(finch_lda,col = as.integer(finch$CallType))
# Question 4.3
# Determine the percentage of average classification
# using 10 cross-validation sets where 20% of the data is saved
# each time for validation. Use a flat prior (ie. equal probability) (4pts)
per_corrs = numeric(10)    # one accuracy value per CV repeat (preallocated)
percent_train = .8         # 80% of rows train, 20% held out for validation
for(i in 1:10){
# BUG FIX: the training rows must be sampled *before* fitting the LDA.
# The original called lda(..., subset = train) first, when `train` was not
# yet defined, so the first iteration failed with "object 'train' not
# found" (and later iterations would have reused the previous split).
train = sample(1:nrow(finch),floor(nrow(finch)*percent_train), replace=FALSE)
finch_lda <- lda(CallType ~ ., finch, prior = c(1,1,1)/3, subset = train)
# Classify the held-out rows and score the proportion correct.
preds = predict(finch_lda, finch[-train, ])$class
actual = finch$CallType[-train]
per_corrs[i] = sum(preds==actual)/length(preds)
}
mean(per_corrs)
#94% correct
#FREDERIC METHOD BELOW
# Question 4.1. (2 pt)
# Perform ananalysis to determine whether the three types of calls
# can be distinguished based on their acoustical parameters. How many dimensions are significant?
# NOTE(review): this section uses `ZFCalls`, which is never created in this
# chunk -- presumably the instructor's name for the same CSV loaded above
# as `finch`; define ZFCalls before running (e.g. ZFCalls <- finch).
ZFCalls.mod <- lda(CallType ~ ., data=ZFCalls)
ZFCalls.mod
# Two discriminant functions are siginificant.
# The plot command gives a nice visual display. It looks
# like the three dimensions are well separated.
# Question 4.2. (1 pt)
# Make a standard scatter plot showing yourthe results from 4.1
# Hint - use the standard plot for the model that you used and for nice colors
# give the optional color argument as : col = as.integer(ZFCalls$CallType)
plot(ZFCalls.mod, col = as.integer(ZFCalls$CallType))
# Question 4.3 (3 pt)
# Determine the percentage of average classification
# using 10 cross-validation sets where 20% of the data is saved
# each time for validation. Use a flat prior (ie. equal probability) (4pts)
nsets <- 10
pcorrect <- array(0, dim=nsets)   # accuracy of each CV split
n <- nrow(ZFCalls)
nvalid <- floor(n*0.2)            # 20% of rows held out each round
for (i in 1:nsets) {
valid <- sample(n, nvalid)   # validation rows; train on the complement
ZF.mod.cv <- lda(CallType ~ ., data=ZFCalls, prior = c(1,1,1)/3, subset = -valid)
predict.ZF <- predict(ZF.mod.cv, ZFCalls[valid, ])$class
pcorrect[i] <- sum(predict.ZF == ZFCalls$CallType[valid])/nvalid
}
pcorrect.avg <- mean(pcorrect)
# I get a percent correct classification of 93%!
#################################################################
# Extra Credit question.
#################################################################
# Perform a principal component analysis on the 44 questions used to assess the personality traits in Problem 2
# and determine the percent of the variance explained by the first
# 10 PC. You will have to deal with missing values!
#Explanation http://www.sthda.com/english/wiki/print.php?id=207
#Data description
# bfi1 - bfi44: forty-four questions measuring five personality traits
# BFIE, BFIA, BFIC, BFIN, BFIO: the average 'scale scores' for the five traits
# BFIE - extraversion
# BFIA - agreeableness
# BFIC - conscientiousness
# BFIN - neuroticism
# BFIO - openness
# sexF: gender (M = male; F = female)
# ethF: ethnicity (AS = asian; W = white; L = latino; M = middle east; AA = african american; O = other)
# incF: family household income (different levels)
# ed.dadF: father's highest level of education (different levels)
# ed.momF: mother's highest level of education (different levels)
names(person)
# Keep only the 44 question columns (bfi1-bfi44) and drop respondents with
# any missing answer, since princomp() cannot handle NAs.
# (simplified from person[,c(names(person[1:44]))], which selected the same
# first 44 columns by name)
person_qs = person[, 1:44]
person_qs = person_qs[complete.cases(person_qs),]
pca_qs = princomp(person_qs, scores=TRUE)
summary(pca_qs)
# Percent of variance per component = eigenvalue / total variance.
# (renamed from `var` so the script does not mask stats::var())
eig = (pca_qs$sdev)^2
var_pct = eig*100/sum(eig)
cumvar = cumsum(var_pct)
plot(cumvar[1:10])
cumvar[10]
#67% of variance is explained
|
150c4315ebc2afcdc84f11d5f9871f97f910371f | 17f0fc9beb96136a5c41c2a7ef6a0e4418f60e6b | /quiz1question5.R | 11f50f7b51bc057d59957d93c01bc04205e5aa2e | [] | no_license | bradlindaas/getdata | c0d938bd0cbf39633217372ed732ff57e0cea933 | 5bae447ce3e4469c3f8bcbae2c7dea6ea7a05be0 | refs/heads/master | 2016-09-05T09:13:28.934924 | 2014-06-23T20:01:46 | 2014-06-23T20:01:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 960 | r | quiz1question5.R | library(data.table)
# Quiz 1, question 5: compare several ways of computing the mean of
# pwgtp15 split by SEX, timing each approach with system.time().
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
if (!file.exists("data")) {
dir.create("data")
}
# Download the CSV only once; later runs reuse the cached copy.
if (!file.exists("./data/quiz1question5.csv")) {
download.file(fileURL, destfile = "./data/quiz1question5.csv", method="curl")
}
DT <- fread("./data/quiz1question5.csv")
# Each candidate from the quiz (A-F) is timed; the grouped means are kept
# so correctness can be inspected afterwards.
resultA <- system.time(A<-tapply(DT$pwgtp15,DT$SEX,mean))
resultB <- system.time(B<-sapply(split(DT$pwgtp15,DT$SEX),mean))
# this fails resultC <- system.time({rowMeans(DT)[DT$SEX==1]; rowMeans(DT)[DT$SEX==2]})
resultD <- system.time(D<-DT[,mean(pwgtp15),by=SEX])
resultE <- system.time({E<-mean(DT[DT$SEX==1,]$pwgtp15); mean(DT[DT$SEX==2,]$pwgtp15)})
# NOTE(review): assigning to `F` masks the FALSE shorthand for the rest of
# the session (kept here only because the quiz labels the options A-F).
# mean() has no `by` argument, so `by=` is silently ignored here.
resultF <- system.time(F<-mean(DT$pwgtp15,by=DT$SEX))
A # correct results
B # correct results
# C didn't even process
D # correct results
E # didn't return the results split by sex
F # didn't return the results split by sex
# Compare the timings of the three approaches that gave correct answers.
results <- rbind(resultA, resultB, resultD)
results
|
9c09a20ac93817bb9d8821212a26c1fbb8c6ef1f | 7e7f80fdb7f14647795147284735daf2de3e4bf5 | /Figure4/Figure4.R | 282c3a070a59bb24e975544373e61cf12c731017 | [] | no_license | Jovanderson/AlternativeSplicing | 0f20acd209595d205160daa7c692a9d93eb6c189 | d1f30726a61f51c0e0f56fcd90bbe3a77fa4eb20 | refs/heads/master | 2022-02-26T10:55:06.401113 | 2019-10-18T18:51:14 | 2019-10-18T18:51:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,122 | r | Figure4.R | library(ggplot2)
library(scales)
library(reshape2)
library(cowplot)
library(ggpmisc)
theme_set(theme_cowplot())
# Colors for the two LHY alternative-splicing (AS) event types.
AS1 <- "#F7882F"
AS2 <- "#4484CE"
# Long-format table of log(AS/FS) splicing ratios over zeitgeber time (ZT).
LHY_ratio <- read.csv("LHY_ratio_time2.txt", sep="\t")
# Fix the legend/color order of the two AS events.
LHY_ratio$AS = factor(LHY_ratio$AS, levels=c("I1R", "I5R"))
# LHY: jittered points of log(AS/FS) over ZT plus a loess smooth per AS
# event.  The grey rectangles mark the dark periods (ZT -2..0 and 12..24).
# The `outfit=fit2<<-..y..` mapping is a side-effect trick: it captures the
# smoother's fitted y-values into the global `fit2` when the plot is drawn,
# for the peak/trough extraction below.
LHY_ratio_time <- ggplot(data=LHY_ratio, aes(x=ZT, y=LHY_ratio)) +
annotate("rect", xmin = -2, xmax = 0, ymin = -2.7, ymax = 0.9, alpha = .5, fill = "#e3e3e3")+
annotate("rect", xmin = 12, xmax = 24, ymin = -2.7, ymax = 0.9, alpha = .5, fill = "#e3e3e3")+
scale_colour_manual(values=c(AS1, AS2))+
scale_fill_manual(values=c(AS1, AS2))+
#scale_shape_manual(values=c(15,16,17,18))+
geom_jitter(aes(col = AS), position=position_jitter(0.2), size = 2) +
#stat_summary(fun.y=mean, geom="line", size = 1.0)+
geom_smooth(aes(group = AS, colour = AS, fill=AS, outfit=fit2<<-..y..))+
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.55))+
scale_y_continuous(breaks=seq(-2.7,0.9,0.9), name="log(AS/FS)", limits=c(-2.7,0.9))+
theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75),
text = element_text(size=18),
axis.ticks = element_blank(),
axis.line = element_blank(),
#axis.text.x = element_text(size=18),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.text.y = element_text(size=18),
#axis.text.y = element_blank(),
#axis.title.y = element_blank(),
legend.position = "none"
)
# Drawing the plot is what triggers the <<- capture into `fit2`.
LHY_ratio_time
#amplitude 1.88, peak at ZT22-23, through at ZT07
#method to identify the peak and though of the smoothed fit
# `fit2` holds the smoother's fitted values captured above, stacked one
# group after the other.  The slicing assumes 80 fitted points per group
# (geom_smooth's default evaluation grid); rows 1:60 are assumed to be the
# first series over the displayed ZT range and 81:140 the second --
# TODO confirm these offsets if the number of groups or x-limits change.
fit2 <- data.frame(fit2)
fit3 <- fit2[1:60, ]
fit4 <- fit2[81:140, ]
fit5 <- cbind(fit3, fit4)
fit5 <- data.frame(fit5)
# which(..., arr.ind=TRUE) gives the row of the extreme value; the row
# index is mapped back to ZT hours via the x-range [-2, 24.55] spread over
# 80 grid points: ZT = ((row-1)*26.55/80) - 2.
maxFS0 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE))
maxFS <- ((maxFS0$row-1)*26.55/80)-2
maxAS0 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE))
maxAS <- ((maxAS0$row-1)*26.55/80)-2
minFS0 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE))
minFS <- ((minFS0$row-1)*26.55/80)-2
minAS0 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE))
minAS <- ((minAS0$row-1)*26.55/80)-2
# Peak/trough ZT of both series, rounded to whole hours.
FinalMaxMin <- cbind(maxFS, minFS, maxAS, minAS)
round(FinalMaxMin)
AS3 <- "#8FC33A"
AS5 <- "#105989"
#AS5 <- "#FC4A1A"
PRR37_ratio <- read.csv("PRR37_ratio_time.txt", sep="\t")
#PRR37_ratio$AS = factor(LHY_ratio$AS, levels=c("E3S","I3R","I6R","I7R"))
PRR37_ratio$AS = factor(LHY_ratio$AS, levels=c("I6R","I7R","I3R"))
PRR37_ratio_time <- ggplot(data=PRR37_ratio, aes(x=ZT, y=PRR37_ratio)) +
annotate("rect", xmin = -2, xmax = 0, ymin = -2.7, ymax = 0.9, alpha = .5, fill = "#e3e3e3")+
annotate("rect", xmin = 12, xmax = 24, ymin = -2.7, ymax = 0.9, alpha = .5, fill = "#e3e3e3")+
scale_colour_manual(values=c(AS3, AS4, AS5))+
scale_fill_manual(values=c(AS3, AS4, AS5))+
geom_jitter(aes(col = AS), position=position_jitter(0.2), size = 2) +
#stat_summary(fun.y=mean, geom="line", size = 1.0)+
#geom_smooth(colour = "black")+
geom_smooth(aes(group = AS, colour = AS, fill=AS, outfit=fit2<<-..y..))+
#geom_smooth(aes(col = AS))+
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.55))+
scale_y_continuous(breaks=seq(-2.7,0.9,0.9), name="log(AS/FS)", limits=c(-2.7,0.9))+
theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75),
text = element_text(size=18),
axis.ticks = element_blank(),
axis.line = element_blank(),
#axis.text.x = element_text(size=18),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
#axis.text.y = element_text(size=18),
axis.text.y = element_blank(),
axis.title.y = element_blank(),
legend.position = "none"
)
PRR37_ratio_time
#amplitude 0.39, peak at ZT07, through at ZT17-18
#method to identify the peak and though of the smoothed fit
# Same extraction as for LHY, but here three smoothed series were captured
# into `fit2` (one per AS event), assumed to occupy rows 1:80, 81:160 and
# 161:240 -- TODO confirm if the number of groups changes.
fit2 <- data.frame(fit2)
fit3 <- fit2[1:60, ]
fit4 <- fit2[81:160, ]
fit4b <- fit2[161:240, ]
fit5 <- cbind(fit3, fit4, fit4b)
fit5 <- data.frame(fit5)
# Map each extreme's row index back to ZT: ((row-1)*26.55/80) - 2.
maxFS0 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE))
maxFS <- ((maxFS0$row-1)*26.55/80)-2
maxAS0 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE))
maxAS <- ((maxAS0$row-1)*26.55/80)-2
minFS0 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE))
minFS <- ((minFS0$row-1)*26.55/80)-2
minAS0 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE))
minAS <- ((minAS0$row-1)*26.55/80)-2
maxAS10 <- data.frame(which(fit5 == max(fit5$fit4b), arr.ind=TRUE))
maxAS1 <- ((maxAS10$row-1)*26.55/80)-2
minAS10 <- data.frame(which(fit5 == min(fit5$fit4b), arr.ind=TRUE))
minAS1 <- ((minAS10$row-1)*26.55/80)-2
# Peak/trough ZT of all three series, rounded to whole hours.
FinalMaxMin <- cbind(maxFS, minFS, maxAS, minAS, maxAS1, minAS1)
round(FinalMaxMin)
#AS4 <- "#FECE00"
#AS3 <- "#8FC33A"
#AS5 <- "#105989"
# Color for the single PRR37 exon-skipping event (E3S).
AS6 <- "#FC4A1A"
PRR37_ratio_S <- read.csv("PRR37_ratio_time_S.txt", sep="\t")
# BUG FIX: the original read `factor(LHY_ratio_S$AS, ...)`, but no object
# called `LHY_ratio_S` is ever created in this script (copy-paste slip),
# so the line errored.  The factor is built from this table's own column.
PRR37_ratio_S$AS = factor(PRR37_ratio_S$AS, levels=c("E3S"))
# PRR37 E3S panel: same layout as the other ratio panels, with the
# `outfit=fit2<<-..y..` side-effect capture of the smoothed curve.
PRR37_ratio_time_S <- ggplot(data=PRR37_ratio_S, aes(x=ZT, y=PRR37_ratio)) +
annotate("rect", xmin = -2, xmax = 0, ymin = -2.7, ymax = 0.9, alpha = .5, fill = "#e3e3e3")+
annotate("rect", xmin = 12, xmax = 24, ymin = -2.7, ymax = 0.9, alpha = .5, fill = "#e3e3e3")+
scale_colour_manual(values=c(AS6))+
scale_fill_manual(values=c(AS6))+
geom_jitter(aes(col = AS), position=position_jitter(0.2), size = 2) +
#stat_summary(fun.y=mean, geom="line", size = 1.0)+
geom_smooth(aes(group = AS, colour = AS, fill=AS, outfit=fit2<<-..y..))+
#geom_smooth(colour="black")+
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.55))+
scale_y_continuous(breaks=seq(-2.7,0.9,0.9), name="log(AS/FS)", limits=c(-2.7,0.9))+
theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75),
text = element_text(size=18),
axis.ticks = element_blank(),
axis.line = element_blank(),
#axis.text.x = element_text(size=18),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
#axis.text.y = element_text(size=18),
axis.text.y = element_blank(),
axis.title.y = element_blank(),
legend.position = "none"
)
PRR37_ratio_time_S
#amplitude 0.94, peak at ZT0, through at ZT11
# Same peak/trough extraction as above: two captured series assumed at
# rows 1:80 and 81:160 of `fit2`; rows are mapped back to ZT via
# ((row-1)*26.55/80) - 2.
fit2 <- data.frame(fit2)
fit3 <- fit2[1:60, ]
fit4 <- fit2[81:140, ]
fit5 <- cbind(fit3, fit4)
fit5 <- data.frame(fit5)
maxFS0 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE))
maxFS <- ((maxFS0$row-1)*26.55/80)-2
maxAS0 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE))
maxAS <- ((maxAS0$row-1)*26.55/80)-2
minFS0 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE))
minFS <- ((minFS0$row-1)*26.55/80)-2
minAS0 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE))
minAS <- ((minAS0$row-1)*26.55/80)-2
# Peak/trough ZT, rounded to whole hours.
FinalMaxMin <- cbind(maxFS, minFS, maxAS, minAS)
round(FinalMaxMin)
# Colors for the two PRR73 AS event types (I2R, I6R).
AS8 <- "#94618E"
AS7 <- "#BE9E0E"
PRR73_ratio <- read.csv("PRR73_ratio_time.txt", sep="\t")
PRR73_ratio$AS = factor(PRR73_ratio$AS, levels=c("I2R","I6R"))
# PRR73 panel, same layout as the panels above; the smoothed curves are
# again captured into the global `fit2` via `outfit=fit2<<-..y..`.
PRR73_ratio_time <- ggplot(data=PRR73_ratio, aes(x=ZT, y=PRR73_ratio)) +
annotate("rect", xmin = -2, xmax = 0, ymin = -2.7, ymax = 0.9, alpha = .5, fill = "#e3e3e3")+
annotate("rect", xmin = 12, xmax = 24, ymin = -2.7, ymax = 0.9, alpha = .5, fill = "#e3e3e3")+
scale_colour_manual(values=c(AS7,AS8))+
scale_fill_manual(values=c(AS7,AS8))+
geom_jitter(aes(col = AS), position=position_jitter(0.2), size = 2) +
#stat_summary(fun.y=mean, geom="line", size = 1.0)+
#geom_smooth(colour="black")+
geom_smooth(aes(group = AS, colour = AS, fill=AS, outfit=fit2<<-..y..))+
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.55))+
scale_y_continuous(breaks=seq(-2.7,0.9,0.9), name="log(AS/FS)", limits=c(-2.7,0.9))+
theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75),
text = element_text(size=18),
axis.ticks = element_blank(),
axis.line = element_blank(),
#axis.text.x = element_text(size=18),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.text.y = element_text(size=18),
#axis.text.y = element_blank(),
#axis.title.y = element_blank(),
legend.position = "none"
)
PRR73_ratio_time
#amplitude 0.86, peak at ZT06, through at ZT17
# Peak/trough extraction, same convention as the blocks above.
fit2 <- data.frame(fit2)
fit3 <- fit2[1:60, ]
fit4 <- fit2[81:140, ]
fit5 <- cbind(fit3, fit4)
fit5 <- data.frame(fit5)
maxFS0 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE))
maxFS <- ((maxFS0$row-1)*26.55/80)-2
maxAS0 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE))
maxAS <- ((maxAS0$row-1)*26.55/80)-2
minFS0 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE))
minFS <- ((minFS0$row-1)*26.55/80)-2
minAS0 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE))
minAS <- ((minAS0$row-1)*26.55/80)-2
FinalMaxMin <- cbind(maxFS, minFS, maxAS, minAS)
round(FinalMaxMin)
# Colors for the three PRR95 AS event types (Altss, I3R, I7R).
AS9 <- "#4ABDAC"
AS10 <- "#EB6E80"
AS11 <- "#1B7B34"
PRR95_ratio <- read.csv("PRR95_ratio_time.txt", sep="\t")
PRR95_ratio$AS = factor(PRR95_ratio$AS, levels=c("Altss", "I3R", "I7R"))
# PRR95 panel; note the y-axis range differs from the earlier panels
# (-2.4..0.6 instead of -2.7..0.9).
PRR95_ratio_time <- ggplot(data=PRR95_ratio, aes(x=ZT, y=PRR95_ratio)) +
annotate("rect", xmin = -2, xmax = 0, ymin = -2.4, ymax = 0.6, alpha = .5, fill = "#e3e3e3")+
annotate("rect", xmin = 12, xmax = 24, ymin = -2.4, ymax = 0.6, alpha = .5, fill = "#e3e3e3")+
scale_colour_manual(values=c(AS9,AS10,AS11))+
scale_fill_manual(values=c(AS9,AS10,AS11))+
geom_jitter(aes(col = AS), position=position_jitter(0.2), size = 2) +
#stat_summary(fun.y=mean, geom="line", size = 1.0)+
geom_smooth(aes(group = AS, colour = AS, fill=AS, outfit=fit2<<-..y..))+
#geom_smooth(colour="black")+
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.55))+
scale_y_continuous(breaks=seq(-2.4,0.6,0.6), name="log(AS/FS)", limits=c(-2.4,0.6))+
theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75),
text = element_text(size=18),
axis.ticks = element_blank(),
axis.line = element_blank(),
#axis.text.x = element_text(size=18),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.text.y = element_text(size=18),
#axis.text.y = element_blank(),
#axis.title.y = element_blank(),
legend.position = "none"
)
PRR95_ratio_time
# Color for the single TOC1 AS event.
AS12 <- "#c41b12"
TOC1_ratio <- read.csv("TOC1_ratio_time.txt", sep="\t")
#TOC1_ratio$AS = factor(TOC1_ratio$AS, levels=c("I2R","I6R"))
# TOC1 panel; same layout and y-range as the PRR95 panel above.
TOC1_ratio_time <- ggplot(data=TOC1_ratio, aes(x=ZT, y=TOC1_ratio)) +
annotate("rect", xmin = -2, xmax = 0, ymin = -2.4, ymax = 0.6, alpha = .5, fill = "#e3e3e3")+
annotate("rect", xmin = 12, xmax = 24, ymin = -2.4, ymax = 0.6, alpha = .5, fill = "#e3e3e3")+
scale_colour_manual(values=c(AS12))+
scale_fill_manual(values=c(AS12))+
geom_jitter(aes(col = AS), position=position_jitter(0.2), size = 2) +
#stat_summary(fun.y=mean, geom="line", size = 1.0)+
geom_smooth(aes(group = AS, colour = AS, fill=AS, outfit=fit2<<-..y..))+
#geom_smooth(colour="black")+
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.55))+
scale_y_continuous(breaks=seq(-2.4,0.6,0.6), name="log(AS/FS)", limits=c(-2.4,0.6))+
theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75),
text = element_text(size=18),
axis.ticks = element_blank(),
axis.line = element_blank(),
#axis.text.x = element_text(size=18),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
axis.text.y = element_text(size=18),
#axis.text.y = element_blank(),
#axis.title.y = element_blank(),
legend.position = "none"
)
TOC1_ratio_time
# Line types and colors for the expression time-course panels.
# NOTE(review): AS1 and AS2 are reassigned here, clobbering the LHY colors
# defined at the top of the file (harmless only because the LHY plot was
# already built above).
FS_line = "solid"
AS_line = "dashed"
AS2_line = "dotdash"
FS <- "#4d4d4d"
AS1 <- "#105989"
AS2 <- "#FC4A1A"
f1 = "#00441b"
e1 = "#a20021"
e5 = "#fabc3c"
f1_line = "solid"
e1_line = "longdash"
e5_line = "dotdash"
# Leaf expression time courses: one thin grey line per gene plus a single
# smoothed summary curve; `outfit=fit2<<-..y..` again captures the smooth.
Leaf_splicing_data <- read.csv("leaf_timecourses.txt", sep="\t")
Leaf_splicing <- ggplot(data=Leaf_splicing_data, aes(x=Time, y=Expression)) +
annotate("rect", xmin = -2, xmax = 0, ymin = -2.4, ymax = 2.4, alpha = .5, fill = "#e3e3e3")+
annotate("rect", xmin = 12, xmax = 24, ymin = -2.4, ymax = 2.4, alpha = .5, fill = "#e3e3e3")+
geom_line(aes(group=Gene), colour="#969696", size = 0.5) +
#stat_summary(aes(col=Organ), fun.y=mean, geom="line", size = 1.5)+
geom_smooth(aes(col=Organ, fill=Organ, outfit=fit2<<-..y..))+
scale_colour_manual(values=c(f1))+
scale_fill_manual(values=c(f1))+
scale_linetype_manual(values=c(f1_line)) +
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.55))+
scale_y_continuous(breaks=seq(-2.4,2.4,1.2), name="Expression value", limits=c(-2.4, 2.4))+
theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75),
text = element_text(size=18),
axis.ticks = element_blank(),
axis.line = element_blank(),
axis.text.x = element_text(size=18),
#axis.text.x = element_blank(),
#axis.title.x = element_blank(),
axis.text.y = element_text(size=18),
legend.position="none")
Leaf_splicing
# Internode expression time courses: per-gene grey lines, organ-specific
# line types, and one smoothed curve per organ (e1/e5 colors defined above).
Internodes_splicing_data <- read.csv("internodes_timecourses.txt", sep="\t")
Internodes_splicing <- ggplot(data=Internodes_splicing_data, aes(x=Time, y=Expression)) +
annotate("rect", xmin = -2, xmax = 0, ymin = -2.4, ymax = 2.4, alpha = .5, fill = "#e3e3e3")+
annotate("rect", xmin = 12, xmax = 24, ymin = -2.4, ymax = 2.4, alpha = .5, fill = "#e3e3e3")+
geom_line(aes(group=Gene, linetype=Organ), colour="#969696", size = 0.5) +
#stat_summary(aes(col=Organ, linetype=Organ), fun.y=mean, geom="line", size = 1.5)+
geom_smooth(aes(col=Organ, fill=Organ, outfit=fit2<<-..y..))+
scale_colour_manual(values=c(e1,e5))+
scale_fill_manual(values=c(e1,e5))+
scale_linetype_manual(values=c(e1_line, e5_line)) +
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.55))+
scale_y_continuous(breaks=seq(-2.4,2.4,1.2), name="Expression value", limits=c(-2.4, 2.4))+
theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75),
text = element_text(size=18),
axis.ticks = element_blank(),
axis.line = element_blank(),
axis.text.x = element_text(size=18),
#axis.text.x = element_blank(),
#axis.title.x = element_blank(),
#axis.text.y = element_text(size=18),
axis.text.y = element_blank(),
axis.title.y = element_blank(),
legend.position="none")
Internodes_splicing
# Assemble the final multi-panel figure (cowplot); labels A-F by panel.
plot_grid(LHY_ratio_time, PRR37_ratio_time, PRR73_ratio_time, PRR37_ratio_time_S,Leaf_splicing, Internodes_splicing, labels = c("A", "B", "C", "D", "E", "F"), ncol = 2, align = "none", rel_widths = c(1.15, 1),rel_heights = c(1,1,1.2),label_size = 20)
|
eca260c14a8947c367f3bf76781c0dd6e276721a | 44eeff1293391105dba7b6268e6c2d05810370ee | /dataset_scripts/generate_waldron_hESC.R | 5e399c18361a8fb40e1e1fa63f823d0502f9d6d4 | [] | no_license | jrboyd/shiny_seqtsne | 9c554128b39f1fd590a5400fd768d6bd40bc462a | 10a7910a31af8d7247d34d1b040de1a773b19e28 | refs/heads/master | 2020-05-18T07:30:48.889049 | 2019-05-29T19:01:07 | 2019-05-29T19:01:07 | 184,266,897 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 632 | r | generate_waldron_hESC.R | load("~/R/waldron/waldron_tsne_v2.save")
profile_dt = prof_dt[, .(tall_var, wide_var, id, x, y, cell = tall_var, mark = wide_var)]
tsne_dt = tsne_res
query_gr = query_gr
agg_dt = prof_dt[, .(value = max(y[abs(x) < .2])), .(tall_var, wide_var, id)]
# agg_dt[, value := y]
agg_dt = merge(agg_dt, tsne_dt[, .(id, tx, ty)], by = "id")
# overlap_dt = res[[5]]
annotation_dt = data.table(id = query_gr$id, gene_name = query_gr$gene_name, distance = 0)
config_dt = qdt[, .(file = file.path("/slipstream/galaxy/uploads/working/qc_framework", file), tall_var = "hESC", wide_var = mark, cell, mark)]
# color_mapping = res[[8]]
# remove(res)
|
fd8a3ccb781ac3bd5266c3efd226b45dbcecaa12 | 0bd4d5ee50ebfb5a5325ae0284087ee886be4f37 | /man/findFiles.Rd | aa38b6a1ec6bd13fe3e0facf9b7202d0405438ee | [] | no_license | stla/SLutils | 91f53e3ef48b22154642b7425a1be94c0c48053e | 5c5ef7dbb5d172c0a7788b3975a1363a47c4bf67 | refs/heads/master | 2020-04-10T06:21:30.476088 | 2019-09-10T10:00:57 | 2019-09-10T10:00:57 | 160,851,990 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 769 | rd | findFiles.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/files.R
\name{findFiles}
\alias{findFiles}
\title{Find files before/after a given date with size lower/bigger than a given size}
\usage{
findFiles(path = ".", date = Sys.Date(), before = TRUE, size = 0,
bigger = TRUE, ...)
}
\arguments{
\item{path}{the path to search in}
\item{date}{the reference date}
\item{before}{logical, whether to search files before the reference date}
\item{size}{reference size}
\item{bigger}{logical, whether to search file bigger than the reference size}
\item{...}{arguments passed to \code{\link[base]{list.files}}}
}
\value{
The found files, in a vector.
}
\description{
Find files before/after a given date with size lower/bigger than a given size.
}
|
d0b9c3ae59567c9f3627b22a9c049aba4497e3a8 | 52503a6a457c1e804053a4e0dacc3e45a6d54f5c | /R/crawling_building_from_activity.R | 1eb1ba45c891693d4c17ead523ae7602ef31b53c | [] | no_license | youngjaewon/leed | ac19a09d91123e8a8b9b4d825764187e69c21d2d | e2264ffd43b5ee7f7b665fb2999074ea5e594df7 | refs/heads/master | 2021-02-20T01:41:14.515712 | 2020-03-13T23:02:07 | 2020-03-13T23:02:07 | 245,324,193 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,442 | r | crawling_building_from_activity.R | library(dplyr)
library(xml2)
library(rvest)
activity <- "C:/Users/ywon7/Desktop/GitHub/leed/data/activity.csv" %>% read.csv
building.URL <- activity$building.URL %>% as.character %>% paste0("/details")
page.list <- NULL
data.list <- NULL
date.list <- NULL
std.list <- NULL
for (i in 1:length(building.URL)){
page.list[[i]] <- tryCatch(
xml2::read_html(building.URL[i]),
error = function(e){ "error" }
)
if( page.list[[i]][1] != "error" ){
date.vector <- page.list[[i]] %>% html_nodes(xpath = '//*[@class="media feed-event project"]//*[@class="date"]') %>% html_text
reg.date <- date.vector[length(date.vector)] #registered date
std.vector <- page.list[[i]] %>% html_nodes(xpath = '//*[@class="media feed-event project"]//*[@class="event"]') %>% html_text
reg.std <- std.vector[length(std.vector)] #registered standard
owner.type <- page.list[[i]] %>% html_node(xpath = '//*[@class="buildings-aside-inner"]//tr/td[2]') %>% html_text
data.list[[i]] <- data.frame(building.URL=building.URL[i], reg.date=reg.date, reg.std=reg.std, owner.type=owner.type)
cat("1..")
} else{
data.list[[i]] <- data.frame(building.URL=NA, reg.date=NA, reg.std=NA, owner.type=NA)
}
if(i %% 100 == 0){
print(i)
}
if(i %% 1000 == 0){
write.csv( bind_rows(data.list), paste0("C:/Users/ywon7/Desktop/GitHub/leed/building_",i,".csv") )
}
}
View( bind_rows(data.list) )
|
404618646bb5b993d8f1e840df04ab514a5449c5 | 2b8569d206b99a3c3b48b8ed7c70d48aabc1a821 | /R/ggtheme_plot.R | fc1a6175252bc64725dfc8a1ca87b748e9df9377 | [
"MIT"
] | permissive | emlab-ucsb/startR | 29190d9f24a05e6c6316e1f0e88b3d6cf1149111 | 49d5c9acd531320671d7ae411231fa0cfc15c557 | refs/heads/master | 2023-08-11T23:09:22.035471 | 2021-10-08T18:07:22 | 2021-10-08T18:07:22 | 141,326,123 | 1 | 1 | MIT | 2021-10-08T18:07:16 | 2018-07-17T18:07:28 | R | UTF-8 | R | false | false | 2,746 | r | ggtheme_plot.R | #' Plot theme
#'
#' @description Creates a standard theme for all my plots
#'
#' @param font_size The size of the font for \code{element_text()}. Defaults to \code{font_size = 10}.
#' @param font_family The font family
#' @param line_size The size of the lines
#'
#' @importFrom ggplot2 %+replace% theme_gray theme element_rect element_text margin rel element_line element_blank unit
#' @export
#'
ggtheme_plot <- function (font_size = 10, font_family = "", line_size = 0.5) {
half_line <- font_size / 2
theme_gray(base_size = font_size, base_family = font_family) %+replace%
theme(rect = element_rect(fill = "transparent",
colour = NA,
color = NA,
size = 0,
linetype = 0),
text = element_text(family = font_family,
face = "plain",
colour = "black",
size = font_size,
hjust = 0.5,
vjust = 0.5,
angle = 0,
lineheight = 0.9,
margin = margin(),
debug = FALSE),
axis.text = element_text(colour = "black",
size = rel(0.8)),
axis.ticks = element_line(colour = "black"),
axis.line = element_line(colour = "black",
size = line_size,
lineend = "square"),
axis.line.x = element_line(colour = "black",
size = line_size,
lineend = "square"),
axis.line.y = element_line(colour = "black",
size = line_size,
lineend = "square"),
legend.background = element_blank(),
legend.key = element_blank(),
legend.key.size = unit(1, "lines"),
legend.spacing = unit(0.4, "cm"),
legend.text = element_text(size = rel(0.8)),
legend.justification = c("left", "center"),
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.text = element_text(size = rel(0.8)),
strip.background = element_blank(),
plot.background = element_blank(),
plot.title = element_text(face = "bold",
size = font_size,
margin = margin(b = half_line)),
complete = TRUE
)
}
|
344a5bc40d5ad1860b4c928ff34c7294751f1266 | 11b2a54f71c83dc64b3583334aaaf34917a1d02e | /R/day-09.R | 56cee40197ccbb015ea1ca0531b02875902d271d | [] | no_license | shurtadogonzalez/geog176A-daily-exercises | 31e782af2dcac4bb40b040b4fe844bc469f836a7 | 20af2a9d838d06b2dcc829be00cdef6a325a183d | refs/heads/master | 2022-12-04T07:30:34.410007 | 2020-09-01T00:38:35 | 2020-09-01T00:38:35 | 286,666,906 | 0 | 0 | null | 2020-08-11T06:42:56 | 2020-08-11T06:42:55 | null | UTF-8 | R | false | false | 676 | r | day-09.R | #Name:Stephanie Hurtado
#Date:8/18/20
#Exercise: 09
install.packages("sf")
library(sf)
install.packages("stars")
library(stars)
install.packages("units")
library(units)
installed.packages("raster")
library(raster)
install.packages("mapview")
library(mapview)
install.packages("leaflet")
library(leaflet)
install.packages("gdalUtilities")
library(gdalUtilities)
install.packages("whitebox", repos="http://R-Forge.R-project.org")
remotes::install_github("ropensci/getlandsat")
remotes::install_github("ropensci/USAboundaries")
remotes::install_github("ropensci/USAboundariesData")
remotes::install_github("ropenscilabs/rnaturalearthdata")
sf::sf_extSoftVersion()
|
562108ef24050daebf0c315546de7684785ed144 | 267635bea01525fbf4c6cc471f275a47d5d41868 | /cod_code/3_plot_spawing_biomass_distributions.r | 0dd3ee320bef2f71b28ba838b5e8db000c8b3a59 | [] | no_license | mikprov/popdy | 956486c54375b39aeb470d59579a306342bfff92 | 0af4aa9143282227b83dbf06c9fad83de4bd3820 | refs/heads/master | 2021-08-17T05:53:11.184914 | 2021-01-06T21:49:24 | 2021-01-06T21:49:24 | 97,860,370 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,816 | r | 3_plot_spawing_biomass_distributions.r | # Plot spawning biomass distributions
# by: Mikaela Provost
# Goals:
# 1. calculate probability of spawning biomass at age distributions
# 2. create and export eigentables for MG and MP parms
library(ggplot2)
library(gridExtra)
library(tidyr)
library(dplyr)
# ---
# Load functions:
#source("C:/Users/provo/Documents/GitHub/popdy/cod_code/2_cod_functions.r")
# ---
# load cod data, break into separate populations
#source("C:/Users/provo/Documents/GitHub/popdy/cod_code/0_load_cod_data.r")
# ---
# load max ages, table showing max age in each population
#source("C:/Users/provo/Documents/GitHub/popdy/cod_code/6_find_and_export_oldest_fish_in_each_pop.r")
# change max age for 23LKJ to 17
# this needs to be changed because 2J3KL is an outlier if max age is
# left at 20y. The assessment (Brattey et al. 2010, p28) says that most
# ages are 1-10y, with maximum age reaching 17.
#max_ages_table[max_ages_table$codNames == "cod2J3KL", ]$max_ages <- 17
# ---
# reorder pops by peak spawning age
# load peak spawning age info
#eigentable = read.csv("C:/Users/provo/Documents/GitHub/popdy/cod_code/mikaelaLSB/eigentable.csv",
# header=TRUE,stringsAsFactors = FALSE)
#eigentable = as.data.frame(eigentable)
#codNames_ordered_by_peak <- eigentable %>% arrange(mode_age) %>% pull(codNames)
#codNames_ordered_by_peak_plot <- eigentable %>% arrange(mode_age) %>% pull(codNames_plot)
# ---
# Plot spawning biomass distribution -- new way: treat as probability distribution
# y axis = probability of spawning
# x axis = age
# Preallocate one slot per cod population for the summary statistics filled
# in by the loop below.  MG and MP denote the two parameter sets referenced
# in the file header ("eigentables for MG and MP parms").
cvs_modeMG = rep(NA, length=length(codNames))
mode_ageMG = rep(NA, length=length(codNames))
sd_modeMG = rep(NA, length=length(codNames))
cvs_modeMP = rep(NA, length=length(codNames))
mode_ageMP = rep(NA, length=length(codNames))
sd_modeMP = rep(NA, length=length(codNames))
max_ages <- rep(NA, length=length(codNames))
temp <- rep(NA, length=length(codNames))
F.halfmax = 0   # fishing mortality fixed at zero for these distributions
codNames_plot <- rep(NA, length=length(codNames))
# note: need to recalculate sd with mode, instead of mean
# Lists to hold one spawning-distribution ggplot per population.
pMG <- as.list(rep(NA,length=length(codNames)))
names(pMG) <- codNames
pMP <- as.list(rep(NA,length=length(codNames)))
names(pMP) <- codNames
for (i in 1:length(codNames)) { # step through each cod population
# this should load parms: L_inf, K, TEMP, maxage
# (each population file also defines `name`, B0, B1 used below -- TODO
# confirm against the cod_pops scripts)
source(file = paste('C:/Users/Mikaela/Documents/GitHub/popdy/cod_pops/',codNames[i], '.r', sep=''))
# calculate LEP at each age
# (calculate_LSB_at_age_by_F is defined in 2_cod_functions.r, sourced
# elsewhere; F.halfmax=0 means unfished)
out = calculate_LSB_at_age_by_F(maxage=maxage,L_inf=L_inf,K=K,TEMP=TEMP,
F.halfmax=0,B0=B0,B1=B1)
Ages = seq(from=1,to=maxage,by=1)
codNames_plot[i] <- name
max_ages[i] <- maxage
temp[i] <- TEMP
# calculate probability of spawning at age
# (normalize LEP-at-age so each population's probabilities sum to 1)
#p_spawnMG = as.data.frame((out$LEP_MG/sum(out$LEP_MG))*(15/sum(out$LEP_MG))*(1/length(out$LEP_MG)))
p_spawnMG = as.data.frame(out$LEP_MG/sum(out$LEP_MG))
#p_spawnMG = as.data.frame(out$LEP_MG/sum(out$LEP_MG))
colnames(p_spawnMG) <- "p_spawnMG"
#p_spawnMP = as.data.frame((out$LEP_MP/sum(out$LEP_MP))*(15/sum(out$LEP_MP))*(1/length(out$LEP_MP)))
p_spawnMP = as.data.frame(out$LEP_MP/sum(out$LEP_MP))
#p_spawnMP = as.data.frame(out$LEP_MP/sum(out$LEP_MP))
colnames(p_spawnMP) <- "p_spawnMP"
keep= cbind(p_spawnMG,p_spawnMP,Ages)
# using mode in sd
# (`keep$Age` resolves to the `Ages` column via data.frame partial matching)
mode_ageMG[i] = keep$Age[which.max(keep$p_spawnMG)] # what is the age with highest probability?
sd_modeMG[i] = sqrt(sum(keep$p_spawnMG*(keep$Age-mode_ageMG[i])^2) ) # stdev
cvs_modeMG[i] = sd_modeMG[i]/mode_ageMG[i] # coefficient of variation
mode_ageMP[i] = keep$Age[which.max(keep$p_spawnMP)] # what is the age with highest probability?
sd_modeMP[i] = sqrt( sum(keep$p_spawnMP*(keep$Age-mode_ageMP[i])^2) ) # stdev
cvs_modeMP[i] = sd_modeMP[i]/mode_ageMP[i] # coefficient of variation
# Plot spawning distribution for each population:
# (dashed vertical line + label mark the modal spawning age)
pMG[[i]] <- ggplot(keep,aes(x=Ages,y=p_spawnMG)) +
geom_line() + theme_classic() + xlab("") +
ylab("Pr(spawning)") +
xlab("Age") +
ggtitle(paste(codNames_plot[i])) +
scale_y_continuous(limits = c(0,0.35)) + #y axis for not adjusted
xlim(0,20) +
geom_vline(xintercept=mode_ageMG[i],linetype="dashed") +
geom_text(x=(mode_ageMG[i]+2), y=0.3, label=mode_ageMG[i], size=4) +
theme(text = element_text(size = 10))
pMP[[i]] <- ggplot(keep,aes(x=Ages,y=p_spawnMP)) +
geom_line() + theme_classic() + xlab("Age") +
ylab("") +
ggtitle(paste(codNames[i])) +
scale_y_continuous(limits = c(0,0.35)) + #y axis for not adjusted
xlim(0,20) +
geom_vline(xintercept=mode_ageMP[i],linetype="dashed") +
geom_text(x=(mode_ageMP[i]+2), y=0.3, label=mode_ageMP[i], size=4) +
theme(text = element_text(size = 10))
}
# Combined eigentable.  Note cbind() of mixed vectors coerces everything to
# character, so all columns of eigentable_MM come back as strings.
eigentable_MM <- as.data.frame(cbind(codNames,codNames_plot,max_ages,temp,mode_ageMG,mode_ageMP,
sd_modeMG,sd_modeMP,cvs_modeMG,cvs_modeMP))
eigentable_MM$max_ages <- as.numeric(levels(eigentable_MM$max_ages))[eigentable_MM$max_ages]
eigentable_MM$temp <- as.numeric(levels(eigentable_MM$temp))[eigentable_MM$temp]
eigentable_MM$mode_ageMG <- as.numeric(levels(eigentable_MM$mode_ageMG))[eigentable_MM$mode_ageMG]
eigentable_MM$mode_ageMP <- as.numeric(levels(eigentable_MM$mode_ageMP))[eigentable_MM$mode_ageMP]
eigentable_MM$sd_modeMG <- as.numeric(levels(eigentable_MM$sd_modeMG))[eigentable_MM$sd_modeMG]
eigentable_MM$sd_modeMP <- as.numeric(levels(eigentable_MM$sd_modeMP))[eigentable_MM$sd_modeMP]
eigentable_MM$cvs_modeMG <- as.numeric(levels(eigentable_MM$cvs_modeMG))[eigentable_MM$cvs_modeMG]
eigentable_MM$cvs_modeMP <- as.numeric(levels(eigentable_MM$cvs_modeMP))[eigentable_MM$cvs_modeMP]
#plot(eigentable_MM$cvs_modeMG,eigentable_MM$cvs_modeMP)
# export eigentable_MM
#write.csv(eigentable_MM,file='C:/Users/provo/Documents/GitHub/popdy/cod_code/mikaelaLSB/eigentable_MM.csv')
# Export high res fig
#tiff(file='C:/Users/Mikaela/Documents/GitHub/popdy/cod_figures/manuscript3/SI/figS1_spawning_distributions_MG.tiff', units="in", width=7, height=7, res=300)
##do.call(grid.arrange,c(pMG,ncol=4,top="Gislason, LEP-at-age/total LEP, where LEP-at-age = f*survival-to-that-age",left="Pr(spawning)"))
#do.call(grid.arrange,c(pMG,ncol=4,bottom="Age (years)",left="Pr(spawning)"))
#dev.off()
# tiff(file='C:/Users/provo/Documents/GitHub/popdy/cod_figures/manuscript/fig1_spawning_distributions_MP_oneoverLEP.tiff', units="in", width=7, height=7, res=300)
# do.call(grid.arrange,c(pMP,ncol=4,top="Pauly, LEP-at-age/total LEP, where LEP-at-age = f*survival-to-that-age",left="Pr(spawning)"))
# dev.off()
# tiff(file='C:/Users/provo/Documents/GitHub/popdy/cod_figures/manuscript3/fig3a_spawning_distributions_subplot.tiff', units="in", width=2, height=6, res=300)
# do.call(grid.arrange,c(pMG[c(6,3,15,5)],ncol=1))
# dev.off()
|
502fd0532b333bfc6f377208ba512a4ad26ea427 | 118e00ab030c8cc5131fd6a1ecf690f97eeab858 | /sklearn/stock-pred-redshift/model/printRecords.R | 9c80d8f2c943693f3b84722bfba8c923aa53ebdf | [] | no_license | ajayanto/dkubeio-examples | 0f9df3238c7b11bab7e598a66acfe551459c0a16 | c4ab19bb5d640448a9512709d2edc6243c658f02 | refs/heads/master | 2023-05-27T09:07:26.435848 | 2021-06-08T01:56:19 | 2021-06-08T01:56:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 855 | r | printRecords.R | library(jsonlite)
library(httr)
# Dkube-provisioned Redshift dataset metadata (one row per dataset).
# NOTE(review): assumes each row has at least rs_name, rs_user, rs_database
# columns -- confirm against the deployed /etc/dkube/redshift.json schema.
ds <- fromJSON("/etc/dkube/redshift.json")
# Current login name; also read again inside rs_fetch_datasets().
user <- Sys.getenv("LOGNAME")
# sprintf() template for the Dkube controller datum endpoint:
# first %s = username, second %s = dataset name.
url <- "http://dkube-controller-worker.dkube:5000/dkube/v2/controller/users/%s/datums/class/dataset/datum/%s"
# Bearer token for the Dkube API, injected via environment variable.
token <- Sys.getenv("DKUBE_USER_ACCESS_TOKEN")
header_data <- sprintf("Bearer %s", token)
# Fetch the Redshift password for every configured dataset from the Dkube
# controller API and return the `ds` metadata table with a `password` column
# appended. Relies on the globals `ds`, `url`, and `header_data` defined above.
# Each iteration performs one HTTP GET, so cost is linear in nrow(ds).
rs_fetch_datasets <- function(){
  # seq_len() instead of 1:nrow(ds): safe when ds has zero rows
  # (1:0 would iterate over c(1, 0) and index out of bounds).
  for (row in seq_len(nrow(ds))) {
    r <- GET(sprintf(url, Sys.getenv("LOGNAME"), ds[row, "rs_name"]),
             add_headers(Authorization = header_data))
    # NOTE(review): assumes the response body carries
    # $data$datum$redshift$password -- confirm against the Dkube API contract.
    ds[row, "password"] <- content(r)$data$datum$redshift$password
  }
  # `ds` here is the function-local copy; the global is not modified.
  ds
}
# Look up the Redshift password for a given user/database pair.
# Returns the password string of the first matching dataset, or NULL
# (invisibly, by falling off the loop) when no dataset matches.
# Note: this triggers one API call per dataset via rs_fetch_datasets().
get_password <- function(user, db){
  datasets <- rs_fetch_datasets()
  # seq_len() instead of 1:nrow(): safe when no datasets are configured.
  for (row in seq_len(nrow(datasets))) {
    if (datasets[row, "rs_user"] == user && datasets[row, "rs_database"] == db) {
      return(datasets[row, "password"])
    }
  }
}
get_password("dpaks", "dkube")
|
f87f08eaf7503eb557b3961dfbc84f8a3ea2487b | 0662ba611c00aa49c3afca318a7f3ea16c83ba5d | /man/dsl_get.Rd | 97a379d485412118d726abe421c9b0e2262a0f1f | [] | no_license | ropensci/tic | 38e06d7675e6820801edf74daa904d6ceea1a804 | 379cf98787e924a68e47792462fafca03f148d5f | refs/heads/main | 2023-09-01T19:28:37.862973 | 2023-05-22T14:07:38 | 2023-05-22T14:07:38 | 72,775,037 | 50 | 10 | null | 2023-01-18T18:03:44 | 2016-11-03T18:31:22 | R | UTF-8 | R | false | true | 2,456 | rd | dsl_get.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dsl-storage.R
\name{dsl_get}
\alias{dsl_get}
\alias{dsl_load}
\alias{dsl_init}
\title{Stages and steps}
\usage{
dsl_get()
dsl_load(path = "tic.R", force = FALSE, quiet = FALSE)
dsl_init(quiet = FALSE)
}
\arguments{
\item{path}{\verb{[string]}\cr
Path to the stage definition file, default: \code{"tic.R"}.}
\item{force}{\verb{[flag]}\cr
Set to \code{TRUE} to force loading from file even if a configuration exists.
By default an existing configuration is not overwritten by \code{dsl_load()}.}
\item{quiet}{\verb{[flag]}\cr
Set to \code{TRUE} to turn off verbose output.}
}
\value{
A named list of opaque stage objects with a \code{"class"} attribute
and a corresponding \code{\link[=print]{print()}} method for pretty output.
Use the high-level \code{\link[=get_stage]{get_stage()}} and \code{\link[=add_step]{add_step()}} functions to configure,
and the \link{stages} functions to run.
}
\description{
\pkg{tic} works in a declarative way, centered around the \code{tic.R} file
created by \code{\link[=use_tic]{use_tic()}}.
This file contains the \emph{definition} of the steps to be run in each stage:
calls to \code{\link[=get_stage]{get_stage()}} and \code{\link[=add_step]{add_step()}}, or macros like
\code{\link[=do_package_checks]{do_package_checks()}}.
Normally, this file is never executed directly.
Running these functions in an interactive session will \strong{not} carry out
the respective actions.
Instead, a description of the code that would have been run is printed
to the console.
Edit \code{tic.R} to configure your CI builds.
See \code{vignette("build-lifecycle", package = "tic")} for more details.
}
\details{
Stages and steps defined using tic's \link{DSL} are stored in an
internal object in the package.
The stages are accessible through \code{dsl_get()}.
When running the \link{stages}, by default a configuration defined
in the \code{tic.R} file is loaded with \code{dsl_load()}.
See \code{\link[=use_tic]{use_tic()}} for setting up a \code{tic.R} file.
For interactive tests, an empty storage can be initialized
with \code{dsl_init()}.
This happens automatically the first time \code{dsl_get()} is called
(directly or indirectly).
}
\examples{
\dontrun{
dsl_init()
dsl_get()
dsl_load(system.file("templates/package/tic.R", package = "tic"))
dsl_load(system.file("templates/package/tic.R", package = "tic"),
force =
TRUE
)
dsl_get()
}
}
|
d902844b7e0dd1cac4121aab4cd3398cc8f5426b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/tidyxl/examples/tidy_xlsx.Rd.R | bd23aeff7659084b7e69a1ba6eebab95558d55c7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,235 | r | tidy_xlsx.Rd.R | library(tidyxl)
### Name: tidy_xlsx
### Title: Import xlsx (Excel) cell contents into a tidy structure.
### Aliases: tidy_xlsx
### ** Examples
## Not run:
##D examples <- system.file("extdata/examples.xlsx", package = "tidyxl")
##D
##D # All sheets
##D str(tidy_xlsx(examples)$data)
##D
##D # Specific sheet either by position or by name
##D str(tidy_xlsx(examples, 2)$data)
##D str(tidy_xlsx(examples, "Sheet1")$data)
##D
##D # Data (cell values)
##D x <- tidy_xlsx(examples)
##D str(x$data$Sheet1)
##D
##D # Formatting
##D str(x$formats$local)
##D
##D # The formats of particular cells can be retrieved like this:
##D
##D Sheet1 <- x$data$Sheet1
##D x$formats$style$font$bold[Sheet1$style_format]
##D x$formats$local$font$bold[Sheet1$local_format_id]
##D
##D # To filter for cells of a particular format, first filter the formats to get
##D # the relevant indices, and then filter the cells by those indices.
##D bold_indices <- which(x$formats$local$font$bold)
##D Sheet1[Sheet1$local_format_id %in% bold_indices, ]
##D
##D # In-cell formatting is available in the `character_formatted` column as a
##D # data frame, one row per substring.
##D tidy_xlsx(examples)$data$Sheet1$character_formatted[77]
## End(Not run)
|
2226a8700a0bcda51acdf5eca2068612f0d1f90b | c3026ac4a49b0ff3361cf925a49965bf26ef6bf5 | /R/ggartprint.R | ff85b448f56ac686ef6ebb03caf933ba701a8f73 | [
"MIT"
] | permissive | keyegon/ggirl | d85e806c326a3688483949465cafd628ba60a5e6 | 7e042953dcacf6c569a3c31afe672a5b97456dbf | refs/heads/master | 2023-07-03T01:17:04.654516 | 2021-08-10T12:37:23 | 2021-08-10T12:37:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,327 | r | ggartprint.R | artprint_dpi <- 300
artprint_size_info <-
data.frame(
size = c("11x14", "16x20", "18x24", "24x36", "12x12", "16x16", "20x20"),
price_cents = c(3000L, 4000L, 5000L, 7500L, 3000L, 3500L, 4500L),
width_in = c(11L, 16L, 18L, 24L, 12L, 16L, 20L),
height_in = c(14L, 20L, 24L, 36L, 12L, 16L, 20L)
)
#' get a table of sizes of prints available.
#'
#' Prices include shipping. If a size isn't available that you want email support@ggirl.art for custom sizes.
#' @export
ggartprint_sizes <- function(){
info <- artprint_size_info[,c("size","price_cents","width_in","height_in")]
info_names <- c("size","price","width_inches","height_inches")
info$price <- paste0("$", sprintf("%.2f",info$price/100))
info[,c("size","price")]
}
#' Preview your art print
#'
#' This function takes a ggplot2 output and gives a preview of how the plot will look as an art print.
#' While it's totally fine to just call ggirl::ggartprint to preview, this allows you to preview locally.
#'
#' The preview will appear in either the "Viewer" pane of RStudio or in your browser, depending on if RStudio is installed or not.
#' The preview includes a frame, but that will not be included with the print.
#'
#' @param plot the plot to use as an art print
#' @param size the size of the art print. Use [ggartprint_sizes()] to see a list of the sizes. If a size isn't available that you want email support@ggirl.art for custom sizes.
#' @param orientation should the plot be landscape or portrait?
#' @param ... other options to pass to `ragg::agg_png()` when turning the plot into an image.
#' @seealso [ggartprint()] to order the art print
#' @examples
#' library(ggplot2)
#' library(ggirl)
#' plot <- ggplot(data.frame(x=1:10, y=runif(10)),aes(x=x,y=y))+geom_line()+geom_point()+theme_gray(48)
#' ggartprint_preview(plot, size="24x36", orientation = "landscape")
#' @export
ggartprint_preview <- function(plot, size, orientation, ...){
temp_dir <- tempfile()
dir.create(temp_dir)
temp_plot_file <- file.path(temp_dir, "plot.png")
temp_css_file <- file.path(temp_dir, "site.css")
temp_html_file <- file.path(temp_dir, "index.html")
mg <- ceiling(postcard_width_px*(safe_margin-cut_margin))
css <- "body {margin: 0;}
.frame {
background-color: #303030;
box-shadow: 0 10px 7px -5px rgba(0, 0, 0, 0.3);
padding: 1rem!important;
margin: 1rem!important;
display: inline-block;
}
.box-shadow {
position: relative;
text-align: center;
}
.box-shadow::after {
box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.5) inset;
bottom: 0;
content: '';
display: block;
left: 0;
height: 100%;
position: absolute;
right: 0;
top: 0;
width: 100%;
}
.box-shadow img {
max-width: 100%;
width: auto;
max-height: 90vh;
}
.artprint {
max-width: 100%;
height: auto;
}"
html <- '
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>ggartprint preview</title>
<link rel="stylesheet" href="site.css">
</head>
<body>
<div class="frame">
<div class="box-shadow">
<img src="plot.png" class = "artprint">
</div>
</div>
</body>
</html>
'
ggartprint_save(filename = temp_plot_file, plot = plot, size = size, orientation = orientation, ...)
writeLines(css, temp_css_file)
writeLines(html, temp_html_file)
viewer <- getOption("viewer")
if (!is.null(viewer))
viewer(temp_html_file)
else
utils::browseURL(temp_html_file)
}
# Render `plot` to `filename` as a PNG sized for the requested art print.
# Internal helper shared by ggartprint_preview() and ggartprint().
# The catalogue lists sizes portrait-first, so landscape swaps width/height.
ggartprint_save <- function(filename, plot, size, orientation = c("landscape","portrait"), ...){
  orientation <- match.arg(orientation)
  matched <- artprint_size_info[artprint_size_info$size == size, ]
  # A zero-row match means an unknown size. (The original tested
  # is.null(size_info$size) AFTER as.list(), but as.list() of a zero-row
  # data.frame yields character(0), not NULL, so the error could never fire
  # and ragg failed later with an obscure zero-length-width message.)
  if (nrow(matched) == 0) {
    stop("Invalid size list selected. Use ggartprint_sizes() to see available sizes")
  }
  size_info <- as.list(matched)
  if (orientation == "landscape") {
    width <- size_info$height_in
    height <- size_info$width_in
  } else {
    # match.arg() guarantees orientation is one of the two values,
    # so the old unreachable stop("invalid orientation") branch is gone.
    width <- size_info$width_in
    height <- size_info$height_in
  }
  # Open the ragg device, and guarantee it is closed (and the previously
  # active device restored) even if drawing fails.
  old_dev <- grDevices::dev.cur()
  ragg::agg_png(
    filename,
    width = width,
    height = height,
    units = "in",
    res = artprint_dpi,
    ...)
  on.exit(utils::capture.output({
    grDevices::dev.off()
    if (old_dev > 1) grDevices::dev.set(old_dev)
  }))
  grid::grid.draw(plot)
}
#' Order art prints of your ggplot!
#'
#' This function takes a ggplot2 output and will order an art print to hang on a wall!
#' Running this function will bring you to a webpage to confirm the order and submit it.
#' _No order will be submitted until you explicitly approve it._
#'
#' You can choose from a number of options for the size of the print (and either rectangular or square).
#' All of the sizes are high resolution, so things like text size in the R/RStudio plot may not reflect what
#' it would look like as a poster. It's recommended you run the function a few times and adjust plot attributes
#' until you get it the way you like it. The preview image includes a frame, but that will not be included with the print.
#'
#' Prints take up to 2-3 weeks to deliver.
#'
#' @param plot the plot to use as an art print.
#' @param size the size of the art print. Use [ggartprint_sizes()] to see a list of the sizes. If a size isn't available that you want email support@ggirl.art for custom sizes.
#' @param orientation should the plot be landscape or portrait?
#' @param contact_email email address to send order updates.
#' @param quantity the number of prints to order (defaults to 1).
#' @param address the physical address to mail the print(s) to. Use the [address()] function to format it.
#' @param ... other options to pass to `ragg::agg_png()` when turning the plot into an image for the front of the postcard.
#' @seealso [address()] to format an address for ggirl
#' @examples
#' library(ggplot2)
#' library(ggirl)
#' delivery_address <- address(name = "Fake person", address_line_1 = "101 12th st",
#'                             address_line_2 = "Apt 17", city = "Seattle", state = "WA",
#'                             postal_code = "98102", country = "US")
#' contact_email = "fakeemail275@gmail.com"
#' plot <- ggplot(data.frame(x=1:10, y=runif(10)),aes(x=x,y=y))+geom_line()+geom_point()+theme_gray(48)
#' ggartprint(plot, size="24x36", orientation = "landscape", quantity = 1,
#'            contact_email = contact_email, address = delivery_address)
#' @export
ggartprint <- function(plot, size = "11x14", orientation = c("landscape","portrait"), quantity=1, contact_email, address, ...){
  orientation <- match.arg(orientation)
  # Prints can only be shipped to US addresses via the package.
  if(any(address$country != "US")){
    stop("Art prints only available for US addresses through package. Email support@ggirl.art to price a custom order.")
  }
  # Package version accompanies the order; fall back to "0.0.0" when the
  # package isn't formally installed (e.g. loaded with devtools).
  # (utils:: namespaced for consistency with utils::browseURL below.)
  version <- utils::packageDescription("ggirl", fields = "Version")
  if(is.na(version)){
    version <- "0.0.0"
  }
  server_url <- getOption("ggirl_server_url",
                          "https://skyetetra.shinyapps.io/ggirl-server")
  # in the event the server is sleeping, we need to kickstart it before doing the post
  response <- httr::GET(server_url)
  if(response$status_code != 200L){
    message("Waiting 10 seconds for ggirl server to come online")
    Sys.sleep(10)
  }
  # Render the plot to a temporary PNG at print resolution and read it back
  # as raw bytes; the temp file is removed on exit either way.
  temp_png <- tempfile(fileext = ".png")
  on.exit({file.remove(temp_png)}, add=TRUE)
  ggartprint_save(filename = temp_png, plot=plot, size = size, orientation = orientation, ...)
  raw_plot <- readBin(temp_png, "raw", file.info(temp_png)$size)
  # Order payload understood by the ggirl server.
  data <- list(
    type = "artprint",
    contact_email = contact_email,
    raw_plot = raw_plot,
    address = address,
    size = size,
    orientation = orientation,
    quantity = quantity,
    version = version
  )
  # Serialize the payload through an in-memory raw connection and POST it.
  zz <- rawConnection(raw(0), "r+")
  on.exit({close(zz)}, add=TRUE)
  saveRDS(data, zz)
  seek(zz, 0)
  response <- httr::POST(paste0(server_url, "/upload"),
                         body = rawConnectionValue(zz),
                         httr::content_type("application/octet-stream"))
  if(response$status_code == 403L){
    stop("Cannot connect to ggirl server. Go to https://github.com/jnolis/ggirl to see latest status updates")
  }
  if(response$status_code != 201L){
    stop(httr::content(response, as="text", encoding="UTF-8"))
  }
  # The server responds with a one-time token; open the confirmation page.
  # (utils:: namespaced for consistency with ggartprint_preview().)
  token <- httr::content(response, as="text", encoding="UTF-8")
  utils::browseURL(paste0(server_url,"/artprint?token=",token))
}
fbf3ffa7ab13ab79605a5d6749641516a1997c1e | 21ad28c47798365e3ff84ef9cab5f278d8bd731d | /man/gsetfn.Rd | b4de47cb4d451f59465c22de11f82ec082d3466a | [] | no_license | syspremed/intPredict | b5d603e00e8ba803013ea03e81bb37e4247f788a | 139d5914946d8c5a29c92d2e69b040ee799baa26 | refs/heads/master | 2021-01-01T18:46:16.696853 | 2017-07-31T15:02:38 | 2017-07-31T15:02:38 | 98,432,331 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 790 | rd | gsetfn.Rd | \name{gsetfn}
\alias{gsetfn}
\title{
Gene selection function}
\description{
Pipeline for gene selection.}
\usage{
gsetfn(response, array, fac, narray, np, p, nrmeth, ngenes, nsubs, test.idx)
}
\arguments{
\item{response}{
Sample known classes.
}
\item{array}{
Expression data, genes by samples
}
\item{fac}{
Ratio of splitting learning and testing data.
}
\item{narray}{
Number of samples}
\item{np}{
Number of gene sets}
\item{p}{
Size of each gene set}
\item{nrmeth}{
Number of gene selection methods}
\item{ngenes}{
Total number of genes}
\item{nsubs}{
Number of subtypes}
\item{test.idx}{
Index of samples selected as test}
}
\details{
Pipeline for gene selection}
\keyword{classif}
|
30ddd9d46878df6b3d52ae5ebd22dff5abc85a3d | a1d42f310c9ad32b915fd9df8d4606858de28822 | /R/create_figures/create_f2_hmagma_results.R | 82b96b203fff89ba8b7f30ebdeb1c2db4de4a5b2 | [
"MIT"
] | permissive | ryurko/HMAGMA-comment | e0d730c5a55e3390578c87bced2ba0adb794fdc4 | d4396a02d5864c4476524ff7215b795dd45a6b82 | refs/heads/master | 2023-01-05T13:36:59.166360 | 2020-11-07T03:29:48 | 2020-11-07T03:29:48 | 249,070,304 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,712 | r | create_f2_hmagma_results.R | # PURPOSE: Create figure 2 displaying the H-MAGMA results
library(tidyverse)
library(latex2exp)
library(cowplot)
# Load the H-MAGMA data ---------------------------------------------------
# Read every sheet of an xlsx workbook into a named list, one element per
# sheet. By default each sheet is coerced to a plain data.frame; pass
# `tibble = TRUE` to keep readxl's tibbles as-is.
read_excel_allsheets <- function(filename, tibble = FALSE) {
  sheet_names <- readxl::excel_sheets(filename)
  read_one <- function(nm) {
    sheet_data <- readxl::read_excel(filename, sheet = nm)
    if (tibble) sheet_data else as.data.frame(sheet_data)
  }
  setNames(lapply(sheet_names, read_one), sheet_names)
}
# Load the adult and fetal brain H-MAGMA results (one sheet per phenotype):
hmagma_adult_brain_results <-
  read_excel_allsheets("data/hmagma/output/reported/H-MAGMA_Adult_brain_output.xlsx")
hmagma_fetal_brain_results <-
  read_excel_allsheets("data/hmagma/output/reported/H-MAGMA_Fetal_brain_output.xlsx")

# Genes significant after Benjamini-Hochberg adjustment at FDR <= 0.05.
# which() is used deliberately so rows with NA adjusted p-values are dropped
# (a bare logical index would propagate NA entries into the result).
# This replaces ten copy-pasted selection expressions in the original.
bh_genes <- function(df, gene_col = "GENE", p_col = "P") {
  df[[gene_col]][which(p.adjust(df[[p_col]], method = "BH") <= 0.05)]
}

# H-MAGMA fetal discoveries per phenotype ---------------------------------
asd_fetal_hmagma_bh_results <- bh_genes(hmagma_fetal_brain_results$`H-MAGMA_ASD`)
scz_fetal_hmagma_bh_results <- bh_genes(hmagma_fetal_brain_results$`H-MAGMA_SCZ`)
adhd_fetal_hmagma_bh_results <- bh_genes(hmagma_fetal_brain_results$`H-MAGMA_ADHD`)
bd_fetal_hmagma_bh_results <- bh_genes(hmagma_fetal_brain_results$`H-MAGMA_BD`)
mdd_fetal_hmagma_bh_results <- bh_genes(hmagma_fetal_brain_results$`H-MAGMA_MDD`)

# H-MAGMA adult discoveries per phenotype ---------------------------------
asd_adult_hmagma_bh_results <- bh_genes(hmagma_adult_brain_results$`H-MAGMA_ASD`)
scz_adult_hmagma_bh_results <- bh_genes(hmagma_adult_brain_results$`H-MAGMA_SCZ`)
adhd_adult_hmagma_bh_results <- bh_genes(hmagma_adult_brain_results$`H-MAGMA_ADHD`)
bd_adult_hmagma_bh_results <- bh_genes(hmagma_adult_brain_results$`H-MAGMA_BD`)
mdd_adult_hmagma_bh_results <- bh_genes(hmagma_adult_brain_results$`H-MAGMA_MDD`)

# Union of fetal and adult H-MAGMA discoveries per phenotype --------------
asd_hmagma_bh_union <-
  union(asd_fetal_hmagma_bh_results, asd_adult_hmagma_bh_results)
scz_hmagma_bh_union <-
  union(scz_fetal_hmagma_bh_results, scz_adult_hmagma_bh_results)
adhd_hmagma_bh_union <-
  union(adhd_fetal_hmagma_bh_results, adhd_adult_hmagma_bh_results)
bd_hmagma_bh_union <-
  union(bd_fetal_hmagma_bh_results, bd_adult_hmagma_bh_results)
mdd_hmagma_bh_union <-
  union(mdd_fetal_hmagma_bh_results, mdd_adult_hmagma_bh_results)
# Load the corrected (Monte Carlo Fisher) results -------------------------
# Conservative MC p-value correction: with B = 1,200,000 Monte Carlo draws,
# the smallest attainable p-value is 1/B, so estimates are shifted to
# (B*p + 1) / B. The constant is named to avoid ten copies of the literal.
mc_draws <- 1200000

# Read all per-chunk MC Fisher result files in `dir` and apply the
# conservative p-value adjustment. Replaces ten copy-pasted pipelines.
load_mcfisher <- function(dir) {
  map_dfr(list.files(dir, full.names = TRUE), read_csv) %>%
    mutate(fisher_pval = (mc_draws * fisher_pval + 1) / mc_draws)
}

# Corrected fetal results
asd_fetal_mcfisher <- load_mcfisher("data/hmagma/output/asd/fetal_new/")
scz_fetal_mcfisher <- load_mcfisher("data/hmagma/output/scz/fetal_new/")
adhd_fetal_mcfisher <- load_mcfisher("data/hmagma/output/adhd/fetal_new/")
mdd_fetal_mcfisher <- load_mcfisher("data/hmagma/output/mdd/fetal_new/")
bd_fetal_mcfisher <- load_mcfisher("data/hmagma/output/bd/fetal_new/")

# Corrected adult results
asd_adult_mcfisher <- load_mcfisher("data/hmagma/output/asd/adult_new/")
scz_adult_mcfisher <- load_mcfisher("data/hmagma/output/scz/adult_new/")
adhd_adult_mcfisher <- load_mcfisher("data/hmagma/output/adhd/adult_new/")
mdd_adult_mcfisher <- load_mcfisher("data/hmagma/output/mdd/adult_new/")
bd_adult_mcfisher <- load_mcfisher("data/hmagma/output/bd/adult_new/")

# Genes significant after Benjamini-Hochberg adjustment at FDR <= 0.05.
# which() deliberately drops NA adjusted p-values. (Defined identically to
# the helper used for the H-MAGMA results so either section stands alone.)
bh_genes <- function(df, gene_col = "GENE", p_col = "P") {
  df[[gene_col]][which(p.adjust(df[[p_col]], method = "BH") <= 0.05)]
}

# Fetal MC Fisher BH results; bare length() calls auto-print when run,
# and the recorded counts are kept as comments for reference.
asd_fetal_mcfisher_bh_results <- bh_genes(asd_fetal_mcfisher, "gene_id", "fisher_pval")
length(asd_fetal_mcfisher_bh_results)
# [1] 85
scz_fetal_mcfisher_bh_results <- bh_genes(scz_fetal_mcfisher, "gene_id", "fisher_pval")
length(scz_fetal_mcfisher_bh_results)
# [1] 6062
adhd_fetal_mcfisher_bh_results <- bh_genes(adhd_fetal_mcfisher, "gene_id", "fisher_pval")
length(adhd_fetal_mcfisher_bh_results)
# [1] 146
bd_fetal_mcfisher_bh_results <- bh_genes(bd_fetal_mcfisher, "gene_id", "fisher_pval")
length(bd_fetal_mcfisher_bh_results)
# [1] 792
mdd_fetal_mcfisher_bh_results <- bh_genes(mdd_fetal_mcfisher, "gene_id", "fisher_pval")
length(mdd_fetal_mcfisher_bh_results)
# [1] 1580

# Adult MC Fisher BH results
asd_adult_mcfisher_bh_results <- bh_genes(asd_adult_mcfisher, "gene_id", "fisher_pval")
length(asd_adult_mcfisher_bh_results)
# [1] 102
scz_adult_mcfisher_bh_results <- bh_genes(scz_adult_mcfisher, "gene_id", "fisher_pval")
length(scz_adult_mcfisher_bh_results)
# [1] 6469
adhd_adult_mcfisher_bh_results <- bh_genes(adhd_adult_mcfisher, "gene_id", "fisher_pval")
length(adhd_adult_mcfisher_bh_results)
# [1] 168
bd_adult_mcfisher_bh_results <- bh_genes(bd_adult_mcfisher, "gene_id", "fisher_pval")
length(bd_adult_mcfisher_bh_results)
# [1] 845
mdd_adult_mcfisher_bh_results <- bh_genes(mdd_adult_mcfisher, "gene_id", "fisher_pval")
length(mdd_adult_mcfisher_bh_results)
# [1] 1664

# Union of fetal and adult corrected discoveries per phenotype ------------
asd_mcfisher_bh_union <-
  union(asd_fetal_mcfisher_bh_results, asd_adult_mcfisher_bh_results)
scz_mcfisher_bh_union <-
  union(scz_fetal_mcfisher_bh_results, scz_adult_mcfisher_bh_results)
adhd_mcfisher_bh_union <-
  union(adhd_fetal_mcfisher_bh_results, adhd_adult_mcfisher_bh_results)
bd_mcfisher_bh_union <-
  union(bd_fetal_mcfisher_bh_results, bd_adult_mcfisher_bh_results)
mdd_mcfisher_bh_union <-
  union(mdd_fetal_mcfisher_bh_results, mdd_adult_mcfisher_bh_results)
# Display a plot showing the reduction in discoveries ---------------------
# Build a long-format table of discovery counts per phenotype, split into
# genes found only by H-MAGMA (lost after correction) vs. genes retained
# by the corrected analysis:
hmagma_lost_table <-
  tibble(disc_set = rep(c("H-MAGMA only", "Corrected"), 5),
         phenotype = c(rep("ASD", 2), rep("SCZ", 2),
                       rep("MDD", 2), rep("ADHD", 2),
                       rep("BD", 2)),
         # setdiff = H-MAGMA-only discoveries; intersect = retained ones.
         n_disc = c(length(setdiff(asd_hmagma_bh_union, asd_mcfisher_bh_union)),
                    length(intersect(asd_hmagma_bh_union, asd_mcfisher_bh_union)),
                    length(setdiff(scz_hmagma_bh_union, scz_mcfisher_bh_union)),
                    length(intersect(scz_hmagma_bh_union, scz_mcfisher_bh_union)),
                    length(setdiff(mdd_hmagma_bh_union, mdd_mcfisher_bh_union)),
                    length(intersect(mdd_hmagma_bh_union, mdd_mcfisher_bh_union)),
                    length(setdiff(adhd_hmagma_bh_union, adhd_mcfisher_bh_union)),
                    length(intersect(adhd_hmagma_bh_union, adhd_mcfisher_bh_union)),
                    length(setdiff(bd_hmagma_bh_union, bd_mcfisher_bh_union)),
                    length(intersect(bd_hmagma_bh_union, bd_mcfisher_bh_union))))
# Faceted pie chart: one pie per phenotype, slices = proportion of
# discoveries lost vs. retained, with percentage labels on each slice.
hmagma_pie_chart <- hmagma_lost_table %>%
  mutate(phenotype = fct_relevel(phenotype, "ASD"),
         disc_set = fct_relevel(disc_set, "H-MAGMA only")) %>%
  #filter(phenotype %in% c("ASD", "SCZ")) %>%
  group_by(phenotype) %>%
  # pos is computed but not referenced below -- presumably left over from
  # label-positioning experiments (position_stack is used instead).
  mutate(disc_prop = n_disc / sum(n_disc),
         pos = cumsum(disc_prop) - disc_prop / 2) %>%
  ungroup() %>%
  # A stacked bar in polar coordinates renders as a pie.
  ggplot(aes(x = "", y = disc_prop, fill = disc_set)) +
  geom_bar(stat = "identity") +
  geom_text(aes(label = paste0(round(disc_prop, 2) * 100, "%"),
                color = disc_set),
            position = position_stack(vjust = 0.5), size = 5) +
  coord_polar(theta = "y") +
  facet_wrap(~ phenotype, ncol = 3) +
  scale_fill_manual(values = ggsci::pal_npg()(5)[c(4, 5)]) +
  scale_color_manual(values = c("white", "black"), guide = FALSE) +
  guides(fill = guide_legend(override.aes = list(size = 15))) +
  theme_minimal() +
  theme(axis.text = element_blank(),
        axis.ticks = element_blank(),
        panel.grid = element_blank(),
        axis.title = element_blank(),
        legend.position = c(.85, .3),
        strip.text = element_text(size = 18),
        legend.text = element_text(size = 18),
        legend.title = element_blank())
# Save the individual plot (pdf for publication, jpg for quick viewing)
save_plot("figures/pdf/main/f2b_hmagma_pies.pdf",
          hmagma_pie_chart, ncol = 3, nrow = 2)
save_plot("figures/nonpdf/main/f2b_hmagma_pies.jpg",
          hmagma_pie_chart, ncol = 3, nrow = 2)
# Try proceeding through each phenotype and make the list separately due
# to the strange spacing that is taking place with the facets.
# Build one standalone pie per phenotype (instead of facet_wrap) and
# assemble them with cowplot so spacing can be controlled exactly.
pheno_vec <- c("ASD", "ADHD", "BD", "MDD", "SCZ")
pie_list <- lapply(pheno_vec,
                   function(pheno_i) {
                     hmagma_lost_table %>%
                       filter(phenotype == pheno_i) %>%
                       mutate(disc_set = fct_relevel(disc_set, "H-MAGMA only")) %>%
                       # pos is computed but unused below (position_stack
                       # handles label placement).
                       mutate(disc_prop = n_disc / sum(n_disc),
                              pos = cumsum(disc_prop) - disc_prop / 2) %>%
                       ggplot(aes(x = "", y = disc_prop, fill = disc_set)) +
                       geom_bar(stat = "identity",
                                aes(color = disc_set, alpha = disc_set)) +
                       geom_text(aes(label = paste0(round(disc_prop, 2) * 100, "%")),
                                 color = "black",
                                 position = position_stack(vjust = 0.5), size = 5) +
                       coord_polar(theta = "y") +
                       labs(title = pheno_i) +
                       scale_fill_manual(values = ggsci::pal_npg()(5)[c(4, 5)]) +
                       scale_alpha_manual(values = c(.4, .8)) +
                       scale_color_manual(values = ggsci::pal_npg()(5)[c(4, 5)]) +
                       guides(fill = guide_legend(override.aes = list(size = 15))) +
                       labs(fill = "Method", alpha = "Method", color = "Method") +
                       theme_minimal() +
                       theme(axis.text = element_blank(),
                             axis.ticks = element_blank(),
                             panel.grid = element_blank(),
                             axis.title = element_blank(),
                             #legend.position = c(.85, .3),
                             # strip.text = element_text(size = 18),
                             plot.title = element_text(size = 18, hjust = 0.5, vjust = 0),
                             legend.text = element_text(size = 18),
                             legend.title = element_blank())
                   })
# Legend-free copies for the grid; the shared legend is pulled from the
# first (legended) plot and placed in the final grid cell.
display_pie_list <- lapply(pie_list,
                           function(plot) plot + theme(legend.position = "none",
                                                       plot.margin = unit(c(0,0,0,0), "cm")))
# 2x3 layout: ASD/ADHD/BD on top, MDD/SCZ plus the legend on the bottom.
top_row_pie <- plot_grid(display_pie_list[[1]], display_pie_list[[2]], display_pie_list[[3]],
                         ncol = 3)
bottom_row_pie <-
  plot_grid(display_pie_list[[4]], display_pie_list[[5]], get_legend(pie_list[[1]]),
            ncol = 3)
pie_chart_grid <- plot_grid(top_row_pie, bottom_row_pie, ncol = 1)
# Save the individual plot (pdf for publication, jpg for quick viewing)
save_plot("figures/pdf/main/f2b_hmagma_pie_grid.pdf",
          pie_chart_grid, ncol = 3, nrow = 2)
save_plot("figures/nonpdf/main/f2b_hmagma_pie_grid.jpg",
          pie_chart_grid, ncol = 3, nrow = 2)
# Create H-MAGMA versus MC ------------------------------------------------
# Histogram for H-MAGMA ASD:
# Figure 2a: overlaid histograms comparing H-MAGMA versus corrected ASD
# gene-level p-values on a common [0, 1] axis.
asd_comp_hist <-
  bind_rows(hmagma_adult_brain_results$`H-MAGMA_ASD` %>%
              dplyr::transmute(asd_pval = P, type = "H-MAGMA"),
            asd_adult_mcfisher %>%
              dplyr::transmute(asd_pval = fisher_pval, type = "Corrected")) %>%
  ggplot(aes(x = asd_pval, fill = type, color = type, alpha = type)) +
  geom_histogram(breaks = seq(0, 1, by = 0.05),
                 position = "identity",
                 closed = "left") +
  scale_alpha_manual(values = c(.8, .4)) +
  scale_color_manual(values = ggsci::pal_npg()(5)[c(5, 4)]) +
  scale_fill_manual(values = ggsci::pal_npg()(5)[c(5, 4)]) +
  labs(x = "ASD gene-level p-value",
       y = "Number of genes",
       color = "Method", fill = "Method", alpha = "Method", size = "Method") +
  theme_bw() +
  guides(fill = guide_legend(override.aes = list(size = 15))) +
  theme(plot.title = element_text(size = 32),
        plot.subtitle = element_text(size = 24),
        strip.text = element_text(size = 24),
        axis.title.x = element_text(size = 24),
        axis.text.x = element_text(size = 16),
        axis.title.y = element_text(size = 24),
        axis.text.y = element_text(size = 18),
        legend.position = c(.6, .7),
        legend.title = element_blank(),
        legend.text = element_text(size = 20),
        legend.direction = "horizontal")
# Save the ASD p-value histogram on its own (PDF + JPG)
save_plot("figures/pdf/main/f2a_asd_hist.pdf",
          asd_comp_hist, ncol = 1, nrow = 1)
save_plot("figures/nonpdf/main/f2a_asd_hist.jpg",
          asd_comp_hist, ncol = 1, nrow = 1)
# Assemble Figure 2: histogram (panel a) beside the pie-chart grid (panel b)
fig2_layout <-
  plot_grid(asd_comp_hist, pie_chart_grid,
            labels = c("a", "b"), label_fontface = "bold",
            ncol = 2, rel_widths = c(2, 2),
            label_size = 24)
# Save the combined two-panel figure (ncol/nrow scale the canvas)
save_plot("figures/pdf/main/f2_hmagma_grid.pdf",
          fig2_layout, ncol = 4, nrow = 2)
save_plot("figures/nonpdf/main/f2_hmagma_grid.jpg",
          fig2_layout, ncol = 4, nrow = 2)
|
c6853b8a284caaea63e20a78b0df143bd56d3e1e | 184a8a6432fb2bd9efbd4dcdcb684acd08441d46 | /R/metrics_worker.R | 99a7e899df6b5e5e97bc8a02a34037fc04bc9c19 | [] | no_license | lhenneman/searchAQ | e552958660cea73659e10b5320bf4be0f0ccf658 | 52e568425f3d669789193664f34667edffb617dd | refs/heads/master | 2022-02-16T03:01:50.488785 | 2019-09-30T16:40:31 | 2019-09-30T16:40:31 | 122,418,098 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,825 | r | metrics_worker.R | #' calculate daily metrics from hourly inputs
#'
#' \code{metrics_worker} collapses a single hourly series into one value per
#' day using the requested summary metric. Observations recorded before the
#' first midnight are trimmed (and the tail NA-padded) so that days align to
#' midnight. A day with too many missing hours (a per-metric threshold) is
#' returned as \code{NA}.
#'
#' @param x numeric vector of hourly observations.
#' @param metric string, one of:
#' \enumerate{
#'   \item mda8 - maximum daily 8 hr average
#'   \item afternoon - afternoon average
#'   \item mean - 24 hr average
#'   \item sum - daily total (requires a complete day)
#'   \item midday - midday average
#'   \item morning - morning average
#'   \item max - daily maximum
#'   \item 2p - early-afternoon average
#' }
#' @param datehour date-time vector aligned with \code{x}; the hour of its
#'   minimum determines how many leading hours are trimmed.
#' @param species optional species name used to build the output column name
#'   (\code{<species>_<metric>}).
#' @return A data frame with one row per day: \code{date} plus the daily
#'   metric column.
metrics_worker <- function(x,
                           metric,
                           datehour,
                           species = NULL){
  # Fail fast on an unknown metric. (Previously this was wrapped in try(),
  # which printed the error but let execution continue with garbage output.)
  if (metric %ni% c('mda8', 'afternoon', 'mean', 'sum', 'midday',
                    'morning', 'max', '2p')) {
    stop('choose a real metric')
  }
  start.hour <- hour(min(datehour))
  # Trim leading hours so the series starts at midnight; pad the tail with
  # NA so the series keeps its original length (whole days).
  if (start.hour != 0) {
    length.x <- length(x)
    x <- x[-seq_len(start.hour)]
    datehour <- datehour[-seq_len(start.hour)]
    x[(length(x) + 1):length.x] <- NA
  }
  length.x <- length(x)
  nday <- length.x / 24
  if (nday < 3) {
    stop("too few days")
  }
  if (metric == 'mda8') {
    # Daily maximum 8 hr average: each day's window spans 24 + 7 hours so an
    # 8 hr average can start in any hour of that day.
    Z <- matrix(NA, 31, nday)
    for (d in seq_len(nday)) {
      h.mark <- (d - 1) * 24
      Z[, d] <- as.double(x[(h.mark + 1):(h.mark + 24 + 7)])
    }
    # max_fun() and mean_which8h() are package-internal helpers defined
    # elsewhere.
    Z.which <- unlist(apply(Z, 2, max_fun))
    Z.8h <- mean_which8h(Z, Z.which)
    out1 <- unlist(Z.8h)
  } else {
    # Every other metric is defined by (i) an hour window within the day,
    # (ii) a missing-hour tolerance na.crit, and (iii) an aggregation
    # function; a day is left NA when it has >= na.crit missing hours.
    # Windows and thresholds are unchanged from the original branch-per-
    # metric implementation.
    Z.mean <- rep(NA, nday)
    for (d in seq_len(nday)) {
      day.start <- (d - 1) * 24
      spec <- switch(metric,
                     afternoon = list(h = (day.start + 12):(day.start + 20),
                                      na.crit = 5, fn = mean),
                     mean      = list(h = (day.start + 1):(day.start + 24),
                                      na.crit = 12, fn = mean),
                     sum       = list(h = (day.start + 1):(day.start + 24),
                                      na.crit = 1, fn = sum),
                     midday    = list(h = (day.start + 12):(day.start + 16),
                                      na.crit = 3, fn = mean),
                     morning   = list(h = (day.start + 8):(day.start + 11),
                                      na.crit = 3, fn = mean),
                     max       = list(h = (day.start + 1):(day.start + 24),
                                      na.crit = 24, fn = max),
                     `2p`      = list(h = (day.start + 15):(day.start + 16),
                                      na.crit = 1, fn = mean))
      if (sum(is.na(x[spec$h])) >= spec$na.crit) next
      Z.mean[d] <- spec$fn(x[spec$h], na.rm = TRUE)
    }
    out1 <- unlist(Z.mean)
  }
  # One calendar date per day; the metric column is named <species>_<metric>.
  date.unique <- unique(as.Date(datehour, tz = ''))
  name <- paste(species, metric, sep = '_')
  out <- data.frame(date = date.unique,
                    metric = out1)
  names(out) <- c('date', name)
  return(out)
}
|
2e79475f8ced5c3795f2f87fd1373c637a977a01 | 08a1fc13a32b36128e69a9070d877bf9cc69d0b6 | /code/chapter_2/inputs_app.R | eb17ca8c88ffe1d0d776fd5146b0a9b25c6e4115 | [
"MIT"
] | permissive | ads40/mastering_shiny | 155a28ab89ae9b7812b003a50911fed717c1a668 | 9dc7dff438af59b6ebbbc27dcd282bb36ff0b877 | refs/heads/main | 2023-06-09T02:20:34.115622 | 2021-07-02T10:03:55 | 2021-07-02T10:03:55 | 381,514,371 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,804 | r | inputs_app.R | ################################################################################################
#
# App + notes for Chapter 2 of Mastering Shiny
#
################################################################################################
library(shiny)
# Focus is on builtin inputs and outputs in Shiny, but lots of extensions are available:
#
# - shinyWidgets: https://dreamrs.github.io/shinyWidgets/index.html
# - colourpicker: https://deanattali.com/blog/colourpicker-package/
# - sortable: https://rstudio.github.io/sortable/
#
# See also Nan Xiao's curated list: https://github.com/nanxstats/awesome-shiny-extensions
# Demo data for the choice-based widgets below.
animals <- c("Zebra", "Hippo", "Meerkat", "Lion", "Dung Beetle")
states <- c("NSW", "SA", "VIC", "WA", "QLD", "TAS", "NT", "ACT")
ui <- fluidPage(
    # Application title
    titlePanel("Basic UI elements"),
    # basic text input
    textInput("name", "What's your name?"),
    passwordInput("password", "What's your password?"), # only obscures typed text
    textAreaInput("story", "Tell me a story", rows = 4),
    # basic numeric inputs
    numericInput("num", "Number One", value = 0, min = 0, max = 100),
    sliderInput("num2", "Number Two", value = 0, min = 0, max = 100),
    sliderInput("rng", "Range", value = c(10, 20), min = 0, max = 100), # supply a length-2 vector
    # for more on sliders see `?sliderInput` and https://shiny.rstudio.com/articles/sliders.html
    # date input
    dateInput("dob", "When were you born?"),
    dateRangeInput("holidays", "Pick your holiday dates."),
    # limited choices
    selectInput("state", "What's your favourite state?", choices = states),
    radioButtons("animal", "What's your favourite animal?", choices = animals),
    radioButtons("emote", "Choose one:",
                 choiceNames = list(
                     icon("angry"),
                     icon("smile"),
                     icon("sad-tear")
                 ),
                 choiceValues = list("angry", "happy", "sad")
    ),
    # allow multiple choices
    # FIX: this input previously reused the id "state"; Shiny input ids must
    # be unique within an app, so it is now "state_multi".
    selectInput("state_multi", "What's your favourite (dropdown) state",
                choices = states,
                multiple = TRUE
    ),
    # allow multiple choices in a layout similar to radioButtons
    # FIX: renamed from the duplicate id "animal" to "animal_multi".
    checkboxGroupInput("animal_multi", "Pick an animal", animals),
    # for all these elements, consult the documentation under "Server value" to check what is
    # passed to the server function via the `input` object
    # upload a file: see Chapter 9 for detailed discussion
    fileInput("upload", NULL),
    # action buttons
    actionButton("click", "Click me!"),
    actionButton("drink", "Drink me!", icon = icon("cocktail")),
    # pair actionButtons with observeEvent() or eventReactive()
    # use Bootstrap classes to customise the appearance: see http://bootstrapdocs.com/
    actionButton("drink1", "No, drink me!", icon = icon("cocktail"), class = "btn-danger")
)
# Server is intentionally empty: this demo app only showcases input widgets
# and renders no outputs.
server <- function(input, output) {
}
# Launch the app
shinyApp(ui = ui, server = server)
|
4442e05ad55f471f0bf2bed46bbf5db484623e47 | b0b156b7c4f8d300897ac2b5d184e692bcad2f94 | /SupFig6/Main.r | 7a1e101f27d489dc00be962685d9c719add8fd52 | [] | no_license | xiaoshubaba/Seppo_world_horse | 74dd144d89bc827d349e2a6a0a73bfca676b8b84 | c742863e18557d48abfcdf69350a916590e0e8ca | refs/heads/master | 2023-07-15T23:25:55.452281 | 2021-08-25T08:53:47 | 2021-08-25T08:53:47 | 276,278,326 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 508 | r | Main.r | library(ggpubr)
library(reshape2)
ME = read.table("sample.metadata",head=T)
TW = read.table("CeSta.matrix",head=T,sep="\t")
TW$cohort = ME$cohort
TW$status = ME$status
TWN = melt(TW,id=c("sample","cohort","status"))
p = ggboxplot(TWN,palette=c("#B03060","#87CEEB"),"status","value",ylab="Relative abundance",add="jitter",color="status") + theme(legend.position="none")+stat_compare_means(comparisons=list(c("domestic","wild")),method.args=list(alternative="less"))
facet(p,facet.by="variable",scales="free")
|
8c47164719a1750ae7e690ff6637f2796d4a912d | cb186a68eefc8b7ab304154c00fdfb47dd2beb0a | /lectures/code/Cap5 - PH - Interpretacion.r | 0e95e3dd08abc83ce06a0507c087b49bcf1b2169 | [] | no_license | eduardoleon/survival | 79c9b0d51c58121ed8c450f0681e959d7cceef0a | 26e87e3ec185b2d5afc5f439b4f8403b7590ee3a | refs/heads/master | 2021-01-19T19:51:26.360095 | 2017-07-14T07:19:50 | 2017-07-14T07:19:50 | 88,455,654 | 1 | 0 | null | null | null | null | ISO-8859-2 | R | false | false | 4,025 | r | Cap5 - PH - Interpretacion.r |
#
# Lecture 5: Proportional hazards (Cox) model -- interpretation
# Giancarlo Sal y Rosas
# 04/26/17
#
# NOTE(review): rm(list = ls()) wipes the user's workspace; discouraged in
# shared scripts -- restart the R session instead.
rm(list=ls(all=TRUE))
library(survival)
#
# Interpretation of coefficients
#
#
# Binary covariate: sex
#
model1 <- coxph(Surv(time,status)~sex,data=cancer)
summary(model1)
# Confidence interval on the log-hazard scale, then as hazard ratios
confint(model1)
exp(confint(model1))
# Polytomous covariate: ECOG performance score collapsed to levels 0/1/2+
cancer$ph.ecog <- ifelse(cancer$ph.ecog < 2,cancer$ph.ecog,2)
model2 <- coxph(Surv(time,status)~factor(ph.ecog),data=cancer)
summary(model2)
confint(model2)
exp(confint(model2))
model2$var
# Hand-built 95% CI from rounded coefficient (0.56) and variance (0.06)
c(0.56 - 1.96*sqrt(0.06),0.56 + 1.96*sqrt(0.06))
exp(c(0.56 - 1.96*sqrt(0.06),0.56 + 1.96*sqrt(0.06)))
# Continuous covariate: age, interpreted per 10-year increase
model3 <- coxph(Surv(time,status)~age,data=cancer)
summary(model3)
# Confidence interval for a 10-year increase in age
# NOTE(review): the upper bound uses 0.02 where the lower uses 0.02*10 --
# looks like a missing *10 factor; verify.
c(0.02*10 - 1.96*10*sqrt(model3$var),0.02 + 1.96*10*sqrt(model3$var))
exp(c(0.02*10 - 1.96*10*sqrt(model3$var),0.02 + 1.96*10*sqrt(model3$var)))
# Multivariable model
model4 <- coxph(Surv(time,status)~age + sex + factor(ph.ecog),data=cancer)
summary(model4)
# Nested model comparison via anova
model5 <- coxph(Surv(time,status)~ sex + factor(ph.ecog),data=cancer)
summary(model5)
anova(model5,model4)
#############################################
#
# Estimation of the baseline hazard function
#
#############################################
# Survival curves by sex from the fitted Cox model (fig51)
pdf("fig51.pdf")
plot(survfit(model1,newdata=data.frame(sex=1),conf.int=FALSE),
     xlab="Tiempo (dias)",ylab=" Función de supervivencia ",col=1)
lines(survfit(model1,newdata=data.frame(sex=2),conf.int=FALSE),col=2)
legend("topright",legend=c("Hombre","Mujeres"),title="Sexo del paciente",
       lty=c(1,1),col=1:2)
dev.off()
#
# basehaz() versus survfit(): two routes to the cumulative baseline hazard
#
# Recode sex to 0/1 so that sex = 0 is the baseline group
cancer$sex <- as.numeric(cancer$sex) - 1
model1 <- coxph(Surv(time,status)~sex,data=cancer)
haz <- basehaz(model1,centered=FALSE)
pdf("fig52.pdf")
plot(haz$time,haz$hazard,xlab="Tiempo (dias)",ylab="Riesgo acumulado basal",
     col=1,type="s")
# Overlay -log(S(t)) for the sex = 0 group from survfit: should coincide
# with the basehaz() curve (fig52)
aux <- survfit(model1,newdata=data.frame(sex=0),conf.int=FALSE)
lines(aux$time,-log(aux$surv),col=2)
legend("topleft",legend=c("basehaz","survfit"),title="Método",lty=c(1,1),col=1:2)
dev.off()
#
# Survival by sex, adjusting for age (held at 62.5) (fig53)
#
# NOTE(review): sex was recoded to 0/1 above, yet newdata below uses
# sex = 1 and sex = 2 -- verify the intended coding.
model3 <- coxph(Surv(time,status)~age+sex,data=cancer)
summary(model3)
aux1 <- survfit(model3,newdata=data.frame(sex=1,age=62.5),conf.int=FALSE)
aux2 <- survfit(model3,newdata=data.frame(sex=2,age=62.5),conf.int=FALSE)
pdf("fig53.pdf")
plot(aux1,xlab="Tiempo (dias)",ylab="Función de supervivencia")
lines(aux2$time,aux2$surv,col=2)
legend("topright",legend=c("Hombres","Mujeres"),lty=c(1,1),col=1:2)
dev.off()
#
# Risk score (linear predictor): distribution and survival by quantile
#
# NOTE(review): file.choose() is interactive; a fixed path would make this
# reproducible (UIS data set expected).
uis <- read.csv(file.choose())
model4 <- coxph(Surv(time,censor)~age+beck+race+treat+site,data=uis)
summary(model4)
# Linear predictor ("risk score") for each subject (fig54)
lp <- predict(model4,newdata=uis,type="lp")
summary(lp)
pdf("fig54.pdf")
hist(lp,prob=TRUE,xlab="Score",main="")
dev.off()
# Survival curves at the 25th/50th/75th percentiles of the risk score:
# reference curve raised to exp(score quantile) (fig55)
s25 <- survfit(model4)$surv^exp(quantile(model4$linear.predictors,0.25))
s50 <- survfit(model4)$surv^exp(quantile(model4$linear.predictors,0.5))
s75 <- survfit(model4)$surv^exp(quantile(model4$linear.predictors,0.75))
pdf("fig55.pdf")
plot(survfit(model4)$time,s25,xlab="Tiempo (dias)",
     ylab=" Función de supervivencia ",col=1,type="l",ylim=c(0,1))
lines(survfit(model4)$time,s50,col=2,type="l")
lines(survfit(model4)$time,s75,col=3,type="l")
legend(500,0.8,legend=c("Percentil 25","Percentil 50","Percentil 75"),
       title="Cuantiles del score de riesgo",lty=c(3,3),col=c(1,2,3))
dev.off()
# Treatment effect (4th coefficient, treat) at the median risk score (fig56)
pdf("fig56.pdf")
sa <- survfit(model4)$surv^exp(quantile(model4$linear.predictors,0.5))
sb <- survfit(model4)$surv^exp(quantile(model4$linear.predictors-coef(model4)[4],0.5))
plot(survfit(model4)$time,sa,xlab="Tiempo (dias)",
     ylab=" Función de supervivencia ",col=1,type="l",ylim=c(0,1))
lines(survfit(model4)$time,sb,col=2,type="l")
legend(700,0.8,legend=c("Largo","Corto"),
       title="Tratamiento",lty=c(1,1),col=1:2)
dev.off()
|
145ea2cfd535c06320ea110eb4381977b2f379d3 | 8b0dee9d51374e8bced0f0fd8efa8b6f0c14c9d7 | /man/primer.Rd | c6d886b2f284948b599fa7744420aa79800e5f35 | [] | no_license | rwoldford/qqtest | 4b9c595ea4c8f7e9ee6f1947e5f94e20c72be0a0 | f3737db73bfd00e36067d394d749a7232c3f3bb9 | refs/heads/master | 2021-02-11T16:26:56.146877 | 2020-03-16T15:49:28 | 2020-03-16T15:49:28 | 244,509,892 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,681 | rd | primer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/primer.R
\docType{data}
\name{primer}
\alias{primer}
\title{Automobile primer paint thickness quality control measurements.}
\format{A data frame with 20 rows and 14 variates:
\describe{
\item{day}{Day on which the parts were taken and measured.}
\item{batch}{Either the first or second set of 10 consecutive parts taken.}
\item{sel1}{Thickness of primer in mils on the first part sampled in the specified batch of that day.}
\item{sel2}{Thickness of primer in mils on the second part sampled in the specified batch of that day.}
\item{sel3}{Thickness of primer in mils on the third part sampled in the specified batch of that day.}
\item{sel4}{Thickness of primer in mils on the fourth part sampled in the specified batch of that day.}
\item{sel5}{Thickness of primer in mils on the fifth part sampled in the specified batch of that day.}
\item{sel6}{Thickness of primer in mils on the sixth part sampled in the specified batch of that day.}
\item{sel7}{Thickness of primer in mils on the seventh part sampled in the specified batch of that day.}
\item{sel8}{Thickness of primer in mils on the eighth part sampled in the specified batch of that day.}
\item{sel9}{Thickness of primer in mils on the ninth part sampled in the specified batch of that day.}
\item{sel10}{Thickness of primer in mils on the tenth part sampled in the specified batch of that day.}
\item{xbar}{Arithmetic average of the measurements of primer thickness of the 10 parts selected in the specified batch of that day.}
\item{s}{Sample standard deviation of the measurements of primer thickness of the 10 parts selected in the specified batch of that day.}
}}
\source{
"Statistical Process Control - SPC",
Automotive Industry Action Group(AIAG), Southfield MI, (1995), page 64.
}
\usage{
primer
}
\description{
Contains process control measurements of thickness of primer applied to
automotive body parts in an auto factory.
Twice daily, a set of 10 consecutive parts were selected and the thickness in mils (thousandths of an inch)
were measured. For each set of 10 parts, the average (xbar) and the sample standard deviation (s) were also
calculated and recorded. These summaries would be plotted in xbar or s control charts with suitably determined upper and
lower control limits.
Alternatively, for checking outliers a qqplot (via qqtest) could be used for either xbar or s.
}
\details{
\code{with(primer,qqtest(xbar, main="Averages"))} will effect this plot for xbar.
\code{with(primer,qqtest(s,dist="kay", df=9, main ="Standard deviations"))} will effect this plot for s.
}
\keyword{datasets}
|
ed963f4a4567dd0f39777ca205ac5391cf7e8652 | e1d1222d501ba0bbc1ea0df6290c40a5c5bdf96a | /app.R | 7655d7eb621c410f0d71dadd814135838c672118 | [] | no_license | laurenslow/shiny_app | 0b95ba4d1e59e17c8db97f7f4833ece37eabfbaf | a280072bef30430ccb32d50b169e69399007f15b | refs/heads/master | 2022-12-31T23:35:53.426737 | 2020-10-20T11:34:41 | 2020-10-20T11:34:41 | 297,665,138 | 0 | 0 | null | 2020-09-22T13:55:08 | 2020-09-22T13:55:07 | null | UTF-8 | R | false | false | 2,489 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tidyverse)
library(readr)
library(hrbrthemes)
# Define UI for application that draws a histogram
# UI: census-year checkboxes plus a bin-count slider feed the histogram.
ui <- fluidPage(
    # Application title
    # FIX: replaced leftover "Old Faithful Geyser Data" template text with a
    # title matching the app's actual content.
    titlePanel("SCBI Tree Diameter Data"),
    # Sidebar with the census-year filter and histogram bin control
    sidebarLayout(
        sidebarPanel(
            checkboxGroupInput(inputId = "stems_plot",
                               label = "Census year:",
                               # displayed year ranges map to CensusID codes 1-3
                               choices = c("2008-2010" = "1",
                                           "2013" = "2",
                                           "2018" = "3")
            ),
            sliderInput("bins",
                        "Number of bins:",
                        min = 1,
                        max = 50,
                        value = 20)
        ),
        # Histogram of tree diameters for the selected censuses
        mainPanel(
           plotOutput("histPlot")
        ))
)
# Define server logic required to draw a histogram
# Server: load the three SCBI stem censuses once per session, then render a
# histogram of tree diameters filtered to the selected census years.
server <- function(input, output) {
    # Each census lives in its own CSV; the three share a column layout, so
    # rows can simply be stacked.
    stem1 <- here::here("data/scbi.stem1.csv") %>%
        read_csv()
    stem2 <- here::here("data/scbi.stem2.csv") %>%
        read_csv()
    stem3 <- here::here("data/scbi.stem3.csv") %>%
        read_csv()
    stems <- rbind(stem1, stem2, stem3)
    output$histPlot <- renderPlot({
        # Keep rows with usable dbh values for the chosen censuses; dbh is
        # stored as text (with literal "NULL"s), so convert after filtering.
        # (Fixed: na.omit(stems) inside the pipe passed a stray second
        # argument; the piped data frame is the intended target.)
        stems_plot <- stems %>%
            filter(dbh != "NULL") %>%
            filter(CensusID %in% input$stems_plot) %>%
            na.omit() %>%
            mutate(CensusID = as.factor(CensusID),
                   dbh = as.numeric(dbh)) %>%
            filter(dbh < 500)
        # Overlaid histograms, one fill per census; the bin count comes
        # straight from the slider. (Removed an unused manual bin
        # computation over a positional column.)
        ggplot(data = stems_plot, aes(x = dbh, fill = CensusID)) +
            geom_histogram(bins = input$bins, alpha = 0.5, position = "identity") +
            labs(title = "Distribution of Tree Diameters at the Smithsonian Conservation Biology \n Institute (SCBI) in Front Royal, VA ",
                 x = "Diameter at Breast Height (DBH)",
                 y = "Number of Trees")
    })
}
# Launch the Shiny app from the ui/server pair defined above
shinyApp(ui = ui, server = server)
|
8c671afb90f48fb5d71ef36b65f37164b29d3bb2 | 39d3010f966489e31e5f9ab8f4a2e42de9d922a4 | /ui.R | 9305f19adaa8106dad1f11b6908ab3703ad0d35d | [] | no_license | AoibhinnRed/CourseraDSCourse9 | 10e1b2dcb5ec89101bda78718649518a49eb836d | be16ec6784ba12659d68d7edd7e53e529679f771 | refs/heads/master | 2020-11-25T09:30:55.443043 | 2019-12-17T11:48:13 | 2019-12-17T11:48:13 | 228,597,593 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 658 | r | ui.R | library(datasets)
library(datasets)
library(shiny)
# UI: a waiting-time slider drives a plot ("distPlot") rendered by the server.
shinyUI(fluidPage(
  # Application title
  titlePanel("Chances of Old Faithful erupting"),
  # Sidebar with a slider for the waiting time since the last eruption.
  # NOTE(review): in the built-in faithful data, waiting times are minutes,
  # but the label below says "days" -- verify against the server logic.
  sidebarLayout(
    sidebarPanel(
      sliderInput("waitingtime",
                  "After waiting this amount of days:",
                  min = 43,
                  max = 95,
                  value = 43)
    ),
    # Plot produced by the server as output$distPlot
    mainPanel(
      plotOutput("distPlot")
    )
  )
))
f8d48746cdc1a8fcbb0f623737d1411b60adc0a8 | 0c1b0970b6b01ac603df5a578e0e9f0d422fe3d4 | /R scripts/Ms-Cc_field-temp-manip_plots_com-meet_11-22-19.R | 41b912841e6a30422b92e16d7b368aeb159e6b1f | [] | no_license | melmoore/Ms-Cc_field_temp_manip | b0c57d71b74df91aa18d051bce824ea1b3824b1a | 9a562045e5a6c12efc1eebba93684df48a0ba095 | refs/heads/master | 2021-07-16T08:15:31.198443 | 2020-09-16T21:37:42 | 2020-09-16T21:37:42 | 209,346,844 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,897 | r | Ms-Cc_field-temp-manip_plots_com-meet_11-22-19.R | #Ms Cc field temp manip--plots for committee meeting 11-22-19
#load libraries
library(scales)
library(readr)
library(ggplot2)
library(Rmisc)
library(dplyr)
library(tidyr)
library(plotly)
library(nlme)
library(lme4)
library(extrafont)
library(cowplot)
library(viridis)
#---------------------------
# Load data ------------------------------------------------------------------
# Caterpillar data: raw, cleaned, and long-format field temperature-
# manipulation datasets. Factor levels are fixed at read time so treatment
# ordering is consistent across figures.
ftm <- read_csv(
  "data/Ms-Cc_FTM_incomp_ed-raw.csv",
  col_types = cols(treat_heat = col_factor(levels = c("con", "hs")),
                   treat_para = col_factor(levels = c("p", "np")))
)
View(ftm)
ftm_cl <- read_csv(
  "data/Ms-Cc_FTM_incomp_clean.csv",
  col_types = cols(plot_id = col_factor(levels = c("plot1", "plot2")),
                   treat_heat = col_factor(levels = c("con", "hs")),
                   treat_para = col_factor(levels = c("np", "p")))
)
View(ftm_cl)
ftm_lng <- read_csv(
  "data/Ms-Cc_FTM_incomp_clean_lng.csv",
  col_types = cols(plot_id = col_factor(levels = c("plot1", "plot2")),
                   treat_heat = col_factor(levels = c("con", "hs")),
                   treat_para = col_factor(levels = c("p", "np")))
)
# Temperature data from the field data logger (long format)
dlt_lng <- read_csv("data/temp_data/Ms-Cc_FTM_datalogger_temp_ed_lng.csv")
# Plot theme used by every figure below
theme_set(theme_classic())
# 2017 temperature-variation experiment data.
# NOTE(review): the three reads below use absolute, user-specific paths --
# consider project-relative paths (e.g. here::here()) for portability.
tv <- read_csv(
  "~/Manduca expts/Summer+Fall 2017/Ms-Cc_25-28-30_temp-var/data files/25-28-30_tv-final_clean.csv",
  col_types = cols(temp.var = col_factor(levels = c("0", "5", "10")),
                   treatment = col_factor(levels = c("control", "para")))
)
# Early heat shock data
ehs <- read_csv("~/Manduca expts/Spring+Summer+Fall 2018/Early Ms+Cc heat shock/Ms-Cc-EHS-analysis/data/Ms+Cc_EHS_incomplete_clean.csv")
# Developmental-timing heat shock data
rhs <- read_csv(
  "~/Manduca expts/Spring+Summer+Fall 2018/Ms-Cc_repeated_heatshock/data files/Ms+Cc_RHS_incomplete_clean.csv",
  col_types = cols(shock.stage = col_factor(levels = c("control", "early", "mid", "late")))
)
#-------------------------
# PLOT DELTA MASS AND AGE FOR M. SEXTA ---------------------------------------
# Delta mass = log(mass_end) - log(mass_3); delta age = ttend - tt3.
ftm_cl <- mutate(ftm_cl,
                 dlta_mss = log(mass_end) - log(mass_3),
                 dlta_age = ttend - tt3)
# Mean and SE of each delta by heat treatment, field plot, and observed
# parasitization status.
dm_sum <- summarySE(ftm_cl, measurevar = "dlta_mss",
                    groupvars = c("treat_heat", "plot_id", "obs_treatp"),
                    na.rm = TRUE)
dm_sum
da_sum <- summarySE(ftm_cl, measurevar = "dlta_age",
                    groupvars = c("treat_heat", "plot_id", "obs_treatp"),
                    na.rm = TRUE)
da_sum
# Facet-strip labels keyed by plot_id
fplot_labs <- c(plot1 = "Plot 1", plot2 = "Plot 2")
# Mean change in mass (+/- 1 SE): weed-barrier treatment on x, parasitization
# as colour/linetype, one panel per field plot.
mn_dm_plot <- ggplot(dm_sum, aes(x = treat_heat, y = dlta_mss,
                                 group = obs_treatp, color = obs_treatp))
mn_dm_plot +
  geom_point(size = 9) +
  geom_line(aes(linetype = obs_treatp), size = 2.5) +
  geom_errorbar(aes(ymin = dlta_mss - se, ymax = dlta_mss + se),
                width = .4, size = 2) +
  scale_color_manual(values = c("#5F9ED1", "#D55E00"),
                     breaks = c("obs_np", "obs_p"),
                     labels = c("NP", "P")) +
  scale_linetype_manual(values = c("solid", "dashed"),
                        breaks = c("obs_np", "obs_p"),
                        labels = c("NP", "P")) +
  scale_x_discrete(breaks = c("con", "hs"),
                   labels = c("Grey", "Black")) +
  labs(x = "Weed Barrier Treatment", y = expression(Delta * "Mass")) +
  facet_wrap(~plot_id, labeller = labeller(plot_id = fplot_labs)) +
  theme(text = element_text(family = "Cambria"),
        axis.line.x = element_line(colour = "black", size = 2),
        axis.line.y = element_line(colour = "black", size = 2),
        axis.ticks = element_line(colour = "black", size = 2),
        axis.ticks.length = unit(2.5, "mm"),
        axis.text.x = element_text(size = 26, face = "bold", color = "black"),
        axis.text.y = element_text(size = 26, face = "bold", color = "black"),
        axis.title.x = element_text(size = 26, vjust = -.35, face = "bold",
                                    margin = margin(t = 20, r = 0, b = 0, l = 0)),
        axis.title.y = element_text(size = 26, vjust = 1.5, face = "bold",
                                    margin = margin(t = 0, r = 20, b = 0, l = 0)),
        legend.text = element_text(size = 22, face = "bold"),
        legend.title = element_blank(),
        legend.key.width = unit(4, "line"),
        strip.text = element_text(size = 23, face = "bold"),
        strip.background = element_rect(size = 2.5, fill = "white"))
# Mean change in age (+/- 1 SE), same layout as the delta-mass figure above.
mn_da_plot <- ggplot(da_sum, aes(x = treat_heat, y = dlta_age,
                                 group = obs_treatp, color = obs_treatp))
mn_da_plot +
  geom_point(size = 9) +
  geom_line(aes(linetype = obs_treatp), size = 2.5) +
  geom_errorbar(aes(ymin = dlta_age - se, ymax = dlta_age + se),
                width = .4, size = 2) +
  scale_color_manual(values = c("#5F9ED1", "#D55E00"),
                     breaks = c("obs_np", "obs_p"),
                     labels = c("NP", "P")) +
  scale_linetype_manual(values = c("solid", "dashed"),
                        breaks = c("obs_np", "obs_p"),
                        labels = c("NP", "P")) +
  scale_x_discrete(breaks = c("con", "hs"),
                   labels = c("Grey", "Black")) +
  labs(x = "Weed Barrier Treatment", y = expression(Delta * "Age")) +
  facet_wrap(~plot_id, labeller = labeller(plot_id = fplot_labs)) +
  theme(text = element_text(family = "Cambria"),
        axis.line.x = element_line(colour = "black", size = 2),
        axis.line.y = element_line(colour = "black", size = 2),
        axis.ticks = element_line(colour = "black", size = 2),
        axis.ticks.length = unit(2.5, "mm"),
        axis.text.x = element_text(size = 26, face = "bold", color = "black"),
        axis.text.y = element_text(size = 26, face = "bold", color = "black"),
        axis.title.x = element_text(size = 26, vjust = -.35, face = "bold",
                                    margin = margin(t = 20, r = 0, b = 0, l = 0)),
        axis.title.y = element_text(size = 26, vjust = 1.5, face = "bold",
                                    margin = margin(t = 0, r = 20, b = 0, l = 0)),
        legend.text = element_text(size = 22, face = "bold"),
        legend.title = element_blank(),
        legend.key.width = unit(4, "line"),
        strip.text = element_text(size = 23, face = "bold"),
        strip.background = element_rect(size = 2.5, fill = "white"))
#-----------------------
# PLOTS OF WASP SURVIVAL TO ECLOSION AND ADULT MASS BY SEX -------------------
# Restrict to parasitized caterpillars (a recorded emergence date) that were
# reared in the lab (em_lab == 1) rather than left in the field.
ftm_cl$date_em.j[is.na(ftm_cl$date_em.j)] <- 0
ftm_p <- subset(ftm_cl, date_em.j > 0 & em_lab == 1)
# Long format: one row per (host, wasp sex) for number eclosed...
ftm_pl <- gather(ftm_p, sex, ecl, fem_ecl, male_ecl)
ftm_pl$sex <- gsub("fem_ecl", "Female", ftm_pl$sex)
ftm_pl$sex <- gsub("male_ecl", "Male", ftm_pl$sex)
# ...and one row per (host, wasp sex) for individual adult mass.
ftm_pml <- gather(ftm_p, sex, mass, ind_fem_mass, ind_male_mass)
ftm_pml$sex <- gsub("ind_fem_mass", "Female", ftm_pml$sex)
ftm_pml$sex <- gsub("ind_male_mass", "Male", ftm_pml$sex)
# Keep only the join keys plus mass, then attach mass to the eclosion rows.
ftm_pml <- select(ftm_pml, bug_id, treat_heat, sex, mass)
ftm_pl <- merge(ftm_pl, ftm_pml, by = c("bug_id", "treat_heat", "sex"))
# Mean and SE of number eclosed by field plot, heat treatment, and sex.
wsex_sum <- summarySE(ftm_pl, measurevar = "ecl",
                      groupvars = c("plot_id", "treat_heat", "sex"),
                      na.rm = TRUE)
wsex_sum
# Mean number of wasps surviving to eclosion (+/- 1 SE), by sex, per plot.
mn_numecl_plot <- ggplot(wsex_sum, aes(x = treat_heat, y = ecl,
                                       group = sex, color = sex))
mn_numecl_plot +
  geom_point(size = 9) +
  geom_line(aes(linetype = sex), size = 2.5) +
  geom_errorbar(aes(ymin = ecl - se, ymax = ecl + se),
                width = .4, size = 2) +
  scale_color_manual(values = c("#E69F00", "#000000"),
                     breaks = c("Male", "Female"),
                     labels = c("Male", "Female")) +
  scale_linetype_manual(values = c("solid", "dashed"),
                        breaks = c("Male", "Female"),
                        labels = c("Male", "Female")) +
  scale_x_discrete(breaks = c("con", "hs"),
                   labels = c("Grey", "Black")) +
  labs(x = "Weed Barrier Treatment", y = "Num. Eclosed") +
  facet_wrap(~plot_id, labeller = labeller(plot_id = fplot_labs)) +
  theme(text = element_text(family = "Cambria"),
        axis.line.x = element_line(colour = "black", size = 2),
        axis.line.y = element_line(colour = "black", size = 2),
        axis.ticks = element_line(colour = "black", size = 2),
        axis.ticks.length = unit(2.5, "mm"),
        axis.text.x = element_text(size = 26, face = "bold", color = "black"),
        axis.text.y = element_text(size = 26, face = "bold", color = "black"),
        axis.title.x = element_text(size = 26, vjust = -.35, face = "bold",
                                    margin = margin(t = 20, r = 0, b = 0, l = 0)),
        axis.title.y = element_text(size = 26, vjust = 1.5, face = "bold",
                                    margin = margin(t = 0, r = 20, b = 0, l = 0)),
        legend.text = element_text(size = 22, face = "bold"),
        legend.title = element_blank(),
        legend.key.width = unit(5, "line"),
        strip.text = element_text(size = 23, face = "bold"),
        strip.background = element_rect(size = 2.5, fill = "white"))
# Mean and SE of individual adult wasp mass by plot, heat treatment, and sex.
wmass_sum <- summarySE(ftm_pl, measurevar = "mass",
                       groupvars = c("plot_id", "treat_heat", "sex"),
                       na.rm = TRUE)
wmass_sum
# Mean individual adult wasp mass (+/- 1 SE), by sex, per plot.
mn_wadmss_plot <- ggplot(wmass_sum, aes(x = treat_heat, y = mass,
                                        group = sex, color = sex))
mn_wadmss_plot +
  geom_point(size = 9) +
  geom_line(aes(linetype = sex), size = 2.5) +
  geom_errorbar(aes(ymin = mass - se, ymax = mass + se),
                width = .4, size = 2) +
  scale_color_manual(values = c("#E69F00", "#000000"),
                     breaks = c("Male", "Female"),
                     labels = c("Male", "Female")) +
  scale_linetype_manual(values = c("solid", "dashed"),
                        breaks = c("Male", "Female"),
                        labels = c("Male", "Female")) +
  scale_x_discrete(breaks = c("con", "hs"),
                   labels = c("Grey", "Black")) +
  labs(x = "Weed Barrier Treatment", y = "Ind. Mass [mg]") +
  facet_wrap(~plot_id, labeller = labeller(plot_id = fplot_labs)) +
  theme(text = element_text(family = "Cambria"),
        axis.line.x = element_line(colour = "black", size = 2),
        axis.line.y = element_line(colour = "black", size = 2),
        axis.ticks = element_line(colour = "black", size = 2),
        axis.ticks.length = unit(2.5, "mm"),
        axis.text.x = element_text(size = 26, face = "bold", color = "black"),
        axis.text.y = element_text(size = 26, face = "bold", color = "black"),
        axis.title.x = element_text(size = 26, vjust = -.35, face = "bold",
                                    margin = margin(t = 20, r = 0, b = 0, l = 0)),
        axis.title.y = element_text(size = 26, vjust = 1.5, face = "bold",
                                    margin = margin(t = 0, r = 20, b = 0, l = 0)),
        legend.text = element_text(size = 22, face = "bold"),
        legend.title = element_blank(),
        legend.key.width = unit(5, "line"),
        strip.text = element_text(size = 23, face = "bold"),
        strip.background = element_rect(size = 2.5, fill = "white"))
#------------------------------
# Temperature densities by weed-barrier treatment, one panel per logger
# position; loc is releveled so panels run high -> middle -> low.
dlt_lng$loc <- factor(dlt_lng$loc, levels = c("h_un_sh", "m_un_sh", "l_un_sh"))
# Facet-strip labels for the three logger positions
dens_labs <- c(h_un_sh = "High", m_un_sh = "Middle", l_un_sh = "Low")
tempdens_plot <- ggplot(dlt_lng, aes(x = temp, group = treat_hs,
                                     fill = treat_hs))
tempdens_plot +
  geom_density(aes(color = treat_hs), alpha = .5, size = 1.5) +
  scale_fill_manual(values = c("#8D8D8D", "black"),
                    breaks = c("con", "hs"),
                    labels = c("Grey WB", "Black WB")) +
  scale_color_manual(values = c("#8D8D8D", "black"),
                     breaks = c("con", "hs"),
                     labels = c("Grey WB", "Black WB")) +
  labs(x = "Temperature [C]", y = "Density") +
  facet_wrap(~loc, labeller = labeller(loc = dens_labs)) +
  theme(text = element_text(family = "Cambria"),
        axis.line.x = element_line(colour = "black", size = 2),
        axis.line.y = element_line(colour = "black", size = 2),
        axis.ticks = element_line(colour = "black", size = 2),
        axis.ticks.length = unit(2.5, "mm"),
        axis.text.x = element_text(size = 26, face = "bold", color = "black"),
        axis.text.y = element_text(size = 26, face = "bold", color = "black"),
        axis.title.x = element_text(size = 26, vjust = -.35, face = "bold",
                                    margin = margin(t = 20, r = 0, b = 0, l = 0)),
        axis.title.y = element_text(size = 26, vjust = 1.5, face = "bold",
                                    margin = margin(t = 0, r = 20, b = 0, l = 0)),
        legend.position = c(0.9, 0.8),
        legend.text = element_text(size = 22, face = "bold"),
        legend.title = element_blank(),
        legend.key.width = unit(3, "line"),
        strip.text = element_text(size = 23, face = "bold"),
        strip.background = element_rect(size = 2.5, fill = "white"))
#-----------------------------
# 2017 temp-variation experiment: wasp survival reaction norm ----------------
# Parasitized hosts only; drop parasitized wanderers/WOWEs (no recorded
# emergence date) and the +/-5 fluctuation treatment.
tv_para <- subset(tv, treatment == "para")
tv_para$date.em.j[is.na(tv_para$date.em.j)] <- 0
tv_para <- subset(tv_para, date.em.j > 0)
tv_para <- subset(tv_para, temp.var != 5)
# Mean proportion surviving to eclosion (tot.surv = num_ecl / load) by mean
# temperature and fluctuation.
totsurv.sum <- summarySE(tv_para, measurevar = "tot.surv",
                         groupvars = c("temp.avg", "temp.var"),
                         na.rm = TRUE)
totsurv.sum
# Base ggplot for the stand-alone survival reaction-norm figure (the full
# chain continues below).
totsurv.plot <- ggplot(totsurv.sum,
                       aes(x = temp.avg, y = tot.surv,
                           group = temp.var, color = temp.var))
percecl_fig2<-totsurv.plot+geom_point(size=7,
shape=17
)+geom_line(aes(linetype=temp.var),
size=2.5
)+geom_errorbar(aes(ymin=tot.surv-se, ymax=tot.surv+se),
width=.5, size=1.7
)+scale_color_manual(values=c("#56B4E9","#D55E00"),name=c("Fluctuation [C]"),
breaks=c("0","10"),labels=c("0","10")
)+scale_linetype_manual(values=c("solid", "dashed"),
breaks=c("0", "10"),
labels=c("0", "10"),
name="Fluctuation [C]"
)+scale_x_continuous(limits=c(24.5,30.5),
breaks = c(25, 28, 30)
)+scale_y_continuous(limits = c(0, 0.75),
breaks = c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7)
)+labs(x="Mean Temperature [C]", y="% Eclosion"
)+theme(text = element_text(family=("Cambria")),
axis.line.x=element_line(colour = 'black', size = 2),
axis.line.y=element_line(colour = 'black', size = 2),
axis.ticks = element_line(colour = 'black', size = 2),
axis.ticks.length = unit(2.5, "mm"),
axis.text.x = element_text(size = 26, face = "bold", color="black"),
axis.text.y = element_text(size = 26,face = "bold", color="black"),
axis.title.x = element_text(size = 26, vjust=-.35,face = "bold",
margin = margin(t = 20, r = 0, b = 0, l = 0)),
axis.title.y = element_text(size = 26, vjust=1.5,face = "bold",
margin = margin(t = 0, r = 20, b = 0, l = 0)),
legend.position = c(0.8, 0.7),
legend.text = element_text(size=22, face = "bold"),
legend.title = element_text(size=22, face = "bold"),
legend.key.width = unit(4,"line"),
strip.text = element_text(size=23, face="bold"),
strip.background = element_rect(size=2.5, fill="white"))
percecl_fig2
#-------------------------------
#dev timing heat shock expt--wasp survival num ecl
#remove dead individuals
rhs_cl <- subset(rhs, died.bf5==0)
#add immature and mature 2nds columns into 1 tot num_unem
#(NAs treated as zero counts before summing)
rhs_cl$num.unem.im[is.na(rhs_cl$num.unem.im)]<-0
rhs_cl$num.unem.mat[is.na(rhs_cl$num.unem.mat)]<-0
rhs_cl$num_unem_tot <- rhs_cl$num.unem.im + rhs_cl$num.unem.mat
#make sorting column to exclude undissected hosts with wasp em, while not excluding wowes
#diss_keep == 1 when the host was culled, or had emergence AND unemerged wasps
#counted at dissection; everything else is dropped below.
rhs_cl$date.em.j[is.na(rhs_cl$date.em.j)]<-0
rhs_cl$date.cull.j[is.na(rhs_cl$date.cull.j)]<-0
rhs_cl$diss_keep <- ifelse(rhs_cl$date.cull.j>0, 1,
                           ifelse(rhs_cl$date.em.j>0 & rhs_cl$num_unem_tot>0, 1, 0))
rhs_cld <- subset(rhs_cl, diss_keep==1)
#make a long data frame for wasp sex
rhs_wlng <- gather (rhs_cld, sex, ecl, fem.ecl, male.ecl)
#mean number of wasps that survived to eclosion (grouped by sex)
numecl_sum <- summarySE(rhs_wlng, measurevar = "ecl",
                        groupvars = c("shock.stage", "sex"),
                        na.rm = TRUE)
numecl_sum[is.na(numecl_sum)]<-0
numecl_sum
#creating a grouping variable so that control points don't connect by geom_line to treatment groups
#NOTE(review): this hard-coded vector assumes a fixed row order (2 control rows
#first, then 6 heat-shock rows) in numecl_sum — verify if groups change.
numecl_sum$group <- c("con", "con", "hs", "hs",
                      "hs", "hs", "hs", "hs")
#plot of mean number surviving to eclosion by shock stage
numecl_plot <- ggplot(numecl_sum, aes(x=shock.stage, y=ecl, color=sex, group=interaction(sex, group)))
ss_stage_numecl <- numecl_plot+geom_point(aes(shape=sex),
                                          size=9
)+geom_line(aes(linetype=sex), size=2.5
)+geom_errorbar(aes(ymin=ecl-se, ymax=ecl+se),
                width=.3, size=1.5
)+scale_color_manual(values = c("#DA8E03", "black"),
                     breaks=c("fem.ecl", "male.ecl"),
                     labels=c("Female", "Male"),
                     name="Sex"
)+scale_linetype_manual(values=c("solid", "dashed"),
                        breaks=c("fem.ecl", "male.ecl"),
                        labels=c("Female", "Male"),
                        name="Sex"
)+scale_shape_manual(values=c(17, 16),
                     breaks=c("fem.ecl", "male.ecl"),
                     labels=c("Female", "Male"),
                     name="Sex"
)+scale_x_discrete("Heat Shock Timing",
                   labels=c("Control", "Early", "Middle", "Late")
)+scale_y_continuous(limits = c(0,55),
                     breaks = c(0,10,20,30,40,50)
)+labs(y="Num. Eclosed"
)+theme(text = element_text(family=("Cambria")),
        axis.line.x=element_line(colour = 'black', size = 2),
        axis.line.y=element_line(colour = 'black', size = 2),
        axis.ticks = element_line(colour = 'black', size = 2),
        axis.ticks.length = unit(2.5, "mm"),
        axis.text.x = element_text(size = 26, face = "bold", color="black"),
        axis.text.y = element_text(size = 26,face = "bold", color="black"),
        axis.title.x = element_text(size = 26, vjust=-.35,face = "bold",
                                    margin = margin(t = 20, r = 0, b = 0, l = 0)),
        axis.title.y = element_text(size = 26, vjust=1.5,face = "bold",
                                    margin = margin(t = 0, r = 20, b = 0, l = 0)),
        legend.position = c(0.85, 0.8),
        legend.text = element_text(size=22, face = "bold"),
        legend.title = element_blank(),
        legend.key.width = unit(4,"line"),
        strip.text = element_text(size=23, face="bold"),
        strip.background = element_rect(size=2.5, fill="white"))
ss_stage_numecl
#---------------------------------
#plot of all census location data to show how much data I have
#subset out some incorrect loc data that will be fixed in data sheet
ftm_lng <- subset(ftm_lng, cen_loc!="veil")
ftm_lng <- subset(ftm_lng, cen_loc!="onbv")
ftm_lng <- subset(ftm_lng, cen_loc!="net")
ftm_lng <- subset(ftm_lng, cen_loc!="8:04")
#making a cen_time_dec column (cen_time / 24) to make a day fraction of time to add to cen_date
ftm_lng$cen_time_dec <- ftm_lng$cen_time / 24
#add cen_time_dec to cen_date
ftm_lng$cen_date_time <- ftm_lng$cen_date + ftm_lng$cen_time_dec
#make a numeric bug_id column
#(factor -> numeric gives each caterpillar a stable integer code for the y axis)
ftm_lng$bug_idn <- as.factor(ftm_lng$bug_id)
ftm_lng$bug_idn <- as.numeric(ftm_lng$bug_idn)
#plot location data by cen_date_time
loc_date_plot <- ggplot(ftm_lng, aes(x=cen_date_time, y=bug_idn, group=cen_loc, color=cen_loc))
#NOTE(review): discrete = "TRUE" passes a string where a logical is expected;
#it works via coercion but plain TRUE is the conventional form.
loc_date_plot + geom_jitter(size=6, width = .5
)+scale_color_viridis(discrete = "TRUE"
)+scale_y_continuous(breaks=seq(0,200,20),
                     limits = c(1,205)
)+labs(x="Day", y="Caterpillar ID"
)+theme(text = element_text(family=("Cambria")),
        axis.line.x=element_line(colour = 'black', size = 2),
        axis.line.y=element_line(colour = 'black', size = 2),
        axis.ticks = element_line(colour = 'black', size = 2),
        axis.ticks.length = unit(2.5, "mm"),
        axis.text.x = element_text(size = 26, face = "bold", color="black"),
        axis.text.y = element_text(size = 26,face = "bold", color="black"),
        axis.title.x = element_text(size = 26, vjust=-.35,face = "bold",
                                    margin = margin(t = 20, r = 0, b = 0, l = 0)),
        axis.title.y = element_text(size = 26, vjust=1.5,face = "bold",
                                    margin = margin(t = 0, r = 20, b = 0, l = 0)),
        legend.position = "none")
#-----------------------
#temp by date plot, raw datalogger data
#order location data as factor
dlt_lng$loc <- factor(dlt_lng$loc, levels = c("h_un_sh", "m_un_sh", "l_un_sh"))
#labeller for facet_wrap
tc_labs <- c(tc1 = "TC1", tc2 = "TC2", tc3 = "TC3", tc4 = "TC4",
             tc5 = "TC5", tc6 = "TC6", tc7 = "TC7", tc8 = "TC8",
             tc9 = "TC9", tc10 = "TC10", tc11 = "TC11", tc12 = "TC12",
             tc13 = "TC13", tc14 = "TC14", tc15 = "TC15", tc16 = "TC16",
             tc17 = "TC17", tc18 = "TC18", tc19 = "TC19", tc20 = "TC20")
#plot raw data, temp by date time, facet wrap by tc, color by location
#one panel per thermocouple; dashed line at 40 C
#NOTE(review): 40 C presumably marks the heat-stress threshold — confirm.
temp_dt_plot <- ggplot(dlt_lng, aes(x=date_time_j, y=temp, color=loc))
temp_dt_plot+geom_line(size=1
)+geom_hline(aes(yintercept=40),
             linetype="dashed",
             size=2
)+scale_color_manual(values = c("#548235", "#DB9C1B", "#FF8585"),
                     breaks=c("h_un_sh", "m_un_sh", "l_un_sh"),
                     labels=c("High", "Middle", "Low"),
                     name="Location"
)+labs(y="Temperature [C]", x="Date"
)+facet_wrap(~tc, labeller = labeller(tc = tc_labs)
)+theme(text = element_text(family=("Cambria")),
        axis.line.x=element_line(colour = 'black', size = 2),
        axis.line.y=element_line(colour = 'black', size = 2),
        axis.ticks = element_line(colour = 'black', size = 2),
        axis.ticks.length = unit(2.5, "mm"),
        axis.text.x = element_text(size = 20, face = "bold", color="black"),
        axis.text.y = element_text(size = 20,face = "bold", color="black"),
        axis.title.x = element_text(size = 26, vjust=-.35,face = "bold",
                                    margin = margin(t = 20, r = 0, b = 0, l = 0)),
        axis.title.y = element_text(size = 26, vjust=1.5,face = "bold",
                                    margin = margin(t = 0, r = 20, b = 0, l = 0)),
        #legend.position = c(0.85, 0.8),
        legend.text = element_text(size=22, face = "bold"),
        legend.title = element_text(size=22, face = "bold"),
        legend.key.width = unit(4,"line"),
        strip.text = element_text(size=18, face="bold"),
        strip.background = element_rect(size=1, fill="white"))
|
156a7647183a6197681e8bae7fe7ff4f33475ad6 | 1cb5a3fc57b330012b862d49c3627cf52549f5d8 | /R/tracer.R | f1f2dc881398489beb9b347c2890fd6f79a4570f | [] | no_license | MyKo101/tracewarnings | f61f8175517dfc2af8912b2575e6af9612b3287e | d207b979c281d511b8847603dc970fcfd04f043c | refs/heads/main | 2023-06-02T00:19:12.737560 | 2021-06-22T12:25:55 | 2021-06-22T12:25:55 | 379,255,404 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,173 | r | tracer.R | #' @export
# Capture the call stack at the point a warning is raised.
#
# Intended to run from warning-handling machinery: it snapshots sys.calls(),
# deparses each call while preserving its attributes (notably `srcref`), and
# stores the result in the global `.warning_traceback` for later printing by
# tracebackW().
warning_trace <- function(){
  # Drop the five innermost frames (this function plus the handler frames that
  # invoked it) so the stored trace starts at user code.
  tr <- rev(sys.calls())[-(1:5)]
  tr2 <- vector("pairlist", length = length(tr))
  # seq_along() is safe when fewer than five frames remain; the original
  # `1:length(tr)` evaluated to c(1, 0) on an empty trace and errored.
  for (i in seq_along(tr)) {
    tr2[[i]] <- deparse(tr[[i]])
    attributes(tr2[[i]]) <- attributes(tr[[i]])
  }
  # `<<-` is deliberate: the trace must outlive this function's frame.
  .warning_traceback <<- tr2
}
#' Raw accessor for the stored warning traceback
#'
#' @return The pairlist of deparsed calls saved by `warning_trace()`, or
#'   `NULL` if no warning trace has been captured yet.
#' @export
.tracebackW <- function() .warning_traceback
#' Print the last captured warning traceback
#'
#' Analogue of `base::traceback()` for the trace stored by `warning_trace()`:
#' prints each deparsed call on numbered lines (outermost frame gets the
#' highest number), appending a source reference when one was preserved.
#'
#' @return Invisibly, the stored traceback (a pairlist of deparsed calls).
#' @export
tracebackW <- function(){
  # Fetch the stored trace and its length in one step.
  n <- length(x <- .tracebackW())
  if (n == 0L) {
    cat(gettext("No traceback available"), "\n")
  } else {
    for (i in 1L:n) {
      xi <- x[[i]]
      # Frames are numbered so the outermost call prints with the highest
      # number, matching base::traceback() output.
      label <- paste0(n - i + 1L, ": ")
      m <- length(xi)
      # Build an " at file#line" suffix if the call kept its source reference;
      # srcloc stays NULL otherwise (the if has no else branch).
      srcloc <- if (!is.null(srcref <- attr(xi, "srcref"))) {
        srcfile <- attr(srcref, "srcfile")
        paste0(" at ", basename(srcfile$filename),
               "#", srcref[1L])
      }
      # Deparse may have truncated long calls; show that with a trailing "...".
      if (isTRUE(attr(xi, "truncated"))) {
        xi <- c(xi, " ...")
        m <- length(xi)
      }
      # The source location is appended to the last line of the deparsed call.
      if (!is.null(srcloc)) {
        xi[m] <- paste0(xi[m], srcloc)
      }
      # Multi-line calls: pad continuation lines with blanks as wide as the
      # numeric label so the output stays aligned.
      if (m > 1) {
        label <- c(label, rep(substr("          ",
                                     1L, nchar(label, type = "w")), m - 1L))
      }
      cat(paste0(label, xi), sep = "\n")
    }
  }
  invisible(x)
}
|
43316b5fc0a2c9e25b13961f281d7f3cb87b92b0 | d12f765faa63b13c3d6c95e726aced1a1edfe272 | /Data_Prep2.R | dc7e9d84fc5ca8f932b4386a72a9757af2eb633e | [] | no_license | rohitgupta189/Kaggle-Comp-Predicting-House-Prices | 736517ce780906a8545c3f2c954db07a337193e7 | 78cf00254d83bf43a7ec63c11418eea5a3db541e | refs/heads/master | 2020-12-31T00:54:00.485508 | 2017-02-22T21:15:17 | 2017-02-22T21:15:17 | 80,585,158 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,515 | r | Data_Prep2.R | #Loading Caret Package
library("caret")
#Importing final train data provided by Saurav
train_final <- read.csv("C:/Users/rohit/Desktop/Spring16/Kaggle/Regression/data/train_final.csv", stringsAsFactors=FALSE)
#Importing final test data provided by Saurav
test_final <- read.csv("C:/Users/rohit/Desktop/Spring16/Kaggle/Regression/data/test_final.csv", stringsAsFactors=FALSE)
#Importing transformed train data provided by Saurav
train_trans <- read.csv("C:/Users/rohit/Desktop/Spring16/Kaggle/Regression/data/train_trans.csv", stringsAsFactors=FALSE)
#Importing transformed test data provided by Saurav
test_trans <- read.csv("C:/Users/rohit/Desktop/Spring16/Kaggle/Regression/data/test_trans.csv", stringsAsFactors=FALSE)
#Finding the variances explained by each variables
x<-nearZeroVar(train_final, saveMetrics = T)
#NOTE(review): write.csv() returns NULL, so this assignment discards x after
#writing the file — presumably only the CSV side effect is wanted.
x<-write.csv(x,"C:/Users/rohit/Desktop/Spring16/Kaggle/Regression/output/nearzerovar.csv")
#Exporting x to identify variables with near zero variance
#Columns below were identified (from the exported CSV) as near-zero variance.
out<- c("LandContour", "Condition2","RoofMatl", "Heating","KitchenAbvGr", "GarageYrBlt",
        "GarageQual","EnclosedPorch", "X3SsnPorch", "ScreenPorch")
#Removing variables with near zero variance
datasetNew <- train_final[, !colnames(train_final) %in% out]
#Considering only integer variables
#NOTE(review): summarise_all() comes from dplyr, which is not attached here —
#confirm dplyr is loaded before this script runs.
var_type<-data.frame(summarise_all(datasetNew,class))
int_var<-colnames(datasetNew[which(var_type=="integer"|var_type=="numeric")])
new_data<-datasetNew[,c(int_var)]
#Drop the target and the row identifier before PCA.
new_data$SalePrice <- NULL
new_data$Id <- NULL
library("RED")
#Count missing values per column (RED helper); PCA below requires no NAs.
a<-col_missing_count(new_data)
#Running Principal component analysis
pca <- prcomp(new_data, scale. = T, center = T)
#Get eigen values
#NOTE(review): install.packages() inside an analysis script re-installs on
#every run; better done once interactively.
install.packages("factoextra")
library(factoextra)
eigenvalues <- get_eigenvalue(pca)
eigenvalues
#Get the variables and their correlations
pcaVar <- get_pca_var(pca)
pcaVar <- c(pcaVar)
pcaVar <- as.data.frame(pcaVar)
#Considering only those principal components for which eigen values are greater than 1
pcaVarNew <- pcaVar[, 1:9]
#Get the highly informative variables: for each retained component, collect
#the names of variables whose absolute loading is at least 0.5.
#Fix: removed a stray closing parenthesis after the loop, which made the
#original script fail to parse.
var <- pcaVarNew[FALSE,]
k <- 1
for(i in colnames(pcaVarNew)){
  for(j in rownames(pcaVarNew)){
    if(abs(pcaVarNew[j , i]) >= 0.5){
      var[k, i] <- j
      k <- k + 1
    }
  }
  k <- 1  # restart the row counter for the next component's column
}
#Engineer interaction features motivated by the leading principal components.
#Identical arithmetic to before; with() just avoids repeating the data-frame
#name inside every product.
train_trans$interQual   <- with(train_trans, OverallQual  * OverallCond)   #PCA1
train_trans$built       <- with(train_trans, YearBuilt    * BsmtUnfSF)     #PCA2
train_trans$Remod       <- with(train_trans, YearRemodAdd * LotArea)       #PCA3
train_trans$Area        <- with(train_trans, YearBuilt    * MasVnrArea)    #PCA4
train_trans$Basement    <- with(train_trans, TotalBsmtSF  * BsmtUnfSF)
train_trans$Ground      <- with(train_trans, GrLivArea    * YearBuilt)
train_trans$Bath        <- with(train_trans, FullBath     * YearBuilt)
train_trans$AboveGround <- with(train_trans, TotRmsAbvGrd * BedroomAbvGr)
train_trans$livingArea  <- with(train_trans, TotalBsmtSF  * GrLivArea)
train_trans$years       <- with(train_trans, YearBuilt    * YearRemodAdd)
#Mirror only the first four (PCA1-PCA4) features in the test set, exactly as
#the training-set block above defines them.
test_trans$interQual <- with(test_trans, OverallQual  * OverallCond)   #PCA1
test_trans$built     <- with(test_trans, YearBuilt    * BsmtUnfSF)     #PCA2
test_trans$Remod     <- with(test_trans, YearRemodAdd * LotArea)       #PCA3
test_trans$Area      <- with(test_trans, YearBuilt    * MasVnrArea)    #PCA4
|
595970fc0bcdc80baf80c2c2fa0fc9f9b6599c71 | df7bd815b064c0ab66765ad9d137b590dcf0ee6b | /predict.r | 9cf124b8a3751b83c635d4eb4aa54cebdd6c7432 | [
"MIT"
] | permissive | msanterre/practical-machine-learning | b416549068fb7403ae2e77a47e366fdfa255c6ec | 04cf0eca4fc9d242fa9d43bbede1a54a989dc19a | refs/heads/master | 2021-01-10T06:48:12.010653 | 2016-01-02T00:32:11 | 2016-01-02T00:32:11 | 48,661,598 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,573 | r | predict.r | library(doMC)
library(caret)
# Use all the cores on this baby
registerDoMC(cores=8)
# We're trying to find classe
training <- read.csv("pml-training.csv", header=TRUE)
testing <- read.csv("pml-testing.csv", header=TRUE)
# Remove aggregate rows
training <- training[training[,"new_window"] == "no",]
# Remove the useless columns (ids, user names, timestamps, window markers)
training <- training[,-c(1:7)]
testing <- testing[,-c(1:7)]
set.seed(333)
# Remove columns with many NAs
# Count NAs in every predictor column (skip column 1 and the classe column).
nas <- sapply(2L:(NCOL(training)-1), function(i) {sum(is.na(training[,i]))})
# The +1 compensates for starting the NA count at column 2.
naCols <- colnames(training)[which(nas != 0) + 1]
training <- training[ , -which(names(training) %in% naCols)]
# Remove the useless variables (near-zero variance predictors)
nearZeroVarianceColumns <- nearZeroVar(training)
training <- training[,-nearZeroVarianceColumns]
# 70/30 split of the labelled data for internal validation.
inTrain <- createDataPartition(training$classe, p=0.7, list=FALSE)
training.train <- training[inTrain,]
training.test <- training[-inTrain,]
fitModel <- train(classe ~ ., data=training.train, method="rf", ntrees=750)
# Test out the predictions
pred <- predict(fitModel, training.test)
print(confusionMatrix(pred, training.test$classe))
# Predict the provided test cases
cols <- intersect(names(testing), names(training)) # Use the same columns as the training set
testing <- testing[,cols]
# Write each prediction to its own submission file problems/problem_id_<i>.txt,
# one value per file, unquoted and without row/column names.
# Fixes: seq_len(n) instead of 1:n so empty input writes nothing instead of
# erroring, and the output directory is created if it does not exist yet.
pmlWriteFiles <- function(x){ # Utility method from the instructions
  dir.create("problems", showWarnings = FALSE, recursive = TRUE)
  n <- length(x)
  for(i in seq_len(n)){
    filename <- paste0("problems/problem_id_",i,".txt")
    write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
  }
}
# Predict the 20 graded test cases and write one answer file per case.
predictProblems <- predict(fitModel, testing)
pmlWriteFiles(predictProblems)
|
65fb27fb32c5bf0827cfd62a2bef3fef838448c1 | 0630b7df32260a614ecde6bb8902ba8eee3ef371 | /scripts_prior_checks/load_sim_data.R | 81a05c3b5779cb400f76cf33e9d5cc5ba852bd81 | [
"MIT"
] | permissive | pmarchand1/seed-seedling-kernels | e3530ee40431e5d20e1f342a261ec3739e675f1c | e6225fec5f393f608d4d3deba092c91180606e45 | refs/heads/master | 2020-05-02T05:17:53.120388 | 2019-09-16T19:07:22 | 2019-09-16T19:07:22 | 177,768,396 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 803 | r | load_sim_data.R | # Load 2010 tree census and locations for seed traps 1-200
library(readr)
library(dplyr)
library(stringr)
# Dimensions of plot
xmin <- 0
xmax <- 1000
ymin <- 0
ymax <- 500
# Edge buffer size
buffer <- 20
load("data/bci.full7.rdata")
# Keep only traps 1-200 that sit at least `buffer` m inside the plot edges.
traps <- read_csv("data/seed_traps.csv") %>%
  filter(TRAP %in% 1:200) %>%
  filter(X >= xmin + buffer, X <= xmax - buffer,
         Y >= ymin + buffer, Y <= ymax - buffer)
colnames(traps) <- str_to_lower(colnames(traps))
sp_tab <- read_csv("data/sp_tab.csv")
# Live ("A") trees of the focal species with dbh at or above 2/3 of the
# species' reproductive threshold; rba_cm2 converts dbh (mm) to basal area (cm^2).
# NOTE(review): inner_join() has no `by=`, so it joins on the shared sp_code6
# column with a message — confirm that is the intended key.
trees <- mutate(bci.full7, sp_code6 = str_to_upper(sp)) %>%
  inner_join(select(sp_tab, sp_code6, rdbh_mm)) %>%
  filter(status == "A", dbh >= rdbh_mm * 2/3) %>%
  select(id = treeID, sp_code6, x = gx, y = gy, dbh_mm = dbh) %>%
  mutate(rba_cm2 = pi * (dbh_mm / 20)^2)
# Free the large census object once the trimmed `trees` table exists.
rm(bci.full7)
|
be87b6a772cdff19c882f21a66ded5fdbd770463 | 0f6f347f47365ef22e3584acbbd4966819154f78 | /7_3.R | f1e10d8eee3245bbe99f767b64e06feddcac9b2b | [] | no_license | ryota765/Anomaly-Detection | 5628fd1c52249043df34635f4b913a745f7956e8 | c2eefb56fb2cc54ce14944e64548aaa127acd9b8 | refs/heads/master | 2022-11-29T07:17:59.075169 | 2020-08-14T09:52:51 | 2020-08-14T09:52:51 | 286,388,261 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 361 | r | 7_3.R | Dtr <- nottem[1:120]; xi <- nottem[121:240]
# Fit an AR model on the training half (Dtr) and score the held-out half (xi)
# by the squared one-step-ahead prediction error, normalised by the model's
# innovation variance — a standard time-series anomaly score.
Tt <- length(xi)
ar.model <- ar(Dtr); print(ar.model)
# AR order and coefficients selected by ar() (AIC-based order selection).
r <- ar.model$order
alpha <- ar.model$ar
xmean <- ar.model$x.mean; sig2 <- ar.model$var.pred; N <- Tt - r
# Each column of X holds the r lagged (mean-centred) values preceding a target.
X <- t(embed(xi-xmean, r))[,1:N]
ypred <- t(X) %*% alpha + xmean
# Targets: the observations that have a full set of r predecessors.
y <- xi[(1+r):Tt]
# Anomaly score: squared prediction error scaled by the residual variance.
a <- (y - as.numeric(ypred))^2/sig2
plot(a,ylab="anomaly score",type="l")
|
fcb4e514d82c3d421210b441e4d9f81b27b57e67 | 9c33864192dffba239d89345f53231df714a1cd9 | /run_analysis.R | f34ce38f6b315132529586fdf73e5e453b70fe39 | [] | no_license | kiiiiibble/get_clean_data_project | 464faec8b24bd72e3756e92e342bb35b07004b6f | 6f6dcc52d3432ce2945a70f820523f2d078c6cc4 | refs/heads/master | 2021-01-23T06:35:01.639486 | 2017-06-02T07:41:52 | 2017-06-02T07:41:52 | 93,029,346 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,273 | r | run_analysis.R | rm(list=ls())
#===
#UCI HAR "Getting and Cleaning Data" pipeline: merge test/train sets, keep
#mean/std measurements, attach activity names, relabel columns, and build the
#final tidy table.
#1. Merges the training and the test sets to create one data set.
#Read all relevant files
x_test<-read.table("./test/X_test.txt")
y_test<-read.table("./test/Y_test.txt")
subject_test<-read.table("./test/subject_test.txt")
x_train<-read.table("./train/X_train.txt")
y_train<-read.table("./train/Y_train.txt")
subject_train<-read.table("./train/subject_train.txt")
activity<-read.table("./activity_labels.txt")
features<-read.table("./features.txt")
#Rename columns
colnames(y_test) = "activity_id"
colnames(y_train) = "activity_id"
colnames(x_test) = features[,2]
colnames(x_train) = features[,2]
colnames(subject_test) = "subject_id"
colnames(subject_train) = "subject_id"
#Create the "test" and "train" tables.
test_only<-cbind(subject_test, y_test, x_test)
train_only<-cbind(subject_train, y_train, x_train)
#Combine the data of the "test" and "train" tables.
combined<-rbind(test_only, train_only)
#===
#2. Extracts only the mean and standard deviation of each measurement.
#Makes a text vector containing the combined table's column names.
#NOTE(review): this local `colnames` shadows base::colnames for the rest of
#the script — consider a different name.
colnames<-names(combined)
#Makes a logical vector that returns TRUE if the column
#contains mean or standard deviation. Also returns TRUE for
#the subject_id and activity columns. Returns false for others.
tf<-grepl("[Mm]ean|[Ss]td|^subject_id|^activity", colnames) & !grepl("[Mm]ean[Ff]req|BodyAccMean|gravityMean", colnames)
#subsets the columns with subject_id, activity, mean, and standard deviation data.
extracted<-combined[,tf]
#===
#3. Uses descriptive activity names to name the activities in the data set
#Change column names of 'activity'.
colnames(activity)<-c('activity_id', 'activity_name')
#Add the new column containing descriptive activity names to the dataset.
described<-merge(extracted, activity, by='activity_id', all.x=TRUE)
#===
#4. Appropriately label the dataset with descriptive activity names.
#Replaces messy sections of column names with clearer ones.
colnames(described)<-gsub("\\()", "", colnames(described))
colnames(described)<-gsub("mean", "Mean", colnames(described))
colnames(described)<-gsub("std", "StdDev", colnames(described))
colnames(described)<-gsub("^t", "Time-", colnames(described))
colnames(described)<-gsub("^f", "Frequency-", colnames(described))
colnames(described)<-gsub("X$", "XDirection", colnames(described))
colnames(described)<-gsub("Y$", "YDirection", colnames(described))
colnames(described)<-gsub("Z$", "ZDirection", colnames(described))
colnames(described)<-gsub("[Mm]ag", "Magnitude", colnames(described))
colnames(described)<-gsub("[Aa]cc", "Acceleration", colnames(described))
#The BodyBody alternative is listed first so doubled prefixes collapse to one.
colnames(described)<-gsub("[Bb]ody[Bb]ody|[Bb]ody", "Body", colnames(described))
#===
#5. From the data set in step 4, creates a second, independent tidy data set with the average
#of each variable for each activity and each subject.
#NOTE(review): no aggregation (e.g. aggregate()/ddply) is performed below, so
#tidy_data holds the filtered rows, not per-subject/activity averages — confirm
#whether the averaging step is missing.
#Creates a vector containing TRUE if the column name has "Mean" in it, as
#well as the "activity_id", "subject_id", and "activity_name" columns.
tf<-grepl("Mean|^activity_id|^subject_id|^activity_name", colnames(described))
#Isolates columns containing means, activity_id, subject_id, and activity_name.
final<-described[tf]
#Reorder columns so activity_name is beside activity_id
tidy_data<-final[,c(1,36,2,3:35)]
|
c05d492a59a7f2a20ee4c30abc28a7baa756ccb9 | 6952973189692a3fb0251c32bde8309431495381 | /feature_analysis/boxplot-csv.R | e042488a1d98f133a647a858fe37e861cc3970d9 | [] | no_license | tmbo/stackoverflow-media-mining | 5fdf2d07a66d572fc9cc034f9c873f6340c7e9f8 | 2e9452924e288ecdc9a25ef9e0390400c15e704e | refs/heads/master | 2020-03-30T21:18:11.201339 | 2015-02-20T14:15:18 | 2015-02-20T14:15:18 | 26,333,743 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 504 | r | boxplot-csv.R | library(RMySQL)
# Connect to the local MySQL 'stackoverflow' database.
# NOTE(review): credentials are hard-coded (root, no password) — acceptable for
# a local analysis box only; the connection is never dbDisconnect()ed.
con <- dbConnect(MySQL(), user = 'root', host = '127.0.0.1', dbname='stackoverflow')
# data <- dbReadTable(conn = con, name = 'training_features')
data <- dbGetQuery(conn = con, statement = "SELECT * FROM SO_training_features WHERE AnswerDate is not null")
# data <- read.csv(file="query_result.csv",sep=";",head=TRUE)
# Boxplot of answer response time grouped by bounty height, saved as a JPEG.
jpeg('feature-boxplot.jpg')
boxplot(data$answer_duration~data$bounty_height, ylab='Response time after bounty start (minutes)', xlab='Height of bounty')
dev.off()
|
9d71ea0efcdec2ed5e2028f2da4effb85c422521 | 8b9c4825565521d67b192990016b5a174f9650d6 | /man/weibull.Rd | 0d853d82f7cc046b109fa35479472381e6db9d00 | [] | no_license | cran/distributionsrd | d6c0efa50e5ad60999b6ffa3cee94dff50047973 | e380c274007c99e2ccaa6b34775f6cfb4fefe1c9 | refs/heads/master | 2022-09-07T03:41:19.652239 | 2020-05-25T17:50:03 | 2020-05-25T17:50:03 | 267,007,404 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,620 | rd | weibull.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weibull.R
\name{weibull}
\alias{weibull}
\alias{mweibull}
\title{The Weibull distribution}
\usage{
mweibull(r = 0, truncation = 0, shape = 2, scale = 1, lower.tail = TRUE)
}
\arguments{
\item{r}{rth raw moment of the distribution, defaults to 1.}
\item{truncation}{lower truncation parameter, defaults to 0.}
\item{shape, scale}{shape and scale of the distribution with default values of 2 and 1 respectively.}
\item{lower.tail}{logical; if TRUE (default), moments are \eqn{E[x^r|X \le y]}, otherwise, \eqn{E[x^r|X > y]}}
}
\value{
returns the truncated rth raw moment of the distribution.
}
\description{
Raw moments for the Weibull distribution.
}
\details{
Probability and Cumulative Distribution Function:
\deqn{f(x) = \frac{shape}{scale}(\frac{x}{scale})^{shape-1}e^{-(\frac{x}{scale})^{shape}} , \qquad F_X(x) = 1-e^{-(\frac{x}{scale})^{shape}}}
The y-bounded r-th raw moment of the distribution equals:
\deqn{\mu^r_y = scale^{r} \Gamma(\frac{r}{shape} +1, (\frac{y}{scale})^{shape} ) }
where \eqn{\Gamma(,)} denotes the upper incomplete gamma function.
}
\examples{
## The zeroth truncated moment is equivalent to the probability function
pweibull(2, shape = 2, scale = 1)
mweibull(truncation = 2)
## The (truncated) first moment is equivalent to the mean of a (truncated) random sample,
#for large enough samples.
x <- rweibull(1e5, shape = 2, scale = 1)
mean(x)
mweibull(r = 1, lower.tail = FALSE)
sum(x[x > quantile(x, 0.1)]) / length(x)
mweibull(r = 1, truncation = quantile(x, 0.1), lower.tail = FALSE)
}
|
6d3cebe0e467b802504fa82eed7506b08418ee41 | 75d9a43fc7af3084127c3b0fde2366492e58057e | /ui.R | 9a1eaffb0cc11e649e0983ddac54f2dc378a2e63 | [] | no_license | DawitHabtemariam/RPresentation | 7952895ff25bf1cf94d3145bf49386e83c624612 | 3ec5b22d94d51f8d1cdfee0f5db9e212cd2e4b1a | refs/heads/master | 2021-01-23T11:48:07.924138 | 2014-08-23T03:18:16 | 2014-08-23T03:18:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,390 | r | ui.R |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Single-page UI: sidebar with variable picker, temperature slider, and a
# stats toggle; main panel with the plot and an optional summary table.
shinyUI(
  fluidPage(
    titlePanel("Climate in 1973"),
    sidebarLayout(
      sidebarPanel(
        helpText("This graphic displays the change of certain climate features
                 over a few months in 1973 under a range of temperatures in New York City. You can select a range of temperatures
                 using the slider to see the change of the climate over time. The source of the data is the New York Air Quality Measurements dataset in R") ,
        h4("Choose Your Feature"),
        # Which airquality variable to plot; "Solar Radiation" maps to Solar.R.
        selectInput("choice","Pick Variable",
                    choices=c("Ozone","Solar Radiation"="Solar.R","Wind"),
                    selected="Ozone"),
        # Temperature filter; 56-97 F spans the observed range in the data.
        sliderInput("temperature","Pick Temperature Range",min=56,max=97,value=c(56,97))
        ,checkboxInput("summary","Show Stats")
      ),
      mainPanel(
        title="Air Quality Plot"
        , plotOutput("airqualityPlot"),
        tableOutput("summary")
      ),
      position="left",
      # NOTE(review): fluid=T uses the reassignable alias; TRUE is safer.
      fluid=T)
  )
)
|
6897127e5f2a9b520899928be039899f442e5a9d | ef3e7f89905ea5dd3a6d1c6e7634289ae2fb34ad | /Sesion2.r | 0bec43ed8267272f505958959294d241029fd242 | [] | no_license | elantinoiser/Rbasico | f2b5f90a51ac8dcca6c2ac9fb43f2c4315df96fd | 7fb371b3340efb5d230a8ce81fa955075290a2bf | refs/heads/main | 2023-04-14T22:38:32.438258 | 2021-05-05T03:12:49 | 2021-05-05T03:12:49 | 348,812,235 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 820 | r | Sesion2.r | url_name <- 'https://scholar.google.com/scholar?start=0&q=transportation+AND+resilience+AND+LITERATURE+AND+REVIEW&hl=en&as_sdt=0,5'
# Parse the Google Scholar results page fetched via url_name (defined above).
wp <- xml2::read_html(url_name)
# '.gs_rt' holds each result's title; '.gs_a' the author/venue/year byline.
titles <- rvest::html_text(rvest::html_nodes(wp, '.gs_rt'))
authors_years <- rvest::html_text(rvest::html_nodes(wp, '.gs_a'))
# Authors: everything before the first " - " separator in the byline.
authors <- gsub('^(.*?)\\W+-\\W+.*', '\\1', authors_years, perl = TRUE)
# Year: the last 4-digit run in the byline (the greedy ^.* keeps the final match).
years <- gsub('^.*(\\d{4}).*', '\\1', authors_years, perl = TRUE)
df <- data.frame(titles = titles, authors = authors, years = years, stringsAsFactors = FALSE)
##################################################################################################
# Scrape per-publication citation counts from a Google Scholar profile page.
page <- textreadr::read_html("https://scholar.google.com/citations?user=sTR9SIQAAAAJ&hl=en&oi=ao")
citations <- page %>% rvest::html_nodes ("#gsc_a_b .gsc_a_c") %>% rvest::html_text()%>%as.numeric()
|
9d18406ccc51e25ba4dac7962574d3507068dc43 | 484200b4ad0025e44d09d36e3ffada8900ffe3b6 | /R/locationUI.R | 90df5dd34ed16354eb0f4ec9d911ca166058b6fd | [] | no_license | c5sire/brapps | 3b8a4657bda0e3aad20c1af56be8f2d3ee3d1e53 | 8d6311e6821b3c691cfcecf0cad1de27dd8e5a6a | refs/heads/master | 2021-01-23T21:29:11.249784 | 2017-06-26T19:54:32 | 2017-06-26T19:54:32 | 59,123,381 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 237 | r | locationUI.R |
#' locationsUI
#'
#' @param id shiny ID
#' @import shiny
#' @author Reinhard Simon
#' @return list
locationsUI <- function(id){
  # Single namespaced DT table output for the trial-location explorer; the
  # ns() helper is inlined since it is only needed once.
  tagList(
    DT::dataTableOutput(NS(id)("table"))
  )
}
|
5b04089e9a78635c94bac738e09be95bbb45c41b | 8a1119cfb5ee8680c0df840c010dc7e88dcb9640 | /man/sraToFastq.Rd | 3338ba2149d5778fb9ec77dc814620b2a07e9b19 | [] | no_license | CasedUgr/KnowSeq | f361ce49274aa81aaa9e104bbf863088dc6aef55 | f7051cb22dc7c6edab886c4fd34cb876b794d888 | refs/heads/master | 2022-09-01T06:35:36.323370 | 2022-08-11T22:35:28 | 2022-08-11T22:35:28 | 180,246,920 | 6 | 2 | null | 2022-04-05T12:16:31 | 2019-04-08T23:14:38 | R | UTF-8 | R | false | true | 793 | rd | sraToFastq.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sraToFastq.R
\name{sraToFastq}
\alias{sraToFastq}
\title{sraToFastq downloads and converts the sra files to fastq files. The function admits both gz and sra formats.}
\usage{
sraToFastq(identifier)
}
\arguments{
\item{identifier}{A vector that contains a list with the URLs requested.}
}
\value{
Nothing.
}
\description{
This function downloads and converts the sra files to fastq files by using the URLs indicated through the identifier argument. The function admits both gz and sra formats. This function is used internally by \code{\link{rawAlignment}} but it can be used separatelly.
}
\examples{
# This function needs the download of the pre-compiled tools supplied by KnowSeq.
\dontrun{sraToFastq("SRA1")}
}
|
715fded67707590bbe5651b18595b69cb75b5bd3 | 1f0978ef6e0917e17c3a1545a5fae66f06b7f320 | /R/mod_create_svg_input.R | 9df0f76eccbbf15d29237923dee8eee0c82eea24 | [] | no_license | JidduAlexander/svgInput | 562a38450c30ac74cb590c311b7ba41fafdfac6f | d1416a11267cd9846871d94890d9ea78e22694e5 | refs/heads/master | 2023-05-03T12:14:24.413158 | 2021-05-14T09:18:02 | 2021-05-14T09:18:02 | 367,150,853 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,767 | r | mod_create_svg_input.R |
# Module UI function
mod_create_svg_input_ui <- function(id) {
ns <- shiny::NS(id)
shiny::tagList(
bs4Dash::accordion(
id = ns("accordion"),
bs4Dash::accordionItem(
title = "1 Upload", status = "olive", collapsed = FALSE,
shiny::div(
style = "max-width:1200px; width:100%; margin:0 auto;",
mod_upload_svg_ui(ns("up")),
shiny::fluidRow(shiny::column(width = 12, shiny::actionButton(ns("btn_1"), "Next")))
)
),
bs4Dash::accordionItem(
title = "2 Set Inputs", status = "olive",
shiny::div(
style = "max-width:1200px; width:100%; margin:0 auto;",
mod_set_inputs_ui(ns("si")),
shiny::fluidRow(shiny::column(width = 12, shiny::actionButton(ns("btn_2"), "Next")))
)
),
bs4Dash::accordionItem(
title = "3 Download", status = "olive",
shiny::div(
style = "max-width:1200px; width:100%; margin:0 auto;",
mod_download_files_ui(ns("df"))
)
)
),
if (Sys.getenv("BROWSE") == "TRUE") { shiny::actionButton(ns("browse"), "Browse")}
)
}
# Module server function
mod_create_svg_input_server <- function(id) {
shiny::moduleServer(id, function(input, output, session) {
shiny::observeEvent(input$browse, { browser() })
shiny::observeEvent(input$btn_1, { bs4Dash::updateAccordion(id = "accordion", selected = 2, session = session) })
shiny::observeEvent(input$btn_2, { bs4Dash::updateAccordion(id = "accordion", selected = 3, session = session) })
svg_base <- mod_upload_svg_server("up")
svg_inputs <- mod_set_inputs_server("si", svg_base)
mod_download_files_server("df", svg_inputs)
})
} |
5def94ec78a027c58d21e0692dae79eb059f30dd | 90547805f8fa7f3759a01f5e791890ea5144455e | /R/pdf2pptx.R | 672e4e98de320ec330e6f2594070fa0fd3470eb9 | [
"MIT"
] | permissive | bannarisoftwares/pdf2pptx | 69e1d8a820f6bc96369ea8f0c0b44018c01725d8 | 6bc9461c994a820dbc620548cfb38b1f2d30a9fa | refs/heads/master | 2022-12-21T22:35:01.745728 | 2020-09-27T13:37:36 | 2020-09-27T13:37:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,178 | r | pdf2pptx.R |
#' Convert PDF to PPTX
#'
#' @param pdf_filename File name of the source PDF file
#' @param pptx_filename File name of the destination PPTX file
#' @param dpi Optional parameter specifying image density for rendering images from PDF
#' @param path Directory where to put rendered images. If `NULL`, temporary folder is used and the images are deleted.
#' @param ratio Fast option to switch between 4:3 (default) and 16:9, without the need to add new template.
#' @param template Alternative PPTX file used as template. Useful for different aspect ratios.
#'
#' @return Nothing
#' @export
#'
#' @examples
#' example_from_url <-
#' "http://mirrors.ctan.org/macros/latex/contrib/beamer-contrib/themes/metropolis/demo/demo.pdf"
#' # conversion takes several seconds
#' pdf2pptx(example_from_url, "demo.pptx")
#' unlink("demo.pptx")
pdf2pptx <- function(
  pdf_filename,
  pptx_filename,
  dpi = 300,
  path = NULL,
  ratio = c(43, 169),
  template = NULL
) {
  # Stage the rendered page images either in a caller-supplied directory or in
  # a session-temporary one. The temporary directory is now removed via
  # on.exit(), so it is cleaned up even when rendering fails part-way.
  if (is.null(path)) {
    folder_for_files <- file.path(tempdir(), "pdf_images")
    dir.create(folder_for_files, showWarnings = FALSE)
    on.exit(unlink(folder_for_files, recursive = TRUE), add = TRUE)
  } else {
    folder_for_files <- path
  }

  # `ratio` is the quick 4:3 / 16:9 switch; it is only consulted when no
  # explicit template is supplied. `&&` (scalar, short-circuiting) replaces
  # the elementwise `&` of the original.
  if (is.null(template) && ratio[1] == 169) {
    template <- system.file(package = "pdf2pptx", "template/template169.pptx")
  }

  # Render every PDF page at the requested density and convert to PNG files.
  img_pdf <- magick::image_read_pdf(pdf_filename, density = dpi)
  img_png <- magick::image_convert(img_pdf, format = "png")
  n_slides <- length(img_pdf)
  slide_filenames <-
    file.path(folder_for_files, sprintf("slide_%04d.png", seq_len(n_slides)))
  for (i in seq_len(n_slides)) {
    magick::image_write(img_png[i], slide_filenames[i])
  }

  # Build the deck: one blank slide per page, each filled edge-to-edge with
  # its rendered image. For layout info use layout_summary(pptx).
  pptx <- if (is.null(template)) {
    officer::read_pptx()
  } else {
    officer::read_pptx(template)
  }
  for (i in seq_len(n_slides)) {
    pptx <- officer::add_slide(pptx, layout = "Blank", master = "Office Theme")
    pptx <- officer::ph_with(
      pptx,
      officer::external_img(slide_filenames[i]),
      location = officer::ph_location_fullsize())
  }
  print(pptx, target = pptx_filename)
  # Documented contract is "returns Nothing"; make that explicit.
  invisible(NULL)
}
|
0a30c5a752554f8d3354f859773d8fd54f1b7c83 | 4a2ac914de175c03d7d6ba98c7c4852ec508d6e6 | /nfl_fit.R | 91ebf3f39400ab422a8b1c1aff643e939e4312d1 | [] | no_license | PirateGrunt/sparsity_blues | 9e89b6ddc3121059d0bf6d5fae1c22918cd6d6c1 | af8f3c575b925dc12aef7dfdff7e2d85aaca7b13 | refs/heads/master | 2021-05-14T13:57:31.107905 | 2019-03-23T11:37:08 | 2019-03-23T11:37:08 | 115,961,553 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,025 | r | nfl_fit.R | library(rpart)
library(randomForest)
library(ggplot2)
library(ROCR)
load('data/all_arrests.rda')
# Generate indices to split data into 2 sets
num_rec <- nrow(dfPlayers)
set.seed(1234)
train <- base::sample(num_rec, size = 0.8 * num_rec)
test <- setdiff(seq.int(num_rec), train)
dfTrain <- dfPlayers[train, ]
dfTest <- dfPlayers[test, ]
# Very simple fit
fit <- rpart(
formula = MultiArrest ~ Team_Division + Position + Day_of_Week
, data = dfTrain
)
fit
summary(fit)
plot(fit)
text(fit, pretty = 1)
dfTrain$Pred1 <- predict(fit)
dfTest$Pred1 <- predict(fit, newdata = dfTest)
pred_train <- prediction(dfTrain$Pred1, dfTrain$MultiArrest)
roc_train = performance(pred_train, measure = "tpr", x.measure = "fpr")
plot(roc_train)
abline(a=0, b= 1)
pred_test <- prediction(dfTest$Pred1, dfTest$MultiArrest)
roc_test = performance(pred_test, measure = "tpr", x.measure = "fpr")
plot(roc_test)
abline(a=0, b= 1)
perf <- performance(prediction(dfTest$Pred1, dfTest$MultiArrest), 'auc')
as.numeric(perf@y.values)
perf <- performance(prediction(dfTrain$Pred1, dfTrain$MultiArrest), 'auc')
as.numeric(perf@y.values)
# More complicated fit
fit2 <- rpart(
formula = MultiArrest ~ Team_city + Encounter + Position_name + ArrestSeasonState + Day_of_Week
, data = dfTrain
)
dfTrain$Pred2 <- predict(fit2)
dfTest$Pred2 <- predict(fit2, newdata = dfTest)
pred_train <- prediction(dfTrain$Pred2, dfTrain$MultiArrest)
roc_train = performance(pred_train, measure = "tpr", x.measure = "fpr")
plot(roc_train)
abline(a=0, b= 1)
perf <- performance(prediction(dfTrain$Pred2, dfTrain$MultiArrest), 'auc')
as.numeric(perf@y.values)
pred_test <- prediction(dfTest$Pred2, dfTest$MultiArrest)
roc_test = performance(pred_test, measure = "tpr", x.measure = "fpr")
plot(roc_test)
abline(a=0, b= 1)
perf <- performance(prediction(dfTest$Pred2, dfTest$MultiArrest), 'auc')
as.numeric(perf@y.values)
#------------------------
# MCA
library(caret)
flds <- createFolds(y, k = 10, list = TRUE, returnTrain = FALSE)
names(flds)[1] <- "train"
|
c048622f8007ce3ef893da9d6aab2ac183e05f8e | f5ad30ec784cccd03371242a63b99f265bf98dc5 | /man/leading.zeros.Rd | 2bb35784afc4b75746825c05961519c66d3cf50b | [] | no_license | claus-e-andersen/clanTools | cf93ec1b81b38277f90eaab7901a8f82b2ffa847 | dbbca750aba52b08c99d43a2a49a05810e2d0630 | refs/heads/master | 2021-07-06T09:22:18.006123 | 2020-10-17T04:20:21 | 2020-10-17T04:20:21 | 22,555,505 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 551 | rd | leading.zeros.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clanToolsFunctions.R
\name{leading.zeros}
\alias{leading.zeros}
\title{Add zeros in string representing a number}
\usage{
leading.zeros(c(9,111),3)
}
\arguments{
\item{x}{is a vector of numeric values}
\item{digits}{is the number of digits in the final results.}
}
\value{
The vecor of characters wher zeors have been added as necessary
}
\description{
Add zeros when numbers occur in filenames and such
009 comes before 111 (cf. 9 vs. 111)
}
\author{
Claus E. Andersen
}
|
7f0d2669d53e48d8ee0730ddf751cefb357a3ec8 | 52384e5931df60533cc55307dd115e6a0f078bb1 | /GaussianMixtureModel.R | afa1774eb426891774dd90568e1f6a6735f056fc | [] | no_license | julvi/MachineLearning | 6918a997180618878b739488485cf650b95a3237 | 3f701c28a2c463c7af9a27f3b7e8f968bd781993 | refs/heads/master | 2021-05-08T10:18:31.428396 | 2018-02-01T13:12:14 | 2018-02-01T13:12:14 | 119,837,468 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,951 | r | GaussianMixtureModel.R | # exercise 10.1.1
rm(list=ls())
setwd("~/GoogleDrive/PhD/Courses/IntroductiontoMachineLearningandDataMining/ToolBox/02450Toolbox_R")
source("setup.R")
library(mixtools) # install.packages("mixtools") # package that can be used to fit a gaussian mixture model. This package does allow random starts of the algorithm.
library(mclust) # install.packages("mclust") # package that can be used to fit a gaussian mixture model. This package does not allow random starts of the algorithm. It is faster than the algorithm in mixtools.
# Load data
library(R.matlab)
dat <- readMat(file.path('Data', 'synth2.mat'))
X <- dat$X
N <- dat$N
attributeNames <- as.vector(unlist(dat$attributeNames))
M <- dat$M
y <- dat$y
C <- dat$C
classNames <- as.vector(unlist(dat$classNames))
# substitute spaces with dots to make handling of columns in data matrix easier
attributeNames <- gsub(' ', '.', attributeNames)
Xdf <- data.frame(X)
colnames(Xdf) <- attributeNames
## Gaussian mixture model
# Number of clusters
K = 4; #minimum value is 2
# Fit model
model <- Mclust(data=Xdf, G=K) # using the mclust package
# model <- mvnormalmixEM(x=Xdf, k=K, maxit=100, epsilon=1e-2, verb=TRUE) # using the mixtools package.
#Defaults for maxit and epsilon are 500 and 1e-8, respectively.
#Avoid extreme running times by allowing fewer iterations and deeming convergence earlier
#by setting maxit and epsilon as done here. The argument verb=TRUE makes the method write
#output from the EM algorithm at each iteration. The argument verb is FALSE by default.
model$loglik
# Get clustering
i = model$classification # using the mclust package
#i = max_idx(model$posterior) # using the mixtools package
# Get cluster centers
Xc = t(model$parameters$mean) # using the mclust package
#Xc = matrix(unlist(model$mu), nrow=length(model$mu), byrow=TRUE) # using the mixtools package
## Plot results
# Plot clustering
clusterplot(Xdf, y, i, Xc, main='GMM: Clustering'); |
9879b05e4d4e2ea04fd4bfa2bb1a3f6c3ac01d66 | 342fe703c0d370bf02dacbd660eff54050f2c02e | /class_11_12.R | 350a1c15b322fe594112ce49e389a723d6a5bff2 | [] | no_license | emccollister/class_11_12 | 809fea5ab8717539d898526479d0ddf71f546299 | 3677c5730a22b9ae923b5c58ac7cc28168235870 | refs/heads/master | 2020-04-05T22:30:52.455102 | 2018-11-12T19:30:20 | 2018-11-12T19:30:20 | 157,258,919 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 401 | r | class_11_12.R | library(tidyverse)
download_link <- function(district, wave) {
district <- str_remove(string = district, pattern = "-") %>%
str_to_lower()
x <- paste("https://raw.githubusercontent.com/TheUpshot/2018-live-poll-results/master/data/elections-poll-",
district,
"-",
wave,
".csv",
sep = "")
read_csv(x)
}
|
80e124b6c4c6d5e3ca49516aec8b5d34bb6320f5 | a2ba22a5bb7f38a214d40b769cbc8f6378426750 | /R/qualityReportSFF.R | 2de30f12b4ceab58e9c68f598a1f2c2efe7bc39d | [] | no_license | huklein/R453Plus1Toolbox | faa368f57e1f27b2f45249ce12d95ed72610be8b | 539c22e575530ea93c2f2e95166bd6ef646a2a8d | refs/heads/master | 2021-08-30T02:00:13.385605 | 2017-12-14T01:55:16 | 2017-12-14T01:55:16 | 113,723,920 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 362 | r | qualityReportSFF.R | qualityReportSFF <- function(sfffiles, outfile="qcreport.pdf") {
basename <- gsub(".pdf", "", outfile, fixed=TRUE)
tmp <<- environment()
file.copy(system.file("extdata", "qualityReport.Rnw", package="R453Plus1Toolbox"), paste(basename, ".Rnw", sep=""))
Sweave(paste(basename, ".Rnw", sep=""))
texi2dvi(file=paste(basename, ".tex", sep=""), pdf=TRUE)
}
|
f9214665032b2ac2226ab69fa05f278572c95324 | 90a42b7bcfcc6c639a3ec64776867ae2d14d5ba2 | /server.R | 2264b0b9be28b5564894eacba426cc11c9d93d76 | [] | no_license | abhijit-nimbalkar/Immigration-and-Crime-Data-Analysis | 8fb6284ca16d99e6d337fe1b9becc38d77bad0e4 | b0cf8c95d10fc9636180b8b58776eb33817dfe71 | refs/heads/master | 2020-04-07T04:09:44.032077 | 2019-01-17T00:00:02 | 2019-01-17T00:00:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,726 | r | server.R |
set.seed(100)
server <- function(input,output, session){
getDataSet1<-reactive({
input.str<-str_split(input$parameter,'_')
input.str1<-input.str[[1]][1]
input.str2<-input.str[[1]][2]
dataSet1<-immigrant_data[immigrant_data$X1==input$year & immigrant_data$variable==paste0(input.str1,"_state_",input.str2),]
joinedDataset1<-districts
joinedDataset1@data$DISTRICT<-as.integer(joinedDataset1@data$DISTRICT)
joinedDataset1@data <- suppressWarnings(left_join(joinedDataset1@data, dataSet1, by="DISTRICT"))
joinedDataset1
})
getDataSet2<-reactive({
input.str<-str_split(input$parameter,'_')
input.str1<-input.str[[1]][1]
input.str2<-input.str[[1]][2]
dataSet2<-immigrant_data[immigrant_data$X1==input$year & immigrant_data$variable==paste0(input.str1,"_abroad_",input.str2),]
joinedDataset2<-districts
joinedDataset2@data$DISTRICT<-as.integer(joinedDataset2@data$DISTRICT)
joinedDataset2@data <- suppressWarnings(left_join(joinedDataset2@data, dataSet2, by="DISTRICT"))
joinedDataset2
})
data_for_scatter<-reactive({
input.str<-str_split(input$parameter,'_')
input.str1<-input.str[[1]][1]
input.str2<-input.str[[1]][2]
data_for_scatter<-immigrant_data[immigrant_data$X1==input$year,]
yaxis<-data_for_scatter %>% filter(variable=='freq')
xaxis1<-data_for_scatter %>% filter(variable==paste0(input.str1,"_abroad_",input.str2))
xaxis2<-data_for_scatter %>% filter(variable==paste0(input.str1,"_state_",input.str2))
total_xaxis<-left_join(xaxis1,xaxis2,by=c("X1","DISTRICT","name")) %>% mutate(total=value.x+value.y)
df<-left_join(yaxis,total_xaxis,by=c("X1","DISTRICT","name"))
df
})
output$scatterImmigrantCrime <- renderPlotly({
data_to_plot<-data_for_scatter()
plot_ly(data = data_to_plot, x = ~total, y = ~value, color = ~value, colors = c("#CCB7F1","#4E2623")) %>% layout(title = 'Crime and Immigrant Scatter Plot',
xaxis = list(title='Immigrant',showgrid = TRUE ),
yaxis = list(title='Crime',showgrid = TRUE),
plot_bgcolor="#0B0B0B",
font=list(color="#ffffff",size=10),
paper_bgcolor="#0B0B0B",
showlegend = FALSE)
})
output$mymap_state<-renderLeaflet({
leaflet() %>%
addTiles() %>%
setView(-118.387990,34.145223,zoom=10)
})
output$mymap_abroad<-renderLeaflet({
leaflet() %>%
addTiles() %>%
setView(-118.387990,34.145223,zoom=10)
})
observe({
theData1<-getDataSet1()
theData2<-getDataSet2()
pal <- colorBin(c("#E57960","#CD5D48","#B24334","#952C23","#761714","#550606"),theData1$value)
data_popup_state <- paste0("<strong>Representative Name: </strong>",
theData1$name,
"<br>Total Immigrant(in <strong>",
input$parameter,"
)</strong>: ",
formatC(theData1$value, format="d", big.mark=',')
)
data_popup_abroad <- paste0("<strong>Representative Name: </strong>",
theData2$name,
"<br>Total Immigrant(in <strong>",
input$inputradio,"
)</strong>: ",
formatC(theData2$value, format="d", big.mark=',')
)
labels1 <- sprintf(
"<strong>District Number %s: </strong>%g Immigrant",
theData1$DISTRICT, theData1$value
) %>% lapply(htmltools::HTML)
labels2 <- sprintf(
"<strong>District Number %s: </strong>%g Immigrant",
theData2$DISTRICT, theData2$value
) %>% lapply(htmltools::HTML)
m1<-leafletProxy("mymap_state", data = theData1) %>% addProviderTiles(providers$CartoDB.DarkMatter) %>%
clearShapes() %>%
addPolygons(data = theData1,
fillColor = pal(theData1$value),
fillOpacity = 1,
color = "white",
weight = 2,
dashArray = 3,
highlight = highlightOptions(
weight = 5,
color = "black",
dashArray = "",
fillOpacity = 0.7,
bringToFront = TRUE), popup = data_popup_state,
label = labels1,
labelOptions = labelOptions(
style = list("font-weight" = "normal", padding = "3px 8px"),
textsize = "15px",
direction = "auto"))
m1 <-m1 %>% clearControls() %>% addLegend(pal = pal, values = ~value, opacity = 0.7, title = NULL,
position = "bottomright")
m2<-leafletProxy("mymap_abroad", data = theData2) %>% addProviderTiles(providers$CartoDB.DarkMatter) %>%
clearShapes() %>%
addPolygons(data = theData2,
fillColor = pal(theData2$value),
fillOpacity = 1,
color = "white",
weight = 2,
dashArray = 3,
highlight = highlightOptions(
weight = 5,
color = "black",
dashArray = "",
fillOpacity = 0.7,
bringToFront = TRUE), popup = data_popup_abroad,
label = labels2,
labelOptions = labelOptions(
style = list("font-weight" = "normal", padding = "3px 8px"),
textsize = "15px",
direction = "auto"))
m2 %>% clearControls() %>% addLegend(pal = pal, values = ~value, opacity = 0.7, title = NULL,
position = "bottomright")
if(input$crimeoverlay=='crime'){
data_for_circle<-get(paste0("crime",input$year))
m1 <- m1 %>% addCircles(data=data_for_circle,lat = ~lat,lng = ~lng, radius = 0.5, color = c("#57531F","#C6DA64")) %>% withSpinner()
m2 %>% addCircles(data=data_for_circle,lat = ~lat,lng = ~lng, radius = 0.5, color = c("#57531F","#C6DA64")) %>% withSpinner()
}
})
output$immigrantTable <- renderDataTable(datatable({
dataset1<-getDataSet1()
dataset2<-getDataSet2()
dataset1<-dataset1@data[,c(4,11,12,14)]
names(dataset1)<-c("District_ID","Year","Reprensetative_Name",paste0(input$parameter,"_From Different STATE"))
dataset2<-dataset2@data[,c(4,11,12,14)]
names(dataset2)<-c("District_ID","Year","Reprensetative_Name",paste0(input$parameter,"_From ABROAD"))
final_df<-left_join(dataset1,dataset2,by=c("Year","District_ID","Reprensetative_Name"))
final_df
},
options = list(lengthMenu = c(5, 10, 33), pageLength = 5))
)
} |
b7d6a281b46f688a72d798cc85ef86c4647a1989 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/MIXFIM/examples/fisher_optimization.Rd.R | 02dcbb11bb87748ae16795ba12d816b3a2bd75c6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 843 | r | fisher_optimization.Rd.R | library(MIXFIM)
### Name: fisher_optimization
### Title: Optimization of the sampling times (or doses) in Nonlinear Mixed
### Effect Models using Markov Chains Monte Carlo
### Aliases: fisher_optimization
### ** Examples
############################
# PLEASE UNCOMMENT EXAMPLE #
############################
#params = c(1,8,0.15,0.6,0.02,0.07,0.1)
#times_poss = c(0.1,0.5,1,2,3,6,12,24,36,48,72,120,240)
# Files can be found in external data
#model = stan_model("model_b_given_y.stan")
#model2 = stan_model("model_derivatives.stan")
#model3 = stan_model("model_y.stan")
#opt1 = fisher_optimization(nb_t=3, set_t=times_poss, y_ini=0.5, model=model,
#model2=model2, model3=model3, params=params, dim_b=3, set_seed=TRUE, seed=42,
#step_mc=100, n_samp_min=30, n_samp_max=2000, n_rep=1, n_iter=500, n_burn=500,
#nb_patients=32)
#opt1
|
99ea837d3009d0f88e2479853d482e13fce48e27 | 57b918ae794b1e855d5ec84c45dcf57f2b7a224a | /H1B Final_in progress.R | 1736f867a75706393af793a55f8cf9ce95691ca2 | [] | no_license | ZoeLiuu/H1B_VisaPrediction | ad002cd611994d19fb3b5b517331c34fcf5b99bb | 2b6f68765fb9255b709edb37ed46d2445d8dfd57 | refs/heads/master | 2023-02-04T17:23:20.680089 | 2020-12-21T08:44:54 | 2020-12-21T08:44:54 | 323,263,438 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 26,861 | r | H1B Final_in progress.R | ## ----setup, include=TRUE, cache=FALSE, echo=FALSE----------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ---- echo = FALSE, warning=FALSE, message=FALSE-----------------------------------------------
library(readr)
library(readxl)
library(dplyr)
library(ggplot2)
library(caret)
library(DMwR)
library(knitr)
library(tibble)
library(tidyr)
library(lubridate)
library(rpart)
library(rpart.plot)
library(glmnet)
library(kernlab)
library(randomForest)
library(plotROC)
## ----load_data, message=FALSE, echo=FALSE------------------------------------------------------
#load("H1BPrediction.Rdata")
#load("all_years.Rdata") ## dataset we created and organized for the EDA
load("H1BVisaClassSuccessful-2.RData")
load("H1BFinalData.RData")
## ----variables, message=FALSE, echo=FALSE, eval=FALSE------------------------------------------
## names(all_years_2)
## ----general_plots, message=FALSE, echo=FALSE, fig.align="left", fig.width=8, fig.height=6-----
## case_statuses' percentage among all applications
all_years_2 %>%
group_by(CASE_STATUS) %>%
summarise(perc = n()/nrow(all_years_2)) %>%
ggplot(mapping = aes(x = CASE_STATUS, y = perc, fill = CASE_STATUS,
label = scales::percent(perc))) +
geom_bar(stat= "identity") +
geom_text(position = position_dodge(width = .9), # move to center of bars
vjust = -0.5, # nudge above top of bar
size = 4) +
scale_y_continuous(labels = scales::percent)
## ----clean_data, message=FALSE, echo=FALSE, eval=FALSE-----------------------------------------
## ## remove rows with over 0.2% missing values
FY_new <- all_years_2[-manyNAs(all_years),]
FY_new <- mutate(FY_new, SOC_CATEGORY = (substr(FY_new$SOC_CODE, start = 1, stop = 2)))
##
##
## ## Convert variables to numeric/integer/factor for tuning models
## #FY_new$PREVAILING_WAGE <- as.integer(gsub(",", "", FY_new$PREVAILING_WAGE))
## #FY_new$WAGE_RATE_OF_PAY <- as.numeric(gsub(",", "", FY_new$WAGE_RATE_OF_PAY)) # ADDED THIS LINE. NEED TO CONSIDER WAGE_RATE_OF_PAY
## FY_new$WAGE_RATE_OF_PAY_TO <- as.numeric(gsub(",", "", FY_new$WAGE_RATE_OF_PAY_TO))
## FY_new$WAGE_RATE_OF_PAY_FROM <- as.numeric(gsub(",", "", FY_new$WAGE_RATE_OF_PAY_FROM))
## #FY_new$SOC_CODE <- as.factor(gsub(".00", "", FY_new$SOC_CODE))
## FY_new$year <- as.integer(FY_new$year)
## FY_new$NAICS_CODE <- as.integer(FY_new$NAICS_CODE)
##
##
## FY_new <- mutate(FY_new, WAGE_MEAN = (WAGE_RATE_OF_PAY_FROM + WAGE_RATE_OF_PAY_TO)/2)
##
## FY_new <- mutate(FY_new, WAGE_RATE_YEARLY = ifelse(WAGE_UNIT_OF_PAY == "Year", WAGE_MEAN,
## ifelse(WAGE_UNIT_OF_PAY == "Month", WAGE_MEAN*12,
## ifelse(WAGE_UNIT_OF_PAY == "Week", WAGE_MEAN*52,
## ifelse(WAGE_UNIT_OF_PAY == "Bi-Weekly", WAGE_MEAN*52/2, WAGE_MEAN*40*52)))))
##
##
## ---- message=FALSE, echo=FALSE, eval=FALSE----------------------------------------------------
## names(FY_new)
## ---- message=FALSE, echo=FALSE, eval=FALSE----------------------------------------------------
## #can remove SOC_CODE, and "wage" variables except for WAGE_RATE_YEARLY
##
## ## nearZeroVar(FY_new)
## ## romove variables with near zero variance
##
##
names(FY_new[c(3,4,5, 6, 7, 8, 10, 11, 13, 14, 15, 17, 19,20, 21, 24, 25, 26,27, 28, 29,30,32, 34, 35, 36, 37, 42, 49, 52)])
##
FY_new2 <- FY_new[, -c(3,4,5, 6, 7, 8, 10, 11, 13, 14, 15, 17, 19,20, 21, 24, 25, 26,27, 28, 29,30,32, 34, 35, 36, 37, 42, 49, 52)]
nzv_FY_new2 <- nearZeroVar(FY_new2, saveMetrics = TRUE)
nzv_FY_new2 <- rownames_to_column(nzv_FY_new2)
nzv_FY_new2 <- nzv_FY_new2 %>%
filter(nzv == TRUE)
##
FY_new2 <- FY_new2 %>%
select(-c(nzv_FY_new2$rowname))
##
FY_new2$CASE_SUBMITTED <- month(FY_new2$CASE_SUBMITTED)
##
FY_new2$year <- as.integer(FY_new2$year)
FY_new2$PW_SOURCE_YEAR <- as.integer(FY_new2$PW_SOURCE_YEAR)
FY_new2$SOC_CATEGORY <- as.integer(FY_new2$SOC_CATEGORY)
##
FY_new2 <- FY_new2 %>%
filter(PW_SOURCE_YEAR >1000)
## ---- message=FALSE, echo=FALSE, eval=FALSE----------------------------------------------------
## OPTION 1
##
## Change the reposnse to binary outcomes
##
FY_new3 <- FY_new2 %>%
filter(CASE_STATUS != "WITHDRAWN")
##
FY_new3$CASE_STATUS <- gsub("CERTIFIED", "CERTIFIED", FY_new3$CASE_STATUS)
FY_new3$CASE_STATUS <- gsub("DENIED", "DENIED", FY_new3$CASE_STATUS)
FY_new3$CASE_STATUS <- gsub("CERTIFIED-WITHDRAWN", "CERTIFIED", FY_new3$CASE_STATUS)
FY_new2$CASE_STATUS <- gsub("WITHDRAWN", "DENIED", FY_new2$CASE_STATUS)
##
FY_new3$CASE_STATUS <- as.factor(FY_new3$CASE_STATUS)
##
FY_new3 <- FY_new3 %>%
select(-SUPPORT_H1B)
## ---- message=FALSE, echo=FALSE, eval=FALSE----------------------------------------------------
## Option 2
##
## Change the reposnse to binary outcomes
##
## interesting find: because gsub is looking for patterns, it included "CERTIFIED-WITHDRAWN" in the initial "CERTIFIED" script. Then it also added the "WITHDRAWN" part so the result was a new category of "SUCCESSFUL-UNSUCCESSFUL"... so to fix this, I had to rearrange for "CERTIFIED-WITHDRAWN" to be evaluated first.
##
FY_new3 <- FY_new2
##
FY_new3$CASE_STATUS <- gsub("CERTIFIED-WITHDRAWN", "UNSUCCESSFUL", FY_new3$CASE_STATUS)
FY_new3$CASE_STATUS <- gsub("CERTIFIED", "SUCCESSFUL", FY_new3$CASE_STATUS)
FY_new3$CASE_STATUS <- gsub("DENIED", "UNSUCCESSFUL", FY_new3$CASE_STATUS)
FY_new3$CASE_STATUS <- gsub("WITHDRAWN", "UNSUCCESSFUL", FY_new3$CASE_STATUS)
##
FY_new3$CASE_STATUS <- as.factor(FY_new3$CASE_STATUS)
##
FY_new3 <- FY_new3 %>%
select(-SUPPORT_H1B)
## ----revised_plots, message=FALSE, echo=FALSE, fig.align="left", fig.width=8, fig.height=6-----
## case_statuses' percentage among all applications (after cleaning)
FY_new3 %>%
group_by(CASE_STATUS) %>%
summarise(perc = n()/nrow(FY_new3)) %>%
ggplot(mapping = aes(x = CASE_STATUS, y = perc, fill = CASE_STATUS,
label = scales::percent(perc))) +
geom_bar(stat= "identity") +
geom_text(position = position_dodge(width = .9), # move to center of bars
vjust = -0.5, # nudge above top of bar
size = 4) +
scale_y_continuous(labels = scales::percent)
## ---- message=FALSE, echo=FALSE----------------------------------------------------------------
str(FY_new3)
## ----datasplit, message=FALSE, echo=FALSE, eval=FALSE------------------------------------------
index <- sample.int(n = nrow(FY_new3),
size = floor(0.008* nrow(FY_new3)),
replace = FALSE)
##
train_FY <- FY_new3[index,]
test_FY <- FY_new3[-index,]
##
library(janitor)
train_FY <- clean_names(train_FY)
test_FY <- clean_names(test_FY)
## ---- message=FALSE, echo=FALSE, fig.cap= "Summary of Training Data"---------------------------
dim(train_FY)
summary(train_FY)
## ----trControl, message=FALSE, echo=FALSE, eval=FALSE------------------------------------------
ctrl <- trainControl(method = "cv",
number = 10,
summaryFunction = twoClassSummary,
classProbs = TRUE,
savePredictions = TRUE)
## ----glmnet, warning=FALSE, eval=FALSE, echo=FALSE---------------------------------------------
## Linear: GLMnet
##
enet.grid <- expand.grid(alpha = c(0.1, 0.3, 0.5, 0.7),
lambda = exp(seq(-6, 1, length.out = 21)))
##
set.seed(4321)
fit.glmnet <- train(case_status ~ ., data = train_FY,
method = "glmnet",
metric = "ROC",
tuneGrid = enet.grid,
preProcess = c("center", "scale"),
trControl = ctrl, na.action = na.omit)
## ---- echo=FALSE, eval=FALSE-------------------------------------------------------------------
plot(fit.glmnet, xTrans = log)
##
## confusion matrix
confusionMatrix.train(fit.glmnet)
## ----pls, warning=FALSE, eval=FALSE, echo=FALSE------------------------------------------------
## Linear: PLS
##
library(pls)
pls_grid <- expand.grid(ncomp = seq(1, 13, by = 1))
##
set.seed(4321)
fit.pls <- train(case_status ~ ., data = train_FY,
method = "pls",
metric = "ROC",
tuneGrid = pls_grid,
preProcess = c("center", "scale"),
trControl = ctrl, na.action = na.omit)
## ---- echo=FALSE, eval=FALSE-------------------------------------------------------------------
fit.pls
##
## plot
plot(fit.pls)
##
## confusion matrix
confusionMatrix.train(fit.pls)
## ----tree, echo=FALSE, eval=FALSE--------------------------------------------------------------
## Tree
##
library(rpart)
library(rpart.plot)
##
fit.tree <- train(x = train_FY[, 2:19],
y = train_FY$case_status,
method = "rpart",
metric = "ROC",
tuneLength = 30,
trControl = ctrl, na.action = na.omit)
## ---- echo=FALSE, eval=FALSE-------------------------------------------------------------------
fit.tree
##
## plot
prp(fit.tree$finalModel,
box.palette = "Reds",
tweak = 1.5,
varlen = 30)
##
## confusion matrix
confusionMatrix.train(fit.tree)
##
## ----nonlinear, echo=FALSE, eval=FALSE---------------------------------------------------------
## Nonlinear: SVM
##
set.seed(4321)
fit.svm <- train(case_status ~ ., data = train_FY,
method = "svmRadial",
metric = "ROC",
preProcess = c("center", "scale"),
trControl = ctrl, na.action = na.omit)
## ---- echo=FALSE, eval=FALSE-------------------------------------------------------------------
fit.svm
##
## the best model
fit.svm$bestTune
##
## confusion matrix
confusionMatrix.train(fit.svm)
## ----ensemble, echo=FALSE, eval=FALSE----------------------------------------------------------
## Ensemble: Random Forest
##
rf_gird <- expand.grid(mtry = c(2, 3, 6, 9, 12, 15))
##
set.seed(4321)
fit.rf <- train(case_status ~ ., data = train_FY,
method = "rf",
metric = "ROC",
trControl = ctrl,
tuneGrid = rf_gird,
importance = TRUE, na.action = na.omit)
## ---- echo=FALSE, eval=FALSE-------------------------------------------------------------------
fit.rf
##
## confusion matrix
confusionMatrix.train(fit.rf)
## ----combineresults, echo=FALSE, fig.align="left", fig.width=8, fig.height=6, eval=FALSE-------
## **Figure 5:** Model Comparision (ROC, Sensitivity, Specificity)
##
model_cv_result <- resamples(list(GLMNET = fit.glmnet,
PLS = fit.pls,
SVM = fit.svm,
RF = fit.rf,
Tree = fit.tree))
## visualize ROC, Sensitivity, and Specificity comparison across models
dotplot(model_cv_result)
## ----option2, echo=FALSE, eval=FALSE-----------------------------------------------------------
cv_pred_results <- fit.glmnet$pred %>% tbl_df() %>%
filter(alpha %in% fit.glmnet$bestTune$alpha,
lambda %in% fit.glmnet$bestTune$lambda) %>%
select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
mutate(model_name = "GLMNET") %>%
bind_rows(fit.pls$pred %>%
tbl_df() %>%
filter(ncomp %in% fit.pls$bestTune$ncomp) %>%
select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
mutate(model_name = "PLS")) %>%
bind_rows(fit.svm$pred %>%
tbl_df() %>%
filter(sigma %in% fit.svm$bestTune$sigma,
C %in% fit.svm$bestTune$C) %>%
select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
mutate(model_name = "SVM")) %>%
bind_rows(fit.rf$pred %>%
tbl_df() %>%
filter(mtry == fit.rf$bestTune$mtry) %>%
select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
mutate(model_name = "RF")) %>%
bind_rows(fit.tree$pred %>%
tbl_df() %>%
filter(cp == fit.tree$bestTune$cp) %>%
select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
mutate(model_name = "Tree"))
## ----foldscomparison, echo=FALSE, fig.align="left", fig.width=8, fig.height=6, eval=FALSE------
## **Figure 6:** Model Comparision (ROC for each 10 Fold)
##
## plot the ROC for different folds
cv_pred_results %>%
ggplot(mapping = aes(m = SUCCESSFUL,
d = ifelse(obs == "SUCCESSFUL", 1, 0))) +
geom_roc(cutoffs.at = 0.5,
mapping = aes(color = Resample)) +
geom_roc(cutoffs.at = 0.5) +
coord_equal() +
facet_wrap(~ model_name) +
style_roc()
## ----modelcomparison, echo=FALSE, fig.align="left", fig.width=8, fig.height=6, eval=FALSE------
## **Figure 7:** Model Comparision (Average ROC)
##
cv_pred_results %>%
ggplot(mapping = aes(m = SUCCESSFUL,
d = ifelse(obs == "SUCCESSFUL", 1, 0),
color = model_name)) +
geom_roc(cutoffs.at = 0.5) +
coord_equal() +
style_roc() +
ggthemes::scale_color_colorblind()
## ----plsvarimp, echo=FALSE, eval=FALSE---------------------------------------------------------
plot(varImp(fit.pls), top = 25)
## ----organize testset, echo=FALSE, eval=FALSE--------------------------------------------------
test_FY <- na.omit(test_FY) ## remove rows with NAs
## ----pred_rf, echo=FALSE, eval=FALSE-----------------------------------------------------------
pred <- predict(fit.rf, test_FY)
prediction <- as.data.frame(pred)
pred_cb <- cbind(prediction, test_FY)
##
ggplot(pred_cb, mapping = aes(x = case_status)) +
geom_density(color = "black", size = 1) +
geom_density(aes(x = pred), color = "tomato", size = 1)
## ----glmnet_best, echo=FALSE, warning=FALSE, message = FALSE, eval=FALSE-----------------------
## **GLMnet**
##
glmnet_num <- fit.glmnet$results %>%
filter(ROC == max(ROC)) %>%
summarize(n = ROC-ROCSD) %>%
as.numeric(n)
##
bestglmnet <- fit.glmnet$results %>%
filter(ROC >= glmnet_num) %>%
filter(alpha == min(alpha)) %>%
filter(lambda == min(lambda))
##
bestglmnet$model <- "GLMNET"
##
glmnetvalues <- bestglmnet %>%
elect(model, ROC, Sens, Spec, alpha, lambda)
##
## ----pls_best, echo=FALSE, warning=FALSE, message = FALSE, eval=FALSE--------------------------
## **PLS**
##
pls_num <- fit.pls$results %>%
filter(ROC == max(ROC)) %>%
summarize(n = ROC-ROCSD) %>%
as.numeric(n)
##
bestpls <- fit.pls$results %>%
filter(ROC >= pls_num) %>%
filter(ncomp == min(ncomp))
##
bestpls$model <- "PLS"
##
plsvalues <- bestpls %>%
select(model, ROC, Sens, Spec, ncomp)
## ----rpart_best, echo=FALSE, warning=FALSE, message = FALSE, eval=FALSE------------------------
## **Regression Tree**
##
rpart_num <- fit.tree$results %>%
filter(ROC == max(ROC)) %>%
filter(cp == max(cp)) %>%
summarize(n = (ROC-ROCSD)) %>%
as.numeric(n)
##
besttree <- fit.tree$results %>%
filter(ROC >= rpart_num) %>%
filter(cp == max(cp))
##
besttree$model <- "Tree"
##
treevalues <- besttree %>%
select(model, ROC, Sens, Spec, cp)
## ----mars_best, echo=FALSE, warning=FALSE, message = FALSE, eval=FALSE-------------------------
## **SVM**
##
svm_num <- fit.svm$results %>%
filter(ROC == max(ROC)) %>%
summarize(n = ROC-ROCSD) %>%
as.numeric(n)
##
bestsvm <- fit.svm$results %>%
filter(ROC >= svm_num) %>%
filter(C == min(C))
##
bestsvm$model <- "SVM"
##
svmvalues <- bestsvm %>%
select(model, ROC, Sens, Spec, sigma, C)
## ----rf_best, echo=FALSE, warning=FALSE, message = FALSE, eval=FALSE---------------------------
## **RF**
##
rf_num <- fit.rf$results %>%
filter(ROC == max(ROC)) %>%
summarize(n = ROC-ROCSD) %>%
as.numeric(n)
##
bestrf <- fit.rf$results %>%
filter(ROC >= rf_num) %>%
filter(mtry == min(mtry))
##
bestrf$model <- "RF"
##
rfvalues <- bestrf %>%
select(model, ROC, Sens, Spec, mtry)
## ----glmnet2, warning=FALSE, echo=FALSE, eval=FALSE--------------------------------------------
## Linear: GLMnet
##
set.seed(4321)
final.glmnet <- train(case_status ~ ., data = train_FY,
method = "glmnet",
metric = "ROC",
tuneGrid = data.frame(alpha = glmnetvalues$alpha,
lambda = glmnetvalues$lambda),
preProcess = c("center", "scale"),
trControl = ctrl, na.action = na.omit)
##
## ---- echo=FALSE-------------------------------------------------------------------------------
final.glmnet
## confusion matrix
confusionMatrix.train(final.glmnet)
## ----pls2, warning=FALSE, echo=FALSE, eval=FALSE-----------------------------------------------
## Linear: PLS
##
set.seed(4321)
final.pls <- train(case_status ~ ., data = train_FY,
method = "pls",
metric = "ROC",
tuneGrid = expand.grid(ncomp=plsvalues$ncomp),
preProcess = c("center", "scale"),
trControl = ctrl, na.action = na.omit)
##
## ---- echo=FALSE-------------------------------------------------------------------------------
final.pls
## confusion matrix
confusionMatrix.train(final.pls)
## ----tree2, echo=FALSE, eval=FALSE-------------------------------------------------------------
## Tree
##
final.tree <- train(case_status ~., data = train_FY,
method = "rpart",
metric = "ROC",
tuneGrid = expand.grid(cp=treevalues$cp),
trControl = ctrl, na.action = na.omit)
## ---- echo=FALSE, fig.align="left", fig.width=8, fig.height=6---------------------------------
final.tree
## plot
prp(final.tree$finalModel,
box.palette = "Reds",
tweak = 1.5,
varlen = 30)
## confusion matrix
confusionMatrix.train(final.tree)
## ----svm2, echo=FALSE, message=FALSE, eval=FALSE-----------------------------------------------
## Nonlinear: SVM
##
## doesnt like no variance in each state, etc.
##
## Radial-kernel SVM refit at the (sigma, C) pair chosen earlier.
set.seed(4321)
final.svm <- train(case_status ~ ., data = train_FY,
                method = "svmRadial",
                metric = "ROC",
                tuneGrid = expand.grid(sigma = svmvalues$sigma,
                                       C = svmvalues$C),
                preProcess = c("center", "scale"),
                trControl = ctrl, na.action = na.omit)
## ---- echo=FALSE-------------------------------------------------------------------------------
final.svm
## confusion matrix
confusionMatrix.train(final.svm)
## ----randforest2, echo=FALSE, eval=FALSE-------------------------------------------------------
## Ensemble: Random Forest
##
## Random-forest refit at the chosen mtry; importance = TRUE stores variable
## importances for later inspection.
set.seed(4321)
final.rf <- train(case_status ~ ., data = train_FY,
               method = "rf",
               metric = "ROC",
               trControl = ctrl,
               tuneGrid = expand.grid(mtry=rfvalues$mtry),
               importance = TRUE, na.action = na.omit)
## ---- echo=FALSE-------------------------------------------------------------------------------
final.rf
## confusion matrix
confusionMatrix.train(final.rf)
## ---- echo=FALSE, eval=FALSE-------------------------------------------------------------------
##
## Draw a fixed-size random subset of the test set to keep prediction cheap.
## NOTE(review): 36152 is a hard-coded size — presumably derived from
## nrow(test_FY); confirm it is still valid if the data change.
set.seed(123)
idxsmall <- sample.int(n = nrow(test_FY),
                     size = 36152,
                     replace = FALSE)
##
test_FY_small <- test_FY[idxsmall,]
##
## Seed two copies of the subset; class predictions and class probabilities
## are appended to them as columns in the chunks below.
predictions <- test_FY_small
pred_probs <- test_FY_small
## ----------------------------------------------------------------------------------------------
## Class balance of the subsample.
test_FY_small %>%
  group_by(case_status) %>%
  summarize(n=n())
## ----predict, warning=FALSE, echo=FALSE--------------------------------------------------------
## Hard class predictions from each final model on the test subsample, one
## column per model; then keep only the truth plus the prediction columns.
predictions$glmnet <- predict(final.glmnet, test_FY_small)
predictions$pls <- predict(final.pls, test_FY_small)
predictions$tree <- predict(final.tree, test_FY_small)
predictions$svm <- predict(final.svm, test_FY_small)
predictions$rf <- predict(final.rf, test_FY_small)
predictions <- predictions %>%
  select(case_status, glmnet:rf)
## ----combineresults2, echo=FALSE---------------------------------------------------------------
## Pool the resampling results of all final models for a side-by-side
## comparison (caret::resamples).
final_model_results <- resamples(list(GLMNET = final.glmnet,
                                      PLS = final.pls,
                                      SVM = final.svm,
                                      RF = final.rf,
                                      Tree = final.tree))
## ---- echo=FALSE, fig.align="left", fig.width=8, fig.height=6---------------------------------
## visualize ROC, Sensitivity, and Specificity comparison across models
dotplot(final_model_results)
## ---- echo=FALSE, eval=FALSE-------------------------------------------------------------------
##
## changed to make for final.models
##
## Collect the cross-validation predictions of every final model (filtered to
## its best tuning values) into one long table so ROC curves can be faceted
## or coloured by model. Each branch keeps the same columns and tags itself
## with `model_name`.
## FIX: the random-forest branch called `bl_df()` (a typo); it now calls
## `tbl_df()` like the other branches.
final_cv_pred_results <- final.glmnet$pred %>% tbl_df() %>%
  filter(alpha %in% final.glmnet$bestTune$alpha,
         lambda %in% final.glmnet$bestTune$lambda) %>%
  select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
  mutate(model_name = "GLMNET") %>%
  bind_rows(final.pls$pred %>%
              tbl_df() %>%
              filter(ncomp %in% final.pls$bestTune$ncomp) %>%
              select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
              mutate(model_name = "PLS")) %>%
  bind_rows(final.svm$pred %>%
              tbl_df() %>%
              filter(sigma %in% final.svm$bestTune$sigma,
                     C %in% final.svm$bestTune$C) %>%
              select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
              mutate(model_name = "SVM")) %>%
  bind_rows(final.tree$pred %>%
              tbl_df() %>%
              filter(cp == final.tree$bestTune$cp) %>%
              select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
              mutate(model_name = "Tree")) %>%
  bind_rows(final.rf$pred %>%
              tbl_df() %>%
              filter(mtry == final.rf$bestTune$mtry) %>%
              select(pred, obs, SUCCESSFUL, UNSUCCESSFUL, rowIndex, Resample) %>%
              mutate(model_name = "RF"))
## ----foldscomparison2, echo=FALSE, fig.align="left", fig.width=8, fig.height=6-----------------
## plot the ROC for different folds
## One ROC curve per resample (coloured) plus the pooled curve (black),
## faceted by model; a cutoff marker is drawn at probability 0.5.
final_cv_pred_results %>%
  ggplot(mapping = aes(m = SUCCESSFUL,
                       d = ifelse(obs == "SUCCESSFUL", 1, 0))) +
  geom_roc(cutoffs.at = 0.5,
           mapping = aes(color = Resample)) +
  geom_roc(cutoffs.at = 0.5) +
  coord_equal() +
  facet_wrap(~ model_name) +
  style_roc()
## different cutoff
## Same plot with the cutoff marker moved to 0.7.
final_cv_pred_results %>%
  ggplot(mapping = aes(m = SUCCESSFUL,
                       d = ifelse(obs == "SUCCESSFUL", 1, 0))) +
  geom_roc(cutoffs.at = 0.7,
           mapping = aes(color = Resample)) +
  geom_roc(cutoffs.at = 0.7) +
  coord_equal() +
  facet_wrap(~ model_name) +
  style_roc()
## ----modelcomparison2, echo=FALSE, fig.align="left", fig.width=8, fig.height=6-----------------
## All models overlaid on one ROC plot (scored here on the UNSUCCESSFUL
## class), with a colour-blind-safe palette.
final_cv_pred_results %>%
  ggplot(mapping = aes(m = UNSUCCESSFUL,
                       d = ifelse(obs == "UNSUCCESSFUL", 1, 0),
                       color = model_name)) +
  geom_roc(cutoffs.at = 0.5) +
  coord_equal() +
  style_roc() +
  ggthemes::scale_color_colorblind()
## ----------------------------------------------------------------------------------------------
## this look like a step change generally, thus, this can be an ideal ROC curve
## Sensitivity vs (1 - specificity) across the tree's tuning results.
ggplot(fit.tree$results, mapping = aes(x = 1 - Spec, y = Sens)) +
  geom_line()
## ----------------------------------------------------------------------------------------------
## Get the ROC curve
## Test-set ROC curves (pROC::roc) for each tuned model (fit.*). Column 1 of
## predict(..., type = "prob") is used as the score; the factor levels are
## reversed so the direction of comparison matches that column. The AUC
## values observed on this data set are noted inline.
roc.tree <- roc(test_FY$case_status,
                predict(fit.tree, test_FY, type = "prob")[,1],
                levels = rev(levels(test_FY$case_status)))
roc.tree ## AUC = 0.6747
plot(roc.tree, print.thres = c(.5), type = "S",
     print.thres.pattern = "%.3f (Spec = %.2f, Sens = %.2f)",
     print.thres.cex = .8,
     legacy.axes = TRUE,
     main = "ROC of tree")
roc.rf <- roc(test_FY$case_status,
              predict(fit.rf, test_FY, type = "prob")[,1],
              levels = rev(levels(test_FY$case_status)))
roc.rf ## AUC = 0.7433 highest
plot(roc.rf, print.thres = c(.5), type = "S",
     print.thres.pattern = "%.3f (Spec = %.2f, Sens = %.2f)",
     print.thres.cex = .8,
     legacy.axes = TRUE,
     main = "ROC of rf")
roc.glmnet <- roc(test_FY$case_status,
                  predict(fit.glmnet, test_FY, type = "prob")[,1],
                  levels = rev(levels(test_FY$case_status)))
roc.glmnet ## AUC = 0.7054
plot(roc.glmnet, print.thres = c(.5), type = "S",
     print.thres.pattern = "%.3f (Spec = %.2f, Sens = %.2f)",
     print.thres.cex = .8,
     legacy.axes = TRUE,
     main = "ROC of glmnet")
roc.pls <- roc(test_FY$case_status,
               predict(fit.pls, test_FY, type = "prob")[,1],
               levels = rev(levels(test_FY$case_status)))
roc.pls ## AUC = 0.706
plot(roc.pls, print.thres = c(.5), type = "S",
     print.thres.pattern = "%.3f (Spec = %.2f, Sens = %.2f)",
     print.thres.cex = .8,
     legacy.axes = TRUE,
     main = "ROC of pls")
roc.svm <- roc(test_FY$case_status,
               predict(fit.svm, test_FY, type = "prob")[,1],
               levels = rev(levels(test_FY$case_status)))
roc.svm ## AUC = 0.6937
plot(roc.svm, print.thres = c(.5), type = "S",
     print.thres.pattern = "%.3f (Spec = %.2f, Sens = %.2f)",
     print.thres.cex = .8,
     legacy.axes = TRUE,
     main = "ROC of svm")
## ----------------------------------------------------------------------------------------------
## pick the median predicted value of the Successful cases for the test set
## this will be close to the threshold when we using ROC curve where TPR&FPR overlap
pred_rf <- predict(fit.rf, test_FY, type = "prob")
summary(pred_rf$SUCCESSFUL)
## ----------------------------------------------------------------------------------------------
## TPR (sensitivity) and TNR (specificity) of the random forest as a
## function of the decision threshold.
matplot(data.frame(roc.rf$sensitivities, roc.rf$specificities),
        x = roc.rf$thresholds,
        xlab = "threshold", ylab = "TPR, TNR")
legend('bottomright', legend=c('TPR', 'TNR'), lty=1:2, col=1:2)
## ----------------------------------------------------------------------------------------------
## Confusion matrix of the final random forest on the test subsample at a
## 0.5 probability cutoff.
## FIX: predict(..., type = 'prob') returns a two-column data frame (one
## column per class); the original thresholded the whole frame, so table()
## received a two-column matrix instead of one prediction per row. Use the
## SUCCESSFUL-class probability (the positive class used elsewhere in this
## analysis, e.g. pred_rf$SUCCESSFUL above).
threshold_5 <- 0.5
predicted_values5 <- ifelse(predict(final.rf, test_FY_small, type='prob')[, "SUCCESSFUL"] > threshold_5, 1, 0)
actual_values <- test_FY_small$case_status
conf_matrix_5 <- table(predicted_values5, actual_values)
conf_matrix_5
## ----------------------------------------------------------------------------------------------
## Same confusion matrix at a stricter 0.8 cutoff.
threshold_8 <- 0.8
predicted_values8 <- ifelse(predict(final.rf, test_FY_small, type='prob')[, "SUCCESSFUL"] > threshold_8, 1, 0)
actual_values <- test_FY_small$case_status
conf_matrix_8 <- table(predicted_values8, actual_values)
conf_matrix_8
|
f09e46ca7e9ec09dc68d40735ab89c37a3345695 | 484d0d81fa373313ade9cf30e5df9139c342bba1 | /src/simulation/r_code/MyDataGeneration.R | 72ee398072afac494c9abe434c71854016dd482a | [
"MIT"
] | permissive | rushkock/sim_study_thesis | 95d77452764e582107f42e627decc8107fc2c913 | 6cddc5becf565a6e62ca5e4db873e522b5b5f0f5 | refs/heads/master | 2020-05-09T13:19:01.767533 | 2019-06-05T07:21:52 | 2019-06-05T07:21:52 | 181,146,833 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 745 | r | MyDataGeneration.R |
# Generate two-group data for a power comparison of the Mann-Whitney U test
# and the t-test.
#
# Group 1 is drawn from Normal(0, sd1); group 2 from Normal(es * pooledSD, sd2),
# where pooledSD = sqrt((sd1^2 + sd2^2) / 2), so `es` is a standardized
# (Cohen's d style) effect size (e.g. 0.2, 0.5, or 0.8).
#
# Args:
#   samp1, samp2: sample sizes of groups 1 and 2 (positive integers).
#   es:           standardized effect size.
#   sd1, sd2:     standard deviations of groups 1 and 2.
#
# Returns a data.frame with columns:
#   Y      simulated responses (length samp1 + samp2)
#   group  factor with levels "1" and "2"
#   mean1  true mean of group 1 (recycled; always 0)
#   mean2  true mean of group 2 (recycled; es * pooledSD)
MyDataGeneration <- function(samp1, samp2, es, sd1, sd2){
  # Fail fast on unusable inputs.
  stopifnot(samp1 >= 1, samp2 >= 1, sd1 > 0, sd2 > 0)
  # Pool the two variances so `es` scales with the average spread.
  averageSD <- sqrt((sd1^2 + sd2^2) / 2)
  mean1 <- 0
  mean2 <- averageSD * es
  var1 <- rnorm(samp1, mean1, sd1)
  var2 <- rnorm(samp2, mean2, sd2)
  Y <- c(var1, var2)
  group <- as.factor(c(rep(1, samp1), rep(2, samp2)))
  data.frame(Y, group, mean1, mean2)
}
|
627dfe577621700024cf2c70ce6979635a980c6c | 2db48faacba5906814d5d94c98dcaad0932c0ef5 | /man/frontseat.Rd | 47a199383e3d5f49e4a89ded750eae265c849d9a | [] | no_license | cran/DOS2 | 985dbb239a40a4f7539cde77ab3f0fbdcc4bbd80 | f333e07e0041ee09525ea32bee07b4eb685f00af | refs/heads/master | 2020-07-26T22:37:00.966718 | 2019-09-16T09:30:05 | 2019-09-16T09:30:05 | 208,785,907 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,851 | rd | frontseat.Rd | \name{frontseat}
\alias{frontseat}
\concept{Causal inference}
\concept{Counterclaim}
\concept{Observational Study}
\concept{Mediation}
\docType{data}
\title{
Safety Belts in Vehicle Crashes
}
\description{
Data from the US Fatality Analysis Reporting System (FARS) in 2010-2011, as discussed in Rosenbaum (2015) and in the "Design of Observational Studies", second edition, Chapter 7. The data concern crashes in which a driver and a right front passenger were present, following Evans (1986). The data compare the injuries of the driver and passenger, and are particularly interesting when their safety belt use is different. The example illustrates the analysis of a counterclaim.
}
\usage{data("frontseat")}
\format{
A data frame with 17014 observations on the following 7 variables.
\describe{
\item{\code{restraint}}{Saftey belt use by (Driver,Passenger), where n=unbelted, ls=lap-shoulder belt. A factor with levels \code{ls.ls} \code{ls.n} \code{n.ls} \code{n.n}. Here, ls.n means the driver was belted and the passenger was not.}
\item{\code{injury}}{Injury of (Driver,Passenger).}
\item{\code{injurydif}}{Difference in injury scores, driver-minus-passenger, from -4 to 4. A score of -4 means the driver was uninjured, but the passenger died.}
\item{\code{ejection}}{Ejection from the vehicle of the (Driver,Passenger).}
\item{\code{ejectiondif}}{1 if the driver was ejected but the passenger was not, -1 if the passenger was ejected but the driver was not, 0 if their fates were the same.}
\item{\code{gender}}{Genders of the (Driver,Passenger).}
\item{\code{agedif}}{Difference in ages, driver-minus-passenger.}
}
}
\details{
This example is discussed in "Design of Observational Studies", second edition, Chapter 7.
Details are given in Rosenbaum (2015). A crash, perhaps involving several vehicles, is recorded in FARS only if there is at least
one fatality, creating issues of ascertainment (Fisher 1934) that do not affect tests of the hypothesis of no effect,
but that do affect estimation. Only tests of no effect are considered in this example.
}
\source{
Rosenbaum (2015)
}
\references{
Evans, L. (1986) <doi:10.1016/0001-4575(86)90007-2> "The
Effectiveness of Safety Belts in Preventing Fatalities".
Accident Analysis and Prevention, 18, 229–241.
Fisher, R.A. (1934) <doi:10.1111/j.1469-1809.1934.tb02105.x> "The Effect of Methods of Ascertainment Upon the Estimation of Frequencies". Annals of Eugenics, 6(1), 13-25.
Imai, K., Keele, L., Yamamoto, T. (2010) <doi:10.1214/10-STS321> "Identification, Inference and Sensitivity Analysis for Causal Mediation Effects". Statistical Science, 25, 51–71.
Rosenbaum, P.R. (2015) <doi:10.1080/01621459.2015.1054489> "Some Counterclaims Undermine Themselves in Observational Studies". Journal of the American Statistical Association, 110:512, 1389-1398.
}
\examples{
data(frontseat)
attach(frontseat)
use<-(!is.na(injurydif))
# Compare with Table 1 in Rosenbaum (2015), case ls.n
table(restraint[use])
use<-use&(restraint=="ls.n")
2*sensitivitymv::senmv(-injurydif[use],gamma=5,
trim=1,lambda=.99)$pval
2*sensitivitymv::senmv(-injurydif[use],gamma=5.5,
trim=1,lambda=.99)$pval
2*sensitivitymv::senmv(-injurydif[use],gamma=6,
trim=1,lambda=.99,inner=.25)$pval
2*sensitivitymv::senmv(-injurydif[use],gamma=6.5,
trim=1,lambda=.99,inner=.25)$pval
# Counterclaim analysis, one ejected individual
# Compare with Table 2 in Rosenbaum (2015), case ls.n
table(ejection,ejectiondif)
use<-use&(!is.na(ejectiondif))&(ejectiondif!=0)
sum(use)
2*sensitivitymv::senmv(-injurydif[use],gamma=9,
trim=1,lambda=.99)$pval
2*sensitivitymv::senmv(-injurydif[use],gamma=11,
trim=1,lambda=.99,inner=.25)$pval
detach(frontseat)
}
\keyword{datasets}
|
3ce49536c6feeacbc96070eb96b55bbab24944a7 | d1b9d11a226d61ca175d5d842deb4b9f24199d89 | /scripts/2017_metadata_cleaning.R | d3cdd06a8e72e2fb4b72aa73a815cc40cff56cb9 | [] | no_license | diportugal/Oyster_16S | b168d6ad5de369d854917fe9f9683cf73d70dbff | 309933bdfbf09013159ea0d537a910932f89da52 | refs/heads/main | 2023-07-26T11:33:52.219778 | 2021-09-09T05:05:02 | 2021-09-09T05:05:02 | 374,736,478 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,375 | r | 2017_metadata_cleaning.R | #Document Information ####
#Oyster 16S Metadata Cleaning
#Author: Diana Portugal
#Contact: dportugal8@gmail.com

#Loading Data ####
#library(tidyverse)
#install.packages("data.table")
#library(data.table)

# Field/sample measurements (original file: DE_DATA_ForGenetics_17.csv).
de_data17 <- read.csv("Oyster_data_raw/DE_DATA_ForGenetics_17.csv")
de_data17
# Sequencing metadata (original file: metadata_de17.csv).
meta17 <- read.csv("Oyster_data_raw/metadata_de17.csv")
meta17
# ASV table (original file: asvtable_de17.csv); fread for speed.
asv17 <- fread("Oyster_data_raw/asvtable_de17.csv")

#Renaming the Treatment Names ####
# Expand the two-letter treatment codes (HH/HL/LL/LH) into readable labels.
de_data17$Treatment
de_data17$Treatment2 <- ifelse(de_data17$Treatment =="HH", "HIGH_POLY",
                        ifelse(de_data17$Treatment == "HL", "HIGH_MONO",
                        ifelse(de_data17$Treatment == "LL", "LOW_MONO", "LOW_POLY")))

# Combine cage colour and number into one tag, e.g. "RED3".
de_data17$Colornumber <- paste0(de_data17$Color, de_data17$Number)

#Creating the UniqueIDs in de_data ####
# Sample key: year_site_treatment_colornumber_species.
de_data17$UniqueID <- paste("2017", de_data17$Site, de_data17$Treatment2, de_data17$Colornumber, de_data17$Species, sep = "_")

#Using Merge to combine the two data frames ####
# Left join on UniqueID: keep every row of meta17, attach matching de_data17.
meta17data <- merge(meta17, de_data17, by = "UniqueID", all.x = TRUE)

# Drop bookkeeping and post-measurement columns not needed downstream.
data_meta17_clean <- select(meta17data,
                            -"X",
                            -"V1",
                            -"Phase_1_DO",
                            -"Phase_1_temp",
                            -"Phase_2_DO",
                            -"Phase_2_Temp",
                            -"Overall_treatment",
                            -"Date_post",
                            -"Notes_pre",
                            -"POST_DEAD_ALIVE",
                            -"Dry_Weight_plate",
                            -"Dry_weight_final",
                            -"Dry_weight_shell",
                            -"Notes_post",
                            -"Genetics_Weight")

#Getting rid of the "missing" data ####
# Replace the placeholder text "MISSING" with real NA values.
# FIX: the original used sub("MISSING", "NA", ...), which produced the
# literal two-character string "NA" rather than a missing value, so
# is.na()/na.rm would never recognise it.
for (col in c("Length_pre", "Width_pre", "Height_pre", "Weight_pre")) {
  data_meta17_clean[[col]][data_meta17_clean[[col]] == "MISSING"] <- NA
}
data_meta17_clean

#*####
# Numeric copies of the RFTM (Dermo) score and pea-crab count.
data_meta17_clean$RFTM_score.asnum <- as.numeric(as.character(data_meta17_clean$RFTM_score.x))
data_meta17_clean$peacrabs.asnum <- as.numeric(as.character(data_meta17_clean$peacrabs.x))
#*####

#Making Unique IDs the new row names for Phyloseq
# NOTE(review): the write/read round trip below re-materialises the data
# frame from disk before setting row names; it also adds an "X" column on
# re-read. Confirm whether this is intentional.
write.csv(data_meta17_clean, file = "Oyster_data_raw/cleanmetadata17")
data_meta17_clean <- read.csv("Oyster_data_raw/cleanmetadata17")
rownames(data_meta17_clean) <- data_meta17_clean$UniqueID
data_meta17_clean$UniqueID <- NULL
data_meta17_clean
write.csv(data_meta17_clean, file = "Oyster_data_raw/meta17cleaned")

#End here with the data cleaning and start a new script for the data analysis on phyloseq ####
#Comment
11bc8f564abc6ed3f637e734aa3667a51b0c6e5a | 10cdf485801971b8357d13454d0185c9ebc6a36e | /man/corme.Rd | c59dc67e48543b538b8c49660fc15b0832cfa2ec | [] | no_license | dschulman/corme | 2e5fad5b1fd1709f47e393466dbe11c4040dbf52 | 12eeec79257df20ad01093bc9cfe236ecc44bb3b | refs/heads/master | 2016-09-06T14:51:48.630954 | 2011-11-16T23:57:25 | 2011-11-16T23:57:25 | 2,775,512 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,771 | rd | corme.Rd | \name{corme}
\alias{corme}
\alias{corme.default}
\alias{corme.formula}
\title{corme: correlation with mixed-effect models}
\usage{
corme(...)
\method{corme}{default} (g, x, y = NULL, REML = T, ...)
\method{corme}{formula} (formula, data, REML = T, ...)
}
\arguments{
\item{...}{additional arguments, not currently used}
\item{g}{a factor with at least 2 levels identifying
which group each observation is from.}
\item{x}{a numeric vector, matrix, or data frame}
\item{y}{an optional second vector, matrix, or data
frame. If absent, this is the same as specifying y=x
(but more than twice as fast).}
\item{REML}{if true use restricted maximum likelihood
estimation}
\item{formula}{a formula of the form y1+...+yn ~
x1+...+xm | g. The left-hand side is optional, and if
missing is treated as if it were the same as the right.}
\item{data}{a data frame containing the variables in
'formula'}
}
\value{
a list, where each item is either a scalar value (if we
are comparing two variables only) or a matrix: \describe{
\item{r.group}{correlation between groups}
\item{p.group}{significance of r.group}
\item{r.obs}{correlation within groups (accounting for
r.group)} \item{p.obs}{significance of r.obs}
\item{r.total}{overall correlation from all sources}
\item{p.total}{overall significance of any correlation in
the model} }
}
\description{
Estimate between-group, within-group, and overall
correlation in grouped data using mixed-effect models.
}
\details{
Given grouped data, \code{corme} will estimate
between-group, within-group, and overall correlation by
fitting each pair of variables with a mixed-effect
regression model where: \itemize{ \item{group means are
drawn from a bivariate normal distribution}
\item{observations within a group also have a bivariate
normal distribution} }
As with \code{cor}, you can specify either a single set
of variables (\code{x}) or a second set (\code{y}) as
well. Only a single grouping factor is currently
supported.
Significance testing is by chi-square likelihood ratio
tests, which may be inaccurate at small sample sizes.
Note that the significance test \code{p.total} may be
confusing: it is a model comparison between a model with
both between-group and within-group correlation and a
null model with no correlation. It does \emph{not} test
a hypothesis that \code{r.total} is zero. If
between-group and within-group correlations have
different signs, then it is possible that \code{r.total}
could be near zero while \code{p.total} is highly
significant.
}
\references{
A. Hamlett, L. Ryan, P. Serrano-Trespalacios, R.
Wolfinger, Journal of the Air & Waste Management
Association (1995) 53, 442 (2003).
}
|
7419586dd1591b01c848e528ce6c0ee88879bda8 | fe2949a661f4805d693f4224266ed2261b615448 | /projects/epa_analysis/part4/layers.r | 64d694b7fae24056ca2782121c7ebae1b3728ea6 | [] | no_license | dnfehren/dnfehrenbach.com | c4d6a54f703b6aa74701e518db950b74fd527f26 | e1bbf16daab454f224551b0f87579362806533fd | refs/heads/master | 2021-01-04T22:34:01.939666 | 2012-02-20T20:51:59 | 2012-02-20T20:51:59 | 3,497,585 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,363 | r | layers.r | #load libraries
library(RSQLite)
library(ggplot2)
#connect to database
#con <- dbConnect(SQLite(), "/users/dnfehren/Desktop/epa.sqlite")
con <- dbConnect(SQLite(), "C:\\Users\\dnfehren\\Desktop\\epa.sqlite")
#send query and load data into R data frame
query <- dbSendQuery(con, statement = "SELECT TRI_FACILITY_ID,Fugitive_Air,Stack_Air,Water,Underground_Class_I,Underground_Class_II_V,RCRA_C_Landfills,Other_Landfills,Land_Treatment,Surface_Impoundment,RCRA_C_Surface_Impoundment,Other_Surface_Impoundment,Other_Disposal,POTW_Total_Transfers,M10,M41,M62,M71,M81,M82,M72,M63,M66,M67,M64,M65,M73,M79,M90,M94,M99,M20,M24,M26,M28,M93,M56,M92,M40,M50,M54,M61,M69,M95 FROM tri");
return <- fetch(query, n = -1)
#get the on site data columns
on_site_wide <- return[c("TRI_Facility_ID","Fugitive_Air","Stack_Air","Water","Underground_Class_I","Underground_Class_II_V","RCRA_C_Landfills","Other_Landfills","Land_Treatment","Surface_Impoundment","RCRA_C_Surface_Impoundment","Other_Surface_Impoundment","Other_Disposal")]
#rename them
names(on_site_wide) <- c("Facility ID","Air Leak","Air Release", "Water", "Strict Well", "Other Well", "Strict Landfill", "Other Landfill", "Soil", "Surface Holding", "Strict Surface Holding","Other Surface","Other")
#get the off site data columns
off_site_wide <- return[c("TRI_Facility_ID","POTW_Total_Transfers","M10","M41","M62","M71","M72","M63","M64","M65","M73","M79","M90","M94","M99")]
#rename them as well
names(off_site_wide) <- c("Facility ID","OS POTW","OS Storage","OS Solidification","OS WasteWater Treatment", "OS Well", "OS Old Well Data", "OS Surface", "OS Landfill", "OS Strict Landfill", "OS Soil", "OS Other Surface","OS Other", "Waste Broker", "Unknown")
#melt the wide data frames into long frames each column name becomes a variable
on_site_full <- melt.data.frame(on_site_wide, id=1)
off_site_full <- melt.data.frame(off_site_wide, id=1)
#remove variables with no entries
on_site <- subset(on_site_full, on_site_full$value != 0)
off_site <- subset(off_site_full, off_site_full$value != 0)
#histogram of onsite
qplot(variable, data=on_site, geom="histogram")
#ggsave(file="on_site_release_histogram.png", width=10, height=6)
#histogram of offsite
qplot(variable, data=off_site, geom="histogram")
#ggsave(file="off_site_release_histogram.png", width=10, height=6)
#create new dataframes using tabluated values from the on and off site frames
on_df <- as.data.frame(table(on_site$variable))
off_df <- as.data.frame(table(off_site$variable))
combo_df <- rbind(on_df,off_df)
names(combo_df) <- c("type","count")
#pull out the release methods that match between on and off site
similar_methods <- combo_df[c(6,21,7,20,8,22,11,23),]
#create new columns to allow for easier factor plotting
similar_methods["site"] <- c("on","off","on","off","on","off","on","off")
similar_methods["type"] <- c("Strict Landfill","Strict Landfill","Landfill","Landfill","Soil","Soil","Surface","Surface")
#plot layers
p <- ggplot(similar_methods)
#p + layer(geom="bar")
p <- p + aes(factor(type))
#p + layer(geom="bar")
p <- p + aes(factor(type), weight = count)
#p + layer(geom="bar")
p <- p + aes(factor(type), fill = factor(site))
#p + layer(geom="bar")
p + geom_bar(position="dodge")
#ggsave(file="comparison_barchart.png", width=10, height=6)
|
e34c8c92aea9be22e090dd094762ce58cd2da656 | 46a52c79eaf7260c33605fc97769cae952b26adf | /demo/hivmodelage.R | c0cd142793194da53a3710718c0df3fafe12f9ac | [
"MIT"
] | permissive | haganje/epicookbook | d69f7d9b46e220ce9b75809e715c766f2a122661 | 311af3d66052e26f59b4965835b2b3f97ed23b1e | refs/heads/master | 2020-04-27T11:52:40.598500 | 2014-01-16T10:45:03 | 2014-01-16T10:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,360 | r | hivmodelage.R | hivmodel.discrete.age <- new("discreteModel",
main = function(time, init, parms, ...){
with(parms,{
S <- init[1:m]
I <- init[(m+1):(2*m)]
Stot <- sum(S)
Itot <- sum(I)
N <- Stot+Itot
Snew <- S
Inew <- I
Snew[1] <- S[1]+(lambd-mu*S[1]-beta*c*S[1]*Itot/N)*DELTAT-(S[1])*(DELTAT/DELTAX)
Snew[2:(m-1)] <- S[2:(m-1)]+(-mu*S[2:(m-1)]-beta*c*S[2:(m-1)]*Itot/N)*DELTAT-(S[2:(m-1)]-S[1:(m-2)])*(DELTAT/DELTAX)
Snew[m] <- S[m]+(-mu*S[m]-beta*c*S[m]*Itot/N)*DELTAT-(-S[m-1])*(DELTAT/DELTAX)
Inew[1] <- I[1] + (beta*c*S[1]*Itot/N-gamma*I[1]-mu*I[1])*DELTAT-(I[1])*(DELTAT/DELTAX)
Inew[2:(m-1)] <- I[2:(m-1)]+(beta*c*S[2:(m-1)]*Itot/N-gamma*I[2:(m-1)]-mu*I[2:(m-1)])*DELTAT-(I[2:(m-1)]-I[1:(m-2)])*(DELTAT/DELTAX)
Inew[m] <- I[m]+(beta*c*S[m]*Itot/N-gamma*I[m]-mu*I[m])*DELTAT-(-I[m-1])*(DELTAT/DELTAX)
c(S=as.numeric(Snew),I=as.numeric(Inew))
})},
parms = list(beta=0.01,c=1,gamma=1./520,mu=1./(70*52),lambd=10000./(70*52),DELTAX=52,m=15*70),
init = c(S=vector(mode="numeric"),I=vector(mode="numeric")),
times = c(from=0,to=3000,by=1),
solver = "modeliterator"
)
hivmodel.discrete.age <- sim(hivmodel.discrete.age)
|
c4a6f7ca1fae898a1a26052a8bfa804f6474c2ef | 96ca33786427091cdab915d8e130400382e5c6e4 | /scripts/05_plot_spatial_campaign.R | 27b2241511b9341b2a3ac7eb731e52703df72a1f | [] | no_license | wileyjennings/chile_mapocho_2019 | a0f886c90a1e07789a4e43d8aca1a812d596860b | 01e2d87dc162251350f90aaa3534674fa5c4e0a9 | refs/heads/master | 2021-05-26T21:50:28.859131 | 2020-08-20T12:01:49 | 2020-08-20T12:01:49 | 254,168,919 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,265 | r | 05_plot_spatial_campaign.R | # Doc Info ----------------------------------------------------------------
# Title: Plot Chile Mapocho samples
# Project: Chile Mapocho survey
# Author: Wiley Jennings | Boehm Group | Stanford
# Date: 27 Dec 2019
# Description: Plot spatial campaign data.
# Requirements ------------------------------------------------------------
required_packages <- c("here", "dplyr", "forcats", "ggbeeswarm", "ggplot2")
lapply(required_packages, library, character.only = T)
source(here::here("scripts", "util.R"))
# Processed data
water <- readRDS(here::here("data", "processed", "water.rds"))
# Global settings ---------------------------------------------------------
theme_set(theme_bw())
scale_colour_discrete <- scale_colour_viridis_d
# Plot spatial campaign data ----------------------------------------------
# Filter spatial data and recode factors for plotting.
water_spatial <-
water %>%
filter(target != "tc" & campaign %in% c("spatial")) %>%
mutate(target = fct_relevel(target, "crass", "hf183", "noro", "ec", "ent"),
target = fct_recode(target, crAssphage = "crass", HF183 = "hf183",
Norovirus = "noro", `E. coli` = "ec",
Enterococci = "ent"))
# Plot microbial data facetted by organism.
plot_spatial_facet_target <-
water_spatial %>%
filter(!is.na(detect)) %>%
ggplot(., aes(x = location, y = l10_100ml_cens)) +
# geom_hline(data = water_spatial %>% filter(vol_mce == 100),
# aes(yintercept = l10_100ml_lod), color = "gray50",
# linetype = 2) +
# geom_hline(data = water_spatial %>% filter(target %in% c("ec", "ent")),
# aes(yintercept = l10_100ml_lod), color = "gray50",
# linetype = 2) +
geom_boxplot(outlier.shape = NA) +
geom_beeswarm(aes(shape = detect, color = detect), cex = 2) +
scale_shape_manual(values = c(19, 4)) +
scale_color_manual(values = c("black", "red")) +
facet_wrap(~target) +
labs(x = "Location", y = expression("Concentration (log"[10]*"MPN or cp/100 ml)"),
color = "Organism") +
theme(legend.position = "none")
# Write figures to file ---------------------------------------------------
write_tif(plot_spatial_facet_target, "spatial_facet_target.tif", "horiz")
|
c709739d388771cb55121da95d264179e823d817 | 486ac08cb972bc756c7ff49f2d53d152d9298957 | /ejemplo-2/requirements.R | 1943c1c57a8a5ddb79f61a2db3f419f927d2c6c9 | [] | no_license | luismartinez83/docker-datascience | ad9df628dc86673789c37223a3ce52639ab644a7 | dc4a4bbd33cb1ca68e4cf6cd3594387425bb907c | refs/heads/main | 2023-05-19T06:40:32.605755 | 2021-06-11T16:36:37 | 2021-06-11T16:36:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 33 | r | requirements.R | install.packages("randomForest")
|
f86e0623c5b496ac3127f1fbad14fee3dfac6e45 | 9f2b375053d41ed33adee755456ccec58a486933 | /server.R | 887f464ed8a813f2094d3eba3fd53df3f109f49a | [] | no_license | mo-sayed/Bluebikes-Shiny_Visualization | 49125c3b72f95cc2af6359882e15747109f74d9a | affa618d771f4f2d2353929923bf0ae5c51bb18f | refs/heads/master | 2022-08-02T10:22:00.783186 | 2020-05-22T02:12:28 | 2020-05-22T02:12:28 | 239,390,706 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,273 | r | server.R | # SERVER.R
# Shiny server for the Bluebikes trip-visualisation app.
# Globals expected from global.R/app setup: df0 (trip-level data with
# TRIPDURATION, TIMEOFDAY, DAYOFWEEK, USERTYPE, GENDER, STARTDAY, AGE and
# start-station coordinates) and df3 (counts by time of day and gender).
function(input, output){

  # Trips filtered by the UI's time-of-day and duration controls; trips over
  # two hours are always excluded.
  first <- reactive({
    df0 %>% filter(TRIPDURATION <= 60*60*2) %>%
      filter(hour(TIMEOFDAY) >= input$time[1] & hour(TIMEOFDAY) <= input$time[2]) %>%
      filter(TRIPDURATION > input$duration[1]*60 & TRIPDURATION < input$duration[2]*60)
  })

  # Frequency polygon of trip duration, one line per day of week.
  output$dailytripduration <- renderPlot(
    first() %>% select(TRIPDURATION, DAYOFWEEK) %>%
      ggplot(aes(x=TRIPDURATION)) +
      geom_freqpoly(binwidth = 60, aes(color=DAYOFWEEK)) +
      labs(x = "Duration (seconds)" , y = 'Number of Trips')
  )

  # Trip counts by day of week, split by user type (subscriber vs customer).
  output$dailyusertype <- renderPlot(
    first() %>% select(USERTYPE, DAYOFWEEK, TRIPDURATION) %>%
      group_by(USERTYPE, DAYOFWEEK) %>% summarise(n = n()) %>%
      ggplot(aes(x = DAYOFWEEK, y = n)) +
      geom_col(aes( fill = USERTYPE), position = 'dodge') +
      labs(y="Number of Trips", x = "Day of the Week") +
      theme(legend.position = "top") +
      scale_fill_discrete(name = "User Type", breaks = c("Subscriber","Customer"), labels = c("Subscribers", "Customers"))
  )

  # Daily trip counts over the selected date range, by gender, with a GAM
  # smoother.
  output$trip_bygender <- renderPlot(
    df0 %>% filter(STARTDAY >= input$timelog[1] & STARTDAY <= input$timelog[2]) %>%
      select(STARTDAY, GENDER, YEAR) %>% group_by(Date = STARTDAY, GENDER) %>%
      summarise(n = n()) %>%
      ggplot(aes(x=Date, y = n)) +
      geom_point(aes(color = GENDER)) +
      geom_smooth(method = "gam", aes(color = GENDER)) +
      labs(y = "Number of Trips") +
      theme(legend.key = element_blank(), legend.position = "bottom") +
      scale_color_discrete(name = "Gender", breaks = c('Male', 'Female'), labels = c('Male', 'Female'))
  )

  # Hourly trip counts for the selected date range.
  # FIX: input$timelog is a length-2 range (used as [1]/[2] in
  # trip_bygender above); the original compared STARTDAY against the whole
  # vector, which recycled both endpoints instead of bounding the range.
  output$hourlytrips <- renderPlot(
    df0 %>% select(STARTDAY, TIMEOFDAY, GENDER) %>%
      group_by(hr = hour(TIMEOFDAY)) %>%
      filter(STARTDAY >= input$timelog[1] & STARTDAY <= input$timelog[2]) %>%
      summarise(n = n()) %>%
      ggplot(aes(x = hr, y = n)) +
      geom_bar(stat = 'identity', fill = "blue") +
      labs(x = "Time of day (hour)", y = "Number of Trips")
  )

  # Daily frequency of cyclists by gender (precomputed counts in df3).
  # NOTE(review): "pink"/"blue" sit inside aes(), so they act as group
  # labels and get ggplot's default palette rather than literal colours;
  # the legend is then suppressed. If literal colours are wanted, move
  # `color =` outside aes() or add scale_color_identity().
  output$dailycyclists <- renderPlot(
    df3 %>% ggplot(aes(x=TIMEOFDAY_)) +
      geom_point(aes(y=Male, color = "pink"), stat='identity') +
      geom_point(aes(y=Female, color = "blue"), stat='identity') +
      theme(legend.position = "none") +
      geom_line(aes(y=Male, group = 1, color = 'pink')) +
      geom_line(aes(y=Female, group = 1, color = 'blue')) +
      labs(x = 'Time of Day', y = 'Number of Cyclists',
           title = 'Daily Frequency of Cyclists', size=3.5) +
      ylim(c(0,400000))
  )

  # Trips filtered by the rider-age slider (a length-2 range).
  age_reactive <- reactive({
    df0 %>% filter(AGE >= input$riderage[1] & AGE <= input$riderage[2])
  })

  # Heatmap of trip start stations for the selected age range.
  output$bikeheatmap <- renderLeaflet(
    age_reactive() %>% leaflet() %>% addTiles() %>%
      addHeatmap(lng = ~STARTSTATIONLONGITUDE, lat = ~STARTSTATIONLATITUDE, radius = 10,
                 gradient = "Blues" )
  )

  # Static photo served from the app directory; keep the file on disk
  # between renders (FALSE, not the unsafe shorthand F).
  output$photo <- renderImage({
    return(list(src='hubway4.jpg',
                filetype='image/jpeg',
                alt='photo'))
  }, deleteFile = FALSE)
}
e07cdbf142df5da496a1364b4dff72f843b76952 | e8f282cec225e3678e8464050edf7475d6995a91 | /man/createRollingTimeIndices.Rd | 2bc7905c7cbd25365c51f140ab6898aece861819 | [] | no_license | stjordanis/tsconv | aca5eaa03c699bbbf4775a960af38dbfe33e8681 | 0d7e9bc63aaecfcbbda79cf588334af663c20102 | refs/heads/master | 2020-04-15T15:12:43.422059 | 2017-01-06T05:59:22 | 2017-01-06T05:59:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,079 | rd | createRollingTimeIndices.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/E-createRollingTimeIndices.R
\name{createRollingTimeIndices}
\alias{createRollingTimeIndices}
\title{create index of time-stamps for vectorized functions}
\usage{
createRollingTimeIndices(initialTimeVector, estimationLength = 18,
timeUnit = "months", investmentHorizonLength = 1,
windowType = "rolling")
}
\arguments{
\item{initialTimeVector}{vector of dates YYYY-MM-DD}
\item{estimationLength}{a positive integer indicating the size of the estimation period}
\item{timeUnit}{a character string, one of "weeks", "days","weeks","months","years", indicating the unit size for estimationLength and investmentHorizon}
\item{investmentHorizonLength}{a positive integer indicating the size of the investment horizon (out of sample period)}

\item{windowType}{a character string indicating how the estimation window advances, e.g. "rolling" (the default)}
}
\value{
a character matrix of xts-subsettable date ranges, one column for the in-sample periods, and another for the out-of-sample periods
}
\description{
create index of time-stamps for vectorized functions
}
\examples{
# FUNCTION STILL UNDER DEVELOPMENT
}
|
f052bfe4a573eac856604ccee0c829c37bf1dedf | beac66411ce2eddaea216148a8c5ffca34074fd6 | /tests/testthat.R | f0f312fcf5863a8c922ee87e0548004c0468b396 | [] | no_license | AustralianAntarcticDivision/bgmfiles | f6043ddcb79fc7f5d3febb5aa7a3d1b5ca5b7733 | bf3390a23222c788d99c6370d4d5ef2923fb8db5 | refs/heads/master | 2020-04-06T06:49:40.243993 | 2016-09-09T06:00:02 | 2016-09-09T06:00:02 | 55,750,978 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 60 | r | testthat.R | library(testthat)
library(bgmfiles)
test_check("bgmfiles")
|
9004bcf956f701e5cdb0bc3cca5472a57e49429b | 2bd293d9eff164a31e5ca98900a3b623aced5815 | /man/Colour.Pixel.Rd | b252b58fe37133f70964d7afb2abf3a332eaf752 | [] | no_license | carelvdmerwe/UBbipl3 | 213a17f60e7cd1796f51ce2a2f5fc097b57e722c | ea5bbe4767d42d92d949e42b1db199fc8d5e12f0 | refs/heads/master | 2020-06-07T04:44:22.514152 | 2019-06-20T13:39:36 | 2019-06-20T13:39:36 | 192,926,846 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 107 | rd | Colour.Pixel.Rd | \name{Colour.Pixel}
\alias{Colour.Pixel}
\title{
COLOUR PIXEL
}
\description{
See Understanding Biplots.
}
|
e187d0c65b91ae3134a34b7acf462a5ae75e9918 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/phylosim/examples/getName.Event.Rd.R | 37df9508696c293440cc0f275036990978db43e4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 291 | r | getName.Event.Rd.R | library(phylosim)
### Name: getName.Event
### Title: Get the name of an Event object
### Aliases: getName.Event Event.getName getName,Event-method
### ** Examples
# create an Event object
e<-Event(name="MyEvent")
# get event name
getName(e)
# get name via virtual field
e$name
|
39176000b5684e75b125a8708a8554cc33fb0abf | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/GtkUIManager.Rd | a7b9f9818e92ae116eb137288373908a95f55b98 | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 13,572 | rd | GtkUIManager.Rd | \alias{GtkUIManager}
\alias{gtkUIManager}
\alias{GtkUIManagerItemType}
\name{GtkUIManager}
\title{GtkUIManager}
\description{Constructing menus and toolbars from an XML description}
\section{Methods and Functions}{
\code{\link{gtkUIManagerNew}()}\cr
\code{\link{gtkUIManagerSetAddTearoffs}(object, add.tearoffs)}\cr
\code{\link{gtkUIManagerGetAddTearoffs}(object)}\cr
\code{\link{gtkUIManagerInsertActionGroup}(object, action.group, pos)}\cr
\code{\link{gtkUIManagerRemoveActionGroup}(object, action.group)}\cr
\code{\link{gtkUIManagerGetActionGroups}(object)}\cr
\code{\link{gtkUIManagerGetAccelGroup}(object)}\cr
\code{\link{gtkUIManagerGetWidget}(object, path)}\cr
\code{\link{gtkUIManagerGetToplevels}(object, types)}\cr
\code{\link{gtkUIManagerGetAction}(object, path)}\cr
\code{\link{gtkUIManagerAddUiFromString}(object, buffer, length = -1, .errwarn = TRUE)}\cr
\code{\link{gtkUIManagerNewMergeId}(object)}\cr
\code{\link{gtkUIManagerAddUi}(object, merge.id, path, name, action = NULL, type, top)}\cr
\code{\link{gtkUIManagerRemoveUi}(object, merge.id)}\cr
\code{\link{gtkUIManagerGetUi}(object)}\cr
\code{\link{gtkUIManagerEnsureUpdate}(object)}\cr
\code{gtkUIManager()}
}
\section{Hierarchy}{\preformatted{GObject
+----GtkUIManager}}
\section{Interfaces}{GtkUIManager implements
\code{\link{GtkBuildable}}.}
\section{Detailed Description}{A \code{\link{GtkUIManager}} constructs a user interface (menus and toolbars) from
one or more UI definitions, which reference actions from one or more
action groups.}
\section{UI Definitions}{The UI definitions are specified in an XML format which can be
roughly described by the following DTD.
Do not confuse the GtkUIManager UI Definitions described here with
the similarly named GtkBuilder UI
Definitions.
\preformatted{<!ELEMENT ui (menubar|toolbar|popup|accelerator)* >
<!ELEMENT menubar (menuitem|separator|placeholder|menu)* >
<!ELEMENT menu (menuitem|separator|placeholder|menu)* >
<!ELEMENT popup (menuitem|separator|placeholder|menu)* >
<!ELEMENT toolbar (toolitem|separator|placeholder)* >
<!ELEMENT placeholder (menuitem|toolitem|separator|placeholder|menu)* >
<!ELEMENT menuitem EMPTY >
<!ELEMENT toolitem (menu?) >
<!ELEMENT separator EMPTY >
<!ELEMENT accelerator EMPTY >
<!ATTLIST menubar name #IMPLIED
action #IMPLIED >
<!ATTLIST toolbar name #IMPLIED
action #IMPLIED >
<!ATTLIST popup name #IMPLIED
action #IMPLIED
accelerators (true|false) #IMPLIED >
<!ATTLIST placeholder name #IMPLIED
action #IMPLIED >
<!ATTLIST separator name #IMPLIED
action #IMPLIED
expand (true|false) #IMPLIED >
<!ATTLIST menu name #IMPLIED
action #REQUIRED
position (top|bot) #IMPLIED >
<!ATTLIST menuitem name #IMPLIED
action #REQUIRED
position (top|bot) #IMPLIED
always-show-image (true|false) #IMPLIED >
<!ATTLIST toolitem name #IMPLIED
action #REQUIRED
position (top|bot) #IMPLIED >
<!ATTLIST accelerator name #IMPLIED
action #REQUIRED >
}
There are some additional restrictions beyond those specified in the
DTD, e.g. every toolitem must have a toolbar in its ancestry and
every menuitem must have a menubar or popup in its ancestry. Since
a \verb{GMarkup} parser is used to parse the UI description, it must not only
be valid XML, but valid \verb{GMarkup}.
If a name is not specified, it defaults to the action. If an action is
not specified either, the element name is used. The name and action
attributes must not contain '/' characters after parsing (since that
would mess up path lookup) and must be usable as XML attributes when
enclosed in doublequotes, thus they must not '"' characters or references
to the " entity.
\emph{A UI definition}\preformatted{<ui>
<menubar>
<menu name="FileMenu" action="FileMenuAction">
<menuitem name="New" action="New2Action" />
<placeholder name="FileMenuAdditions" />
</menu>
<menu name="JustifyMenu" action="JustifyMenuAction">
<menuitem name="Left" action="justify-left"/>
<menuitem name="Centre" action="justify-center"/>
<menuitem name="Right" action="justify-right"/>
<menuitem name="Fill" action="justify-fill"/>
</menu>
</menubar>
<toolbar action="toolbar1">
<placeholder name="JustifyToolItems">
<separator/>
<toolitem name="Left" action="justify-left"/>
<toolitem name="Centre" action="justify-center"/>
<toolitem name="Right" action="justify-right"/>
<toolitem name="Fill" action="justify-fill"/>
<separator/>
</placeholder>
</toolbar>
</ui>
}
The constructed widget hierarchy is very similar to the element tree
of the XML, with the exception that placeholders are merged into their
parents. The correspondence of XML elements to widgets should be
almost obvious:
\describe{
\item{menubar}{a \code{\link{GtkMenuBar}}}
\item{toolbar}{a \code{\link{GtkToolbar}}}
\item{popup}{a toplevel \code{\link{GtkMenu}}}
\item{menu}{a \code{\link{GtkMenu}} attached to a menuitem}
\item{menuitem}{a \code{\link{GtkMenuItem}} subclass, the exact type depends on the
action}
\item{toolitem}{a \code{\link{GtkToolItem}} subclass, the exact type depends on the
action. Note that toolitem elements may contain a menu element, but only
if their associated action specifies a \code{\link{GtkMenuToolButton}} as proxy.}
\item{separator}{a \code{\link{GtkSeparatorMenuItem}} or
\code{\link{GtkSeparatorToolItem}}}
\item{accelerator}{a keyboard accelerator}
}
The "position" attribute determines where a constructed widget is positioned
wrt. to its siblings in the partially constructed tree. If it is
"top", the widget is prepended, otherwise it is appended.}
\section{UI Merging}{The most remarkable feature of \code{\link{GtkUIManager}} is that it can overlay a set
of menuitems and toolitems over another one, and demerge them later.
Merging is done based on the names of the XML elements. Each element is
identified by a path which consists of the names of its anchestors, separated
by slashes. For example, the menuitem named "Left" in the example above
has the path \code{/ui/menubar/JustifyMenu/Left} and the
toolitem with the same name has path
\code{/ui/toolbar1/JustifyToolItems/Left}.}
\section{Accelerators}{Every action has an accelerator path. Accelerators are installed together with
menuitem proxies, but they can also be explicitly added with <accelerator>
elements in the UI definition. This makes it possible to have accelerators for
actions even if they have no visible proxies.}
\section{Smart Separators}{The separators created by \code{\link{GtkUIManager}} are "smart", i.e. they do not show up
in the UI unless they end up between two visible menu or tool items. Separators
which are located at the very beginning or end of the menu or toolbar
containing them, or multiple separators next to each other, are hidden. This
is a useful feature, since the merging of UI elements from multiple sources
can make it hard or impossible to determine in advance whether a separator
will end up in such an unfortunate position.
For separators in toolbars, you can set \code{expand="true"} to
turn them from a small, visible separator to an expanding, invisible one.
Toolitems following an expanding separator are effectively right-aligned.}
\section{Empty Menus}{Submenus pose similar problems to separators in connection with merging. It is
impossible to know in advance whether they will end up empty after merging.
\code{\link{GtkUIManager}} offers two ways to treat empty submenus:
\itemize{
\item make them disappear by hiding the menu item they're attached to
\item add an insensitive "Empty" item
}
The behaviour is chosen based on the "hide_if_empty" property of the action
to which the submenu is associated.}
\section{GtkUIManager as GtkBuildable}{The GtkUIManager implementation of the GtkBuildable interface accepts
GtkActionGroup objects as <child> elements in UI definitions.
A GtkUIManager UI definition as described above can be embedded in
an GtkUIManager <object> element in a GtkBuilder UI definition.
The widgets that are constructed by a GtkUIManager can be embedded in
other parts of the constructed user interface with the help of the
"constructor" attribute. See the example below.
\emph{An embedded GtkUIManager UI definition}\preformatted{<object class="GtkUIManager" id="uiman">
<child>
<object class="GtkActionGroup" id="actiongroup">
<child>
<object class="GtkAction" id="file">
<property name="label">_File</property>
</object>
</child>
</object>
</child>
<ui>
<menubar name="menubar1">
<menu action="file">
</menu>
</menubar>
</ui>
</object>
<object class="GtkWindow" id="main-window">
<child>
<object class="GtkMenuBar" id="menubar1" constructor="uiman"/>
</child>
</object>
}}
\section{Structures}{\describe{\item{\verb{GtkUIManager}}{
The \code{GtkUIManager} struct contains only private
members and should not be accessed directly.
}}}
\section{Convenient Construction}{\code{gtkUIManager} is the equivalent of \code{\link{gtkUIManagerNew}}.}
\section{Enums and Flags}{\describe{\item{\verb{GtkUIManagerItemType}}{
These enumeration values are used by \code{\link{gtkUIManagerAddUi}} to determine
what UI element to create.
\describe{
\item{\verb{auto}}{Pick the type of the UI element according to context.}
\item{\verb{menubar}}{Create a menubar.}
\item{\verb{menu}}{Create a menu.}
\item{\verb{toolbar}}{Create a toolbar.}
\item{\verb{placeholder}}{Insert a placeholder.}
\item{\verb{popup}}{Create a popup menu.}
\item{\verb{menuitem}}{Create a menuitem.}
\item{\verb{toolitem}}{Create a toolitem.}
\item{\verb{separator}}{Create a separator.}
\item{\verb{accelerator}}{Install an accelerator.}
}
}}}
\section{Signals}{\describe{
\item{\code{actions-changed(merge, user.data)}}{
The "actions-changed" signal is emitted whenever the set of actions
changes.
Since 2.4
\describe{
\item{\code{merge}}{a \code{\link{GtkUIManager}}}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{add-widget(merge, widget, user.data)}}{
The add_widget signal is emitted for each generated menubar and toolbar.
It is not emitted for generated popup menus, which can be obtained by
\code{\link{gtkUIManagerGetWidget}}.
Since 2.4
\describe{
\item{\code{merge}}{a \code{\link{GtkUIManager}}}
\item{\code{widget}}{the added widget}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{connect-proxy(uimanager, action, proxy, user.data)}}{
The connect_proxy signal is emitted after connecting a proxy to
an action in the group.
This is intended for simple customizations for which a custom action
class would be too clumsy, e.g. showing tooltips for menuitems in the
statusbar.
Since 2.4
\describe{
\item{\code{uimanager}}{the ui manager}
\item{\code{action}}{the action}
\item{\code{proxy}}{the proxy}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{disconnect-proxy(uimanager, action, proxy, user.data)}}{
The disconnect_proxy signal is emitted after disconnecting a proxy
from an action in the group.
Since 2.4
\describe{
\item{\code{uimanager}}{the ui manager}
\item{\code{action}}{the action}
\item{\code{proxy}}{the proxy}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{post-activate(uimanager, action, user.data)}}{
The post_activate signal is emitted just after the \code{action}
is activated.
This is intended for applications to get notification
just after any action is activated.
Since 2.4
\describe{
\item{\code{uimanager}}{the ui manager}
\item{\code{action}}{the action}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{pre-activate(uimanager, action, user.data)}}{
The pre_activate signal is emitted just before the \code{action}
is activated.
This is intended for applications to get notification
just before any action is activated.
Since 2.4
\describe{
\item{\code{uimanager}}{the ui manager}
\item{\code{action}}{the action}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
}}
\section{Properties}{\describe{
\item{\verb{add-tearoffs} [logical : Read / Write]}{
The "add-tearoffs" property controls whether generated menus
have tearoff menu items.
Note that this only affects regular menus. Generated popup
menus never have tearoff menu items.
Default value: FALSE Since 2.4
}
\item{\verb{ui} [character : * : Read]}{
An XML string describing the merged UI. Default value: "<ui>\\n</ui>\\n"
}
}}
\references{\url{https://developer-old.gnome.org/gtk2/stable/GtkUIManager.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\seealso{\code{\link{GtkBuilder}}}
\keyword{internal}
|
2cad50d54775675a329fe52880ee6fee0e31ce5b | 401d48b917525346b9b4320607ebb4a7df373d8a | /man/Maintenance_metabolism.Rd | 2cac8d98d3374d8e0fc886be9c9e794d012eeade | [] | no_license | CamiloCarneiro/eneRgetics | 0f7822f027320dbf2c04eb046d1a4e3d00062c3e | 0910832fd62330dbc11022b3206c19c1670d6158 | refs/heads/master | 2023-04-27T11:37:20.387666 | 2023-04-14T07:33:55 | 2023-04-14T07:33:55 | 202,355,162 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 410 | rd | Maintenance_metabolism.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Maintenance_metabolism.R
\docType{data}
\name{Maintenance_metabolism}
\alias{Maintenance_metabolism}
\title{Example of Maintenance metabolism output}
\format{A data frame with 192 rows and 17 variables}
\usage{
data(Maintenance_metabolism)
}
\description{
Example of Maintenance metabolism output
}
\keyword{datasets}
|
85a9581cbe0a1b414318772819a9a90c45452b8a | e779c8abbc066e633b328371ff6f76f68f0c4b35 | /1.1 load month data and create year data.R | 32282ca4b4ec9f2c1bd1289a61e2c066c802ab01 | [] | no_license | richarddeng88/citibike | 285b509ec36987efc8ef5826d3b080b5d0ddb02a | a366ed17347866e569e7f7ed910d0c6729aec1c6 | refs/heads/master | 2021-07-23T22:14:11.975650 | 2016-10-06T20:59:22 | 2016-10-06T20:59:22 | 58,492,168 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,963 | r | 1.1 load month data and create year data.R | # 2013 ###########################
c7 <- read.csv("c:/Users/Richard/Desktop/citibike/201307-citibike-tripdata.csv")
c8 <- read.csv("c:/Users/Richard/Desktop/citibike/201308-citibike-tripdata.csv")
c9 <- read.csv("c:/Users/Richard/Desktop/citibike/201309-citibike-tripdata.csv")
c10 <- read.csv("c:/Users/Richard/Desktop/citibike/201310-citibike-tripdata.csv")
c11 <- read.csv("c:/Users/Richard/Desktop/citibike/201311-citibike-tripdata.csv")
c12 <- read.csv("c:/Users/Richard/Desktop/citibike/201312-citibike-tripdata.csv")
df2013 <- rbind(c7,c8,c9,c10,c11,c12)
write.csv(df2013, file="citibike/2013citibike.csv",row.names = F)
# 2014 ----
cb_dir <- "c:/Users/Richard/Desktop/citibike"

# Jan-Aug share the 2013 ISO timestamp format, so they are merged with df2013.
files_jan_aug <- sprintf("%s/2014%02d-citibike-tripdata.csv", cb_dir, 1:8)
old <- do.call(rbind, lapply(files_jan_aug, read.csv, stringsAsFactors = FALSE))
df_old <- rbind(df2013, old)
# Timestamps through 08/2014 are "YYYY-MM-DD HH:MM:SS".
df_old$starttime <- strptime(df_old$starttime, "%Y-%m-%d %H:%M:%S")
df_old$stoptime <- strptime(df_old$stoptime, "%Y-%m-%d %H:%M:%S")
write.csv(df_old, file = "citibike/2013to082014citibike.csv", row.names = FALSE)

# Sep-Dec switch to the "MM/DD/YYYY HH:MM" format and are kept separate.
files_sep_dec <- sprintf("%s/2014%02d-citibike-tripdata.csv", cb_dir, 9:12)
df2014 <- do.call(rbind, lapply(files_sep_dec, read.csv, stringsAsFactors = FALSE))
df2014$starttime <- strptime(df2014$starttime, "%m/%d/%Y %H:%M")
df2014$stoptime <- strptime(df2014$stoptime, "%m/%d/%Y %H:%M")
write.csv(df2014, file = "citibike/092014to122014citibike.csv", row.names = FALSE)
# 2015: all twelve months, "MM/DD/YYYY HH:MM" timestamps ----
files_2015 <- sprintf("c:/Users/Richard/Desktop/citibike/2015%02d-citibike-tripdata.csv", 1:12)
df2015 <- do.call(rbind, lapply(files_2015, read.csv))
df2015$starttime <- strptime(df2015$starttime, "%m/%d/%Y %H:%M")
df2015$stoptime <- strptime(df2015$stoptime, "%m/%d/%Y %H:%M")
write.csv(df2015, file = "citibike/2015citibike.csv", row.names = FALSE)
# 2016: only January-March were available when this was written ----
files_2016 <- sprintf("c:/Users/Richard/Desktop/citibike/2016%02d-citibike-tripdata.csv", 1:3)
df2016 <- do.call(rbind, lapply(files_2016, read.csv))
df2016$starttime <- strptime(df2016$starttime, "%m/%d/%Y %H:%M")
df2016$stoptime <- strptime(df2016$stoptime, "%m/%d/%Y %H:%M")
write.csv(df2016, file = "citibike/2016citibike.csv", row.names = FALSE)
### Scratch notes on timestamp formats across releases ----
# NOTE(review): the lines below referenced `b`, which is never defined in
# this script, so sourcing the file errored at this point. The snippets are
# preserved as comments for reference only; nothing downstream used e/f/g.
#c <- gsub("-", "/", b)
#d <- strptime(c, "%Y/%m/%d %H:%M:%S")
#e <- strptime(b, "%Y-%m-%d %H:%M:%S")  # format used from launch through 08/2013
#f <- head(c1$starttime)
#g <- strptime(f, "%m/%d/%Y %H:%M")     # format used after 08/2013
|
a7467fad32b17a43f9804beb2fb9c11c796c7673 | 10f93e9a5583959405966c5a9076596278e0dd55 | /NY_Saratoga/combine_races_withfunction.R | 72b02a0e0f9e1b57e6ff9a39debb034b9f9594d1 | [
"MIT"
] | permissive | amkessler/openelections_work | 9dc089c5802874fce34aeb69434e4888b7c7fc15 | 8054b196eb6b1a6b134942658ae9968cef4886c2 | refs/heads/main | 2023-03-26T16:59:37.838848 | 2021-03-25T00:52:52 | 2021-03-25T00:52:52 | 322,394,285 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,178 | r | combine_races_withfunction.R | library(tidyverse)
library(janitor)
library(readxl)
# library(arrow) #for exporting feather files
#source processing function
source("process_ny_data_functions.R")
## PROCESS DATA FILES ####
# County being processed; drives both the input and output file names below.
target_county <- "Saratoga"

# Path to the cleaned county workbook,
# e.g. "NY_Saratoga/Saratoga_NY_GE20_cleaned.xlsx"
filestring_import <- sprintf("NY_%s/%s_NY_GE20_cleaned.xlsx",
                             target_county, target_county)
# Presidential ####
#run processing function sourced at the top along with import in one step
# Function wants:
# - dataset (or import from excel function)
# - office: text label for office (e.g. "U.S. House")
# - district: text label for district (e.g. "42")
# NOTE: each block below reads one contest's worksheet from the county
# workbook and returns tidy precinct-level results. The objects must keep
# the "processed_" name prefix -- the combine step later collects them from
# the global environment by grepping for that pattern.
processed_prez <- process_ny_data(read_excel(filestring_import, sheet = "presidential"),
                 "President",
                 "")
processed_prez   # print for a quick visual sanity check

## Congressional - District 20 ####
processed_cd20 <- process_ny_data(read_excel(filestring_import, sheet = "cd20"),
                 "U.S. House",
                 "20")
processed_cd20

## Congressional - District 21 ####
processed_cd21 <- process_ny_data(read_excel(filestring_import, sheet = "cd21"),
                 "U.S. House",
                 "21")
processed_cd21

## State Senate 43 ####
processed_statesen43 <- process_ny_data(read_excel(filestring_import, sheet = "statesen43"),
                 "State Senate",
                 "43")
processed_statesen43

## State Senate 49 ####
processed_statesen49 <- process_ny_data(read_excel(filestring_import, sheet = "statesen49"),
                 "State Senate",
                 "49")
processed_statesen49

## State House 108 ####
processed_statehou108 <- process_ny_data(read_excel(filestring_import, sheet = "statehou108"),
                 "State Assembly",
                 "108")
processed_statehou108

## State House 112 ####
processed_statehou112 <- process_ny_data(read_excel(filestring_import, sheet = "statehou112"),
                 "State Assembly",
                 "112")
processed_statehou112

## State House 113 ####
processed_statehou113 <- process_ny_data(read_excel(filestring_import, sheet = "statehou113"),
                 "State Assembly",
                 "113")
processed_statehou113

## State House 114 ####
processed_statehou114 <- process_ny_data(read_excel(filestring_import, sheet = "statehou114"),
                 "State Assembly",
                 "114")
processed_statehou114
### COMBINE INTO ONE #####

#combine tidy/long datasets created above
# Fix: the original grep("processed", ...) also matched `processed_combined`
# itself on a re-run of this section (duplicating every row) and would match
# any unrelated object containing "processed". Anchor the prefix and exclude
# the output object explicitly. Also, mget() already returns a list, so the
# previous do.call("list", ...) wrapper was redundant.
target_dfs <- setdiff(grep("^processed_", names(.GlobalEnv), value = TRUE),
                      "processed_combined")
target_dfs_list <- mget(target_dfs, envir = .GlobalEnv)

processed_combined <- bind_rows(target_dfs_list)

#add county name for all records ####
#we'll use the saved county name specified at top of script
processed_combined <- processed_combined %>%
  mutate(
    county = target_county
  ) %>%
  select(county, everything()) %>%
  arrange(office, district)
## MANUAL INTEGRITY CHECKS ####
# Eyeball these tallies before exporting: look for stray party labels,
# unexpected office/district pairings, or duplicate candidate-name spellings.

#check parties
processed_combined %>%
  count(party)

#check districts
processed_combined %>%
  count(office, district)

#check candidates
processed_combined %>%
  count(candidate)
### EXPORT RESULTS ####

# Output path follows the openelections naming convention:
#   NY_<County>/<date>__<state>__<election>__<county>__precinct.csv
filestring_export <- sprintf("NY_%s/20201103__ny__general__%s__precinct.csv",
                             target_county, str_to_lower(target_county))

# Write the precinct-level results, leaving NA cells blank in the CSV.
write_csv(processed_combined, filestring_export, na = "")
|
370845d6e1d5a42f46a6c80832bec6cad3d6e2c4 | c02d5a4f35041397163c57adb535e2f4b210c139 | /VIIRS_open.R | 226f9d240b0c7c9b5b43f3c4f53018999b6e218b | [] | no_license | solrepresa/VIIRS | 70915dd50b0bea7f7c22b91ff9b9b4baf4c8db73 | 834c388f74efae204eee66a26fc26083286b50ae | refs/heads/main | 2023-01-03T11:39:44.337505 | 2020-10-26T12:54:32 | 2020-10-26T12:54:32 | 306,706,820 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,740 | r | VIIRS_open.R | ## Sol Represa
# Objective: Open VIIRS - VNP46A1 product
# 23/10/2020 - La Plata, Argentina
library(gdalUtils)
library(raster)
library(rgdal)
#library(R.utils)
#library(maptools)
# Working directory for all input/output below.
# NOTE(review): setwd() plus absolute Windows paths makes this script
# machine-specific -- fine for a personal analysis, fragile elsewhere.
setwd("C:\\Users\\solre\\Desktop\\S5P")

# Input VNP46A1 granule (HDF5) and the two GeoTIFF outputs:
# file_save is the intermediate written by gdal_translate, file_export the
# final georeferenced product.
filename = "C:\\Users\\solre\\Desktop\\S5P\\VNP46A1.A2020285.h12v12.001.2020286073702.h5"
file_save <- "intermedio.tif"
file_export <- "DNB_Sensor_Radiance.tif"

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CHECK BEFORE RUNNING:
# gdal_setInstallation(verbose=TRUE) # check the installed GDAL version
# getOption("gdalUtils_gdalPath") # verify that HDF5 files can be opened
# gdal_chooseInstallation(hasDrivers="HDF5")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

## Inspect hdf5
info <- gdalinfo(filename) # read the HDF5 metadata as a character vector
info[207] # Long Name
# NOTE(review): the hard-coded indices into `info` (207, 183, 191, 204, 209)
# assume a fixed gdalinfo output layout for this product and GDAL version --
# confirm they still point at the intended metadata lines before reuse.

#crs_modis = '+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +R=6371007.181 +units=m +no_defs'
crs_project = "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"

## Open hdf5: translate subdataset 5 (presumably DNB sensor radiance, per
## the output file name -- confirm) to a GeoTIFF, then load it as a raster.
sds <- get_subdatasets(filename)
gdal_translate(sds[5], dst_dataset = file_save)
VIIRS_raster <- raster(file_save, crs = crs_project)

# Treat 65535 as the NA/fill value.
NAvalue(VIIRS_raster) <- 65535

# Extent: parse the bounding coordinates from the last characters of the
# corresponding gdalinfo metadata lines (assumes whole-degree values of at
# most three characters -- TODO confirm for other tiles).
xmax = as.numeric(substr(info[183], nchar(info[183])-3,nchar(info[183])-1)) #EastBoundingCoord
ymax = as.numeric(substr(info[191], nchar(info[191])-3,nchar(info[191])-1)) #NorthBoundingCoord
ymin = as.numeric(substr(info[204], nchar(info[204])-3,nchar(info[204])-1)) #SouthBoundingCoord
xmin = as.numeric(substr(info[209], nchar(info[209])-3,nchar(info[209])-1)) #WestBoundingCoord
extent(VIIRS_raster) <- extent(xmin, xmax, ymin, ymax)

writeRaster(VIIRS_raster, file_export, format = "GTiff", overwrite=TRUE)
|
0b25c8e95765842601b08b718709add59376248c | 6dbc7d2df79a031c0d7877cd7d43652a6585a1b5 | /ComparisionOperator.R | 47cce6b5d959f49a748e07aaeb3cabd50a18a4cb | [] | no_license | RamrajSekar/Rfundamentals | bacaa236c19d75afaa00bddad291c40598fb0724 | b966ab9a1b11f38e175a19148f8d61074161d942 | refs/heads/master | 2021-01-21T10:25:27.096869 | 2017-05-19T07:36:58 | 2017-05-19T07:36:58 | 91,687,611 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 491 | r | ComparisionOperator.R | #Comparision operator
# The linkedin and facebook vectors
linkedin <- c(16, 9, 13, 5, 2, 17, 14)
facebook <- c(17, 7, 5, 16, 8, 13, 14)
#Find On which days did the number of LinkedIn profile views exceed 15?
linkedin >= 15
# Quiet days
linkedin <= 5
# LinkedIn more popular than Facebook
linkedin > facebook
views <- matrix(c(linkedin, facebook), nrow = 2, byrow = TRUE)
# When does views equal 13?
views[1:2,1:7] == 13
# When is views less than or equal to 14?
views[1:2,1:7] <= 14 |
fb352ac823e4b9cf10858525b92617ef34ed53e1 | 7caf7270e6791ecff62b417e361392e8ed48dd4c | /man/get_common_columns.Rd | 94e1960869fb8c8e3d63245bd71d386646a618ae | [] | no_license | cran/actuaryr | afb6a180b5fa2fb3020149af1420342caaae3c23 | 5d91d8a105368711dfe11968289f77d0aa6dde25 | refs/heads/master | 2021-02-27T11:09:43.320494 | 2020-04-13T07:40:02 | 2020-04-13T07:40:02 | 245,601,344 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 418 | rd | get_common_columns.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_common_columns}
\alias{get_common_columns}
\title{Get common columns across two tables}
\usage{
get_common_columns(tables)
}
\arguments{
\item{tables}{a list with two elements: x and y}
}
\value{
a list with two elements: x and y
}
\description{
Get common columns across two tables
}
\keyword{internal}
|
0341910182ce6f195b3f52bb46f6e71bf3ab94dd | 8caeff2957ae777eabbb17e92ac49a5f51f1937c | /Nick_Mahrley_9:25:18.R | 5745556fb914eec769067207d6162d25507f274a | [] | no_license | ayusharora99/2018_Umpires | fbe382e7c3d1b6fbafeb2503cb9a9bffc26103db | ea344d33ad55e732a22c33ab155842834ded4422 | refs/heads/master | 2020-04-23T18:22:49.316852 | 2019-03-04T00:43:47 | 2019-03-04T00:43:47 | 171,364,791 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,027 | r | Nick_Mahrley_9:25:18.R | # 9/25/18 : Giants vs Padres : Nick Mahrley
# 167 pitches were called strikes/balls
# The robot-ump called 45 of those pitches as called strikes & 123 as balls
# Nick Mahrley called 62 of those pitches as called strikes & 105 as balls
# Accuracy: 90%
# File --> Import Dataset --> From text (base) (YES TO HEADER) & import csv file downloaded from https://baseballsavant.mlb.com/statcast_search
Nick_Mahrley <- read.csv("~/Desktop/Analyzing Baseball Data with R/2018 Giants Umpires/Nick_Mahrley_9:25:18.csv")
# Packages needed for Analysis
install.packages(c("e1071","caret","rpart"))
library(e1071)
library(caret)
library(rpart)
# Getting Familiar With Dataset & removing any NULL values
dim(Nick_Mahrley)
names(Nick_Mahrley)
is.na(Nick_Mahrley)
colSums(is.na(Nick_Mahrley))
Nick_Mahrley = Nick_Mahrley[,colSums(is.na(Nick_Mahrley)) == 0]
dim(Nick_Mahrley)
# Subsetting Relevant Info
drops = c("event","des","hit_location","bb_type","on_3b","on_2b","on_1b","hc_x","hc_y","hit_distance_sc","launch_speed","launch_angle","estimated_ba_using_speedangle","estimated_woba_using_speedangle","woba_value","woba_denom","launch_speed_angle","iso_value","babip_value")
Nick_Mahrley = Nick_Mahrley[ , !(names(Nick_Mahrley) %in% drops)]
dim(Nick_Mahrley)
# Splitting data into Training (80% of data) & Testing (20% of data) sets
Nick_Mahrley_train = Nick_Mahrley[0:(0.8 * nrow(Nick_Mahrley)),]
dim(Nick_Mahrley_train)
prop.table(table(Nick_Mahrley_train$type))
Nick_Mahrley_test = Nick_Mahrley[(0.8*nrow(Nick_Mahrley)):nrow(Nick_Mahrley),]
dim(Nick_Mahrley_test)
prop.table(table(Nick_Mahrley_test$type))
# Creating Decision Tree to Predict Umpire's Call
tree_model <-rpart(type~., data = Nick_Mahrley_train)
plot(tree_model)
text(tree_model, use.n = T)
# Testing Decision Tree with Test Data
Prediction_UMP<-predict(tree_model, newdata = Nick_Mahrley_test, type = 'class')
# Accuracy of Decision Tree created for specific Umpire
confusionMatrix(table(Prediction_UMP, Nick_Mahrley_test$type))
# Subset for Borderline Calls
Nick_Mahrley$Borderline = ifelse(((abs(Nick_Mahrley$plate_x)> 0.748) & (abs(Nick_Mahrley$plate_x)<0.914))
& (((Nick_Mahrley$plate_z > Nick_Mahrley$sz_top-0.83) & (Nick_Mahrley$plate_z < Nick_Mahrley$sz_top+0.83))
| (((Nick_Mahrley$plate_z)<Nick_Mahrley$sz_bot+0.83) & ((Nick_Mahrley$plate_z) > Nick_Mahrley$sz_bot-0.83))), 'T','F')
# Copy Pitch Calls into another data set and adjust type to the electronic strike zone calls
# Seperate Ball & Strike Types
Nick_Mahrley_Strikes = subset(Nick_Mahrley, Nick_Mahrley$type == "S")
Nick_Mahrley_Balls = subset(Nick_Mahrley, Nick_Mahrley$type == "B")
# Borderline
Nick_Mahrley_Borderline = subset(Nick_Mahrley, Nick_Mahrley$Borderline == "T")
# Create new column for adjusted call based on electronic strike zone on Umpire's called strikes
# (plate_x < 0.833 & $plate_x > -0.833) & ($plate_z > sz_bot & plate_z < sz_top) == S
Nick_Mahrley_Strikes$AdjustedCall = ifelse((Nick_Mahrley_Strikes$plate_x < 0.833 & Nick_Mahrley_Strikes$plate_x > -0.833) & (Nick_Mahrley_Strikes$plate_z > Nick_Mahrley_Strikes$sz_bot & Nick_Mahrley_Strikes$plate_z < Nick_Mahrley_Strikes$sz_top), 'S', 'B')
table(Nick_Mahrley_Strikes$AdjustedCall)
# Create new column for adjusted call based on electronic strike zone on Umpire's called balls
# (plate_x > 0.833 | $plate_x < -0.833) | ($plate_z < sz_bot | plate_z > sz_top) == B
Nick_Mahrley_Balls$AdjustedCall = ifelse((Nick_Mahrley_Balls$plate_x > 0.833 | Nick_Mahrley_Balls$plate_x < -0.833)|(Nick_Mahrley_Balls$plate_z < Nick_Mahrley_Balls$sz_bot | Nick_Mahrley_Balls$plate_z > Nick_Mahrley_Balls$sz_top),'B','S')
table(Nick_Mahrley_Balls$AdjustedCall)
# Borderline
Nick_Mahrley_Borderline$AdjustedCall = ifelse((Nick_Mahrley_Borderline$plate_x < 0.833 & Nick_Mahrley_Borderline$plate_x > -0.833) & (Nick_Mahrley_Borderline$plate_z > Nick_Mahrley_Borderline$sz_bot & Nick_Mahrley_Borderline$plate_z < Nick_Mahrley_Borderline$sz_top), 'S', 'B')
# Merge to create new dataset
Nick_Mahrley_AdjustedCalls = rbind(Nick_Mahrley_Strikes,Nick_Mahrley_Balls)
Nick_Mahrley_AdjustedCalls$OnFieldRuling = ifelse(Nick_Mahrley_AdjustedCalls$type == "S","S","B")
# Re-create Decision Tree but this time with whole Data rather than just training set.
tree_model <-rpart(type~., data = Nick_Mahrley)
plot(tree_model)
text(tree_model, use.n = T)
# Predict using Umpire's Decision Tree on the AdjustedCalls dataset & compare calls with adjusted_call to find Accuracy
Prediction_UMP<-predict(tree_model, newdata = Nick_Mahrley_AdjustedCalls, type = 'class')
confusionMatrix(table(Prediction_UMP,Nick_Mahrley_AdjustedCalls$AdjustedCall))
# Borderline
Prediction_BORDERLINE<-predict(tree_model, newdata = Nick_Mahrley_Borderline, type = 'class')
confusionMatrix(table(Prediction_BORDERLINE,Nick_Mahrley_Borderline$AdjustedCall))
# Correct vs InCorrect Call
# Correct Calls
Nick_Mahrley_AdjustedCalls$Call = ifelse( ((Nick_Mahrley_AdjustedCalls$type == 'B') & ( (Nick_Mahrley_AdjustedCalls$AdjustedCall == "B") | (Nick_Mahrley_AdjustedCalls$Borderline == "T") ) ), "C","I" )
Nick_Mahrley_AdjustedCalls$Call = ifelse( ((Nick_Mahrley_AdjustedCalls$type == 'S') & ((Nick_Mahrley_AdjustedCalls$AdjustedCall == "S") | (Nick_Mahrley_AdjustedCalls$Borderline == "T") ) ), "C","I")
# InCorrect Calls
Nick_Mahrley_AdjustedCalls$Call = ifelse( ( (Nick_Mahrley_AdjustedCalls$type == 'B') & ((Nick_Mahrley_AdjustedCalls$AdjustedCall == "S") & (Nick_Mahrley_AdjustedCalls$Borderline == "F") ) ), "I","C")
Nick_Mahrley_AdjustedCalls$Call = ifelse( ( (Nick_Mahrley_AdjustedCalls$type == 'S') & ((Nick_Mahrley_AdjustedCalls$AdjustedCall == "B") & (Nick_Mahrley_AdjustedCalls$Borderline == "F") ) ), "I","C")
table(Nick_Mahrley_AdjustedCalls$Call)
# Which Pitchers Recieved the InCorrect Calls
Nick_Mahrley_Incorrect = subset(Nick_Mahrley_AdjustedCalls, Nick_Mahrley_AdjustedCalls$Call == "I")
print(Nick_Mahrley_Incorrect$player_name)
print(Nick_Mahrley_Incorrect$AdjustedCall)
|
e03121b96784b3a8a0b07262d0e085828349636c | 486deb2a88424a9dd6e4761af999263a6fa316b8 | /man/sim.crw.3d.Rd | 8e024d2a1820e4151b5af8afe8bd49fbeac7d8de | [] | no_license | cran/eRTG3D | 4ba5c89aba6d9f2a2500b6514de7a8fd157b7561 | 785c339e125caad743cc40502d58bfe15d53d24c | refs/heads/master | 2022-03-24T18:26:26.296102 | 2022-02-25T11:10:05 | 2022-02-25T11:10:05 | 209,614,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 795 | rd | sim.crw.3d.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testing3D.R
\name{sim.crw.3d}
\alias{sim.crw.3d}
\title{Simulation of a three dimensional Correlated Random Walk}
\usage{
sim.crw.3d(nStep, rTurn, rLift, meanStep, start = c(0, 0, 0))
}
\arguments{
\item{nStep}{the number of steps of the simulated trajectory}
\item{rTurn}{the correlation on the turn angle}
\item{rLift}{the correlation of the lift angle}
\item{meanStep}{the mean step length}
\item{start}{a vector of length 3 containing the coordinates of the start point of the trajectory}
}
\value{
A trajectory in the form of data.frame
}
\description{
Simulation of a three dimensional Correlated Random Walk
}
\examples{
sim.crw.3d(nStep = 10, rTurn = 0.9, rLift = 0.9, meanStep = 1, start = c(0, 0, 0))
}
|
f365fd71d211f51224d9077f4a228d7d29cdcbc2 | 9997881b10ba673c429aec141ace3176bae0ba86 | /man/theme_efdc.Rd | 4ded2d2b9cbf97337d028765a68e94f358c21d79 | [] | no_license | fankk18/efdcr | bd1fda050faeaa088ad7cfb00aabff693c1cf747 | 589d92f4d0a56be527776ff8a14d268e278501e3 | refs/heads/master | 2020-08-11T02:51:15.369600 | 2019-03-05T02:47:50 | 2019-03-05T02:47:50 | 214,476,489 | 1 | 0 | null | 2019-10-11T15:57:24 | 2019-10-11T15:57:23 | null | UTF-8 | R | false | true | 520 | rd | theme_efdc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme_efdc.R
\name{theme_efdc}
\alias{theme_efdc}
\title{The theme of the model plot}
\usage{
theme_efdc(base_size = 11, base_family = "",
base_line_size = base_size/22, base_rect_size = base_size/22)
}
\arguments{
\item{base_size}{Base font size.}
\item{base_family}{Base font family.}
\item{base_line_size}{Base size for line elements.}
\item{base_rect_size}{Base size for rect elements.}
}
\description{
The theme of the model plot
}
|
e7d8d1d53c03ee57888f794a988767e8fba8a334 | aa394b8ebb6e1abb4f869791a5de1caf797c4739 | /great_circle_tax_migration_long_lat.R | c51be260a0b43da474e33d363e22136aae03f854 | [] | no_license | chris-english/R_GIS | d391b358ad0d0b1787ab78135e9132bae80b4ad2 | d07e6d583284c9cbb65ec35678c59969608897f9 | refs/heads/master | 2021-01-23T08:56:16.901397 | 2012-08-22T23:51:02 | 2012-08-22T23:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,443 | r | great_circle_tax_migration_long_lat.R | ## Great Circles Tax Migration Long Lat
## to match logic with Nathan Yau, FlowingData
## http://flowingdata.com/2011/05/11/how-to-map-connections-with-great-circles/
library(maps)
library(geosphere)
## POSTGIS 2.0 Notes on preparing the centroids for use in R.
## map data from http://www.census.gov/geo/www/cob/co2000.html
## co99_d00 is a multipolygon and needs to be dumped to polygon
## and centroids taken on those polygons to avoid massive headaches.
## My two step approach, first dump to polygon:
##
## create table us_cnty_poly as
## select
## co99_d00.state,
## co99_d00.county,
## co99_d00.fips,
## (ST_Dump(co99_d00.geom)).geom as geom
## from co99_d00
## group by state,
## co99_d00.county,
## co99_d00.fips,
## co99_d00.geom
## order by co99_d00.state,
## co99_d00.county;
##
## create index idx_us_cnty_poly_gist on us_cnty_poly using gist(geom);
## select populate_geometry_columns('us_cnty_poly'::regclass);
## (can use pgsql2shp and use these to plot county outlines if it doesn't
## make the map look too busy aesthetically)
##
## (next centroids from the polygons)
## create table us_cnty_centroid
## as select state, county, fips,
## ST_Centroid(geom) as centroid,
## ST_AsText(ST_Centroid(geom)) as point_wkt,
## ST_AsEWKB(ST_Centroid(geom)) as cent_b,
## ST_y(ST_Centroid(geom)) as lat,
## ST_x(ST_Centroid(geom)) as long
## from us_cnty_poly;
##
## create index idx_us_cnty_centroid_gist on us_cnty_centroid using gist(centroid);
## select populate_geometry_columns('us_cnty_centroid'::regclass);
## gcIntermediate can consume different types of geometries and I want to play around
## hence the many different geometries in the centroid table
##
## Ship it off to use with R using pgsql2shp from command line
## -- remember to refresh your tables in pgAdmin prior to shipping or
## -- pgsql2shp will just ship a .dbf which won't help maptools
## you'll know you didn't refresh if you see this:
## Done (postgis major version: 2).
## Output shape: Unknown <- this tells you didn't refresh
## Dumping: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX [3118 rows].
##
## rather than this
## Initializing...
## Done (postgis major version: 2).
## Output shape: Point <- ah, points, lovely points
## Dumping: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX [3118 rows].
##
## ship it off
##
##C:\Documents and Settings\Owner>pgsql2shp -f c:/gisdata/us_cnty_centroid -h localhost -p 5434 -u postgres geocoder us_cn
## ty_centroid
## Initializing...
## Done (postgis major version: 2).
## Output shape: Point
## Dumping: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX [3489 rows].
##
## and ship off us_cnty_poly as well
## and now we can play with R
## read in Counties - note colClass that preserves leading 0's in fips
## this is from POSTGIS of - had to fix several 02(state) longitude that were
## missing '-' ie. -178.xxxxxxxxx was 178.xxxxxxxx that snarled antipodal
## comes up further down - fixed in Notepad** on visual inspection
## have to come up with a way to change programmatically.
##
require(maptools)
county_centroid <- readShapeSpatial("c:/gisdata/us_cnty_centroid.shp" )
names(county_centroid)
names(county_centroid) <- c("state", "county", "fips", "point_wkt", "cent_b","lat", "long") ##lower case
##see what we've got so far
str(county_centroid)
## address those factors
## county_centroid$state <- as.character(county_centroid$state)
## county_centroid$county <- as.character(county_centroid$county)
## county_centroid$fips <- as.character(county_centroid$fips)
## county_centroid$point_wkt <- as.character(county_centroid$point_wkt)
## county_centroid$centroid_b <-as.character(county_centroid$centroid_b)
## county_centroid$cent_ewkt <- as.character(county_centroid$cent_ewkt)
## put proj$string
proj4string(county_centroid) <- CRS(as.character("+proj=longlat +ellps=GRS80"))
proj = proj4string(county_centroid) ## just storing this for later
## we're matching on fips so check number of characters
min(nchar(as.character(county_centroid$fips))) ## [1] 5
## read the inflow data 2009_2010
## I took out the file header with Notepad++ and examined the file
## for obvious errors like embedded "
## source - http://www.irs.gov/pub/irs-soi/countyinflow0910.csv
## except for " scrubbing as it comes from IRS
inflow0910 = read.csv("c:/gisdata/census/migration/countyinflow0910.csv",
sep=",",
stringsAsFactors=FALSE,
as.is=TRUE,
colClass=c(rep("character",6),rep("numeric",3)),
col.names=c("st_code_dest","co_code_dest","st_code_orig","co_code_orig",
"st_abbv","co_name","return_num","exmpt_num","aggr_agi")
)
## since we're going to match this data with census maps by fips
## codes - check the IRS fips are giving valid fips code, five characters -
## leading two characters - state code, last three characters - county
require(stringr)
## actually want nchar() for these tests
max(nchar(inflow0910$st_code_dest))## this comes up 2
min(nchar(inflow0910$st_code_dest))## this comes up 1
min(nchar(inflow0910$co_code_orig))## this comes up 1
min(nchar(inflow0910$co_code_dest))## this comes up 1
## so the IRS files strip out leading 0's so we put them back in -
## from names(inflow0910)
## [1] "st_code_dest" "co_code_dest" "st_code_orig" "co_code_orig"
## we have four codes that need left padding with 0's
inflow0910$st_code_dest <- str_pad(c(inflow0910$st_code_dest),
2, side="left", pad="0")
min(nchar(inflow0910$st_code_dest)) #2
## now fix the other three
inflow0910$st_code_orig <- str_pad(c(inflow0910$st_code_orig),
2, side="left", pad="0")
inflow0910$co_code_dest <- str_pad(c(inflow0910$co_code_dest),
3, side="left", pad="0")
inflow0910$co_code_orig <- str_pad(c(inflow0910$co_code_orig),
3, side="left", pad="0")
nrow(inflow0910)
## we've still got something like 30,000 summary records
## that won't match against county centroids and cause
## mismatches in gcIntermediate so we dispatch them with some sqldf
## this will leave a county matches itself but we may find that useful
## this could be achieved with merge but I don't know how yet
require(sqldf)
## take out summary records
inflow0910 <- sqldf("SELECT st_code_dest, co_code_dest, st_code_orig, co_code_orig,
st_abbv, co_name, return_num, exmpt_num, aggr_agi FROM inflow0910 WHERE
st_code_dest NOT LIKE '00' and st_code_orig NOT LIKE '00' and st_code_dest NOT LIKE '01'
and st_code_orig NOT LIKE '01'and st_code_orig NOT LIKE '96' and st_code_orig NOT LIKE '97'
and st_code_orig NOT LIKE '98' and st_code_dest NOT LIKE '96' and st_code_dest NOT LIKE '97'
and st_code_dest NOT LIKE '98' and co_code_dest NOT LIKE '000' and st_abbv NOT LIKE 'DS'
and st_abbv NOT LIKE 'SS' and st_abbv NOT LIKE 'FR' and co_code_orig NOT LIKE '000'")
## compute fips_in_orig fips_in_dest
inflow0910$fips_in_dest <- paste(inflow0910$st_code_dest, inflow0910$co_code_dest, sep="")
inflow0910$fips_in_orig <- paste(inflow0910$st_code_orig, inflow0910$co_code_orig, sep="")
head(inflow0910_sql) ## take a look at it
tail(inflow0910_sql) ## look again
#
# read the outflow data 2009_10
## again took out header and cleaned out " around text in Notepad++
## which wasn't present in the inflow file
## source - http://www.irs.gov/pub/irs-soi/countyoutflow0910.csv
outflow0910 = read.csv("c:/gisdata/census/migration/countyoutflow0910.csv",
sep=",",
stringsAsFactors=FALSE,
as.is=TRUE,
colClass=c(rep("character",6),rep("numeric",3)),
col.names=c("st_code_orig", "co_code_orig", "st_code_dest", "co_code_dest",
"st_abbv", "co_name", "return_num", "exmpt_num", "aggr_agi")
)
## fix these up with padding as above
outflow0910$st_code_dest <- str_pad(c(outflow0910$st_code_dest),2, side="left", pad="0")
outflow0910$st_code_orig <- str_pad(c(outflow0910$st_code_orig),2, side="left", pad="0")
outflow0910$co_code_dest <- str_pad(c(outflow0910$co_code_dest),3, side="left", pad="0")
outflow0910$co_code_orig <- str_pad(c(outflow0910$co_code_orig),3, side="left", pad="0")
## take out summary records
outflow0910 <- sqldf("SELECT st_code_dest, co_code_dest, st_code_orig, co_code_orig,
st_abbv, co_name, return_num, exmpt_num, aggr_agi FROM outflow0910 WHERE
st_code_dest NOT LIKE '00' and st_code_orig NOT LIKE '00' and st_code_dest NOT LIKE '01'
and st_code_orig NOT LIKE '01'and st_code_orig NOT LIKE '96' and st_code_orig NOT LIKE '97'
and st_code_orig NOT LIKE '98' and st_code_dest NOT LIKE '96' and st_code_dest NOT LIKE '97'
and st_code_dest NOT LIKE '98' and co_code_dest NOT LIKE '000' and st_abbv NOT LIKE 'DS'
and st_abbv NOT LIKE 'SS' and st_abbv NOT LIKE 'FR' and co_code_orig NOT LIKE '000'")
head(outflow0910_sql)
tail(outflow0910_sql)
## compute outflow origin FIPS code
outflow0910$fips_out_orig <- paste(outflow0910$st_code_orig, outflow0910$co_code_orig, sep="")
## compute outflow destination FIPS
outflow0910$fips_out_dest <- paste(outflow0910$st_code_dest, outflow0910$co_code_dest, sep="")
## library(maptools)
## map data from http://www.census.gov/geo/www/cob/co2000.html
## state.map <- readShapeSpatial("c:/gisdata/st99_d00.shp")
## county.map <- readShapeSpatial("c:/gisdata/census/co99_d00.shp")
# Color
pal <- colorRampPalette(c("#f2f2f2", "green"))
colors <- pal(100)
pal2 <- colorRampPalette(c("pink", "red"))
colors2 <- pal2(100)
xlim <- c(-171.738281, -56.601563)
ylim <- c(12.039321, 71.856229)
## get rid of cases that aren't running all the way through
movein_0910 <- unique(inflow0910$st_abbv)
movein_0910 <- movein_0910[!grepl("TX$|IL$|MN$|CO$|NM$|AZ$|CA$|WA$|PR$|AK$|NV$|UT$", movein_0910)]
moveout_0910 <- unique(outflow_0910$st_abbv)
moveout_0910 <- moveout_0910[!grepl("TX$|IL$|MN$|CO$|NM$|AZ$|CA$|WA$|PR$|AK$|NV$|UT$", moveout_0910)]
for (i in 1:length(movein_0910)) {
map("world", col="#f2f2f2", fill=TRUE, bg="white", lwd=0.05, xlim=xlim, ylim=ylim)
m_insub <- inflow0910[inflow0910$st_abbv == "NY",] ##options "NY",] or movein_0910[1],]
m_insub <- m_insub[order(m_insub$return_num),]
max_return_num <- max(m_insub$return_num)
for (j in 1:(length)(m_insub$fips_in_orig)) {
message(j)
##for(k in 1:length) (inter)) {
##message(k)
movein_orig <- county_centroid[county_centroid$fips == (m_insub[j,]$fips_in_orig),]
movein_dest <- county_centroid[county_centroid$fips == (m_insub[j,]$fips_in_dest),]
inter <- gcIntermediate(c(movein_orig[1,]$long, movein_orig[1,]$lat), c(movein_dest[1,]$long,
movein_dest[1,]$lat),sp=TRUE, n=100, addStartEnd=TRUE)
lines_in <<- inter
colindex <- round((m_insub[j,]$return_num / (max_return_num/10000)) * length(colors))#per Paul Butler Facebook visualize
lines(inter, col=colors[colindex], lwd=0.6)
}
##}
dev.off()
}
(i in 1:length(moveout_0910)) {
m_outsub <- outflow0910[outflow0910$st_abbv == moveout_0910[i],] ##was "NY",]
m_outsub <- m_outsub[order(m_outsub$return_num),]
max_return_num <- max(m_outsub$return_num)
for (j in 1:length(m_outsub$fips_out_orig)) {
message(j)
moveout_orig <- county_centroid[county_centroid$fips == (m_outsub[j,]$fips_out_orig),]
moveout_dest <- county_centroid[county_centroid$fips == (m_outsub[j,]$fips_out_dest),]
inter <- gcIntermediate(c(moveout_orig[1,]$long, moveout_orig[1,]$lat), c(moveout_dest[1,]$long,
moveout_dest[1,]$lat), sp=TRUE, n=100, addStartEnd=TRUE)
lines_out <<- inter
colindex2 <- round( (m_outsub[j,]$return_num /( max_return_num/10000)) * length(colors2))
##for k in 1:length(inter){
##lines_out <<- inter[k,]$
lines(inter, col=colors2[colindex2], lwd=0.6)
}
dev.off()
}
require(raster)
require(gdistance)
|
53d8e73314c54732a1e30659026dee7b3902015d | 6618b20129735112fef493bdd8c497152fdb8805 | /ggplot.R | 77c30cba781e100025ba4f94a282fb93ee3013a9 | [] | no_license | Karan5161/R | 6a859185bb0c7540dbadd678038ed497c76e6892 | b52f534ad1863f68bc829a97f51052c68baf02c8 | refs/heads/main | 2023-05-13T13:24:52.030046 | 2021-06-09T04:58:26 | 2021-06-09T04:58:26 | 354,505,512 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,419 | r | ggplot.R | library(ggplot2)
data(mpg)
view(mpg)
# plain canvas
ggplot()
# with axis
ggplot(mpg, aes(x = displ, y = hwy))
# with values using geom_point
ggplot(mpg, aes(x = displ, y = hwy)) + geom_point()
ggplot(mpg, aes(displ, hwy)) + geom_point()
ggplot(mpg, aes(displ, cty, colour = class)) +
geom_point()
ggplot(mpg, aes(displ, cty, shape = drv)) +
geom_point()
ggplot(mpg, aes(displ, cty, size = cyl)) +
geom_point()
ggplot(mpg, aes(displ, cty, colour = class, shape = drv)) +
geom_point()
ggplot(mpg, aes(displ, hwy)) + geom_point(colour = "Blue")
-------------------------------------------------------------
#facetting
# to display multiple chart
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
facet_wrap(~class)
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
geom_smooth()
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
geom_smooth(se = F)
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
geom_smooth(span = 0.2)
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
geom_smooth(span = 0.2, se = F)
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
geom_smooth() +
facet_wrap(~year)
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
geom_smooth() +
facet_wrap(~class)
ggplot(mpg, aes(displ, hwy)) +
geom_point() +
geom_smooth(method = "gam", formula = y ~ s(x))
ggplot(mpg, aes(drv, hwy)) +
geom_point()
ggplot(mpg, aes(drv, hwy)) +
geom_jitter()
ggplot(mpg, aes(drv, hwy)) +
geom_jitter(width = 0.1)
ggplot(mpg, aes(drv, hwy, colour = class)) +
geom_jitter(width = 0.1)
ggplot(mpg, aes(drv, hwy)) +
geom_boxplot()
ggplot(mpg, aes(drv, hwy)) +
geom_violin()
ggplot(mpg, aes(hwy)) +
geom_histogram()
ggplot(mpg, aes(hwy)) +
geom_freqpoly()
ggplot(mpg, aes(hwy)) +
geom_freqpoly(binwidth = 2.5)
ggplot(mpg, aes(hwy)) +
geom_freqpoly(binwidth = 1)
ggplot(mpg, aes(displ, colour = drv)) +
geom_freqpoly(binwidth = 0.5)
ggplot(mpg, aes(displ, fill = drv)) +
geom_freqpoly(binwidth = 0.5) +
facet_wrap(~drv, ncol = 1)
ggplot(mpg, aes(displ, fill = drv)) +
geom_freqpoly(binwidth = 0.5) +
facet_wrap(~drv, ncol = 2)
ggplot(mpg, aes(manufacturer)) + geom_bar()
------------------------------------------------
#to understand barchart in depth
drugs <- data.frame(
drug = c("a", "b", "c"),
effect = c(4.2, 9.7, 6.1)
)
ggplot(drugs, aes(drug, effect)) +
geom_bar(stat = "identity")
data("economics")
ggplot(economics, aes(date, unemploy / pop)) +
geom_line()
ggplot(economics, aes(date, uempmed)) +
geom_line()
year <- function(x) as.POSIXlt(x)$year + 1900
ggplot(economics, aes(unemploy / pop, uempmed)) +
geom_path(colour = "grey50") +
geom_point(aes(colour = year(date)))
ggplot(mpg, aes(cty, hwy)) +
geom_point(alpha = 0.1)
ggplot(mpg, aes(cty, hwy)) +
geom_point(alpha = 1 / 3) +
xlab("City driving (mpg)") +
ylab("Hoghway driving (mpg)")
ggplot(mpg, aes(cty, hwy)) +
geom_point(alpha = 1 / 3) +
xlab(NULL) +
ylab(NULL)
ggplot(mpg, aes(drv, hwy)) +
geom_jitter(width = 0.25) +
xlim("f", "r") +
ylim(20, 30)
ggplot(mpg, aes(drv, hwy)) +
geom_jitter(width = 0.25, na.rm = T) +
ylim(NA, 30)
p <- ggplot(mpg, aes(displ, hwy, colour = factor(cyl))) +
geom_point()
p
--------------------------------------------------------
# to save the data or plot
saveRDS(p, "plot.rds")
getwd()
ggsave("plot.png", width = 8, height = 5, dpi = 300)
summary(p)
df <- data.frame(
x = c(3,1,5),
y = c(2,4,6),
label = c("a", "b", "c")
)
p <- ggplot(df, aes(x, y, label = label)) +
labs(x = NULL, y = NULL) +
theme(plot.title = element_text(size = 12))
p
p + geom_point() + ggtitle("Point")
p + geom_text() + ggtitle("Text")
p + geom_bar(stat = "Identity") + ggtile("Bar")
p + geom_tile() + ggtitle("Raster")
p + geom_line() + ggtitle("Line")
p + geom_area() + ggtitle("Area")
p + geom_path() + ggtitle("Path")
p + geom_polygon() + ggtitle("Polygon")
df <- data.frame(x = 1, y = 3.1, face = c("plain", "bold", "italic"))
ggplot(df, aes(x, y)) +
geom_text(aes(label = face, fontface = face))
df
ggplot(mpg, aes(class, fill = drv)) +
geom_bar()
ggplot(mpg, aes(class, fill = drv)) +
geom_bar(position = "stack")
ggplot(mpg, aes(class, fill = drv)) +
geom_bar(position = "dodge")
ggplot(mpg, aes(class, fill = drv)) +
geom_bar(position = "fill")
ggplot(mpg, aes(class, fill = drv)) +
geom_bar(position = "identity", alpha = 1/2, colour="grey50")
ggplot(mpg, aes(class, fill = hwy)) +
geom_bar(position = "stack")
ggplot(mpg, aes(class, fill = hwy, group = hwy)) +
geom_bar(position = "stack")
ggplot(mpg, aes(drv, hwy)) +
geom_boxplot() +
geom_line(colour = "#3366FF", alpha = 0.5)
-----------------------------------------------------------
#24/10/2020
library(dplyr)
install.packages("maps")
library(maps)
mi_countries <- map_data("world", "India")
df <- maps::world.cities
mi_cities <- maps::world.cities %>%
as_tibble() %>%
filter(country.etc == "India") %>%
arrange(desc(pop))
ggplot(mi_cities, aes(long, lat)) +
geom_point(aes(size = pop)) +
scale_size_area() +
coord_quickmap()
ggplot(mi_cities, aes(long, lat)) +
geom_polygon(aes(group = group), mi_countries, fill = NA, colour ="grey50") + geom_point(aes(size = pop), colour = "red") +
scale_size_area() +
coord_quickmap()
ggplot(diamonds, aes(color, price)) +
geom_bar(stat = "summary_bin", fun = mean)
p <- ggplot(mpg, aes(displ, hwy))
p
p + geom_point()
p + layer(mapping = NULL,
data = NULL,
geom = "point", params = list(),
stat = "identity",
position = "identity"
)
library(ggplot2)
df <- data.frame(x = rnorm(2000), y = rnorm(2000))
norm <- ggplot(df, aes(x, y)) + xlab(NULL) + ylab(NULL)
norm + geom_point()
norm + geom_point(shape = 14)
norm + geom_point(shape = ".")
ggplot(mpg, aes(displ, hwy, colour = class)) +
geom_point()
ggplot(mpg,aes(displ, hwy)) +
geom_point(aes(colour = class))
ggplot(mpg) +
geom_point(aes(displ, hwy, colour = class))
ggplot() +
geom_point(data=mpg, aes(displ, hwy, colour = class))
ggplot(mpg, aes(displ,hwy, colour = class)) +
geom_point() +
geom_smooth(method = "lm", se = FALSE) +
theme(legend.position = "none")
ggplot(mpg, aes(displ,hwy)) +
geom_point(aes(colour=class)) +
geom_smooth(method = "lm", se = FALSE) +
theme(legend.position = "right")
ggplot(mpg, aes(displ,hwy)) +
geom_point(aes(colour=class)) +
geom_smooth(method = "lm", se = FALSE) +
labs(x="Displacement", y ="Highway MPG", color = "Vehical Class)")
ggplot(mpg, aes(displ,hwy)) +
geom_point(aes(colour=class)) +
geom_smooth(method = "lm", se = FALSE) +
labs(x="Displacement", y ="Highway MPG", color = "Vehical Class)") +
scale_x_continuous(breaks = c(4,6))
ggplot(mpg, aes(displ,hwy)) +
geom_point(aes(colour=class)) +
geom_smooth(method = "lm", se = FALSE) +
labs(x="Displacement", y ="Highway MPG", color = "Vehical Class)") +
scale_x_continuous(breaks = c(4,6), labels = c("disp 4", "Disp 6"))
----------------------------------------------------------------------
# to convert given number to mentioned format
scales::label_dollar()(4)
scales::label_percent()(0.5)
ggplot(mpg, aes(displ,hwy)) +
geom_point(aes(colour=class)) +
geom_smooth(method = "lm", se = FALSE) +
labs(x="Displacement", y ="Highway MPG", color = "Vehical Class)") +
scale_x_continuous(breaks = c(4,6), labels = c("disp 4", "Disp 6")) +
scale_y_continuous(labels = scales::label_dollar())
?scales::dollar_format
# to change the color size (sequence should be same)
ggplot(mpg, aes(displ, hwy)) +
geom_point(size = 4, colour = "grey20") +
geom_point(aes(colour = class), size = 5)
ggplot(mpg, aes(displ, hwy)) +
geom_point(size = 4, colour = "grey20") +
geom_point(aes(size = 5), colour = "grey20")
ggplot(mpg, aes(displ, hwy)) +
geom_point(size = 4, colour = "grey20") +
geom_point(aes(size = 5), colour = "grey20", show.legend = T)
-----------------------------------------------------------
norm <- data.frame(x = rnorm(1000), y = rnorm(1000))
norm
#create column z and gave a b c value
norm$z <- cut(norm$x, 3, labels = c("a", "b", "c"))
norm
ggplot(norm, aes(x, y)) +
geom_point(aes(colour = z), alpha = 0.1)
# to change the transparency
ggplot(norm, aes(x, y)) +
geom_point(aes(colour = z), alpha = 0.1) +
guides(colour = guide_legend(override.aes = list(alpha = 1)))
p <- ggplot(mpg, aes(displ, hwy)) +
geom_point(aes(colour = class), size = 5) +
geom_point(size = 2, colour = "grey20", show.legend = T)
p
#to change the position of legend(value description box)
p + theme(legend.position = c(1,0), legend.justification = c(1,0))
p + theme(legend.position = c(1,1), legend.justification = c(0,0))
-------------------------------------------------------------------
# PIECHART
theme_set(theme_classic())
df <- as.data.frame(table(mpg$class))
colnames(df) <- c("class", "freq")
df
#first create barchart
pie <- ggplot(df, aes(x = "", y=freq, fill = factor(class))) +
geom_bar(width = 1, stat = "identity") + theme(axis.line =
element_blank(), plot.title = element_text(hjust = 0.5)) +
labs(fill="class", x=NULL, y=NULL, title="Pie Chart Of Class", caption="Source: MPG")
pie
#then this
pie + coord_polar(theta="y", start=-90)
# ---------------------------------------------------------------------------
# Diverging bars: per-car mpg z-score, coloured above/below the mean
# (BUG FIX: the bare '----' separator line was live code; it is a comment now)
mtcars$`car name` <- rownames(mtcars)
mtcars$mpg_z <- round((mtcars$mpg - mean(mtcars$mpg)) / sd(mtcars$mpg),2)
#in above code created new column
mtcars$mpg_type <- ifelse(mtcars$mpg_z < 0, "below", "above")
mtcars <- mtcars[order(mtcars$mpg_z), ]
# freeze the sorted order as factor levels so ggplot keeps it on the axis
mtcars$`car name` <- factor(mtcars$`car name`, levels = mtcars$`car name`)
ggplot(mtcars, aes(x=`car name`, y=mpg_z, label=mpg_z)) +
  geom_bar(stat="identity", aes(fill=mpg_type), width=0.5) +
  scale_fill_manual(name = "Mileage", labels = c("Above Average", "Below
Average"), values = c("above"="#00ba38", "below"="#f8766d")) +
  labs(subtitle = "Normalised mileage from 'mtcars'", title =
         "Diverging Bars") + coord_flip()
?scale_fill_brewer
# Histogram of engine displacement, filled by vehicle class
g <- ggplot(mpg, aes(displ)) +
  scale_fill_brewer(palette = "Spectral")
# BUG FIX: 'sizw' was a typo for 'size' (the misspelled argument was silently
# ignored by geom_histogram)
g + geom_histogram(aes(fill=class), binwidth = 0.2, col="black",
                   size = 0.1) +
  labs(title="Histogram with Auto Binning", subtitle= "Engine
Displacement across vehical classes")
#we can add mention bin numbers too
g + geom_histogram(aes(fill=class), bins=10, col="black",
                   size = 0.1) +
  labs(title="Histogram with Auto Binning", subtitle= "Engine
Displacement across vehical classes")
# Bar chart on a categorical variable
g <- ggplot(mpg, aes(manufacturer))
g + geom_bar(aes(fill=class), width = 0.5) +
  # NOTE(review): vjust = 60 is far outside the usual 0-1 range -- confirm intent
  theme(axis.text.x = element_text(angle =90,vjust=60)) +
  labs(title="Histogram on Categorical Variable",
       subtitle = "Manufacturer across Behical Classes")
# ---------------------------------------------------------------------------
# iris scatterplots: progressively mapping more aesthetics
# (BUG FIX: the bare '----' separator line was live code; it is a comment now)
plot(iris)
plot(iris$Petal.Length, iris$Sepal.Width)
ggplot(iris, aes(x=Petal.Length, y=Sepal.Width)) +
  geom_point()
# colour by species
ggplot(iris, aes(x=Petal.Length, y=Sepal.Width, col = Species)) +
  geom_point()
# size by petal width
ggplot(iris, aes(x=Petal.Length, y=Sepal.Width,
                 col = Species, size = Petal.Width)) +
  geom_point()
# shape by species as well
ggplot(iris, aes(x=Petal.Length, y=Sepal.Width,
                 col = Species, size = Petal.Width, shape = Species)) +
  geom_point()
# transparency driven by sepal length
ggplot(iris, aes(x=Petal.Length, y=Sepal.Width,
                 col = Species, size = Petal.Width, shape = Species,
                 alpha = Sepal.Length)) + geom_point()
# bar of the mean sepal length per species (stat = "summary" aggregates)
ggplot(iris, aes(Species, Sepal.Length, fill = Species)) +
  geom_bar(stat = "summary", fun = "mean")
# overlay the raw points to reveal outliers
ggplot(iris, aes(Species, Sepal.Length)) +
  geom_bar(stat = "summary", fun = "mean", fill = "#ff0080",
           col = "black") + geom_point()
# jitter the points so overlapping values are visible
ggplot(iris, aes(Species, Sepal.Length)) +
  geom_bar(stat = "summary", fun = "mean", fill = "#ff0080",
           col = "black") + geom_point(position = position_jitter(0.2),
                                       size = 3, shape = 21)
# ---------------------------------------------------------------------------
# Build a reusable plot, then try different themes
# (BUG FIX: the bare '----' separator lines were live code; comments now)
myplot <- ggplot(iris, aes(Species, Sepal.Length)) +
  geom_bar(stat = "summary", fun = "mean", fill = "#ff0080",
           col = "black") + geom_point(position = position_jitter(0.2),
                                       size = 3, shape = 21)
myplot
# BUG FIX: the original assigned a bare theme() object to myplot (discarding
# the plot entirely) and used 'position', which is not a valid theme element;
# add the theme onto the existing plot instead.
myplot <- myplot +
  theme(panel.background = element_rect(fill = "white"),
        panel.border = element_rect(colour = "black", fill = NA,
                                    size = 0.2))
myplot
myplot + theme_bw()
myplot + theme_classic()
myplot + theme_linedraw() + theme(panel.background =
                                    element_rect(fill = "blue"))

# ---------------------------------------------------------------------------
# Notched boxplot of sepal length by species
ggplot(iris, aes(Species, Sepal.Length)) +
  geom_point() +
  geom_boxplot(fill = "#ff0080", col = "black", notch = TRUE)
# final annotated version of the bar/point plot
myplot + theme_bw() +
  labs(x = "", y = "Sepal length (mm)") +
  ggtitle("Sepal length by iris species") +
  theme(plot.title = element_text(hjust = 0.5))
|
76bcddc225983bb9fed8ee909c1da46b0aeef679 | c07d6d6f4778f026dc06aa81490955df662577a1 | /Custom_scripts/getBamStatSummary.R | c9ae01967a6b7b68bb7c0f75c107146f2eb060f7 | [
"MIT"
] | permissive | daewoooo/ApeInversion_paper | 7b61085d579abf9958b017279561933cb40b4b49 | 321701aa97b53ad66ec46ad6dc2338af9eacbd49 | refs/heads/master | 2020-07-12T08:31:28.340629 | 2020-03-05T16:54:10 | 2020-03-05T16:54:10 | 204,766,896 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,513 | r | getBamStatSummary.R | ## Load required libraries ##
#############################
suppressPackageStartupMessages( library(primatR) )
suppressPackageStartupMessages( library(Biostrings) )
suppressPackageStartupMessages( library(BSgenome.Hsapiens.UCSC.hg38) )
suppressPackageStartupMessages( library(BSgenome.Hsapiens.UCSC.hg38.masked) )
## Get genome size ##
#####################
# Effective (non-gap) genome size: total length of chr1-22 + chrX minus the
# assembly-gap mask width; used below to turn read counts into depth.
genome <- BSgenome.Hsapiens.UCSC.hg38.masked
ungapped.len <- vapply(paste0('chr', c(1:22, 'X')), function(chr) {
  chr.len <- length(genome[[chr]])
  # masks(...)[1] selects the assembly-gap (AGAPS) mask of the chromosome
  gap.len <- maskedwidth(masks(genome[[chr]])[1])
  chr.len - gap.len
}, numeric(1))
genomeSize <- sum(ungapped.len)
## Parameters ##
################
# Assumed sequencing read length (bp); used to convert read counts to depth.
read.len <- 80
# All summary tables and plots are written into this folder.
outputfolder <- "/home/porubsky/WORK/Great_apes/Data_stats"
if (!dir.exists(outputfolder)) dir.create(outputfolder)
## Load required data ##
########################
# The four great-ape species are processed identically; the original script
# repeated the same ~20 lines per species, which is factored out here.

# Compute per-library and merged BAM statistics for one species.
#
# species.id - label used in messages, in the output 'ID' column and in the
#              output file name (e.g. 'chimpanzee')
# bam.dir    - directory containing the selected single-cell BAM files
# merged.bam - path to the merged BAM file for the species
#
# Writes '<species.id>_selected_bamStat.txt' into `outputfolder` and returns
# the combined data.frame invisibly. Depends on the script-level `read.len`,
# `genomeSize` and `outputfolder`.
processSpeciesBams <- function(species.id, bam.dir, merged.bam) {
  message("Calculating BAM stats for ", species.id, " ...")
  chroms <- paste0('chr', c(1:22, 'X'))
  ## Per single-cell library statistics
  bams <- list.files(bam.dir, pattern = "\\.bam$", full.names = TRUE)
  percell.stat <- lapply(bams, function(bam) {
    bam2stat(bamfile = bam, min.mapq = 10, chromosomes = chroms,
             filt.alt = FALSE, filt.flag = 3328)
  })
  percell.stat <- do.call(rbind, percell.stat)
  percell.stat$depth <- (percell.stat$total.reads * read.len) / genomeSize
  percell.stat$covered.pos.perc <- (percell.stat$covered.pos / genomeSize) * 100
  ## Merged BAM statistics, reported under the filename 'total'
  merged.stat <- bam2stat(bamfile = merged.bam, chunkSize = 10000, min.mapq = 10,
                          chromosomes = chroms, filt.alt = FALSE, filt.flag = 3328)
  merged.stat$depth <- (merged.stat$total.reads * read.len) / genomeSize
  merged.stat$covered.pos.perc <- (merged.stat$covered.pos / genomeSize) * 100
  merged.stat$filename <- 'total'
  ## Combine and export
  final.df <- rbind(percell.stat, merged.stat)
  final.df$ID <- species.id
  destination <- file.path(outputfolder, paste0(species.id, '_selected_bamStat.txt'))
  write.table(final.df, file = destination, quote = FALSE, row.names = FALSE)
  invisible(final.df)
}

## Chimpanzee
processSpeciesBams('chimpanzee',
                   "/home/porubsky/WORK/Great_apes/Chimpanzee/Dorien_Bams_GRCh38/selected/",
                   "/home/porubsky/WORK/Great_apes/Chimpanzee/Dorien_Bams_GRCh38/Dorien_GRCh38_merged/Dorien_GRCh38_selected.bam")
## Bonobo
processSpeciesBams('bonobo',
                   "/home/porubsky/WORK/Great_apes/Bonobo/Ulindi_Bams_GRCh38/selected/",
                   "/home/porubsky/WORK/Great_apes/Bonobo/Ulindi_Bams_GRCh38/Bonobo_GRCh38_merged/bonobo_GRCh38_selected_merged.bam")
## Gorilla
processSpeciesBams('gorilla',
                   "/home/porubsky/WORK/Great_apes/Gorilla/Ashley_bam/selected/",
                   "/home/porubsky/WORK/Great_apes/Gorilla/Ashley_bam/Gorilla_GRCh38_merged/gorilla_GRCh38_selected_merged.bam")
## Orangutan
processSpeciesBams('orangutan',
                   "/home/porubsky/WORK/Great_apes/Orangutan/Ashley_bam/selected/",
                   "/home/porubsky/WORK/Great_apes/Orangutan/Ashley_bam/Orangutan_GRCh38_merged/orangutan_GRCh38_selected_merged.bam")
## Load BAM stat data back from disk ##
readStat <- function(species.id) {
  read.table(file.path(outputfolder, paste0(species.id, "_selected_bamStat.txt")),
             header = TRUE)
}
chimpanzee.stat <- readStat("chimpanzee")
bonobo.stat <- readStat("bonobo")
gorilla.stat <- readStat("gorilla")
orangutan.stat <- readStat("orangutan")
## The last row of each table holds the merged ('total') statistics; all
## earlier rows are individual single-cell libraries.
stat.list <- list(chimpanzee.stat, bonobo.stat, gorilla.stat, orangutan.stat)
total.counts <- do.call(rbind, lapply(stat.list, function(df) df[nrow(df), ]))
single.cell.counts <- do.call(rbind, lapply(stat.list, function(df) df[-nrow(df), ]))
library.counts.perIndivid <- table(single.cell.counts$ID)
## Plot BAM stats
message("Preparing plots ...")
suppressPackageStartupMessages( library(reshape2) )
# One bar per individual, faceted by metric.
plt.df <- total.counts[c('total.reads', 'depth', 'covered.pos.perc', 'ID')]
colnames(plt.df) <- c('Total # of reads', 'Depth of coverage', '% of genome covered', 'ID')
# Append the number of single-cell libraries to each individual's axis label.
# BUG FIX: index the counts by ID name -- table() output is alphabetical
# (bonobo first) while the rows here are in processing order (chimpanzee
# first), so positional recycling mislabelled the library counts.
plt.df$ID <- paste0(plt.df$ID, "\n(n=", library.counts.perIndivid[as.character(plt.df$ID)], ")")
plt.df <- melt(plt.df, measure.vars = c('Total # of reads', 'Depth of coverage', '% of genome covered'), id.vars = 'ID')
# NOTE(review): ggplot2 and scales::comma are used here but never attached in
# this script -- presumably pulled in via primatR; confirm.
plt <- ggplot(plt.df) +
  geom_col(aes(x=ID, y=value, fill=ID)) +
  scale_fill_manual(values = c('#3182bd','#31a354','#8856a7','#e6550d'), name="") +
  scale_y_continuous(labels = comma) +
  facet_grid(variable ~ ., scales = 'free') +
  xlab("") + ylab("") +
  theme(legend.position = "none")
## Save plot
destination <- file.path(outputfolder, "total_stat.pdf")
ggsave(plt, filename = destination, width = 5, height = 6)
## Plot per-library distributions of the same three metrics
# (the 'my_theme' object defined in the original was never applied to any
# plot and has been removed)
p1 <- ggplot(single.cell.counts) +
  geom_boxplot(aes(x=ID, y=total.reads, fill=ID)) +
  scale_fill_manual(values = c('#3182bd','#31a354','#8856a7','#e6550d'), guide="none") +
  ylab("Total # of reads") + xlab("")
p2 <- ggplot(single.cell.counts) +
  geom_boxplot(aes(x=ID, y=depth, fill=ID)) +
  scale_fill_manual(values = c('#3182bd','#31a354','#8856a7','#e6550d'), guide="none") +
  ylab("Depth of coverage") + xlab("")
p3 <- ggplot(single.cell.counts) +
  geom_boxplot(aes(x=ID, y=covered.pos.perc, fill=ID)) +
  scale_fill_manual(values = c('#3182bd','#31a354','#8856a7','#e6550d'), guide="none") +
  ylab("% of genome covered") + xlab("")
# NOTE(review): plot_grid() comes from cowplot, which is never attached in
# this script -- presumably loaded via a dependency; confirm.
plt <- plot_grid(p1, p2, p3, nrow = 1)
## Save plot
destination <- file.path(outputfolder, "perCell_stat.pdf")
ggsave(plt, filename = destination, width = 10, height = 5)
message("DONE!!!")
ffdc99cb062621dfd5a391633cf1c3618600989c | e1c388af0d6464a60848d46e3379c1ce24deb7e3 | /R/flow.r | 6282bbefbed0b946b36ce4153a2d3e5470d68123 | [] | no_license | mengeln/PHAB-metrics | 485886dbf0591be17f6050f0ea3e0a2d5cb3e9e6 | 6387e32611cc9eecb53ef4a5e9dbab79f83eb0bd | refs/heads/master | 2021-03-12T23:55:50.680066 | 2013-02-06T21:28:19 | 2013-02-06T21:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,378 | r | flow.r | ###Read in requested observations
setwd("L:/Bioassessment Data Management Tools_RM/R Scripts")
rowinput <- read.csv("input.csv", header=T)
options(useFancyQuotes = F)
rowinput$StationCode <- sQuote(rowinput$StationCode)
SC <- paste(rowinput$StationCode, collapse="")
SC <- gsub("''", "', '", SC)
###Construct Query
select <- "SELECT Sample_Entry.ProjectCode, Sample_Entry.SampleDate, StationLookUp.Waterbody, Sample_Entry.StationCode, Sample_Entry.AgencyCode, Geometry_Entry.LocationRowID, Location_Entry.LocationCode, Location_Entry.LocationRowID, Location_Entry.GeometryShape, Geometry_Entry.Latitude, Geometry_Entry.Longitude, FractionLookUp.FractionName, UnitLookUp.UnitCode, UnitLookUp.UnitName, HabitatCollection_Entry.CollectionTime, HabitatCollection_Entry.HabitatCollectionComments, CollectionMethodLookUp.CollectionMethodCode, CollectionMethodLookUp.CollectionMethodName, HabitatCollection_Entry.Replicate, ConstituentLookUp.ConstituentCode, AnalyteLookUp.AnalyteName, MatrixLookUp.MatrixName, HabitatResult_Entry.VariableResult, HabitatResult_Entry.Result, MethodLookUp.MethodName, HabitatResult_Entry.QACode, HabitatResult_Entry.CollectionDeviceCode, HabitatResult_Entry.HabitatResultComments, CollectionDeviceLookUp.CollectionDeviceName, HabitatResult_Entry.ResQualCode, HabitatResult_Entry.ComplianceCode"
from <- "FROM UnitLookUp INNER JOIN (CollectionDeviceLookUp INNER JOIN (FractionLookUp INNER JOIN (AnalyteLookUp INNER JOIN ((CollectionMethodLookUp INNER JOIN (((StationLookUp INNER JOIN Sample_Entry ON StationLookUp.StationCode = Sample_Entry.StationCode) INNER JOIN (Location_Entry LEFT JOIN Geometry_Entry ON Location_Entry.LocationRowID = Geometry_Entry.LocationRowID) ON Sample_Entry.SampleRowID = Location_Entry.SampleRowID) INNER JOIN HabitatCollection_Entry ON Location_Entry.LocationRowID = HabitatCollection_Entry.LocationRowID) ON CollectionMethodLookUp.CollectionMethodCode = HabitatCollection_Entry.CollectionMethodCode) INNER JOIN (MethodLookUp INNER JOIN (MatrixLookUp INNER JOIN (ConstituentLookUp INNER JOIN HabitatResult_Entry ON ConstituentLookUp.ConstituentRowID = HabitatResult_Entry.ConstituentRowID) ON MatrixLookUp.MatrixCode = ConstituentLookUp.MatrixCode) ON MethodLookUp.MethodCode = ConstituentLookUp.MethodCode) ON HabitatCollection_Entry.HabitatCollectionRowID = HabitatResult_Entry.HabitatCollectionRowID) ON AnalyteLookUp.AnalyteCode = ConstituentLookUp.AnalyteCode) ON FractionLookUp.FractionCode = ConstituentLookUp.FractionCode) ON CollectionDeviceLookUp.CollectionDeviceCode = HabitatResult_Entry.CollectionDeviceCode) ON UnitLookUp.UnitCode = ConstituentLookUp.UnitCode"
if(length(rowinput$StationCode) < 50){
where <- paste("WHERE (((Sample_Entry.StationCode) IN (", SC, ")) AND (Location_Entry.LocationCode='X')) AND (MethodLookUp.MethodName In ('Velocity Area', 'Neutral Buoyant Object'))")}else
{where <- "WHERE (Location_Entry.LocationCode='X') AND (MethodLookUp.MethodName In ('Velocity Area', 'Neutral Buoyant Object'))"}
orderby <- "ORDER BY Sample_Entry.ProjectCode, Sample_Entry.SampleDate, Sample_Entry.StationCode, Location_Entry.LocationCode, ConstituentLookUp.ConstituentCode"
select2 <- "SELECT Sample_Entry.StationCode, Sample_Entry.SampleDate, FieldResult_Entry.CollectionDeviceCode, CollectionDeviceLookUp.CollectionDeviceName, FieldResult_Entry.CalibrationDate, FieldCollection_Entry.CollectionDepth, FieldCollection_Entry.Replicate, AnalyteLookUp.AnalyteName, FieldResult_Entry.Result, FieldResult_Entry.ResQualCode, FieldResult_Entry.QACode"
from2 <- "FROM CollectionDeviceLookUp INNER JOIN (AnalyteLookUp INNER JOIN (((Sample_Entry INNER JOIN Location_Entry ON Sample_Entry.SampleRowID = Location_Entry.SampleRowID) INNER JOIN FieldCollection_Entry ON Location_Entry.LocationRowID = FieldCollection_Entry.LocationRowID) INNER JOIN (ConstituentLookUp INNER JOIN FieldResult_Entry ON ConstituentLookUp.ConstituentRowID = FieldResult_Entry.ConstituentRowID) ON FieldCollection_Entry.FieldCollectionRowID = FieldResult_Entry.FieldCollectionRowID) ON AnalyteLookUp.AnalyteCode = ConstituentLookUp.AnalyteCode) ON CollectionDeviceLookUp.CollectionDeviceCode = FieldResult_Entry.CollectionDeviceCode"
if(length(rowinput$StationCode) < 50){
where2 <- paste("WHERE (((Sample_Entry.StationCode) IN (", SC, ")) AND ((AnalyteLookUp.AnalyteName) In ('Distance from Bank', 'StationWaterDepth', 'velocity', 'Distance, Float', 'Float time', 'wetted width')) AND ((Sample_Entry.EventCode)='ba'))")}else
{where2 <- "WHERE (AnalyteLookUp.AnalyteName) In ('Distance from Bank', 'StationWaterDepth', 'velocity', 'Distance, Float', 'Float time', 'wetted width') AND ((Sample_Entry.EventCode)='ba')"}
orderby2 <- "ORDER BY Sample_Entry.StationCode, Sample_Entry.SampleDate, AnalyteLookUp.AnalyteName"
###Connect to DB
library(RODBC)
# DSN 'SMCreporter', read-only GIS account (credentials live in the DSN; the
# empty password is intentional).
mydsn <- odbcConnect("SMCreporter", uid ="GisUser", pwd = "")
# flow: habitat results; velocity: field point measurements
flow <- data.frame(sqlQuery(mydsn, paste(select, from, where, orderby)))
velocity <- data.frame(sqlQuery(mydsn, paste(select2, from2, where2, orderby2)))
odbcClose(mydsn)
###Format Data Frame##
# "NR" (no result) codes carry no usable value -> NA.
velocity$Result[velocity$ResQualCode=="NR"] <- NA
flow$Result[flow$ResQualCode=="NR"] <- NA
# -88 is the missing-data sentinel; negative velocities are clamped to 0.
velocity$Result[velocity$Result==-88] <- NA
velocity$Result[velocity$Result<0] <- 0
library(reshape)
# Stack both result tables, key rows by station + date, and spread the
# analytes into columns; fun.aggregate=mean collapses duplicate readings per
# (id, Replicate, analyte) cell.
test <-velocity[, c("StationCode", "SampleDate", "Replicate", "AnalyteName", "Result", "ResQualCode", "QACode")]
test2 <- flow[, c("StationCode", "SampleDate", "Replicate", "AnalyteName", "Result", "ResQualCode", "QACode")]
tempp <- (rbind(test, test2))
tempp$id <- do.call(paste, c(tempp[c("StationCode", "SampleDate")]))
vmethod <- (cast(tempp, id + Replicate ~ AnalyteName, value="Result", fun.aggregate=mean))
###Calculate Velocity Area Method###
# Discharge per cell = (distance increment between adjacent stations) x
# station depth x station velocity x 0.00107639104 (unit conversion); cells
# are then summed per site/date group. Rows of vmethod are sorted by id and
# Replicate (cast output), so adjacent rows within an id are adjacent
# stations across the transect.
n.obs <- nrow(vmethod)
if (n.obs > 1) {
  d <- vmethod$"Distance from Bank"
  s <- vmethod$StationWaterDepth
  v <- vmethod$Velocity
  vmethod$flow <- c(NA, (d[-1] - d[-n.obs]) * s[-1] * v[-1] * 0.00107639104)
  # BUG FIX: the original computed a distance increment across the boundary
  # between two different site/date groups; blank out the first station of
  # every group so increments never span two samples.
  first.of.group <- c(TRUE, vmethod$id[-1] != vmethod$id[-n.obs])
  vmethod$flow[first.of.group] <- NA
} else {
  vmethod$flow <- rep(NA_real_, n.obs)
}
sumna <- function(data){sum(as.numeric(as.character(data)), na.rm = T)}
FL_Q_F<-tapply(vmethod$flow, vmethod$id, sumna)
FL_Q_F[which(FL_Q_F==0)] <-NA    # a sum of exactly 0 means no usable data
FL_Q_F[which(FL_Q_F<0)] <-0      # negative discharge is physically impossible
FL_Q_M <- FL_Q_F*0.0283168466    # cubic feet/s -> cubic meters/s
###Query for Neutrally buoyant float method###
select3 <- "SELECT Sample_Entry.StationCode, Sample_Entry.SampleDate, Location_Entry.LocationCode, AnalyteLookUp.AnalyteName, HabitatResult_Entry.Result, HabitatResult_Entry.ResQualCode, HabitatResult_Entry.QACode, HabitatCollection_Entry.Replicate"
from3 <- "FROM AnalyteLookUp INNER JOIN (((Sample_Entry INNER JOIN Location_Entry ON Sample_Entry.SampleRowID = Location_Entry.SampleRowID) INNER JOIN HabitatCollection_Entry ON Location_Entry.LocationRowID = HabitatCollection_Entry.LocationRowID) INNER JOIN (ConstituentLookUp INNER JOIN HabitatResult_Entry ON ConstituentLookUp.ConstituentRowID = HabitatResult_Entry.ConstituentRowID) ON HabitatCollection_Entry.HabitatCollectionRowID = HabitatResult_Entry.HabitatCollectionRowID) ON AnalyteLookUp.AnalyteCode = ConstituentLookUp.AnalyteCode"
if(length(rowinput$StationCode) < 50)
{where3 <- paste("WHERE (((Sample_Entry.StationCode) IN (", SC, ")) AND ((AnalyteLookUp.AnalyteName)
In ('Float Time', 'StationWaterDepth', 'Wetted Width', 'Distance, Float')) AND
((Sample_Entry.EventCode)='ba')) AND (Location_Entry.LocationCode In
('Float Reach', 'Lower Section Point 1', 'Upper Section Point3', 'Upper Section Point4',
'Lower Section Point 5', 'Middle Section Point 1', 'Upper Section Point2',
'Middle Section Point 5', 'Lower Section Point 4', 'Upper Section Point1',
'Middle Section Point 4', 'Upper Section Point5', 'Middle Section Point 2',
'Lower Section Point 2', 'Middle Section Point 3', 'Lower Section Point 3',
'Upper Section', 'Lower Section', 'Middle Section'))")}else
{where3 <- "WHERE (((AnalyteLookUp.AnalyteName)
In ('Float Time', 'StationWaterDepth', 'Wetted Width', 'Distance, Float')) AND
((Sample_Entry.EventCode)='ba')) AND (Location_Entry.LocationCode In
('Float Reach', 'Lower Section Point 1', 'Upper Section Point3', 'Upper Section Point4',
'Lower Section Point 5', 'Middle Section Point 1', 'Upper Section Point2',
'Middle Section Point 5', 'Lower Section Point 4', 'Upper Section Point1',
'Middle Section Point 4', 'Upper Section Point5', 'Middle Section Point 2',
'Lower Section Point 2', 'Middle Section Point 3', 'Lower Section Point 3',
'Upper Section', 'Lower Section', 'Middle Section', 'Float Reach TransUpper, Point1',
'Float Reach TransUpper, Point2', 'Float Reach TransUpper, Point3', 'Float Reach TransUpper, Point4', 'Float Reach TransUpper, Point5', 'Float Reach TransMiddle, Point1', 'Float Reach TransMiddle, Point2',
'Float Reach TransMiddle, Point3', 'Float Reach TransMiddle, Point4', 'Float Reach TransMiddle, Point5', 'Float Reach TransLower, Point1', 'Float Reach TransLower, Point2', 'Float Reach TransLower, Point3', 'Float Reach TransLower, Point4', 'Float Reach TransLower, Point5', 'Float Reach TransLower', 'Float Reach TransMiddle', 'Float Reach TransUpper'))"}
orderby3 <- "ORDER BY Sample_Entry.StationCode, Sample_Entry.SampleDate, AnalyteLookUp.AnalyteName"
mydsn <- odbcConnect("SMCreporter", uid ="GisUser", pwd = "")
neutral <- data.frame(sqlQuery(mydsn, paste(select3, from3, where3, orderby3)))
odbcClose(mydsn)
###Format Data Frame###
# "NR" (no result) rows carry no usable value.
neutral$Result[neutral$ResQualCode=="NR"] <- NA
# Site/date key shared with the velocity-area tables.
neutral$id<- do.call(paste, c(neutral[c("StationCode", "SampleDate")]))
# Map the many LocationCode spellings onto coarse transect labels:
#   Upper/Middle/Lower = transect-point readings (depths)
#   U/M/L              = wetted-width readings (assigned last so they win)
neutral$Location <- rep(NA, length(neutral$id))
neutral$Location[grep("Upper Section Point", neutral$LocationCode)] <- "Upper"
neutral$Location[grep("Middle Section Point", neutral$LocationCode)] <- "Middle"
neutral$Location[grep("Lower Section Point", neutral$LocationCode)] <- "Lower"
neutral$Location[grep("Float Reach TransUpper", neutral$LocationCode)] <- "Upper"
neutral$Location[grep("Float Reach TransMiddle, Point", neutral$LocationCode)] <- "Middle"
neutral$Location[grep("Float Reach TransLower", neutral$LocationCode)] <- "Lower"
# Wetted-width rows are re-labelled with the single-letter section codes.
neutral$Location[intersect(grep("Lower Section", neutral$LocationCode), (which(neutral$AnalyteName=="Wetted Width")))] <- "L"
neutral$Location[intersect(grep("Middle Section", neutral$LocationCode), (which(neutral$AnalyteName=="Wetted Width")))] <- "M"
neutral$Location[intersect(grep("Upper Section", neutral$LocationCode), (which(neutral$AnalyteName=="Wetted Width")))] <- "U"
neutral$Location[intersect(grep("Float Reach TransLower", neutral$LocationCode), (which(neutral$AnalyteName=="Wetted Width")))] <- "L"
neutral$Location[intersect(grep("Float Reach TransMiddle", neutral$LocationCode), (which(neutral$AnalyteName=="Wetted Width")))] <- "M"
neutral$Location[intersect(grep("Float Reach TransUpper", neutral$LocationCode), (which(neutral$AnalyteName=="Wetted Width")))] <- "U"
###Calculate neutral buoyant method###
# Mean cross-sectional area per site: average of the three transect
# (wetted width x mean depth) products, scaled by /100 as in the original.
ncast <- cast(neutral[neutral$LocationCode != "Float Reach",], id~Location, value="Result", fun.aggregate=mean)
ncast$r <- lapply(seq_along(ncast$id), function(i) {
  # BUG FIX: the original called mean(a, b, c, na.rm = TRUE, trim = 0);
  # mean.default's extra positional args fall into '...' and are ignored, so
  # only the first transect was averaged. Wrap the three products in c().
  mean(c(ncast$L[i] * ncast$Lower[i],
         ncast$M[i] * ncast$Middle[i],
         ncast$U[i] * ncast$Upper[i]), na.rm = TRUE) / 100
})
narea <- tapply(ncast$r, ncast$id, invisible)
# Mean float speed per site: average of the three float runs (distance/time).
ncast2 <- cast(neutral[neutral$LocationCode == "Float Reach",], id~ AnalyteName + Replicate, value="Result", fun.aggregate=NULL)
ncast2$r <- lapply(seq_along(ncast2$id), function(i) {
  # BUG FIX: mean(a, b, c) matched b to 'trim' and c to 'na.rm', returning
  # only the first run's speed; wrap the three speeds in c().
  mean(c(ncast2$"Distance, Float_1"[i] / ncast2$"Float Time_1"[i],
         ncast2$"Distance, Float_2"[i] / ncast2$"Float Time_2"[i],
         ncast2$"Distance, Float_3"[i] / ncast2$"Float Time_3"[i]))
})
nspeed <- tapply(ncast2$r, ncast2$id, invisible)
# Discharge = mean area x mean speed.
# BUG FIX: align the two vectors by site id; the original multiplied them
# positionally, which misaligns when the two tables cover different sites.
common.ids <- intersect(names(narea), names(nspeed))
FL_N_M <- as.numeric(narea[common.ids]) * as.numeric(nspeed[common.ids])
names(FL_N_M) <- common.ids
# NOTE(review): here the 'metric' value is converted with *0.0283 to get the
# 'feet' value, the opposite direction of the velocity-area section above --
# confirm the field units and whether these two labels are swapped.
FL_N_F <- FL_N_M*0.0283168466
###Format Results###
# One row per site/date id; 11 metric columns (columns 7-11 are filled and
# named in the velocity/percent-zero sections below).
result<-as.data.frame(matrix(NA, ncol=11, nrow=length(union(names(FL_N_M), names(FL_Q_M)))))
rownames(result)<-union(names(FL_N_M), names(FL_Q_M))
# BUG FIX: index rows by name instead of which(%in%); the original relied on
# positional order, which misaligns values when the two methods cover
# different, interleaved sets of sites.
result[names(FL_N_F), 1] <- FL_N_F
result[names(FL_N_M), 2] <- FL_N_M
result[names(FL_Q_F), 3] <- FL_Q_F
result[names(FL_Q_M), 4] <- FL_Q_M
# BUG FIX: the original assigned 6 column names to an 11-column data.frame,
# which errors; name only the first six columns here.
colnames(result)[1:6] <- c("FL_N_F.result", "FL_N_M.result", "FL_Q_F.result", "FL_Q_M.result", "FL_F.result", "FL_M.result")
# Combined flow: mean of the two methods where both are available.
result$FL_F.result <- unlist(lapply(seq_len(nrow(result)), function(i) {
  mean(c(result$FL_N_F.result[i], result$FL_Q_F.result[i]), na.rm = TRUE)
}))
result$FL_M.result <- unlist(lapply(seq_len(nrow(result)), function(i) {
  mean(c(result$FL_N_M.result[i], result$FL_Q_M.result[i]), na.rm = TRUE)
}))
###Mean and max water velocity###
# Max velocity per site: velocity-area sites take the max point velocity;
# float sites take the max of the three distance/time runs.
velocity_Q <- tapply(tempp[tempp$AnalyteName == "Velocity", "Result"], tempp[tempp$AnalyteName == "Velocity", "id"], max)
# BUG FIX: the original iterated over seq_along(ncast$id) (the transect
# table) while indexing into ncast2 (the float table); use ncast2's length.
velocity_N <- lapply(seq_along(ncast2$id), function(i) {
  max(ncast2$"Distance, Float_1"[i] / ncast2$"Float Time_1"[i],
      ncast2$"Distance, Float_2"[i] / ncast2$"Float Time_2"[i],
      ncast2$"Distance, Float_3"[i] / ncast2$"Float Time_3"[i])
})
velocity_N <- tapply(velocity_N, ncast2$id, invisible)
# BUG FIX: assign by row name rather than which(%in%) to keep ids aligned;
# intersect() also avoids accidentally appending new rows for unknown ids.
qn <- intersect(rownames(result), names(velocity_Q))
result[qn, 7] <- velocity_Q[qn]
nn <- intersect(rownames(result), names(velocity_N))
result[nn, 7] <- unlist(velocity_N[nn])
result[[8]] <- result[[7]]/3.2808399    # ft/s -> m/s
# Mean velocity per site (float sites reuse the mean float speed).
velocity_QM <- tapply(tempp[tempp$AnalyteName == "Velocity", "Result"], tempp[tempp$AnalyteName == "Velocity", "id"], mean)
qmn <- intersect(rownames(result), names(velocity_QM))
result[qmn, 9] <- velocity_QM[qmn]
sn <- intersect(rownames(result), names(nspeed))
result[sn, 9] <- unlist(nspeed[sn])
result[[10]]<-result[[9]]/3.2808399
###Percent zero velocity###
# BUG FIX: count zeros ignoring NA -- the original sum(data == 0) returns NA
# whenever any reading is NA; the denominator already counts non-NA readings.
zerov1 <- tapply(tempp[tempp$AnalyteName == "Velocity", "Result"], tempp[tempp$AnalyteName == "Velocity", "id"],
                 function(data) sum(data == 0, na.rm = TRUE))
lengthna <- function(data){sum(!is.na(data))}
zerov<- (zerov1/tapply(tempp[tempp$AnalyteName == "Velocity", "Result"], tempp[tempp$AnalyteName == "Velocity", "id"],
                       lengthna))*100
# Float sites: share of the three runs with zero velocity.
# BUG FIX: iterate over ncast2 (float table), not ncast, and express the
# value as a percentage to match zerov above.
zeron1<- lapply(seq_along(ncast2$id), function(i) {
  sum((ncast2$"Distance, Float_1"[i] / ncast2$"Float Time_1"[i]) == 0,
      (ncast2$"Distance, Float_2"[i] / ncast2$"Float Time_2"[i]) == 0,
      (ncast2$"Distance, Float_3"[i] / ncast2$"Float Time_3"[i]) == 0)
})
zeron <- tapply(lapply(zeron1, function(d) (d/3)*100), ncast2$id, invisible)
# BUG FIX: assign by row name rather than which(%in%) to keep ids aligned.
zn <- intersect(rownames(result), names(zerov))
result[zn, 11] <- zerov[zn]
fn <- intersect(rownames(result), names(zeron))
result[fn, 11] <- unlist(zeron[fn])
colnames(result)[7:11] <-c("MWVM_F.result", "MWVM_M.result", "XWV_F.result", "XWV_M.result", "PWVZ.result")
###Write to file###
# Export all flow metrics (one row per site/date id, row names kept).
outfile <- file("flow_metrics.csv", open = "w")
write.csv(result, outfile)
close(outfile)
print("Flow complete")
d88058d6ad5b1c54acf5feb8e492c217c8c1167b | 431f33c4b361ce49f70505f3bc923162eb92a062 | /man/PlotGMPair.Rd | effadc605a876e9a7db16696a8d2b43220d6d4f8 | [] | no_license | Mathelab/IntLIM | 17559cdef93768831f6fe45d88783bda02d6017a | 17c3229139d02de2ded783221323c988222beed3 | refs/heads/master | 2022-07-31T03:37:56.881904 | 2022-07-06T11:04:23 | 2022-07-06T11:04:23 | 99,143,369 | 5 | 7 | null | 2018-09-05T21:27:30 | 2017-08-02T17:23:01 | R | UTF-8 | R | false | true | 1,141 | rd | PlotGMPair.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plottingfunctions.R
\name{PlotGMPair}
\alias{PlotGMPair}
\title{scatter plot of gene-metabolite pairs (based on user selection)}
\usage{
PlotGMPair(inputData, stype = NULL, geneName, metabName,
palette = "Set1", viewer = T)
}
\arguments{
\item{inputData}{IntLimObject output of ReadData() or FilterData()}
\item{stype}{category to color-code by}
\item{geneName}{string of select geneName}
\item{metabName}{string of select metabName}
\item{palette}{choose an RColorBrewer palette ("Set1", "Set2", "Set3",
"Pastel1", "Pastel2", "Paired", etc.) or submit a vector of colors}
\item{viewer}{whether the plot should be displayed in the RStudio viewer (TRUE) or
in Shiny/knitr (FALSE)}
}
\value{
a highcharter object
}
\description{
scatter plot of gene-metabolite pairs (based on user selection)
}
\examples{
\dontrun{
dir <- system.file("extdata", package="IntLIM", mustWork=TRUE)
csvfile <- file.path(dir, "NCItestinput.csv")
mydata <- ReadData(csvfile,metabid='id',geneid='id')
PlotGMPair(mydata,stype="PBO_vs_Leukemia","DLG4","(p-Hydroxyphenyl)lactic acid")
}
}
|
# Summarize posterior draws of the Poisson model.
# p: Stan posterior draws -- matrices `of` and `df` with one column per team
#    and one row per draw.
# Returns a list with per-team posterior means and sds of the offensive
# (`of`) and defensive (`df`) factors.
get_posterior.pois <- function(p){
  # BUG FIX: sized by the draw matrices themselves rather than the global
  # `teams` vector, so the function no longer depends on external state.
  list(ofmu = colMeans(p$of),
       ofsd = apply(p$of, 2, sd),
       dfmu = colMeans(p$df),
       dfsd = apply(p$df, 2, sd))
}
# Summarize posterior draws of the normal model.
# p: Stan posterior draws -- matrices `of` and `df` (one column per team,
#    one row per draw) plus a vector `phi` of scale draws.
# Returns per-team means/sds of the offensive and defensive factors and the
# scalar mean/sd of phi.
get_posterior.norm <- function(p){
  # BUG FIX: sized by the draw matrices themselves rather than the global
  # `teams` vector, so the function no longer depends on external state.
  list(ofmu = colMeans(p$of),
       ofsd = apply(p$of, 2, sd),
       dfmu = colMeans(p$df),
       dfsd = apply(p$df, 2, sd),
       phimu = mean(p$phi),
       phisd = sd(p$phi))
}
# Simulate all pairwise matchups among the bracket teams.
# para:   posterior draws (matrices `of`, `df`, one column per team)
# teams:  all team names, aligned with para's columns
# bteams: subset of teams to rank
# its:    Monte Carlo iterations per matchup
# Returns list(win-probability matrix, expected-score matrix); entry [i, j]
# is team i's win probability / expected score against team j.
rank_stan <- function(para, teams, bteams, its=1000){
  # indices (in `teams` order) of the bracket teams
  ti <- which(teams %in% bteams)
  ofmu <- colMeans(para$of)
  ofsd <- apply(para$of, 2, sd)
  dfmu <- colMeans(para$df)
  dfsd <- apply(para$df, 2, sd)
  matchups_prob <- matrix(0, nrow=length(teams), ncol=length(teams))
  matchups_score <- matrix(0, nrow=length(teams), ncol=length(teams))
  for(i in ti){
    for(j in ti){
      mcm <- mc_matchup(list(ofmu=ofmu[i],ofsd=ofsd[i],dfmu=dfmu[i],dfsd=dfsd[i]),
                        list(ofmu=ofmu[j],ofsd=ofsd[j],dfmu=dfmu[j],dfsd=dfsd[j]),
                        iter=its)
      matchups_prob[i,j] <- sum(mcm$s1>mcm$s2)/its
      matchups_score[i,j] <- mean(mcm$s1)
    }
  }
  matchups_prob <- matchups_prob[ti,ti]
  matchups_score <- matchups_score[ti,ti]
  # BUG FIX: after subsetting by `ti` the rows/cols are in `teams` order;
  # label them with teams[ti] (same set as bteams, but the original used
  # bteams directly, which mislabels when bteams is ordered differently).
  dimnames(matchups_prob) <- list(teams[ti], teams[ti])
  dimnames(matchups_score) <- list(teams[ti], teams[ti])
  list(matchups_prob, matchups_score)
}
# Simulate one matchup under the sum-parameterized Poisson model.
# Team strengths are drawn from their (normal) posterior summaries, then
# scores are drawn Poisson with rate exp(offense - opposing defense).
# t1, t2: lists with ofmu/ofsd/dfmu/dfsd; iter: number of simulated games.
# Returns list(s1, s2) of simulated scores for team 1 and team 2.
mc_matchup.pois_sum <- function(t1, t2, iter=1000){
  off1 <- rnorm(iter, t1$ofmu, t1$ofsd)
  def1 <- rnorm(iter, t1$dfmu, t1$dfsd)
  off2 <- rnorm(iter, t2$ofmu, t2$ofsd)
  def2 <- rnorm(iter, t2$dfmu, t2$dfsd)
  score1 <- rpois(iter, exp(off1 - def2))
  score2 <- rpois(iter, exp(off2 - def1))
  list(s1 = score1, s2 = score2)
}
# Simulate one matchup under the product-parameterized Poisson model.
# Identical to mc_matchup.pois_sum except the Poisson rate also includes an
# offense x defense interaction term: exp(of + df + of * df).
# t1, t2: lists with ofmu/ofsd/dfmu/dfsd; iter: number of simulated games.
# Returns list(s1, s2) of simulated scores for team 1 and team 2.
mc_matchup.pois_prod <- function(t1, t2, iter=1000){
  off1 <- rnorm(iter, t1$ofmu, t1$ofsd)
  def1 <- rnorm(iter, t1$dfmu, t1$dfsd)
  off2 <- rnorm(iter, t2$ofmu, t2$ofsd)
  def2 <- rnorm(iter, t2$dfmu, t2$dfsd)
  score1 <- rpois(iter, exp(off1 + def2 + off1 * def2))
  score2 <- rpois(iter, exp(off2 + def1 + off2 * def1))
  list(s1 = score1, s2 = score2)
}
# Simulate one matchup under the normal-score model.
# Team strengths are drawn from their posteriors; scores are normal with
# mean exp(offense - opposing defense) and a shared sd drawn from phi.
# t1, t2: lists with ofmu/ofsd/dfmu/dfsd; phi: list with mu/sd of the score
# sd; iter: number of simulated games.
# Returns list(s1, s2) of simulated scores for team 1 and team 2.
mc_matchup.norm <- function(t1, t2, phi, iter=1000){
  off1 <- rnorm(iter, t1$ofmu, t1$ofsd)
  def1 <- rnorm(iter, t1$dfmu, t1$dfsd)
  off2 <- rnorm(iter, t2$ofmu, t2$ofsd)
  def2 <- rnorm(iter, t2$dfmu, t2$dfsd)
  sd.draws <- rnorm(iter, phi$mu, phi$sd)
  score1 <- rnorm(iter, exp(off1 - def2), sd.draws)
  score2 <- rnorm(iter, exp(off2 - def1), sd.draws)
  list(s1 = score1, s2 = score2)
}
# Default single-matchup simulator used by rank_stan(): the additive
# Poisson model.
mc_matchup <- mc_matchup.pois_sum
matchup.pois <- function(t1n, t2n, param, teams, its=10000, method="sum"){
  # Simulate a Poisson-model matchup between two named teams.
  #
  # t1n, t2n: team names, matched against the `teams` vector.
  # param:    list with per-team vectors ofmu, ofsd, dfmu, dfsd.
  # its:      number of Monte Carlo draws.
  # method:   "sum" for the additive model, "prod" for the interaction model.
  # Returns the list(s1, s2) produced by the chosen mc_matchup.* function.
  if(!method %in% c("sum", "prod")){
    stop("method must be one of: [sum, prod]")
  }
  t1 <- which(t1n==teams)
  t2 <- which(t2n==teams)
  # Build each team's parameter list once instead of duplicating the
  # construction in both method branches (the only change vs the original).
  pars <- function(k) {
    list(ofmu = param$ofmu[k], ofsd = param$ofsd[k],
         dfmu = param$dfmu[k], dfsd = param$dfsd[k])
  }
  sim <- if (method == "sum") mc_matchup.pois_sum else mc_matchup.pois_prod
  sim(pars(t1), pars(t2), iter = its)
}
matchup.norm <- function(t1n, t2n, param, teams, its=10000){
  # Normal-model matchup between two named teams: look up each team's
  # posterior summaries in `param` and forward them, together with the
  # shared noise parameters phimu/phisd, to mc_matchup.norm().
  i1 <- which(teams == t1n)
  i2 <- which(teams == t2n)
  pick <- function(k) {
    list(ofmu = param$ofmu[k], ofsd = param$ofsd[k],
         dfmu = param$dfmu[k], dfsd = param$dfsd[k])
  }
  mc_matchup.norm(pick(i1), pick(i2),
                  list(mu = param$phimu, sd = param$phisd),
                  iter = its)
}
# Default named-team matchup wrapper: the Poisson variant.
matchup <- matchup.pois
|
da5ff4cf72a743ef6716843b5ddf58c62398b5b4 | 257816425c6e4193814e16970b6b96a47e62af67 | /man/plot_roc.Rd | 6def71450c52faa33a48437271236cc03bba2d96 | [
"Apache-2.0"
] | permissive | mmadsen/mmadsenr | 5e108fda68431f202e11c783cb0cec44450aba89 | 1a7ac5431d87628a32fee0453a7d2a289dd5975b | refs/heads/master | 2020-05-09T22:02:37.080170 | 2015-01-03T18:28:36 | 2015-01-03T18:28:36 | 19,648,056 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 335 | rd | plot_roc.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{plot_roc}
\alias{plot_roc}
\title{plot_roc}
\usage{
plot_roc(roc_obj)
}
\arguments{
\item{roc_obj}{Output object from calculate_roc_binary_classifier()}
}
\description{
Plots a single ROC curve with diagonal abline, given output objects from calculate_roc_binary_classifier()
}
|
3e3fba08e19d424009882c9da6e52a81ad2c88c3 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.compute/man/ec2_export_client_vpn_client_certificate_revocation_list.Rd | f6c8f676bd31651435463ffbe19adf9161d603fb | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,045 | rd | ec2_export_client_vpn_client_certificate_revocation_list.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_export_client_vpn_client_certificate_revocation_list}
\alias{ec2_export_client_vpn_client_certificate_revocation_list}
\title{Downloads the client certificate revocation list for the specified
Client VPN endpoint}
\usage{
ec2_export_client_vpn_client_certificate_revocation_list(
ClientVpnEndpointId,
DryRun = NULL
)
}
\arguments{
\item{ClientVpnEndpointId}{[required] The ID of the Client VPN endpoint.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Downloads the client certificate revocation list for the specified Client VPN endpoint.
See \url{https://www.paws-r-sdk.com/docs/ec2_export_client_vpn_client_certificate_revocation_list/} for full documentation.
}
\keyword{internal}
|
f040f6beb709e10b0c07a0a9fc001dab946a67b2 | 50ed41c278c75f4cc3d047e4c4514d2f72b83c2b | /FullScript.R | c18e41bcb9ab0d13933d9d1f94cf58569c54db69 | [] | no_license | DylanDriessen/TextMining-Clustering | afe513a1b4f35b50d9cc97f077306e8f9d07b735 | 2a8a576baa58c0f7fbd39cffd73aa20c64a36625 | refs/heads/master | 2020-04-02T02:18:30.921307 | 2019-04-11T09:42:13 | 2019-04-11T09:42:13 | 180,760,403 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,818 | r | FullScript.R | library('readr')
library(parallel)
library(tm)
library(tidytext)
#Cleaning Data
# Raw entity-extraction output: one row per (document id, person entity).
utf8Data <-read.table("Alfresco_EN_PDF__Persons_cln.utf8",
                      sep=",",
                      header=TRUE,
                      encoding="UTF-8",
                      stringsAsFactors=FALSE
)
# load() restores the serialized object `dtm` into the workspace; the value
# captured in dtm.rda is just the character name "dtm", not the matrix itself.
dtm.rda <- load(file = "dtm.RDa")
# Long (document, term, count) view of the document-term matrix.
dtmTable <- tidy(dtm)
# Drop single-character terms, lower-case the remainder and remove stop words.
dtmTable.toRemove <- data.frame(term=unique(dtmTable$term[grep('^.$', dtmTable$term, perl = TRUE)]))
dtmTable <- dtmTable[!dtmTable$term %in% dtmTable.toRemove$term,]
# NOTE(review): lapply() turns `term` into a list column; the %in% filters
# still work through coercion, but tolower(dtmTable$term) alone would keep
# it a plain character vector.
dtmTable$term <- lapply(dtmTable$term, function(x) tolower(x))
dtmTable <- dtmTable[!dtmTable$term %in% stop_words$word,]
#Cleaning with lapply and tm
# Normalise entity names (lower-case, trim leading whitespace, drop dots and
# hyphens, spaces -> underscores) and time the pass with proc.time().
ptm <- proc.time()
utf8Data$entity <- lapply(utf8Data$entity, function(x) tolower(x))
utf8Data$entity <- lapply(utf8Data$entity, function (x) sub("^\\s+", "", x))
utf8Data$entity <- lapply(utf8Data$entity, function(x) gsub('\\.', "", gsub('\\-', "", gsub(" ", "_", x))))
# Flatten the list column produced above back into a character vector.
cleanUtf8Data <- transform(utf8Data, entity=unlist(utf8Data$entity))
# Elapsed seconds for the cleaning pass (printed when run interactively).
proc.time()[3]-ptm[3]
# From here on the two columns are treated as document id and person name.
names(cleanUtf8Data) <- c("id", "name")
#WORDCLOUDS
library(stats)
library(plyr)
library(wordcloud)
library(wordcloud2)
library(viridis)
library(dplyr)
dataSet2 <- cleanUtf8Data
# Name to inspect (chosen by hand)
name <- 'adolf_hitler'
# All unique (id, name) rows for the chosen name
nameSetUnique <- unique(dataSet2[dataSet2$name==name,])
# Unique names of persons co-occurring (same document id) with the person above
nameDataSetUnique <- unique(merge(nameSetUnique, dataSet2, by="id"))
# Count how often each co-occurring name appears
library(plyr)
frequencyDataSetUnique <- count(nameDataSetUnique, nameDataSetUnique$name.y)
# Word cloud of the de-duplicated co-occurrences
par(mfrow=c(1,1))
pal = brewer.pal(8,"Dark2")
set.seed(1234)
png("wordcloudUnique.png", width=1280,height=800)
wordcloud(frequencyDataSetUnique$`nameDataSetUnique$name.y`,min.freq=1, frequencyDataSetUnique$n,scale=c(4, 1),
          max.words=Inf, random.order=FALSE, rot.per=.15,
          colors = pal)
dev.off()
# Now keep every occurrence, i.e. not de-duplicated
# All rows (ids) for the chosen name
nameSet <- (dataSet2[dataSet2$name==name,])
# All names of persons co-occurring with the person above, per id
nameDataSet <- (merge(nameSet, dataSet2, by="id"))
# Count the frequency of each name
frequencyDataSet2 <- count(nameDataSet, nameDataSet$name.y)
# Word cloud of the raw co-occurrence counts
par(mfrow=c(1,1))
pal = brewer.pal(8,"Dark2")
set.seed(1234)
png("wordcloud.png", width=1280,height=800)
wordcloud(frequencyDataSet2$`nameDataSet$name.y`, frequencyDataSet2$n, min.freq=1,scale=c(8, 1.5),
          max.words=Inf, random.order=FALSE, rot.per=.15,
          colors = pal)
dev.off()
#SCRIPT MAARTEN
library(tm)
library(slam)
library(skmeans)
library(plyr)
library(dplyr)
library(Matrix)
library(wordspace)
library(rjson)
library(jsonlite)
# One text row per document id (entities concatenated with spaces).
uniqueID <- unique(cleanUtf8Data$id)
documentNamesString <- NULL
idSet <- data.frame(names=factor())
documentNamerow <- data.frame(names=factor())
# Build the corpus
# NOTE(review): the columns of cleanUtf8Data were renamed to id/name above,
# so idSet$entity is NULL here and every pasted row appears to be "";
# documentNamesString is also never used later. Growing it with rbind()
# inside the loop is O(n^2) as well -- confirm before relying on this block.
ptm <- proc.time()
for(i in 1:NROW(uniqueID)){
  idSet <- cleanUtf8Data[cleanUtf8Data$id == uniqueID[i],]
  documentNamerow <- paste(idSet$entity, collapse = " ")
  documentNamesString <- rbind(documentNamesString, documentNamerow)
  idSet <- data.frame(name=factor())
  documentNamerow <- data.frame(names=factor())
}
proc.time()[3]-ptm[3]
#Cluster skmeans
# Sparse (document x person) count matrix in triplet form, plus its
# transpose; spherical k-means gives 5 document clusters / 36 name clusters.
data.aggr <- summarise(group_by(cleanUtf8Data, id, name), count = n())
data.aggr$name <- as.factor(data.aggr$name)
data.aggr$id <- as.factor(data.aggr$id)
i <- c(data.aggr$id)
j <- c(data.aggr$name)
v <- c(data.aggr$count)
data.triplet <- simple_triplet_matrix(i,j,v)
data.triplet2 <- simple_triplet_matrix(j, i, v)
set.seed(2000)
data.cluster <- skmeans(data.triplet, 5)
data.cluster2 <- skmeans(data.triplet2, 36)
#Prepare data for JSON#
# Nodes: every person name with its skmeans cluster id.
data.names.cluster <- data.frame(group = data.cluster2$cluster, id=unique(data.aggr$name[order(data.aggr$name)]))
# Links: pairs of names sharing a document, with their co-occurrence count.
data.summ <- summarise(group_by(cleanUtf8Data, id, name))
data.links <- merge(data.summ, cleanUtf8Data, by="id")
data.links.freq <- summarise(group_by(data.links, name.x, name.y), count = n())
names(data.links.freq) <- c("source", "target", "value")
#Remove persons who don't often work together
data.links.freqMod <- data.links.freq[data.links.freq$value>10,]
# Cap link weights at 30.
# NOTE(review): lapply() makes `value` a list column; pmin(value, 30) would
# keep it numeric and do the same capping.
data.links.freqMod$value <- lapply(data.links.freqMod$value, function(x){
  if(x>30){
    x=30
    return(x)
  }
  return(x)
})
# Keep only nodes that still appear as a link source.
data.names.cluster.relevant <- data.names.cluster[data.names.cluster$id %in% data.links.freqMod$source,]
#Write to JSON
data.json.list <- list(data.names.cluster.relevant, data.links.freqMod)
names(data.json.list) <- c("nodes", "links")
data.json <- toJSON(data.json.list, dataframe = c("rows", "columns", "values"), pretty = TRUE)
write(data.json, "data.json")
#Cluster and most used words#
# Table linking documents to name clusters, filtered to one chosen cluster.
clusterNumber <- 16
# NOTE(review): the positional c('name') matches merge()'s `by` argument,
# not by.y -- by.y = 'name' was probably intended here.
clusterDocs <- merge(data.names.cluster, data.aggr, by.x = c('id'), c('name'))
clusterDocs$count <- NULL
clusterDocs$id <- NULL
names(clusterDocs) <- c('cluster', 'id')
clusterDocs.freq <- summarise(group_by(clusterDocs, cluster, id), freq=n())
clusterDocs.specific <- clusterDocs.freq[clusterDocs.freq$cluster%in%clusterNumber,]
#Words for specific cluster
clusterWords <- merge(clusterDocs.specific, dtmTable, by.x = c("id"), by.y = c("document"))
clusterWords$freq <- NULL
clusterWords$id <- NULL
#Set terms to character
clusterWords$term <- as.character(clusterWords$term)
# Term frequency in the chosen cluster (ignoring the per-document counts).
countWords <- count(clusterWords, clusterWords$term)
count2Words <- merge(clusterWords, countWords, by.x = c('term'), c('clusterWords$term'))
# Sum the counts across the different documents (overwrites count2Words).
library(data.table)
DT <- data.table(clusterWords)
count2Words <- DT[ , .(Totalcount = sum(count)), by = .(term)]
summary(count2Words)
# Word cloud of the cluster's terms
par(mfrow=c(1,1))
pal = brewer.pal(8,"Dark2")
set.seed(1234)
png("wordcloud.png", width=1280,height=800)
wordcloud(count2Words$term, count2Words$Totalcount, min.freq=1000,scale=c(8, 0.5),
          max.words=Inf, random.order=FALSE, rot.per=.15,
          colors = pal)
dev.off()
# Word cloud for a specific person
personName <- 'roxana_angheluta'
personIdList <- cleanUtf8Data[cleanUtf8Data$name==personName, ]
personWordList <- merge(personIdList, dtmTable, by.x=c('id'), by.y=c('document'))
personWordList$id <- NULL
personWordList$name <- NULL
personWordList$term <- as.character(personWordList$term)
# Total term count across all of this person's documents.
termFrequency <- aggregate(count ~ term, personWordList, sum)
# Word cloud of the person's terms
par(mfrow=c(1,1))
pal = brewer.pal(8,"Dark2")
set.seed(1234)
png("wordcloudPersonal.png", width=1280,height=800)
wordcloud(termFrequency$term, termFrequency$count, min.freq=750,scale=c(8, 0.5),
          max.words=Inf, random.order=FALSE, rot.per=.15,
          colors = pal)
dev.off()
b8022f6c95ed71645640e3f0f882b6dae8f0d183 | b21f69120f18d19199dcdf981afc618d7d30aa8b | /R/individuals.R | 5d8374ae6e0482c978612668f0deed5105474520 | [
"MIT"
] | permissive | mrc-ide/hypatia | 5a5c49511331261b447b4b6d74211a0a2bfa4a07 | 14f8ee8ea109817f3dd3780ad799ac120b09b513 | refs/heads/main | 2023-02-01T01:05:36.576426 | 2020-12-16T16:23:47 | 2020-12-16T16:23:47 | 307,647,565 | 0 | 0 | NOASSERTION | 2020-12-16T16:32:24 | 2020-10-27T09:25:47 | R | UTF-8 | R | false | false | 449 | r | individuals.R | #' @title Define the human model
#' @description Builds the "human" individual model object and attaches
#' the supplied states, variables and events to it.
#'
#' @noRd
#' @param states available states to assign
#' @param variables available variables to assign
#' @param events available events to assign
create_human <- function(states, variables, events) {
  # All bookkeeping is delegated to the individual package's Individual
  # class; this wrapper only fixes the model name.
  human_model <- individual::Individual$new(
    "human",
    states = states,
    variables = variables,
    events = events
  )
  human_model
}
|
a5a2db9ff12bcde48fb06c9e6e3b5c86e1402fc8 | a9598d520c5c103e2505565b09487655a9b60e0d | /app.R | d95fcf78d0dd2eeacab6dfebccc785204a7bfd75 | [] | no_license | BillmanH/SCHonorsIPS | b624a7564431b98fdade1e1011bb6edb19f4edeb | 8c3dcbcd96e696657f9bb994b15e1d8da4cc715b | refs/heads/master | 2020-04-23T17:30:58.937397 | 2019-02-18T18:35:00 | 2019-02-18T18:35:00 | 171,334,682 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,110 | r | app.R | # TODO:
# Center images
# Match font
#
#
#
#
#
library(shiny)
library(dplyr)
# Grade-to-percent lookup table shown beneath the calculator.
d <- read.csv("data/gradeTable.csv",header = FALSE)
colnames(d) <- c("Grade","Percent")
# UI: analytics snippet, school crest, intro text, the two grade inputs,
# the computed Honors grade, and the lookup table.
ui <- fluidPage(
  includeHTML("GoogleAnalytics.html"),
  img(src = "SC Centercourt final CMYK.jpg", height = 72, width = 72),
  h1("Shorecrest Honors IPS Grade Calculation"),
  p("Please use the fields below to calculate your Honors IPS grade. Enter both your current IPS and IPS Challenge grades as they appear in Canvas. You need to enter the percent- not the letter grade. All final grades will be rounded to the nearest whole number."),
  numericInput("Number1", "IPS grade :", 75, min = 1, max = 100),
  numericInput("Number2", "IPS Challenge grade :", 75, min = 1, max = 100),
  h4("Honors IPS grade:"),
  verbatimTextOutput("value"),
  tableOutput('table')
)
server <- function(input, output) {
  # Honors grade = 70% IPS + 30% IPS Challenge, rounded to a whole number.
  output$value <- renderText({
    round(
      (input$Number1 *.7) + (input$Number2 *.3)
    )
  })
  output$table <- renderTable(d)
}
# Run the application
shinyApp(ui = ui, server = server)
|
e74a8a591768446e13a36c659602c51e0f26b0cf | 785264b5af06dfe69e44abd1c147420e240c4ab0 | /R/translations.R | 12eb886f06cb7ceba1ca884a66d67cdbfe2f0aa1 | [] | no_license | yash1223/limiting_advr | 28665c1725ff56949a494f01bc939ae414274874 | 9747628413129c6fcae2ce3c7a27b4bbab8cf8d1 | refs/heads/master | 2020-04-10T23:02:26.147757 | 2018-12-11T14:12:53 | 2018-12-11T14:12:53 | 161,339,910 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,486 | r | translations.R |
getall <- function(r, p, q){
  # Given the odds-style parameter r and the two margins p and q of a 2x2
  # table, recover the four cell proportions. Inputs may be vectors; the
  # result is a matrix with columns x1..x4 (one row per input element).
  x1 <- r * p * q / (1 - q + r * q)
  x2 <- p - x1
  x3 <- q - x1
  x4 <- 1 - x1 - x2 - x3
  cbind(x1, x2, x3, x4)
}
fromxtopqr <- function(x){
  # Inverse mapping: from the four cell proportions x = (x1, x2, x3, x4)
  # back to the margins p = x1+x2, q = x1+x3 and the relative risk r.
  p <- x[1] + x[2]
  q <- x[1] + x[3]
  risk_top <- x[1] / (x[1] + x[2])
  risk_bottom <- x[3] / (x[3] + x[4])
  c(p, q, risk_top / risk_bottom)
}
GetRatio <- function(luup, lup, cpr, demand){
  # Ratio of met demand among limiters to met demand among spacers.
  # luup: limiters' share of total demand; lup: limiters' share of unmet
  # need; cpr: contraceptive prevalence; demand: total demand.
  limiters <- demand * luup
  spacers <- demand - limiters
  unmet <- demand - cpr
  unmet_lim <- lup * unmet
  unmet_spa <- unmet - unmet_lim
  (1 - unmet_lim / limiters) / (1 - unmet_spa / spacers)
}
# S <- 1000
# p <- 0.5
# q <- 0.6
# x1.s <- runif(S, max(0, p+q-1), min(p,q))
# x2.s <- p - x1.s
# x3.s <- q - x1.s
# x4.s <- 1 - (x1.s + x2.s + x3.s)
# res.fs <- apply(cbind(x1.s, x2.s, x3.s, x4.s),1, fromxtopqr)
# res.fs[3,]
#
# GetRatio()
GetLup <- function(ratio, luup, cpr, demand){
  # Invert GetRatio(): given the limiter/spacer met-demand ratio, recover
  # the limiters' share of unmet need (lup).
  # ratio: (cl/limiters)/(cs/spacers); luup: limiters' share of total
  # demand; cpr: contraceptive prevalence; demand: total demand.
  # (Removed a dead, discarded expression `(cl/alllimiters)/(cs/allspacers)`
  # that the original computed and threw away.)
  alllimiters <- demand * luup
  allspacers <- demand - alllimiters
  # cl/cs implied by the target ratio and the limiter/spacer split; cl + cs
  # must add up to cpr.
  clovercs <- ratio * alllimiters / allspacers
  cs <- cpr / (clovercs + 1)
  cl <- cpr - cs
  ul <- alllimiters - cl
  # Limiters' share of total unmet need.
  ul / (demand - cpr)
}
# GetLup(ratio =3, luup = 0, cpr = 0.9, demand = 1)
# 0.5+0.9-1
#
# GetLup(ratio =1.05, luup = 0.7, cpr = 0.807, demand = 0.876)
# GetLup(ratio =1.3, luup = 0.7, cpr = 0.807, demand = 0.876)
# r <- 1.05
# q <- 0.807/0.876
# p <- 0.7
# x1 cell proportion as a function of r.
# NOTE(review): `p` and `q` are free variables resolved from the global
# environment at call time, not arguments -- they must be defined before
# this is called.
getx1 <- function(r) r*p*q/(1-q + r*q)
frompqrtox <- function(p, q, r){
  # Map margins p, q and parameter r to the four cell proportions
  # (x1, x2, x3, x4) of the 2x2 table; inverse of fromxtopqr().
  #
  # Bug fix: the original called getx1(r), which reads `p` and `q` from the
  # global environment, silently ignoring the p and q passed here. x1 is now
  # computed directly from the local arguments.
  x1 <- r * p * q / (1 - q + r * q)
  x2 <- p - x1
  x3 <- q - x1
  x4 <- 1 - x1 - x2 - x3
  c(x1, x2, x3, x4)
}
# NOTE(review): identical re-definition of fromxtopqr() from earlier in this
# file; harmless but redundant.
fromxtopqr <- function(x){
  p <- x[1]+x[2]
  q <- x[1]+x[3]
  r <- x[1]/(x[1]+x[2]) / (x[3]/(x[3]+x[4]))
  return(c(p,q,r))
}
# select <- which.max(getc.i == which(name.c=="Colombia"))
# fromxtopqr(cbind(cl, ul, cs, us)[select,]/sum(cbind(cl, ul, cs, us)[select,]))
# cbind(luup, cl+cs, ratio)[select,]
# (cl+cs)[select]/sum(cbind(cl, ul, cs, us)[select,])
#
# r=1.3
# fromxtopqr(frompqrtox(p,q,r)); p;q;r
#
#
# lup <- x2/(x2+x4); lup
#
# ((cl+cs)/totdemand)[getc.i == which(name.c=="Colombia")]
# q
# luup[getc.i == which(name.c=="Colombia")]
# (ul/unmet)[getc.i == which(name.c=="Colombia")]
# lup[getc.i == which(name.c=="Colombia")]
# ratio.i[getc.i == which(name.c=="Colombia")]
#
# curve(getx1(x), xlim = c(0,100))
# abline(h=p+q-1)
# abline(h=min(p,q))
#
# x1 <- getx1(3)
# x2 <- p-x1; x2
# x3 <- q - x1; x3
# 1-x1-x2-x3
#
# p <- runif(S)
# q <- runif(S)
# mean( (p+q-1) <= min(p,q))
# NOTE(review): identical re-definition of getall() from the top of this
# file; harmless but redundant.
getall <- function(r,p,q){
  x1 <- r*p*q/(1-q + r*q)
  x2 <- p-x1
  x3 <- q - x1
  x4 <- 1- x1-x2-x3
  return(cbind(x1,x2,x3,x4))
}
# set.seed(12)
# p.s <- runif(S)
# q.s <- runif(S)
# r.s <- seq(0,S-1)
# res <- getall(r.s, p.s, q.s)
# apply(res,1,sum)
# sum(res<0)
# # all in x4
# sum(apply(res<0,1, sum) & !(res[,4] > 0))
#
# res[apply(res<0,1,sum)>0,][1:5,]
# cbind(r.s, p.s, q.s)[apply(res<0,1,sum)>0,][1:5,]
# sum(res>1)
#
# rationew <- function(a,b,d) a+b*d-(a+b)*d^2
# curve(rationew(-1,-2,x), xlim = c(0,1))
# obtain combis of cells that add to 1
#library(MCMCpack)
#S <- 1000
#samp.sc <- rdirichlet(S, alpha = rep(1/4,4))
#apply(samp.sc,1,sum)
# S <- 1000
# p <- 0.5
# q <- 0.6
# x1.s <- runif(S, max(0, p+q-1), min(p,q))
# x2.s <- p - x1.s
# x3.s <- q - x1.s
# summary(1 - (x1.s + x2.s + x3.s))
#
# p <- 0.5
# q <- 0.6
# p+q-1
# r <- 1
# x1 <- r*p*q/(1-q + r*q); x1
# p <- 0.5
# q <- 0.9
# 2
# p <- 0#0.9426217
# q <- 0.808246720
#
# GetLup(ratio =1.05, luup = 0.7, cpr = 0.807, demand = 0.876)
# GetLup(ratio =1.3, luup = 0.7, cpr = 0.807, demand = 0.876)
|
3bcdd96262a698bf640f4de259eeee25cd7ab14a | b521dd730b68f6839f3508a440f2116bf64a05b3 | /lab2/Lab 1/marks.R | 52ee666e1c45a731689898f37254a72d988ddf11 | [] | no_license | Cryptonex7/Data-Analytics-in-R | e02cdbe6c43bc9a67f52e426b7ba2190b35a5ac4 | bd184580971c9e1f5d9b2fcf70780f1f47509bc5 | refs/heads/master | 2020-09-29T12:17:06.437357 | 2020-02-17T04:35:42 | 2020-02-17T04:35:42 | 227,037,503 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 343 | r | marks.R | M = c(0,0,0,0,0)
# Prompt for the five subject marks (each out of 100).
M <- integer(5)
for (i in seq_len(5)) {
  M[i] <- as.integer(readline(sprintf("Enter M%d: ", i)))
}
total <- sum(M)
cat("Average: ", total/5)
# Bug fix: the original printed total/500, which is a 0-1 fraction, not a
# percentage; out of 500 total marks the percentage is total/500 * 100.
cat("Percentage: ", total/500 * 100)
|
2fe1d80e4d945dcfe88e4de4a6316e8c5f102563 | 1139519d1534d401b01aabbb585ee909ba8707ef | /mainProject.R | cf97a28ba6c0f2343745f769e4bf91952eb52bfa | [] | no_license | 01bui/Career_Path_Project_R | a6271207aadfaaacb613844989bd8638d076259e | 8706a73e1f4678d6e033d32d247094a76268bbdf | refs/heads/master | 2016-09-01T16:43:48.737684 | 2015-12-27T01:37:10 | 2015-12-27T01:37:10 | 48,628,417 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,879 | r | mainProject.R | # Read datasets into R
# Person-level survey records (ss10pmd1.csv) -- presumably ACS PUMS data;
# WAGP = wage income, SCHL = educational attainment, FOD1P = field of degree.
Datasets<-read.csv("C:/Users/vbui/Google Drive/CUA Fall 2014/CSC530/Project1/ss10pmd1.csv")
# Extract the data into subset included People with College and Highschool degree
# (SCHL >= 21 is a bachelor's degree or higher, SCHL == 16 a regular high
# school diploma -- see the SCHL code legend further down this script;
# WAGP > 0 keeps only people with positive wage income.)
WCollege<-subset(Datasets, Datasets$SCHL>=21 & Datasets$WAGP > 0)
HighSchool<-subset(Datasets, Datasets$SCHL==16 & Datasets$WAGP > 0)
# Summary: mean, median, range and interquartiles range. How many missing values a variable has.
summary(WCollege$WAGP)
summary(HighSchool$WAGP)
# Calculate the mean, standard deviation and variance
Mean.WCollege<-mean(WCollege$WAGP)
Mean.WCollege
Mean.HighSchool<-mean(HighSchool$WAGP)
Mean.HighSchool
Std.WCollege<-sd(WCollege$WAGP)
Std.WCollege
Std.HighSchool<-sd(HighSchool$WAGP)
Std.HighSchool
Variance.WCollege<-var(WCollege$WAGP)
Variance.WCollege
Variance.HighSchool<-var(HighSchool$WAGP)
Variance.HighSchool
# A hypothesis test that can help to determine whether a sample has been drawn from a normal distribution
# The null hypothesis for the test is that the sample is drawn from a normal distribution
# (the [3:5000] slice keeps the sample within shapiro.test()'s 5000-value limit)
shapiro.test(Datasets$WAGP[3:5000])
shapiro.test(HighSchool$WAGP[3:5000])
# Conclude: Not follow normal distribution.
# Confidence Interval for a sample mean with Student's T-Tests
t.test(WCollege$WAGP, conf.level = 0.95)
t.test(WCollege$WAGP, mu=40000, conf.level = 0.95, alternative="greater")
t.test(WCollege$WAGP, mu=40000, conf.level = 0.95, alternative="less")
t.test(HighSchool$WAGP, conf.level = 0.95)
t.test(HighSchool$WAGP, mu=30000, conf.level = 0.95, alternative="greater")
t.test(HighSchool$WAGP, conf.level = 0.95, alternative="less")
boxplot(WCollege$WAGP~SCHL, WCollege,main="Income of people with different degree", xlab="Degree",ylab="Earnings")
# Analysis of Variance
EarningsANOVA<-aov(WAGP~SCHL, WCollege)
anova(EarningsANOVA)
coef(EarningsANOVA)
TukeyHSD(EarningsANOVA)
pairwise.t.test(WCollege$WAGP, WCollege$SCHL, p.adj="bonferroni")
bartlett.test(WAGP~SCHL, WCollege)
var.test(WAGP~SCHL, WCollege)
# Prediction Interval
# (intercept-only models: interval for a single new observation)
predict(lm(WCollege$WAGP~1), interval="prediction", level=0.90)[1, ]
predict(lm(HighSchool$WAGP~1), interval="prediction", level=0.95)[1, ]
# ?? Meaning of Prediction Interval??
# Bootstrap goes here
library(boot)
# function to obtain R-Squared from the data
# Statistic function for boot(): refit the model on the bootstrap sample
# selected by `indices` and return the fitted coefficients.
# NOTE(review): despite the original comment above, this returns the model
# coefficients, not R-squared.
rsq <- function(formula, data, indices) {
  resampled <- data[indices, ]  # boot() supplies the resampled row indices
  coef(lm(formula, data = resampled))
}
# bootstrapping with 1000 replications
# NOTE(review): the formula WCollege$WAGP~SCHL+FOD1P takes the response
# from the full, unresampled data frame while SCHL/FOD1P come from the
# resampled rows inside rsq(); WAGP~SCHL+FOD1P was probably intended.
results <- boot(data=WCollege, statistic=rsq, R=1000, formula=WCollege$WAGP~SCHL+FOD1P)
# view results
results
plot(results)
# get 95% confidence interval
boot.ci(results, conf = 0.95, type=c("basic"))
# Frequency tables: summarize a categorical variable by displaying the number of observations belonging to each category
# The below command shows how many people get a specified degree (>=21 for bachelors and higher) vs. their income
table(Datasets$WAGP, useNA="ifany")
# Creating plots
# This plot tells Education Attainment vs. Income
# (the high-school series is overlaid on the college plot via par(new=T))
plot(WCollege$WAGP[1:500], pch=1, col=2, main="Figure 1. Education Attainment vs. Earnings", xlab="Sample Size", ylab="Earnings")
par(new=T)
plot(HighSchool$WAGP[1:500], pch=2, col=4, axes=F,xlab="",ylab="")
par(new=T)
legend("topright", legend=c("College degree", "Highschool"), pch=c(1,2), col=c(2,4), cex=0.8)
# Find records of people with maximum income
Max_College_HS<-subset(Datasets, (Datasets$SCHL>=21 | Datasets$SCHL==16)& Datasets$WAGP==415000)
Max_College_HS<-subset(Datasets, Datasets$SCHL==16 & Datasets$WAGP==415000 & Datasets$AGEP>50)
Max_College_HS<-subset(Datasets, Datasets$SCHL==16 & Datasets$WAGP==415000 & Datasets$AGEP<50)
HighSchool_minus2<-subset(HighSchool, !HighSchool$row.names == 27632)
Max_College_HS<-subset(HighSchool_minus2, HighSchool_minus2$WAGP==415000 & HighSchool_minus2$AGEP<50)
# \n 0-15: Less than High School diploma\n16: Regular High School Diploma\n17-20: With High School diploma but no college degree\n21-24: bachelor's degree and beyond
# This plot tells income of Bachelors, Masters, Professional degree beyond bachelor and Docterate
plot(WCollege)
plot(WCollege$SCHL, WCollege$WAGP, pch=20, main="Fig 1. Education Attainment vs. Earnings", xlab="Education Attainment", ylab="Earnings")
# Earnings of All majors Bachelors
Bachelor<-subset(Datasets, Datasets$SCHL==21 & Datasets$WAGP > 0)
plot(Bachelor$WAGP, pch=20, main="Fig 2. Earnings of Bachelors, All majors", xlab="Total number of respondents", ylab="Earnings")
# Earnings of Computer Science Bachelors
# (FOD1P codes used below, per the plot titles: 2102 CS, 5200 Psychology,
# 6402 History, 6200 Business, 2300 General Education)
par(mfrow=c(1,4))
BachelorCS<-subset(Datasets, Datasets$SCHL==21 & Datasets$WAGP > 0 & Datasets$FOD1P==2102)
plot(BachelorCS$WAGP, pch=20, main="Fig 3a. Earnings of Bachelors, Computer Science", xlab="Total number of respondents", ylab="Earnings")
BachelorPsycho<-subset(Datasets, Datasets$SCHL==21 & Datasets$WAGP > 0 & Datasets$FOD1P==5200)
plot(BachelorPsycho$WAGP, pch=20, main="Fig 3b. Earnings of Bachelors, Psychology", xlab="Total number of respondents", ylab="Earnings")
BachelorHistory<-subset(Datasets, Datasets$SCHL==21 & Datasets$WAGP > 0 & Datasets$FOD1P==6402)
plot(BachelorHistory$WAGP, pch=20, main="Fig 3c. Earnings of Bachelors, History", xlab="Total number of respondents", ylab="Earnings")
BachelorBussiness<-subset(Datasets, Datasets$SCHL==21 & Datasets$WAGP > 0 & Datasets$FOD1P==6200)
plot(BachelorBussiness$WAGP, pch=20, main="Fig 3d. Earnings of Bachelors, Bussiness", xlab="Total number of respondents", ylab="Earnings")
par(mfrow=c(1,2))
BachelorCS<-subset(Datasets, Datasets$SCHL==21 & Datasets$WAGP > 0 & Datasets$FOD1P==2102)
plot(BachelorCS$WAGP, pch=20, ylim=c(1,500000), main="Fig 2a. Earnings of Bachelors-Computer Science", xlab="Total number of respondents", ylab="Earnings")
BachelorEdu<-subset(Datasets, Datasets$SCHL==21 & Datasets$WAGP > 0 & Datasets$FOD1P==2300)
plot(BachelorEdu$WAGP, pch=20, ylim=c(1,500000), main="Fig 2b. Earnings of Bachelors-General Education", xlab="Total number of respondents", ylab="Earnings")
summary(BachelorCS$WAGP)
summary(BachelorEdu$WAGP)
BachelorCS_EDU<-subset(Datasets, Datasets$SCHL==21 & Datasets$WAGP > 0 & (Datasets$FOD1P==2102 | Datasets$FOD1P==2300))
boxplot(BachelorCS_EDU$WAGP~FOD1P, BachelorCS_EDU, main = "Boxplots for Barchelors Comp.Sci & Education", xaxt = "n", xlab = "", ylab = "Earnings")
axis(1, at=1:2, labels=c("CS", "EDU"))
#Plot for differnt degree of the same majors
#CS<-subset(Datasets, (Datasets$SCHL==21 | Datasets$SCHL==22 | Datasets$SCHL==23) & Datasets$WAGP > 0 & Datasets$FOD1P==2102)
CS.Bachelors<-subset(Datasets, Datasets$SCHL==21 & Datasets$WAGP > 0 & Datasets$FOD1P==2102)
CS.Masters<-subset(Datasets, Datasets$SCHL==22 & Datasets$WAGP > 0 & Datasets$FOD1P==2102)
CS.PhDs<-subset(Datasets, Datasets$SCHL==24 & Datasets$WAGP > 0 & Datasets$FOD1P==2102)
plot(CS.Bachelors$WAGP, col=2, xlim=c(1,400),xlab="Samples Size",ylab="Earnings",main="Earnings of CS, All degrees")
par(new=T)
plot(CS.Masters$WAGP, col=4, axes=F,xlab="",ylab="")
par(new=T)
plot(CS.PhDs$WAGP, pch=10, axes=F,xlab="",ylab="")
par(new=T)
# NOTE(review): par("red", "blue", "black") queries nonexistent graphical
# parameters; col=c(2, 4, 1), matching the plots above, was likely intended.
legend(locator(1), legend=c("Bachelor", "Master", "PhD"), col=par("red", "blue", "black"), pch=c(1,1,10))
# Histogram
hist(Datasets$WAGP)
hist(WCollege$WAGP, main = "Histogram of Earnings of people have College degree", xlab = "Earnings", ylab = "Frequency")
hist(HighSchool$WAGP, main = "Histogram of Earnings of people have High School Diploma", xlab = "Earnings", ylab = "Frequency")
# Fit a normal distribution curve to the data
hist(Datasets$WAGP,freq=F)
curve(dnorm(x, mean(Datasets$WAGP), sd(Datasets$WAGP)), add=T)
par(mfrow=c(2,2))
hist(WCollege$WAGP,freq=F, main = "Histogram of Earnings of people have College degree", xlab = "Earnings", ylab = "Frequency")
curve(dnorm(x, mean(WCollege$WAGP), sd(WCollege$WAGP)), add=T)
qqnorm(WCollege$WAGP)
qqline(WCollege$WAGP)
# Conclusion: Data points curve from above the line to below the line and then back to above the line. Data has positive skew/ right-skewed
hist(HighSchool$WAGP,freq=F, main = "Histogram of Earnings of people have High School Diploma", xlab = "Earnings", ylab = "Frequency")
curve(dnorm(x, mean(HighSchool$WAGP), sd(HighSchool$WAGP)), add=T)
qqnorm(HighSchool$WAGP)
qqline(HighSchool$WAGP)
# A normal probability plot to determine whether a sample is drawn from a normal distribution
qqnorm(Datasets$WAGP)
qqline(Datasets$WAGP)
# Conclusion: A sample is not drawn from a normal distribution
# Stem-and-Leaf Plots
stem(Datasets$WAGP)
stem(WCollege$WAGP)
stem(HighSchool$WAGP)
# Bar Charts
barplot(table(Datasets$WAGP, Datasets$SCHL, useNA="ifany"))
barplot(table(WCollege$WAGP, WCollege$SCHL, useNA="ifany"))
# Boxplot
boxplot(Datasets$WAGP~SCHL, Datasets)
boxplot(WCollege$WAGP~SCHL, WCollege)
boxplot(HighSchool$WAGP~SCHL, HighSchool)
#CI & Hypothesis
# Kruskal-Wallis Test
kruskal.test(WCollege$WAGP~SCHL, WCollege)
pairwise.wilcox.test(WCollege$WAGP, WCollege$SCHL)
# Wilcoxon Rank-Sum test
# NOTE(review): `Bussiness` is not defined anywhere in this script; this
# line fails unless the object already exists in the workspace.
wilcox.test(Bussiness$Earnings, mu=60000, alternative="less", conf.level=0.95)
# Build a model to predict a person's income from their degree and their majors
#model<-lm(Datasets$WAGP~SCHL+FOD1P, Datasets)
model<-lm(WCollege$WAGP~SCHL+FOD1P, WCollege)
formula(model)
summary(model)
coef(model)
confint(model, level=0.95)
residuals(model)
# Studentized residuals and fitted values, stored for the residual plots.
WCollege$resids<-rstudent(model)
WCollege$fittedvals<-fitted(model)
# Create residual plots for a model object
plot(resids~WAGP, WCollege)
plot(resids~SCHL, WCollege)
# Leverage
hatvalues(model)
# Plot of the residuals against the leverage
#Cook's Distance
cooks.distance(model)
# Diagnostic panel: residual histogram, plot.lm QQ / residual-vs-fitted
# plots, residual scatterplots, and the which = 4:6 leverage diagnostics.
par(mfrow=c(3,2))
hist(WCollege$resids)
plot(model, which=2)
plot(model, which=1)
plot(resids~WAGP, WCollege)
plot(resids~SCHL, WCollege)
plot(model, which=4)
plot(model, which=5)
plot(model, which=6)
# Predict earnings for SCHL 21 and 24 holders in FOD1P 2300 and 2102
# (bachelor's/doctorate in General Education / Computer Science, per the
# code legend and plot titles earlier in this script).
newdata<-data.frame(SCHL=c(21, 21, 24, 24), FOD1P=c(2300, 2102, 2300, 2102))
newdata$predictions<-predict(model, newdata, interval="confidence", level=0.95)
newdata
|
203d65f0c0e2df237553ea8ac885d23a2c6fecb2 | e714ccf5e7f2ed5e72033cfbf6b563df1f767e73 | /R/quietcat.R | a21bae12facdac0b9e2f6bc6de9331d8c5710d60 | [] | no_license | jonkatz2/monitoR | 666fc2664ea450953fc4c17c2909a4cc652765ec | e38d34308a588541e4df19038c28441de3f1c0a5 | refs/heads/master | 2022-09-09T14:20:15.703095 | 2018-02-14T09:54:14 | 2018-02-14T09:54:14 | 117,285,036 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 72 | r | quietcat.R | # An empty function to allow quiet mode
# No-op sink for quiet mode: accepts any arguments and returns NULL.
quietcat <- function(...) {
  NULL
}
|
fa9cb3ab3091c396c3354596947e60bf1f46219c | 6bc756162a2c9dbc41e5c497bbdebbc95f9f2074 | /R/mulOptPars.R | 27cb75f1f9d4f5fb1c0b584eab906de5f6de7e7e | [] | no_license | cisella/Mulcom | 0be6d52fb89c8417bcdf773a0de2950152454323 | 6e9b6a3a4d3087952c994c9d3e7d42a53c596706 | refs/heads/master | 2020-05-30T06:34:05.135802 | 2019-04-24T10:36:16 | 2019-04-24T10:36:16 | 21,429,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,689 | r | mulOptPars.R | mulOptPars <-
function (opt, ind, ths){
	# Pick an optimal (t, m) parameter pair from slice `ind` of the 3-D
	# FDR / significant-gene-count arrays in `opt`: among cells whose FDR is
	# below the threshold `ths`, take those with the most significant genes,
	# then resolve ties via the dimnames (see below). Returns c(t = ..., m
	# = ...), or an empty vector when nothing passes the threshold.
	if(is.list(opt) & is.vector(ind) & is.vector(ths)){
	coo <- c("NA", "NA")
	# FDR and significant-gene-count matrices for this slice.
	fdr <- opt$FDR[, , ind]
	sg <- opt$sg[, , ind]
	if (length(which(fdr < ths)) == 0) {
		print("No significant genes with chosen threshold")
		return(coo <- vector())
	}
	# Mask out cells at/above the FDR threshold.
	fdr[which(fdr >= ths)] <- 1
	sg[which(fdr >= ths)] <- 0
	sigs <- sg[which(fdr < ths)]
	best <- sg[sg == max(sigs)]
	# Binary matrix marking the cells that reach the maximal count.
	out <- rep(0, length(fdr))
	dim(out) <- dim(fdr)
	out[sg == max(sigs)] <- 1
	inds <- which(out == 1)
	cors <- vector()
	# Translate each linear index back to its (row, column) dimnames; the
	# %% branch handles indices that land exactly at the end of a column.
	for(i in 1:length(inds)) {
		tm <- vector()
		tm[1] <- inds[i]
		if(inds[i]%%dim(fdr)[1] == 0) {
			tm[2] <- it <- dimnames(fdr)[[1]][dim(fdr)[1]]
			tm[3] <- iv <- dimnames(fdr)[[2]][(inds[i] - inds[i]%%dim(fdr)[1])/dim(fdr)[1]]
		}else{
			tm[2] <- it <- dimnames(fdr)[[1]][inds[i]%%dim(fdr)[1]]
			tm[3] <- iv <- dimnames(fdr)[[2]][((inds[i] - inds[i]%%dim(fdr)[1])/dim(fdr)[1])+1]
		}
		cors <- cbind(cors, tm)
	}
	# Keep candidates with the maximal row label, then the minimal column
	# label. NOTE(review): max()/min() here compare dimname strings, so the
	# ordering is lexicographic unless the labels sort like numbers.
	tms <- cors[, cors[2, ] == max(cors[2, ])]
	if(length(tms) > 3) {
		coo <- vector()
		coo[1] <- as.numeric(tms[2, tms[3, ] == min(tms[3, ])])
		coo[2] <- as.numeric(tms[3, tms[3, ] == min(tms[3, ])])
		coo <- as.numeric(coo)
		names(coo) <- c("t", "m")
		return(coo)
	}else{
		coo[1] <- as.numeric(tms[2])
		coo[2] <- as.numeric(tms[3])
		coo <- as.numeric(coo)
		names(coo) <- c("t", "m")
		return(coo)
	}
	}else{
		stop("error in input files", call. = FALSE)
	}
}
|
1d6517afef7503ce7d35e60c67e5c59a1f74f51d | 3ae472e976bc6043cde771271978a1c1273000f8 | /map.R | 98eea3a9a9217795cf231db8d6c534dc71e62964 | [] | no_license | WillHNicholl/LTLA_Covid_Map | c09b0b84456b559cd3805c94e40bda141fdd293b | 4565a0daf03ebbd38dcbee378921673c89e26e06 | refs/heads/main | 2023-02-22T10:07:15.552320 | 2021-01-24T11:17:13 | 2021-01-24T11:17:13 | 331,638,203 | 0 | 0 | null | 2021-01-21T13:38:17 | 2021-01-21T13:34:33 | R | UTF-8 | R | false | false | 4,934 | r | map.R | library(sp)
library(tidyverse)
library(broom)
library(ggfortify)
library(lubridate)
library(maptools)
library(gganimate)
library(rmapshaper)
# Read the UK LTLA-level daily case counts (downloaded from
# coronavirus.data.gov.uk) and keep only lower-tier local authority rows.
covid_age <- read_csv("ltla_2021-01-14.csv")
covid_age <- covid_age %>%
  filter(areaType == "ltla")
# All LTLA names, used to build a complete date x area grid below.
ltlas <- factor(covid_age$areaName)
# Zero-case placeholder rows for every LTLA over the early-pandemic window so
# that areas/dates with no reported cases still appear in the animation.
blank_dates <- expand.grid(date = seq.Date(as.Date("2020-01-30"), as.Date("2020-05-14"), by = "day"),
                           newCasesBySpecimenDate = 0,
                           areaName = ltlas) %>%
  distinct()
covid_age <- full_join(blank_dates, covid_age)
# Data prep ---------------------------------------------------------------
# Weekly case totals per LTLA (weeks floored to their starting day).
# NOTE(review): sum() is called without na.rm = TRUE, so any NA case count in
# a week makes that week's total NA -- confirm that is intended.
df1 <- covid_age %>%
  mutate(week = floor_date(date, unit = "weeks")) %>%
  group_by(week, areaName) %>%
  summarise(total = sum(newCasesBySpecimenDate))
# ggplot(df1, aes(total)) +
#   geom_histogram(binwidth = 50) +
#   scale_y_log10() +
#   scale_x_continuous(breaks = seq(0, 5000, 100)) +
#   theme(axis.text = element_text(angle = 45))
# LTLA boundary polygons (ONS May 2020 boundaries); simplify aggressively so
# the animation renders in reasonable time.
geo <- raster::shapefile("Local_Authority_Districts__May_2020__Boundaries_UK_BFE.shp")
geo1 <- ms_simplify(geo, keep = 0.0005) # fine
# geo2 <- ms_simplify(geo, keep = 0.0001) # bit weak but passable
# geo3 <- ms_simplify(geo, keep = 0.00001) # starts to look like Porygon
# plot(geo1)
# Align the shapefile's area-name column with the case data's areaName.
geo1@data <- geo1@data %>%
  rename(areaName = lad20nm)
# Harmonize areas that the case data reports under combined or different
# names (the regex alternations map either constituent to the combined name).
geo1@data <- geo1@data %>%
  mutate(areaName = str_replace(areaName, c("Cornwall|Isles of Scilly"), "Cornwall and Isles of Scilly")) %>%
  mutate(areaName = str_replace(areaName, "Na h-Eileanan Siar", "Comhairle nan Eilean Siar")) %>%
  mutate(areaName = str_replace(areaName, c("Hackney|City of London"), "Hackney and City of London")) %>%
  mutate(areaName = str_replace(areaName, "Buckinghamshire", "Chiltern"))
#gpclibPermit()
# Flatten the polygons to a plain data frame keyed by area name, then attach
# the weekly totals to each polygon vertex row.
map.df <- fortify(geo1, region ="areaName")
# merge data
map.df <- left_join(map.df, df1, by=c('id'='areaName'))
# Print areas that ended up with no matching case data, for inspection.
map.df %>% select(id, total) %>% subset(is.na(total)) %>% distinct()
# Animated choropleth of weekly totals, one frame sequence per week.
gg <- ggplot() +
  geom_polygon(data = map.df,
               aes(x = long, y = lat, group = group, fill = total),
               size = 0.25,
               colour = NA) +
  # Pseudo-log fill scale keeps low counts distinguishable while still
  # spanning the multi-thousand peaks; explicit breaks label the legend.
  scale_fill_viridis_c(option = "plasma", trans = scales::pseudo_log_trans(sigma = 0.1), breaks=c(0,
                                                                                                  5,
                                                                                                  10,
                                                                                                  30,
                                                                                                  50,
                                                                                                  100,
                                                                                                  150,
                                                                                                  500,
                                                                                                  750,
                                                                                                  1000,
                                                                                                  2000,
                                                                                                  3000,
                                                                                                  4000)) +
  # 1:1 aspect ratio so the map is not distorted.
  coord_fixed(1) +
  theme_void() +
  theme(panel.background = element_blank(),
        plot.title = element_text(size = 20),
        plot.title.position = "plot",
        plot.subtitle = element_text(size = 18),
        plot.caption = element_text(size = 14, hjust = 0),
        plot.caption.position = "plot",
        legend.title = element_text(size = 18),
        legend.text = element_text(size = 16),
        axis.title = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank(),
        plot.margin = margin(2,1,2,1,"cm")) +
  # Animate across weeks; {closest_state} in the subtitle shows the current
  # week label during transitions.
  transition_states(week, transition_length = 3, state_length = 1) +
  ease_aes('cubic-in-out') +
  enter_fade() +
  exit_fade() +
  labs(title='LTLA Covid-19 Cases in the UK',
       subtitle='Total Number of Cases Week Ending: {closest_state}',
       caption='
       Calculated as a weekly sum of total cases in an Lower Tier Local Authority
       N.B. Mass testing begins in May 2020.
       Cases data from: https://coronavirus.data.gov.uk/details/download
       Geospatial data from: https://geoportal.statistics.gov.uk/datasets (May 2020 boundaries)') +
  guides(fill = guide_legend(title="Weekly Cases", reverse = T))
#gg
# Render the animation to a GIF; start/end pauses hold the first/last frame.
animate(gg,
        fps = 25,
        duration = 30,
        start_pause = 50,
        end_pause = 50,
        width = 1200,
        height = 900,
        renderer = gifski_renderer("gganim_map_covid.gif"))
# rm(covid_age, df1, geo, map.df)
# gc()
|
49da271d28bf9fa5afcc5ae177f079df4852cf9a | ebb314b086f96245ee9e776b8301b261bae78e70 | /R/model-tobit.R | d279142c19bae73360172f73797fa88c494acfc8 | [] | no_license | mbsabath/Zelig | 65e153648ff89790e758b104ffb0de5c1e376c29 | c90d171f11fde4042710353883f2ccb6e16e471e | refs/heads/master | 2021-01-25T04:21:44.942660 | 2017-06-06T20:33:24 | 2017-06-06T20:33:24 | 93,430,501 | 0 | 1 | null | 2017-07-12T15:14:34 | 2017-06-05T17:39:14 | R | UTF-8 | R | false | false | 3,475 | r | model-tobit.R | #' Linear Regression for a Left-Censored Dependent Variable
#'
#' Vignette: \url{http://docs.zeligproject.org/articles/zelig_tobit.html}
#' @import methods
#' @export Zelig-tobit
#' @exportClass Zelig-tobit
#'
#' @include model-zelig.R
# Reference class for Zelig's tobit model.  Inherits the generic Zelig
# machinery and adds the censoring bounds of the latent outcome as fields:
# `below` is the left (lower) and `above` the right (upper) censoring point.
ztobit <- setRefClass("Zelig-tobit",
                      contains = "Zelig",
                      fields = list(above = "numeric",
                                    below = "numeric"))
ztobit$methods(
  # Populate model metadata.  callSuper() runs the parent Zelig initializer
  # first; the fields below identify the model and point .self$fn at the
  # underlying fitting function (AER::tobit).
  initialize = function() {
    callSuper()
    .self$name <- "tobit"
    .self$authors <- "Kosuke Imai, Gary King, Olivia Lau"
    .self$packageauthors <- "Christian Kleiber, and Achim Zeileis"
    .self$year <- 2011
    .self$description = "Linear regression for Left-Censored Dependent Variable"
    .self$fn <- quote(AER::tobit)
    # JSON
    # NOTE(review): "continous" is misspelled, but this string may be a key
    # consumed downstream (e.g. in JSON export) -- confirm before fixing.
    .self$outcome <- "continous"
    .self$wrapper <- "tobit"
    .self$acceptweights <- TRUE
  }
)
ztobit$methods(
  # Fit the tobit model.  `below`/`above` give the censoring bounds of the
  # outcome; they are stored on the object and translated into AER::tobit's
  # `left`/`right` arguments before the call is forwarded to the parent
  # Zelig fitting method.
  zelig = function(formula, ..., below = 0, above = Inf,
                   robust = FALSE, data, weights = NULL, by = NULL, bootstrap = FALSE) {
    .self$zelig.call <- match.call(expand.dots = TRUE)
    .self$model.call <- .self$zelig.call
    .self$below <- below
    .self$above <- above
    # AER::tobit expects `left`/`right`, not `below`/`above`: drop the Zelig
    # argument names from the stored call and substitute the AER ones.
    .self$model.call$below <- NULL
    .self$model.call$above <- NULL
    .self$model.call$left <- below
    .self$model.call$right <- above
    callSuper(formula = formula, data = data, ..., weights = weights, by = by, bootstrap = bootstrap)
    if (!robust) {
      # When a robust fit was not requested, refit per `by` group with
      # robust = TRUE and record the robust variance as a test statistic.
      # fn2 injects the group's data into the stored (unevaluated) call.
      fn2 <- function(fc, data) {
        fc$data <- data
        return(fc)
      }
      robust.model.call <- .self$model.call
      robust.model.call$robust <- TRUE
      robust.zelig.out <- .self$data %>%
        group_by_(.self$by) %>%
        do(z.out = eval(fn2(robust.model.call, quote(as.data.frame(.))))$var )
      .self$test.statistics <- list(robust.se = robust.zelig.out$z.out)
    }
  }
)
ztobit$methods(
  # Draw simulated parameters for computing quantities of interest.
  # method = "mvn": draw coefficients and log(scale) jointly from a
  # multivariate normal centered at the estimates, exponentiating the last
  # column back to the scale.  method = "point": return the point estimates.
  param = function(z.out, method="mvn") {
    if(identical(method,"mvn")){
      mu <- c(coef(z.out), log(z.out$scale))
      simfull <- mvrnorm(n = .self$num, mu = mu, Sigma = vcov(z.out))
      # All but the last column are coefficient draws.
      simparam.local <- as.matrix(simfull[, -ncol(simfull)])
      # Last column is log(scale); exponentiate to recover the scale itself.
      simalpha <- exp(as.matrix(simfull[, ncol(simfull)]))
      simparam.local <- list(simparam = simparam.local, simalpha = simalpha)
      return(simparam.local)
    } else if(identical(method,"point")){
      # NOTE(review): here simalpha is log(z.out$scale), whereas the "mvn"
      # branch returns the scale itself (exponentiated) -- confirm this
      # asymmetry is intended by downstream consumers.
      return(list(simparam = t(as.matrix(coef(z.out))), simalpha = log(z.out$scale) ))
    }
  }
)
ztobit$methods(
  # Compute quantities of interest from simulated parameters.
  # `simparam$simparam` holds coefficient draws, `simparam$simalpha` the
  # scale draws, and `mm` is the model matrix of the counterfactual setting.
  # ev: expected value of the censored outcome via the standard tobit
  # formula pnorm(mu/sd) * (mu + sd * lambda), where lambda is the inverse
  # Mills ratio.  pv: predicted values drawn normally around ev and clipped
  # to the model's censoring bounds.
  # (Cleanup: removed the unused local `alpha` and the dead `pv <- ev`
  # assignment that was immediately overwritten.)
  qi = function(simparam, mm) {
    Coeff <- simparam$simparam %*% t(mm)
    SD <- simparam$simalpha
    lambda <- dnorm(Coeff / SD) / (pnorm(Coeff / SD))
    ev <- pnorm(Coeff / SD) * (Coeff + SD * lambda)
    pv <- matrix(nrow = nrow(ev), ncol = ncol(ev))
    for (j in seq_len(ncol(ev))) {
      pv[, j] <- rnorm(nrow(ev), mean = ev[, j], sd = SD)
      # Censor the draws at the stored bounds.
      pv[, j] <- pmin(pmax(pv[, j], .self$below), .self$above)
    }
    return(list(ev = ev, pv = pv))
  }
)
ztobit$methods(
  # Monte Carlo test function for a tobit data-generating process censored
  # from below at zero.  With sim = TRUE, draws latent normal outcomes
  # around b0 + b1 * x and censors them at 0; with sim = FALSE, returns the
  # analytic expected value of the censored outcome.
  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
    mu <- b0 + b1 * x
    # Latent (uncensored) outcome draws.
    ystar <- rnorm(n=length(x), mean=mu, sd=alpha)
    if(sim){
      y <- (ystar>0) * ystar # censoring from below at zero
      return(y)
    }else{
      # E[Y* | Y* > 0] via the inverse Mills ratio, then weighted by the
      # probability of being uncensored to get E[Y].
      y.uncensored.hat.tobit<- mu + dnorm(mu, mean=0, sd=alpha)/pnorm(mu, mean=0, sd=alpha)
      y.hat.tobit<- y.uncensored.hat.tobit * (1- pnorm(0, mean=mu, sd=alpha) ) # expected value of censored outcome
      return(y.hat.tobit)
    }
  }
)
|
f16cbd3571c2100e8fd24fbc6eb3cb449d9e3747 | a04fc9475bef432808783656e745d9cc8d8f4806 | /cachematrix.R | 94c901388dbb6ba3552573ed4aa643040dc9cff1 | [] | no_license | FathyEltanany/ProgrammingAssignment2 | 5ec7c2f21a94220a690f5813b8487a99efe998c2 | 8cdc8d68cb247f2957f1dc07ec24465513c86238 | refs/heads/master | 2021-01-18T16:18:40.512462 | 2017-03-30T18:35:06 | 2017-03-30T18:35:06 | 86,733,659 | 0 | 0 | null | 2017-03-30T18:09:01 | 2017-03-30T18:09:00 | null | UTF-8 | R | false | false | 778 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do. makeCacheMatrix() wraps a matrix in accessor functions that
## can cache its inverse; cacheSolve() returns the cached inverse when one is
## available and otherwise computes, stores, and returns it.
## makeCacheMatrix: wrap a matrix `x` in a list of accessor functions so its
## inverse can be cached.  `set` replaces the matrix and invalidates the
## cache; `get` returns the matrix; `setInverse`/`getInverse` store and
## retrieve the cached inverse (NULL until set).
## Fix: `set` and `setInverse` previously used local `<-`, so the new matrix
## and cached inverse were discarded when the function returned; `<<-`
## assigns in the enclosing environment so the state actually persists.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  set <- function(y) {
    x <<- y
    inverse <<- NULL  # new matrix invalidates any previously cached inverse
  }
  get <- function() x
  setInverse <- function(inv) inverse <<- inv
  getInverse <- function() inverse
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix.
## If a cached inverse exists it is returned directly (with a message);
## otherwise the inverse is computed with solve(), stored via setInverse,
## and returned.  Extra arguments in `...` are passed on to solve().
## Fixes: `data < x$get()` was a comparison, not an assignment (so `data`
## was never defined), and `solve(x, ...)` tried to invert the wrapper list
## rather than the matrix it holds.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getInverse()
  if (!is.null(inverse)) {
    message("getting cached data.")
    return(inverse)
  }
  data <- x$get()
  inverse <- solve(data, ...)
  x$setInverse(inverse)
  inverse
}
|
15a076b7cd1a24950551383d019338cd74a1fb71 | 27f42cb2f7935016ca9b0e5637aebd272e792d2a | /R/nauf_trms_fr_x_z.R | 94e1933eb63bc423882ce2d248a18b8d0e7a832f | [] | no_license | runehaubo/nauf | 7f6ebc72d0718824a3ca1fd557c91197ee7f60e7 | af3a9adddcfcd123c91c1af86987f1907f69dc32 | refs/heads/master | 2020-03-06T16:55:18.158706 | 2017-10-05T02:32:28 | 2017-10-05T02:32:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 58,001 | r | nauf_trms_fr_x_z.R |
#' Not applicable unordered factor contrasts.
#'
#' The \code{nauf_contrasts} function returns a list of contrasts applied to
#' factors in an object created using a function in the \code{nauf} package.
#' See 'Details'.
#'
#' In the \code{nauf} package, \code{NA} values are used to encode when an
#' unordered factor is truly \emph{not applicable}. This is different than
#' "not available" or "missing at random". The concept applies only to
#' unordered factors, and indicates that the factor is simply not meaningful
#' for an observation, or that while the observation may technically be
#' definable by one of the factor levels, the interpretation of its belonging to
#' that level isn't the same as for other observations. For imbalanced
#' observational data, coding unordered factors as \code{NA} may also be used to
#' control for a factor that is only contrastive within a subset of the data due
#' to the sampling scheme. To understand the output of the
#' \code{nauf_contrasts} function, the treatment of unordered factor contrasts
#' in the \code{nauf} package will first be discussed, using the
#' \code{\link{plosives}} dataset included in the package as an example.
#'
#' In the \code{\link{plosives}} dataset, the factor \code{ling} is coded as
#' either \code{Monolingual}, indicating the observation is from a monolingual
#' speaker of Spanish, or \code{Bilingual}, indicating the observation is from a
#' Spanish-Quechua bilingual speaker. The \code{dialect} factor indicates the
#' city the speaker is from (one of \code{Cuzco}, \code{Lima}, or
#' \code{Valladolid}). The Cuzco dialect has both monolingual and bilingual
#' speakers, but the Lima and Valladolid dialects have only monolingual
#' speakers. In the case of Valladolid, the dialect is not in contact with
#' Quechua, and so being monolingual in Valladolid does not mean the same
#' thing as it does in Cuzco, where it indicates
#' \emph{monolingual as opposed to bilingual}. Lima has Spanish-Quechua
#' bilingual speakers, but the research questions the dataset serves to answer
#' are specific to monolingual speakers of Spanish in Lima. If we leave the
#' \code{ling} factor coded as is in the dataset and use
#' \code{\link[standardize]{named_contr_sum}} to create the contrasts, we obtain
#' the following:
#'
#' \tabular{llrrr}{
#' dialect \tab ling \tab dialectCuzco \tab dialectLima \tab lingBilingual \cr
#' Cuzco \tab Bilingual \tab 1 \tab 0 \tab 1 \cr
#' Cuzco \tab Monolingual \tab 1 \tab 0 \tab -1 \cr
#' Lima \tab Monolingual \tab 0 \tab 1 \tab -1 \cr
#' Valladolid \tab Monolingual \tab -1 \tab -1 \tab -1
#' }
#'
#' With these contrasts, the regression coefficient \code{dialectLima} would not
#' represent the difference between the intercept and the mean of the Lima
#' dialect; the mean of the Lima dialect would be the
#' \code{(Intercept) + dialectLima - lingBilingual}. The interpretation of the
#' \code{lingBilingual} coefficient is similarly affected, and the intercept
#' term averages over the predicted value for the non-existent groups of Lima
#' bilingual speakers and Valladolid bilingual speakers, losing the
#' interpretation as the corrected mean (insofar as there can be a corrected
#' mean in this type of imbalanced data). With the \code{nauf} package, we can
#' instead code non-Cuzco speakers' observations as \code{NA} for the
#' \code{ling} factor (i.e. execute
#' \code{plosives$ling[plosives$dialect != "Cuzco"] <- NA}). These \code{NA}
#' values are allowed to pass into the regression's model matrix, and are then
#' set to \code{0}, effectively creating the following contrasts:
#'
#' \tabular{llrrr}{
#' dialect \tab ling \tab dialectCuzco \tab dialectLima \tab lingBilingual \cr
#' Cuzco \tab Bilingual \tab 1 \tab 0 \tab 1 \cr
#' Cuzco \tab Monolingual \tab 1 \tab 0 \tab -1 \cr
#' Lima \tab NA \tab 0 \tab 1 \tab 0 \cr
#' Valladolid \tab NA \tab -1 \tab -1 \tab 0
#' }
#'
#' Because sum contrasts are used, a value of \code{0} for a dummy variable
#' averages over the effect of the factor, and the coefficient
#' \code{lingBilingual} only affects the predicted value for observations where
#' \code{dialect = Cuzco}. In a regression fit with these contrasts, the
#' coefficient \code{dialectLima} represents what it should, namely the
#' difference between the intercept and the mean of the Lima dialect, and the
#' intercept is again the corrected mean. The \code{lingBilingual} coefficient
#' is now the difference between Cuzco bilingual speakers and the corrected mean
#' \emph{of the Cuzco dialect}, which is \code{(Intercept) + dialectCuzco}.
#' These \code{nauf} contrasts thus allow us to model all of the data in a
#' single model without sacrificing the interpretability of the results. In
#' sociolinguistics, this method is called \emph{slashing} due to the use of a
#' forward slash in GoldVarb to indicate that a factor is not applicable.
#'
#' This same methodology can be applied to other parts of the
#' \code{\link{plosives}} dataset where a factor's interpretation is the same
#' for all observations, but is only contrastive within a subset of the data due
#' to the sampling scheme. The \code{age} and \code{ed} factors (speaker age
#' group and education level, respectively) are factors which can apply to
#' speakers regardless of their dialect, but in the dataset they are only
#' contrastive within the Cuzco dialect; all the Lima and Valladolid speakers
#' are 40 years old or younger with a university education (in the case of
#' Valladolid, the data come from an already-existing corpus; and in the case of
#' Lima, the data were collected as part of the same dataset as the Cuzco data,
#' but as a smaller control group). These factors can be treated just as the
#' \code{ling} factor by setting them to \code{NA} for observations from Lima
#' and Valladolid speakers. Similarly, there is no read speech data for the
#' Valladolid speakers, and so \code{spont} could be coded as \code{NA} for
#' observations from Valladolid speakers.
#'
#' Using \code{NA} values can also allow the inclusion of a random effects
#' structure which only applies to a subset of the data. The
#' \code{\link{plosives}} dataset has data from both read (\code{spont = FALSE};
#' only Cuzco and Lima) and spontaneous (\code{spont = TRUE}; all three
#' dialects) speech. For the read speech, there are exactly repeated measures
#' on 54 items, as indicated by the \code{item} factor. For the
#' spontaneous speech, there are not exactly repeated measures, and so in this
#' subset, \code{item} is coded as \code{NA}. In a regression fit using
#' \code{nauf_lmer}, \code{nauf_glmer}, or \code{nauf_glmer.nb} with \code{item}
#' as a grouping factor, the random effects model matrix is created for the read
#' speech just as it normally is, and for spontaneous speech observations all of
#' the columns are set to \code{0} so that the \code{item} effects only affect
#' the fitted values for read speech observations. In this way, the noise
#' introduced by the read speech items can be accounted for while still
#' including all of the data in one model, and the same random effects for
#' \code{speaker} can apply to all observations (both read and spontaneous),
#' which will lead to a more accurate estimation of the fixed, speaker, and item
#' effects since more information is available than if the read and spontaneous
#' speech were analyzed in separate models.
#'
#' There are two situations in which unordered factors will need more than one set
#' of contrasts: (1) when an unordered factor with \code{NA} values interacts
#' with another unordered factor, and some levels are collinear with \code{NA};
#' and (2) when an unordered factor is included as a slope for a random effects
#' grouping factor that has \code{NA} values, but only a subset of the levels
#' for the slope factor occur when the grouping factor is not \code{NA}. As an
#' example of an interaction requiring new contrasts, consider the interaction
#' \code{dialect * spont} (that is, suppose we are interested in whether the
#' effect of \code{spont} is different for Cuzco and Lima). We code
#' \code{spont} as \code{NA} when \code{dialect = Valladolid}, as mentioned
#' above. This gives the following contrasts for the main effects:
#'
#' \tabular{llrrr}{
#' dialect \tab spont \tab dialectCuzco \tab dialectLima \tab spontTRUE \cr
#' Cuzco \tab TRUE \tab 1 \tab 0 \tab 1 \cr
#' Cuzco \tab FALSE \tab 1 \tab 0 \tab -1 \cr
#' Lima \tab TRUE \tab 0 \tab 1 \tab 1 \cr
#' Lima \tab FALSE \tab 0 \tab 1 \tab -1 \cr
#' Valladolid \tab NA \tab -1 \tab -1 \tab 0
#' }
#'
#' If we simply multiply these \code{dialect} and \code{spont} main effect
#' contrasts together to obtain the contrasts for the interaction (which is what
#' is done in the default \code{\link[stats]{model.matrix}} method), we get
#' following contrasts:
#'
#' \tabular{llrr}{
#' dialect \tab spont \tab dialectCuzco:spontTRUE \tab dialectLima:spontTRUE \cr
#' Cuzco \tab TRUE \tab 1 \tab 0 \cr
#' Cuzco \tab FALSE \tab -1 \tab 0 \cr
#' Lima \tab TRUE \tab 0 \tab 1 \cr
#' Lima \tab FALSE \tab 0 \tab -1 \cr
#' Valladolid \tab NA \tab 0 \tab 0
#' }
#'
#' However, these contrasts introduce an unnecessary parameter to the model
#' which causes collinearity with the main effects since
#' \code{spontTRUE = dialectCuzco:spontTRUE + dialectLima:spontTRUE} in all
#' cases. The functions in the \code{nauf} package automatically recognize when
#' this occurs, and create a second set of contrasts for \code{dialect} in which
#' the \code{Valladolid} level is treated as if it were \code{NA} (through an
#' additional call to \code{\link[standardize]{named_contr_sum}}):
#'
#' \tabular{lr}{
#' dialect \tab dialect.c2.Cuzco \cr
#' Cuzco \tab 1 \cr
#' Lima \tab -1 \cr
#' Valladolid \tab 0
#' }
#'
#' This second set of \code{dialect} contrasts is only used when it needs to be.
#' That is, in this case, these contrasts would be used in the creation of the
#' model matrix columns for the interaction term \code{dialect:spont} term,
#' but not in the creation of the model matrix columns for the main effect terms
#' \code{dialect} and \code{spont}, and when the second set of contrasts is
#' used, \code{.c2.} will appear between the name of the factor and the level so
#' it can be easily identified:
#'
#' \tabular{llrrrr}{
#' dialect \tab spont \tab dialectCuzco \tab dialectLima \tab spontTRUE \tab dialect.c2.Cuzco:spontTRUE \cr
#' Cuzco \tab TRUE \tab 1 \tab 0 \tab 1 \tab 1 \cr
#' Cuzco \tab FALSE \tab 1 \tab 0 \tab -1 \tab -1 \cr
#' Lima \tab TRUE \tab 0 \tab 1 \tab 1 \tab -1 \cr
#' Lima \tab FALSE \tab 0 \tab 1 \tab -1 \tab 1 \cr
#' Valladolid \tab NA \tab -1 \tab -1 \tab 0 \tab 0
#' }
#'
#' Turning now to an example of when a random slope requires new contrasts,
#' consider a random \code{item} slope for \code{dialect}. Because
#' \code{dialect = Valladolid} only when \code{item} is \code{NA}, using the
#' main effect contrasts for \code{dialect} for the \code{item} slope would
#' result in collinearity with the \code{item} intercept in the random effects
#' model matrix:
#'
#' \tabular{llrrr}{
#' dialect \tab item \tab i01:(Intercept) \tab i01:dialectCuzco \tab i01:dialectLima \cr
#' Cuzco \tab i01 \tab 1 \tab 1 \tab 0 \cr
#' Cuzco \tab i02 \tab 0 \tab 0 \tab 0 \cr
#' Cuzco \tab NA \tab 0 \tab 0 \tab 0 \cr
#' Lima \tab i01 \tab 1 \tab 0 \tab 1 \cr
#' Lima \tab i02 \tab 0 \tab 0 \tab 0 \cr
#' Lima \tab NA \tab 0 \tab 0 \tab 0 \cr
#' Valladolid \tab NA \tab 0 \tab 0 \tab 0
#' }
#'
#' This table shows the random effects model matrix for \code{item i01} for all
#' possible scenarios, with the rows corresponding to (in order): a Cuzco
#' speaker producing the read speech plosive in \code{item i01}, a Cuzco speaker
#' producing a read speech plosive in another \code{item}, a Cuzco speaker
#' producing a spontaneous speech plosive, a Lima speaker producing the read
#' speech plosive in \code{item i01}, a Lima speaker producing a read speech
#' plosive in another \code{item}, a Lima speaker producing a spontaneous speech
#' plosive, and a Valladolid speaker producing a spontaneous speech plosive.
#' With the main effect contrasts for \code{dialect},
#' \code{i01:(Intercept) = i01:dialectCuzco + i01:dialectLima} in all cases,
#' causing collinearity. Because this collinearity exists for all read speech
#' item random effects model matrices, the model is unidentifiable. The
#' functions in the \code{nauf} package automatically detect that this is the
#' case, and remedy the situation by creating a new set of contrasts used for
#' the \code{item} slope for \code{dialect}:
#'
#' \tabular{llrr}{
#' dialect \tab item \tab i01:(Intercept) \tab i01:dialect.c2.Cuzco \cr
#' Cuzco \tab i01 \tab 1 \tab 1 \cr
#' Cuzco \tab i02 \tab 0 \tab 0 \cr
#' Cuzco \tab NA \tab 0 \tab 0 \cr
#' Lima \tab i01 \tab 1 \tab -1 \cr
#' Lima \tab i02 \tab 0 \tab 0 \cr
#' Lima \tab NA \tab 0 \tab 0 \cr
#' Valladolid \tab NA \tab 0 \tab 0
#' }
#'
#' If we were to, say, fit the model
#' \code{intdiff ~ dialect * spont + (1 + dialect | item)}, then \code{nauf} would
#' additionally recognize that the same set of altered contrasts for
#' \code{dialect} are required in the fixed effects interaction term
#' \code{dialect:spont} and the \code{item} slope for \code{dialect}, and both
#' would be labeled with \code{.c2.}. In other (rare) cases, more than two sets
#' of contrasts may be required for a factor, in which case they would have
#' \code{.c3.}, \code{.c4.} and so on.
#'
#' In this way, users only need to code unordered factors as \code{NA} in the
#' subsets of the data where they are not contrastive, and \code{nauf} handles
#' the rest. Having described in detail what \code{nauf} contrasts are, we now
#' return to the \code{nauf_contrasts} function. The function can be used on
#' objects of any \code{nauf} model, a \code{\link{nauf.terms}} object, or a
#' model frame made by \code{\link{nauf_model.frame}}. It returns a named list
#' with a matrix for each
#' unordered factor in \code{object} which contains all contrasts associated the
#' factor. For the model \code{intdiff ~ dialect * spont + (1 + dialect | item)},
#' the result would be a list with elements \code{dialect} and \code{spont} that
#' contain the following matrices (see the 'Examples' section for code to
#' generate this list):
#'
#' \tabular{lrrr}{
#' dialect \tab Cuzco \tab Lima \tab .c2.Cuzco \cr
#' Cuzco \tab 1 \tab 0 \tab 1 \cr
#' Lima \tab 0 \tab 1 \tab -1 \cr
#' Valladolid \tab -1 \tab -1 \tab 0
#' }
#'
#' \tabular{lr}{
#' spont \tab TRUE \cr
#' TRUE \tab 1 \cr
#' FALSE \tab -1 \cr
#' NA \tab 0
#' }
#'
#' The default is for the list of contrasts to only contain information about
#' unordered factors. If \code{inc_ordered = TRUE}, then the contrast matrices
#' for any ordered factors in \code{object} are also included.
#'
#' @param object A \code{\link{nauf.terms}} object, a model frame made with
#' \code{\link{nauf_model.frame}}, a \code{nauf.glm} model (see
#' \code{\link{nauf_glm}}), or a \code{\linkS4class{nauf.lmerMod}} or
#' \code{\linkS4class{nauf.glmerMod}} model.
#' @param inc_ordered A logical indicating whether or not ordered factor
#' contrasts should also be returned (default \code{FALSE}).
#'
#' @return A named list of contrasts for all unordered factors in \code{object},
#' and also optionally contrasts for ordered factors in \code{object}. See
#' 'Details'.
#'
#' @examples
#' dat <- plosives
#' dat$spont[dat$dialect == "Valladolid"] <- NA
#'
#' mf <- nauf_model.frame(intdiff ~ dialect * spont + (1 + dialect | item), dat)
#' nauf_contrasts(mf)
#'
#' mf <- nauf_model.frame(intdiff ~ dialect * spont + (1 + dialect | item),
#' dat, ncs_scale = 0.5)
#' nauf_contrasts(mf)
#'
#' @section Note: The argument \code{ncs_scale} changes what value is used for
#' the sum contrast deviations. The default value of \code{1} would give the
#' contrast matrices in 'Details'. A value of \code{ncs_scale = 0.5}, for example,
#' would result in replacing \code{1} with \code{0.5} and \code{-1} with
#' \code{-0.5} in all of the contrast matrices.
#'
#' @seealso \code{\link{nauf_model.frame}}, \code{\link{nauf_model.matrix}},
#' \code{\link{nauf_glFormula}}, \code{\link{nauf_glm}}, and
#' \code{\link{nauf_glmer}}.
#'
#' @export
nauf_contrasts <- function(object, inc_ordered = FALSE) {
  # Pull the stored variable information (levels, NA flags, contrast scale).
  info <- nauf.info(object)
  uf_names <- names(info$uf)
  # One expanded contrast matrix per unordered factor, combining all of its
  # contrast sets and adding a zero row for NA where applicable.
  contrasts <- mlapply(levs = info$uf, hasna = info$hasna[uf_names],
    same = list(ncs = info$ncs_scale), fun = expand_contr)
  # Optionally append the stored contrast matrices for ordered factors.
  if (inc_ordered && length(info$of)) {
    contrasts <- c(contrasts, lapply(info$of, `[[`, "contrasts"))
  }
  contrasts
}
# Build the full contrast matrix for one unordered factor.
#
# `levs` is a list of character vectors: element 1 holds the levels for the
# main-effect contrasts, and elements 2..n (if any) the level subsets that
# need the additional ".c2.", ".c3.", ... contrast sets (see ?nauf_contrasts).
# `ncs` is the sum-contrast deviation scale passed to
# standardize::named_contr_sum, and `hasna` indicates whether the factor has
# NA values in the data.
expand_contr <- function(levs, ncs, hasna) {
  # One sum-contrast matrix per level set.
  contr <- lapply(levs, standardize::named_contr_sum, scale = ncs)
  if ((n <- length(contr)) > 1) {
    # Expand the additional sets to the full level set of the first matrix,
    # zero-filling rows for levels absent from each set and tagging columns
    # with ".c2.", ".c3.", ...  (mlapply is a project helper; it appears to
    # map add_contr_zeros over mat/cj with shared args in `same` -- confirm.)
    contr[2:n] <- mlapply(mat = contr[2:n], cj = 2:n,
      same = list(rn = rownames(contr[[1]])), fun = add_contr_zeros)
  }
  # Combine all contrast sets column-wise into one matrix.
  contr <- do.call(cbind, contr)
  if (hasna) {
    # NA level gets a row of zeros so NA observations average over the factor.
    contr <- rbind(contr, 0)
    rownames(contr)[nrow(contr)] <- NA
  }
  return(contr)
}
# Expand a contrast matrix `mat` to the full level set `rn`, zero-filling
# rows for levels not present in `mat`, and tag its column names with the
# contrast-set prefix ".c<cj>." (e.g. ".c2.Cuzco").
add_contr_zeros <- function(mat, cj, rn) {
  tagged_names <- paste0(".c", cj, ".", colnames(mat))
  # Start from an all-zero matrix over every level in `rn`, then copy the
  # original contrast rows into place by name.
  out <- matrix(0, nrow = length(rn), ncol = length(tagged_names),
                dimnames = list(rn, tagged_names))
  out[rownames(mat), ] <- mat
  out
}
#' Class for \code{terms} objects which contain information about \code{nauf} contrasts.
#'
#' When \code{\link{nauf_model.frame}} is called, a
#' \code{nauf.frame} is returned, and this object's \code{terms}
#' attribute has the (S3) class \code{nauf.terms}. The \code{nauf.terms} object
#' has an attribute \code{nauf.info} which contains all of the information
#' necessary to implement \code{\link{nauf_contrasts}} in regression.
#'
#' The \code{nauf.info} attribute is a list with the following elements:
#' \describe{
#' \item{formula}{The \code{formula} argument to \code{\link{nauf_model.frame}}
#' with double-bars expanded.}
#' \item{resp}{The name of the response variable.}
#' \item{groups}{A named list of random effects grouping factor levels.}
#' \item{uf}{A named list with an elment for each unordered factor. Each of
#' these elements is a list of character vectors indicating the names of the
#' levels that correspond to the elment's number's set of contrasts; i.e.
#' the first element represents the main effect contrasts for the factor,
#' the second element (if present) represents \code{.c2.} contrasts, and so
#' on (see \code{\link{nauf_contrasts}}).}
#' \item{of}{A named list with an element for each ordered factor containing
#' its levels and contrasts.}
#' \item{num}{A named list with an element for each numeric vector variable
#' containing the variables' means.}
#' \item{mat}{A named list with an element for each matrix variable containing
#' the variables' colmun means.}
#' \item{extras}{A character vector giving the names of offsets, weights,
#' mustart, etc.}
#' \item{cc}{A list of contrast changes required as described in
#' \code{\link{nauf_contrasts}}. The first element is a list of the changes
#' required for the fixed effects. If the model has random effects, then
#' there is an additional element for each element of the list returned by
#' \code{\link[lme4]{findbars}}. Each element of \code{cc} is a named list
#' indicating which contrasts in the \code{uf} element of \code{nauf.info}
#' should be used.}
#' \item{hasna}{A named logical vector with an entry for each variable
#' indicating whether nor not the variable contains \code{NA} values.}
#' \item{ncs_scale}{The \code{ncs_scale} argument from the call to
#' \code{\link{nauf_model.frame}}.}
#' }
#'
#' @seealso \code{\link{nauf_contrasts}} and \code{\link{nauf_model.frame}}.
#'
#' @name nauf.terms
NULL
#' @export
# S3 model.frame method for 'nauf.formula': forwards the entire call to
# nauf_model.frame(), which applies nauf contrasts.  match.call() captures
# the caller's arguments unevaluated; replacing the function symbol and
# re-evaluating in the parent frame preserves non-standard evaluation
# semantics (e.g. of `subset`).
model.frame.nauf.formula <- function(formula, data = NULL, subset = NULL,
                                     na.action = na.pass,
                                     drop.unused.levels = TRUE, xlev = NULL,
                                     ncs_scale = attr(formula, "standardized.scale"),
                                     ...) {
  mc <- match.call()
  # Redirect dispatch to nauf_model.frame, keeping all arguments as given.
  mc[[1]] <- quote(nauf::nauf_model.frame)
  return(eval(mc, parent.frame()))
}
#' @export
# S3 model.frame method for 'nauf.terms': rebuilds a model frame (possibly
# for new data) using the variable information stored when the original
# nauf.frame was created, so factor levels and contrasts line up with the
# fitted model.
model.frame.nauf.terms <- function(formula, data = NULL, subset = NULL,
                                   na.action = na.pass,
                                   drop.unused.levels = TRUE, xlev = NULL,
                                   ncs_scale = attr(formula, "standardized.scale"),
                                   ...) {
  mc <- match.call()
  # Stored variable info: levels, contrasts, grouping factors, NA flags, etc.
  info <- nauf.info(formula)
  # Strip the nauf.terms class so stats::model.frame dispatches normally.
  drop_class(formula) <- "nauf.terms"
  ncs <- info$ncs_scale
  # Rewrite the captured call as a plain stats::model.frame call, forcing
  # the argument values nauf requires (NAs must pass through; unused levels
  # are dropped; ncs_scale is not a model.frame argument and is removed).
  mc[[1]] <- quote(stats::model.frame)
  mc$formula <- formula
  mc$na.action <- na.pass
  mc$drop.unused.levels <- TRUE
  mc$xlev <- NULL
  mc["ncs_scale"] <- NULL
  mf <- eval(mc, parent.frame())
  # Restore the nauf attributes and classes on the new frame.
  attr(mf, "formula") <- info$formula
  attr(mf, "terms") <- formula
  first_class(mf, "terms") <- "nauf.terms"
  last_class(mf) <- "nauf.frame"
  nauf.info(mf) <- info
  v <- colnames(mf)
  # Variables of each kind that actually appear in the new frame.
  uf <- intersect(v, names(info$uf))
  of <- intersect(v, names(info$of))
  groups <- intersect(v, names(info$groups))
  # Re-apply the stored main-effect levels and sum contrasts to unordered
  # factors, and the stored levels/contrasts to ordered factors.
  lvs <- lapply(info$uf[uf], `[[`, 1)
  contr <- lapply(lvs, standardize::named_contr_sum, scale = ncs)
  mf[uf] <- mlapply(x = mf[uf], levels = lvs, contrasts = contr,
    fun = standardize::fac_and_contr)
  mf[of] <- mlapply(x = mf[of], levels = lapply(info$of[of], `[[`, "levels"),
    contrasts = lapply(info$of[of], `[[`, "contrasts"),
    same = list(ordered = TRUE), fun = standardize::fac_and_contr)
  if (isTRUE(info$allow.new.levels)) {
    # New data may contain unseen grouping-factor levels: map them to the
    # pseudo-level "_NEW_" (NA stays NA for groups that had NAs originally).
    mf[groups] <- mlapply(fac = mf[groups], levs = info$groups[groups],
      hasna = info$hasna[groups], fun = function(fac, levs, hasna) {
        wnew <- which(!(fac %in% c(levs, if (hasna) NA)))
        fac <- factor(fac, levels = c(levs, "_NEW_"))
        fac[wnew] <- "_NEW_"
        return(fac)
      }
    )
  } else {
    # New levels not allowed: coerce to the stored level sets (values not in
    # the stored levels become NA via factor()).
    mf[groups] <- mlapply(x = mf[groups], levels = info$groups[groups],
      same = list(ordered = FALSE), fun = factor)
  }
  # Warn when NAs appear in variables that had none when the model was fit.
  if (any(sapply(mf, anyNA) & !info$hasna[v])) {
    warning("Some variables which did not have NA values when the model was ",
            "fit have NA values in the new model frame.")
  }
  return(mf)
}
#' @export
# S3 model.matrix method for 'nauf.terms': build the fixed-effects model
# matrix with nauf contrasts.  `data` is first converted to a nauf model
# frame (via the nauf.terms model.frame method) if it is not one already;
# `contrasts.arg` and `xlev` exist only for S3 signature compatibility.
model.matrix.nauf.terms <- function(object, data = environment(object),
                                    contrasts.arg = NULL, xlev = NULL, ...) {
  frame <- if (is.nauf.frame(data)) data else model.frame(object, data)
  nauf_mm(frame)
}
#' Create a model frame using \code{nauf} contrasts.
#'
#' \code{nauf_model.frame} creates a model frame which employs
#' \code{\link{nauf_contrasts}} for unordered factors.
#'
#' First, the default method for \code{\link[stats]{model.frame}} is called.
#' Then any variable in the resulting model frame that is not an unordered
#' factor but has only two unique non-\code{NA} values is coerced to an
#' unordered factor. Unordered factors are then assigned contrasts with
#' \code{\link[standardize]{named_contr_sum}}, passing \code{ncs_scale} as the
#' function's \code{scale} argument. Then, necessary contrast changes in
#' interaction terms and random effects slopes are determined as described in
#' \code{\link{nauf_contrasts}}.
#'
#' The recommended usage is to first \code{\link[standardize]{standardize}} the
#' regression variables, and then use the \code{formula} and \code{data}
#' elements in the resulting \code{standardized} object as arguments to
#' \code{nauf_model.frame}. When this is done, \code{ncs_scale} is obtained
#' from the \code{standardized.scale} attribute of the \code{formula}, unless
#' \code{ncs_scale} is specified as a value which does not match the
#' \code{standardized} scale, in which case the explicitly specified
#' \code{ncs_scale} argument is used with a warning. If
#' \code{\link[standardize]{standardize}} is not used prior to calling
#' \code{nauf_model.frame}, then \code{ncs_scale} defaults to \code{1} unless
#' explicitly specified in the function call, in which case the specified value
#' is used.
#'
#' Changes from the following default values are ignored with a warning:
#' \describe{
#' \item{na.action = na.pass}{This default value is required in order for
#' \code{NA} values to be treated as defined in
#' \code{\link{nauf_contrasts}}.}
#' \item{drop.unused.levels = TRUE}{This default value is set because
#' \code{nauf_model.frame} assumes that \code{data} is not new data. To
#' create a \code{nauf.frame} with new data, the \code{terms}
#' attribute of an already existing \code{nauf.frame} (which
#' has class \code{\link{nauf.terms}}) can be used as the
#' \code{formula} argument to \code{\link[stats]{model.frame}}.}
#' \item{xlev = NULL}{This default is necessary for the same reasons as the
#' default value for \code{drop.unused.levels}.}
#' \item{contrasts = NULL}{For unordered factors, contrasts are automatically
#' created with \code{\link[standardize]{named_contr_sum}}, as sum contrasts
#' are necessary to implement \code{\link{nauf_contrasts}}. To specify
#' custom contrasts for ordered factors, the custom contrasts should be
#' explicitly assigned to the ordered factor in \code{data} (this is
#' automatically done if \code{\link[standardize]{standardize}} is used
#' first as recommended).}
#' }
#'
#' @param formula,data,subset,... See \code{\link[stats]{model.frame}}.
#' @param na.action,drop.unused.levels,xlev,contrasts Changes from default
#' values for these arguments are ignored with a warning.
#' @param ncs_scale A positive number passed as the \code{scale} argument to
#' \code{\link[standardize]{named_contr_sum}} for all unordered factor
#' contrasts. The default is to first check whether \code{formula} comes from
#' a \code{standardized} object returned by
#' \code{\link[standardize]{standardize}}. If it is, then the \code{scale}
#' argument from the \code{\link[standardize]{standardize}} call is used. If
#' it is not, then \code{ncs_scale} is set to \code{1}. The value for
#' \code{ncs_scale} can also be set explicitly. If it is set explicitly and
#' \code{formula} is from a \code{standardized} object with a different scale
#' than the explicitly set value, then the explicitly set value
#' is used and a warning is issued.
#'
#' @return A model frame with second class attribute \code{nauf.frame}. Its
#' \code{formula} attribute has class \code{nauf.formula} and its \code{terms}
#' attribute has class \code{\link{nauf.terms}}.
#'
#' @examples
#' dat <- plosives
#' dat$spont[dat$dialect == "Valladolid"] <- NA
#' form <- intdiff ~ voicing * dialect * spont +
#' (1 + voicing * spont | speaker) + (1 + dialect | item)
#'
#' ## default behavior when standardize is not used
#' # defaults to ncs_scale = 1
#' mf <- nauf_model.frame(form, dat)
#'
#' # uses specified ncs_scale = 0.5
#' mf_0.5 <- nauf_model.frame(form, dat, ncs_scale = 0.5)
#'
#' ## standardize first (recommended use)
#' sobj <- standardize(form, dat)
#' sobj_0.5 <- standardize(form, dat, scale = 0.5)
#'
#' # uses ncs_scale = 1 from attr(sobj$formula, "standardized.scale")
#' mf_sobj <- nauf_model.frame(sobj$formula, sobj$data)
#'
#' # uses ncs_scale = 0.5 from attr(sobj_0.5$formula, "standardized.scale")
#' mf_sobj_0.5 <- nauf_model.frame(sobj_0.5$formula, sobj_0.5$data)
#'
#' \dontrun{
#' ## not recommended
#' # uses specified ncs_scale = 0.5 and issues a warning since
#' # attr(sobj$formula, "standardized.scale") = 1
#' mf_warning <- nauf_model.frame(sobj$formula, sobj$data, ncs_scale = 0.5)
#' }
#'
#' @seealso \code{\link{nauf_contrasts}} for a description of the contrasts
#' applied to unordered factors, \code{\link{nauf_model.matrix}} for obtaining
#' a fixed effects model matrix, and \code{\link{nauf_glFormula}} for
#' obtaining both fixed effects and random effects model matrices.
#'
#' @export
nauf_model.frame <- function(formula, data = NULL, subset = NULL,
                             na.action = na.pass, drop.unused.levels = TRUE,
                             xlev = NULL, contrasts = NULL,
                             ncs_scale = attr(formula, "standardized.scale"),
                             ...) {
  # Force the model.frame arguments that nauf contrasts rely on, warning if
  # the caller tried to change any of them (see the roxygen 'Details' section
  # above for why each default is required).
  # Ignore na.action, contrasts, drop.unused.levels, and xlev
  mc <- match.call()
  mc$na.action <- na.pass
  mc$drop.unused.levels <- TRUE
  mc$xlev <- NULL
  mc$contrasts <- NULL
  if (!isTRUE(all.equal(na.action, na.pass))) {
    warning("Ignoring 'na.action', must be na.pass")
  }
  if (!isTRUE(drop.unused.levels)) {
    warning("Ignoring 'drop.unused.levels', must be TRUE")
  }
  if (!is.null(xlev)) {
    warning("Ignoring 'xlev', must be NULL")
  }
  if (!is.null(contrasts)) {
    warning("Ignoring 'contrasts', must be NULL")
  }
  # Resolve the sum-contrast scale: an explicit ncs_scale wins; otherwise the
  # scale recorded on the formula by standardize(); otherwise 1.  Note the
  # assignment inside is.null(): 'ncs' is set from 'ncs_scale' here.
  standardized_scale <- attr(formula, "standardized.scale")
  if (is.null(ncs <- ncs_scale)) ncs <- 1
  if (!is.scalar(ncs, 1)) {
    stop("The scale for sum contrasts must be a single positive number")
  }
  if (!is.null(standardized_scale) && standardized_scale != ncs) {
    warning("'formula' is from a standardized object with scale ",
      standardized_scale, " but ncs_scale was specified as ", ncs)
  }
  # Validate the formula: it must have predictors, a response, and a fixed
  # effects intercept.  subbars() replaces '|' with '+' so the variables in
  # random effects terms are retained in the model frame.
  formula <- stats::formula(formula)
  class(formula) <- "formula"
  bars <- lme4::findbars(formula)
  sb_form <- lme4::subbars(formula)
  if (stats::is.empty.model(sb_form)) stop("There are no predictors in 'formula'")
  fe_form <- stats::terms(lme4::nobars(formula))
  if (!attr(fe_form, "response")) stop("'formula' must have a response")
  if (!attr(fe_form, "intercept")) {
    stop("There must be a fixed effects intercept")
  }
  fe_form <- stats::formula(stats::delete.response(fe_form))
  # check_groups is a package helper; presumably it extracts and validates the
  # random effects grouping factor names -- confirm against its definition.
  groups <- check_groups(formula)
  if (!is.data.frame(data)) stop("'data' must be a data.frame")
  # Build the model frame through stats::model.frame() on the bar-substituted
  # formula, dropping the nauf-specific 'ncs_scale' argument from the call.
  mc$formula <- sb_form
  mc["ncs_scale"] <- NULL
  mc[[1]] <- quote(stats::model.frame)
  mf <- eval(mc, parent.frame())
  mt <- attr(mf, "terms")
  # In the terms 'factors' matrix, rows are variables and columns are terms;
  # grouping factor rows are removed before checking the interaction hierarchy.
  fmat <- attr(mt, "factors")
  fmat <- fmat[setdiff(rownames(fmat), groups), , drop = FALSE]
  if (any(fmat > 1)) {
    warning("'nauf' has not been tested for models that violate the ",
      "interaction hierarchy")
  }
  # Classify the model frame columns: grouping factors ('rgrp'), unordered
  # factors ('uf'; character/logical columns are first coerced by the
  # charlogbin_to_uf package helper), ordered factors ('of'), matrix columns
  # ('mat'), and other numerics ('num').  Column 1 is the response and is
  # excluded from all of these.  find_extras is a package helper; presumably
  # it flags non-predictor columns such as weights/offset -- confirm.
  cnms <- colnames(mf)
  extras <- find_extras(mf)
  rgrp <- setNames(cnms %in% groups, cnms)
  mf[rgrp] <- lapply(mf[rgrp], factor, ordered = FALSE)
  check <- which(!extras & !rgrp)
  mf[check] <- lapply(mf[check], charlogbin_to_uf)
  uf <- sapply(mf, is.uf) & !extras & !rgrp
  of <- sapply(mf, is.ordered) & !extras
  mat <- sapply(mf, is.matrix) & !extras
  num <- sapply(mf, is.numeric) & !extras & !mat
  hasna <- sapply(mf, anyNA)
  uf[1] <- of[1] <- mat[1] <- num[1] <- FALSE
  # Per nauf_contrasts, NA is only meaningful in unordered factor predictors
  # and random effects grouping factors.
  if (any(hasna & !(uf | rgrp))) {
    stop("Only unordered factor predictors and random effects grouping factors",
      " can have NA values")
  }
  # Apply scaled named sum contrasts to all unordered factors.
  mf[uf] <- lapply(mf[uf], standardize::named_contr_sum, scale = ncs,
    return_contr = FALSE)
  attr(mt, "dataClasses")[cnms[uf | rgrp]] <- "factor"
  # Work out which terms need altered contrast sets because of NAs (see
  # contrast_changes), then record everything needed to rebuild model matrices
  # later in the 'nauf.info' attribute of the terms object.
  changes <- contrast_changes(fe_form, bars, mf, uf, hasna)
  mt <- nauf.terms(mt, formula = lme4::expandDoubleVerts(formula),
    resp = cnms[1], groups = lapply(mf[groups], levels),
    uf = changes$uf, of = lapply(mf[of], levs_and_contr),
    num = lapply(mf[num], mean), mat = lapply(mf[mat], colMeans),
    extras = cnms[extras], cc = changes$cc, hasna = hasna, ncs_scale = ncs)
  first_class(formula) <- "nauf.formula"
  last_class(mf) <- "nauf.frame"
  attr(mf, "terms") <- mt
  attr(mf, "formula") <- formula
  return(mf)
}
nauf.terms <- function(terms, ...) {
  # Tag a terms object with class "nauf.terms" and stash the nauf bookkeeping
  # supplied via ... (groups, factor levels, contrast changes, ...) in its
  # "nauf.info" attribute.
  info <- list(...)
  first_class(terms) <- "nauf.terms"
  attr(terms, "nauf.info") <- info
  terms
}
# Determine, for each part of the model (the fixed effects formula plus each
# random effects bar), which unordered factors need contrast sets other than
# their full-data sum contrasts because of NAs.  Returns (via the rsa_nlist
# package helper) a list with:
#   uf: for each unordered factor, the unique level sets (contrast sets)
#   cc: one element per formula part, mapping factors/terms to the index of
#       the contrast set they use within 'uf'
contrast_changes <- function(fixed, bars, mf, uf, hasna) {
  ufn <- names(uf)[uf]
  # run .contrast_changes once for the fixed effects and once per bar
  changes <- lapply(c(list(fixed), bars), .contrast_changes, mf = mf, uf = uf,
    hasna = hasna)
  uf <- lapply(mf[ufn], levels)
  main <- lapply(changes, `[[`, "lvs")
  inter <- lapply(changes, `[[`, "cc")
  asgn <- lapply(changes, `[[`, "asgn")
  # do.call(main) and do.call(c, do.call(c, inter)) are lists of charvecs
  # named by uf; combine with uf and take unique
  all_main <- setNames(do.call(c, main), unname(unlist(lapply(main, names))))
  all_inter <- setNames(do.call(c, do.call(c, inter)),
    unname(unlist(lapply(inter, function(x) lapply(x, names)))))
  uf <- lapply(nsplit(c(uf, all_main, all_inter))[ufn], unique)
  # uf is now a named list of unique contrast set levels for each factor
  # convert to named numeric vectors of contrast references
  main <- lapply(main, contr_nums, levlist = uf)
  inter <- lapply(inter, function(x) mlapply(levs = x, same = list(levlist = uf),
    fun = contr_nums))
  # join so one element per form (fixed + bars)
  cc <- mlapply(m = main, i = inter, a = asgn, fun = function(m, i, a)
    c(m, mlapply(factors = i, assign = a, fun = list)))
  return(rsa_nlist(uf, cc))
}
# when contrast_changes is called on a ranef bar, we only care if the contrasts
# change in interactions from the main effect contrasts *for the bar* because
# if the main effect contrasts are not .c1., they will have been changed in
# mf prior to calling ccmat
#
# Returns a list (lvs, cc, asgn):
#   lvs:  (random effects bars whose grouping factor has NAs only) main-effect
#         level sets after dropping rows where the grouping factor is NA
#   cc:   level sets for interactions whose contrasts change (nauf_interaction)
#   asgn: for each changed interaction, the indices of the terms it covers in
#         the formula's 'factors' matrix
.contrast_changes <- function(form, mf, uf, hasna) {
  lvs <- cc <- asgn <- list()
  # A bar (random effects term) is a call, not a formula.  Note the
  # assignments to 're' inside the conditions below.
  if (re <- !inherits(form, "formula")) {
    group <- varnms(barform(form, 3))
    if (is.null(varnms(form <- barform(form, 2)))) {
      # intercept-only bar: no factor contrasts can change
      return(rsa_nlist(lvs, cc, asgn))
    }
    if (re <- any(hasna[group])) {
      # drop the observations where the bar's grouping factor is NA before
      # checking which levels remain
      mf <- droplevels(mf[!rowna(mf[group]), , drop = FALSE])
    }
  }
  fmat <- attr(stats::terms(form), "factors") > 0
  rn <- rownames(fmat)
  uf <- uf[rn]
  hasna <- hasna[rn]
  nauf <- uf & hasna  # unordered factors that actually contain NAs
  ufmat <- fmat[uf, , drop = FALSE]
  naufmat <- fmat[nauf, , drop = FALSE]
  # interactions of 2+ unordered factors with at least one NA-containing factor
  check_inter <- length(inter <- which(colSums(ufmat) > 1 & colSums(naufmat)))
  check_main <- re & length(main <- intersect(rn[uf], colnames(fmat)))
  if (check_main) {
    lvs <- lapply(mf[main], levels)
  }
  if (check_inter) {
    # unique sets of factor names involved in the flagged interactions
    cc <- unique(lapply(inter, function(x) sort(rownames(ufmat)[ufmat[, x]])))
    cc <- mlapply(cols = cc, same = list(x = mf), fun = nauf_interaction)
    changed <- sapply(cc, `[[`, "changed")
    if (any(changed)) {
      cc <- lapply(cc[changed], `[[`, "levels")
      facs <- lapply(cc, names)
      # terms in which every one of the interaction's factors appears
      asgn <- mlapply(f = facs, same = list(m = fmat), fun = function(f, m)
        which(apply(m[f, , drop = FALSE], 2, all)))
      names(asgn) <- names(cc) <- sapply(facs, paste, collapse = ":")
    } else {
      cc <- list()
    }
  }
  return(rsa_nlist(lvs, cc, asgn))
}
contr_nums <- function(levs, levlist) {
  # Map each factor's level set onto its position within that factor's full
  # list of contrast sets (see contrast_changes); in_list is a package helper.
  refs <- mapply(in_list, x = levs, lst = levlist[names(levs)])
  # A non-numeric result or a zero means a level set failed to match.
  invalid <- length(refs) > 0 && (!is.numeric(refs) || any(refs == 0))
  if (invalid) {
    stop("failed to create named vector contrast reference numbers")
  }
  refs
}
# Compute the level sets that apply within an interaction of the unordered
# factors named in 'cols', after removing factor level combinations that only
# ever co-occur with NAs.  Returns list(levels = <named list of level sets>,
# changed = <TRUE if any level set differs from the factor's full level set>).
nauf_interaction <- function(x, cols) {
  nm <- paste(cols, collapse = ":")
  x <- x[cols]
  mlvs <- lapply(x, function(n) reorder_ft(sort(levels(n))))
  # remove any unique combination which involves NAs
  x <- unique(x)
  x <- droplevels(x[!rowna(x), , drop = FALSE])
  if (!nrow(x)) stop("No unique applicable combinations in ", nm)
  # remove levels which are redundant
  # e.g. f1 in [a, b, c], f2 in [d, e, f, g]
  # when f1 = a, f2 in [d, e, f]
  # when f1 = b, f2 in [e, f, g]
  # when f1 = c, f2 = NA
  # then at this point [c] has been dropped from f1,
  # but we still need to drop [d, g] from f2
  if (empty_cells(x)) {
    # For each factor level, mark it for removal if conditioning on it leaves
    # some other factor with only a single level.
    torm <- mlapply(i = lapply(x, levels), j = 1:ncol(x), same = list(mat = x),
      fun = function(j, i, mat) {
        do.call(c, mlapply(lev = i, same = list(n = j, m = mat),
          fun = function(lev, n, m) {
            check <- droplevels(m[m[[n]] %in% lev, -n, drop = FALSE])
            if (any(sapply(check, nlevels) == 1)) return(lev)
            return(NULL)
          }
        ))
      }
    )
    if (length(torm <- torm[lengths(torm) > 0])) {
      # replace the marked levels with NA, then re-drop NA rows below
      f <- names(torm)
      x[f] <- mlapply(fac = x[f], levs = torm, fun = function(fac, levs) {
        fac[fac %in% levs] <- NA
        return(fac)
      })
    }
    x <- unique(droplevels(x[!rowna(x), , drop = FALSE]))
    if (!nrow(x)) stop("No unique applicable combinations in ", nm)
  }
  ilvs <- lapply(x, function(n) reorder_ft(sort(levels(n))))
  changed <- !isTRUE(all.equal(mlvs, ilvs))
  if (any(unlist(lapply(x, nlevels)) < 2)) {
    stop("At least one factor in ", nm,
      " has only one level when NAs are removed")
  }
  # there can still be collinearity, but in this case it isn't structural
  # so warning rather than error (i.e. a column will be dropped from
  # the model matrix if the interaction is included in a regression)
  if (empty_cells(x)) warning("Collinearity in the interaction ", nm)
  return(list(levels = ilvs, changed = changed))
}
#' Create a fixed effects model matrix using \code{nauf} contrasts.
#'
#' \code{nauf_model.matrix} creates a model matrix which employs
#' \code{\link{nauf_contrasts}} for unordered factors.
#'
#' Exactly what happens depends on the values of \code{object} and \code{data}.
#' The following possibilities are evaluated in the order listed:
#' \describe{
#' \item{object is a nauf.frame}{All arguments besides \code{object} are
#' ignored, and the information in \code{object} is used to create the model
#' matrix.}
#' \item{data is a nauf.frame}{All arguments besides \code{data} are ignored,
#' and the information in \code{data} is used to create the model matrix.}
#' \item{object is a formula and data is a data.frame}{
#' \code{\link{nauf_model.frame}} is called with \code{formula = object}
#' and \code{data = data}, passing along any additional arguments in
#' \code{...} (including \code{ncs_scale}). Then the model matrix is
#' created using the information in the resulting
#' \code{nauf.frame}.}
#' \item{any other argument values}{An error is returned.}
#' }
#'
#' @param object A \code{nauf.frame} or a regression formula.
#' See 'Details'.
#' @param data A \code{nauf.frame} or a \code{data.frame}
#' containing the variables in \code{object} if \code{object} is a regression
#' formula. See 'Details'.
#' @param ... Further arguments to be passed to \code{\link{nauf_model.frame}}
#' when \code{object} is a regression formula and \code{data} is a
#' \code{data.frame}. See 'Details'.
#'
#' @return A fixed effects model matrix that implements
#' \code{\link{nauf_contrasts}}. Unlike the default
#' \code{\link[stats]{model.matrix}} method, the model matrix does not have a
#' \code{contrasts} attribute, since multiple sets of contrasts may be
#' required for some unordered factors.
#'
#' @examples
#' dat <- plosives
#' dat$spont[dat$dialect == "Valladolid"] <- NA
#' form <- intdiff ~ voicing * dialect * spont +
#' (1 + voicing * spont | speaker) + (1 + dialect | item)
#' sobj <- standardize(form, dat)
#' mf <- nauf_model.frame(sobj$formula, sobj$data)
#'
#' ## the following all result in the same model matrix
#' mm1 <- nauf_model.matrix(mf)
#' mm2 <- nauf_model.matrix(form, mf) # 'form' ignored
#' mm3 <- nauf_model.matrix(sobj$formula, sobj$data)
#'
#' @seealso \code{\link{nauf_contrasts}} for a description of the contrasts
#' applied to unordered factors, \code{\link{nauf_model.frame}} for creating a
#' model frame with \code{nauf} contrasts, and \code{\link{nauf_glFormula}}
#' for obtaining both fixed effects and random effects model matrices.
#'
#' @export
nauf_model.matrix <- function(object = NULL, data = NULL, ...) {
  # A ready-made nauf model frame supplied in either argument is used directly.
  if (is.nauf.frame(object)) {
    return(nauf_mm(object))
  }
  if (is.nauf.frame(data)) {
    return(nauf_mm(data))
  }
  # Otherwise 'object' must be a formula: re-dispatch this call to
  # nauf_model.frame() to build the frame, then make the matrix from it.
  if (inherits(object, "formula")) {
    cl <- match.call()
    names(cl)[2] <- "formula"
    cl[[1]] <- quote(nauf::nauf_model.frame)
    frame <- eval(cl, parent.frame())
    return(nauf_mm(frame))
  }
  stop("If 'object' is not a formula, then either 'object' or 'data' must be\n",
    " a model frame created by nauf_model.frame")
}
# Build a model matrix from a nauf.frame.  ccn = 1 builds the fixed effects
# matrix; ccn = j + 1 builds the matrix for the j-th random effects bar, using
# that part's contrast-change info from attr(terms, "nauf.info")$cc.
nauf_mm <- function(mf, ccn = 1) {
  attr(mf, "na.action") <- "na.pass"
  formula <- attr(mf, "formula")
  mt <- attr(mf, "terms")
  info <- attr(mt, "nauf.info")
  ncs <- info$ncs_scale
  ufc <- info$uf        # contrast set level lists per unordered factor
  cc <- info$cc[[ccn]]  # contrast changes for this part of the model
  if (ccn == 1) {
    # fixed effects: drop the bars and the response
    formula <- stats::delete.response(stats::terms(lme4::nobars(formula)))
  } else {
    # random effects: use the LHS of the (ccn - 1)-th bar
    formula <- stats::terms(barform(lme4::findbars(formula)[[ccn - 1]], 2))
    # Main-effect contrast changes for this bar can be applied directly to
    # the factors in the frame; interaction-only changes are handled below.
    ccmain <- intersect(names(ufc), names(cc))
    if (length(ccmain)) {
      mf[ccmain] <- mlapply(fac = mf[ccmain], levs = ufc[ccmain], cj = cc[ccmain],
        same = list(ncs = ncs), fun = apply_contrast_changes)
      cc <- cc[-which(names(cc) %in% ccmain)]
    }
  }
  mm <- stats::model.matrix(formula, mf)
  if (length(cc)) {
    # Some interaction terms need contrast sets different from the main
    # effects.  Remove those terms' columns from 'mm', rebuild each group of
    # terms with ccmat(), then reassemble the columns in term ('assign') order.
    mmlist <- list()
    asgn <- attr(mm, "assign")
    asgn_cc <- sort(unique(unlist(lapply(cc, `[[`, "assign"))))
    mmrm <- which(asgn %in% asgn_cc)
    asgn <- asgn[-mmrm]
    if (length(asgn)) {
      mmlist[[1]] <- mm[, -mmrm, drop = FALSE]
    }
    fmat <- attr(formula, "factors")
    ccmms <- lapply(cc, ccmat, mf = mf, ufc = ufc, ncs = ncs, fmat = fmat)
    mm <- do.call(cbind, c(mmlist, lapply(ccmms, `[[`, "matrix")))
    # sort by term number (names carry the column names for reordering 'mm')
    asgn <- sort(setNames(c(asgn, unlist(lapply(ccmms, `[[`, "assign"))),
      colnames(mm)))
    mm <- mm[, names(asgn), drop = FALSE]
    attr(mm, "assign") <- unname(asgn)
  }
  # Multiple contrast sets per factor make a single 'contrasts' attribute
  # meaningless; NA cells contribute 0 to the matrix (the nauf convention).
  attr(mm, "contrasts") <- NULL
  mm[is.na(mm)] <- 0
  return(mm)
}
apply_contrast_changes <- function(fac, levs, cj, ncs) {
  # Contrast set 1 is the factor's full-data contrast set; nothing to change.
  if (cj == 1) {
    return(fac)
  }
  # Rebuild the factor on the cj-th (reduced) level set and attach scaled sum
  # contrasts whose column names carry a ".c<j>." prefix, keeping the columns
  # distinguishable from the default contrast columns in the model matrix.
  lvl <- levs[[cj]]
  out <- factor(fac, ordered = FALSE, levels = lvl)
  cmat <- standardize::named_contr_sum(lvl, ncs)
  colnames(cmat) <- paste0(".c", cj, ".", colnames(cmat))
  contrasts(out) <- cmat
  out
}
# Build model matrix columns for a group of interaction terms ('cc$assign')
# that require their own contrast sets ('cc$factors'), and map the resulting
# columns back onto the caller's term numbering.
ccmat <- function(cc, mf, ufc, ncs, fmat) {
  uf <- names(cc$factors)
  # swap in the interaction-specific contrasts for the involved factors
  mf[uf] <- mlapply(fac = mf[uf], levs = ufc[uf], cj = cc$factors,
    same = list(ncs = ncs), fun = apply_contrast_changes)
  # restrict the 'factors' matrix to the affected terms and the variables
  # they actually involve
  fmat <- fmat[, cc$assign, drop = FALSE] > 0
  main <- rownames(fmat)[rowSums(fmat) > 0]
  fmat <- fmat[main, , drop = FALSE]
  # build a one-sided formula with the main effects plus each affected term
  # written as a '*' crossing, so model.matrix() produces the needed columns
  mt <- stats::terms(stats::formula(do.call(paste, c(
    list(paste("~", paste(main, collapse = "+"))),
    lapply(list_mat_cols(fmat), function(x) paste(main[x], collapse = "*")),
    list(sep = "+")))))
  mm <- stats::model.matrix(mt, mf)
  # keep only the columns belonging to the terms that were asked for
  asgn_mt <- which(colnames(attr(mt, "factors")) %in% colnames(fmat))
  asgn_mm <- attr(mm, "assign")
  keep <- which(asgn_mm %in% asgn_mt)
  mm <- mm[, keep, drop = FALSE]
  # renumber 'assign' back to the caller's term indices
  asgn_mm <- cc$assign[as.numeric(factor(asgn_mm[keep]))]
  attr(mm, "assign") <- NULL
  return(list(matrix = mm, assign = asgn_mm))
}
#' @importFrom Matrix rBind t sparseMatrix drop0 diag KhatriRao
# Construct the random effects structures (Zt, Lambdat, theta, flist, cnms,
# Ztlist, ...) for a nauf.frame, mirroring lme4::mkReTrms but using nauf
# contrasts via nauf_mkBlist()/nauf_mm().  'lvs' optionally supplies grouping
# factor levels (used when predicting on new data; see nauf_mkBlist).
nauf_mkReTrms <- function(fr, lvs = NULL) {
  # based on lme4::mkReTrms
  if (!is.nauf.frame(fr)) {
    stop("'fr' was not created with nauf_model.frame")
  }
  bars <- lme4::findbars(attr(fr, "formula"))
  if (!length(bars)) {
    stop("No random effects terms specified in formula", call. = FALSE)
  }
  stopifnot(is.list(bars), vapply(bars, is.language, NA), inherits(fr,
    "data.frame"))
  names(bars) <- lme4_barnames(bars)
  term.names <- vapply(bars, lme4_safeDeparse, "")
  # one block per bar; ccn indexes the bar's contrast info (1 = fixed effects)
  blist <- mlapply(bar = bars, ccn = 1 + 1:length(bars),
    same = list(lvs = lvs, fr = fr), fun = nauf_mkBlist)
  nl <- vapply(blist, `[[`, 0L, "nl")
  # order the blocks by decreasing number of grouping factor levels
  if (any(diff(nl) > 0)) {
    ord <- rev(order(nl))
    blist <- blist[ord]
    nl <- nl[ord]
    term.names <- term.names[ord]
  }
  Ztlist <- lapply(blist, `[[`, "sm")
  # NOTE(review): Matrix::rBind is deprecated in newer versions of Matrix in
  # favor of plain rbind -- confirm before upgrading the Matrix dependency.
  Zt <- do.call(Matrix::rBind, Ztlist)
  names(Ztlist) <- term.names
  q <- nrow(Zt)
  cnms <- lapply(blist, `[[`, "cnms")
  nc <- lengths(cnms)                     # model matrix columns per block
  nth <- as.integer((nc * (nc + 1)) / 2)  # covariance parameters per block
  nb <- nc * nl                           # random effects per block
  if (sum(nb) != q) {
    stop(sprintf("total number of RE (%d) not equal to nrow(Zt) (%d)",
      sum(nb), q))
  }
  boff <- cumsum(c(0L, nb))   # random effect offsets per block
  thoff <- cumsum(c(0L, nth)) # theta offsets per block
  # Sparse transposed relative covariance factor; its x slot temporarily
  # holds, for each nonzero position, the index of the theta element that
  # fills it (extracted below as Lind).
  Lambdat <- Matrix::t(do.call(Matrix::sparseMatrix, do.call(Matrix::rBind,
    lapply(seq_along(blist), function(i) {
      mm <- matrix(seq_len(nb[i]), ncol = nc[i], byrow = TRUE)
      dd <- diag(nc[i])
      ltri <- lower.tri(dd, diag = TRUE)
      ii <- row(dd)[ltri]
      jj <- col(dd)[ltri]
      data.frame(i = as.vector(mm[, ii]) + boff[i], j = as.vector(mm[, jj]) +
        boff[i], x = as.double(rep.int(seq_along(ii), rep.int(nl[i],
        length(ii))) + thoff[i]))
    }))))
  thet <- numeric(sum(nth))
  ll <- list(Zt = Matrix::drop0(Zt), theta = thet, Lind = as.integer(Lambdat@x),
    Gp = unname(c(0L, cumsum(nb))))
  # diagonal theta elements are bounded below by 0; off-diagonals unbounded
  ll$lower <- -Inf * (thet + 1)
  ll$lower[unique(Matrix::diag(Lambdat))] <- 0
  ll$theta[] <- is.finite(ll$lower)  # start values: 1 diagonal, 0 off-diagonal
  Lambdat@x[] <- ll$theta[ll$Lind]
  ll$Lambdat <- Lambdat
  # flist: data frame of grouping factors, deduplicated when a factor is used
  # by more than one bar ('assign' maps bars back to flist columns)
  fl <- lapply(blist, `[[`, "ff")
  fnms <- names(fl)
  if (length(fnms) > length(ufn <- unique(fnms))) {
    fl <- fl[match(ufn, fnms)]
    asgn <- match(fnms, ufn)
  } else {
    asgn <- seq_along(fl)
  }
  names(fl) <- ufn
  fl <- do.call(data.frame, c(fl, check.names = FALSE))
  attr(fl, "assign") <- asgn
  ll$flist <- fl
  ll$cnms <- cnms
  ll$Ztlist <- Ztlist
  return(ll)
}
# Build one random effects block for 'bar' (ccn indexes the bar's contrast
# info; see nauf_mm).  Returns the grouping factor 'ff' (possibly an
# interaction of several variables), the sparse transposed model matrix 'sm'
# for the block, the number of grouping levels 'nl', and the column names.
nauf_mkBlist <- function(bar, ccn, lvs, fr) {
  gvars <- varnms(barform(bar, 3))
  # interaction() collapses multiple grouping variables into a single factor
  ff <- interaction(fr[gvars])
  if (is.null(lvs)) {
    ff <- droplevels(ff)
    if (all(is.na(ff))) {
      stop("Invalid grouping factor specification, ", deparse(bar[[3]]),
        " are all NA", call. = FALSE)
    }
  } else { # implies predict method
    # fix the levels to those supplied (i.e. those seen at fitting time)
    ff <- factor(ff, levels = lvs[[ccn - 1]], ordered = FALSE)
  }
  mm <- nauf_mm(fr, ccn)
  # Zt block: Khatri-Rao product of the sparse grouping indicator matrix and
  # the transposed dense model matrix for this bar
  sm <- Matrix::KhatriRao(Matrix::fac2sparse(ff, to = "d",
    drop.unused.levels = FALSE), t(mm))
  dimnames(sm) <- list(rep(levels(ff), each = ncol(mm)), rownames(mm))
  return(list(ff = ff, sm = sm, nl = nlevels(ff), cnms = colnames(mm)))
}
#' Create a model frame and fixed and random effects model matrices using \code{nauf} contrasts.
#'
#' The same as the \code{lme4} \code{\link[lme4]{modular}} functions
#' \code{glFormula} and \code{lFormula}, but implementing
#' \code{\link{nauf_contrasts}}. \code{nauf_lFormula} is used for linear mixed
#' effects regressions (i.e. those that would be fit with
#' \code{\link{nauf_lmer}}) and \code{nauf_glFormula} is used for generalized
#' linear mixed effects regressions (i.e. those that would be fit with
#' \code{\link{nauf_glmer}} or \code{\link{nauf_glmer.nb}}). Both of the
#' functions contain a call to \code{nauf_mkReTrms}, which serves the same
#' purpose as the \code{lme4} function \code{\link[lme4]{mkReTrms}}, but with
#' \code{\link{nauf_contrasts}}, and, while \code{\link[lme4]{mkReTrms}} is
#' exported by \code{lme4}, \code{nauf_mkReTrms} is an internal function in the
#' \code{nauf} package.
#'
#' @param formula,data,family,REML,subset,weights,offset,control,mustart,etastart,...
#' See \code{\link[lme4]{glFormula}}.
#' @param na.action,contrasts Changes from default values are ignored. See
#' \code{\link{nauf_model.frame}}.
#' @param ncs_scale A positive number to be passed as the \code{scale} argument
#' to \code{\link[standardize]{named_contr_sum}} for all unordered factors.
#' See \code{\link{nauf_model.frame}}.
#'
#' @return A list with the following elements:
#' \describe{
#' \item{fr}{The model frame (with class \code{nauf.frame}).
#' See \code{\link{nauf_model.frame}}.}
#' \item{X}{The fixed effects model matrix with \code{\link{nauf_contrasts}}
#' applied. See \code{\link{nauf_model.matrix}}.}
#' \item{reTrms}{A list containing the random effects model matrix and other
#' information about the random effects structure. The elements of the list
#' have the same structure as that returned by \code{\link[lme4]{mkReTrms}},
#'   but incorporating \code{\link{nauf_contrasts}}.}
#' \item{REML}{(\code{nauf_lFormula} only): A logical indicating if restricted
#' maximum likelihood was used (copy of argument).}
#' \item{family}{(\code{nauf_glFormula} only): The regression family (copy
#' of argument).}
#' \item{formula}{The \code{formula} argument, but with any double vertical
#' bars expanded (e.g. \code{(1 + x || subj)} becomes
#' \code{(1 | subj) + (0 + x | subj)}).}
#' \item{wmsgs}{Warning messages (if any).}
#' }
#'
#' @examples
#' dat <- plosives
#' dat$spont[dat$dialect == "Valladolid"] <- NA
#' dat_form <- intdiff ~ voicing * dialect * spont +
#' (1 + voicing * spont | speaker) + (1 + dialect | item)
#' sobj <- standardize(dat_form, dat)
#' lmod <- nauf_lFormula(sobj$formula, sobj$data)
#'
#' vless <- droplevels(subset(dat, voicing == "Voiceless"))
#' vless$fully_voiced <- vless$vdur == 0
#' vless_form <- fully_voiced ~ dialect * spont +
#' (1 + spont | speaker) + (1 + dialect | item)
#' svless <- standardize(vless_form, vless, family = binomial)
#' glmod <- nauf_glFormula(svless$formula, svless$data, family = binomial)
#'
#' @seealso \code{\link{nauf_contrasts}} for a description of the contrasts
#' applied to unordered factors; \code{\link{nauf_model.frame}} and
#' \code{\link{nauf_model.matrix}} for the creation of the \code{fr} and
#' \code{X} elements of the returned list, respectively; and
#' \code{\link{nauf_lmer}}, \code{\link{nauf_glmer.nb}}, and
#' \code{\link{nauf_glmer}} for fitting mixed effects regressions with gaussian,
#' negative binomial, and all other families, respectively.
#'
#' @export
nauf_glFormula <- function(formula, data = NULL, family = gaussian, subset,
                           weights, na.action = na.pass, offset,
                           contrasts = NULL, mustart, etastart,
                           control = lme4::glmerControl(),
                           ncs_scale = attr(formula, "standardized.scale"),
                           ...) {
  # based on lme4::glFormula
  control <- control$checkControl
  mf <- mc <- match.call()
  # 'contrasts' and 'na.action' cannot be changed (see nauf_model.frame)
  if (!is.null(contrasts)) warning("Ignoring 'contrasts'; must be NULL")
  if (!isTRUE(all.equal(na.action, na.pass))) {
    warning("Ignoring 'na.action'; must be na.pass")
  }
  # Families the is.linear package helper classifies as linear models are
  # re-dispatched to nauf_lFormula; "quasi" families are rejected as in glmer.
  if (is.linear(family <- get_family(family))) {
    mc[[1]] <- quote(nauf::nauf_lFormula)
    mc["family"] <- NULL
    return(eval(mc, parent.frame()))
  } else if (!(is.character(family) && family == "negbin")) {
    if (family$family %in% c("quasibinomial", "quasipoisson", "quasi")) {
      stop("\"quasi\" families cannot be used in glmer")
    }
  }
  # validate remaining arguments as lme4 would for glmer
  ignoreArgs <- c("start", "verbose", "devFunOnly", "optimizer", "control",
    "nAGQ")
  l... <- list(...)
  l... <- l...[!names(l...) %in% ignoreArgs]
  do.call(lme4_checkArgs, c(list("glmer"), l...))
  cstr <- "check.formula.LHS"
  lme4_checkCtrlLevels(cstr, control[[cstr]])
  denv <- lme4_checkFormulaData(formula, data,
    checkLHS = control$check.formula.LHS == "stop")
  mc$formula <- formula <- stats::as.formula(formula, env = denv)
  # rewrite the matched call into a nauf_model.frame() call
  m <- match(c("data", "subset", "weights", "offset", "ncs_scale",
    "mustart", "etastart"), names(mf), 0L)
  mf <- mf[c(1L, m)]
  mf$drop.unused.levels <- TRUE
  mf$na.action <- na.pass
  mf[[1L]] <- quote(nauf::nauf_model.frame)
  fr.form <- lme4::subbars(formula)
  environment(fr.form) <- environment(formula)
  # make caller-supplied weights/offset visible in the formula environment
  for (i in c("weights", "offset")) {
    if (!eval(bquote(missing(x = .(i))))) {
      assign(i, get(i, parent.frame()), environment(fr.form))
    }
  }
  mf$formula <- formula
  fr <- eval(mf, parent.frame())
  attr(fr, "formula") <- formula
  attr(fr, "offset") <- mf$offset
  n <- nrow(fr)
  # random effects structures with nauf contrasts, plus lme4's sanity checks
  reTrms <- nauf_mkReTrms(fr)
  wmsgNlev <- lme4_checkNlevels(reTrms$flist, n = n, control, allow.n = TRUE)
  wmsgZdims <- lme4_checkZdims(reTrms$Ztlist, n = n, control, allow.n = TRUE)
  wmsgZrank <- lme4_checkZrank(reTrms$Zt, n = n, control, nonSmall = 1e+06,
    allow.n = TRUE)
  # record predvars for the fixed and random parts (as lme4::glFormula does)
  mf[[1L]] <- quote(stats::model.frame)
  fixedform <- formula
  lme4_RHSForm(fixedform) <- lme4::nobars(lme4_RHSForm(fixedform))
  mf$formula <- fixedform
  fixedfr <- eval(mf, parent.frame())
  attr(attr(fr, "terms"), "predvars.fixed") <- attr(attr(fixedfr,
    "terms"), "predvars")
  ranform <- formula
  lme4_RHSForm(ranform) <- lme4::subbars(lme4_RHSForm(lme4_reOnly(formula)))
  mf$formula <- ranform
  ranfr <- eval(mf, parent.frame())
  attr(attr(fr, "terms"), "predvars.random") <- attr(terms(ranfr), "predvars")
  # fixed effects model matrix with nauf contrasts, then rank/scale checks
  X <- nauf_model.matrix(fr)
  if (is.null(rankX.chk <- control[["check.rankX"]])) {
    rankX.chk <- eval(formals(lme4::lmerControl)[["check.rankX"]])[[1]]
  }
  X <- lme4_chkRank.drop.cols(X, kind = rankX.chk, tol = 1e-07)
  if (is.null(scaleX.chk <- control[["check.scaleX"]])) {
    scaleX.chk <- eval(formals(lme4::lmerControl)[["check.scaleX"]])[[1]]
  }
  X <- lme4_checkScaleX(X, kind = scaleX.chk)
  return(list(fr = fr, X = X, reTrms = reTrms, family = family, formula = formula,
    wmsgs = c(Nlev = wmsgNlev, Zdims = wmsgZdims, Zrank = wmsgZrank)))
}
#' @rdname nauf_glFormula
#' @export
nauf_lFormula <- function(formula, data = NULL, REML = TRUE, subset, weights,
                          na.action = na.pass, offset, contrasts = NULL,
                          control = lme4::lmerControl(),
                          ncs_scale = attr(formula, "standardized.scale"),
                          ...) {
  # based on lme4::lFormula
  control <- control$checkControl
  mf <- mc <- match.call()
  # 'contrasts' and 'na.action' cannot be changed (see nauf_model.frame)
  if (!is.null(contrasts)) warning("Ignoring 'contrasts'; must be NULL")
  if (!isTRUE(all.equal(na.action, na.pass))) {
    warning("Ignoring 'na.action'; must be na.pass")
  }
  # validate remaining arguments as lme4 would for lmer
  ignoreArgs <- c("start", "verbose", "devFunOnly", "control")
  l... <- list(...)
  l... <- l...[!names(l...) %in% ignoreArgs]
  do.call(lme4_checkArgs, c(list("lmer"), l...))
  # a 'family' argument means this is really a GLMM: re-dispatch
  if (!is.null(list(...)[["family"]])) {
    mc[[1]] <- quote(nauf::nauf_glFormula)
    if (missing(control)) mc[["control"]] <- lme4::glmerControl()
    return(eval(mc, parent.frame()))
  }
  cstr <- "check.formula.LHS"
  lme4_checkCtrlLevels(cstr, control[[cstr]])
  denv <- lme4_checkFormulaData(formula, data,
    checkLHS = control$check.formula.LHS == "stop")
  formula <- stats::as.formula(formula, env = denv)
  # expand '||' into separate uncorrelated bars before building the frame
  lme4_RHSForm(formula) <- lme4::expandDoubleVerts(lme4_RHSForm(formula))
  mc$formula <- formula
  # rewrite the matched call into a nauf_model.frame() call
  m <- match(c("data", "subset", "weights", "offset", "ncs_scale"),
    names(mf), 0L)
  mf <- mf[c(1L, m)]
  mf$drop.unused.levels <- TRUE
  mf$na.action <- na.pass
  mf[[1L]] <- quote(nauf::nauf_model.frame)
  fr.form <- lme4::subbars(formula)
  environment(fr.form) <- environment(formula)
  # make caller-supplied weights/offset visible in the formula environment
  for (i in c("weights", "offset")) {
    if (!eval(bquote(missing(x = .(i))))) {
      assign(i, get(i, parent.frame()), environment(fr.form))
    }
  }
  mf$formula <- formula
  fr <- eval(mf, parent.frame())
  attr(fr, "formula") <- formula
  attr(fr, "offset") <- mf$offset
  n <- nrow(fr)
  # random effects structures with nauf contrasts, plus lme4's sanity checks
  reTrms <- nauf_mkReTrms(fr)
  wmsgNlev <- lme4_checkNlevels(reTrms$flist, n = n, control)
  wmsgZdims <- lme4_checkZdims(reTrms$Ztlist, n = n, control, allow.n = FALSE)
  if (anyNA(reTrms$Zt)) {
    stop("NA in Z (random-effects model matrix): ", "please use ",
      shQuote("na.action='na.omit'"), " or ", shQuote("na.action='na.exclude'"))
  }
  wmsgZrank <- lme4_checkZrank(reTrms$Zt, n = n, control, nonSmall = 1e+06)
  # record predvars for the fixed and random parts (as lme4::lFormula does)
  mf[[1L]] <- quote(stats::model.frame)
  fixedform <- formula
  lme4_RHSForm(fixedform) <- lme4::nobars(lme4_RHSForm(fixedform))
  mf$formula <- fixedform
  fixedfr <- eval(mf, parent.frame())
  attr(attr(fr, "terms"), "predvars.fixed") <- attr(attr(fixedfr,
    "terms"), "predvars")
  ranform <- formula
  lme4_RHSForm(ranform) <- lme4::subbars(lme4_RHSForm(lme4_reOnly(formula)))
  mf$formula <- ranform
  ranfr <- eval(mf, parent.frame())
  attr(attr(fr, "terms"), "predvars.random") <- attr(terms(ranfr),
    "predvars")
  # fixed effects model matrix with nauf contrasts, then rank/scale checks
  X <- nauf_model.matrix(fr)
  if (is.null(rankX.chk <- control[["check.rankX"]])) {
    rankX.chk <- eval(formals(lme4::lmerControl)[["check.rankX"]])[[1]]
  }
  X <- lme4_chkRank.drop.cols(X, kind = rankX.chk, tol = 1e-07)
  if (is.null(scaleX.chk <- control[["check.scaleX"]])) {
    scaleX.chk <- eval(formals(lme4::lmerControl)[["check.scaleX"]])[[1]]
  }
  X <- lme4_checkScaleX(X, kind = scaleX.chk)
  return(list(fr = fr, X = X, reTrms = reTrms, REML = REML, formula = formula,
    wmsgs = c(Nlev = wmsgNlev, Zdims = wmsgZdims, Zrank = wmsgZrank)))
}
|
40f30e3817f7e745aa6bfddd9551ed321a6b9a25 | 781c4b3a0c4ba06fe73c7377a76724649e38221d | /data-raw/TKit_20160822/TKit_fit_lin_mod2.R | aa17bee1a5fda8728b0542b9fd0d2ec55537ce55 | [
"MIT"
] | permissive | andrewdolman/ecostattoolkit | 44b6d4d1e4bbec85f5d7323ea4355064bd48c8ab | 874c86eadeba04dedd3ed0f2e8754694176f6eab | refs/heads/master | 2021-01-20T12:11:06.099136 | 2017-05-31T19:31:56 | 2017-05-31T19:31:56 | 74,139,489 | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 18,324 | r | TKit_fit_lin_mod2.R | #Description: Example script to fit linear models to data of sevoral categories
#Exploratory version
#Geoff Phillips
#Date:10 Aug 2016
#File name for script: TKit_fit_lin_mod2.R
rm(list= ls()) # Clear data
#################################################################################
# Step 1 get the data #
#################################################################################
#Enter file name and path
FName<-"O:\\Work\\Projects\\2016\\JRC_NutrientPhase2\\Data\\Analysis\\2016_08_10\\DataTemplate_Example2.csv"
#Get data and check
data <- read.csv(file = FName, header = TRUE)
dim(data)
summary(data)
#Identify the records with both EQR and TP data to produce a data set without missing values
cc<-complete.cases(data$EQR,data$P)
data.cc<-data[cc,]
dim(data.cc)
#------------------------------------------------------------------------------------------
# Get all data for plotting
#------------------------------------------------------------------------------------------
x<-log10(data.cc$P)
x2<-10^x
y<-data.cc$EQR
z<-as.factor(data.cc$Group)
#------------------------------------------------------------------------------------------
# Assign data used for models to x.u and y.u
#------------------------------------------------------------------------------------------
nut.min<-7 #min nutrient value used for model
nut.max<-100 #max nutrient value used for model
# Enter the high/good and good/moderate boundary values
HG<-0.80
GM<-0.60
x.u<-log10(data.cc$P[data.cc$P<=nut.max &
data.cc$P>=nut.min &
data.cc$Exclude_P==FALSE])
y.u<-data.cc$EQR[data.cc$P<=nut.max &
data.cc$P>=nut.min &
data.cc$Exclude_P==FALSE]
x2.u<-10^x.u #back transformed x for plotting on linear scale
z.u<-as.factor(data.cc$Group[data.cc$P<=nut.max &
data.cc$P>=nut.min &
data.cc$Exclude_P==FALSE])
zN.u<-length(levels(z.u))
# Check the data records are the same length
length(x.u)
length(y.u)
length(z.u)
#..........................................................................................
# Plot the data to check and visualise
#..........................................................................................
MetricUsed<-"Phytoplankton"
# labels for axis
ylb<-"EQR" # Y axis label
xlb<-expression(paste("total phosphorus ","(µg ", L^-1,")"))
plot(y~x2,main=MetricUsed,log="x",ylab=ylb,xlab=xlb,
las=1)
points(y.u~x2.u,pch=19,col=rainbow(zN.u)[z.u])
legend("bottomleft",levels(z.u),col=rainbow(zN.u),pch=19,cex=0.8)
#..........................................................................................
# Compare models including grouping variable, with and without interaction
#..........................................................................................
summary(mod1<-lm(y.u~x.u)) #base model, no group variable
summary(mod1a<-lm(y.u~x.u+z.u))#model with group variable, different intercepts
summary(mod1b<-lm(y.u~x.u*z.u))#model with interaction term, different slopes & intercept
AIC(mod1,mod1a,mod1b) #compare models, select model with lowest AIC
#------------------------------------------------------------------------------------------
# Fit models including group variable if AIC mod1a is lowest
#------------------------------------------------------------------------------------------
#..........................................................................................
# Model 1a OLS Y on X + Z
#..........................................................................................
summary(mod1a<-lm(y.u~x.u+z.u))
# predict values of y from model 1
pred.y1a<-data.frame(predict(mod1a),resid(mod1a),z.u)#Calculate predicted values and residuals
# Calculate upper and lower quartiles of residuals for 1st factor
upresid.1a<-quantile(subset(pred.y1a$resid.mod1a,z.u==levels(z.u)[1]),0.75,na.rm=T)
lwresid.1a<-quantile(subset(pred.y1a$resid.mod1a,z.u==levels(z.u)[1]),0.25,na.rm=T)
# Extract the slope for model 1a
slope1a<-summary(mod1a)[[4]][[2]]
# Extract the intercept for 1st grouping factor of model 1a
int1a<-summary(mod1a)[[4]][[1]]
# Calculate GM and HG boundaries with upper and lower estimates for 1st factor
GM.mod1a<-round(10^((GM-int1a[1])/slope1a),0)
GMU.mod1a<-round(10^((GM-int1a[1]-upresid.1a[1])/slope1a),0)
GML.mod1a<-round(10^((GM-int1a[1]-lwresid.1a[1])/slope1a),0)
HG.mod1a<-round(10^((HG-int1a[1])/slope1a),0)
HGU.mod1a<-round(10^((HG-int1a[1]-upresid.1a[1])/slope1a),0)
HGL.mod1a<-round(10^((HG-int1a[1]-lwresid.1a[1])/slope1a),0)
# Extract quartiles of residuals, intercepts, calculate boundaries for
# remaining factors on model 1a
for (i in 2:zN.u){
upresid.1a<-cbind(upresid.1a,quantile(subset(pred.y1a$resid.mod1a,z.u==levels(z.u)[i]),0.75,na.rm=T))
lwresid.1a<-cbind(lwresid.1a,quantile(subset(pred.y1a$resid.mod1a,z.u==levels(z.u)[i]),0.25,na.rm=T))
int1a<-cbind(int1a,(int1a[1]+summary(mod1a)[[4]][[(i+1)]]))
GM.mod1a<-cbind(GM.mod1a,round(10^((GM-int1a[i])/slope1a),0))
GMU.mod1a<-cbind(GMU.mod1a,round(10^((GM-int1a[i]-upresid.1a[i])/slope1a),0))
GML.mod1a<-cbind(GML.mod1a,round(10^((GM-int1a[i]-lwresid.1a[i])/slope1a),0))
HG.mod1a<-cbind(HG.mod1a,round(10^((HG-int1a[i])/slope1a),0))
HGU.mod1a<-cbind(HGU.mod1a,round(10^((HG-int1a[i]-upresid.1a[i])/slope1a),0))
HGL.mod1a<-cbind(HGL.mod1a,round(10^((HG-int1a[i]-lwresid.1a[i])/slope1a),0))
}
(Sum.mod1a<-data.frame(
R2=c(round(summary(mod1a)[[9]],3)),
N=c(length(x.u)),
slope=c(slope1a),
int=c(int1a),
GM =c(GM.mod1a),
GML=c(GML.mod1a),
GMU=c(GMU.mod1a),
HG =c(HG.mod1a),
HGL=c(HGL.mod1a),
HGU=c(HGU.mod1a),
row.names=levels(z.u)))
#..........................................................................................
# Model 2a OLS X on Y + Z
#..........................................................................................
summary(mod2a<-lm(x.u~y.u+z.u))
# Extract the slope & intercept for 1st factor for model 2a
slope2a<-summary(mod2a)[[4]][[2]]
int2a<-summary(mod2a)[[4]][[1]]
y0<-(0-int2a)/slope2a
for (i in 2:zN.u){
int2a<-cbind(int2a,(int2a[1]+summary(mod2a)[[4]][[(i+1)]]))
y0<-cbind(y0,(0-int2a[i])/slope2a)
}
# Set up a data frame with observed x, factor z and add appropriate intercept value y0a for category
# calculate predicted y values (EQR) and calculate residuals
(pred.y2a<-data.frame(x.u,y.u,z.u,y0[1]))
colnames(pred.y2a)[4]<-"y0.i"
for (i in 2:zN.u){
pred.y2a<-within(pred.y2a,y0.i[z.u==levels(z.u)[i]]<-y0[i])
}
(pred.y2a<-within(pred.y2a,pred.y2a<-x.u*1/slope2a+y0.i)) #calc predicted values pred.y2
(pred.y2a<-within(pred.y2a,resid.2a<-y.u-pred.y2a)) #calc residuals resid.2
# calculate upper and lower 25th 75th quantiles of residuals
upresid.2a<-quantile(subset(pred.y2a$resid.2a,z.u==levels(z.u)[1]),0.75,na.rm=T)
lwresid.2a<-quantile(subset(pred.y2a$resid.2a,z.u==levels(z.u)[1]),0.25,na.rm=T)
for (i in 2:zN.u){
upresid.2a<-cbind(upresid.2a,quantile(subset(pred.y2a$resid.2a,z.u==levels(z.u)[i]),0.75,na.rm=T))
lwresid.2a<-cbind(lwresid.2a,quantile(subset(pred.y2a$resid.2a,z.u==levels(z.u)[i]),0.25,na.rm=T))
}
(GM.mod2a<-round(10^((GM-y0)*slope2a),0)) #Predicted GM boundary value for 1st factor (HA)
(GMU.mod2a<-round(10^((GM-y0-upresid.2a)*slope2a),0))
(GML.mod2a<-round(10^((GM-y0-lwresid.2a)*slope2a),0))
(HG.mod2a<-round(10^((HG-y0)*slope2a),0)) #Predicted GM boundary value for 1st factor (HA)
(HGU.mod2a<-round(10^((HG-y0-upresid.2a)*slope2a),0))
(HGL.mod2a<-round(10^((HG-y0-lwresid.2a)*slope2a),0))
(Sum.mod2a<-data.frame(
R2=c(round(summary(mod2a)[[9]],3)),
N=c(length(x.u)),
slope=c(1/slope2a),
int=c(y0),
GM =c(GM.mod2a),
GML=c(GML.mod2a),
GMU=c(GMU.mod2a),
HG =c(HG.mod2a),
HGL=c(HGL.mod2a),
HGU=c(HGU.mod2a),
row.names=levels(z.u)))
#..........................................................................................
# Model 3a Orthogonal regression (geometric average of models 1 & 2)
#..........................................................................................
# calculate the average slope and intercept of x on y and y on x
(slope3a<-sign(slope1a)*sqrt(slope1a*1/slope2a))#geometric average or orthoganal regression
int3a<-mean(y.u[z.u==levels(z.u)[1]])-(slope3a*mean(x.u[z.u==levels(z.u)[1]]))
for (i in 2:zN.u){
int3a<-cbind(int3a,mean(y.u[z.u==levels(z.u)[i]])-(slope3a*mean(x.u[z.u==levels(z.u)[i]])))
}
# Set up a data frame with observed x, factor z and add appropriate intercept value y0a for category
# calculate predicted y values (EQR) and calculate residuals
(pred.y3a<-data.frame(x.u,y.u,z.u,int3a[1]))
colnames(pred.y3a)[4]<-"int3a.i"
for (i in 2:zN.u){
pred.y3a<-within(pred.y3a,int3a.i[z.u==levels(z.u)[i]]<-int3a[i])
}
(pred.y3a<-within(pred.y3a,pred.y3a<-x.u*slope3a+int3a.i)) #calc predicted values pred.y2
(pred.y3a<-within(pred.y3a,resid.3a<-y.u-pred.y3a))
# calculate upper and lower 25th 75th quantiles of residuals
upresid.3a<-quantile(subset(pred.y3a$resid.3a,z.u==levels(z.u)[1]),0.75,na.rm=T)
lwresid.3a<-quantile(subset(pred.y3a$resid.3a,z.u==levels(z.u)[1]),0.25,na.rm=T)
for (i in 2:zN.u){
upresid.3a<-cbind(upresid.3a,quantile(subset(pred.y3a$resid.3a,z.u==levels(z.u)[i]),0.75,na.rm=T))
lwresid.3a<-cbind(lwresid.3a,quantile(subset(pred.y3a$resid.3a,z.u==levels(z.u)[i]),0.25,na.rm=T))
}
(GM.mod3a<-round(10^((GM-int3a)/slope3a),0)) #Predicted GM boundary value for 1st factor (HA)
(GMU.mod3a<-round(10^((GM-int3a-upresid.3a)/slope3a),0))
(GML.mod3a<-round(10^((GM-int3a-lwresid.3a)/slope3a),0))
(HG.mod3a<-round(10^((HG-int3a)/slope3a),0)) #Predicted GM boundary value for 1st factor (HA)
(HGU.mod3a<-round(10^((HG-int3a-upresid.3a)/slope3a),0))
(HGL.mod3a<-round(10^((HG-int3a-lwresid.3a)/slope3a),0))
(Sum.mod3a<-data.frame(
R2="",
N=c(length(x.u)),
slope=c(slope3a),
int=c(int3a),
GM =c(GM.mod3a),
GML=c(GML.mod3a),
GMU=c(GMU.mod3a),
HG =c(HG.mod3a),
HGL=c(HGL.mod3a),
HGU=c(HGU.mod3a),
row.names=levels(z.u)))
#===========================================================================================
# re-set scales for plotting
xmin<- 5
xmax<- 200
ymin<- 0
ymax<- 1.5
MetricUsed<-"Phytoplankton"
# set up scales & labels for axis
ylb<-"EQR" # Y axis label
xlb<-expression(paste("total phosphorus ","(µg ", L^-1,")"))
win.graph(width=14,height=9) # new graphic window
par(mfrow=c(2,3)) # delete this line if separate plots are required
par(mar=c(4,5,3.5,1))
#..........................................................................................
# Fig A
#..........................................................................................
title<-"Regression of EQR on TP"
plot(y~x2,ylim=c(ymin,ymax),xlim=c(xmin,xmax),main=title,log="x",ylab=ylb,xlab=xlb,
las=1)
points(y.u~x2.u,pch=19,col=rainbow(zN.u)[z.u])
new.x <- data.frame(x = seq(min(x, na.rm=T), max(x, na.rm=T), length = length(x)))#new values of X on log scale
for (i in 1:zN.u){
lines(10^(new.x$x),new.x$x*slope1a+int1a[i],col=rainbow(zN.u)[i])
points(10^(mean(x.u[z.u==levels(z.u)[i]])),
mean(y.u[z.u==levels(z.u)[i]]),pch=3,cex=3,col=rainbow(zN.u)[i])
}
legend("bottomleft",levels(z.u),col=rainbow(zN.u),pch=19,cex=1)
mtext(paste("R2=", round(summary(mod1a)$r.sq,3),sep=""), side = 3, line=0,adj=0,
col="black",cex=0.8)
pvalue<-pf(summary(mod1a)[[10]][[1]],summary(mod1a)[[10]][[2]],summary(mod1a)[[10]][[3]],lower.tail=F)
if(pvalue >=0.001) {mtext(paste("p=", round(pvalue,3), sep=""),side = 3, line =0, adj = 0.25,
col = "black",cex=0.8)}
if(pvalue <0.001) {mtext("p<0.001", side = 3, line= 0, adj = 0.25, col="black",cex=0.8)}
mtext(paste("slope= ", round(slope1a,3)," se ",
round(summary(mod1a)[[4]][[7]],3),sep=""),side = 3,line=0,adj=0.75, col="black",cex=0.8)
abline("h"=GM,lty=2)
abline("v"=GM.mod1a,lty=2,col=rainbow(zN.u))
text(xmin,GM,GM,cex=1,pos=3)
text(GM.mod1a,ymin,GM.mod1a,pos=c(3,4),cex=1)
#..........................................................................................
# Fig B: OLS regression of TP on EQR (model 2a) with good/moderate (GM) boundaries.
# (BUG FIX: the opening separator line was missing its leading '#', so sourcing
# the script stopped with "object '....' not found".)
#..........................................................................................
title<-"Regression of TP on EQR"
plot(y~x2,ylim=c(ymin,ymax),xlim=c(xmin,xmax),main=title,log="x",ylab=ylb,xlab=xlb,
     las=1)
points(y.u~x2.u,pch=19,col=rainbow(zN.u)[z.u])
# new x values spanning the observed (log-scale) range, used to draw the fitted lines
new.x <- data.frame(x = seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), length.out = length(x)))
for (i in 1:zN.u){
  # fitted line and group centroid for each group, in the group's colour
  lines(10^(new.x$x),new.x$x*1/slope2a+y0[i],col=rainbow(zN.u)[i])
  points(10^(mean(x.u[z.u==levels(z.u)[i]])),
         mean(y.u[z.u==levels(z.u)[i]]),pch=3,cex=3,col=rainbow(zN.u)[i])
}
mtext(paste("R2=", round(summary(mod2a)$r.sq,3),sep=""), side = 3, line=0,adj=0, col="black",cex=0.8)
pvalue<-pf(summary(mod2a)[[10]][[1]],summary(mod2a)[[10]][[2]],summary(mod2a)[[10]][[3]],lower.tail=FALSE)
# p-value is rounded here for consistency with the Fig A annotation
if(pvalue >=0.001) {mtext(paste("p=", round(pvalue,3), sep=""),side = 3, line =0, adj = 0.25, col = "black",cex=0.8)}
if(pvalue <0.001) {mtext("p<0.001", side = 3, line= 0, adj = 0.25, col="black",cex=0.8)}
mtext(paste("slope= ", round(1/slope2a,3)," se ",
            round(summary(mod2a)[[4]][[7]],3),sep=""), side = 3, line=0,adj=0.75, col="black",cex=0.8)
# good/moderate boundary: horizontal EQR line plus a vertical TP line per group
abline("h"=GM,lty=2)
abline("v"=GM.mod2a,lty=2,col=rainbow(zN.u))
text(xmin,GM,GM,cex=1,pos=3)
text(GM.mod2a,ymin,GM.mod2a,pos=c(3,4),cex=1)
#..........................................................................................
# Fig C: geometric-mean (orthogonal) regression of EQR v TP (model 3a), GM boundaries.
# (BUG FIX: the opening separator line was missing its leading '#', which made
# the script error with "object '....' not found" when sourced.)
#..........................................................................................
title<-"Geometric mean (orthogonal) regression EQR v TP"
plot(y~x2,ylim=c(ymin,ymax),xlim=c(xmin,xmax),main=title,log="x",ylab=ylb, xlab=xlb,las=1)
points(y.u~x2.u,pch=19,col=rainbow(zN.u)[z.u])
# new x values spanning the observed (log-scale) range, used to draw the fitted lines
new.x <- data.frame(x = seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), length.out = length(x)))
for (i in 1:zN.u){
  # fitted orthogonal-regression line and group centroid, per group
  lines(10^(new.x$x),new.x$x*slope3a+int3a[i],col=rainbow(zN.u)[i])
  points(10^(mean(x.u[z.u==levels(z.u)[i]])),
         mean(y.u[z.u==levels(z.u)[i]]),pch=3,cex=3,col=rainbow(zN.u)[i])
}
mtext(paste("slope= ", round(slope3a,3),sep=""), side = 3, line=0,adj=0.5, col="black",cex=0.8)
# good/moderate boundary: horizontal EQR line plus a vertical TP line per group
abline("h"=GM,lty=2)
abline("v"=GM.mod3a,lty=2,col=rainbow(zN.u))
text(xmin,GM,GM,cex=1,pos=3)
text(GM.mod3a,ymin,GM.mod3a,pos=c(3,4),cex=1)
#..........................................................................................
# Fig D
#..........................................................................................
title<-""
plot(y~x2,ylim=c(ymin,ymax),xlim=c(xmin,xmax),main=title,log="x",ylab=ylb,xlab=xlb,
las=1)
points(y.u~x2.u,pch=19,col=rainbow(zN.u)[z.u])
new.x <- data.frame(x = seq(min(x, na.rm=T), max(x, na.rm=T), length = length(x)))#new values of X on log scale
for (i in 1:zN.u){
lines(10^(new.x$x),new.x$x*slope1a+int1a[i],col=rainbow(zN.u)[i])
points(10^(mean(x.u[z.u==levels(z.u)[i]])),
mean(y.u[z.u==levels(z.u)[i]]),pch=3,cex=3,col=rainbow(zN.u)[i])
}
legend("bottomleft",levels(z.u),col=rainbow(zN.u),pch=19,cex=1)
abline("h"=HG,lty=2)
abline("v"=HG.mod1a,lty=2,col=rainbow(zN.u))
text(xmin,HG,HG,cex=1,pos=3)
text(HG.mod1a,ymin,HG.mod1a,pos=c(3,4),cex=1)
#..........................................................................................
# Fig E: OLS TP v EQR fit (model 2a) with the high/good (HG) boundary lines.
# (BUG FIX: restored the missing leading '#' on the separator line, which made
# the script error with "object '....' not found" when sourced.)
#..........................................................................................
title<-""
plot(y~x2,ylim=c(ymin,ymax),xlim=c(xmin,xmax),main=title,log="x",ylab=ylb,xlab=xlb,
     las=1)
points(y.u~x2.u,pch=19,col=rainbow(zN.u)[z.u])
# new x values spanning the observed (log-scale) range, used to draw the fitted lines
new.x <- data.frame(x = seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), length.out = length(x)))
for (i in 1:zN.u){
  # fitted line and group centroid for each group, in the group's colour
  lines(10^(new.x$x),new.x$x*1/slope2a+y0[i],col=rainbow(zN.u)[i])
  points(10^(mean(x.u[z.u==levels(z.u)[i]])),
         mean(y.u[z.u==levels(z.u)[i]]),pch=3,cex=3,col=rainbow(zN.u)[i])
}
# high/good boundary: horizontal EQR line plus a vertical TP line per group
abline("h"=HG,lty=2)
abline("v"=HG.mod2a,lty=2,col=rainbow(zN.u))
text(xmin,HG,HG,cex=1,pos=3)
text(HG.mod2a,ymin,HG.mod2a,pos=c(3,4),cex=1)
#..........................................................................................
# Fig F: orthogonal regression fit (model 3a) with the high/good (HG) boundary lines.
# (BUG FIX: restored the missing leading '#' on the separator line, which made
# the script error with "object '....' not found" when sourced.)
#..........................................................................................
title<-""
plot(y~x2,ylim=c(ymin,ymax),xlim=c(xmin,xmax),main=title,log="x",ylab=ylb, xlab=xlb,las=1)
points(y.u~x2.u,pch=19,col=rainbow(zN.u)[z.u])
# new x values spanning the observed (log-scale) range, used to draw the fitted lines
new.x <- data.frame(x = seq(min(x, na.rm=TRUE), max(x, na.rm=TRUE), length.out = length(x)))
for (i in 1:zN.u){
  # fitted orthogonal-regression line and group centroid, per group
  lines(10^(new.x$x),new.x$x*slope3a+int3a[i],col=rainbow(zN.u)[i])
  points(10^(mean(x.u[z.u==levels(z.u)[i]])),
         mean(y.u[z.u==levels(z.u)[i]]),pch=3,cex=3,col=rainbow(zN.u)[i])
}
# high/good boundary: horizontal EQR line plus a vertical TP line per group
abline("h"=HG,lty=2)
abline("v"=HG.mod3a,lty=2,col=rainbow(zN.u))
text(xmin,HG,HG,cex=1,pos=3)
text(HG.mod3a,ymin,HG.mod3a,pos=c(3,4),cex=1)
###########################################################################################
#Output boundary values,into a series of data.frames called
#Out1
#Out2
#to
#OutN, where N is number of groups in data set (zN.u)
for(i in 1:zN.u){
out<-data.frame(
Type=c(levels(z.u)[i],"",""),
R2=c(round(summary(mod1a)[[9]],3),"",round(summary(mod2a)[[9]],3)),
N=c(length(x.u),"",""),
slope=c(round(slope1a,3),round(slope3a,3),round(1/slope2a,3)),
int=c(round(int1a[i],3),round(int3a[i],3),round(y0[i],3)),
GM =c(GM.mod1a[i],GM.mod3a[i],GM.mod2a[i]),
GML=c(GML.mod1a[i],GML.mod3a[i],GML.mod2a[i]),
GMU=c(GMU.mod1a[i],GMU.mod3a[i],GMU.mod2a[i]),
HG =c(HG.mod1a[i],HG.mod3a[i],HG.mod2a[i]),
HGL=c(HGL.mod1a[i],HGL.mod3a[i],HGL.mod2a[i]),
HGU=c(HGU.mod1a[i],HGU.mod3a[i],HGU.mod2a[i]),
row.names=c("OLS EQR v TP","Orthogonal regression","OLS TP v EQR"))
assign(paste("Out",i,sep=""),out)
}
# The following lines show output on console and then place in clipboard
print(Out1)
write.table(Out1,"clipboard",sep="\t",row.names=F,col.names=F)
#Now paste to Excel
print(Out2)
write.table(Out2,"clipboard",sep="\t",row.names=F,col.names=F)
#Now paste to Excel
print(Out3)
write.table(Out3,"clipboard",sep="\t",row.names=F,col.names=F)
#Now paste to Excel
print(Out4)
write.table(Out4,"clipboard",sep="\t",row.names=F,col.names=F)
#Now paste to Excel
#Add more rows if needed
|
91fa7c55840293d0195230dc6ba2251d8b475603 | 51c7de80d27ea5cd20b83ac0933f5cc7f735a5a3 | /man/summary.scidb.Rd | 03f85fa945a0c8ae09a705d352ab2073aea51c20 | [] | no_license | anukat2015/SciDBR | 3fc5532ae3228260aa9271c9ead6bf83ebde7e8d | c37f064114677b8df34a940529e2aef462f9db80 | refs/heads/master | 2021-01-13T10:07:05.598340 | 2016-01-12T15:37:31 | 2016-01-12T15:37:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 304 | rd | summary.scidb.Rd | \name{summary.scidb}
\alias{summary.scidb}
\title{
Summarize scidb object class.
}
\description{
Summarize scidb object class. This is not a really useful function yet.
}
\usage{
\method{summary}{scidb}(x)
}
\arguments{
\item{x}{ a scidb array object.
}
}
\author{
B. W. Lewis <blewis@paradigm4.com>
}
|
aa6666e1d0e23e2c8c16d24d22857f87f9dfa90f | cf8d4b322f4502ba41b1f2462e3282760538446c | /Plot2.R | 11a56677ecc0acf557d7660d4d7a075fb5b5d0e6 | [] | no_license | tkon22/ExData_Plotting1 | 094e8b9bed58356fa9f01f6862586717ac333913 | 4b17a0224f95dd07b4e035d0d0a08f5d61843c70 | refs/heads/master | 2021-04-06T19:37:44.567209 | 2018-03-11T06:20:52 | 2018-03-11T06:20:52 | 124,721,459 | 0 | 0 | null | 2018-03-11T03:57:55 | 2018-03-11T03:57:55 | null | UTF-8 | R | false | false | 552 | r | Plot2.R | hhpower <- {
# Load the full power-consumption file, then keep only the two analysis days
# (1-2 Feb 2007).
hpower <- "household_power_consumption.txt"
hpdata <- read.table(hpower, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# BUG FIX: the subset must come from `hpdata`; the original indexed the base
# function `data()`, which errors at run time.
hhpsubSetData <- hpdata[hpdata$Date %in% c("1/2/2007","2/2/2007") ,]
}
# str(hhpsubSetData)
# Combine Date + Time into POSIXlt timestamps for the x axis.
datetime <- strptime(paste(hhpsubSetData$Date, hhpsubSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
hhpGlobalActivePower <- as.numeric(hhpsubSetData$Global_active_power)
# Write the line plot of global active power over time to plot2.png.
png("plot2.png", width=480, height=480)
plot(datetime, hhpGlobalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
9aa780d300b4b6aee1cc2b7d1ea4d63082dd7c16 | 68d2ba3f519597e2146f71080712c78d9f34647f | /man/sliderInput.Rd | 79b4ae16c04aa1c232ba79a7c78e8d8a79a43a51 | [
"MIT"
] | permissive | igemsoftware2020/ClusteRsy-Linkoping | 5f040e282dbd2b163cef7be3902060f271121081 | 7399466a2e11e27087ce531357708b983fb365ec | refs/heads/master | 2023-01-02T13:03:44.125587 | 2020-10-27T22:29:01 | 2020-10-27T22:29:01 | 264,438,493 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 579 | rd | sliderInput.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/golem_utils_ui.R
\name{sliderInput}
\alias{sliderInput}
\title{Modified shiny sliderInput func}
\usage{
sliderInput(
inputId,
label,
min,
max,
value,
step = NULL,
round = FALSE,
format = NULL,
locale = NULL,
ticks = TRUE,
animate = FALSE,
width = NULL,
sep = ",",
pre = NULL,
post = NULL,
timeFormat = NULL,
timezone = NULL,
dragRange = TRUE,
tooltip = T,
header = "?",
popup = "Help tips",
pos = "right"
)
}
\description{
Modified shiny sliderInput func
}
|
a54cee1353aa20c9b0b8df2937796638ae5828cb | 9d3b58a64b35f4c209838ed35b2f90d520011c88 | /cachematrix.R | 8e98b87464d335cddd6ad5dc9fd790c69c1f30ad | [] | no_license | ADITYASSSIHL/ProgrammingAssignment2 | 99dae9848cf082d0b54fc1528223b10bfae8cc7d | d34b4e46bab8143ef4514c67ac2a01410ddf5f98 | refs/heads/master | 2022-11-13T06:41:32.752527 | 2020-07-05T07:09:12 | 2020-07-05T07:09:12 | 277,242,737 | 1 | 0 | null | 2020-07-05T06:14:50 | 2020-07-05T06:14:50 | null | UTF-8 | R | false | false | 1,831 | r | cachematrix.R | ## We aim to create two functions to obatain inverse of a given matrix (yeah only an inversable matrix)
##We create two function called makecacheMatirix which wil essentially get you the matrix and also provide the inverse of the matrix
## makeCachematrix is written with both single arrow assignment operators <- and double arrow assignment operators <<-
## makeCacheMatrix: wrap a matrix in an object (a list of closures) that can
## cache its inverse.  Elements: set/get for the matrix itself, and
## setInverse/getInverse for the cached inverse.  Assigning a new matrix via
## set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(value) {
    x <<- value
    cached <<- NULL   # matrix changed, so any stored inverse is stale
  }
  get <- function() x
  setInverse <- function(inverse) cached <<- inverse
  getInverse <- function() cached
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## cacheSolve: return the inverse of a makeCacheMatrix() object, computing it
## with solve() on the first call and serving the cached copy afterwards.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  inverse
}
## Smoke-test the two functions defined above.
mymatrix<-makeCacheMatrix(matrix(1:4, nrow=2,ncol=2))
# an invertible 2x2 matrix; retrieve it via the accessor
mymatrix$get()
## first cacheSolve() call computes the inverse and stores it in the cache
cacheSolve(mymatrix)
## the cached inverse is now available directly
mymatrix$getInverse()
# Cross-check the wrapper against base solve() on a random matrix
a<-matrix(runif(4), nrow=2,ncol=2) # a 2x2 matrix
a_inv<-solve(a) #inverse via base solve()
my_a<-makeCacheMatrix(a) #same matrix through the cache wrapper
cacheSolve(my_a) #using cachesolve function
my_a_inv<-my_a$getInverse() #obtaining inverse from getinverse
# BUG FIX: compare doubles with a numeric tolerance; elementwise `==` on
# floating-point results is unreliable.
isTRUE(all.equal(a_inv, my_a_inv))
|
269ef1c461354033fc8863bc36cdcb9082320448 | e2d7132217df7b0e6025e8de04803a94f6a4d7ef | /initial_explorations.R | 9e8fb77bd43702686d26af7bd091e348f9643caa | [] | no_license | apapiu/ShelterOutcomeKaggle | 21a9b0fc0e9152da9ae3992795b6f27aa55e89e0 | 2b4bdf3f5a70af99c47abb82929e80dda61e672d | refs/heads/master | 2020-05-30T14:46:21.506107 | 2016-09-19T21:51:37 | 2016-09-19T21:51:37 | 55,711,549 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,861 | r | initial_explorations.R | library(dplyr)
library(ggplot2)
library(lubridate)
library(ggthemes) # visualization
#initial explorations:
train <- read.csv("data/train.csv", stringsAsFactors = FALSE)
sapply(train, function(x){unique(x) %>% length()})
#AnimalID Name DateTime OutcomeType
#26729 6375 22918 5
#OutcomeSubtype AnimalType SexuponOutcome AgeuponOutcome
#17 2 6 45
#Breed Color
#1380 366
#[1] "AnimalID" "Name" "DateTime"
#[4] "OutcomeType" "OutcomeSubtype" "AnimalType"
#[7] "SexuponOutcome" "AgeuponOutcome" "Breed"
#[10] "Color"
#let's create some dates vars:
train %>%
mutate(year = year(train$DateTime),
month = month(train$DateTime),
day = day(train$DateTime),
hour = hour(train$DateTime)) -> train
#by animal:
train %>% count(SexuponOutcome)
#by sex
ggplot(data = train, aes(x = SexuponOutcome, fill = OutcomeType)) +
geom_bar(stat = "count", position = "fill", width = 0.6) +
coord_flip() +
scale_fill_brewer(palette = "Set1")
# whoa big difference here - intacts have a worse fate by far.
#let's make a function for the plot:
# pplot(): proportional stacked bar chart of OutcomeType within each level of
# the (unquoted) column `x` of the global `train` data frame, flipped so the
# category labels read horizontally.
# NOTE(review): relies on `train` existing in the calling environment and on
# ggplot2's aes_q() (superseded by tidy evaluation) -- confirm before reuse.
pplot <- function(x) {
  x <- substitute(x)  # capture the column name unevaluated so aes_q can use it
  ggplot(data = train, aes_q(x = x, fill = substitute(OutcomeType))) +
    geom_bar(stat = "count", position = "fill", width = 0.6) +
    coord_flip() +
    scale_fill_brewer(palette = "Set1")
}
#too many breeds but let's look at the word Mix:
train$Mix <- 1*(grepl("Mix", train$Breed, fixed = TRUE))
ggplot(data = train, aes(x = Mix, fill = OutcomeType)) +
geom_bar(stat = "count", position = "fill", width = 0.6) +
coord_flip() +
scale_fill_brewer(palette = "Set1")
#non mix fare a bit better.
#names:
train %>% count(Name) %>% arrange(desc(n)) %>%
filter(n> 20) -> popular_names
#let's make a common and an unnamed names feature:
train$popular_name <- 1*(train$Name %in% popular_names$Name[-1])
train$noname <- 1*(train$Name == "")
train$unique_name <- 1*(!(train$Name %in% popular_names$Name))
#noname is important!
pplot(noname)
pplot(popular_name)
pplot(unique_name)
#let's look at date and time:
pplot(year) #meh
pplot(day) #meh
pplot(hour)
#hour seems important but also erratic:
ggplot(filter(train, hour %in% 7:19), aes(x = hour, fill = OutcomeType)) +
geom_bar(stat = "count", position = "fill") +
scale_fill_brewer(palette = "Set1")
#let's look at age:
train %>% count(AgeuponOutcome) -> temp
train$age = ""
train$age[grepl("day", train$AgeuponOutcome)] <- "baby"
train$age[grepl("week", train$AgeuponOutcome)] <- "baby" #1704
train$age[grepl("month", train$AgeuponOutcome)] <- "less1yr" #8339
train$age[grepl("year", train$AgeuponOutcome)] <- train$AgeuponOutcome[grepl("year", train$AgeuponOutcome)]
sapply(train$age[grepl("year", train$AgeuponOutcome)],
function(x) {strsplit(x," ", fixed = TRUE)[[1]][1]})-> train$age[grepl("year", train$AgeuponOutcome)]
train$age[train$age %in% c("11", "12", "13", "14", "15", "16", "17","18", "19", "20")] <- "old"
train$age <- factor(train$age, level = c("baby", "less1yr",
"1", "2", "3", "4", "5", "6" ,"7",
"8", "9", "10", "old"))
#coool!
ggplot(train, aes(x = age, fill = OutcomeType)) +
geom_bar(stat = "count", position = "fill") +
scale_fill_brewer(palette = "Set1")
# less than 1 yr seem to be a lot of these:
ggplot(train, aes(x = age, fill = OutcomeType)) +
geom_bar(stat = "count") +
scale_fill_brewer(palette = "Set1")
# a look at the months:
train[grepl("month", train$AgeuponOutcome),] -> temp
ggplot(temp, aes(x = AgeuponOutcome, fill = OutcomeType)) +
geom_bar(stat = "count", position = "fill") +
scale_fill_brewer(palette = "Set1")
#months might be worthwhile keeping.
# it might be easier to work with the AgeuponOutcome directly,
#let's just keep it like it is right now:
ggplot(train, aes(x = AgeuponOutcome, fill = OutcomeType)) +
geom_bar(stat = "count", position = "fill") +
scale_fill_brewer(palette = "Set1") +
coord_flip() +
theme_few()
# let's make bags of words for breed and color:
library(tm)
train$color <- 1*grepl("/", train$Color)
pplot(color)
train$Color <- gsub("/", " ", train$Color, fixed = TRUE)
corpus <- Corpus(VectorSource(train$Color))
tdm <- DocumentTermMatrix(corpus, list(stopwords = TRUE,
removePunctuation = TRUE,
removeNumbers = TRUE,
bounds = list(global = c(30,Inf))))
design_matrix_color <- as.matrix(tdm)
#breed:
train$breed <- 1*grepl("/", train$Breed)
grepl("[Cc]hihuahua", train$Breed) %>% sum()
train$chiua <- 1*(grepl("Husky", train$Breed))
pplot(Breed)
pplot(breed) #seems vaguely important.
train$Breed <- gsub("/", " ", train$Breed, fixed = TRUE)
corpus <- Corpus(VectorSource(train$Breed))
tdm <- DocumentTermMatrix(corpus, list(stopwords = TRUE,
removePunctuation = TRUE,
removeNumbers = TRUE,
bounds = list(global = c(30,Inf))))
design_matrix_breed <- as.matrix(tdm)
# still around 100 - lots of breeds here:
### Feature-engineering wrap-up: impute the few blank categories, then build
### the design matrix and response for modelling.
train %>% count(SexuponOutcome) #one record with a blank sex
train %>% count(AgeuponOutcome) #a handful of records with a blank age
train %>% count(hour)
# BUG FIX: the original assigned to a misspelled column ('SexuponOutcom'),
# silently creating a new column, and then wrote the age fill value
# ("3 years") into the sex column. Impute each column correctly, and spell
# the existing "Unknown" level correctly so no spurious level is created.
train$SexuponOutcome[train$SexuponOutcome == ""] <- "Unknown"
train$AgeuponOutcome[train$AgeuponOutcome == ""] <- "3 years"
design_matrix_gen <- model.matrix(OutcomeType ~ AnimalType + SexuponOutcome +
                                    AgeuponOutcome + hour + noname, data = train)
y <- as.factor(train$OutcomeType)
|
26f4d90a2dff9b8fc5dec235a900935cae30b8fe | a664c1e43c37c208e725783a7d785b4c638e78c5 | /ScanStats/man/temporal.normalize.stats.Rd | a6c0866b5075a537d9ca81e6cbffc601a2c72011 | [] | no_license | adalisan/XDATA | f715b6588866cbf9b003512e827f811b42868fe9 | 8c6d21c97e057c211f25606a0c3524a24a968344 | refs/heads/master | 2020-04-10T07:38:20.374000 | 2013-09-16T23:58:32 | 2013-09-16T23:58:32 | 11,539,515 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 761 | rd | temporal.normalize.stats.Rd | \name{temporal.normalize.stats}
\alias{temporal.normalize.stats}
\title{Normalize the maximum scan statistics at each time interval using the statistics most recent el-length time window}
\usage{
temporal.normalize.stats(max.stats.list, el)
}
\arguments{
  \item{max.stats.list}{a named vector that contains the
  maximum scan statistics in the graph at each time
  interval and names in the named vector correspond to
  vertex ids.}
\item{el}{length of time window}
}
\value{
a vector of same length as stats.list. The first (el)
values will be 0, since there is no el-length history to
normalize with respect to
}
\description{
Normalize the maximum scan statistics at each time
interval using the statistics most recent el-length time
window
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.