blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fce54a8cf87aa3ed7ee83373331395ad7b56fd84
|
3094bff921ea86e881e94072b775f690cfe9173e
|
/man/linear.outcome.log.envir.interaction.sds.Rd
|
9aa530e423f19edeef43aa7fab16e200639d4a72
|
[] |
no_license
|
cran/genpwr
|
4a592323fa029e8e954568d2ec2f27efc35bd38c
|
51c256c4ce838a132b56287b63a6db579eb925b0
|
refs/heads/master
| 2021-06-22T07:20:34.615733
| 2021-03-30T23:00:07
| 2021-03-30T23:00:07
| 209,158,670
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,347
|
rd
|
linear.outcome.log.envir.interaction.sds.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linear_outcome_envir_interaction_mle_function.R
\name{linear.outcome.log.envir.interaction.sds}
\alias{linear.outcome.log.envir.interaction.sds}
\title{Function to calculate the standard deviation of y given x for linear models with logistic environment interaction}
\usage{
linear.outcome.log.envir.interaction.sds(
MAF,
P_e,
ES_G,
ES_E,
ES_GE,
mod,
True.Model,
sd_y,
reduced = F
)
}
\arguments{
\item{MAF}{Minor allele Frequency}
\item{P_e}{Population prevalence of logistic environmental factor}
\item{ES_G}{Genetic Effect size}
\item{ES_E}{Environment Effect size}
\item{ES_GE}{Environment x Genetic interaction Effect size}
\item{mod}{Test model}
\item{True.Model}{True model}
\item{sd_y}{Standard deviation of y}
\item{reduced}{logical, indicates whether the X matrix will be used for a reduced model}
}
\value{
The standard deviation of y given x for linear models with logistic environment interaction
}
\description{
Returns the standard deviation of y given x for linear models with logistic environment interaction
}
\examples{
linear.outcome.log.envir.interaction.sds(MAF = 0.1, P_e = 0.2, sd_y = 10,
ES_G = 1.2, ES_E = 1.3, ES_GE = 2, mod = "Dominant", True.Model = "Additive")
}
|
ae82df558d2e69add87dd708cd6e325e84da5d0d
|
d044a356212810ff1436d41f5e281beb4e6c6d5b
|
/R/postprocessing_routines.R
|
625eb4afe05c53fd34a174fe2f2d407cb152486c
|
[
"MIT"
] |
permissive
|
szpiech/gala2-ROH
|
b443492e6a88ca2a1053e6b164eccd4d083014d5
|
fb776072008cb4656cf4da1b9f9a69687aa2897e
|
refs/heads/master
| 2020-03-23T13:24:47.938763
| 2018-07-24T21:40:30
| 2018-07-24T21:40:30
| 141,616,659
| 0
| 0
|
MIT
| 2018-07-19T18:24:57
| 2018-07-19T18:24:56
| null |
UTF-8
|
R
| false
| false
| 8,883
|
r
|
postprocessing_routines.R
|
# ==============================================================================
# Copyright 2018, Asthma Collaboratory
# authors:
# -- Kevin L. Keys
# -- Page C. Goddard
# -- Andy M. Zeiger
# -- Annie Li
# This script contains subroutines for plotting and analyzing GWAS results.
# Source this script after setting the R environment, e.g.
#
# source("set_R_environment.R")
# source("postprocessing_routines.R")
#
# This ensures that all requisite libraries are loaded before defining the
# plotting functions contained herein.
#
# This script postprocessing_routines.R contains no executable code.
#
# It has no input or output.
# ==============================================================================
ConcatenateResults = function(input.prefix, output.path, input.suffix = "ROH.R.out.results", sort.result = FALSE) {
    # ConcatenateResults
    #
    # Merge the per-chromosome association results (chromosomes 1-22) into one
    # data.table, optionally sort it by p-value, write it to disk, and return it.
    #
    # Args:
    #   input.prefix: path prefix of the per-chromosome results files. Should
    #       coincide with argument output.prefix from PerformAssociationAnalysis(),
    #       e.g. ${ROH_DIRECTORY}/${FILENAME_BEFORE_CHROMOSOME}. Files are read
    #       from paste(input.prefix, chr, input.suffix, sep = ".")
    #   output.path: a complete file path where merged results will be stored.
    #   input.suffix: the suffix of the file after the chromosome number. Should
    #       coincide with argument suffix from PerformAssociationAnalysis().
    #       Default: "ROH.R.out.results"
    #       (bug fix: previously this argument was ignored and the default
    #       suffix was hard-coded in the file paths)
    #   sort.result: should the final data.table be sorted by p-value?
    #       Default: FALSE
    #
    # Returns: one data.table with all results; the leftmost column "chr"
    #   records the source chromosome of each row.

    # read each chromosome's results and tag its rows with the chromosome
    # number; collecting into a list and binding once avoids the quadratic
    # rbind-in-a-loop pattern of the previous implementation
    per.chr = lapply(seq_len(22), function(chr) {
        input.file.path = paste(input.prefix, chr, input.suffix, sep = ".")
        results.df = fread(input.file.path, header = TRUE)
        data.table(chr = rep(chr, nrow(results.df)), results.df)
    })
    gwas.df = rbindlist(per.chr)

    # sort frame by p-value if requested
    if (sort.result) {
        gwas.df = gwas.df[order(gwas.df$p),]
    }

    # write the merged data.table to file and return it
    fwrite(gwas.df, file = output.path)
    return(gwas.df)
}
CreateDiagnosticPlots = function(results.filepath, manhattan.plot.filepath, qq.plot.filepath, manhattan.plot.title = "Manhattan plot", threshold = 5e-8, highlight.SNPs = NULL, manhattan.ylims = c(0,8), color = c("black", "blue"), significance.threshold = 5e-8, suggestive.threshold = 1e-7, qq.plot.title = "QQ Plot", qq.plot.subtitle = NULL, qq.xlim = NULL, qq.ylim = NULL) {
    # CreateDiagnosticPlots
    #
    # Read concatenated GWAS results from disk and render two diagnostic
    # plots -- a Manhattan plot and a QQ plot -- each saved to its own path.
    #
    # Args:
    #   results.filepath: path to concatenated GWAS results for one population
    #   manhattan.plot.filepath: path where the Manhattan plot will be saved
    #   qq.plot.filepath: path where the QQ plot will be saved
    #   manhattan.plot.title: an informative Manhattan plot title.
    #       Default: "Manhattan plot"
    #   threshold: p-value limit for labeling SNPs on the Manhattan plot;
    #       SNPs with p-values greater than "threshold" are not labeled.
    #       Low values make plotting slow and may overlabel the plot.
    #       Default: 5e-8 (genome-wide significance)
    #   highlight.SNPs: vector of SNP ids to highlight,
    #       e.g. c("rs12345", "rs90181294", "rs556782").
    #       Default: NULL (no SNPs to highlight)
    #   manhattan.ylims: Y-axis limits for the Manhattan plot, e.g. c(min, max).
    #       Default: c(0,8), which plots p-values up to 1e-8
    #   color: a vector of colors (min 2 colors).
    #       Default: c("black", "blue")
    #   significance.threshold: the Bonferroni significance threshold.
    #       Default: 5e-8
    #   suggestive.threshold: the suggestive threshold of significance.
    #       Default: 1e-7
    #   qq.plot.title: title of the QQ plot. Default: "QQ Plot"
    #   qq.plot.subtitle: subtitle for the QQ plot; NULL records genomic lambda.
    #       NOTE(review): currently accepted but not forwarded to
    #       CreateQQPlot -- confirm whether that is intended.
    #   qq.xlim, qq.ylim: X- and Y-axis limits for the QQ plot.
    #       Default: NULL (use default axis limits)
    #
    # Output: nothing (returns NULL)

    # pull the concatenated association results into memory
    diagnostics.df = fread(results.filepath, header = TRUE)

    # the plotting helpers expect qqman-style column names
    plot.df = subset(diagnostics.df, select = c("chr", "position", "Probe", "p"))
    colnames(plot.df) = c("CHR", "BP", "SNP", "P")

    # Manhattan plot of the ROH GWAS results, written to disk by the helper
    manhattan.plot = CreateManhattanPlot(plot.df,
        threshold = threshold,
        highlight.SNPs = highlight.SNPs,
        ylims = manhattan.ylims,
        color = color,
        title = manhattan.plot.title,
        significance.threshold = significance.threshold,
        suggestive.threshold = suggestive.threshold,
        save.as = manhattan.plot.filepath)

    # QQ plot, likewise written to disk by the helper
    qq.plot = CreateQQPlot(plot.df,
        title = qq.plot.title,
        xlim = qq.xlim,
        ylim = qq.ylim,
        save.as = qq.plot.filepath)

    # DISCUSS: perhaps return the diagnostic plots?
    return()
}
ComputeSignificanceThreshold = function(input.file) {
    # ComputeSignificanceThreshold
    #
    # Use coda to estimate the effective number of independent tests among the
    # p-values of a GWAS, and derive the corresponding Bonferroni-corrected and
    # suggestive significance thresholds.
    #
    # Args:
    #   input.file: your input file name. The expected format is the output
    #       from PerformAssociationAnalysis(); must contain columns chr,
    #       Probe, and p.
    #
    # Side effects:
    #   Assigns p.adj and suggestive in the global environment (via <<-) so
    #   that other scripts can reuse them.
    #
    # Returns:
    #   A list with four entries:
    #     -- n.indep.test: effective number of independent tests
    #     -- bon.corr: Bonferroni-corrected alpha (0.05 / n.indep.test)
    #     -- transformation: the corrected alpha after -log10 transformation
    #     -- sugg.thresh: the suggestive threshold, 1 / (2 * n.indep.test)

    gwas.results = fread(input.file, fill = TRUE)

    # Work on a copy ordered by genomic position (chromosome, then probe)
    # rather than by p-value: effectiveSize() adjusts for autocorrelation
    # between successive values, so feeding it significance-sorted p-values
    # would produce an artificially small estimate.
    ordered.results = copy(gwas.results)
    setorder(ordered.results, chr, Probe)

    # effectiveSize() needs the -log10 scale (large numbers rather than tiny
    # p-values, e.g. 10^-6 becomes 6); as.numeric() strips the "var1" name
    # coda attaches so a plain number flows through the rest of the code.
    n.indep.tests = as.numeric(effectiveSize(-log10(ordered.results$p)))

    # Bonferroni correction: alpha = 0.05 divided by the number of
    # independent tests; exported globally on purpose for other scripts.
    p.adj <<- 0.05 / n.indep.tests

    # the same threshold after -log10 transformation
    log10.threshold = -log10(p.adj)

    # suggestive threshold; also exported globally on purpose
    suggestive <<- 1 / (2 * n.indep.tests)

    # number of independent tests, adjusted threshold, -log10 form, and the
    # suggestive threshold
    list("n.indep.test" = n.indep.tests,
         "bon.corr" = p.adj,
         "transformation" = log10.threshold,
         "sugg.thresh" = suggestive)
}
|
7bc01f2b3ab4b66c653234962039cd08ecb0be90
|
cbd210c2c2aaebb183944af950c5145714faf1f2
|
/stat6289project1code.R
|
43145f81c6691a8b42798b7e6c0899a43fbcd8ae
|
[] |
no_license
|
yukefu/stat6289-project1
|
5f3a6735447a1aef98e693a78dd84847144a4d01
|
6ea866c7bdf0f1e23be13b86689a8236a75ebf4e
|
refs/heads/master
| 2020-04-04T06:58:00.299156
| 2019-04-18T00:32:17
| 2019-04-18T00:32:17
| 155,763,683
| 0
| 0
| null | 2018-11-04T15:01:46
| 2018-11-01T19:14:18
|
R
|
UTF-8
|
R
| false
| false
| 9,057
|
r
|
stat6289project1code.R
|
# ==========================================================================
# stat6289 project 1: data preparation for a Shiny app exploring the
# FiveThirtyEight "sleeping alone" survey question
# "When both you and your partner are at home, how often do you sleep in
# separate beds?" (column HowOften).
# ==========================================================================

# read the survey straight from the project's GitHub repository
data=read.csv("https://raw.githubusercontent.com/yukefu/stat6289-project1/master/sleeping-alone-data-new.csv")
summary(data)

library(tidyr)
library(shiny)
library(ggplot2)
library(DT)
library(dplyr)

# Each block below cross-tabulates HowOften against one respondent attribute
# and flattens the result into a character matrix with five columns:
#   [,1] HowOften answer      [,2] full question text
#   [,3] attribute level      [,4] percentage of all respondents
#   [,5] short attribute (variable) name
# All matrices are stacked into m21 at the end for the app's data table.
# NOTE(review): the rep(rownames(tableX), 6) / rep(..., each = k) patterns
# assume HowOften always has exactly 6 levels -- confirm against the data.

# --- relationship status: 6 levels x 6 HowOften answers = 36 rows ---
table1=round(prop.table(table(data$RelationshipStatus , data$HowOften)),4)
m1=matrix(nrow=36, ncol=5)
m1[,5]=rep("RelationshipStatus",36)
m1[,1]=rep(t(colnames(table1)),each=6)
m1[,3]=rep(rownames(table1),6)
m1[,4]=as.vector(table1*100)
question=c('Which of the following best describes your current relationship status?')
m1[,2]=rep(question,36)

# --- relationship length: 6 x 6 = 36 rows ---
table2=round(prop.table(table(data$HowLongRelationship , data$HowOften)),4)
m2=matrix(nrow=36, ncol=5)
m2[,5]=rep("HowLongRelationship",36)
m2[,1]=rep(t(colnames(table2)),each=6)
m2[,3]=rep(rownames(table2),6)
m2[,4]=as.vector(table2*100)
question=c('How long have you been in your current relationship? If you are not currently in a relationship, please answer according to your last relationship.')
m2[,2]=rep(question,36)

# --- gender: 2 x 6 = 12 rows; blank gender rows dropped afterwards ---
table15=round(prop.table(table(data$Gender , data$HowOften)),4)
m15=matrix(nrow=12, ncol=5)
m15[,5]=rep("Gender",12)
m15[,1]=rep(t(colnames(table15)),each=2)
m15[,3]=rep(rownames(table15),6)
m15[,4]=as.vector(table15*100)
question=c('Gender')
m15[,2]=rep(question,12)
m15=m15[which(m15[,3]!=""),]

# --- age group: 4 x 6 = 24 rows ---
table16=round(prop.table(table(data$Age , data$HowOften)),4)
m16=matrix(nrow=24, ncol=5)
m16[,5]=rep("Age",24)
m16[,1]=rep(t(colnames(table16)),each=4)
m16[,3]=rep(rownames(table16),6)
m16[,4]=as.vector(table16*100)
question=c('Age')
m16[,2]=rep(question,24)

# --- income: respondents with a blank income answer removed first ---
data2=data[which(data$Income !=""),]
#there are 195 respondents do not answer income, we have 810 response left.
table17=round(prop.table(table(data2$Income , data2$HowOften)),4)
m17=matrix(nrow=36, ncol=5)
m17[,5]=rep("Income",36)
m17[,1]=rep(t(colnames(table17)),each=6)
m17[,3]=rep(rownames(table17),6)
m17[,4]=as.vector(table17*100)
question=c('Income')
m17[,2]=rep(question,36)
m17=m17[which(m17[,3]!=""),]

# --- education: same pattern as income ---
data2=data[which(data$Education !=""),]
table18=round(prop.table(table(data2$Education , data2$HowOften)),4)
m18=matrix(nrow=36, ncol=5)
m18[,5]=rep("Education",36)
m18[,1]=rep(t(colnames(table18)),each=6)
m18[,3]=rep(rownames(table18),6)
m18[,4]=as.vector(table18*100)
question=c('Education')
m18[,2]=rep(question,36)
m18=m18[which(m18[,3]!=""),]

# --- location: 10 regions x 6 = 60 rows ---
data2=data[which(data$Location !=""),]
table19=round(prop.table(table(data2$Location , data2$HowOften)),4)
m19=matrix(nrow=60, ncol=5)
m19[,5]=rep("Location",60)
m19[,1]=rep(t(colnames(table19)),each=10)
m19[,3]=rep(rownames(table19),6)
#m19[,4]=paste(as.vector(table19*100),"%")
m19[,4]=as.vector(table19*100)
question=c('Location')
m19[,2]=rep(question,60)
m19=m19[which(m19[,3]!=""),]

# --- marginal distribution of HowOften alone ("All" pseudo-attribute) ---
table20=round(prop.table(table(data$HowOften)),4)
m20=matrix(nrow=6, ncol=5)
m20[,5]=rep("All",6)
m20[,3]=rep("All",6)
m20[,1]=t(rownames(table20))
#m20[,4]=paste(as.vector(t(table20)*100),"%")
m20[,4]=as.vector(t(table20)*100)
question=c('All')
m20[,2]=rep(question,6)

# stack every attribute's long-form table into one matrix for the DT output
m21=rbind(m1,m2,m15,m16,m17,m18,m19,m20)
colnames(m21)<-c("Howoften","Questions","Options","Proportion (%)","Variable Name")

# lookup table mapping full question text (col 1) to the short variable
# name used in the UI (col 2); used by the server to show readable labels
VariableName=matrix(c("Which of the following best describes your current relationship status?",
"How long have you been in your current or last relationship?", "Gender" ,
"Age" ,"Income" , "Education" , "Location" ,
"RelationshipStatus","HowLongRelationship",
"Gender","Age","Income","Education","Location"),ncol=2)
# Shiny UI: a sidebar (rendered on the right) holding the HowOften filter,
# the attribute selector, a percentage-range slider and a credits button,
# plus a main panel with the frequency table, a bar chart, and the original
# FiveThirtyEight article embedded in an iframe.
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      # HowOften filter; only shown while the BasicQuestion tab is active
      conditionalPanel(
        'input.dataset==="BasicQuestion"',
        wellPanel(
          selectInput(inputId = "Howoften",
                      label = "When both you and your partner are at home, how often do you sleep in separate beds?",
                      choices = c("All","A few times per month", "A few times per week", "Every night", "Once a month or less","Once a year or less"),
                      selected = "All"))
      ),
      # Select variable
      conditionalPanel(
        'input.dataset==="BasicQuestion"',
        wellPanel(
          selectInput(inputId = "BasicInformation",
                      label = "Basic Questions:",
                      # display text maps to the short column name used server-side
                      choices = c("All",
                                  "Which of the following best describes your current relationship status?" = "RelationshipStatus",
                                  "How long have you been in your current or last relationship?" = "HowLongRelationship",
                                  "Gender" = "Gender",
                                  "Age" = "Age",
                                  "Income" = "Income",
                                  "Education" = "Education",
                                  "Location" = "Location"),
                      selected = "All"))
      ),
      # only rows whose proportion is below this percentage are shown
      conditionalPanel(
        'input.dataset==="BasicQuestion"',
        wellPanel(
          sliderInput(inputId ="Range", label ="Range:",
                      min = 0, max = 100,
                      value = 100))
      ),
      # credits button; the answer text is revealed by the server on click
      conditionalPanel(
        'input.dataset==="BasicQuestion"',
        wellPanel(
          actionButton("apply","Who did this amazing data visualization?"),
          hr(),
          textOutput(outputId = "amazing")
        )
      )
    ),
    # NOTE(review): sidebarLayout() normally receives a mainPanel() object;
    # here a parenthesized tabsetPanel is passed via `mainPanel = (...)`
    # (with a nested mainPanel() inside the first tab) -- confirm intended.
    mainPanel = (
      tabsetPanel(id= 'dataset' ,type="tabs",
                  tabPanel("BasicQuestion",
                           mainPanel(h2("How Many Couples Sleep in Separate Beds?"),
                                     h3("Frequency Table"),
                                     # tableOutput(outputId = "feqtable"),
                                     hr(),
                                     DT::dataTableOutput(outputId = "table21"),
                                     hr(),
                                     h3("Corresponding Bar Chart"),
                                     hr(),
                                     plotOutput(outputId = "graph"),
                                     hr(),
                                     htmlOutput(outputId = "sample_info"),
                                     hr(),
                                     # NOTE(review): trailing comma above relies on
                                     # the tag function tolerating an empty
                                     # argument -- confirm it renders cleanly
                           )),
                  # second tab: the source article in an embedded frame
                  tabPanel("OriginalNews",
                           tags$iframe(style="height:800px; width:100%; scrolling=yes",
                                       src="https://fivethirtyeight.com/features/dear-mona-how-many-couples-sleep-in-separate-beds/"))
      )
    ),
    position = "right"
  )
)
# Shiny server: reacts to the selected attribute (BasicInformation) and the
# selected HowOften answer, rendering a sample-size note, the filtered
# frequency table (from m21), a bar chart, and the credits text.
server <-function(input, output){
  # rows with a non-empty value for the selected attribute, keeping only
  # the HowOften column plus that attribute's column
  selected_data=reactive(
    {data %>% select(c("HowOften",input$BasicInformation)) %>% filter(data[,input$BasicInformation]!= "")})
  # number of respondents with a valid answer for the selected attribute
  n_sample=reactive(nrow(selected_data()))
  # long question text matching the selected short variable name
  VarName=reactive(
    VariableName[which(VariableName[,2]==input$BasicInformation),1])

  # explanatory note above the table; wording varies with the two filters
  output$sample_info <-renderUI({
    if (input$BasicInformation == "All"){
      paste("Note:",nrow(data), "respondents took participate in this survey and provided valid answers.")
    }
    else if (input$BasicInformation != "All" && input$Howoften == "All"){
      paste("Note:",n_sample(),
            "respondents give valid answers. The table below shows the frequencies of between 'How often do you sleep in separate beds' and '", VarName(),"'.")
    }
    else if (input$BasicInformation != "All" && input$Howoften != "All"){
      paste("Note:",n_sample(),
            "respondents give valid answers. The table below shows the frequencies of between 'How often do you sleep in separate beds' and '",
            VarName(),"'.",
            nrow(selected_data()[which(selected_data()$HowOften==input$Howoften),]),
            "respondents choose '", input$Howoften, "' The table below shows the frequencies of between", input$Howoften, "and '", VarName(),"'.")
    }
  })

  # frequency table: m21 filtered by attribute (col 5), HowOften answer
  # (col 1), and the proportion slider (col 4); only cols 1-4 are shown
  output$table21 <- DT::renderDataTable({
    if (input$BasicInformation == "All" && input$Howoften == "All")
    {
      DT::datatable(data = m21[which(m21[,3]=="All" &
                                       as.numeric(m21[,4])< input$Range),1:4])
    }
    else{
      data1 <- m21
      if (input$BasicInformation != "All"){
        data1<-data1[which(data1[,5]==input$BasicInformation &
                             as.numeric(data1[,4])< input$Range),1:4]
      }
      if (input$Howoften != "All"){
        data1<-data1[which(data1[,1]==input$Howoften &
                             as.numeric(data1[,4])< input$Range),1:4]
      }
      DT::datatable(data = data1)
    }
  })

  # bar chart of HowOften, optionally filled/faceted by the chosen attribute
  # and restricted to one HowOften answer
  output$graph<-renderPlot(
    {
      if (input$BasicInformation == "All"){
        g=ggplot(data, aes(HowOften))+geom_bar(aes(fill=HowOften))+labs(fill = " ")
      }
      else if (input$BasicInformation != "All" && input$Howoften == "All"){
        g=ggplot(selected_data(), aes(HowOften))+geom_bar(aes(fill=selected_data()[,input$BasicInformation]))+labs(fill = " ")
      }
      else if (input$BasicInformation != "All" && input$Howoften != "All"){
        da=selected_data()[which(selected_data()$HowOften==input$Howoften),]
        g=ggplot(da, aes(da[,input$BasicInformation]))+geom_bar(aes(fill=da[,input$BasicInformation]))+labs(fill = " ")+labs(x=VarName())
      }
      g
    }
  )

  # reveal the author credit when the button is clicked
  observeEvent(input$apply,
               {output$amazing<-renderText({"Her name is Yuke Fu."})})
}

# launch the application
shinyApp(ui = ui, server = server)
|
d5c55785b9f52f6925fa29132ff78cbd12e82fb6
|
e36e8d5859f764ffa3e6f18d2b5dcd6bbd4e80f0
|
/man/al_tracking_chart.Rd
|
6917a3e682aa3db941944a96533c260e99b560f6
|
[
"MIT"
] |
permissive
|
ropensci/rrricanes
|
23855df40a5cc598b94ec90ac9e32c70b291e2a8
|
533454c8e4d3b7dff6dc2a6592a7b304fef41fdb
|
refs/heads/main
| 2023-01-07T17:56:01.118103
| 2022-12-31T18:29:58
| 2022-12-31T18:29:58
| 74,975,357
| 19
| 9
|
NOASSERTION
| 2022-12-31T18:29:59
| 2016-11-28T13:25:12
|
R
|
UTF-8
|
R
| false
| true
| 1,050
|
rd
|
al_tracking_chart.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracking_chart.R
\name{al_tracking_chart}
\alias{al_tracking_chart}
\title{al_tracking_chart}
\usage{
al_tracking_chart(...)
}
\arguments{
\item{...}{Additional parameters for \link{tracking_chart} and ggplot2}
}
\value{
ggplot2 object centered on Atlantic basin.
}
\description{
Build tracking chart centered on Atlantic Basin.
}
\examples{
\dontrun{
# Build map with white land areas, thin black borders
al_tracking_chart(color = "black", size = 0.1, fill = "white")
# 50nm resolution, no states
al_tracking_chart(res = 50, states = FALSE, color = "black", size = 0.1,
fill = "white")
# 50nm resolution, coastlines only
al_tracking_chart(countries = FALSE, res = 50, color = "black", size = 0.1,
fill = "white")
# Adding and modifying with ggplot functions
al_tracking_chart(color = "black", size = 0.1, fill = "white") +
ggplot2::labs(x = "Lon", y = "Lat",
title = "Base Atlantic Tracking Chart")
}
}
\seealso{
\code{\link{tracking_chart}}
}
|
cc4fa61c483d1df2ad3d2c4f706c5ec0a9099f8a
|
52986252c06e022329d2300719ad4a0e3942fcc8
|
/build_plotly_map.R
|
3b8e11e89e1e606a928b94da722b86cac0949dbb
|
[] |
no_license
|
alexpawlowski/biomass-for-rps
|
5d46fd856287b2f727e6fedec81a36b5aacd9bb8
|
78f7d322b007f599309110f45cbc6bd41d02f861
|
refs/heads/master
| 2021-05-11T21:21:15.064641
| 2018-04-09T17:23:00
| 2018-04-09T17:23:00
| 117,466,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 525
|
r
|
build_plotly_map.R
|
# build_plotly_map.R
# Read a composite US-counties GeoJSON, reproject and simplify it, and
# render it as an interactive plotly map.
# NOTE(review): installing a package at run time is a side effect a script
# should normally avoid; kept here to preserve the original workflow.
devtools::install_github('ropensci/plotly')
library(plotly)  # also attaches ggplot2 (a plotly dependency) and provides %>%
library(sf)      # st_read / st_transform / st_simplify -- was missing: the
                 # script called sf functions without attaching the package
#map is from hrmstr alberusa package. since I'm only planning to use the map, I wanted to reduce a package dependency. No hard feelings @hrbmstr!
us_map <- st_read('geo_data/composite_us_counties.geojson') #'geo_data/us_elided.geojson')
# Lambert conformal conic projection centered on the continental US (meters),
# then simplify the geometry to a 1 km tolerance to keep the payload small.
us_map2 <- us_map %>%
  st_transform("+proj=lcc +lat_1=33 +lat_2=45 +lat_0=39 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs") %>%
  st_simplify(preserveTopology = TRUE, dTolerance = 1000)  # name the argument rather than rely on position
p <- ggplot(us_map2) + geom_sf()
ggplotly(p)
|
0665b4f47a877958d0f1142769e5777ede2dbf89
|
55ade2b1275b37a351d0ce542827771802d4bcec
|
/plot2.R
|
a5afe3a84963da1c0aeb5cbf2e022010a9fb507d
|
[] |
no_license
|
pkaza/ExData_Plotting1
|
ffbbdc03d3dac534d3f4d6a611607d6dde4b10f4
|
ea64ec7c001b2361cdbb1aedb70e4d1cdda23896
|
refs/heads/master
| 2020-05-29T11:53:58.373900
| 2015-05-10T23:13:11
| 2015-05-10T23:13:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 708
|
r
|
plot2.R
|
##Plot 2: Global Active Power (kilowatts) over time for 2007-02-01/02.
##Reads the UCI "household power consumption" text file from the working
##directory and writes plot2.png (480 x 480 px).

##Load data
#setwd("E:/BOKU_COURSES/R/coursera/exploratory_data_analysis/week_1")
# semicolon-separated file; "?" marks missing values; Date and Time are
# character, the remaining seven columns numeric.
# (fixes: spell out header=TRUE instead of T, and na.strings instead of the
# partially matched `na=` argument)
epc<-read.table(file='household_power_consumption.txt',
                header=TRUE, sep=';', na.strings='?',
                colClasses=c('character','character',rep('numeric',7)))

##Subset the 2 days needed (dates are stored as d/m/Y strings)
epc_sub<-epc[epc$Date=="1/2/2007" | epc$Date=="2/2/2007",]

# combine Date and Time into a single date-time column for the x axis
datestring<-paste(epc_sub$Date,epc_sub$Time)
epc_sub$DateTime<-strptime(datestring, "%d/%m/%Y %H:%M:%S" )
# renumber rows after subsetting; seq_len is safe even for an empty subset
rownames(epc_sub)<-seq_len(nrow(epc_sub))

##Plot 2: line chart of Global Active Power against time
png(filename = "plot2.png",
    width = 480, height = 480,
    units = "px")
plot(epc_sub$DateTime, epc_sub$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
7192f87797c80c4c5bb6ee686a5037a24772a924
|
5d84eaf8c8169db6e71511fe90d99fa6b2938a6c
|
/R/app_ui.R
|
990d0a7e8144c7b0734e3962b132217d85de4fc4
|
[
"MIT"
] |
permissive
|
Eleftheria1/whatsalyze
|
59cd97b523bcd40143b0d6e6308ecc107b06fecd
|
8c5bb862b8f6c90142bf13283c5156960dcaf6f2
|
refs/heads/master
| 2023-03-15T04:20:09.547764
| 2021-03-20T20:39:28
| 2021-03-20T20:39:28
| 347,648,844
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,771
|
r
|
app_ui.R
|
#' The application User-Interface
#'
#' Builds the shinydashboard UI for the Whatsalyze app: a header with a
#' links dropdown, a sidebar with one menu entry per analysis section plus
#' an upload button, and a body with one tab item per section.
#'
#' @param request Internal parameter for `{shiny}`.
#'     DO NOT REMOVE.
#' @import shiny
#' @import shinydashboard
#' @noRd
app_ui <- function(request) {
  tagList(
    # Leave this function for adding external resources
    golem_add_external_resources(),
    # List the first level UI elements here
    # spinner indicator shown while the server is busy
    shiny.info::busy("spinner"),
    dashboardPage(
      skin = "black",
      # header: app title plus a notifications-style dropdown repurposed
      # to hold external links
      dashboardHeader(
        title = "Whatsalyze",
        dropdownMenu(
          type = "notifications",
          icon = icon("link"),
          headerText = "Links",
          notificationItem("Source code on Github", icon = icon("github"),
                           href = "https://github.com/EmanuelSommer/whatsalyze")
        )
      ),
      # sidebar: navigation menu (tabName values match the tabItems below)
      # and a centered chat-upload button
      dashboardSidebar(
        sidebarMenu(
          menuItem(text = "Overview", tabName = "over_stats",
                   icon = icon("glasses")
          ),
          menuItem(text = "Compare them!",
                   menuSubItem(text = "Overall", tabName = "comp_overall"),
                   menuSubItem(text = "By message", tabName = "comp_messages"),
                   menuSubItem(text = "Emojis", tabName = "comp_emojis"),
                   icon = icon("chart-bar")
          ),
          menuItem(text = "Multi compare",
                   tabName = "multi_comp",
                   icon = icon("balance-scale"))
        ),
        tags$br(),
        # 2/8/2 column layout centers the upload button in the sidebar
        fluidRow(
          column(2),
          column(
            8,
            actionButton(
              "inputbutton",
              label = "Upload file",
              icon = icon("upload")
            )
          ),
          column(2)
        )
      ),
      dashboardBody(
        # recolor the "yellow" value boxes to the app's green/dark palette
        tags$style(".small-box.bg-yellow { background-color: #58E370 !important; color: #3C252B !important; }"),
        tabItems(
          # Overview ###############################################
          tabItem(
            tabName = "over_stats",
            tags$div(
              style = "text-align: center;color: #3C252B;font-weight: bold;",
              tags$h2(
                emo::ji_glue(":detective: Let's have a look! :detective:")
              )
            ),
            tags$br(),
            # module UIs defined elsewhere in the package
            mod_over_stats_ui("over_stats_ui"),
            tags$br(),
            mod_over_act_ui("over_act_ui")
          ),
          # Compare them ##########################################
          tabItem(
            tabName = "comp_overall",
            mod_comp_overall_ui("comp_overall_ui")
          ),
          tabItem(
            tabName = "comp_messages",
            mod_comp_messages_ui("comp_messages_ui")
          ),
          tabItem(
            tabName = "comp_emojis",
            mod_comp_emojis_ui("comp_emojis_ui")
          ),
          # Multi compare #########################################
          tabItem(
            tabName = "multi_comp",
            tags$div(
              style = "text-align: center;color: #3C252B;font-weight: bold;",
              tags$h2(
                emo::ji_glue("Let's compare multiple chats! :balance_scale:")
              )
            ),
            tags$br(),
            mod_multi_table_ui("multi_table_ui")
          )
        )
      )
    )
  )
}
#' Add external Resources to the Application
#'
#' This function is internally used to add external
#' resources inside the Shiny application: it mounts the package's
#' `app/www` directory and injects the favicon and bundled CSS/JS
#' into the document head.
#'
#' @import shiny
#' @importFrom golem add_resource_path activate_js favicon bundle_resources
#' @noRd
golem_add_external_resources <- function(){
  # serve the package's app/www directory under the /www URL prefix
  add_resource_path(
    'www', app_sys('app/www')
  )

  # head tags returned to the caller: favicon plus bundled resources
  tags$head(
    favicon(),
    bundle_resources(
      path = app_sys('app/www'),
      app_title = 'whatsalyze'
    )
    # Add here other external resources
    # for example, you can add shinyalert::useShinyalert()
  )
}
|
08e5e248b6056b89a6f857e6c279f132df680af5
|
c0e766a6a57e3c5c32f8b0afe130b8df66e6dbf9
|
/rsellPoshmark/R/PM_Table_Var_Summary.R
|
873d143d327acdbb44615817543d26e164460dac
|
[] |
no_license
|
t2tech-corp/Rsell-Packages
|
b450fec180754aa9cf0cf3ab6b369c74c57b7e70
|
047a2348650e5a2ee0bc52500824a34f16167056
|
refs/heads/main
| 2023-03-03T00:22:15.006720
| 2021-02-14T22:58:12
| 2021-02-14T22:58:12
| 329,474,392
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,171
|
r
|
PM_Table_Var_Summary.R
|
#' Creates a Poshmark Summary Table by Variable Name
#'
#' Groups the Poshmark Sales Table by the column named in \code{pm_var}
#' (rows with an empty value for that column are dropped) and summarizes
#' listing-time and earnings statistics per group.
#'
#' @param pm_sales The Poshmark Sales Table for the user. Must contain the
#'   columns \code{days_listed}, \code{order_price}, \code{net_earnings},
#'   \code{net_profit}, and the column named by \code{pm_var}.
#' @param pm_var The variable name (a character string) used for grouping.
#' @return A Poshmark Summary Table as a data frame, one row per group.
#' @export
#' @examples
#' \dontrun{
#' PM_Table_Var_Summary(pm_sales, "brand")
#' }
PM_Table_Var_Summary <- function(pm_sales, pm_var) {
    # .data[[pm_var]] is the tidy-eval idiom for referring to a column by a
    # string name; it replaces the older !!as.symbol(pm_var) construction.
    pm_var_summary <- pm_sales %>%
        filter(.data[[pm_var]] != "") %>%  # drop rows with no grouping value
        group_by(.data[[pm_var]]) %>%
        summarize(count = n(),
                  avg_days_listed = round(mean(days_listed), 1),
                  tot_order_price = sum(order_price),
                  avg_order_price = round(mean(order_price), 2),
                  tot_net_earnings = sum(net_earnings),
                  avg_net_earnings = round(mean(net_earnings), 2),
                  tot_net_profit = sum(net_profit),
                  avg_net_profit = round(mean(net_profit), 2)) %>%
        ungroup()
    ###
    return(pm_var_summary)
}
|
51ffb218f1715bd0ba73560d6c618aece8c4b156
|
20b9147d2752db0437413074134fefc239914ba9
|
/R/FulfillmentService.R
|
b47ab7aea6259d48ade327f0db07a9582ff34777
|
[] |
no_license
|
charliebone/shopifyr
|
ad07496a0c9314e8ffd729907bb940a00bc54053
|
1c106dc29d34536e32ca3ba03466459c56a9d74e
|
refs/heads/master
| 2021-06-01T23:00:08.938662
| 2019-08-11T01:40:35
| 2019-08-11T01:40:35
| 23,026,426
| 27
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,786
|
r
|
FulfillmentService.R
|
#
# shopifyr: An R Interface to the Shopify API
#
# Copyright (C) 2015 Charlie Friedemann cfriedem @ gmail.com
# Shopify API (c) 2006-2015 Shopify Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########### FulfillmentService functions ###########
#' @templateVar name FulfillmentService
#' @templateVar urlSlug shipping_and_fulfillment/fulfillmentservice
#' @template api
NULL
## GET /admin/api/#{api_version}/fulfillment_services.json
## Receive a list of all FulfillmentServices
#' @rdname FulfillmentService
getFulfillmentServices <- function(...) {
    # list every fulfillment service; `...` is forwarded to the private
    # request helper (e.g. extra query parameters)
    private$.request("fulfillment_services", ...)$fulfillment_services
}
## POST /admin/api/#{api_version}/fulfillment_services.json
## Create a new FulfillmentService
#' @rdname FulfillmentService
createFulfillmentService <- function(fulfillmentService, ...) {
    # wrap the bare list in a "fulfillment_service" envelope as the API
    # expects; check=FALSE is passed to the private wrapper (semantics
    # defined elsewhere in the package -- presumably skips id validation
    # for a not-yet-created resource)
    fulfillmentService <- private$.wrap(fulfillmentService, "fulfillment_service", check=FALSE)
    private$.request("fulfillment_services", reqType="POST", data=fulfillmentService, ...)$fulfillment_service
}
## GET /admin/api/#{api_version}/fulfillment_services/#{id}.json
## Receive a single FulfillmentService
#' @rdname FulfillmentService
getFulfillmentService <- function(fulfillmentServiceId, ...) {
    # fetch one fulfillment service by its numeric/string id
    private$.request(private$.url("fulfillment_services",fulfillmentServiceId), ...)$fulfillment_service
}
## PUT /admin/api/#{api_version}/fulfillment_services/#{id}.json
## Modify an existing FulfillmentService
#' @rdname FulfillmentService
modifyFulfillmentService <- function(fulfillmentService, ...) {
    # wrap the list in the API envelope; the wrapped object's id is then
    # used to build the resource URL for the PUT request
    fulfillmentService <- private$.wrap(fulfillmentService, "fulfillment_service")
    private$.request(private$.url("fulfillment_services",fulfillmentService$fulfillment_service$id), reqType="PUT", data=fulfillmentService, ...)$fulfillment_service
}
## DELETE /admin/api/#{api_version}/fulfillment_services/#{id}.json
## Remove a FulfillmentService from the database
#' @rdname FulfillmentService
deleteFulfillmentService <- function(fulfillmentServiceId, ...) {
    # issue the DELETE and return the raw response from the request helper
    private$.request(private$.url("fulfillment_services",fulfillmentServiceId), reqType="DELETE", ...)
}
|
d638160fe4e8f2e1d97f4fb7899ab535d5b678e6
|
f23c29c28a3aa386372d6f0e0e9faae74cf10296
|
/R/rjournal_article.R
|
ef14dca5f968ab9314e23a8a3d287b4f0b4800ad
|
[] |
no_license
|
xiaoningwang/rticles
|
1d9318ea9f95d3ff2078a2dec5d95db09a091cc7
|
5e81f6aa7ccfbdfb657b3c5786b56bb5ccb4cf88
|
refs/heads/master
| 2023-05-11T19:28:08.593412
| 2023-04-25T19:14:14
| 2023-04-25T19:14:14
| 97,591,351
| 1
| 0
| null | 2017-07-18T11:42:17
| 2017-07-18T11:42:17
| null |
UTF-8
|
R
| false
| false
| 6,446
|
r
|
rjournal_article.R
|
#' R Journal format.
#'
#' Format for creating R Journal articles. Adapted from
#' <https://journal.r-project.org/submissions.html>.
#'
#' This file is only a basic article template. For full details of _The R
#' Journal_ style and information on how to prepare your article for submission,
#' see the [Instructions for Authors](https://journal.r-project.org/share/author-guide.pdf)
#'
#' ## About this format and the R Journal requirements
#'
#' `rticles::rjournal_article` will help you build the correct files requirements:
#'
#' - A R file will be generated automatically using `knitr::purl` - see
#' https://bookdown.org/yihui/rmarkdown-cookbook/purl.html for more information.
#' - A tex file will be generated from this Rmd file and correctly included in
#'   `RJwrapper.tex` as expected to build `RJwrapper.pdf`.
#' - All figure files will be kept in the default rmarkdown `*_files` folder. This
#' happens because `keep_tex = TRUE` by default in `rticles::rjournal_article`
#' - Only the bib filename is to be modified. An example bib file is included in the
#' template (`RJreferences.bib`) and you will have to name your bib file as the
#' tex, R, and pdf files.
#'
#' # About YAML header fields
#'
#' This section documents some of the YAML fields that can be used with this
#' formats.
#'
#' ## The `author` field in the YAML header
#'
#' | FIELD | TYPE | DESCRIPTION |
#' | ------ | ---- | ----------- |
#' | `name` | *required* | name and surname of the author |
#' | `affiliation` | *required* | name of the author's affiliation |
#' | `address` | *required* | at least one address line for the affiliation |
#' | `url` | *optional* | an additional url for the author or the main affiliation |
#' | `orcid` | *optional* | the authors ORCID if available |
#' | `email` | *required* | the author's e-mail address |
#' | `affiliation2` | *optional* | name of the author's 2nd affiliation |
#' | `address2` | *optional* | address lines belonging to the author's 2nd affiliation |
#'
#' *Please note: Only one `url`, `orcid` and `email` can be provided per author.*
#'
#' ## Other YAML fields
#'
#' | FIELD | TYPE | DESCRIPTION |
#' | ----- | ---- | ----------- |
#' | `bibliography` | *with default* | the BibTeX file with the reference entries |
#'
#' @inheritParams rmarkdown::pdf_document
#' @param ... Arguments to [rmarkdown::pdf_document()].
#' @export
rjournal_article <- function(..., keep_tex = TRUE, citation_package = "natbib") {
# The custom Lua/template pipeline below needs pandoc >= 2.2; error otherwise.
rmarkdown::pandoc_available("2.2", TRUE)
base <- pdf_document_format(
"rjournal",
highlight = NULL, citation_package = citation_package,
keep_tex = keep_tex, ...
)
# Render will generate the tex file, the post-knit hook generates the R file,
# and the post-process hook generates the appropriate RJwrapper.tex and
# uses pandoc/latexmk to build the pdf from that.
base$pandoc$to <- "latex"
base$pandoc$ext <- ".tex"
# Generates R file expected by R journal requirement.
# We do that in the post-knit hook to access the input file path.
pk <- base$post_knit
# Path of the purl-ed R script; shared between the two hooks via the
# enclosing environment (written with `<<-` in post_knit, read in
# post_processor).
output_R <- NULL
base$post_knit <- function(metadata, input_file, runtime, ...) {
# run the original post_knit hook first, if one exists
if (is.function(pk)) pk(metadata, input_file, runtime, ...)
# purl the Rmd file to R code per requirement
temp_R <- tempfile(fileext = ".R")
output_R <<- knitr::purl(input_file, temp_R, documentation = 1, quiet = TRUE)
# Add magic comment about '"do not edit" (rstudio/rstudio#2082)
xfun::write_utf8(c(
"# Generated by `rjournal_article()` using `knitr::purl()`: do not edit by hand",
sprintf("# Please edit %s to modify this file", input_file),
"",
xfun::read_utf8(output_R)
), output_R)
NULL
}
base$post_processor <- function(metadata, utf8_input, output_file, clean, verbose) {
filename <- basename(output_file)
# underscores in the filename will be problematic in \input{};
# pandoc will escape underscores but it should not, i.e., should be
# \input{foo_bar} instead of \input{foo\_bar}
if (filename != (filename2 <- gsub("_", "-", filename))) {
file.rename(filename, filename2)
filename <- filename2
}
# Copy purl-ed R file with the correct name (same basename as the tex file)
dest_file <- xfun::with_ext(filename, "R")
our_file <- TRUE
if (file.exists(dest_file)) {
# we check this file is generated by us (via the magic first-line
# comment written in post_knit); otherwise we leave it as is and warn
current_r <- xfun::read_utf8(dest_file)
our_file <- grepl("Generated.*rjournal_article.*do not edit by hand", current_r[1])
if (!our_file) {
warning(
sprintf("R file with name '%s' already exists.", dest_file),
"\nIt will not be overwritten by the one generated",
" during rendering using `knitr::purl()`.",
"\nRemove the existing file to obtain the generated one.",
call. = FALSE
)
}
}
if (our_file) {
# we only overwrite if it is our file
file.copy(output_R, xfun::with_ext(filename, "R"), overwrite = TRUE)
}
unlink(output_R)
# post process TEX file (author block formatting)
temp_tex <- xfun::read_utf8(filename)
temp_tex <- post_process_authors(temp_tex)
xfun::write_utf8(temp_tex, filename)
# check bibliography name: R Journal requires bib and tex to share a basename
bib_filename <- metadata$bibliography
if (length(bib_filename) == 1 &&
xfun::sans_ext(bib_filename) != xfun::sans_ext(filename)) {
msg <- paste0(
"Per R journal requirement, bibliography file and tex file should",
" have the same name. Currently, you have a bib file ", bib_filename,
" and a tex file ", filename, ". Don't forget to rename and change",
" the bibliography field in the YAML header."
)
warning(msg, call. = FALSE)
}
# Create RJwrapper.tex per R Journal requirement
m <- list(filename = xfun::sans_ext(filename))
h <- get_list_element(metadata, c("output", "rticles::rjournal_article", "includes", "in_header"))
# append YAML preamble / header-includes (if any) as an extra in_header file
h <- c(h, if (length(preamble <- unlist(metadata[c("preamble", "header-includes")]))) {
f <- tempfile(fileext = ".tex")
on.exit(unlink(f), add = TRUE)
xfun::write_utf8(preamble, f)
f
})
t <- find_resource("rjournal", "RJwrapper.tex")
template_pandoc(m, t, "RJwrapper.tex", h, verbose)
# compile the wrapper (which \input's the article tex) to RJwrapper.pdf
tinytex::latexmk("RJwrapper.tex", base$pandoc$latex_engine, clean = clean)
}
# Mostly copied from knitr::render_sweave
base$knitr$opts_chunk$comment <- "#>"
set_sweave_hooks(base)
}
|
ca913f65f644f518d9147c98938a8a24fe9bb85a
|
fee0427e7917df4fd9c9370dbfb06f27d44f02af
|
/R/IPHC_skate_notes.R
|
06ce03bd5a798020adc4c06b2c118c7a76ff4a03
|
[] |
no_license
|
iantaylor-NOAA/BigSkate
|
58da78513d451a4436de52647d637898fa04b739
|
81f140a288c4455a54f80a56ed66c960449743db
|
refs/heads/master
| 2020-05-15T08:50:05.910018
| 2019-05-03T18:59:19
| 2019-05-03T18:59:19
| 182,166,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,807
|
r
|
IPHC_skate_notes.R
|
# note: this file has some data processing and code to make maps,
# this is for Longnose Skate and Big Skate in 2019 based on similar work
# for spiny dogfish in 2011
# to get index of abundance, see file called "IPHC Binomial GLM skates.R"
# read files from IPHC
iphc.dir <- 'c:/SS/skates/indices/IPHC/'
# set-level table: one row per station visit (presumably; confirm vs. source data)
iphc.skate <- read.csv(file.path(iphc.dir,
'TaylorI20190318_FISS-skate-spp sets.csv'),
stringsAsFactors=FALSE)
# fish-level table: one row per species record within a set
iphc.fish <- read.csv(file.path(iphc.dir,
'TaylorI20190318_FISS-skate-spp fish.csv'),
stringsAsFactors=FALSE)
# hook adjustment factors
iphc.haf <- read.csv(file.path(iphc.dir,
'TaylorI20190423-FISS-HookAdj.csv'),
stringsAsFactors=FALSE)
# load R packages
# NOTE(review): library() is preferred over require() for unconditional loads
require(maps)
require(mapdata)
# load information on EEZ for U.S. west coast
# (defines US.CAN.lon / US.CAN.lat used below -- TODO confirm)
source('c:/SS/skates/R/US.Can.boundary.R')
eez.outer <- read.csv('C:/data/maps/EEZ_polygon_lat_lon.csv')
# make map
map('worldHires', #regions=c("Canada","Mexico"),
#xlim = c(-180, -116.8), ylim = c(32, 60), # include Alaska
#xlim = c(-130, -116.8), ylim = c(32, 49.5), # whole west coast
#xlim = c(-130, -116.8), ylim = c(37, 49.5), # down to San Francisco
xlim = c(-128, -122), ylim = c(41.9, 49.5), # north coast only
col='grey', fill=TRUE, interior=TRUE, lwd=1)
# add US/Canada boundary
lines(US.CAN.lon, US.CAN.lat, lty=2)
# add some text
text(-120, 50, "Canada")
text(-120, 48, "U.S.")
# add IPHC survey points
points(iphc.skate$MidLon,
iphc.skate$MidLat,
col=2, pch=16, cex=.5)
# convert NULL to NA and character to numeric
# (these columns were read as character because of literal "NULL" strings)
for(col in c("End.Lat", "End.Lon", "End.Depth",
"Hooks.Retrieved", "Hooks.Fished", "Soak.Time")){
iphc.skate[[col]][iphc.skate[[col]]=="NULL"] <- NA
iphc.skate[[col]] <- as.numeric(iphc.skate[[col]])
}
# check eez.polygon
## polygon(eez.outer, col=3)
## map('worldHires', regions=c("Canada","USA", "Mexico"),
## add=TRUE,
## col='grey', fill=TRUE, interior=TRUE, lwd=1)
## polygon(eez.outer)
# check for stations on the U.S. west coast
# (excludes Puget Sound, which are also in IPHC.Reg.Area=="2A")
# in.eez: 1 if the set midpoint falls inside the EEZ polygon, 0 otherwise
iphc.skate$in.eez <- sp::point.in.polygon(point.x = iphc.skate$MidLon,
point.y = iphc.skate$MidLat,
pol.x = eez.outer$lon,
pol.y = eez.outer$lat)
# confirm filtering
points(iphc.skate$MidLon[iphc.skate$in.eez==1],
iphc.skate$MidLat[iphc.skate$in.eez==1],
col=2, pch=16, cex=.3)
good.stations <- unique(iphc.skate$Station[iphc.skate$in.eez==1])
bad.stations <- unique(iphc.skate$Station[iphc.skate$in.eez==0])
# look deeper at the one area which is sometimes in or out
good.stations[good.stations %in% bad.stations]
## [1] 1084
table(iphc.skate$in.eez[iphc.skate$Station==1084])
## 0 1
## 2 17
points(iphc.skate$MidLon[iphc.skate$Station==1084],
iphc.skate$MidLat[iphc.skate$Station==1084],
col=3, pch=16, cex=.3)
## # filter to only points in the U.S.
## iphc.US <- iphc.skate[iphc.skate$in.eez==1,]
# filter to only the good stations (includes 2 observations in Canada at 1084)
iphc.US <- iphc.skate[iphc.skate$Station %in% good.stations,]
# note that hooks observed is usually around 20%
table(iphc.US$Year, round(iphc.US$Hooks.Observed/iphc.US$Hooks.Retrieved,1))
## 0.1 0.2 0.3 0.8 1
## 1999 0 84 0 0 0
## 2001 0 84 0 0 0
## 2002 0 84 0 0 0
## 2003 84 0 0 0 0
## 2004 0 84 0 0 0
## 2005 0 84 0 0 0
## 2006 0 84 0 0 0
## 2007 0 84 0 0 0
## 2008 0 84 0 0 0
## 2009 0 83 1 0 0
## 2010 0 84 0 0 0
## 2011 0 119 2 0 0
## 2012 0 84 0 1 10
## 2013 0 96 0 0 14
## 2014 0 134 0 0 14
## 2015 0 82 1 0 12
## 2016 1 82 0 0 12
## 2017 0 133 0 0 30
## 2018 0 89 0 0 6
# note relatively few sets off Washington and Oregon. Note: Area 2A based on map:
# http://www.iphc.washington.edu/images/iphc/surveyregions_big.gif
table(iphc.skate$Year, iphc.skate$IPHC.Reg.Area)
## 2A 2B 2C 3A 3B 4A 4B 4C 4D 4E CLS
## 1998 0 128 124 376 232 112 73 0 0 0 0
## 1999 84 170 124 375 232 66 86 0 0 0 0
## 2000 0 129 123 374 231 113 90 0 50 0 0
## 2001 84 170 123 374 231 113 90 0 50 0 0
## 2002 84 170 123 378 231 114 89 0 52 0 0
## 2003 84 170 123 374 231 113 89 0 49 0 0
## 2004 84 170 123 374 232 112 89 0 49 0 0
## 2005 84 170 123 374 228 110 89 0 49 0 0
## 2006 84 170 123 374 231 114 89 28 92 22 17
## 2007 84 170 123 374 231 113 89 20 58 0 0
## 2008 84 170 123 375 230 113 89 20 58 0 0
## 2009 84 170 123 374 232 113 89 20 58 0 0
## 2010 84 170 123 374 231 113 89 21 58 0 0
## 2011 136 170 123 374 231 113 89 20 58 0 0
## 2012 97 170 123 374 232 113 89 20 58 0 0
## 2013 112 170 123 374 231 113 89 20 58 0 0
## 2014 163 170 123 374 231 192 90 20 58 0 0
## 2015 96 170 123 374 231 114 89 28 92 23 17
## 2016 96 170 123 376 231 113 90 20 142 0 0
## 2017 178 166 123 374 231 113 202 20 58 0 0
## 2018 110 297 165 370 231 113 90 20 58 0 0
# filter to only stations that were visited prior to 2011
# NOTE(review): the code keeps stations visited in 2010 specifically,
# not all stations visited before 2011 -- confirm intent vs. comment
regular.stations <- unique(iphc.US$Station[iphc.US$Year == 2010])
iphc.US <- iphc.US[iphc.US$Station %in% regular.stations,]
# check for ineffective sets (NEED TO FOLLOW UP TO KNOW WHAT THESE MEAN)
table(iphc.US$Year[iphc.US$Effective=="N"],
iphc.US$Ineffective.Code[iphc.US$Effective=="N"])
## DS GI PP ST
## 2004 1 0 0 0
## 2010 1 0 0 0
## 2012 0 2 0 0
## 2016 0 0 0 1
## 2018 0 0 1 0
# make map for just stations to be used:
map('worldHires', #regions=c("Canada","Mexico"),
xlim = c(-128, -122), ylim = c(41.9, 49.5), # north coast only
col='grey', fill=TRUE, interior=TRUE, lwd=1)
# add US/Canada boundary
lines(US.CAN.lon, US.CAN.lat, lty=2)
# add some text
text(-120, 50, "Canada")
text(-120, 48, "U.S.")
# add IPHC survey points
points(iphc.US$MidLon,
iphc.US$MidLat,
col=2, pch=16, cex=.5)
###############################################################3
# work with data on skate samples
###############################################################3
# tally species composition of the fish-level records (all areas)
table(iphc.fish$Common.Name)
## Alaska Skate Aleutian Skate Bering Skate
## 2860 4089 547
## Bering/Alaska Skate Bering/Aleutian Skate Big Skate
## 13 12 4066
## Black Skate Butterfly Skate Commander Skate
## 99 34 30
## Deepsea Skate Flathead Skate Golden Skate
## 9 6 5
## Leopard Skate Longnose Skate Mud Skate
## 259 11526 9
## Okhotsk Skate Roughshoulder Skate Sandpaper Skate
## 3 26 160
## Starry Skate unident. Skate Whiteblotched Skate
## 58 1805 1242
## Whitebrow Skate
## 11
# unidentified designation was most common in 1998 to 2003
table(iphc.fish$Year, iphc.fish$Common.Name=="unident. Skate")
## FALSE TRUE
## 1998 372 437
## 1999 791 199
## 2000 875 183
## 2001 789 162
## 2002 766 212
## 2003 879 192
## 2004 1127 61
## 2005 1260 51
## 2006 1232 30
## 2007 1092 35
## 2008 1362 19
## 2009 1539 48
## 2010 1564 26
## 2011 1399 23
## 2012 1201 23
## 2013 1373 6
## 2014 1519 10
## 2015 1609 32
## 2016 1452 8
## 2017 1318 33
## 2018 1545 15
# filter for just U.S. west coast (stations retained in iphc.US above)
iphc.fish.US <- iphc.fish[iphc.fish$Station %in% unique(iphc.US$Station),]
# use of unidentified is rare on west coast, can probably be assumed to be
# something rare, and neither Big Skate nor Longnose Skate
table(iphc.fish.US$Common.Name, iphc.fish.US$Year)
## 1999 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011
## Bering Skate 0 0 0 0 0 0 0 0 0 0 0 0
## Big Skate 15 8 2 4 13 13 1 4 2 3 10 13
## Black Skate 0 1 6 0 0 0 0 0 0 0 0 0
## Longnose Skate 43 42 21 32 43 41 25 25 37 27 33 27
## Sandpaper Skate 0 0 0 0 0 0 0 0 0 0 0 0
## unident. Skate 0 0 0 3 0 0 0 0 0 0 1 0
## 2012 2013 2014 2015 2016 2017 2018
## Bering Skate 0 0 0 0 0 2 0
## Big Skate 3 8 13 12 16 16 8
## Black Skate 0 0 0 0 0 0 0
## Longnose Skate 32 23 21 30 29 34 40
## Sandpaper Skate 0 0 0 0 2 0 0
## unident. Skate 0 0 0 0 0 0 0
# add species columns to table with info on each set
iphc.US$BigSkate.Observed <- 0
iphc.US$LongnoseSkate.Observed <- 0
iphc.US$OtherSkate.Observed <- 0
# loop over samples to fill in rows:
# each fish record is matched to its set by (Year, Station, Set.No.) and
# its count is accumulated into the matching species column of iphc.US.
# NOTE(review): a record with no matching set (length(jrow)==0) is silently
# dropped, and a record matching >1 set is added to all matches -- both
# cases only produce a cat() message or nothing; confirm this is intended.
for(irow in 1:nrow(iphc.fish.US)){
row <- iphc.fish.US[irow,]
jrow <- which(iphc.US$Year == row$Year &
iphc.US$Station == row$Station &
iphc.US$Set.No. == row$Set.No.)
if(length(jrow) > 1){
cat('multiple values: irow =', irow, ' jrow =', jrow)
}
# check for Big Skate samples
if(row$Common.Name=="Big Skate"){
iphc.US$BigSkate.Observed[jrow] <-
iphc.US$BigSkate.Observed[jrow] + row$Number.Observed
}
# check for Longnose Skate samples
if(row$Common.Name=="Longnose Skate"){
iphc.US$LongnoseSkate.Observed[jrow] <-
iphc.US$LongnoseSkate.Observed[jrow] + row$Number.Observed
}
# check for all other skate
if(!row$Common.Name %in% c("Big Skate", "Longnose Skate")){
iphc.US$OtherSkate.Observed[jrow] <-
iphc.US$OtherSkate.Observed[jrow] + row$Number.Observed
}
}
##### repeat for all stations (coastwide iphc.skate table, same logic as above)
iphc.skate$BigSkate.Observed <- 0
iphc.skate$LongnoseSkate.Observed <- 0
iphc.skate$OtherSkate.Observed <- 0
# loop over samples to fill in rows
for(irow in 1:nrow(iphc.fish)){
row <- iphc.fish[irow,]
jrow <- which(iphc.skate$Year == row$Year &
iphc.skate$Station == row$Station &
iphc.skate$Set.No. == row$Set.No.)
if(length(jrow) > 1){
cat('multiple values: irow =', irow, ' jrow =', jrow)
}
# check for Big Skate samples
if(row$Common.Name=="Big Skate"){
iphc.skate$BigSkate.Observed[jrow] <-
iphc.skate$BigSkate.Observed[jrow] + row$Number.Observed
}
# check for Longnose Skate samples
if(row$Common.Name=="Longnose Skate"){
iphc.skate$LongnoseSkate.Observed[jrow] <-
iphc.skate$LongnoseSkate.Observed[jrow] + row$Number.Observed
}
# check for all other skate
if(!row$Common.Name %in% c("Big Skate", "Longnose Skate")){
iphc.skate$OtherSkate.Observed[jrow] <-
iphc.skate$OtherSkate.Observed[jrow] + row$Number.Observed
}
}
# confirm that totals match: column sums in the set table should equal the
# species totals in the fish table (sanity check on the accumulation loops)
sum(iphc.US$BigSkate.Observed)
## [1] 355
sum(iphc.fish.US$Number.Observed[iphc.fish.US$Common.Name=="Big Skate"])
## [1] 355
sum(iphc.US$LongnoseSkate.Observed)
## [1] 1261
sum(iphc.fish.US$Number.Observed[iphc.fish.US$Common.Name=="Longnose Skate"])
## [1] 1261
sum(iphc.US$OtherSkate.Observed)
## [1] 27
sum(iphc.fish.US$Number.Observed) - 355 - 1261
## [1] 27
### check for whole coast table
sum(iphc.skate$BigSkate.Observed)
## [1] 10781
sum(iphc.fish$Number.Observed[iphc.fish$Common.Name=="Big Skate"])
## [1] 10781
sum(iphc.skate$LongnoseSkate.Observed)
## [1] 43539
sum(iphc.fish$Number.Observed[iphc.fish$Common.Name=="Longnose Skate"])
## [1] 43539
sum(iphc.skate$OtherSkate.Observed)
## [1] 40849
sum(iphc.fish$Number.Observed) - 10781 - 43539
## [1] 40849
# catch of skates as proportion of hooks observed
iphc.US$BigSkate.prop <- iphc.US$BigSkate.Observed / iphc.US$Hooks.Observed
iphc.US$LongnoseSkate.prop <- iphc.US$LongnoseSkate.Observed / iphc.US$Hooks.Observed
iphc.US$OtherSkate.prop <- iphc.US$OtherSkate.Observed / iphc.US$Hooks.Observed
iphc.skate$BigSkate.prop <- iphc.skate$BigSkate.Observed / iphc.skate$Hooks.Observed
iphc.skate$LongnoseSkate.prop <- iphc.skate$LongnoseSkate.Observed / iphc.skate$Hooks.Observed
iphc.skate$OtherSkate.prop <- iphc.skate$OtherSkate.Observed / iphc.skate$Hooks.Observed
### save data.frames
save(iphc.US, iphc.skate, file=file.path(iphc.dir, "iphc.data_4-10-2019.Rdata"))
### make map with panel for each year
# For each species, one PNG with a 2x10 grid of small maps (one panel per
# year); bubble area is scaled to skates per observed hook.
for(spec in c("Big Skate","Longnose Skate")){
spec.short <- gsub(pattern=" ", replacement="", x=spec)
png(file.path(iphc.dir, paste0('IPHC_',spec.short,'_map.png')), width=10, height=10,
units='in', res=300)
par(mfrow=c(2,10),mar=rep(0,4),oma=c(2,4,4,2))
# multiplier converting sqrt(proportion) to point size (cex)
scale <- 15
for(y in sort(unique(iphc.US$Year))){
map('worldHires',xlim=c(-125.8,-123.8),ylim=c(41.8,48.7),
mar=rep(.2,4),fill=TRUE,col='grey80')
good <- iphc.US$Year==y
colname <- paste0(spec.short, '.prop')
vals <- iphc.US[[colname]][good]
points(iphc.US$MidLon[good], iphc.US$MidLat[good],
col=rgb(1,0,0,.3), cex=sqrt(vals)*scale, pch=16)
points(iphc.US$MidLon[good], iphc.US$MidLat[good],
col=1, cex=0.2, pch=16)
mtext(line=.3,y,font=1)
box()
# legend and y-axis drawn only in the first (earliest-year) panel
if(y==min(iphc.US$Year)){
propvec <- c(0,0.01,0.02,0.05,0.1)
n <- length(propvec)
lonvec <- rep(-125.3,n)
latvec <- seq(42,43,length=n)
points(lonvec,latvec,col=rgb(1,0,0,.3),cex=sqrt(propvec)*scale, pch=16)
points(lonvec,latvec,col=4,cex=0.2, pch=16)
text(lonvec-.05,latvec,100*propvec,pos=2)
at = axis(2,lab=F)
# add degree symbol as suggested at http://tolstoy.newcastle.edu.au/R/e2/help/07/03/12710.html
axis(2,at=at,lab=parse(text=paste(at,"*degree","~N",sep="")),las=1)
rect(-126,41.6,-125,43.2)
}
# add a vertical axis for 2nd row
# (relies on `at` having been set in the first panel of the loop above)
if(par()$mfg[1] == 2 & par()$mfg[2] == 1){
axis(2,at=at,lab=parse(text=paste(at,"*degree","~N",sep="")),las=1)
}
#at2 = c(-124,-125)
#axis(1,at=at2,lab=parse(text = paste(format(abs(at2)), "*degree","~W", sep = "")),las=1)
}
mtext(text=paste(spec, "per 100 observed hooks in IPHC longline survey"),
side=3, line=1, outer=TRUE, font=2, cex=1.8)
dev.off()
}
### make big map for whole coast
### One PNG per species: all years' catch rates drawn as translucent red
### bubbles (area scaled to skates per observed hook), 2017 station midpoints
### overlaid as small blue dots, plus a bubble-size legend.
for(spec in c("Big Skate","Longnose Skate")){
  spec.short <- gsub(pattern=" ", replacement="", x=spec)
  png(file.path(iphc.dir, paste0('IPHC_coast_',spec.short,'_map.png')), width=10, height=7.5,
      units='in', res=300)
  #windows(width=10, height=7.5) #, mar=c(6,6,1,1))
  # multiplier converting sqrt(proportion) to point size (cex)
  scale <- 5
  map('worldHires', xlim=c(-179,-120), ylim=c(35,62),
      mar=c(6,6,1,1), fill=TRUE, col='grey80')
  colname <- paste0(spec.short, '.prop')
  # all sets, all years (seq_len is safe for an empty table, unlike 1:nrow)
  good <- seq_len(nrow(iphc.skate))
  vals <- iphc.skate[[colname]][good]
  points(iphc.skate$MidLon[good], iphc.skate$MidLat[good],
         col=rgb(1,0,0,.1), cex=sqrt(vals)*scale, pch=16)
  # mark station locations sampled in 2017
  good <- which(iphc.skate$Year==2017)
  points(iphc.skate$MidLon[good], iphc.skate$MidLat[good],
         col=4, cex=0.2, pch=16)
  box()
  # legend: example proportions and their corresponding bubble sizes
  propvec <- c(0,0.01,0.02,0.05,0.1)
  n <- length(propvec)
  lonvec <- rep(-170,n)
  latvec <- seq(42,45,length=n)
  points(lonvec, latvec, col=rgb(1,0,0,.3), cex=sqrt(propvec)*scale, pch=16)
  points(lonvec, latvec, col=1, cex=0.2, pch=16)
  text(lonvec-.05, latvec, 100*propvec, pos=2)
  # add degree symbol as suggested at http://tolstoy.newcastle.edu.au/R/e2/help/07/03/12710.html
  at <- axis(1, labels=FALSE)
  axis(1, at=at, labels=parse(text=paste(abs(at),"*degree","~W",sep="")), las=1)
  at2 <- axis(2, labels=FALSE)
  # BUG FIX: the y-axis labels previously used format(at) -- the x-axis tick
  # positions -- instead of format(at2), so the latitude axis was mislabeled
  # with longitude values.
  axis(2, at=at2, labels=parse(text = paste(format(at2),"*degree","~N",sep="")), las=1)
  mtext(text=paste(spec, "per 100 observed hooks in IPHC longline survey"),
        side=3, line=1, outer=FALSE, font=2, cex=1.8)
  dev.off()
}
|
143504f45cd2e9535b233172a59dd0c53e1674c5
|
bec9aed59fd626049a72efe98fd1fc022af2475a
|
/My Working Directory/Lecture - Probability.R
|
d38dbb7f94301dc087a27af8b2dab6de79893eac
|
[] |
no_license
|
Nebojsa77/MXN500
|
96a46aea565fc069d58e8d40e429f72cca771102
|
6ee37020f40c33850f82f334bf42f4245ef60f56
|
refs/heads/master
| 2020-05-30T19:11:40.475011
| 2019-06-03T02:11:26
| 2019-06-03T02:11:26
| 189,918,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,385
|
r
|
Lecture - Probability.R
|
# Lecture examples: a Venn diagram for joint events, a discrete uniform pmf,
# and a simple continuous density curve.
library(VennDiagram)
library(ggforce)
# Venn diagram of pet preference: P(dog) = 0.8, P(cat) = 0.4, overlap = 0.2
draw.pairwise.venn(
  area1 = 0.8, area2 = 0.4, cross.area = 0.2,
  category = c("Dog People", "Cat People"),   # labels for the two circles
  lty = c("blank", "blank"),                  # no circle outlines
  fill = c("red", "Blue"),
  alpha = c(0.5, 0.5),                        # translucent so the overlap shows
  cat.pos = c(0, 0),                          # label positions (degrees)
  cat.dist = c(0.025, 0.025),                 # label distance from circle edge
  scaled = FALSE
)
# pmf of a fair six-sided die: outcomes 1..6, each with probability 1/6
die_pmf <- data.frame(n = 1:6, p = rep(1/6, 6))
ggplot(die_pmf, aes(x = n, weight = p)) +   # weight= makes bar heights equal p
  geom_bar(color = "Black", fill = "dark blue") +
  labs(x = "Roll") +
  scale_x_discrete(limits = seq(1, 6)) +    # force ticks at 1..6
  scale_y_continuous(name = "P(N=n)") +
  theme_bw()
# continuous density f(t) = (2/25) t, plotted over [0, 5]
f_t <- function(t) {
  2 / 25 * t
}
ggplot(data.frame(x = c(0, 5)), aes(x = x)) +
  stat_function(fun = f_t) +
  scale_x_continuous(name = "t") +
  scale_y_continuous(name = "f(t)") +
  theme_classic()
|
e3172abfa131e34da103978b1fc893f96f238e2a
|
d09aea6359ada6c8a78ddeafc1989e93ba9294aa
|
/build/R4.0.2-win64/parameters/doc/demean.R
|
3e09ae0249170121b269880dbba674af8e5377f6
|
[] |
no_license
|
hyunsooseol/SimplyAgree
|
4f28d8656f36be5d8a1e2e266c7ffb9c5c559c00
|
7507b271ce0528f92088d410d7d5f72d7c15d799
|
refs/heads/master
| 2023-05-30T21:45:57.725904
| 2021-06-22T00:13:59
| 2021-06-22T00:13:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,264
|
r
|
demean.R
|
# NOTE(review): this file appears to be R code extracted (purled) from an
# R Markdown vignette -- the `## ----...----` lines are the original chunk
# headers. Edit the source vignette rather than this file if both exist.
## ----message=FALSE, warning=FALSE, include=FALSE------------------------------
library(knitr)
knitr::opts_chunk$set(
echo = TRUE,
collapse = TRUE,
warning = FALSE,
message = FALSE,
comment = "#>",
eval = TRUE
)
# disable evaluation entirely if any required package is missing
if (!requireNamespace("lme4", quietly = TRUE) ||
!requireNamespace("dplyr", quietly = TRUE) ||
!requireNamespace("ggplot2", quietly = TRUE) ||
!requireNamespace("see", quietly = TRUE) ||
!requireNamespace("lfe", quietly = TRUE)) {
knitr::opts_chunk$set(eval = FALSE)
} else {
library(parameters)
library(lme4)
library(lfe)
}
set.seed(333)
## -----------------------------------------------------------------------------
library(parameters)
data("qol_cancer")
## -----------------------------------------------------------------------------
check_heterogeneity(qol_cancer, select = c("phq4", "education"), group = "ID")
## -----------------------------------------------------------------------------
# add person-mean (between) and de-meaned (within) versions of phq4 and QoL
qol_cancer <- cbind(
qol_cancer,
demean(qol_cancer, select = c("phq4", "QoL"), group = "ID")
)
## -----------------------------------------------------------------------------
# fixed-effects ("within") regression via person dummies
fe_model1 <- lm(
QoL ~ 0 + time + phq4_within + ID,
data = qol_cancer
)
# we use only the first two rows, because the remaining rows are
# the estimates for "ID", which is not of interest here...
model_parameters(fe_model1)[1:2, ]
# instead of removing the intercept, we could also use the
# de-meaned response...
fe_model2 <- lm(
QoL_within ~ time + phq4_within + ID,
data = qol_cancer
)
model_parameters(fe_model2)[2:3, ]
# we compare the results with those from the "lfe"-package for panel data
library(lfe)
fe_model3 <- felm(
QoL ~ time + phq4 | ID,
data = qol_cancer
)
model_parameters(fe_model3)
## -----------------------------------------------------------------------------
# mixed model with separated within- and between-effects (random intercept)
library(lme4)
mixed_1 <- lmer(
QoL ~ time + phq4_within + phq4_between + (1 | ID),
data = qol_cancer
)
model_parameters(mixed_1)
# compare to FE-model
model_parameters(fe_model1)[1:2, ]
## -----------------------------------------------------------------------------
# add a time-constant predictor (education) and a random slope for time
mixed_2 <- lmer(
QoL ~ time + phq4_within + phq4_between + education + (1 + time | ID),
data = qol_cancer
)
model_parameters(mixed_2)
## ----echo=FALSE---------------------------------------------------------------
f <- "y<sub>it</sub> = β<sub>0</sub> + β<sub>1W</sub> (x<sub>it</sub> - ͞x<sub>i</sub>) + β<sub>2B</sub> ͞x<sub>i</sub> + β<sub>3</sub> z<sub>i</sub> + υ<sub>i0</sub> + υ<sub>i1</sub> (x<sub>it</sub> - ͞x<sub>i</sub>) + ε<sub>it</sub>"
knitr::asis_output(f)
## ----echo=FALSE---------------------------------------------------------------
f <- "<ul><li>x<sub>it</sub> - ͞x<sub>i</sub> is the de-meaned predictor, <em>phq4_within</em></li><li>͞x<sub>i</sub> is the group-meaned predictor, <em>phq4_between</em></li><li>β<sub>1W</sub> is the coefficient for phq4_within (within-subject)</li><li>β<sub>2B</sub> is the coefficient for phq4_between (bewteen-subject)</li><li>β<sub>3</sub> is the coefficient for time-constant predictors, such as `hospital` or `education` (bewteen-subject)</li></ul>"
knitr::asis_output(f)
## -----------------------------------------------------------------------------
# random-effects within-between (REWB) model: random slope for the
# within-person predictor as well
rewb <- lmer(
QoL ~ time + phq4_within + phq4_between + education +
(1 + time | ID) + (1 + phq4_within | ID),
data = qol_cancer
)
## -----------------------------------------------------------------------------
model_parameters(rewb)
## -----------------------------------------------------------------------------
random_parameters(rewb)
## -----------------------------------------------------------------------------
# simulate toy "typing speed vs typing errors" data with 5 groups whose
# within-group trend differs from the between-group trend
library(ggplot2)
library(dplyr)
library(see)
set.seed(123)
n <- 5
b <- seq(1, 1.5, length.out = 5)
x <- seq(2, 2 * n, 2)
d <- do.call(rbind, lapply(1:n, function(i) {
data.frame(x = seq(1, n, by = .2),
y = 2 * x[i] + b[i] * seq(1, n, by = .2) + rnorm(21),
grp = as.factor(2 * i))
}))
d <- d %>%
group_by(grp) %>%
mutate(x = rev(15 - (x + 1.5 * as.numeric(grp)))) %>%
ungroup()
labs <- c("very slow", "slow", "average", "fast", "very fast")
levels(d$grp) <- rev(labs)
# attach within/between versions of x and y
d <- cbind(d, demean(d, c("x", "y"), group = "grp"))
## ----echo=FALSE---------------------------------------------------------------
ggplot(d, aes(x, y)) +
geom_point(colour = "#555555", size = 2.5, alpha = .5) +
see::theme_modern() +
labs(x = "Typing Speed", y = "Typing Errors", colour = "Type Experience")
## ----echo=FALSE---------------------------------------------------------------
ggplot(d, aes(x, y)) +
geom_point(colour = "#555555", size = 2.5, alpha = .5) +
geom_smooth(method = "lm", se = F, colour = "#555555") +
see::theme_modern() +
labs(x = "Typing Speed", y = "Typing Errors", colour = "Type Experience")
## -----------------------------------------------------------------------------
# naive pooled regression (ignores grouping)
m1 <- lm(y ~ x, data = d)
model_parameters(m1)
## ----echo=FALSE---------------------------------------------------------------
ggplot(d, aes(x, y)) +
geom_point(mapping = aes(colour = grp), size = 2.5, alpha = .5) +
geom_smooth(method = "lm", se = F, colour = "#555555") +
see::scale_color_flat() +
see::theme_modern() +
labs(x = "Typing Speed", y = "Typing Errors", colour = "Type Experience")
## ----echo=FALSE---------------------------------------------------------------
ggplot(d, aes(x, y)) +
geom_smooth(mapping = aes(colour = grp), method = "lm", se = FALSE) +
geom_point(mapping = aes(colour = grp), size = 2.2, alpha = .6) +
see::scale_color_flat() +
see::theme_modern() +
labs(x = "Typing Speed", y = "Typing Errors", colour = "Type Experience")
## -----------------------------------------------------------------------------
# within-group effect via group dummies (fixed-effects regression)
m2 <- lm(y ~ 0 + x_within + grp, data = d)
model_parameters(m2)[1, ]
## ----echo=FALSE---------------------------------------------------------------
ggplot(d, aes(x, y)) +
geom_point(mapping = aes(colour = grp), size = 2.2, alpha = .6) +
geom_smooth(mapping = aes(x = x_between, y = y_between), method = "lm", se = F, colour = "#444444") +
see::scale_color_flat() +
see::theme_modern() +
labs(x = "Typing Speed", y = "Typing Errors", colour = "Type Experience")
## -----------------------------------------------------------------------------
# between-group effect only
m3 <- lm(y ~ x_between, data = d)
model_parameters(m3)
## ----echo=FALSE---------------------------------------------------------------
ggplot(d, aes(x, y)) +
geom_smooth(mapping = aes(colour = grp), method = "lm", se = FALSE) +
geom_point(mapping = aes(colour = grp), size = 2.2, alpha = .6) +
geom_smooth(mapping = aes(x = x_between, y = y_between), method = "lm", se = F, colour = "#444444") +
see::scale_color_flat() +
see::theme_modern() +
labs(x = "Typing Speed", y = "Typing Errors", colour = "Type Experience")
## -----------------------------------------------------------------------------
# mixed model with both within- and between-effects (random intercept)
m4 <- lmer(y ~ x_between + x_within + (1 | grp), data = d)
model_parameters(m4)
## -----------------------------------------------------------------------------
# ... and additionally a random slope for the within-effect
m5 <- lmer(y ~ x_between + x_within + (1 + x_within | grp), data = d)
model_parameters(m5)
## -----------------------------------------------------------------------------
# same simulation, but with deliberately imbalanced group sizes
set.seed(123)
n <- 5
b <- seq(1, 1.5, length.out = 5)
x <- seq(2, 2 * n, 2)
d <- do.call(rbind, lapply(1:n, function(i) {
data.frame(x = seq(1, n, by = .2),
y = 2 * x[i] + b[i] * seq(1, n, by = .2) + rnorm(21),
grp = as.factor(2 * i))
}))
# create imbalanced groups
d$grp[sample(which(d$grp == 8), 10)] <- 6
d$grp[sample(which(d$grp == 4), 8)] <- 2
d$grp[sample(which(d$grp == 10), 9)] <- 6
d <- d %>%
group_by(grp) %>%
mutate(x = rev(15 - (x + 1.5 * as.numeric(grp)))) %>%
ungroup()
labs <- c("very slow", "slow", "average", "fast", "very fast")
levels(d$grp) <- rev(labs)
d <- cbind(d, demean(d, c("x", "y"), group = "grp"))
# Between-subject effect of typing speed
m1 <- lm(y ~ x_between, data = d)
model_parameters(m1)
# Between-subject effect of typing speed, accounting for group structure
m2 <- lmer(y ~ x_between + (1 | grp), data = d)
model_parameters(m2)
|
ba84c412871efe3fcc33b0a7d14746cdf73ed349
|
c599338f5a2fd512f6bd1b0bc9605e9e76239b0c
|
/R/distinctivenessNullAnalyses.R
|
ee875b6733bb47ec0912509707c9c101ef5141fb
|
[] |
no_license
|
haiyangzhang798/San-Juan-Islands-Invasion
|
0b5fa643f0bf1719367dc96472bb252ddfd2720d
|
35c34b297ca70278c90794696ba8789834ee42d8
|
refs/heads/master
| 2021-01-09T06:01:41.382653
| 2015-12-08T17:53:39
| 2015-12-08T17:53:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,471
|
r
|
distinctivenessNullAnalyses.R
|
###### Final Analysis of San Juans Dataset ######
###### 6 Nov 2014 ##########
###### Hannah E. Marx #######
## Significance of phylogenetic and functional distinctiveness between invasive and native species within each island community
# compared to a null expectation, randomizing the invasive community in each community with other invasives from the larger species pool
################################################# Read in Final Datasets #####################################################@
# The sourced files must provide the objects inspected below (SJfinalTree,
# SJtraitLog, SJcommNew, metadata) plus the helper functions used throughout
# (sim.meanDNNS.MDNS, phyloDistinct, summary.DNNS.MDNS, ses.PhyloDist,
# sim.meanNNFD.MFD, sum.sesPhyloDist, sum.sesFunctionDist) -- none are
# defined in this script.
source("analysis.R")
source("R/DistinctivenessFunctions.R")
# Interactive sanity checks of the loaded objects (printed, not assigned).
SJfinalTree
head(SJtraitLog)
head(SJcommNew)
head(metadata)
########################################### Significance of Phylogenetic Distances #############################################################
################# RANDOMIZATION == shuffle prese/abs of invasive within each community (ses.DNNS.MDNS.R) ##############
################# Compare observed MMNPDi / MDNSinv-nat to random distribution of invasive spevies in each community
## mean obs MMNPDi < mean null MMNPDi == intorduced species is closer to native relative than a random invasive from species pool
## mean obs MMNPDi = mean null MMNPDi == any introduced species would show the same pattern
## mean obs MMNPDi > mean null MMNPDi == intorduced species is more distant to native relative than random; DNH
################################ DNNS i / MDNS inv-nat ################################
######################## Simulate means, compare null distribution to the observed mean
## Simulate DNNS/MDNS by randomizing invasive communities...NOTE: will take a while to run
SJ_islands <- names(SJcommNew) # names of the islands
# One null distribution (N = 1000 randomizations) per island.
sim.null.distrib.SJ <- lapply(SJ_islands, function(x) sim.meanDNNS.MDNS(phy=SJfinalTree, com=SJcommNew, traits=SJtraitLog, island=x, N = 1000))
names(sim.null.distrib.SJ) <- SJ_islands
head(sim.null.distrib.SJ)
head(sim.null.distrib.SJ["All_SanJuanIslands"])
#write.csv(sim.null.distrib.SJ, file="SanJuan.DNNS.MDNS.null1000.csv")
## Observed difference in mean DNNS i / MDNS inv-nat
obs.DNNS.SJ <- lapply(SJ_islands, function(x) phyloDistinct(phy=SJfinalTree, community=SJcommNew, col=x))
names(obs.DNNS.SJ) <- SJ_islands # = phyloObs
head(obs.DNNS.SJ)
summ.DNNS.SJ <- lapply(SJ_islands, function(x) summary.DNNS.MDNS(obs.DNNS.SJ[[x]])) #apply summary function across all communities
names(summ.DNNS.SJ) <- SJ_islands
#### Summarize observed mean compared to simualted means == standardized effect size
## p.rank.DNNS.inv <- min(DNNS.inv.rankLo, DNNS.inv.rankHi) / (N + 1)
## proportion of simulated means as or more extreme than observed
# NOTE(review): `list.sdf` is not defined anywhere in this script -- it looks
# like it should be the per-island null output (`sim.null.distrib.SJ`) or is
# created by one of the sourced files. Confirm before running.
ses.SanJuan.DNNS.MDNS <- lapply(names(list.sdf), function(x) ses.PhyloDist(phy=SJfinalTree, com=SJcommNew, island=x, simOneIsland=list.sdf, N=1000))
names(ses.SanJuan.DNNS.MDNS) <- names(list.sdf)
length(ses.SanJuan.DNNS.MDNS) #72
write.csv(ses.SanJuan.DNNS.MDNS, file="output/10_Analyses/PhylogeneticDiversity/Null/SanJuan.SES.phylo.summary.csv")
# The pdf()/dev.off() pairs are commented out so the plots render to the
# active device; uncomment to write files instead.
#pdf("figs/plots/phyloDiv/ses/logNullInvOcc.DNNS.islIncSize.pdf", width=20, height=10)
sum.sesPhyloDist(plottype = "NullObsIntervalDNNS", simPhyloOut = "output/10_Analyses/PhylogeneticDiversity/Null/SanJuan.DNNS.MDNS.null1000.csv",
  metadata = metadata)
#dev.off()
#pdf("figs/plots/phyloDiv/ses/logNullInvOcc.MDNS.islIncSize.pdf", width=20, height=10)
sum.sesPhyloDist(plottype = "NullObsIntervalMDNS", simPhyloOut = "output/10_Analyses/PhylogeneticDiversity/Null/SanJuan.DNNS.MDNS.null1000.csv",
  metadata = metadata)
#dev.off()
#pdf("figs/plots/phyloDiv/ses/ses.DNNS.MDNS.IncSize.Final.pdf", width=20, height=10)
sum.sesPhyloDist(plottype = "ses", simPhyloOut = "output/10_Analyses/PhylogeneticDiversity/Null/SanJuan.DNNS.MDNS.null1000.csv",
  metadata = metadata)
#dev.off()
#pdf("figs/plots/phyloDiv/ses/ses.phylo.SummaryBar.pdf")
sum.sesPhyloDist(plottype = "summary.Bar", simPhyloOut = "output/10_Analyses/PhylogeneticDiversity/Null/SanJuan.DNNS.MDNS.null1000.csv",
  metadata = metadata)
#dev.off()
########################################### Significance of Functional Distances #############################################################
# Drop the pooled "all islands" column and one unnamed island before the
# functional null simulations.
remove.islands.sim <- c("All_SanJuanIslands", "Unnamed_west_of_Castle_Island")
SJcommNewSim <- SJcommNew[, -which(names(SJcommNew) %in% remove.islands.sim)]
head(SJcommNewSim)
SJ_islands.sim <- colnames(SJcommNewSim)
############################ Null distributions for each trait...this will take a while
# Same randomization scheme as above, one run per trait column of SJtraitLog.
####### seed mass
## Null distribution
sim.null.distrib.SeedMass <- lapply(SJ_islands.sim, function(x) sim.meanNNFD.MFD(phy=SJfinalTree, com=SJcommNewSim, traits=SJtraitLog, island=x, traitname="seedMass", N = 1000))
names(sim.null.distrib.SeedMass) <- SJ_islands.sim
head(sim.null.distrib.SeedMass)
head(sim.null.distrib.SeedMass["Willow_Island"])
#write.csv(sim.null.distrib.SeedMass, file="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SeedMass.1000.CSV")
#### Height
sim.null.distrib.Height <- lapply(SJ_islands.sim, function(x) sim.meanNNFD.MFD(phy=SJfinalTree, com=SJcommNewSim, traits=SJtraitLog, island=x, traitname="maxHeight", N = 1000))
names(sim.null.distrib.Height) <- SJ_islands.sim
head(sim.null.distrib.Height)
head(sim.null.distrib.Height["Willow_Island"])
#write.csv(sim.null.distrib.Height, file="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.Height.null1000.csv")
#### SLA
## Null distribution
sim.null.distrib.SLA <- lapply(SJ_islands.sim, function(x) sim.meanNNFD.MFD(phy=SJfinalTree, com=SJcommNewSim, traits=SJtraitLog, island=x, traitname="sla", N = 1000))
names(sim.null.distrib.SLA) <- SJ_islands.sim
#write.csv(sim.null.distrib.SLA, file="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SLA.1000.csv")
####### leaflet
## Null distribution
sim.null.distrib.leafletSize <- lapply(SJ_islands.sim, function(x) sim.meanNNFD.MFD(phy=SJfinalTree, com=SJcommNewSim, traits=SJtraitLog, island=x, traitname="leafletSize", N = 1000))
names(sim.null.distrib.leafletSize) <- SJ_islands.sim
head(sim.null.distrib.leafletSize)
head(sim.null.distrib.leafletSize["Willow_Island"])
#write.csv(sim.null.distrib.leafletSize, file="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafletSize.1000.csv")
####### leaf N
## Null distribution
sim.null.distrib.leafN <- lapply(SJ_islands.sim, function(x) sim.meanNNFD.MFD(phy=SJfinalTree, com=SJcommNewSim, traits=SJtraitLog, island=x, traitname="leafN", N = 1000))
names(sim.null.distrib.leafN) <- SJ_islands.sim
head(sim.null.distrib.leafN)
head(sim.null.distrib.leafN["Willow_Island"])
#write.csv(sim.null.distrib.leafN, file="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafN.1000.csv")
##################### Summarize results with plots
# NOTE(review): `phyloObs` is passed to every sum.sesFunctionDist() call below
# but is never created in this script; the comment on obs.DNNS.SJ above
# ("# = phyloObs") suggests it equals obs.DNNS.SJ. Confirm it is defined by a
# sourced file before running this section.
########### Seed mass ###########
#pdf(file="figs/plots/functionDiv/ses/NullInvOcc.NNFD.seedMass")
sum.sesFunctionDist(plottype = "NullObsIntervalNNFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SeedMass.1000.CSV",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog,
  traitname="seedMass", metadata=metadata)
#dev.off()
#pdf(paste("figs/plots/functionDiv/ses/NullInvOcc.MFD", traitname, "pdf", sep="."), width=20, height=10)
sum.sesFunctionDist(plottype = "NullObsIntervalMFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SeedMass.1000.CSV",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="seedMass", metadata=metadata)
#dev.off()
#pdf(paste("figs/plots/functionDiv/ses/ses.SJ.NNFD", traitname, "pdf", sep="."), width=20, height=10)
sum.sesFunctionDist(plottype = "ses", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SeedMass.1000.CSV",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="seedMass", metadata=metadata)
#dev.off()
#pdf(paste("figs/plots/functionDiv/ses/", traitname, "Functional.SummaryBar.pdf", sep=""))
sum.sesFunctionDist(plottype = "summary.Bar", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SeedMass.1000.CSV",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="seedMass", metadata=metadata,
  saveout=TRUE, outputPath="output/10_Analyses/FunctionalDiversity/Null/SanJuan.SES.seedMass.summary.csv")
#dev.off()
########### Max Height ###########
sum.sesFunctionDist(plottype = "NullObsIntervalNNFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.Height.null1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="maxHeight", metadata=metadata)
sum.sesFunctionDist(plottype = "NullObsIntervalMFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.Height.null1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="maxHeight", metadata=metadata)
sum.sesFunctionDist(plottype = "ses", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.Height.null1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="maxHeight", metadata=metadata)
sum.sesFunctionDist(plottype = "summary.Bar", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.Height.null1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="maxHeight", metadata=metadata,
  saveout=TRUE, outputPath="output/10_Analyses/FunctionalDiversity/Null/SanJuan.SES.maxHeight.summary.csv")
########### sla ###########
sum.sesFunctionDist(plottype = "NullObsIntervalNNFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SLA.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="sla", metadata=metadata)
sum.sesFunctionDist(plottype = "NullObsIntervalMFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SLA.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="sla", metadata=metadata)
sum.sesFunctionDist(plottype = "ses", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SLA.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="sla", metadata=metadata)
sum.sesFunctionDist(plottype = "summary.Bar", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.SLA.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="sla", metadata=metadata,
  saveout=TRUE, outputPath="output/10_Analyses/FunctionalDiversity/Null/SanJuan.SES.sla.summary.csv")
########### leaf size ###########
sum.sesFunctionDist(plottype = "NullObsIntervalNNFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafletSize.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="leafletSize", metadata=metadata)
sum.sesFunctionDist(plottype = "NullObsIntervalMFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafletSize.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="leafletSize", metadata=metadata)
sum.sesFunctionDist(plottype = "ses", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafletSize.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="leafletSize", metadata=metadata)
sum.sesFunctionDist(plottype = "summary.Bar", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafletSize.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="leafletSize", metadata=metadata,
  saveout=TRUE, outputPath="output/10_Analyses/FunctionalDiversity/Null/SanJuan.SES.leafletSize.summary.csv")
########### leaf N ###########
sum.sesFunctionDist(plottype = "NullObsIntervalNNFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafN.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="leafN", metadata=metadata)
sum.sesFunctionDist(plottype = "NullObsIntervalMFD", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafN.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="leafN", metadata=metadata)
sum.sesFunctionDist(plottype = "ses", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafN.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="leafN", metadata=metadata)
sum.sesFunctionDist(plottype = "summary.Bar", sim.output ="output/10_Analyses/FunctionalDiversity/Null/sim.null.distrib.leafN.1000.csv",
  islands.sim = SJ_islands.sim, phyloObs = phyloObs, traits=SJtraitLog, traitname="leafN", metadata=metadata,
  saveout=TRUE, outputPath="output/10_Analyses/FunctionalDiversity/Null/SanJuan.SES.leafN.summary.csv")
|
49cf5116a1993aaa17fe3bf81830f76f3d26c077
|
710083f50af7c2363f4c8739067cfe0ea3ca17a3
|
/KiyiAge_Lepaketal_RESULTS.R
|
86d3da4299878915aeb13e442c3b549c35e5a9c5
|
[] |
no_license
|
droglenc/KIYILepak
|
0fc52a238520ef47fc927679d3f566fb6d49da88
|
67f396a90e761d50d055a0bc243384152bea0fbd
|
refs/heads/master
| 2021-05-04T11:27:35.995809
| 2017-08-15T02:03:09
| 2017-08-15T02:03:09
| 44,124,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,320
|
r
|
KiyiAge_Lepaketal_RESULTS.R
|
## Read helper code
# aaaInit.R is expected to load the packages and helpers used below
# (%>%/%<>%, filterD, Summarize, ageBias, agePrecision, alkIndivAge,
# mapvalues, theme_mhist, ggplot2, ...) -- none are defined in this script.
source("code/aaaInit.R")
## Set random seed ... bootstraps and ALKs will be the same each time
set.seed(34783475)
################################################################################
## Load and manipulate the length-frequency data from 2014.
lf14 <- read.csv("data/clean/lenfreq_2014.csv") %>%
  mutate(year=factor(year),
  mon=factor(mon,levels=c("May","Jun","Jul")))
#### For first paragraph of Results
## months when Kiyi were collected
xtabs(~mon,data=lf14)
## locations where Kiyi were collected (and number of locations)
# Outer parentheses force the assigned value to print.
( lf14_locs <- xtabs(~location,data=lf14) )
length(lf14_locs)
## locations where Kiyi were collected by month
xtabs(~mon+location,data=lf14)
## basic length summary of the entire sample
Summarize(~tl,data=lf14)
################################################################################
## Load the Biological Data (for 2014)
bio <- read.csv("data/clean/bio.csv") %>%
  mutate(sex=factor(sex,levels=c("juvenile","female","male")))
#### For second paragraph of Results
## summary of number of scales
( sctbl <- xtabs(~scaleAge,data=bio) )
sum(sctbl)
## Between-reader scale comparisons (TAL v. DHO ages)
bio_SS <- filterD(bio,!is.na(scaleAge))
ab_SS <- ageBias(scaleAge_TAL~scaleAge_DHO,data=bio_SS,
  nref.lab="Reader 2",ref.lab="Reader 1")
summary(ab_SS,what="bias")
summary(ab_SS,what="symmetry")
ap_SS <- agePrecision(scaleAge_TAL~scaleAge_DHO,data=bio_SS)
summary(ap_SS,what="precision")
summary(ap_SS,what="absolute difference")
plot(ab_SS,show.CI=TRUE)
## summary of the otolith characteristics
( octbl <- xtabs(~otoChar,data=bio) )
# find proportion unuseable
prop.table(octbl)*100
# find proportion of useable that were unreadable
# NOTE(review): octbl[-2] drops the second otoChar level by position -- this
# only works while the factor levels keep their current order; confirm.
octbl2 <- octbl[-2]
prop.table(octbl2)*100
## Between-reader otolith comparisons (TAL v. DHO ages)
bio_OO <- filterD(bio,otoChar=="USEABLE")
ab_OO <- ageBias(otoAge_TAL~otoAge_DHO,data=bio_OO,
  nref.lab="Reader 2",ref.lab="Reader 1")
summary(ab_OO,what="bias")
summary(ab_OO,what="symmetry")
ap_OO <- agePrecision(otoAge_TAL~otoAge_DHO,data=bio_OO)
summary(ap_OO,what="precision")
summary(ap_OO,what="absolute difference")
summary(ap_OO,what="absolute difference",trunc.diff=2)
## Otolith-scales comparisons (only TAL ages)
bio_OS <- bio[complete.cases(bio[,c("scaleAge","otoAge")]),]
ab_OS <- ageBias(scaleAge~otoAge,data=bio_OS,
  ref.lab="Otolith Age",nref.lab="Scale Age")
ab_OS_bsum <- summary(ab_OS,what="bias")
ab_OS_bsym <- summary(ab_OS,what="symmetry")
plot(ab_OS,show.CI=TRUE)
#### For third paragraph of Results
xtabs(~sex+otoAge,data=filterD(bio,otoChar=="USEABLE"))
xtabs(~sex+scaleAge,data=bio)
# Remove juvenile fish (same as removing all fish <140 mm) and those without otoages
bio_ALK <- filterD(bio,sex!="juvenile",!is.na(otoAge))
# age frequency
( bio_ALK_agefreq <- xtabs(~otoAge,data=bio_ALK) )
# Get raw LF 2014 data, restrict to fish >= 140 mm, and add an
# otoAge variable to record the new ages
lf14_ages <- lf14 %>%
  mutate(otoAge=as.numeric(NA)) %>%
  filter(tl>=140) %>%
  as.data.frame()
# develop ALK
# Row-conditional proportions: P(age | 10-mm length category).
alk <- prop.table(xtabs(~lcat10+otoAge,data=bio_ALK),margin=1)
# apply ALK
# Stochastic individual age assignment -- depends on the seed set above.
lf14_ages <- alkIndivAge(alk,otoAge~tl,data=lf14_ages)
# Age frequency tables
xtabs(~otoAge,data=lf14_ages)
#### Modal progression for discussion
lf <- read.csv("data/clean/lenfreq_all.csv") %>%
  mutate(year=factor(year)) %>%
  filterD(year %in% 2003:2014)
library(mixdist)
# Finite mixture settings shared by all fits below: lognormal components
# with a common coefficient of variation ("CCV") on sigma.
dtype <- "lnorm"
ctype <- mixconstr(consigma="CCV")
## Trying to follow the 2003 year-class
lf04 <- filterD(lf,year==2004) %>%
  group_by(lcat5) %>% summarize(n=n()) %>% as.mixdata()
lf04_fit <- mix(lf04,mixparam(c(100,200),10),dist=dtype,constr=ctype)
plot(lf04_fit,dist=dtype)
lf04_sum <- summary(lf04_fit)
# Accumulate the (year, presumed age, mean TL, sd, se) of the component
# tracking the 2003 year-class; the component index is chosen by eye from
# the plot above for each year.
ycl03_res <- data.frame(year=2004,age=1,
  mntl=lf04_sum$parameters$mu[1],
  sdtl=lf04_sum$parameters$sigma[1],
  setl=lf04_sum$se$mu.se[1])
lf05 <- filterD(lf,year==2005) %>%
  group_by(lcat5) %>% summarize(n=n()) %>% as.mixdata()
lf05_fit <- mix(lf05,mixparam(c(100,200),10),dist=dtype,constr=ctype)
plot(lf05_fit,dist=dtype)
lf05_sum <- summary(lf05_fit)
ycl03_res <- rbind(ycl03_res,c(2005,2,lf05_sum$parameters$mu[1],
  lf05_sum$parameters$sigma[1],lf05_sum$se$mu.se[1]))
lf06 <- filterD(lf,year==2006) %>%
  group_by(lcat5) %>% summarize(n=n()) %>% as.mixdata()
lf06_fit <- mix(lf06,mixparam(c(100,150,200),10),dist=dtype,constr=ctype)
plot(lf06_fit,dist=dtype)
lf06_sum <- summary(lf06_fit)
ycl03_res <- rbind(ycl03_res,c(2006,3,lf06_sum$parameters$mu[2],
  lf06_sum$parameters$sigma[2],lf06_sum$se$mu.se[2]))
lf07 <- filterD(lf,year==2007) %>%
  group_by(lcat5) %>% summarize(n=n()) %>% as.mixdata()
lf07_fit <- mix(lf07,mixparam(c(120,160,200),10),dist=dtype,constr=ctype)
plot(lf07_fit,dist=dtype)
lf07_sum <- summary(lf07_fit)
ycl03_res <- rbind(ycl03_res,c(2007,4,lf07_sum$parameters$mu[2],
  lf07_sum$parameters$sigma[2],lf07_sum$se$mu.se[2]))
lf08 <- filterD(lf,year==2008) %>%
  group_by(lcat5) %>% summarize(n=n()) %>% as.mixdata()
lf08_fit <- mix(lf08,mixparam(c(160,200),10),dist=dtype,constr=ctype)
plot(lf08_fit,dist=dtype)
lf08_sum <- summary(lf08_fit)
ycl03_res <- rbind(ycl03_res,c(2008,5,lf08_sum$parameters$mu[1],
  lf08_sum$parameters$sigma[1],lf08_sum$se$mu.se[1]))
ycl03_res
## Trying to follow the 2009 year-class
lf10 <- filterD(lf,year==2010) %>%
  group_by(lcat5) %>% summarize(n=n()) %>% as.mixdata()
lf10_fit <- mix(lf10,mixparam(c(100,200),10),dist=dtype,constr=ctype)
plot(lf10_fit,dist=dtype)
lf10_sum <- summary(lf10_fit)
ycl09_res <- data.frame(year=2010,age=1,
  mntl=lf10_sum$parameters$mu[1],
  sdtl=lf10_sum$parameters$sigma[1],
  setl=lf10_sum$se$mu.se[1])
lf11 <- filterD(lf,year==2011) %>%
  group_by(lcat5) %>% summarize(n=n()) %>% as.mixdata()
lf11_fit <- mix(lf11,mixparam(c(130,200),10),dist=dtype,constr=ctype)
plot(lf11_fit,dist=dtype)
lf11_sum <- summary(lf11_fit)
ycl09_res <- rbind(ycl09_res,c(2011,2,lf11_sum$parameters$mu[1],
  lf11_sum$parameters$sigma[1],lf11_sum$se$mu.se[1]))
lf12 <- filterD(lf,year==2012) %>%
  group_by(lcat5) %>% summarize(n=n()) %>% as.mixdata()
lf12_fit <- mix(lf12,mixparam(c(150,200),10),dist=dtype,constr=ctype)
plot(lf12_fit,dist=dtype)
lf12_sum <- summary(lf12_fit)
ycl09_res <- rbind(ycl09_res,c(2012,3,lf12_sum$parameters$mu[1],
  lf12_sum$parameters$sigma[1],lf12_sum$se$mu.se[1]))
lf13 <- filterD(lf,year==2013) %>%
  group_by(lcat5) %>% summarize(n=n()) %>% as.mixdata()
lf13_fit <- mix(lf13,mixparam(c(160,220),c(10,20)),dist=dtype,constr=ctype)
plot(lf13_fit,dist=dtype)
lf13_sum <- summary(lf13_fit)
ycl09_res <- rbind(ycl09_res,c(2013,4,lf13_sum$parameters$mu[1],
  lf13_sum$parameters$sigma[1],lf13_sum$se$mu.se[1]))
ycl09_res
################################################################################
## PLOTS
## Which type to make ... varied among journal submissions (tiff for NAJFM)
ptype <- c("PDF","JPG","TIFF")[3]
## Figure 3 ... Scale age comparison
fig3 <- "results/figures/Figure3_ScaleScaleComp"
if (ptype=="JPG") {
  jpeg(paste0(fig3,".jpg"),width=4.5,height=4.5,units="in",pointsize=14,family="sans",quality=100,res=144)
} else if (ptype=="PDF") {
  pdf(paste0(fig3,".pdf"),width=6,height=6,family="Arial",pointsize=14)
} else tiff(paste0(fig3,".tif"),width=3.5,height=3.5,units="in",pointsize=10,family="sans",res=300)
par(mar=c(3,3,0.5,0.75),mgp=c(1.9,0.5,0),tcl=-0.2,las=1)
plot(ab_SS,xlim=c(2.6,8.4),ylim=c(-2.4,2.4),
  ylab="Second Reader - First Reader Age",xlab="First Reader Age")
dev.off()
## Figure 4 ... Otolith age comparison
fig4 <- "results/figures/Figure4_OtoOtoComp"
if (ptype=="JPG") {
  jpeg(paste0(fig4,".jpg"),width=4.5,height=4.5,units="in",pointsize=14,family="sans",quality=100,res=144)
} else if (ptype=="PDF") {
  pdf(paste0(fig4,".pdf"),width=6,height=6,family="Arial",pointsize=14)
} else tiff(paste0(fig4,".tif"),width=3.5,height=3.5,units="in",pointsize=10,family="sans",res=300)
par(mar=c(3,3,0.5,0.75),mgp=c(1.9,0.5,0),tcl=-0.2,las=1)
plot(ab_OO,xlim=c(4,20),ylim=c(-3.4,2.4),
  ylab="Second Reader - First Reader Age",xlab="First Reader Age")
dev.off()
## Figure 5 ... Scale-Otolith age-bias plot
fig5 <- "results/figures/Figure5_ScaleOtoComp"
if (ptype=="JPG") {
  jpeg(paste0(fig5,".jpg"),width=4.5,height=4.5,units="in",pointsize=14,family="sans",quality=100,res=144)
} else if (ptype=="PDF") {
  pdf(paste0(fig5,".pdf"),width=6,height=6,family="Arial",pointsize=14)
} else tiff(paste0(fig5,".tif"),width=3.5,height=3.5,units="in",pointsize=10,family="sans",res=300)
par(mar=c(3,3,0.5,0.5),mgp=c(1.9,0.5,0),tcl=-0.2,las=1)
plot(ab_OS,xlim=c(3.9,16.1),ylim=c(-11.4,1.4),
  ylab="Consensus Scale - Consensus Otolith Age",xlab="Consensus Otolith Age")
dev.off()
## Figure 6 ... Age frequency
fig6 <- "results/figures/Figure6_OtoAgeFreq"
if (ptype=="JPG") {
  jpeg(paste0(fig6,".jpg"),width=4.5,height=4.5,units="in",pointsize=14,family="sans",quality=100,res=144)
} else if (ptype=="PDF") {
  pdf(paste0(fig6,".pdf"),width=6,height=6,family="Arial",pointsize=14)
} else tiff(paste0(fig6,".tif"),width=3.5,height=3.5,units="in",pointsize=10,family="sans",res=300)
par(mar=c(3,3,0.5,0.5),mgp=c(1.9,0.5,0),tcl=-0.2,las=1)
hist(~otoAge,data=lf14_ages,xlab="Age (years)",w=1,
  xaxt="n",yaxt="n",ylim=c(0,300),col="gray70")
axis(1,at=seq(4.5,20.5,1),labels=NA)
axis(1,at=seq(5.5,20.5,5),labels=seq(5,20,5))
axis(2,at=seq(0,300,50),labels=NA)
axis(2,at=seq(0,300,100),labels=seq(0,300,100))
dev.off()
## Figure 7 ... Length Frequency Progression
lf <- read.csv("data/clean/lenfreq_all.csv") %>%
  mutate(year=factor(year)) %>%
  filterD(year %in% 2003:2014)
# Build facet labels of the form "2003 (n=123)".
tmp <- lf %>% group_by(year) %>% summarize(n=n())
lf %<>% mutate(year2=factor(mapvalues(year,levels(year),
  paste0(tmp$year," (n=",tmp$n,")"))))
lvls <- levels(lf$year2)
fig7 <- "results/figures/Figure7_LFProgression"
if (ptype=="JPG") {
  jpeg(paste0(fig7,".jpg"),width=6.5,height=9,units="in",pointsize=24,family="sans",quality=100,res=144)
} else if (ptype=="PDF") {
  pdf(paste0(fig7,".pdf"),width=6,height=9,family="Arial",pointsize=24)
} else tiff(paste0(fig7,".tif"),width=7.25,height=10,units="in",pointsize=10,family="sans",res=300)
# NOTE(review): this ggplot object relies on top-level auto-printing; if this
# file is run via source() the plot will NOT be drawn into the device --
# wrap in print() to be safe. Confirm how the script is executed.
ggplot(lf,aes(x=tl)) +
  theme_mhist() +
  scale_x_continuous(expand=c(0.02,0),limits=c(40,310),breaks=seq(0,350,50),
  labels=c("","",100,"",200,"",300,"")) +
  scale_y_continuous(expand=c(0,0),limits=c(0,1.2)) +
  geom_histogram(aes(y=..ncount..),binwidth=5,fill="gray70",color="black",size=0.1) +
  facet_wrap(~year2,nrow=4,dir="v") +
  labs(x="Total Length (mm)",y="Relative Frequency") +
  geom_vline(xintercept=110,lty=2) +
  geom_text(aes(y=1.10),data=data.frame(tl=88,year2=factor(lvls[2],levels=lvls)),
  label="11",size=5) +
  geom_text(aes(y=0.4),data=data.frame(tl=95,year2=factor(lvls[4],levels=lvls)),
  label="9",size=5) +
  geom_text(aes(y=0.65),data=data.frame(tl=90,year2=factor(lvls[8],levels=lvls)),
  label="5",size=5)
dev.off()
|
f3b2f99ac43fb3517e06ea43c1c2e8d86d4cf99d
|
c02dc97094e17be13fa2d74b5d5c393cca8510f8
|
/inst/doc/PGSEA.R
|
9fa2e5b6870288f6ea01377b5dc1061a920db666
|
[] |
no_license
|
SiYangming/PGSEA
|
1dabbcf2b793ad635a0a115f41dd37823df978c6
|
44a502effe40ec92a5a8d343bf04d6df2038f6af
|
refs/heads/master
| 2023-02-26T00:34:38.903195
| 2021-02-02T10:27:02
| 2021-02-02T10:27:02
| 335,250,497
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,942
|
r
|
PGSEA.R
|
### R code from vignette source 'PGSEA.Rnw'
# FIX: replaced `T` with the reserved literal `TRUE` in the smcPlot() calls
# below -- `T` is an ordinary variable that can be reassigned, so relying on
# it is fragile. No other behavior changes.
###################################################
### code chunk number 1: PGSEA.Rnw:40-43
###################################################
library(PGSEA)
# Minimal "smc" (simple molecular concept) gene-set object.
basic <- new("smc",ids=c("gene a","gene b"),reference="simple smc")
str(basic)
###################################################
### code chunk number 2: PGSEA.Rnw:50-53
###################################################
# Read the example gene-set collection shipped with the package.
datadir <- system.file("extdata", package = "PGSEA")
sample <- readGmt(file.path(datadir, "sample.gmt"))
str(sample[1])
###################################################
### code chunk number 3: PGSEA.Rnw:58-61
###################################################
# Run PGSEA on the neuroblastoma example set; columns 1-5 are the reference.
data(nbEset)
pg <- PGSEA(nbEset,cl=sample,ref=1:5)
###################################################
### code chunk number 4: PGSEA.Rnw:66-69
###################################################
# Sample annotation for the plot; note `sub` shadows base::sub here.
sub <- factor(c(rep(NA,5),rep("NeuroB",5),rep("NeuroB_MYC+",5)))
smcPlot(pg[,],sub,scale=c(-12,12),show.grid=TRUE,margins=c(1,1,7,13),col=.rwb)
###################################################
### code chunk number 5: PGSEA.Rnw:77-80
###################################################
mcs <- go2smc()[1:10]
pg <- PGSEA(nbEset,cl=mcs,ref=1:5)
###################################################
### code chunk number 6: PGSEA.Rnw:85-88
###################################################
smcPlot(pg[,],sub,scale=c(-12,12),show.grid=TRUE,margins=c(1,1,7,20),col=.rwb)
###################################################
### code chunk number 7: PGSEA.Rnw:98-102
###################################################
#data(VAImcs)
data(VAIgsc)
pg <- PGSEA(nbEset,cl=VAIgsc,ref=1:5)
###################################################
### code chunk number 8: PGSEA.Rnw:107-110
###################################################
smcPlot(pg[,],sub,scale=c(-5,5),show.grid=TRUE,margins=c(1,1,8,14),col=.rwb,r.cex=.7)
|
325ff7ed527cf1878f736cd4f563b0b4e2bea29c
|
f8a99bc9505ecb65f186f4dcb22f0fa9750ec85f
|
/man/make_rc_table.Rd
|
58bc5f3f0b0a37e22adb1319e2657cf6d5f0364b
|
[] |
no_license
|
reptalex/dendromap
|
015171627dc6031172db90414c6f5ec8b90e0722
|
3a0cd41f387fdc1ffbcefa7bbab054835173480c
|
refs/heads/master
| 2021-06-12T00:14:57.326415
| 2021-04-15T20:21:22
| 2021-04-15T20:21:22
| 168,759,654
| 0
| 1
| null | 2021-04-15T20:21:23
| 2019-02-01T21:00:48
|
R
|
UTF-8
|
R
| false
| true
| 694
|
rd
|
make_rc_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_rc_table.R
\name{make_rc_table}
\alias{make_rc_table}
\title{Make table of {row tree,column tree} edge pair statistics}
\usage{
make_rc_table(X, row.tree, col.tree, maxPval = 0.01)
}
\arguments{
\item{X}{dataset whose rows correspond exactly to \code{row.tree$tip.label} and columns correspond to \code{col.tree$tip.label}. NA values not allowed.}
\item{row.tree}{\code{phylo} class object}
\item{col.tree}{\code{phylo} class object}
\item{maxPval}{Cutoff - remove all P-values greater than \code{maxPval}}
}
\description{
Make table of {row tree,column tree} edge pair statistics
}
\examples{
## none yet
}
|
b4bbea46f1ac49029a4be035458d51250fa9ce44
|
cfbb3a909c3740459d76554a652e02a5dcb94706
|
/AHR_test/Code/Compare_gene.R
|
58fbf9275ce16f0f973237e378289f564db78cae
|
[] |
no_license
|
guanxunli/knock_out_test
|
3c620d54c196b38e427efed80f6543e5b71879c3
|
b9e52820def8af024853a273cb0585d08b0ee335
|
refs/heads/master
| 2022-12-07T07:13:00.552788
| 2020-09-01T22:26:08
| 2020-09-01T22:26:08
| 262,395,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,592
|
r
|
Compare_gene.R
|
library(Matrix)
library(Seurat)
library(harmony)
library(ggplot2)
library(ggrepel)
## Add function
## Compute per-gene "differential regulation" statistics from a manifold
## alignment matrix whose rows are X_<gene> (first condition embedding)
## followed by Y_<gene> (second condition embedding) in the same gene order.
##
## manifoldOutput: numeric matrix, rownames "X_<gene>" then "Y_<gene>".
## gKO: name(s) of the knocked-out gene(s); length(gKO) top-ranked rows are
##      excluded from the null mean when computing fold-changes.
## Returns a data frame (gene, distance, Z, FC, p.value, p.adj) sorted by
## decreasing distance.
dRegulation <- function(manifoldOutput, gKO){
  # Recover gene names from the X_-prefixed rows; the Y_ rows must mirror them.
  geneList <- rownames(manifoldOutput)
  geneList <- geneList[grepl('^X_', geneList)]
  geneList <- gsub('^X_','', geneList)
  nGenes <- length(geneList)
  eGenes <- nrow(manifoldOutput)/2
  eGeneList <- rownames(manifoldOutput)
  eGeneList <- eGeneList[grepl('^Y_', eGeneList)]
  eGeneList <- gsub('^Y_','', eGeneList)
  # Validate that the X_ and Y_ halves agree in count and order.
  if(nGenes != eGenes){
    stop('Number of identified and expected genes are not the same')
  }
  if(!all(eGeneList == geneList)){
    stop('Genes are not ordered as expected. X_ genes should be followed by Y_ genes in the same order')
  }
  # Euclidean distance between each gene's X and Y embeddings
  # (row G pairs with row G + nGenes).
  dMetric <- sapply(seq_len(nGenes), function(G){
    X <- manifoldOutput[G,]
    Y <- manifoldOutput[(G+nGenes),]
    I <- rbind(X,Y)
    O <- stats::dist(I)
    O <- as.numeric(O)
    return(O)
  })
  ### BOX-COX
  # Box-Cox power transform toward normality before z-scoring; lambda = 0
  # (the log case) is deliberately excluded from the search grid.
  lambdaValues <- seq(-2,2,length.out = 1000)
  lambdaValues <- lambdaValues[lambdaValues != 0]
  BC <- MASS::boxcox(dMetric~1, plot=FALSE, lambda = lambdaValues)
  BC <- BC$x[which.max(BC$y)]
  # For a negative lambda the reciprocal keeps the transform monotone
  # increasing (1/(x^BC) = x^|BC| when BC < 0).
  if(BC < 0){
    nD <- 1/(dMetric ^ BC)
  } else {
    nD <- dMetric ^ BC
  }
  Z <- scale(nD)
  dOut <- data.frame(
    gene = geneList,
    distance = dMetric,
    Z = Z
  )
  dOut <- dOut[order(dOut$distance, decreasing = TRUE),]
  # Fold-change of each squared distance over the mean squared distance of
  # all genes except the top length(gKO) rows.
  # NOTE(review): this assumes the KO gene(s) rank first after sorting --
  # confirm that assumption holds for the inputs used.
  FC <- (dOut$distance^2)/mean((dOut$distance[-seq_len(length(gKO))]^2))
  # Chi-square tail test (df = 1) on the fold-change, FDR-adjusted.
  pValues <- pchisq(q = FC,df = 1,lower.tail = FALSE)
  pAdjusted <- p.adjust(pValues, method = 'fdr')
  dOut$FC = FC
  dOut$p.value = pValues
  dOut$p.adj = pAdjusted
  dOut <- as.data.frame.array(dOut)
  return(dOut)
}
# load("AHR_test/Results/DR_ori.Rdata")
# gene_DE <- rownames(DE)[DE$p_val_adj < 0.05]
# length(gene_DE)
####################################### Gene compare ##############################################
load('AHR_test/Results/PreenterocytesDR.RData')
O$manifoldAlignment <- O$manifoldAlignment[!grepl('_Rpl|_Rps',rownames(O$manifoldAlignment)),]
DR <- dRegulation(O$manifoldAlignment, 'Ahr')
O$diffRegulation <- DR
gList_net <- O$diffRegulation$gene[O$diffRegulation$p.adj < 0.05]
## daniel's method
load('AHR_test/Results/Preenterocytes.RData')
O$manifoldAlignment <- O$manifoldAlignment[!grepl('_Rpl|_Rps',rownames(O$manifoldAlignment)),]
head(O$manifoldAlignment)
DR <- dRegulation(O$manifoldAlignment, 'Ahr')
O$diffRegulation <- DR
gList_daniel <- O$diffRegulation$gene[O$diffRegulation$p.adj < 0.05]
library(UpSetR)
png('AHR_test/Compare_gene_list/glist_daniel_net.png', width = 600, height = 600, res = 300)
upset(fromList(list(scTenifoldNet=gList_net, daniel_knk=gList_daniel)))
dev.off()
## yan's method
O <- readRDS("AHR_test/Results_Yan/Ahr_yan.rds")
O$manifoldAlignment <- O$manifoldAlignment[!grepl('_Rpl|_Rps',rownames(O$manifoldAlignment)),]
head(O$manifoldAlignment)
O$manifoldAlignment <- O$manifoldAlignment[, 1:3]
DR <- dRegulation(O$manifoldAlignment, 'Ahr')
O$diffRegulation <- DR
gList_yan <- O$diffRegulation$gene[O$diffRegulation$p.adj < 0.05]
png('AHR_test/Compare_gene_list/glist_yan_net.png', width = 600, height = 600, res = 300)
upset(fromList(list(scTenifoldNet=gList_net, yan_knk=gList_yan)))
dev.off()
png('AHR_test/Compare_gene_list/glist_yan_daniel.png', width = 600, height = 600, res = 300)
upset(fromList(list(daniel_knk=gList_daniel, yan_knk=gList_yan)))
dev.off()
## yan's new method
# Note this result object uses lowercase `manifoldalignment` (unlike the
# `manifoldAlignment` slot in the sections above).
O <- readRDS("AHR_test/Results_YAN_new/data/Ahr_yan_k2_gamma0_method1.rds")
O$manifoldalignment <- O$manifoldalignment[!grepl('_Rpl|_Rps',rownames(O$manifoldalignment)),]
head(O$manifoldalignment)
# Columns 2-4 (the first column is skipped for this method).
O$manifoldalignment <- O$manifoldalignment[, 2:4]
DR <- dRegulation(O$manifoldalignment, 'Ahr')
O$diffRegulation <- DR
gList_yan_new <- O$diffRegulation$gene[O$diffRegulation$p.adj < 0.05]
png('AHR_test/Compare_gene_list/glist_yan_net_new.png', width = 600, height = 600, res = 300)
upset(fromList(list(scTenifoldNet=gList_net, yan_knk=gList_yan_new)))
dev.off()
## yan's new norm method
O <- readRDS('AHR_test/Results_YAN_new_norm/data/Ahr_yan_k2_gamma0_method0.rds')
O$manifoldalignment <- O$manifoldalignment[!grepl('_Rpl|_Rps',rownames(O$manifoldalignment)),]
head(O$manifoldalignment)
# Columns 2-3 here vs 2-4 in the "yan's new method" section above --
# NOTE(review): confirm this narrower selection is intentional.
O$manifoldalignment <- O$manifoldalignment[, 2:3]
DR <- dRegulation(O$manifoldalignment, 'Ahr')
# FIX: the freshly computed DR was never stored, so the next line read a
# stale (or missing) diffRegulation slot from the RDS. Every sibling section
# assigns it; do the same here.
O$diffRegulation <- DR
gList_yan_new_norm <- O$diffRegulation$gene[O$diffRegulation$p.adj < 0.05]
png('AHR_test/Compare_gene_list/glist_yan_net_new_norm.png', width = 600, height = 600, res = 300)
upset(fromList(list(scTenifoldNet=gList_net, yan_knk=gList_yan_new_norm)))
dev.off()
############################## significant gene pathway ##############################################
# Run Enrichr over each method's significant gene list against six gene-set
# libraries, keep terms with adjusted p < 0.05, and collect the unique terms.
library(enrichR)
MET_net <- enrichr(gList_net, c('Reactome_2016','BioPlanet_2019','KEGG_2019_Mouse', 'GO_Biological_Process_2018', 'GO_Molecular_Function_2018', 'GO_Cellular_Component_2018'))
# enrichr() returns one data frame per library; stack them into one table.
MET_net <- do.call(rbind.data.frame, MET_net)
MET_net <- MET_net[order(MET_net$Adjusted.P.value),]
MET_net <- MET_net[MET_net$Adjusted.P.value < 0.05,]
pathway_net <- unique(MET_net$Term)
MET_daniel <- enrichr(gList_daniel, c('Reactome_2016','BioPlanet_2019','KEGG_2019_Mouse', 'GO_Biological_Process_2018', 'GO_Molecular_Function_2018', 'GO_Cellular_Component_2018'))
MET_daniel <- do.call(rbind.data.frame, MET_daniel)
MET_daniel <- MET_daniel[order(MET_daniel$Adjusted.P.value),]
MET_daniel <- MET_daniel[MET_daniel$Adjusted.P.value < 0.05,]
pathway_daniel <- unique(MET_daniel$Term)
MET_yan <- enrichr(gList_yan, c('Reactome_2016','BioPlanet_2019','KEGG_2019_Mouse', 'GO_Biological_Process_2018', 'GO_Molecular_Function_2018', 'GO_Cellular_Component_2018'))
MET_yan <- do.call(rbind.data.frame, MET_yan)
MET_yan <- MET_yan[order(MET_yan$Adjusted.P.value),]
MET_yan <- MET_yan[MET_yan$Adjusted.P.value < 0.05,]
pathway_yan <- unique(MET_yan$Term)
MET_yan_new <- enrichr(gList_yan_new, c('Reactome_2016','BioPlanet_2019','KEGG_2019_Mouse', 'GO_Biological_Process_2018', 'GO_Molecular_Function_2018', 'GO_Cellular_Component_2018'))
MET_yan_new <- do.call(rbind.data.frame, MET_yan_new)
MET_yan_new <- MET_yan_new[order(MET_yan_new$Adjusted.P.value),]
MET_yan_new <- MET_yan_new[MET_yan_new$Adjusted.P.value < 0.05,]
pathway_yan_new <- unique(MET_yan_new$Term)
# Enrichment for the yan-new-norm gene list (same six libraries as above).
MET_yan_new_norm <- enrichr(gList_yan_new_norm, c('Reactome_2016','BioPlanet_2019','KEGG_2019_Mouse', 'GO_Biological_Process_2018', 'GO_Molecular_Function_2018', 'GO_Cellular_Component_2018'))
MET_yan_new_norm <- do.call(rbind.data.frame, MET_yan_new_norm)
MET_yan_new_norm <- MET_yan_new_norm[order(MET_yan_new_norm$Adjusted.P.value),]
# FIX: filter this section's own table. The original subset `MET_yan` (a
# copy-paste slip from the section above), silently replacing the
# yan-new-norm results with re-filtered yan results.
MET_yan_new_norm <- MET_yan_new_norm[MET_yan_new_norm$Adjusted.P.value < 0.05,]
pathway_yan_new_norm <- unique(MET_yan_new_norm$Term)
# Pairwise overlap of the significant pathway term lists as UpSet plots.
library(UpSetR)
png('AHR_test/Compare_gene_list/pathway_daniel_net.png', width = 600, height = 600, res = 300)
upset(fromList(list(scTenifoldNet=pathway_net, daniel_knk=pathway_daniel)))
dev.off()
png('AHR_test/Compare_gene_list/pathway_yan_net.png', width = 600, height = 600, res = 300)
upset(fromList(list(scTenifoldNet=pathway_net, yan_knk=pathway_yan)))
dev.off()
png('AHR_test/Compare_gene_list/pathway_daniel_yan.png', width = 600, height = 600, res = 300)
upset(fromList(list(daniel_knk=pathway_daniel, yan_knk=pathway_yan)))
dev.off()
# png('AHR_test/Compare_gene_list/pathway_yan_new_net.png', width = 600, height = 600, res = 300)
# upset(fromList(list(scTenifoldNet=pathway_net, yan_knk=pathway_yan_new)))
# dev.off()
png('AHR_test/Compare_gene_list/pathway_yan_new_norm_net.png', width = 600, height = 600, res = 300)
# FIX: the set holding pathway_yan_new_norm was labeled "daniel_knk"
# (copy-paste slip), mislabeling the plot; label it "yan_knk" to match the
# other yan comparisons.
upset(fromList(list(scTenifoldNet=pathway_net, yan_knk=pathway_yan_new_norm)))
dev.off()
|
53723c642b86294038c7c6df5e6f6a3b6ca85f1e
|
b2d2942d29ed06a330a2c13ab8a1ee1edfddf292
|
/R/CreatePortfolio.R
|
30a9b5c05b1851ea71e6a415b0cd056f9879882d
|
[
"MIT"
] |
permissive
|
Predacity/LendingClub
|
111b0ca33fcc800949fb3fedeee9c08934ba2113
|
706096cbd3c23c5388cc784bd639544e36f81084
|
refs/heads/master
| 2021-01-20T13:22:53.438079
| 2017-04-25T03:17:16
| 2017-04-25T03:17:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 680
|
r
|
CreatePortfolio.R
|
#' Create a new portfolio
#'
#' Creates a named portfolio on the investor's Lending Club account.
#' Notes purchased from the primary market through the API can then be
#' assigned to this portfolio.
#'
#' @param port_name String. Name of the new portfolio
#' @param port_desc String. Portfolio description
#' @inheritParams AccountSummary
#' @export
CreatePortfolio <- function(port_name, port_desc, LC_CRED = NULL) {
  # Resolve/validate credentials (CheckCred handles the NULL default).
  cred <- CheckCred(LC_CRED)
  # Build the "portfolios" endpoint URL for this investor account.
  endpoint <- MakeURL(cred$investorID, "portfolios")
  # Request payload expected by the portfolios endpoint.
  payload <- list(
    "aid" = cred$investorID,
    "portfolioName" = port_name,
    "portfolioDescription" = port_desc
  )
  LC_POST(endpoint, payload, cred$key)
}
|
66f10e905fbee272fa02989b786af263808911b2
|
cd9c0e636a478f7c9db4a5b41ea556c26b2fa2d1
|
/Task-1.R
|
54820fc2f4bc06cdb09b6c5ef23a8901c4bc8417
|
[] |
no_license
|
priyanka0111/The-Spark-foundation-2021
|
eee19fc43c0c137a61dd22812ad9af3edf2b5919
|
44324dd46f0f1b5da92a0a0787704c73a5de6ac0
|
refs/heads/main
| 2023-04-04T20:30:10.158812
| 2021-04-20T16:53:34
| 2021-04-20T16:53:34
| 359,128,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,013
|
r
|
Task-1.R
|
# THE SPARK FOUNDATION - INTERNSHIP APRIL 2021
# DATA SCIENCE & BUSINESS ANALYTICS
# TASK 1: Prediction using supervised machine learning.
# Question: what score is predicted if a student studies 9.25 hrs/day?

# Import the data (hours studied vs. exam score, one row per student).
student<-read.csv("https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv")
student
names(student)
dim(student)
# NOTE(review): is.null() on a freshly read data frame is always FALSE,
# so this line only echoes FALSE; it does not detect missing values.
is.null(student)
# Summary statistics of the student data.
summary(student)
# Simple linear regression: lm(response ~ predictor, data = source).
# NOTE(review): confirm the CSV header really uses lower-case
# `hours`/`scores`; R is case-sensitive, so `Hours`/`Scores` would fail here.
# The first lm() call below is evaluated and discarded; only `model` is kept.
lm(scores~hours,data=student)
model<-lm(scores~hours,data=student)
model
# Visualize the data and overlay the fitted regression line.
plot(student)
abline(model,col="red")
# Model summary: coefficients, R-squared, residuals.
summary(model)
# Predict the score for 9.25 hours of study per day.
test<-data.frame(hours=9.25)
prediction<-predict(model,test)
prediction
# Conclusion: a student studying 9.25 hrs/day is predicted to score ~92.91.
|
6142ecf334130147bee36f92fb21a900d1338ef0
|
f199937e8fbbad3476372858513e2f4498419824
|
/man/scrape_investagram.Rd
|
110934d2184efb3a0ff7701a104cdd3425616ca0
|
[] |
no_license
|
nfrimando/pseR
|
92ac707a9c610b06de9edae22a51ca303d024559
|
d666b95875461a30994cc05af8ed137d859b5969
|
refs/heads/master
| 2022-01-18T15:54:54.812328
| 2022-01-09T09:49:03
| 2022-01-09T09:49:03
| 254,435,856
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 660
|
rd
|
scrape_investagram.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrape_investagram.R
\name{scrape_investagram}
\alias{scrape_investagram}
\title{Scrape Investagram Stock Info}
\usage{
scrape_investagram(codes, details = c("hist", "bands"))
}
\arguments{
\item{codes}{Vector of Stock Code(s) from PSE (e.g. JFC, FMETF, BPI)}
\item{details}{Vector containing details to scrape. `hist` - dataframe of historical data.
`bands` - basic analytics}
}
\value{
A list with investagram information
}
\description{
This function extracts raw HTML from Investagram UI and
returns list of details
}
\examples{
scrape_investagram(codes = c("JFC", "GLO"))
}
|
3c5aea7bcec7d271f879ef8ccd96b31b2b8823b7
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/R/fit_earth.R
|
0e7e5ac08ae5291310139ae7cafaf9d8c1caa716
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,147
|
r
|
fit_earth.R
|
#' @title Fit a multivariate adaptive regression splines (MARS) model
#' @description Fits an \code{earth} model of the indicator on the supplied
#'   covariates, with optional observation weights. Expects \code{df} to be a
#'   \code{data.table} that already contains the extracted covariate columns
#'   and, for the binomial family, a trial-count column \code{N}.
#' @param df A \code{data.table} with the outcome/indicator and covariates
#'   already extracted.
#' @param covariates A vector of covariate names and/or a formula-style string
#'   (e.g. \code{"cov1 + cov2 + cov3"}), Default: all_fixed_effects
#' @param additional_terms A character vector (or single name) of extra columns
#'   of \code{df} to include in the model fit, Default: NULL
#' @param weight_column Name of a column of \code{df} holding observation
#'   weights; if NULL, all observations get weight 1, Default: NULL
#' @param indicator Name of the outcome column in \code{df}.
#' @param indicator_family Model family, either "binomial" or "gaussian",
#'   Default: 'binomial'
#' @return A fitted \code{earth} model object.
#' @details For the binomial family the response is a (success, failure)
#'   matrix built from \code{indicator} and \code{N}. Requires the
#'   \code{earth} package plus the project helpers
#'   \code{format_covariates()} and \code{add_additional_terms()}.
#' @examples
#' \dontrun{
#' if (interactive()) {
#'   fit_earth(df, covariates = "access + elevation", indicator = "died")
#' }
#' }
#' @rdname fit_earth
#' @export
fit_earth <- function(df, covariates = all_fixed_effects, additional_terms = NULL, weight_column = NULL, indicator, indicator_family = "binomial") {
  # Copy to avoid mutating the caller's data.table by reference below.
  df <- copy(df) # in case data table scoping gets wonky
  # Merge the extra terms into the covariate list and normalize to a vector.
  the_covs <- format_covariates(add_additional_terms(covariates, additional_terms))
  # set response variable: (success, failure) counts for binomial,
  # raw outcome for gaussian. No response is set for other families.
  if (indicator_family == "binomial") response <- cbind(success = df[, get(indicator)], failure = df[, N] - df[, get(indicator)])
  if (indicator_family == "gaussian") response <- cbind(outcome = df[, get(indicator)])
  # Observation weights: use the named column if given, otherwise weight 1.
  # (Both branches assign by reference into the copied df.)
  if (!is.null(weight_column)) {
    df[, data_weight := get(weight_column)]
  } else {
    df[, data_weight := 1]
  }
  weight_column <- "data_weight"
  # Fit the MARS model via earth(), passing the GLM family through.
  message(paste0("Fitting earth"))
  model <- earth(x = df[, the_covs, with = F], y = response, weights = df[, get(weight_column)], glm = list(family = indicator_family))
  # Return the fitted earth object.
  return(model)
}
|
39e9883ffcec9f705a5baa39ba780590ef99ada8
|
6e32987e92e9074939fea0d76f103b6a29df7f1f
|
/googleaiplatformv1.auto/man/GoogleCloudAiplatformV1ExportDataOperationMetadata.Rd
|
0b1f3880231638df3c64779e6b5be20d0005c94a
|
[] |
no_license
|
justinjm/autoGoogleAPI
|
a8158acd9d5fa33eeafd9150079f66e7ae5f0668
|
6a26a543271916329606e5dbd42d11d8a1602aca
|
refs/heads/master
| 2023-09-03T02:00:51.433755
| 2023-08-09T21:29:35
| 2023-08-09T21:29:35
| 183,957,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 920
|
rd
|
GoogleCloudAiplatformV1ExportDataOperationMetadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1ExportDataOperationMetadata}
\alias{GoogleCloudAiplatformV1ExportDataOperationMetadata}
\title{GoogleCloudAiplatformV1ExportDataOperationMetadata Object}
\usage{
GoogleCloudAiplatformV1ExportDataOperationMetadata(
gcsOutputDirectory = NULL,
genericMetadata = NULL
)
}
\arguments{
\item{gcsOutputDirectory}{A Google Cloud Storage directory which path ends with '/'}
\item{genericMetadata}{The common part of the operation metadata}
}
\value{
GoogleCloudAiplatformV1ExportDataOperationMetadata object
}
\description{
GoogleCloudAiplatformV1ExportDataOperationMetadata Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Runtime operation information for DatasetService.ExportData.
}
\concept{GoogleCloudAiplatformV1ExportDataOperationMetadata functions}
|
17cef888d2a5cb1671c92be3871d2619e6945573
|
9f881b694e1301e47d3a960ab7e68386d84c9b1a
|
/Assignment 5 Clinical Trial.R
|
6f1dd79e6962ab0aa6f5f114ff4e82549ff7731c
|
[] |
no_license
|
ankitbhargava62/MITx-15.071x-The-Analytics-Edge
|
8479c0e96be036903839a5f75a69f68c1e5b70fc
|
58651ca836a44ebe7c879da7e97f8c91346a6074
|
refs/heads/master
| 2020-03-19T21:08:37.697468
| 2015-05-23T11:03:34
| 2015-05-23T11:03:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,614
|
r
|
Assignment 5 Clinical Trial.R
|
# Assignment 5 Clinical Trial
#
# Text-mining pipeline: build document-term matrices from paper titles and
# abstracts, then fit CART, random forest, and logistic regression models to
# classify whether a paper describes a clinical trial (column `trial`).

# Read in the data. Bug fix: the object is named `trials` because every
# later reference (trials$title, trials$abstract, trials$trial) uses that name.
trials = read.csv("clinical_trial.csv", stringsAsFactors=FALSE)
str(trials)

# Longest abstract and index of the shortest title.
max(nchar(trials$abstract))
which.min(nchar(trials$title))

# Text-mining packages.
library(tm)
library(SnowballC)

# TITLE corpus: lowercase, strip punctuation and English stopwords, stem,
# then keep terms appearing in at least 5% of documents.
corpusTitle = Corpus(VectorSource(trials$title))
corpusTitle = tm_map(corpusTitle, tolower)
corpusTitle = tm_map(corpusTitle, PlainTextDocument)
corpusTitle = tm_map(corpusTitle, removePunctuation)
corpusTitle = tm_map(corpusTitle, removeWords, stopwords("english"))
corpusTitle = tm_map(corpusTitle, stemDocument)
dtmTitle = DocumentTermMatrix(corpusTitle)
dtmTitle = removeSparseTerms(dtmTitle, 0.95)
dtmTitle = as.data.frame(as.matrix(dtmTitle))

# ABSTRACT corpus: same pipeline.
# Bug fix: every tm_map below transformed corpusTitle in the original,
# so dtmAbstract was silently built from the titles.
corpusAbstract = Corpus(VectorSource(trials$abstract))
corpusAbstract = tm_map(corpusAbstract, tolower)
corpusAbstract = tm_map(corpusAbstract, PlainTextDocument)
corpusAbstract = tm_map(corpusAbstract, removePunctuation)
corpusAbstract = tm_map(corpusAbstract, removeWords, stopwords("english"))
corpusAbstract = tm_map(corpusAbstract, stemDocument)
dtmAbstract = DocumentTermMatrix(corpusAbstract)
dtmAbstract = removeSparseTerms(dtmAbstract, 0.95)
dtmAbstract = as.data.frame(as.matrix(dtmAbstract))

# Prefix column names so title terms ("T...") and abstract terms ("A...")
# do not collide when combined.
colnames(dtmTitle) = paste0("T", colnames(dtmTitle))
colnames(dtmAbstract) = paste0("A", colnames(dtmAbstract))
dtm = cbind(dtmTitle, dtmAbstract)
dtm$trial = trials$trial

# Split 70/30, stratified on the outcome.
library(caTools)
set.seed(144)
split = sample.split(dtm$trial, SplitRatio = 0.7)
trainSparse = subset(dtm, split==TRUE)
testSparse = subset(dtm, split==FALSE)
table(trainSparse$trial)

# CART model on the training set.
library(rpart)
library(rpart.plot)
trialCART = rpart(trial ~ ., data=trainSparse, method="class")
prp(trialCART)

# Training-set predicted probabilities of the positive class.
predTrain = predict(trialCART)[,2]
summary(predTrain)
table(trainSparse$trial, predTrain>=0.5)

# Test-set confusion matrix for CART at a 0.5 cutoff.
predictCART = predict(trialCART, newdata=testSparse)[,2]
table(testSparse$trial, predictCART>=0.5)

# Baseline accuracy. Bug fix: tabulate the outcome column `trial`; the
# original referenced a nonexistent `Negative` column from another exercise.
table(testSparse$trial)

# Random forest. Bug fixes: model `trial` (not `Negative`) and coerce the
# 0/1 outcome to a factor so randomForest performs classification.
library(randomForest)
set.seed(123)
trialRF = randomForest(as.factor(trial) ~ ., data=trainSparse)
predictRF = predict(trialRF, newdata=testSparse)
table(testSparse$trial, predictRF)

# Logistic regression on the same outcome, evaluated at a 0.5 cutoff.
set.seed(123)
trialLog = glm(trial ~ ., data=trainSparse, family="binomial")
predictLog = predict(trialLog, newdata=testSparse, type="response")
table(testSparse$trial, predictLog>=0.5)
|
c79c3a031648bd6d43d2a558c136135718129d4d
|
c95d61a58f83ea3beae1949c2e4cd8e5f1186c01
|
/BM-challenge/tabs/6. dam.R
|
6d1773f5a44497e5b3dd6cb21c2ab8f483a65536
|
[] |
no_license
|
TMBish/TMBisc
|
d9da0e75320b47a5e539f6124430dc49cef61e4f
|
79378a7e18e02afaf9679ef8f914d9b21f39e453
|
refs/heads/master
| 2023-02-22T19:14:11.210224
| 2023-02-16T06:55:09
| 2023-02-16T06:55:09
| 172,413,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,663
|
r
|
6. dam.R
|
# Shiny dashboard UI for the "Dam Diving" challenge tab: a two-tab box
# (description + results entry) on the left, and challenge-summary
# valueBoxes on the right.
damTab =
  fluidRow(
    column(width = 8,
      tabBox(
        title = "Dam Diving",
        id = "tabset1", height = "600", width = 12,
        # Static challenge description, rendered as raw HTML.
        tabPanel("Challenge Description",
          HTML(
            "<strong> <big> Overview </big> </strong> <br/>
            <p> Each team must (1 at a time / survivor style) run from the starting line and dive into the dam to
            retrieve the golf balls successfully hit to the bottom of the dam. Only one member of each team is allowed in
            the dam at a time. The team that retrieves the most balls wins.
            </p> <br/> <br/>
            <strong> <big> What constitutes participation? </big> </strong> <br/>
            <p> Participation is MANDATORY. </p> <br/> <br/>
            <strong> <big> How do I win bonus points? </big> </strong> <br/>
            <p> NA </p>")),
        # Editable results table (handsontable) with calculate/upload actions;
        # the "calc6"/"up6" ids are handled by the server-side observers.
        tabPanel("Results",
          hotable("hotable6"),
          br(),
          actionButton("calc6","calculate",styleclass="primary",size="mini"),
          actionButton("up6","upload",styleclass="danger"))
      )
    ),
    column(width = 4,
      valueBox("Team/Individual", "Team", icon = icon("user"), width = NULL, color = "olive"),
      valueBox("Min. Participants", 10, icon = icon("users"), width = NULL, color = "olive"),
      # NOTE(review): "Avaliable" is a typo in the displayed string; left
      # unchanged here since it is user-facing runtime text.
      valueBox("Points Avaliable", 75, icon = icon("money"), width = NULL, color = "olive"),
      valueBox("Bonus Points?", NA, icon = icon("asterisk"), width = NULL, color = "olive")
    )
  )
# Score the Dam Diving challenge: each team win is worth 15 points.
# Takes the results data frame (must contain a Team.Win column) and
# returns it with the Score column filled in.
calc6 = function(hdf) {
  hdf[["Score"]] <- hdf[["Team.Win"]] * 15
  return(hdf)
}
|
84100f03649eb0b1a95f127c62773a7c244baee8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/OUwie/examples/OUwie.slice.Rd.R
|
400de193dbb2e42e177289ff281cb146bf5b1a02
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,098
|
r
|
OUwie.slice.Rd.R
|
# Extracted example code for OUwie::OUwie.slice. Only the data load is
# executed; the model fits are left commented out (they are long-running).
library(OUwie)
### Name: OUwie.slice
### Title: Generalized Hansen models with time slices
### Aliases: OUwie.slice
### Keywords: models
### ** Examples
# Example tree + trait data shipped with the OUwie package.
data(tworegime)
##Here we want a fixed slice at T=2, assuming the present is T=0:
#library(phytools)
#max.height <- max(nodeHeights(tree))
#timeslices <- max.height - 2
#timeslices <- c(0,timeslices)
#phy.sliced<-make.era.map(tree,timeslices)
#leg<-c("blue3","red3")
#names(leg)<-c(1,2)
#plotSimmap(phy.sliced,leg, pts=FALSE, ftype="off", lwd=1)
##Now fit an BMS model with a single fixed timeslice at time=2:
#ppBM<-OUwie.slice(tree,trait[,c(1,3)],model=c("BMS"), root.station=TRUE, timeslices=c(2))
##Fit an OU model with a single fixed timeslice:
#ppOUM<-OUwie.slice(tree,trait[,c(1,3)],model=c("OUM"), root.station=TRUE, timeslices=c(2))
##Fit an BMS model with an unknown timeslice:
#ppBM<-OUwie.slice(tree,trait[,c(1,3)],model=c("BMS"), root.station=TRUE, timeslices=c(NA))
##Fit an BMS model with an unknown and a fixed timeslice:
#ppBM<-OUwie.slice(tree,trait[,c(1,3)],model=c("BMS"), root.station=TRUE, timeslices=c(NA,2))
|
2467fe59d473a03443afa9d6c21f1a07fcf4089c
|
fd6c510171a206cb9961f3e77ed9a37d49ce4051
|
/wk3/Scoping_Nested_Func.r
|
68b18d8e563a97440382d085cd862b35848fb1e8
|
[] |
no_license
|
chriszeng8/R_Programming
|
8764d908da057d07d758f645346f6bdda7f2b5a0
|
cf7364b8c4dcf05a824acbf419842ddd8bfbb5b4
|
refs/heads/master
| 2016-09-05T20:44:08.950337
| 2015-02-10T07:36:15
| 2015-02-10T07:36:15
| 28,570,335
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 543
|
r
|
Scoping_Nested_Func.r
|
# n_power returns a closure that raises its argument to the n-th power.
# This is a demonstration of R's lexical scoping: the returned function's
# free variable `n` is found in the enclosing n_power() call environment.
n_power <- function(n) {
  force(n)  # evaluate the promise now so the closure captures n's value
  pow <- function(x) {
    # `n` is free here; R resolves it in the environment where pow was
    # defined (the n_power call), not where pow is called.
    x^n
  }
  pow
}

# Specialized functions with n fixed by their enclosing environments.
Cube_of <- n_power(3)
Square_of <- n_power(2)

# Apply them.
Cube_of(2)    # 8
Square_of(3)  # 9

# Inspect the environment in which Cube_of was defined.
ls(environment(Cube_of))
# Bug fix: get() needs an environment *object*. The original passed the
# `environment` function itself, which is an error at runtime.
get("n", envir = environment(Cube_of))
|
eac106fd81c8f6211f5a4498777f33ab18261093
|
aee648231ac0b2fe6509724d0872a1f2f95fa000
|
/CODE/main.r
|
02879c550fb19a515ad1b4928457597c68e2b917
|
[] |
no_license
|
nchaoFORR/IRA-Linked-Tweets-STMs
|
466eab3bceb07a5abe2fe052957a80a5e733f738
|
f0044dcf21061ba1076c4f43571eb29b1df31428
|
refs/heads/master
| 2020-05-16T07:41:21.731721
| 2019-04-23T03:37:29
| 2019-04-23T03:37:29
| 182,884,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 503
|
r
|
main.r
|
# MAIN
#
# Driver script for the IRA-linked-tweets structural topic model analysis.
# Running it produces three artifacts in the directory:
#   1. csv containing top FREX words in topics
#   2. csv containing top FREX words in topics, specific to a particular month
#   3. document-topic proportions
#
# Fix: mandatory dependencies are loaded with library() rather than
# require(); require() only warns and returns FALSE on a missing package,
# which would let the script fail later with a confusing error.
library(tidyverse)
library(stm)
library(quanteda)
library(here)
library(lubridate)
library(tidytext)
library(tictoc)

# Loads and pre-processes the data.
source('helper.r')
# Builds the structural topic model.
source('stm.R')
# Extracts data for Tableau.
source("final_model_extract.r")
|
f058fe510c305954327b4e53332796bf2d4794b1
|
ffe9932351d01e0d74ef7787f23aa3f6e0a8d065
|
/day29/R/day29.R
|
dfb5889f3367e5f3200f5a7240a28f5a5ee9b999
|
[] |
no_license
|
jkaupp/30DayChartChallenge
|
61193d6efd928cc96b044196f683904a1eab0baf
|
6d2530f69ab2acb6b2b82924c6fe5023b2dba7f4
|
refs/heads/master
| 2023-04-23T17:51:36.264431
| 2021-05-01T20:19:52
| 2021-05-01T20:19:52
| 353,776,458
| 22
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,016
|
r
|
day29.R
|
library(tidyverse)
library(here)
library(jkmisc)
library(readxl)
library(janitor)
library(colorspace)
library(scales)
library(ggtext)
library(glue)

# Parker elephant field-data sheets (one row per culled animal),
# with column names normalized to snake_case.
parker <- read_xlsx(here('day29', 'data', 'ParkerElephantData.xlsx')) %>%
  clean_names()

# Keep the measurements of interest and derive the plotting columns.
plot_data <- select(parker,
                    id = animal_or_elephant_id_number,
                    sex,
                    age = age_in_years_if_noted_as_juvenile_est_mos_foetus_etc_enter_zero_here_and_use_note_field_below_to_transcribe,
                    s_height = s_height_st_shoulder_height_straight_in_inches,
                    length = total_body_length_inches,
                    total_weight,
                    live_weight,
                    tusks_weight_right,
                    tusks_weight_left) %>%
  # Bug fix: combine BOTH tusks. The original summed tusks_weight_right
  # twice, so the left tusk never contributed to the combined weight.
  mutate(combined_tusk_weight = tusks_weight_right + tusks_weight_left) %>%
  # Unit conversions: /1000 presumably grams -> kg (TODO confirm source
  # units); inches -> metres using the original author's 0.025 factor
  # (an approximation of 0.0254).
  mutate(combined_tusk_weight = combined_tusk_weight/1000,
         s_height = s_height * 0.025) %>%
  filter(sex %in% c("Male", "Female")) %>%
  # `stip_text` (sic) keeps the original column name so downstream code
  # that references it continues to work.
  mutate(stip_text = spaced_title(sex),
         strip_text = highlight_text(stip_text, if_else(sex == "Male", "#331832", "#D81E5B"),'b', 50))
# Per-sex mean and sd of shoulder height and combined tusk weight, plus a
# markdown label used in the inset facets.
deviations <- plot_data %>%
  select(sex, combined_tusk_weight, s_height) %>%
  group_by(sex) %>%
  summarize(across(everything(), list("sd" = ~sd(.x, na.rm = TRUE), "mean" = ~mean(.x, na.rm = TRUE)))) %>%
  mutate(label = glue("**Mean Shoulder Height** : {round(s_height_mean, 2)} +/- {round(s_height_sd, 2)}m<br>**Mean Combined Tusk Weight** : {round(combined_tusk_weight_mean, 2)} +/- {round(combined_tusk_weight_sd, 2)}kg"))

# One styled facet label per sex, positioned inside the inset panels.
labels <- plot_data %>%
  distinct(sex, strip_text) %>%
  mutate(x = 25, y = 1.5)

# Main scatterplot: combined tusk weight vs shoulder height, colored by sex.
main <- ggplot(plot_data, aes(x = combined_tusk_weight, y = s_height, color = sex, fill = sex)) +
  geom_point(shape = 21, show.legend = FALSE, alpha = 0.5, size = 3) +
  theme_jk(grid = "XY") +
  labs(x = NULL,
       y = NULL) +
  scale_x_continuous(breaks = seq(0, 125, 25), labels = c(seq(0, 100, 25), "125kg"), position = "top") +
  scale_y_continuous(limits = c(1, 3.5), breaks = 1:3, labels = c(1, 2, "3m")) +
  scale_fill_manual(values = c("Male" = "#331832",
                               "Female" = "#D81E5B")) +
  scale_color_manual(values = c("Male" = darken("#331832"),
                                "Female" = darken("#D81E5B"))) +
  theme(plot.background = element_rect(fill = "grey88", color = NA))

# Inset: per-sex facets with mean +/- sd crosshairs and big text labels,
# later embedded into `main` via annotation_custom().
facets <- ggplot(plot_data, aes(x = combined_tusk_weight, y = s_height, color = sex, fill = sex)) +
  geom_errorbar(data = deviations, aes(x = combined_tusk_weight_mean, ymin = s_height_mean - s_height_sd, ymax = s_height_mean + s_height_sd, color = sex), inherit.aes = FALSE, size = 0.8) +
  geom_errorbar(data = deviations, aes(xmin = combined_tusk_weight_mean - combined_tusk_weight_sd, xmax = combined_tusk_weight_mean + combined_tusk_weight_sd, y = s_height_mean, color = sex), inherit.aes = FALSE, size = 0.8) +
  geom_point(shape = 21, show.legend = FALSE, alpha = 0.2, size = 2) +
  geom_richtext(data = labels, aes(x = x, y = y, label = strip_text), family = "Anton", size = 50, show.legend = FALSE, label.size = unit(0, "mm"), fill = NA, vjust = 0, hjust = 0) +
  geom_richtext(data = deviations, aes(x = 25, y = 1.45, label = label), family = "Lato", show.legend = FALSE, label.size = unit(0, "mm"), fill = NA, vjust = 0, hjust = 0) +
  theme_jk(grid = FALSE) +
  labs(x = NULL,
       y = NULL) +
  scale_x_continuous(breaks = seq(0, 125, 25), labels = c(seq(0, 100, 25), "125kg"), expand = c(0,0)) +
  scale_y_continuous(limits = c(1, 3.5), breaks = 1:3, labels = c(1, 2, "3m"), expand = c(0,0)) +
  scale_fill_manual(values = c("Male" = "#331832",
                               "Female" = "#D81E5B")) +
  scale_color_manual(values = c("Male" = darken("#331832"),
                                "Female" = darken("#D81E5B"))) +
  theme(plot.background = element_rect(fill = "transparent", color = NA)) +
  facet_wrap(~sex) +
  theme(strip.text = element_blank(),
        axis.text.y = element_blank(),
        axis.text.x = element_blank(),
        legend.position = "none")

# Compose the final figure: inset facets embedded in the main panel,
# plus title/subtitle/caption styling.
plot <- main +
  annotation_custom(ggplotGrob(facets), xmin = 40, xmax = 140, ymin = 0.915, ymax = 2.5) +
  labs(title = toupper("Gender Differences in Height and Combined Tusk Weight of East African Elephants"),
       subtitle = "Illustrated below in a scatterplot is the relationship between shoulder height and combined tusk weight in East African Elephants. This data is from the Parker Elephant Data Sheets, which are field data compiled during culling (herd thinning)\noperations intended to mitigate elephant overpopulation in 1965-1969 at environmentally stressed sites: Murchison Falls National Park, Budongo Forest, Tsavo, and Mkomasi recorded by Ian Parker.",
       caption = "**Data**: Parker Elephant Data Sheets library.ufl.edu/spec/manuscript/guides/parker.htm | **Graphic**: @jakekaupp") +
  theme(plot.caption = element_markdown(),
        plot.title.position = "plot",
        plot.title = element_text(family = "Anton", size = 40))

# Write the chart and emit alt text for accessibility.
ggsave(here("day29", "tdcc_day29.png"), plot, width = 18.4, height = 10, device = ragg::agg_png())
altText::alt_text(plot)
|
0f64764dfe4cb89d71513ba98b02c592ac5a5cd1
|
bc86d23f9ce6ff1d2753e5cf0cec753a012aee07
|
/R/Devium Normalization.r
|
d2c0b09c90a90153697f4783e7018870cfaa0160
|
[] |
no_license
|
bw2013/devium
|
a88094d192858c133cfb60ae52d6282805c51cf7
|
e0fa08845c1b8ce6df3551701f640c274aa5985e
|
refs/heads/master
| 2020-12-25T22:58:25.443498
| 2014-06-29T17:11:58
| 2014-06-29T17:11:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,789
|
r
|
Devium Normalization.r
|
# data overview and normalization functions
#function to calculate within and between batch precision for all variables
# Within- and between-batch precision (relative standard deviation, RSD%)
# for every variable (column) of `data`.
#
# Args:
#   data:  numeric samples-by-variables data frame/matrix.
#   batch: factor (or object coercible to one) giving each row's batch.
#   summary.range: NOTE(review) -- declared but never used; the summary
#                  bins below are hard-coded as seq(0, 100, 10).
#   use:   name of the center statistic ("mean" by default) used when
#          averaging RSDs across batches/variables.
#
# Returns a list: per-batch means and sds, the batch-by-variable RSD table,
# per-variable and per-batch average RSDs, and binned count summaries of each.
calc.mRSD<-function(data,batch=data.frame(1:nrow(data)),summary.range=seq(0,100,10),use="mean"){
  library(plyr)
  # Coerce batch to a factor, preserving first-appearance level order;
  # the tryCatch handles both data-frame and plain-vector inputs.
  if(!is.factor(batch)){batch<-tryCatch(factor(batch[,],levels=unique(batch[,])),error=function(e){factor(batch,levels=unique(batch))}); message("Batch converted to factor.")}
  # Main object: batch label bound to the data columns.
  tmp<-data.frame(batch=batch,data)
  # Parametric per-batch summary: mean, sd, and RSD% per batch x variable.
  b.m<-ddply(tmp,.(batch),colwise(mean))
  b.s<-ddply(tmp,.(batch),colwise(sd))
  b.rsd<-b.s/b.m*100
  # Restore the batch label column (the division above mangled it).
  b.rsd[,1]<-b.m[,1]
  # #non-parametric summary
  # b.m2<-ddply(tmp,.(batch),colwise(median))
  # b.s2<-ddply(tmp,.(batch),colwise(IQR))
  # b.rsd2<-b.s2/b.m2*100
  # med.rsd.np<-apply(b.rsd2[,-1],1,median,na.rm=T) #
  # Per-variable summary: average (and sd of) RSD across all batches,
  # then binned counts in 10%-wide intervals.
  analyte.RSD<-data.frame(mean=apply(b.rsd[,-1,drop=F],2,use,na.rm=T), sd=apply(b.rsd[,-1,drop=F],2,sd,na.rm=T))
  colnames(analyte.RSD)[1]<-use# RSD for variables over all batches
  analyte.RSD.summary<-split.bins(obj=analyte.RSD[,1],bins=seq(0,100,10)) # summary for variables over all batches
  analyte.RSD.summary$percent<-round(analyte.RSD.summary$count/sum(analyte.RSD.summary$count)*100,1)
  # Per-batch summary: average RSD across all variables within each batch,
  # with the same binned-count treatment.
  within.batch.RSD<-data.frame(mean=apply(b.rsd[,-1,drop=F],1,use,na.rm=T), sd=apply(b.rsd[,-1,drop=F],1,sd,na.rm=T))
  rownames(within.batch.RSD)<-b.rsd[,1]
  colnames(within.batch.RSD)[1]<-use
  within.batch.RSD.summary<-split.bins(na.omit(within.batch.RSD[,1]),bins=seq(0,100,10)) # ,max(within.batch.RSD[,1]
  within.batch.RSD.summary$percent<-round(within.batch.RSD.summary$count/sum(within.batch.RSD.summary$count)*100,1)
  # Return all intermediate and summary tables.
  list(batch.means=b.m,batch.sd=b.s,all.batch.RSD=b.rsd,variable.RSD=analyte.RSD,batch.RSD=within.batch.RSD,variable.RSD.summary=analyte.RSD.summary,batch.RSD.summary=within.batch.RSD.summary)
}
#create summary objects from object produced by calc.mRSD
# Condense a calc.mRSD() result into human-readable summary tables.
#
# Args:
#   obj:      list produced by calc.mRSD().
#             NOTE(review): the default references a global `gc.perf.raw`;
#             callers should always pass obj explicitly.
#   sig.figs: significant figures for the median/range values.
#
# Returns a list with median RSD + range for batches and variables, plus
# cleaned-up binned-count tables with cumulative percentages.
# Requires the project helper fixlc() (factor/character conversion).
summarize.performance<-function(obj=gc.perf.raw,sig.figs=2){
  # Batch-level: median RSD and its range across batches.
  batch.med.perf<-data.frame(median.RSD=signif(median(obj$batch.RSD[,1],na.rm=TRUE),sig.figs), range=paste(signif(range(obj$batch.RSD[,1],na.rm=TRUE),sig.figs),collapse=", "))
  # Reformat cut()-style interval labels "(a,b]" into plain "a-b".
  tmp<-obj$batch.RSD.summary
  tmp2<-gsub("\\[","",gsub(",","-",gsub("\\]","",gsub("\\(","",fixlc(tmp[,1])))))
  batch.summary<-data.frame(RSD=tmp2,count=tmp[,2],percent=round(tmp[,3],0))
  batch.summary$cumulative.percent<-cumsum(batch.summary$percent)
  # Variable-level: same treatment for the per-analyte RSDs.
  var.med.perf<-data.frame(median.RSD=signif(median(obj$variable.RSD[,1],na.rm=TRUE),sig.figs), range=paste(signif(range(obj$variable.RSD[,1],na.rm=TRUE),sig.figs),collapse=", "))
  tmp<-obj$variable.RSD.summary
  tmp2<-gsub("\\[","",gsub(",","-",gsub("\\]","",gsub("\\(","",fixlc(tmp[,1])))))
  var.summary<-data.frame(RSD=tmp2,count=tmp[,2],percent=round(tmp[,3],0))
  var.summary$cumulative.percent<-cumsum(var.summary$percent)
  # Return the four summary tables.
  list(batch=batch.med.perf,batch.summary=batch.summary,variable=var.med.perf,variable.summary=var.summary)
}
#split vector based on probabilities and calculate counts
# Count how many values of `obj` fall into each quantile interval.
# `probs` supplies the quantile probabilities used as bin edges
# (quartiles by default). Returns a data frame with columns
# `interval` and `count`.
split.prob<-function(obj,probs=seq(0, 1, 0.25)){
  library(plyr)
  edges <- quantile(obj, probs)
  binned <- data.frame(count = obj,
                       interval = cut(obj, edges, include.lowest = TRUE))
  ddply(binned, .(interval), colwise(length))
}
#split object on bins
# Bin the values of `obj` into fixed intervals and count members per bin.
#
# Args:
#   obj:  numeric vector (e.g. RSD percentages).
#   bins: bin edges passed to cut().
#
# Returns a data frame with character column `interval` and column `count`.
# Requires the project helper fixlc().
split.bins<-function(obj,bins=seq(10,100,10)){
  library(plyr)
  interval<-cut(obj,bins,include.lowest = TRUE)
  tmp<-data.frame(count=obj,interval=interval)
  tmp<-ddply(tmp,.(interval),colwise(length))
  # Values outside the bin range get an NA interval from cut(); relabel
  # that group as ">max(bins)" so over-range values are still counted.
  # NOTE(review): values BELOW min(bins) also produce NA and are therefore
  # mislabelled as "> max" -- confirm inputs are >= min(bins).
  int<-fixlc(tmp$interval)
  int[is.na(int)]<-paste0(">",max(bins))
  tmp$interval<-int
  tmp
}
#identify samples with > quantile
# Flag values lying above the `prob` quantile of `val`.
# Returns a logical vector, TRUE where the value exceeds the cutoff.
# NOTE(review): the default for `val` references a global `tmp.data`;
# callers should always pass `val` explicitly.
over.prob<-function(val=tmp.data$leverage,prob=0.95){
  cutoff <- quantile(val, probs = prob)
  val > cutoff
}
#adjust data by batch scalar (add ability to get ratios using QCs and adjust samples)
# Batch-effect correction by scalar ratio: divide each batch's values by the
# ratio of that batch's center (median by default) to the global center.
#
# Args:
#   obj:    samples-by-variables data (coerced to data.frame).
#   factor: batch factor, one level per batch (should align with rows of obj).
#   use:    name of the center statistic ("median" or "mean").
#   train:  optional logical the length of nrow(obj); when given, only the
#           TRUE subset (e.g. QC samples) is used to estimate the ratios,
#           which are then applied to ALL samples.
#
# Returns list(adjusted.data = ratio-corrected data, ratios = batch/global
# center ratios used for the correction).
scalar.batch.adjust<-function(obj,factor,use="median",train=NULL){
  #calculate ratio of median of each factor level to global median
  #return adjusted value and adjustment for each factor (should be numeric else levels could be broken)
  # train can be a logical specifying which subset of the data to use to calculate the ratios
  library(plyr)
  if(!class(obj)=="data.frame"){obj<-as.data.frame(obj)}
  # Select the training subset (rows used to estimate ratios) and the
  # matching batch labels; otherwise train on everything.
  if(is.logical(train)){
    full<-data.frame(obj)
    split.data<-split(full,factor(train))
    train.data<-split.data$"TRUE"
    train.factor<-unlist(split(as.data.frame(factor),factor(train))$"TRUE")
  } else {
    train.data<-obj
    train.factor<-factor
  }
  # Global center of each variable over the training rows.
  global.med<-apply(train.data,2,use,na.rm=TRUE)
  # Per-batch centers over the training rows.
  tmp<-data.frame(batch=train.factor,train.data)
  b.m<-ddply(tmp,.(batch),colwise(use))
  # Ratio of each batch center to the global center (one row per batch).
  med.ratio<-sweep(b.m[,-1,drop=F],2,unlist(global.med),"/")
  # Apply the ratios to the FULL data, batch by batch.
  # NOTE(review): assumes split(obj, factor) yields batches in the same
  # order as the rows of med.ratio -- confirm the factor level ordering.
  big.l<-split(obj,factor)
  res<-lapply(1:length(big.l),function(i){
    tmp<-big.l[[i]]
    rat<-unlist(med.ratio[i,])
    res<-sweep(tmp,2,rat,"/")
    res[is.na(res)]<-0 # for ratios with zero
    res
  })
  adjusted.data<-do.call("rbind",res)
  # Replace infinities (from division by a zero ratio) with NA.
  adjusted.data[adjusted.data=="Inf"]<-NA
  list(adjusted.data=adjusted.data,ratios=med.ratio)
}
#plot a single variable line plot
# Line/scatter plot of a single variable in acquisition order, with a LOESS
# trend per group and optional per-group facetting.
#
# Args:
#   val:        single-column data frame (or vector) of values to plot.
#   groups:     optional factor-like object splitting points into groups.
#   view.split: NULL for one panel; "y" for stacked row facets per group,
#               any other non-NULL value for column facets.
#   theme/extra: ggplot2 components appended to the plot.
#   se:         draw the LOESS confidence ribbon?
#   span:       LOESS span.
#   print.plot: if TRUE print the plot, else return the ggplot object.
summary.lineplot<-function(val,groups=NULL,view.split=NULL,theme=NULL,se=FALSE,extra=NULL,span=0.75,print.plot=TRUE){
  library(ggplot2)
  # Data should minimally contain a single variable of interest (val) and
  # optionally a factor identifying groups.
  vis.data<-data.frame(value=unlist(unname(val)))
  # Bug fix: the original used 1:length(vis.data$val), which (a) relied on
  # `$` partial matching of "val" to "value" and (b) yields c(1, 0) for
  # empty input. seq_len(nrow()) is explicit and empty-safe.
  vis.data$id<-seq_len(nrow(vis.data))
  if(is.null(groups)){vis.data$groups<-1;vis.data$color<-1} else {vis.data$groups<-factor(as.matrix(groups))}
  # Optional facetting by group, by row ("y") or by column (anything else).
  if(is.null(view.split)){
    add.facet<-NULL
  } else {
    vis.data$facet<-vis.data$groups
    if(view.split=="y"){
      add.facet<-facet_grid(facet ~ .)
    } else {
      add.facet<-facet_grid( . ~ facet)
    }
  }
  # Points plus per-group LOESS trend; `theme`/`extra` let the caller
  # inject arbitrary ggplot components. (show_guide is the legacy ggplot2
  # spelling; kept for compatibility with the rest of this code base.)
  p<-ggplot(data=vis.data,aes(y=value,x=id)) + geom_point(aes(color=groups),alpha=.75,show_guide=FALSE)+
    stat_smooth(aes(group=groups,color=groups),method = "loess", size = 1,se=se,alpha=.1,span=span) + theme + add.facet + xlab(colnames(groups))+ylab(colnames(val))+
    guides(col = guide_legend(title = colnames(groups))) + extra
  if(print.plot){
    print(p)
  } else {
    return(p)
  }
}
#box plot for 2 factors with loess smoothing
# Box plot of one variable across the levels of `groups`, optionally split
# on a second factor (`split.on`) with a LOESS trend across group levels.
#
# Args:
#   val:      single-column data frame (or vector) of values.
#   groups:   factor-like object defining the x-axis categories.
#   split.on: optional second factor; adds fill colors and a trend line.
#   theme/extra: ggplot2 components appended to the plot.
#   se/span:  LOESS ribbon flag and span.
#   print.plot: if TRUE print the plot, else return the ggplot object.
summary.boxplot2<-function(val,groups=NULL,split.on=NULL,theme=NULL,se=FALSE,span=0.75,extra=NULL,print.plot=TRUE){
  #data should minimally contain a single variable of interest(val) and additionally factor identifying groups
  library(ggplot2)
  vis.data<-data.frame(value=unlist(val))
  if(is.null(groups)){
    vis.data$groups<-1;vis.data$color<-1
  } else {
    vis.data$groups<-factor(as.matrix(groups))
  }
  # With no split factor: plain boxplot, no legend, no smoother.
  # With one: legend titled after the split column and a per-level LOESS.
  if(is.null(split.on)){
    vis.data$split.on<-""
    l.guide<-NULL
    # extra<-scale_fill_manual(values ="grey50")
    smooth<-NULL
  } else {
    vis.data$split.on<-factor(as.matrix(split.on))
    l.guide<-guides(fill = guide_legend(title = colnames(split.on)))
    # extra<-NULL
    smooth<-stat_smooth(aes(group=split.on,color=split.on),method = "loess", size = 1.25,se=se,alpha=.1,show_guide=FALSE,span=span)
  }
  p<-ggplot(data=vis.data,aes(y=value,x=groups)) + geom_boxplot(aes(fill=split.on),alpha=.75) +
    smooth + theme + xlab(colnames(groups))+ ylab(colnames(val))+ l.guide
  p<-p+extra
  if(print.plot){
    print(p)
  } else {
    return(p)
  }
}
#create summary plot RSD% analyte mean
# Scatter of per-variable RSD% vs (log) mean intensity, overlaying one or
# more normalization methods for comparison.
#
# Args:
#   obj:     one calc.mRSD() result, or a list of them (one per method).
#   name:    labels for the methods, used in the legend.
#   size/alpha: point aesthetics.
#   use.log: plot against log(mean + 1) instead of the raw mean.
#   se:      draw LOESS confidence ribbons.
#   theme/extra: ggplot2 components appended to the plot.
RSD.means.plot<-function(obj=list(gc.perf.raw,gc.raw.t1),name=c("Raw","FAME L2 norm"),size=3,alpha=.75,use.log=TRUE,se=FALSE,theme=NULL,extra=NULL){
  library(ggplot2)
  # Detect a single calc.mRSD() result (7 named elements) and wrap it in a
  # list so the loop below is uniform.
  # NOTE(review): length(names(obj)) == 7 is a fragile way to distinguish
  # "one result" from "list of results" -- confirm if calc.mRSD changes.
  if(length(names(obj))==7){obj<-list(obj)}
  # obj can be a list of objects produced by calc.mRSD;
  # name will be used for legends.
  # One row per variable per method: its RSD and mean across batches.
  res<-lapply(1:length(obj),function(i){
    tmp<-obj[[i]]
    res<-data.frame(method=name[i],RSD=tmp$variable.RSD[,1],mean=apply(tmp$batch.means[-1],2,mean,na.rm=TRUE))
    res$log.mean<-log(res$mean+1)
    res
  })
  vis.data<-do.call("rbind",res)
  # Same plot either way; only the x variable (log mean vs mean) differs.
  if(use.log){
    p<-ggplot(vis.data,aes(x=log.mean,y=RSD,group=method,color=method,fill=method))+
      stat_smooth(method = "loess", size = 1,show_guide=FALSE ,se = se,alpha=.75)+
      geom_point(alpha=alpha,size=size) +
      theme + xlab("log Mean")+ ylab("RSD")+extra #+scale_color_manual(values=rainbow(3))+
    print(p)
  } else {
    p<-ggplot(vis.data,aes(x=mean,y=RSD,group=method,color=method,fill=method))+
      geom_point(alpha=.75)+
      stat_smooth(method = "loess", size = 1,show_guide=FALSE ,se = se,alpha=.75)+
      theme + xlab("Mean")+ ylab("RSD")+extra #+scale_color_manual(values=rainbow(3))+
    print(p)
  }
}
#bar plot to summarize performance
# Grouped bar chart of binned RSD summaries (from summarize.performance()),
# comparing one or more normalization methods.
#
# Args:
#   obj:      list of summarize.performance() results (one per method).
#   show:     "variable" to plot the per-variable summary, anything else
#             for the per-batch summary.
#   plot.obj: which summary column to plot ("count", "percent", ...).
#   name:     method labels for the legend.
#   theme/extra: ggplot2 components appended to the plot.
#   ylabel:   y-axis label.
RSD.counts.plot<-function(obj,show="variable",plot.obj="count",name="",theme=NULL,extra=NULL,ylabel="number of metabolites"){
  # Pick the variable-level or batch-level summary table from each method.
  if(show=="variable"){
    #variables
    res<-lapply(1:length(obj),function(i){
      tmp<-obj[[i]]
      data.frame(method=name[i],tmp$variable.summary)
    })
  } else {
    #samples
    res<-lapply(1:length(obj),function(i){
      tmp<-obj[[i]]
      data.frame(method=name[i],tmp$batch.summary)
    })
  }
  vis.data<-do.call("rbind",res)
  # Hack to get the sort correct: alphabetical order of the "a-b" labels,
  # then move any ">max" rows to the end.
  vis.data<-vis.data[order(fixlc(vis.data$RSD)),]
  fix<-grep(">",vis.data$RSD)
  if(length(fix)>0){
    tmp<-rbind(vis.data[-fix,],vis.data[fix,])
    vis.data<-tmp
  }
  # Freeze the display order of the interval labels.
  vis.data$interval<-factor(fixlc(vis.data$RSD), levels=unique(fixlc(vis.data$RSD)))
  # Switch which summary column is plotted on the y axis.
  vis.data$plot.obj<-vis.data[,plot.obj]
  # Axis break spacing adapts to the magnitude of the plotted values.
  upper<-max(vis.data$plot.obj)
  ulim<-if(upper>=10){10} else {upper}
  dlim<-if(upper>=10){2} else {1}
  ggplot(data=vis.data,aes(x=interval,y=plot.obj,fill=method,group=method))+ geom_bar(position=position_dodge(),stat="identity")+ theme +
    scale_y_continuous(minor_breaks = seq(0 , upper, dlim), breaks = seq(0, upper, ulim)) + xlab("RSD")+ylab(ylabel)+ extra #scale_fill_brewer(palette="Set1")
}
#conduct LOESS normalization on a data frame or matrix
# LOESS-based drift normalization: fit value ~ y (e.g. run order) for each
# column of x and return the residuals, optionally rescaled.
#
# Args:
#   x:        samples-by-variables data frame/matrix.
#   y:        numeric predictor shared by all columns (e.g. injection order).
#   subset:   logical selecting rows used to FIT each curve (e.g. QCs);
#             residuals are still returned for all rows.
#   progress: show a text progress bar.
#   scale.to: optional name of a column statistic (e.g. "median"); when set,
#             residuals are combined with it column-wise via `scale.with`
#             (e.g. "+" to re-center on the original level).
#   span:     LOESS span, recycled to one value per column.
#   ...:      further arguments passed to loess().
#
# Returns a matrix of residuals with the same dimensions as x.
loess.normalization<-function(x,y,subset=NULL,progress=TRUE,scale.to=NULL,scale.with=NULL,span=0.75,...){
  #subset = logical specifying which subset of the data to be used for fitting
  if (progress == TRUE){ pb <- txtProgressBar(min = 0, max = ncol(x), style = 3)} else {pb<-NULL}
  # Recycle span to one value per column (the tryCatch covers both vector
  # and data-frame x).
  span<-tryCatch(unlist(matrix(span,nrow=length(x))),error=function(e){unlist(matrix(span,nrow=nrow(x)))}) # recycle
  res<-do.call("cbind",lapply(1:ncol(x),function(i){
    tmp.x<-x[,i]
    fit<-loess(tmp.x~y,subset=subset,span=span[i],...)
    # NOTE(review): newdata names the RESPONSE (tmp.x), not the predictor
    # `y`; predict() then appears to resolve `y` from the enclosing
    # environment, so all rows (not just `subset`) get predictions.
    # This works here but is fragile -- confirm before refactoring.
    pred<-predict(fit,data.frame(tmp.x=tmp.x))
    if (progress == TRUE){setTxtProgressBar(pb, i)}
    return(tmp.x-pred) # residuals for train and test
  }))
  if (progress == TRUE){close(pb)}
  # Optional rescaling of residuals by a per-column statistic of x.
  if(!is.null(scale.to)){
    scale<-apply(x,2,scale.to,na.rm=TRUE)
    res<-sweep(res,2,scale,scale.with)
  }
  return(res)
}
#cross-validation based tuning of LOESS
# Cross-validation-based tuning of the LOESS span for each column of `data`.
# Wrapper around bisoreg::loess.wrapper; returns the optimal span per column
# (NA for columns where the fit fails).
#
# Args:
#   data:      samples-by-variables data frame.
#   y:         shared numeric predictor (e.g. run order).
#   folds:     number of CV folds.
#   span.vals: candidate spans to evaluate.
#   progress:  show a text progress bar.
tune.loess<-function(data,y,folds=7,span.vals=seq(.25,1,by=.05),progress=TRUE){
  # X can be a data frame
  # wrapper for bisoreg::loess.wrapper
  # returns optimal span for each column in X
  library(bisoreg)
  if (progress == TRUE){ pb <- txtProgressBar(min = 0, max = ncol(data), style = 3)} else {pb<-NULL}
  res<-unlist(lapply(1:ncol(data),function(i){
    # Failed fits (e.g. all-NA columns) yield NA rather than aborting.
    x<-tryCatch(loess.wrapper(x=data[,i], y=y, span.vals = span.vals, folds = folds)$pars$span, error=function(e){NA})
    if (progress == TRUE){setTxtProgressBar(pb, i)}
    return(x)
  }))
  if (progress == TRUE){close(pb)}
  cat("\n")
  return(res)
}
# Relative standard deviation (coefficient of variation): sd / mean.
# Extra arguments (e.g. na.rm = TRUE) are forwarded to both sd() and mean().
calc.RSD <- function(x, ...) {
  sd(x, ...) / mean(x, ...)
}
#test
# Ad-hoc smoke test for tune.loess() / loess.normalization().
# NOTE(review): relies on 'tmp.data' and 'tmp.retention.time' existing in the
# calling environment; neither is defined anywhere in this file.
test<-function(){
spans<-tune.loess(tmp.data[,1:2],y=tmp.retention.time,folds=5)
loess.normalization(x=tmp.data[,1:2],y=tmp.retention.time,span=spans)
}
|
6897612ce32ab809ad84c71267b5e745ad692642
|
57ffef364d9d7a34fea904d0b614ebac6969de07
|
/code/Bayes.R
|
5f54c7d8358588d28629775fb3adf420f8b16a9e
|
[] |
no_license
|
rbnphlp/Credit-Risk-modelling--Group-Project
|
d93fc6ee13e3aba2cc6dc30632891d4469f6c9ed
|
08d14b080929bcfe11f3cbd9894c73f5d6bd26d3
|
refs/heads/master
| 2021-01-20T11:00:29.275571
| 2017-08-28T16:09:04
| 2017-08-28T16:09:04
| 101,661,328
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,521
|
r
|
Bayes.R
|
# Author: 030007254.
#
# Load the data and the required packages.
load("imputed.RData")
library(e1071)
library(caret)
library(pROC)
##########################################################################
#
# Function implements out of the bag (OOB) validation for a
# naive Bayes classifier (NBC) model.
#
# Inputs:
#
# data: Data set to fit our classifier too.
# N: Number of bootstrap resamples for the test data.
# n: Size of test data set for each resample.
#
# Output:
#
# The average balanced accuracy, sensitivity, and specificity.
#
#########################################################################
# Out-of-the-bag (OOB) validation of a naive Bayes classifier.
#
# data : data set to fit the classifier to (outcome read from column 1 by fit.nbc)
# N    : number of bootstrap resamples of the test data
# n    : size of the held-out test set in each resample
#
# Returns a named vector with the average balanced accuracy, sensitivity and
# specificity over the N resamples.
bayes.oob <- function(data, N = 100, n = 1000){
  quality <- matrix(NA, nrow = N, ncol = 3)
  for (rep in 1:N) {
    # Draw the held-out rows; everything else is used for training.
    held.out <- sample(1:nrow(data), n)
    # Fit the NBC, predict on the held-out rows and score the confusion matrix.
    quality[rep, ] <- confu.meas(fit.nbc(data[-held.out, ], data[held.out, ]))
  }
  averages <- colSums(quality) / N
  names(averages) <- c("balanced accuracy", "sensitivity", "specificity")
  return(averages)
}
############################################################################
#########################################################################
#
# Function carries out k-fold cv for a NBC model.
#
# Inputs:
#
# data: data set the NBC is fitted too.
# k: number of folds.
#
# Output:
#
# A vector of length 3, containing the average balanced accuracy,
# sensitivity, and specificity.
#
#######################################################################
# k-fold cross-validation of a naive Bayes classifier.
#
# data : data set the NBC is fitted to (outcome read from column 1 by fit.nbc)
# k    : number of folds
#
# Returns a named vector with the average balanced accuracy, sensitivity and
# specificity across the k folds.
bayes.cv <- function(data,k){
  # Partition the row indices into k folds (caret::createFolds).
  fold.idx <- createFolds(1:nrow(data), k = k)
  quality <- matrix(NA, nrow = k, ncol = 3)
  for (fold in 1:k) {
    hold <- fold.idx[[fold]]
    # Train on everything outside the fold, evaluate on the fold itself.
    quality[fold, ] <- confu.meas(fit.nbc(data[-hold, ], data[hold, ]))
  }
  averages <- colSums(quality) / k
  names(averages) <- c("balanced accuracy", "sensitivity", "specificity")
  return(averages)
}
################################################################################
#############################################################################
#
# Auxillary function that calculates three measures of model quality.
#
# Inputs:
#
# M: An object of type confusion matrix for a binary classifier.
#
# Output:
#
# A vector of length 3, containing
# the balanced accuracy, sensitivity, and specificity.
#
################################################################################
# Extract three quality measures from a caret confusion matrix.
#
# M : an object of class confusionMatrix for a binary classifier (only the
#     $byClass vector is accessed).
#
# Returns c(balanced accuracy, sensitivity, specificity); element names are
# inherited from M$byClass.
# Fix: removed an unreachable second `return(results)` that referenced an
# undefined variable.
confu.meas <- function(M){
  sens <- M$byClass[1]     # byClass[1]  = sensitivity
  spec <- M$byClass[2]     # byClass[2]  = specificity
  bal.acc <- M$byClass[11] # byClass[11] = balanced accuracy
  return(c(bal.acc, sens, spec))
}
###############################################################################
######################################################################
#
# Auxillary function. Fits a NBC classifier and returns a confusion matrix.
#
# Inputs:
#
# data1: data set NBC is fitted too.
# data2: data set that predictions are made on.
#
# Output:
#
# A confusion matrix
#
#########################################################################
# Fit a naive Bayes classifier and return its confusion matrix.
#
# data1 : training data; must contain the outcome column 'GOOD'
# data2 : data predictions are made on (defaults to the training data itself)
#         NOTE(review): observed classes are taken from data2[,1], i.e. 'GOOD'
#         is assumed to be the FIRST column -- confirm for new data sets.
#
# Returns a caret::confusionMatrix object with positive class "1".
# Requires e1071 (naiveBayes) and caret (confusionMatrix).
fit.nbc <- function(data1, data2 = data1){
# Fit the model to the training data.
model <- naiveBayes(GOOD ~. , data = data1)
# Obtain the predictions for test data.
pred <- predict(model, newdata = data2)
# Calculate the confusion matrix.
confu.matrix <- confusionMatrix(pred, data2[,1], positive = "1")
return(confu.matrix)
}
##########################################################################
# Load the imputed data sets into R:
#   data1     -- obtained using mean and mode imputation
#   data.mice -- obtained using the mice package
# NOTE(review): imputed.RData was already loaded at the top of this file;
# this second load() is redundant but harmless.
load("imputed.RData")
# 10-fold CV of the NBC on the mean/mode-imputed data set.
bayes.cv(data1,10)
# Now assess model quality using a ROC curve.
# Fit a Naive Bayes Classifier (NBC).
model <- naiveBayes(GOOD~. , data = data1)
# Class probabilities for the positive class (column 1 of the "raw" output).
# NOTE(review): predictions are made on the training data itself, so the ROC
# and AUC below are optimistic (in-sample) estimates.
pred.class <- predict(model, data1, type = "raw")[,1]
roc.curve <- roc(data1$GOOD, pred.class, ci = TRUE)
# Calculate the AUC.
auc(roc.curve)
# Calculate the 95% confidence interval for the AUC.
roc.curve$ci
# Calculate 95% confidence intervals for sensitivity and specificity.
ci(roc.curve, of="se")
ci(roc.curve, of="sp")
plot(roc.curve, print.auc = TRUE,
identity.col = "red", legacy.axes = TRUE, xlab = "FPR", ylab = "TPR")
# Repeat the above for the imputed data set obtained using mice.
bayes.cv(data.mice,10)
# Now assess model quality using a ROC curve.
# Fit a Naive Bayes Classifier (NBC).
model <- naiveBayes(GOOD~. , data = data.mice)
# Obtain model predictions (again in-sample; see note above).
pred.class <- predict(model, data.mice, type = "raw")[,1]
roc.curve <- roc(data.mice$GOOD, pred.class, ci = TRUE)
# Calculate the AUC.
auc(roc.curve)
# Calculate the 95% confidence interval for the AUC.
roc.curve$ci
plot(roc.curve, print.auc = TRUE,
identity.col = "red", legacy.axes = TRUE, xlab = "FPR", ylab = "TPR")
|
99b8fcd7125b6ace3dbd1a489a9c5ce0a1b203e6
|
3c376428e65f38f6a1188ca6fb6a8319d22de501
|
/R/fsc.run.R
|
5325d3ad59c0d96590ea901b40b704a3dfafd2b5
|
[] |
no_license
|
thierrygosselin/skeleSim
|
84c114fcb0684120b4be298864dfbb4c7d06b508
|
dbbfd19ccfe1f45509b076083dc026d6281ce4ad
|
refs/heads/master
| 2021-01-18T00:51:07.706902
| 2016-07-28T17:05:54
| 2016-07-28T17:05:54
| 64,245,124
| 0
| 0
| null | 2016-07-26T18:32:13
| 2016-07-26T18:32:13
| null |
UTF-8
|
R
| false
| false
| 3,075
|
r
|
fsc.run.R
|
#' @name fsc.run
#' @title Run fastsimcoal
#' @description Run fastsimcoal
#'
#' @param params a \linkS4class{skeleSim.params} object.
#'
#' @return a modified \linkS4class{skeleSim.params} object with the results of
#' a fastsimcoal run.
#'
#' @export
#'
fsc.run <- function(params) {
  # Run fastsimcoal for the current scenario of a skeleSim.params object and
  # store the replicate sample in params@rep.sample. Returns the modified
  # params object.
  label <- currentLabel(params)
  sc <- currentScenario(params)
  # Clear out files left over from a previous run of this scenario.
  # NOTE(review): the existence test is on params@wd but files are removed
  # from the 'label' directory -- confirm this mismatch is intentional.
  if(file.exists(params@wd)) for(f in dir(label, full.names = TRUE)) file.remove(f)
  # Map the scenario's locus code ("DNA"/"MICROSAT") to fastsimcoal's type.
  # NOTE(review): "snp" can never be selected by this two-element lookup.
  locus.type <- c("dna","msat","snp")[which(c("DNA","MICROSAT")==sc@simulator.params@locus.params[1,1])]
  params@rep.sample <- fastsimcoal(
    pop.info = fscPopInfo(pop.size=sc@pop.size,
      sample.size=sc@sample.size,
      sample.times=sc@simulator.params@sample.times,
      growth.rate=sc@simulator.params@growth.rate),
    locus.params = fscLocusParams(locus.type=locus.type,num.loci=1,mut.rate=sc@simulator.params@locus.params[,4],
      chromosome=seq_len(dim(sc@simulator.params@locus.params)[1]),
      # Fix: use if/else rather than ifelse() -- ifelse() cannot return NULL
      # and errored for every non-DNA locus type.
      sequence.length=if (locus.type == "dna") sc@sequence.length else NULL),
    mig.rates = sc@migration,
    hist.ev = sc@simulator.params@hist.ev,
    label = label,
    quiet = params@quiet,
    exec = sc@simulator.params@fastsimcoal.exec,
    num.cores = 1
  )
  return(params)
}
#' @name fsc.write
#' @title Write fastsimcoal files
#' @description Run fastsimcoal
#'
#' @param params a \linkS4class{skeleSim.params} object.
#'
#' @return a modified \linkS4class{skeleSim.params} object with the results of
#' a fastsimcoal run.
#'
#' @export
#'
fsc.write <- function(params) {
  # Write fastsimcoal input files for every scenario of a skeleSim.params
  # object (one "<label>-<s>" file set per scenario).
  numsc <- length(params@scenarios)
  for (s in seq_len(numsc))  # seq_len: safe when there are zero scenarios
  {
    params@current.scenario <- s
    sc <- currentScenario(params)
    label <- paste0(currentLabel(params),"-",s)
    # Clear out files left over from a previous run of this scenario.
    # NOTE(review): the existence test is on params@wd but files are removed
    # from the 'label' directory -- confirm this mismatch is intentional.
    if(file.exists(params@wd)) for(f in dir(label, full.names = TRUE)) file.remove(f)
    # Map the scenario's locus code ("DNA"/"MICROSAT") to fastsimcoal's type.
    # NOTE(review): "snp" can never be selected by this two-element lookup.
    locus.type <- c("dna","msat","snp")[which(c("DNA","MICROSAT")==sc@simulator.params@locus.params[1,1])]
    tmp <- fscWrite(
      pop.info = fscPopInfo(pop.size=sc@pop.size,
        sample.size=sc@sample.size,
        sample.times=sc@simulator.params@sample.times,
        growth.rate=sc@simulator.params@growth.rate),
      locus.params = fscLocusParams(locus.type=locus.type,num.loci=1,
        mut.rate=sc@simulator.params@locus.params[,4],
        chromosome=seq_len(dim(sc@simulator.params@locus.params)[1]),
        # Fix: use if/else rather than ifelse() -- ifelse() cannot return NULL
        # and errored for every non-DNA locus type.
        sequence.length=if (locus.type == "dna") sc@sequence.length else NULL),
      mig.rates = sc@migration,
      hist.ev = sc@simulator.params@hist.ev,
      label = label
    )
  }
}
|
bc1badfcba5731500819eb4eeade7d5aae9c4e54
|
bcb9aea78c90f9e243ddf3a82524796b0ecf52a8
|
/man/imputeLFQ.Rd
|
9585829888bc4576f7d60f12bbd8b25d210bf58c
|
[
"MIT"
] |
permissive
|
MassDynamics/MassExpression
|
ab35451ac89662b992d3633b5c06bea5ce2c521c
|
24b59e6cb7afc07b8b4b59473c72dca93582fe41
|
refs/heads/main
| 2023-05-10T15:38:58.488481
| 2023-05-04T22:43:13
| 2023-05-04T22:43:13
| 377,716,166
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 815
|
rd
|
imputeLFQ.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imputeLFQ.R
\name{imputeLFQ}
\alias{imputeLFQ}
\title{This function does MNAR imputation}
\usage{
imputeLFQ(myQuantDT, id_type, int_type, f_imputeStDev, f_imputePosition)
}
\arguments{
\item{myQuantDT}{quantification data}
\item{id_type}{the id column of myQuantDT that identifies each feature (row) to be imputed}
\item{int_type}{the column of myQuantDT to be imputed}
\item{f_imputeStDev}{The Standard Deviation parameter for MNAR Imputation}
\item{f_imputePosition}{The Position parameter for MNAR Imputation}
}
\value{
quantification data with missing values imputed
}
\description{
This function does MNAR imputation
}
\examples{
## dt_int <- imputeLFQ(myQuantDT = dt_int,
## id_type = "id",
## int_type = "log2NInt", #log2Intensity
## 0.3,
## 1.8,
## )
}
|
2e6946bf677d032bb6fa5f21dba877d07fe23d4a
|
61c188bba8f228b0f14f4bae7c2fa3dcd1f7b3a2
|
/man/getCurrentInBed.Rd
|
2946c9ee7b0d9684bd411191bc2a488d68893ee6
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
covid19br/now_fcts
|
24cb3b7bcbf47b827e50fec43f0dd9647c89dde4
|
44479971618513ef23e82ac277c749b8384e12f8
|
refs/heads/master
| 2023-02-27T01:34:07.757658
| 2021-02-05T20:41:10
| 2021-02-05T20:41:10
| 273,057,036
| 0
| 1
|
CC0-1.0
| 2020-07-07T00:27:17
| 2020-06-17T19:04:42
|
R
|
UTF-8
|
R
| false
| true
| 304
|
rd
|
getCurrentInBed.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getCurrentInBed.R
\name{getCurrentInBed}
\alias{getCurrentInBed}
\title{getCurrentInBed}
\usage{
getCurrentInBed(df, date, UTI)
}
\arguments{
\item{df}{df}
\item{date}{date}
\item{UTI}{UTI}
}
\description{
getCurrentInBed
}
|
7833aa39549d6ed98c0e62a342d9dad240f30dd2
|
f5224269ceced4aaeb094a2a16096794c9ce2761
|
/SARS-CoV-2_TimeSeries/parameters.R
|
2a36b443465a59cf8f0f0659f7527e4737d8706e
|
[
"MIT"
] |
permissive
|
jilimcaoco/MPProjects
|
2842e7c3c358aa1c4a5d3f0a734bb51046016058
|
5b930ce2fdf5def49444f1953457745af964efe9
|
refs/heads/main
| 2023-06-15T04:00:46.546689
| 2021-06-29T02:57:46
| 2021-06-29T02:57:46
| 376,943,636
| 0
| 0
|
MIT
| 2021-06-29T02:57:47
| 2021-06-14T20:08:32
| null |
UTF-8
|
R
| false
| false
| 707
|
r
|
parameters.R
|
# Project-wide configuration for the SARS-CoV-2 time-series pipeline.
# SECURITY(review): live database credentials (user/password) are hard-coded
# in source control. Move them to environment variables or a git-ignored
# local config file, and rotate the exposed password.
parameters <- list(
databases = list(
# NOTE(review): the key is 'covid18cq1' but the schema is 'covid19cq1' --
# possible typo in the key name; confirm against callers before renaming.
covid18cq1 = list(
host = "covid19primary.cgymeokijgns.us-east-1.rds.amazonaws.com",
port = 3306,
user = "covid19primary",
password = "atop-9most-5Inn-Dandruff9",
schema = "covid19cq1"
)
),
# Conda environment used by the pipeline's Python steps.
python_env = "/home/ubuntu/anaconda3/envs/sextonlab",
base_dir = "/home/ubuntu/projects/SARS-CoV-2_TimeSeries",
plate_ids_fname = "raw_data/plate_ids_20210125.tsv",
# google drive path:
# momeara_lab/Morphological Profiling/SARS-CoV-2/Screening Runs/SARS Project Plate Log
plate_map_googlesheet_id = "1sJ7UekU9aEh-56Sy4mpeOmVCGlEh8wcnKjj6ubtBI-o"
)
|
c80282369210789fca9d738511412488ac522fe9
|
622436e52cf1ffb105b2f9af141cabc22ac51246
|
/P1/Prueba1.R
|
3c54a39719b598b6e9195e03dbce1ea26dacdbc9
|
[] |
no_license
|
EdePB/Simulacion
|
6f25d97a0f3039f3a8c502fb5a13ec1b9db77206
|
1846530bbf16a99052a0400c7d799deb77bf1c5b
|
refs/heads/master
| 2020-12-24T07:04:04.794711
| 2017-12-12T08:36:40
| 2017-12-12T08:36:40
| 99,721,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
r
|
Prueba1.R
|
# Random walk in 'dim' dimensions for 'dur' steps.
# At each step one coordinate (chosen uniformly) moves +1 or -1 with equal
# probability. Returns the number of steps at which, after moving, the walk
# sat exactly at the origin.
# Fixes: removed the unused 'datos' local; seq_len(dur) so dur = 0 performs
# no steps (1:dur would have iterated twice).
caminata <- function(dim, dur){
  pos <- rep(0, dim)
  posOrigen <- rep(0, dim)
  orig <- 0
  for (t in seq_len(dur)) {
    cambiar <- sample(1:dim, 1)   # coordinate to move
    cambio <- 1
    if (runif(1) < 0.5) {
      cambio <- -1
    }
    pos[cambiar] <- pos[cambiar] + cambio
    if (all(pos == posOrigen)) {  # back at the origin after this step?
      orig <- orig + 1
    }
  }
  return(orig)
}
# Render a boxplot of the collected walk data to a PNG.
# Fixes: 'bloxpot' -> boxplot, 'mean=' -> main=. The original loop opened the
# same PNG device 10 times without closing it (device leak) and replotted
# identical content, so the device is now opened once.
# NOTE(review): 'datos' is never populated in this script -- presumably each
# dimension's caminata() results were meant to be collected into it first.
png("p1er.png")
boxplot(data.matrix(datos), xlab="dimension", ylab="posicionOrigen", main="analisis")
graphics.off()
|
4a95dcb71c07f5e825327effb97b028fa9141e32
|
05a2342858956ba4922a8c0a15a9eb70c28cae7b
|
/r-essentials-gists/ch0526.R
|
c83f5a01c53c1020d4ebafd082b60549b75955f6
|
[] |
no_license
|
jtlai0921/AEL023000_-
|
c0120f78d4c195a703263d57ae76de8544d5771a
|
d9a95c63bda951b0e2f8bbb338b63bb388ca8005
|
refs/heads/master
| 2020-12-19T02:26:47.710279
| 2020-01-22T14:37:24
| 2020-01-22T14:37:24
| 235,592,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
ch0526.R
|
# Demonstrate indexing a 2 x 2 x 5 array (filled column-major with 1:20).
my_arr <- array(1:20, dim = c(2, 2, 5))
my_arr[1, 2, 2] # select the single element 7
my_arr[1, , 2] # select the first row of the second matrix
my_arr[, 2, 2] # select the second column of the second matrix
|
e9a2f1c7760e687e7b28668472f436a9174df375
|
02741e2e8e6d693c0b770a9b34ce33cfc8c6e504
|
/R/CreateConnection.R
|
e9c77c75949df2c59ac38fe96a5fcf2530b3ffc3
|
[] |
no_license
|
RGLab/ImmuneSpaceR
|
dc6baa18a9a574692d352f775b5a3c4efdb1908d
|
165c75953186548517bcf1fa26204c22bc1ae164
|
refs/heads/main
| 2022-12-21T12:18:18.942221
| 2022-12-14T22:36:51
| 2022-12-14T22:36:51
| 20,756,066
| 25
| 16
| null | 2022-12-13T17:52:17
| 2014-06-12T05:40:31
|
R
|
UTF-8
|
R
| false
| false
| 1,910
|
r
|
CreateConnection.R
|
#' @title CreateConnection
#'
#' @name CreateConnection
#'
#' @param study A \code{"character"} vector naming the study.
#' @param login A \code{"character"}. Optional argument. If there is no netrc
#' file a temporary one can be written by passing login and password of an
#' active ImmuneSpace account.
#' @param password A \code{"character"}. Optional. The password for the selected
#' login.
#' @param verbose A \code{"logical"} whether to print the extra details for
#' troubleshooting.
#' @param onTest A \code{"logical"} whether to connect to the test server
#' (https://datatools-dev.immunespace.org/) instead of the production server
#' (https://datatools.immunespace.org/).
#'
#' @description Constructor for \code{ImmuneSpaceConnection} class.
#'
#' @details Instantiates an \code{ImmuneSpaceConnection} for \code{study}
#' The constructor will try to take the values of the various `labkey.*`
#' parameters from the global environment. If they don't exist, it will use
#' default values. These are assigned to `options`, which are then used by the
#' \code{ImmuneSpaceConnection} class.
#'
#' @return an instance of an \code{ImmuneSpaceConnection}
#'
#' @seealso \code{\link{ImmuneSpaceConnection}}
#'
#' @examples
#' \dontrun{
#' # Single study
#' con <- CreateConnection("SDY269")
#' # Cross study
#' con <- CreateConnection("")
#' }
#'
#' sdy <- try(CreateConnection("SDY269"))
#' if (inherits(sdy, "try-error")) {
#' warning("Read the Introduction vignette for more information on how to set
#' up a .netrc file.")
#' }
#' @export
#' @importFrom utils packageVersion
#' @importFrom curl has_internet nslookup
CreateConnection <- function(study = NULL,
login = NULL,
password = NULL,
verbose = FALSE,
onTest = FALSE) {
# Thin wrapper: construction is delegated entirely to the ISCon R6 class
# generator (see the roxygen block above for the argument contract).
ISCon$new(study, login, password, verbose, onTest)
}
|
a4b1250fdb6f76a95e3e6e62bd5509481d3bc9e9
|
ec1917f9fd8b89252fc25e98943a0318955a1db5
|
/Plot1.R
|
ad9d80b40e10298d0cd1e0ac6bf9ef71f6405b6f
|
[] |
no_license
|
subratasaharia/ExData_Plotting1
|
740fc19079f47bc79a1a8d6ad96fdec384e5d343
|
1e4a02ef4e43211d2c7f5ace7f557f971a65c9f5
|
refs/heads/master
| 2020-12-24T09:07:48.647984
| 2016-11-11T11:47:28
| 2016-11-11T11:47:28
| 73,307,208
| 0
| 0
| null | 2016-11-09T17:48:50
| 2016-11-09T17:48:50
| null |
UTF-8
|
R
| false
| false
| 809
|
r
|
Plot1.R
|
# Course Project 1: Exploratory data analysis.
# Reads two days (2007-02-01 / 2007-02-02) of the household power consumption
# data and writes a histogram of Global Active Power to Plot1.png.
# NOTE(review): the zip file name contains literal URL-encoded "%2F"
# characters -- confirm the file on disk really is named this way.
unzip("./Coursera 4/Week 1/exdata%2Fdata%2Fhousehold_power_consumption.zip",
exdir="./Coursera 4/Week 1/Course Project")
# Missing values in the raw file are not marked here; "?" entries (if any)
# will be read as character data -- hence the as.numeric() below.
PrjData<-read.table("./Coursera 4/Week 1/Course Project/household_power_consumption.txt",
header= TRUE, sep=";", stringsAsFactors = FALSE)
# Keep only the rows for the two target dates.
Extract<-PrjData[(as.Date(PrjData$Date,"%d/%m/%Y")==as.Date("2007-02-01")|
as.Date(PrjData$Date,"%d/%m/%Y")==as.Date("2007-02-02")),]
# Prepend proper Date and time-of-day columns to the measurement columns 3:9.
Extracted<-cbind(as.data.frame.Date(as.Date(Extract$Date,"%d/%m/%Y")),
as.POSIXlt(strptime(Extract$Time,"%H:%M:%S")),Extract[,3:9])
# Histogram of Global Active Power, written to a default-size (480x480) PNG.
png(file="Plot1.png")
hist(as.numeric(Extracted$Global_active_power),col = "Red",
xlab="Global Active Power(kilowatts)",
main="Global Active Power")
dev.off()
|
e0df87b2cf076e66d3c6f2718fbb11293556dca3
|
9a8198dd11f74d0b6f094b469b1ba0d46d754024
|
/ReDCM_run_modelspace/redcm_modelspace_mxlist.R
|
517284140935c1c51c4d9b924a4a15310cd153de
|
[] |
no_license
|
aranyics/UD-redcm-on-Azure
|
0d922651dabb838f02bb68666f29c6cdb3203271
|
cc88f8297a7e031dd4a8266cb5733e5f2d520fc6
|
refs/heads/main
| 2023-04-06T01:42:13.270895
| 2021-04-22T07:05:55
| 2021-04-22T07:05:55
| 360,419,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,805
|
r
|
redcm_modelspace_mxlist.R
|
#!/usr/bin/env Rscript
# Collect ReDCM model-space results for one subject into a single CSV.
args = commandArgs(trailingOnly = TRUE)
# test if there is at least one argument: if not, return an error
if (length(args)==0) {
stop("At least one argument must be supplied (subject)", call.=FALSE)
}
## -----------------------------------------------------------------------------------------
## set environment variables: prefer the newest user library on the path
.libPaths(rev(dir('~/R/x86_64-pc-linux-gnu-library', full.names=TRUE)))
cat( 'R package path: ' )
cat( .libPaths(), '\n', sep=':')
suppressPackageStartupMessages( library("ReDCM") )
subject = args[1]
# Connectivity masks, flattened column-major.
# NOTE(review): presumably 4x4 DCM matrices -- Amask marks off-diagonal
# connections, Abase the diagonal (self-connections); confirm against
# redcm_mask_matrix2id() in redcm_mask_mid.R.
Amask = c(0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0)
Abase = c(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)
# One 16-element mask per modulatory input (3 inputs); row k = mask for B[,,k].
Bmask = t( array(
c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1),
dim = c(16, 3)) )
# Number of decimal digits used when encoding parameter matrices as IDs.
digits = 3
# Return the absolute path of the currently executing script.
# Handles both invocation styles: via 'Rscript path/to/script.R' (parses the
# --file= command-line argument) and via source() from an interactive session
# (reads 'ofile' from the first frame on the call stack).
thisFile <- function() {
cmdArgs <- commandArgs(trailingOnly = FALSE)
needle <- "--file="
match <- grep(needle, cmdArgs)
if (length(match) > 0) {
# Rscript
return(normalizePath(sub(needle, "", cmdArgs[match])))
} else {
# 'source'd via R console
return(normalizePath(sys.frames()[[1]]$ofile))
}
}
# Replace every NaN entry of a numeric vector/matrix with 0.
# NA values (which are not NaN) and all attributes are left untouched.
denan.mx = function(mx)
{
  replace(mx, is.nan(mx), 0)
}
# Helper that encodes a parameter matrix + mask into a model ID string.
source(paste(dirname(thisFile()), '/redcm_mask_mid.R', sep = ''))
# Input: one saved DCM estimate (object 'DCMe') per model, under dcm/.
dcmdir = paste(dirname( thisFile() ), '/../../../results/modelspace/vdmodel_4node_', subject, '/dcm/', sep = '')
# Output: a single CSV with one row per model, under dcm_ms/.
outdir = paste(dcmdir, '../dcm_ms/', sep='')
dir.create(outdir, showWarnings = FALSE)
d = list.files(dcmdir, recursive = TRUE)
for (i in 1:length(d))
{
# Model ID = last '_'-separated token of the file name.
id = tail( strsplit(d[i], '_')[[1]], 1 )
#cat(d[i])
# Loads the estimated model into this scope as 'DCMe'.
load( paste(dcmdir, d[i], sep = '') )
# On the first file, derive dimensions and write the CSV header row.
if (i == 1)
{
# nn = number of regions, nm = number of inputs (slots of the model spec).
nn = DCMe@M[[1]]@l
nm = DCMe@M[[1]]@m
# Column/row index labels for flattened nn x nn matrices (column-major).
cols = c(apply(array(1:nn), 1, function(x){rep(x, nn)}))
rows = rep(1:nn,nn)
Bs = apply(array(1:nm), 1, function(x){paste('B', x, sep = '')})
Cs = apply(array(1:nm), 1, function(x){paste('C', x, sep = '')})
# Header: IDs, per-matrix sub-IDs, free energy, then flattened estimates
# (A_/B_/C_) and posterior probabilities (pA_/pB_/pC_).
dcm.hd = c('ID', 'mID', 'A', Bs, 'C', 'Fe')
dcm.hd = c(dcm.hd, paste(rep('A_', nn*nn), cols, rows, sep=''), paste(rep('pA_', nn*nn), cols, rows, sep=''))
for (k in 1:nm)
{
dcm.hd = c(dcm.hd, paste(rep(paste(Bs[k],'_',sep =''), nn*nn), cols, rows, sep=''), paste(rep(paste('p',Bs[k],'_',sep =''), nn*nn), cols, rows, sep=''))
}
for (k in 1:nm)
{
dcm.hd = c(dcm.hd, paste(rep(paste(Cs[k],'_',sep =''), nn), 1:nn, sep=''), paste(rep(paste('p',Cs[k],'_',sep =''), nn), 1:nn, sep=''))
}
dcm.length = length(dcm.hd)
dcm.hd = paste(dcm.hd, collapse = ',')
# "wt": truncate/create the CSV and write the header once.
fileConn<-file(paste(outdir,"fullspace_dcm.csv", sep = ''), "wt")
writeLines(dcm.hd, fileConn)
close(fileConn)
}
# Encode the A and B estimates against their masks; C is fixed to "000".
aid = redcm_mask_matrix2id(DCMe@Ep@A, Amask, digits)
bid = apply(array(1:DCMe@M[[1]]@m), 1, function(k) {redcm_mask_matrix2id(DCMe@Ep@B[,,k], Bmask[k,], digits)})
cid = "000"
dcm = c(id, paste(c(aid, bid), collapse = ''), aid, bid, cid)
dcm = c(dcm, round(DCMe@Fe, 4))
# Flattened rounded estimates and posterior probabilities (NaN -> 0).
dcm = c( dcm, c(round(DCMe@Ep@A, 4)), c(denan.mx(round(DCMe@Pp@A, 4))) )
for (m in 1:DCMe@M[[1]]@m)
dcm = c( dcm, c(round(DCMe@Ep@B[,,m], 4)), c(denan.mx(round(DCMe@Pp@B[,,m], 4))) )
dcm = c( dcm, c(round(DCMe@Ep@C, 4)), c(denan.mx(round(DCMe@Pp@C, 4))) )
dcm = paste(dcm, collapse = ',')
# "at": append this model's row to the CSV.
fileConn<-file(paste(outdir,"fullspace_dcm.csv", sep = ''), "at")
writeLines(dcm, fileConn)
close(fileConn)
# Crude progress indicator: one dash per 1000 models.
if (i %% 1000 == 0)
cat('-')
}
# Re-read the CSV, sort rows by numeric ID, restore numeric column types
# (all but the ID columns 2:7) and write it back in place.
df = read.table(paste(outdir,"fullspace_dcm.csv", sep = ''), sep = ',', header = TRUE, colClasses = 'character')
df = df[order(as.numeric(df$ID)),]
df[,-c(2:7)] = lapply(df[,-c(2:7)], as.numeric)
write.table(df, paste(outdir,"fullspace_dcm.csv", sep = ''), sep = ',', col.names = TRUE, row.names = FALSE)
cat('\n')
|
6e9b5b62abe98772cfbeab6acd734386191ca0e2
|
26a780f7a54e4f5f9fe8de3b1293040657f812b5
|
/ui.R
|
11dc78b8098b569e870ef781e131d086260997fa
|
[] |
no_license
|
RajasekaranKR/Developing-Data-Products-Week-4-Project-Shiny-Application
|
3722379961e787e157105cf07a00142a12fb1821
|
0bb48ee5b56346ecd075d3547a4ddfe1a7857cdb
|
refs/heads/master
| 2020-03-30T09:21:03.880183
| 2018-10-01T10:45:02
| 2018-10-01T10:45:02
| 151,072,406
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,591
|
r
|
ui.R
|
library(shiny)
# Shiny UI for exploring mtcars: pick a predictor variable and compare it
# against MPG via a boxplot or a regression model (rendered by server.R as
# "mpgBoxPlot", "mpgPlot" and "fit").
shinyUI(
fluidPage(
titlePanel("The relationship between variables and miles per gallon(MPG)"),
sidebarLayout(
sidebarPanel(
# Predictor selection: display label -> mtcars column name.
selectInput("variable","variable:",
c("Number of cylinders"="cyl",
"Displacement(cu.in)"="disp",
"Grosshorsepower"="hp",
"Rear axle ratio"="drat",
"Weight(lb/1000)"="wt",
"1/4 mile time"="qsec",
"V/S"="vs",
"Transmission"="am",
"Number of forward gears"="gear",
"Number of carburators"="carb"
)),
checkboxInput("outliers","ShowBoxPlot's outliers",FALSE)
),
mainPanel(
h3(textOutput("caption")),
tabsetPanel(type="tabs",
tabPanel("BoxPlot",plotOutput("mpgBoxPlot")),
tabPanel("Regression model",
plotOutput("mpgPlot"),
verbatimTextOutput("fit")
))
)
),
# NOTE(review): the tabPanel()s below sit OUTSIDE the tabsetPanel above --
# they are passed straight to fluidPage() (and one is nested inside
# another tabPanel), so they will not render as selectable tabs.
tabPanel("check the Source Code",
h2("All the SourceCode can be found in coursera"),
hr(),
h3("Peer Assessment/Regression Model Course Project"),
helpText("You work for Motor Trend, a magazine about the
automobile industry looking out a dataset of a
collaboration between a set of variables and
miles per gallon(MPG)(outcome).They are particularly
interested in the following two questions:"),
h3("Important"),
p("A dataframe with 32 observations on 11 variables"),
a("https://class.coursera.org/regmodels-008")
),
tabPanel("More Data Detail",
h2("Motor Trend Car Road Tests"),
hr(),
h3("Description"),
helpText("The data was exctracted from the 1974
Motor Trend US Magazine,","and comprises
fuel consumption and aspects of automobile
design and performance","for 32 automobiles (1973-74 models)"),
h3("format"),
p("A data frame with 32 observations on 11 variables"),
p("[,1] mpg Miles / (US) gallon"),
p("[,2] cyl Number of cylinders"),
p("[,3] disp Displacement (cu.in)"),
p("[,4] hp Gross horsepower"),
p("[,5] drat Rear axle ratio"),
p("[,6] wt Weight(lb/1000)"),
p("[,7] qsec 1/4 miltime"),
p("[,8] vs V / S"),
p("[,9] am Transmission (0=automatic, 1=manual)"),
p("[,10] gear Number of forward gears"),
p("[,11] carb Number of Carburators"),
h3("Source"),
p("Henderson and Vellman (1981), Building multiple regression
models interactively Biometrics",391-411),
tabPanel("Go back to my Github repository",
a("https://github.com/rajasekaran tab=repository"),
hr(),
h2("I believe you are happy with the Shiny App"),
h2("The name of the repository is DataProducts")
)
)
)
)
|
57da14cfa403582b3ebb322b069889b8cae365e2
|
b7db4753fba2d5a32c9e905b6637fb00561c96c5
|
/data-raw/greeness_plot.R
|
10ff5ce51e102dd02f167768e6988fb85e493ca1
|
[] |
no_license
|
yujiex/pubPriCmp
|
a49cc9a589fa2a11cf754b4c8722319ff80734f8
|
358b33ed31003ea825426122ba9129532fd2f97a
|
refs/heads/master
| 2020-03-20T15:33:20.624146
| 2019-10-29T17:47:48
| 2019-10-29T17:47:48
| 137,515,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,089
|
r
|
greeness_plot.R
|
## Following steps from
##https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/#clear-workspace-and-install-necessary-packages
# Detach every attached package except R's base set (stats, graphics,
# grDevices, utils, datasets, methods, base). Prints one message per
# package detached.
detachAllPackages <- function() {
  keep <- paste("package:",
                c("stats", "graphics", "grDevices", "utils",
                  "datasets", "methods", "base"),
                sep = "")
  attached <- search()
  # Entries whose name starts with "package:" are attached packages.
  pkgs <- attached[ifelse(unlist(gregexpr("package:", attached)) == 1,
                          TRUE,
                          FALSE)]
  pkgs <- setdiff(pkgs, keep)
  if (length(pkgs) > 0) for (pkg in pkgs) {
    detach(pkg, character.only = TRUE)
    print(paste("package ", pkg, " detached", sep = ""))
  }
}
detachAllPackages()
# Load (and, if missing, install) every package the script needs.
# NOTE(review): installing packages at run time from inside if(!require())
# blocks is fragile (silent FALSE on load failure, network dependency);
# consider requireNamespace() checks or a one-off setup script instead.
if (!require(rgeos)) {
install.packages("rgeos", repos = "http://cran.us.r-project.org")
require(rgeos)
}
if (!require(rgdal)) {
install.packages("rgdal", repos = "http://cran.us.r-project.org")
require(rgdal)
}
if (!require(raster)) {
install.packages("raster", repos = "http://cran.us.r-project.org")
require(raster)
}
if(!require(ggplot2)) {
install.packages("ggplot2", repos="http://cloud.r-project.org")
require(ggplot2)
}
if(!require(viridis)) {
install.packages("viridis", repos="http://cloud.r-project.org")
require(viridis)
}
if(!require(dplyr)) {
install.packages("dplyr", repos = "https://cloud.r-project.org/")
require(dplyr)
}
if(!require(gtable)) {
install.packages("gtable", repos = "https://cloud.r-project.org/")
require(gtable)
}
if(!require(grid)) {
install.packages("grid", repos = "https://cloud.r-project.org/")
require(grid)
}
if(!require(readxl)) {
install.packages("readxl", repos = "https://cloud.r-project.org/")
require(readxl)
}
if(!require(magrittr)) {
install.packages("magrittr", repos = "https://cloud.r-project.org/")
require(magrittr)
}
if(!require(tidyr)) {
install.packages("tidyr", repos = "https://cloud.r-project.org/")
require(tidyr)
}
if(!require(maptools)) {
install.packages("maptools", repos = "https://cloud.r-project.org/")
require(maptools)
}
## Generic theme
# Minimal map theme built on theme_minimal(): no axes, a faint major grid,
# and warm off-white backgrounds throughout. Extra theme settings can be
# passed through '...' and are appended to the theme() call.
theme_map <- function(...) {
  bg <- element_rect(fill = "#f5f5f2", color = NA)
  none <- element_blank()
  theme_minimal() +
    theme(
      text = element_text(family = "Ubuntu Regular", color = "#22211d"),
      axis.line = none,
      axis.text.x = none,
      axis.text.y = none,
      axis.ticks = none,
      axis.title.x = none,
      axis.title.y = none,
      panel.grid.major = element_line(color = "#ebebe5", size = 0.2),
      panel.grid.minor = none,
      plot.background = bg,
      panel.background = bg,
      legend.background = bg,
      panel.border = none,
      ...
    )
}
## read in data
df_green = readr::read_csv("~/Dropbox/thesis/code/pubPriCmp/data-raw/greeness_combine_source.csv") %>%
tibble::as_data_frame() %>%
{.}
df_green_long = df_green %>%
tidyr::gather(`Source`, `Rating`, `Majority`:`Forbes`) %>%
{.}
## read in geo-data
gde_state <-
rgdal::readOGR("~/Dropbox/thesis/code/pubPriCmp/data-raw/geo_data/cb_2017_us_state_20m.shp",
layer="cb_2017_us_state_20m")
## projection settings
crs(gde_state) <- "+proj=somerc +lat_0=46.95240555555556
+lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000
+ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs"
## turn shapefile in to dataframe
map_state_fortified = ggplot2::fortify(gde_state, region="NAME")
## Cannot tell which state is which
head(map_state_fortified)
## takes long, not finishing
map_state_fortified %>%
ggplot2::ggplot() +
ggplot2::geom_polygon(ggplot2::aes(x=lat, y=long, group=group))
|
ca743a7923ef71de6da22a409834e343094ffdfd
|
4dced39355fb06b47320044cb55a8af13f4a6f1a
|
/Plot4.R
|
84c8f7ced5d7b8df317add68ab6c260392570958
|
[] |
no_license
|
Rabun0510/ExData_Plotting1
|
c025194e9072b5933a6dd25014b83404dd9e60f8
|
76e83190cc23444503d792fc7db849edb3db2903
|
refs/heads/master
| 2020-12-28T21:05:47.627045
| 2015-04-08T19:56:27
| 2015-04-08T19:56:27
| 33,628,820
| 0
| 0
| null | 2015-04-08T19:53:45
| 2015-04-08T19:53:44
| null |
UTF-8
|
R
| false
| false
| 2,102
|
r
|
Plot4.R
|
# Exdata-013 project 1, plot 4: a 2x2 panel of power-consumption plots for
# 2007-02-01 .. 2007-02-02, saved to plot4.png.
# Read the original data ("?" marks missing values in this data set).
originalData <- read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", stringsAsFactors = FALSE)
# Add a DateTime column combining the Date and Time fields into one POSIXlt object.
elecData <- cbind("DateTime" = strptime(paste(originalData$Date, originalData$Time), format = "%d/%m/%Y %H:%M:%S"), originalData)
# Filter the data down to the two target dates.
lowerDate <- as.POSIXlt("2007-02-01")
upperDate <- as.POSIXlt("2007-02-03")
# NOTE(review): '<=' would also admit an observation stamped exactly
# 2007-02-03 00:00:00 if one existed; confirm this suits the data.
dataToSelect <- (elecData$DateTime >= lowerDate)&(elecData$DateTime <= upperDate)
plotData <- elecData[dataToSelect,]
plotData <- plotData[!is.na(plotData$DateTime),]
# Set plot parameter mfrow to 2 by 2 and draw all 4 required plots.
par(mfrow = c(2,2))
# Plot 1: Global Active Power over time.
plot(plotData$DateTime, plotData$Global_active_power, type = "n", xlab = "", ylab = "Global Active Power (kilowatts)")
lines(plotData$DateTime, plotData$Global_active_power, type = "l", lwd = 1, lty = "solid")
# Plot 2: Voltage over time.
plot(plotData$DateTime, plotData$Voltage, type = "n", xlab = "datetime", ylab = "Voltage")
lines(plotData$DateTime, plotData$Voltage, type = "l", lwd = 1, lty = "solid")
# Plot 3: the three sub-metering series overlaid.
# Fix: removed invalid 'height = 480, width = 480' arguments -- those are
# png() device arguments, not graphical parameters of plot(), and only
# produced warnings here.
plot(plotData$DateTime, plotData$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(plotData$DateTime, plotData$Sub_metering_1, type = "l", lwd = 1, lty = "solid", col = "black")
lines(plotData$DateTime, plotData$Sub_metering_2, type = "l", lwd = 1, lty = "solid", col = "red")
lines(plotData$DateTime, plotData$Sub_metering_3, type = "l", lwd = 1, lty = "solid", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black","red","blue"), lty = c("solid","solid", "solid"),bty = "n")
# Plot 4: Global Reactive Power over time.
plot(plotData$DateTime, plotData$Global_reactive_power, type = "n", xlab = "datetime", ylab = "Global_Reactive_Power")
lines(plotData$DateTime, plotData$Global_reactive_power, type = "l", lwd = 1, lty = "solid")
# Copy the on-screen 2x2 panel to a 480x480 PNG file.
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
3408a1ac3c3bc205856bc16f3afc45ce5ea16dbb
|
56083c0756218eb2611923bc3353ee82a2171df1
|
/ZZ_archive/model_sim_simulation_SJ(3).R
|
181d052ceed6d39ba995316ee8aba881ff9830a2
|
[] |
no_license
|
yimengyin16/model_SJ
|
d512c26d8283bc2b1c375518a9f6195e362fb1e1
|
4dcaf1bafd871f64d617b61c59111a67b430467c
|
refs/heads/main
| 2023-06-06T06:26:34.791754
| 2021-06-28T20:50:33
| 2021-06-28T20:50:33
| 359,182,697
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 38,649
|
r
|
model_sim_simulation_SJ(3).R
|
## This script conducts the simulation of the finance of the plan
## Development log
{
#' What's new in ver SJ(3)
#' - add new EEC policies: shared NC and shared ADC for SJ P&F
}
# New parameter to be added (Special settings):
# run_sim(): run the annual funding simulation of the pension plan.
#
# Arguments:
#   i.r_              matrix of simulated annual investment returns; one row per
#                     projection year, one column per simulation scenario, with
#                     columns indexed by simulation number as a character string
#                     (subset below via i.r_[, as.character(k)]).
#   sim_paramlist_    list of simulation parameters (amortization, contribution,
#                     COLA and initial-asset policies). Its elements are copied
#                     into this function's environment by assign_parmsList(), so
#                     names such as init_MA_type, EEC_type, m, s.year, ... used
#                     below come from this list.
#   Global_paramlist_ list of global parameters (init_year, nyear, nsim, i,
#                     ncore, ...), also copied into the environment by
#                     assign_parmsList().
#
# Returns: a data frame with one row per (sim, year) holding liabilities,
# assets, contributions, benefit payments, funded ratios and derived ratios
# (see the mutate() near the bottom of the function).
#
# NOTE(review): this function depends on many names injected by
# assign_parmsList() and on objects defined in the enclosing environment
# (e.g. dir_val, dir_outputs, amort_LG, na2zero); it cannot run stand-alone.
run_sim <- function(i.r_ = i.r,
sim_paramlist_ = sim_paramlist,
Global_paramlist_ = Global_paramlist){
# Run the section below when developing new features.
# dev --
#
# i.r_ = i.r
# sim_paramlist_ = sim_paramlist
# Global_paramlist_ = Global_paramlist
# dev --
# Copy every element of the two parameter lists into this function's
# environment so they can be referenced as plain variables below.
assign_parmsList(Global_paramlist_, envir = environment())
assign_parmsList(sim_paramlist_, envir = environment())
# Load the pre-computed valuation results (liabilities, workforce, initial
# amortization schedule, ...) for the requested valuation.
valData <- readRDS(paste0(dir_val, "val_", sim_paramlist_$val_name, ".rds"))
# note that "dir_val" is defined outside the function
tn <- val_tierName # "sumTiers" # name for aggregate values in valData$aggLiab.
if(tn == "sumTiers"){
tier_names <- names(valData$indivLiab) # names of all component tiers
} else {
tier_names <- tn
}
#*****************************************************************************
#  Special settings for using baseline UAAL              ####
#*****************************************************************************
# When any baseline-dependent option is on, load the stored baseline run once.
if(use_baselineUAAL|newBasisPolicyChg|use_baselineMA){
df_baseline <- readRDS(paste0(dir_outputs, "sim_", sim_name_baseline, ".rds"))$results
}
if(use_baselineUAAL|newBasisPolicyChg){
# df_baseline <- readRDS(paste0(dir_outputs, "sim_", sim_name_baseline, ".rds"))$results
# Year-1 deterministic (sim == 0) UAAL and AL from the baseline run.
UAAL.year1.baseline <- df_baseline %>% filter(sim == 0, year == init_year) %>% pull(UAAL)
AL.year1.baseline   <- df_baseline %>% filter(sim == 0, year == init_year) %>% pull(AL)
}
# Set initial assets
if(use_baselineMA){
# df_baseline <- readRDS(paste0(dir_outputs, "sim_", sim_name_baseline, ".rds"))$results
# Override the initial-asset policy: take year-1 MA/AA directly from baseline.
init_MA_type <- "MA0"
init_AA_type <- "AA0"
MA_0 <-  df_baseline %>% filter(sim == 0, year == init_year) %>% pull(MA)
AA_0 <-  df_baseline %>% filter(sim == 0, year == init_year) %>% pull(AA)
}
#*****************************************************************************
#        Preparation for contingent COLA   ####
#*****************************************************************************
# Only apply contingent COLA to service retirees
# Build, per tier, an individual-level table of service retirees (benefit,
# liability, head count, annuity factor) used to re-roll benefits each year
# under the realized COLA.
ls_servRet0 <- list()
for(tierName in tier_names){
# dev --
# tierName = tier_names[1]
ls_servRet0[[tierName]] <-
left_join(
valData$pop[[tierName]]$wf_servRet.la,
valData$indivLiab[[tierName]]$servRet.la,
by = c("ea", "age", "year", "year_servRet")
) %>%
mutate(across(everything(), na2zero)) %>%
mutate(start_year = year - (age - ea),
age_servRet = age - (year - year_servRet),
grp = tierName
) %>%
select(grp, start_year, ea, age, age_servRet, year_servRet, year, B.servRet.la, ALx.servRet.la, n_servRet.la, ax.servRet)
# Keep the benefit only in the retirement year (later years are re-grown with
# the realized COLA inside the simulation loop) and the liability only in the
# initial year; drop cohorts with no retirees at all.
ls_servRet0[[tierName]] %<>%
mutate(B.servRet.la   = ifelse(year_servRet == year , B.servRet.la, 0),
ALx.servRet.la = ifelse(year == init_year ,    ALx.servRet.la, 0)) %>%
filter(age >= age_servRet) %>%
arrange(start_year, ea, age_servRet, age) %>%
group_by(start_year, ea, age_servRet) %>%
# remove groups with no retirees
mutate(n_sum = sum(n_servRet.la)) %>%
ungroup() %>%
filter(n_sum != 0) %>%
mutate(n_sum = NULL)
}
ls_servRet0 <- bind_rows(ls_servRet0)
# ls_servRet0$regularAll
#*****************************************************************************
#        Defining variables in simulation  ####
#*****************************************************************************
# Now we do the actuarial valuations
# In each period, following values will be caculated:
# AL: Total Actuarial liability, which includes liabilities for active workers and pensioners.
# NC: Normal Cost
# MA: Market value of assets.
# AA: Actuarial value of assets.
# EAA:Expected actuarial value of assets.
# UAAL: Unfunded accrued actuarial liability, defined as AL - NC
# EUAAL:Expected UAAL.
# PR: payroll
# LG: Loss/Gain, total loss(positive) or gain(negative), Caculated as LG(t+1) = (UAAL(t) + NC(t))(1+i) - C - Ic - UAAL(t+1),
# AM: Amount to be amortized at period t.
# i is assumed interest rate. ELs of each period will be amortized seperately.
# SC: Supplement cost
# ADC: actuarially required contribution by employer. NC + SC - EEC
# C : Actual contribution
# C_ADC: shortfall in paying ADC
# B : Total beneift Payment
# Ic: Assumed interest from contribution, equal to i*C if C is made at the beginning of time period. i.r is real rate of return.
# Ia: Assumed interest from AA, equal to i*AA if the entire asset is investible.
# Ib: Assumed interest loss due to benefit payment, equal to i*B if the payment is made at the beginning of period
# I.r : Total ACTUAL interet gain, I = i.r*(AA + C - B), if AA is all investible, C and B are made at the beginning of period.
# Funded Ratio: AA / AL
# C_PR: contribution as % of payroll
# Formulas
# AL(t), NC(t), B(t) at each period are calculated using the workforce matrix and the liability matrix.
# MA(t+1) = AA(t) + I(t) + C(t) - B(t), AA(1) is given
# EAA(t+1)= AA(t) + EI(t)
# AA(t+1) = (1-w)*EAA(t+1) + w*MA(t+1)
# I.r(t) = i.r(t)*[AA(t) + C(t) - B(t)]
# Ia(t) = i * AA(t)
# Ib(t) = i * B(t)
# Ic(t) = i * C(t)
# EI(t) = Ia(t) - Ib(t) + Ic(t)
# ADC   = NC(t) + SC(t)
# ADC.ER = NC(t) + SC(t) - EEC(t)
# C(t) = NC(t) + SC(t)
# UAAL(t) = AL(t) - AA(t)
# EUAAL(t) = [UAAL(t-1) + NC(t-1)](1+i(t-1)) - C(t-1) - Ic(t-1)
# LG(t) =   UAAL(t) - EUAAL for t>=2 ; LG(1) = -UAAL(1) (LG(1) may be incorrect, need to check)
# More on LG(t): When LG(t) is calculated, the value will be amortized thourgh m years. This stream of amortized values(a m vector) will be
# placed in SC_amort[t, t + m - 1]
# SC = sum(SC_amort[,t])
# ExF = B(j) - C(j)
# About gains and losses
# In this program, the only source of gain or loss is the difference between assumed interest rate i and real rate of return i.r,
# which will make I(t) != Ia(t) + Ic(t) - Ib(t)
# Set up data frame
# One row per projection year; all annual output variables start at 0 and are
# filled in by the loop below.
penSim0 <- data.frame(year = init_year:(init_year + nyear - 1)) %>%
mutate(
# standard variables
AL   = 0, #
MA   = 0, #
AA   = 0, #
EAA  = 0, #
FR   = 0, #
ExF  = 0, #
UAAL = 0, #
EUAAL= 0, #
LG   = 0, #
Amort_basis  = 0, # amount to be amortized: AM(t) = LG(t) + [ADC(t - 1) - C(t-1)]*[1 + i(t-1)], i.e. actuarial loss/gain plus shortfall in paying NC+SC in last period(plus interests)
NC   = 0, #
SC   = 0, #
EEC  = 0, #
ERC  = 0, #
ADC  = 0, #
ADC.ER = 0, #
C    = 0, #
C_ADC= 0, #
B    = 0, #
I.r  = 0, #
I.e  = 0, #
I.dif= 0,
Ia   = 0, #
Ib   = 0, #
Ic   = 0, #
i    = i,
i.r  = 0,
PR   = 0,
nactives  = 0,
nretirees = 0,
nterms    = 0,
ADC_PR = 0,
C_PR = 0,
# Contingent COLA
cola_actual = cola_default,
# SJ shared ADC
EEC.NC = 0,
EEC.SC = 0,
COLA_triggered = 0
## Additional/plan specific variables
)
# penSim0 <- as.list(penSim0)
#valData$aggLiab[[tn]]$active
#*****************************************************************************
#        Defining variables in simulation  ####
#*****************************************************************************
# Populate the deterministic valuation series (liabilities, normal costs,
# benefits, payroll, head counts) from the stored aggregate liability arrays.
# AL(j)
# AL for active members
penSim0$AL.active.servRet <- valData$aggLiab[[tn]]$active[, "ALx.servRet.laca"]
penSim0$AL.active.defrRet <- valData$aggLiab[[tn]]$active[, "ALx.defrRet"]
penSim0$AL.active.disbRet <- valData$aggLiab[[tn]]$active[, "ALx.disbRet"]
penSim0$AL.active.death   <- valData$aggLiab[[tn]]$active[, "ALx.death"]
penSim0$AL.active <-
with(penSim0, AL.active.servRet +
AL.active.defrRet +
AL.active.disbRet +
AL.active.death)
# AL for members in pay status
penSim0$AL.servRet <- valData$aggLiab[[tn]]$servRet.la[, "ALx.servRet.la"]
penSim0$AL.defrRet <- valData$aggLiab[[tn]]$defrRet[, "ALx.defrRet"]
penSim0$AL.disbRet <- valData$aggLiab[[tn]]$disbRet[, "ALx.disbRet"]
penSim0$AL.death   <- valData$aggLiab[[tn]]$death[, "ALx.death"]
#penSim0$AL.ca    <- AggLiab_$ca[, "liab.ca.yearsum"]
penSim0$AL.nonactive <-
with(penSim0, AL.servRet +
AL.defrRet +
AL.disbRet +
AL.death)
# Total AL
penSim0$AL <- with(penSim0, AL.active + AL.nonactive)
# NC(j)
penSim0$NC.servRet <- valData$aggLiab[[tn]]$active[, "NCx.servRet.laca"]
penSim0$NC.defrRet <- valData$aggLiab[[tn]]$active[, "NCx.defrRet"]
penSim0$NC.disbRet <- valData$aggLiab[[tn]]$active[, "NCx.disbRet"]
penSim0$NC.death   <- valData$aggLiab[[tn]]$active[, "NCx.death"]
penSim0$NC         <- with(penSim0, NC.servRet + NC.defrRet + NC.disbRet + NC.death)
# PVFB(j)
penSim0$PVFB.active.servRet <- valData$aggLiab[[tn]]$active[, "PVFBx.servRet.laca"]
penSim0$PVFB.active.defrRet <- valData$aggLiab[[tn]]$active[, "PVFBx.defrRet"]
penSim0$PVFB.active.disbRet <- valData$aggLiab[[tn]]$active[, "PVFBx.disbRet"]
penSim0$PVFB.active.death   <- valData$aggLiab[[tn]]$active[, "PVFBx.death"]
penSim0$PVFB.active         <- with(penSim0, PVFB.active.servRet + PVFB.active.defrRet + PVFB.active.disbRet + PVFB.active.death)
penSim0$PVFB.nonactive      <- with(penSim0, AL.nonactive)
penSim0$PVFB                <- with(penSim0, PVFB.active + PVFB.nonactive)
# Note this is the total PVFB for actives. PVFB for retirees/beneficiaries are the same as AL.
# PVFNC(j)
penSim0$PVFNC.servRet <- valData$aggLiab[[tn]]$active[, "PVFNCx.servRet.laca"]
penSim0$PVFNC.defrRet <- valData$aggLiab[[tn]]$active[, "PVFNCx.defrRet"]
penSim0$PVFNC.disbRet <- valData$aggLiab[[tn]]$active[, "PVFNCx.disbRet"]
penSim0$PVFNC.death   <- valData$aggLiab[[tn]]$active[, "PVFNCx.death"]
penSim0$PVFNC         <- with(penSim0, PVFNC.servRet + PVFNC.defrRet + PVFNC.disbRet + PVFNC.death)
# B(j)
penSim0$B.servRet <- valData$aggLiab[[tn]]$servRet.la[, "B.servRet.la"]
penSim0$B.defrRet <- valData$aggLiab[[tn]]$defrRet[, "B.defrRet"]
penSim0$B.disbRet <- valData$aggLiab[[tn]]$disbRet[, "B.disbRet"]
penSim0$B.death   <- valData$aggLiab[[tn]]$death[, "B.death"]
# penSim0$B.ca     <- valData$aggLiab[[tn]]$ca[, "B.ca"]
penSim0$B         <- with(penSim0, B.servRet + B.defrRet + B.disbRet + B.death)
# PR(j)
penSim0$PR <- valData$aggLiab[[tn]]$active[, "PR"]
# EEC(j)
penSim0$EEC <- 0 # valData$aggLiab[[tn]]$active[, "EEC"]
# PVFEEC(j)
#penSim0$PVFEEC <- valData$aggLiab[[tn]]$active[, "PVFEEC"]
# PVFS(j)
penSim0$PVFS <- valData$aggLiab[[tn]]$active[, "PVFSx"]
# Number of members
penSim0$n_actives  <- valData$aggLiab[[tn]]$active[, "nactives"]
penSim0$n_servRet  <- valData$aggLiab[[tn]]$servRet.la[, "n_servRet.la"]
penSim0$n_defrRet  <- valData$aggLiab[[tn]]$defrRet[, "n_defrRet"]
penSim0$n_disbRet  <- valData$aggLiab[[tn]]$disbRet[, "n_disbRet"]
penSim0$n_deathBen <- valData$aggLiab[[tn]]$death[, "n_deathBen"]
#penSim0$ndisb.ca.R0S1   <- valData$aggLiab[[tn]]$disb.ca[, "n.disb.R0S1"]
#penSim0$n.ca.R1    <- valData$aggLiab[[tn]]$ca[, "n.R1"]
#penSim0$n.ca.R0S1  <- valData$aggLiab[[tn]]$ca[, "n.R0S1"]
# Convert penSim0 to a list when all preps are done.
# It is faster to extract elements from lists than from frame data frames.
penSim0 <- as.list(penSim0)
#
# check valuation results
# penSim0 %>%
#   filter(year == 2019) %>%
#   select(year, AL, B, PR, NC,
#          AL.active,
#          AL.active.servRet,
#          AL.active.disbRet,
#          AL.active.defrRet,
#          AL.active.death,
#          AL.servRet,
#          NC.servRet,
#          NC.disbRet,
#          NC.defrRet,
#          NC.death,
#          ) %>%
#   mutate(NC_PR = 100 * NC / PR)
#*****************************************************************************
#         Setting up initial amortization payments          ####
#*****************************************************************************
# Matrix representation of amortization of amort payment schedule: better visualization but larger size
# Rows:    schedule of amortization payments for losses/gains occurred in a certain year.
# Columns: each column contains all the amortization payments that need to be paid in a simulation year.
#          Column i corresponds to year i.
#          Column sums give the total amortization payments.
# use_baselineUAAL  <- TRUE
# newBasisPolicyChg <- FALSE
# calibration
# valData$init_amort_raw[[tn]] %<>%
#   mutate(year.remaining = year.remaining + 1)
# Set up the matrix for SC starting from year 1
m.max <- max(valData$init_amort_raw[[tn]]$year.remaining, m)    # max number of rows and columns needed
m.max <- max(m.max, 20) # for amortization of amort. of policy changes
SC_amort0 <- matrix(0, nyear + m.max, nyear + m.max)
# SC_amort0
SC_amort.init <- matrix(0, nrow(valData$init_amort_raw[[tn]]), nyear + m.max)
# Adjustment factor for initial amortization payments
# Factor is defined as the initial model UAAL as a proportion of UAAL in AV.
# WARNING: Does not work with "method 2" for AA.
MA.year1.model <- switch(init_MA_type,
MA0    = MA_0,                        # Use preset value
AL     = penSim0$AL[1],               # Assume inital fund equals inital liability.
AL_pct = penSim0$AL[1] * MA_0_pct)    # Inital MA is a proportion of inital AL
AA.year1.model <- switch(init_AA_type,
AA0         = AA_0,                        # Use preset value
noSmoothing = MA.year1.model,              # Assume inital fund equals inital liability.
AL_pct      = penSim0$AL[1] * AA_0_pct)    # Inital MA is a proportion of inital AL
AL.year1.model   <- penSim0$AL[1]
UAAL.year1.model <- AL.year1.model - AA.year1.model
# factor.initAmort <- UAAL.year1.model /  [replace with UAAL from plan doc]
# Notes: Theoretically, the AV UAAL should be equal to the sum of outsftanding amortization balance.
#        Need to check the document
if(use_baselineUAAL){
factor.initAmort <- UAAL.year1.baseline / sum(valData$init_amort_raw[[tn]]$balance)
} else {
factor.initAmort <- UAAL.year1.model / sum(valData$init_amort_raw[[tn]]$balance)
}
# Adjust existing amortization basis
init_amort <-
valData$init_amort_raw[[tn]] %>%
mutate(balance = balance * factor.initAmort)
# create new amort. basis for the difference in AL from the baseline run.
# Note that the amortization period of 20 years for policy changes is hard coded below for SJ P&F
if(use_baselineUAAL & newBasisPolicyChg){
init_amort %<>%
add_row(balance = AL.year1.model - AL.year1.baseline,
year.remaining = 20,
amort.method   = "cp",
amort.type     = "closed",
skipY1 = FALSE)
# Add a matching (zero-filled) row to the payment matrix for the new basis.
SC_amort.init <- rbind(SC_amort.init, SC_amort.init[1,])
}
if(useAVamort){
# Expand each initial basis into its stream of annual payments; row j of
# SC_amort.init holds the payments of basis j starting in simulation year 1.
SC_amort.init.list <- mapply(amort_LG,
p = init_amort$balance,
m = init_amort$year.remaining,
method = init_amort$amort.method,
skipY1 = init_amort$skipY1,
MoreArgs = list(i = i, g = salgrowth_amort, end = FALSE), SIMPLIFY = F)
for(j in 1:nrow(SC_amort.init)){
SC_amort.init[j, 1:init_amort$year.remaining[j]] <- SC_amort.init.list[[j]]
}
}
# SC_amort.init
# Comibining matrices of initial amortization and new amortization
nrow.initAmort <- nrow(SC_amort.init)
SC_amort0 <- rbind(SC_amort.init, SC_amort0)
# Notes:
# The amortization basis of year j should be placed in row nrow.initAmort + j - 1.
# This creates the issue for year 1 that whether we should use the amortization from the document or that calculated from the model
# Need to think this through.
# For sim -1, which is for check model consistency, all amort payments for past gains/losses are set to 0.
## save(SC_amort0, file = "SC_amort0.RData")
#*****************************************************************************
#                 Asset smoothing                  ####
#*****************************************************************************
# To be modified with MEPERS asset smoothing method
## Case 1: Recognizing equal amounts every year
# In year Y, The ith element of s.vector is the proportion of the investment
# gains/losses in year Y - (s.year - i + 1) to be EXCLUDED from AA in year Y.
# E.g.The asset smoothing period is 5 (5 elements in s.vector) and the last element is 0.8,
#     then only 20% of the investment gain/loss in year Y-1 will be recognized
#     in the calculation of actuarial value of assets in year Y.
s.vector <- seq(0,1,length = s.year + 1)[-(s.year+1)]; s.vector  # a vector containing the porportion of 'unrecognized' assets in the current year.
## Case 2: Recognizing unequal amounts every year
# s.vector <- c(0, 0.2, 0.4, 0.55, 0.7)
# if(length(s.vector) != s.year) warning("Incorrect asset smoothing period.")
## Adjusting initil unrecognized returns for the MA-AA difference in the model
# TEMP for MEPERS
# valData$init_unrecReturns.unadj <-
#   tribble(
#    ~year, ~DeferredReturn,
#    2020,  1,
#    2021,  1,
#    2022,  1
#   )
# Scale the document's deferred-return schedule so the total equals the model's
# year-1 MA-AA gap; annualTot is the amount still to be subtracted from AA.
init_unrecReturns.adj <- mutate(valData$init_unrecReturns.unadj,
DeferredReturn = DeferredReturn * (MA.year1.model - AA.year1.model)/sum(DeferredReturn),
DeferredReturn.annualTot = sum(DeferredReturn) - cumsum(DeferredReturn) # Initial unrecognized return to be subtracted from AA in each year
) %>%
mutate(across(.fns = na2zero))
# init_unrecReturns.adj
#*****************************************************************************
#                 Simuation  ####
#*****************************************************************************
# One parallel task per simulation scenario k. Each task returns a data frame
# of the annual results for that scenario (collected into a list by foreach).
# NOTE(review): k = -1 appears to be a model-consistency check (assets forced
# to liabilities, no initial amortization); confirm the meaning of k = -2 and
# k = 0 against the project conventions.
cl <- makeCluster(ncore)
registerDoParallel(cl)
penSim_results <- foreach(k = -2:nsim, .packages = c("dplyr", "tidyr", "purrr")) %dopar% {
#k <- -2
# initialize
penSim    <- penSim0
SC_amort  <- SC_amort0
ls_servRet <- ls_servRet0
if(k == -1) SC_amort[,] <- 0
penSim[["i.r"]] <- i.r_[, as.character(k)]
# Re-load helper functions on each worker (workers do not inherit them).
source("functions.R")
for (j in 1:nyear){
# it_j <- iterators::iter(1:nyear)
#j <- iterators::nextElem(it_j); j
#***********************************
#   __MA(j), AA(j) and UAAL(j) ####
#***********************************
# Year 1
if(j == 1) {penSim$MA[j]  <- ifelse(k == -1, penSim$AL[j],       # k = -1 is for testing model consistency
switch(init_MA_type,
MA0    = MA_0,                        # Use preset value
AL     = penSim$AL[j],                # Assume inital fund equals inital liability.
AL_pct = penSim$AL[j] * MA_0_pct)     # Inital MA is a percentage of the inital AL
)
penSim$AA[j]  <- ifelse(k == -1, penSim$MA[j],  # AA = MA (always true for if k == -1 regardless of the value of init_AA_type)
switch(init_AA_type,
AA0         = AA_0,                    # Use preset value
noSmoothing = penSim$MA[j],            # Intial AA is equal to MA
AL_pct      = penSim$AL[j] * AA_0_pct  # Inital AA is a proportion of inital AL
)
)
# Year 2 and after
} else {
penSim$MA[j]  <- with(penSim, MA[j - 1] + I.r[j - 1] + C[j - 1] - B[j - 1])
penSim$EAA[j] <- with(penSim, AA[j - 1] + I.e[j - 1] + C[j - 1] - B[j - 1])
penSim$AA[j]  <- switch(smooth_method,
method1 =  with(penSim, MA[j] - sum(s.vector[max(s.year + 2 - j, 1):s.year] * I.dif[(j-min(j, s.year + 1)+1):(j-1)])), # MA minus unrecognized losses and gains
method2 =  with(penSim, MA[j])
)
}
## Incorporating initial unrecognized returns
#  - The unrecognized returns/losses, which are the differences between initial MA and AA,
#    will be recognized over time.
#  - The schedule is constructed based on info in plan document and the asset smoothing policy
#  - The schedule from the plan document is adjusted for the MA-AA difference in the model, which may not be always equal to the document value.
if((init_AA_type %in% c("AL_pct", "AA0")) & useAVunrecReturn & k != -1 ){
# Adjust AA for inital unrecognized returns
if((j - 1 + init_year) %in% init_unrecReturns.adj$year) penSim$AA[j] <- penSim$AA[j] - with(init_unrecReturns.adj, DeferredReturn.annualTot[year == (j - 1 + init_year)])
}
## Apply corridor for MA, MA must not deviate from AA by more than 20%.
# NOTE(review): s.upper/s.lower come from the parameter list; the 20% figure in
# the comment above holds only if they are set to 1.2/0.8.
if(corridor){
penSim$AA[j] <- with(penSim, ifelse(AA[j] > s.upper * MA[j], s.upper * MA[j], AA[j]))
penSim$AA[j] <- with(penSim, ifelse(AA[j] < s.lower * MA[j], s.lower * MA[j], AA[j]))
}
#***********************************************************
#   __Pre-risk-sharing: Liability and funded status    ####
#***********************************************************
# j <- 2018
# cola_actual <- 0.01
# Re-roll service-retiree benefits with last year's realized COLA, then
# recompute their liability (annuity factor times updated benefit) and the
# plan totals. Only service retirees are affected by contingent COLA.
if(useContingentCOLA){
if(j > 1) {
# for(tierName in tier_names){
#
# ls_servRet[[tierName]] <-
#   mutate(ls_servRet[[tierName]],
#        B.servRet.la  = ifelse(year == (init_year + j - 1) & year > year_servRet,
#                               lag(B.servRet.la, 1, 0) * (1 + penSim$cola_actual[j-1]),
#                               B.servRet.la),
#        ALx.servRet.la = B.servRet.la * ax.servRet)
# }
# MEPERS:
#  - Note that ax.servRet is calculated based on the long-term COLA assumption of 1.91%
#  - B.servRet.la is updated with the COLA determined in the previous year, so it will not
#    be modified by the risk-sharing process below.
ls_servRet <-
mutate(ls_servRet,
B.servRet.la  = ifelse(year == (init_year + j - 1) & year > year_servRet,
lag(B.servRet.la, 1, 0) * (1 + penSim$cola_actual[j-1]),
B.servRet.la),
ALx.servRet.la = B.servRet.la * ax.servRet)
}
#ls_servRet %>% filter(start_year == 2000, ea ==34, age_servRet == 53)
# Calculate total benefit and AL for service retirees
# MEPERS: again, B.servRet[j] will not be futher modified in the current loop
penSim$B.servRet[j] <-
(filter(ls_servRet, year == init_year + j - 1) %>% summarise(B.servRet.la = sum(B.servRet.la * n_servRet.la)))$B.servRet.la
penSim$AL.servRet[j] <-
(filter(ls_servRet, year == init_year + j - 1) %>% summarise(AL.servRet.la = sum(ALx.servRet.la * n_servRet.la)))$AL.servRet.la
# penSim$AL.servRet[j] <-
#   ls_servRet %>%
#   map(~ (filter(.x, year == init_year + j - 1) %>% summarise(AL.servRet.la = sum(ALx.servRet.la * n_servRet.la)))$AL.servRet.la) %>%
#   unlist %>%
#   sum()
#
#
# penSim$B.servRet[j] <-
#   ls_servRet %>%
#   map(~ (filter(.x, year == init_year + j - 1) %>% summarise(B.servRet.la = sum(B.servRet.la * n_servRet.la)))$B.servRet.la) %>%
#   unlist %>%
#   sum()
# penSim$AL.servRet[j] <- (filter(ls_servRet, year == init_year + j - 1) %>% summarise(AL.servRet.la = sum(ALx.servRet.la * n_servRet.la)))$AL.servRet.la
# penSim$B.servRet[j]  <- (filter(ls_servRet, year == init_year + j - 1) %>% summarise(B.servRet.la = sum(B.servRet.la * n_servRet.la)))$B.servRet.la
# Total liability and benefit: actives and retirees
penSim$AL.nonactive[j] <- with(penSim, AL.servRet[j] +
AL.defrRet[j] +
AL.disbRet[j] +
AL.death[j])
penSim$AL[j] <- with(penSim, AL.active[j] + AL.nonactive[j])
penSim$B[j]  <- with(penSim, B.servRet[j] + B.defrRet[j] + B.disbRet[j] + B.death[j])
}
# Funded ratios
penSim$FR_MA[j] <- with(penSim, MA[j] / AL[j])
penSim$FR_AA[j] <- with(penSim, AA[j] / AL[j])
# if(!is.na(use_lowerDR)){
#   penSim$FR_MA_lowerDR[j] <- with(penSim, MA[j] / AL[j] * cola_lowerDR_fixedALratio )
#
# }
# UAAL(j)
penSim$UAAL[j]    <- with(penSim, AL[j] - AA[j])
#*************************************************************
#   __Pre risk sharing: Amoritization costs   ####
#*************************************************************
# Notes on useAVamort
# if useAVamort is FALSE: It is assumed that the entire UAAL in year 1 is the loss/gain that occurs in year 0
#                         It is then amortized using the amortization policy.
#
# if useAVamort is TRUE:  Use the schedule of losses/gains and amortization payments from the plan documeht.
# LG(j)
# Note that what is amortized at time t is the sum of 1) actuarial loss/gain(LG) during t -1, and 2) shortfall in paying ADC(C_ADC) at (t-1)
if (j == 1){
penSim$EUAAL[j]       <- 0
penSim$LG[j]          <- with(penSim, UAAL[j])  # This is the intial underfunding, rather than actuarial loss/gain if the plan is established at period 1.
penSim$Amort_basis[j] <- with(penSim, LG[j])
} else {
penSim$EUAAL[j]       <- with(penSim, (UAAL[j - 1] + NC[j - 1])*(1 + i[j - 1]) - C[j - 1] - Ic[j - 1])
penSim$LG[j]          <- with(penSim,  UAAL[j] - EUAAL[j])
# penSim$Amort_basis[j] <- with(penSim,  LG[j] - (C_ADC[j - 1]) * (1 + i[j - 1]))
# NOTE(review): the contribution-shortfall term is intentionally disabled here;
# the basis equals the pure actuarial loss/gain.
penSim$Amort_basis[j] <- with(penSim,  LG[j]) # - (C_ADC[j - 1]) * (1 + i[j - 1]))
}
# Amortize LG(j)
if(j > ifelse(useAVamort, 1, 0)){
# if useAVamort is TRUE, AV amort will be used for j = 1, not the one calcuated from the model.
if(amort_type == "closed") SC_amort[nrow.initAmort + j - 1, j:(j + m - 1)] <- amort_LG(penSim$Amort_basis[j], i, m, salgrowth_amort, end = FALSE, method = amort_method, skipY1 = FALSE)
}
# Supplemental cost in j
penSim$SC[j] <- switch(amort_type,
closed = sum(SC_amort[, j]),
open   = amort_LG(penSim$UAAL[j], i, m, salgrowth_amort, end = FALSE, method = amort_method, skipY1 = FALSE)[1])
#*****************************************************************************************
#   __ADC, ERC, and total contribution with individual cost method  ####
#****************************************************************************************
# Policy 1: employees contribute a fixed percentage of payroll.
if(EEC_type == "fixedRate"){
# EEC(j)
penSim$EEC[j] <- with(penSim, PR[j] * EEC_fixedRate)
# ADC(j)
if(nonNegC){
penSim$ADC[j]    <- with(penSim, max(0, NC[j] + SC[j]))
penSim$ADC.ER[j] <- with(penSim, max(0, ADC[j] - EEC[j])) #ifelse(ADC[j] > EEC[j], ADC[j] - EEC[j], 0))
# Adjustment of EEC
if(!EEC_fixed) penSim$EEC[j] <- with(penSim, ifelse(ADC[j] > EEC[j], EEC[j], ADC[j])) # penSim$EEC[j] <- with(penSim, EEC[j]) else
} else {
# Allow for negative ADC and C
penSim$ADC[j]    <- with(penSim, NC[j] + SC[j])
if(EEC_fixed) {penSim$ADC.ER[j] <- with(penSim, ADC[j] - EEC[j]) # EEC is fixed
# EEC is not fixed
# 1. when ADC > EEC. Employees pay fixed EEC and employer pays the rest
} else if(with(penSim, ADC[j] > EEC[j])) {
penSim$ADC.ER[j] <- with(penSim, ADC[j] - EEC[j])
# 2. when 0 < ADC < EEC. Employees pay the entire ADC and employer pays 0.
} else if(with(penSim, ADC[j] <= EEC[j] & ADC[j] > 0)) {
penSim$ADC.ER[j] <- 0
penSim$EEC[j]    <- with(penSim, ADC[j])
# 3. when ADC < 0, employees pay zero and employer pays nagative value (withdraw -ADC)
} else if(with(penSim, ADC[j] <= 0)) {
penSim$ADC.ER[j] <- with(penSim, ADC[j])
penSim$EEC[j]    <- 0
}
}
# ERC
penSim$ERC[j] <- switch(ConPolicy,
ADC     = with(penSim, ADC.ER[j]),                          # Full ADC
ADC_cap = with(penSim, min(ADC.ER[j], PR_pct_cap * PR[j])), # ADC with cap. Cap is a percent of payroll
Fixed   = with(penSim, PR_pct_fixed * PR[j])                # Fixed percent of payroll
)
}
# Policy 2: employees pay a fixed share of the normal cost.
if(EEC_type == "sharedNC"){
# EEC(j)
penSim$EEC[j] <- with(penSim, NC[j] * EEC_NCshare)
# ADC(j)
if(nonNegC){
penSim$ADC[j]    <- with(penSim, max(0, NC[j] + SC[j]))
penSim$ADC.ER[j] <- with(penSim, max(0, ADC[j] - EEC[j]))
# Adjustment of EEC
if(!EEC_fixed) penSim$EEC[j] <- with(penSim, ifelse(ADC[j] > EEC[j], EEC[j], ADC[j])) # penSim$EEC[j] <- with(penSim, EEC[j]) else
} else {
# Allow for negative ADC and C
penSim$ADC[j]    <- with(penSim, NC[j] + SC[j])
if(EEC_fixed) {penSim$ADC.ER[j] <- with(penSim, ADC[j] - EEC[j]) # EEC is fixed
# EEC is not fixed
# 1. when ADC > EEC. Employees pay fixed EEC and employer pays the rest
} else if(with(penSim, ADC[j] > EEC[j])) {
penSim$ADC.ER[j] <- with(penSim, ADC[j] - EEC[j])
# 2. when 0 < ADC < EEC. Employees pay the entire ADC and employer pays 0.
} else if(with(penSim, ADC[j] <= EEC[j] & ADC[j] > 0)) {
penSim$ADC.ER[j] <- 0
penSim$EEC[j]    <- with(penSim, ADC[j])
# 3. when ADC < 0, employees pay zero and employer pays nagative value (withdraw -ADC)
} else if(with(penSim, ADC[j] <= 0)) {
penSim$ADC.ER[j] <- with(penSim, ADC[j])
penSim$EEC[j]    <- 0
}
}
# ERC
penSim$ERC[j] <- switch(ConPolicy,
ADC     = with(penSim, ADC.ER[j]),                          # Full ADC
ADC_cap = with(penSim, min(ADC.ER[j], PR_pct_cap * PR[j])), # ADC with cap. Cap is a percent of payroll
Fixed   = with(penSim, PR_pct_fixed * PR[j])                # Fixed percent of payroll
)
}
## Employees share a fixed proportion of ADC
# Policy 3 (SJ P&F): employees pay a share of NC plus a capped share of SC.
if(EEC_type == "sharedADC"){
# Note:
#  - Shared ADC for SJ P&F
#  - Negative EEC and ERC are NOT allowed: EEC = ERC = 0 if NC + SC < 0
penSim$ADC[j]    <- with(penSim, max(0, NC[j] + SC[j]))
penSim$EEC.NC[j] <- with(penSim, NC[j] * EEC_ADCshare)
if(j == 1) penSim$EEC.SC[j] <- with(penSim, max(0, SC[j] * EEC_initSCshare))
if(j > 1){
# The employee SC rate may rise by at most EECrate_UALchgCap per year.
penSim$EEC.SC[j] <- with(penSim, min( (EEC.SC[j-1]/PR[j-1] + EECrate_UALchgCap) * PR[j],
SC[j] * EEC_ADCshare))
penSim$EEC.SC[j] <- na2zero(penSim$EEC.SC[j]) # NaN when PR[j-1] is 0.
# penSim$EEC.SC[j] <- max(0, penSim$EEC.SC[j])
}
penSim$EEC[j] <- penSim$EEC.NC[j] + penSim$EEC.SC[j]
# Employee contribution never falls below the floor share of NC.
penSim$EEC[j] <- max( penSim$EEC[j], penSim$NC[j] * EEC_floorNCshare)
penSim$ERC[j] <- with(penSim, max(0, ADC[j] - EEC[j]))
# penSim$ADC[j] <- with(penSim, max(0, NC[j] + SC[j]))
#
#
# penSim$EEC[j] <- with(penSim, ADC[j] * EEC_share )
# penSim$EEC[j] <- with(penSim, max(PR[j] * 0,       EEC[j] ))
# penSim$EEC[j] <- with(penSim, min(PR[j] * EEC_cap, EEC[j]))
#
# penSim$ERC[j] <- with(penSim, min(PR[j] * ERC_cap, ERC[j]))
}
# C(j)
penSim$C[j] <- with(penSim, EEC[j] + ERC[j])
# C(j) - ADC(j)
penSim$C_ADC[j] <- with(penSim, C[j] - ADC[j])
# }
#******************************************
#   __SJ COLA  ####
#******************************************
# funded ratio based COLA
# Determine this year's COLA: full COLA (cola_max) when the market-value
# funded ratio reaches 100%, otherwise the reduced COLA (cola_min). The
# result is applied to benefits at the start of NEXT year's loop iteration.
if(useContingentCOLA){
# if(!is.na(use_lowerDR)){
#
#   penSim$FR_MA_lowerDR[j] <- with(penSim, MA[j] / (AL[j] * cola_lowerDR_fixedALratio))
#   if(penSim$FR_MA_lowerDR[j] >= 0.9999) penSim$cola_actual[j] <-  cola_max_FR else penSim$cola_actual[j] <- cola_min_FR  # use 99.99 to avoid rounding issue
#
# } else {
if(penSim$FR_MA[j] >= 0.9999) penSim$cola_actual[j] <-  cola_max else penSim$cola_actual[j] <- cola_min  # use 99.99 to avoid rounding issue
#}
}
# # AA_0_pct <- MA_0_pct <- 0.75
# #(penSim$SC[j] + penSim$NC[j])/penSim$PR[j]
#
# if(cola_type == "riskSharing" ){
#
#     penSim$COLA_triggered[j] <- 1
#
#
#
#     # COLA must be in the range of 0~2.5%
#
#     penSim$cola_actual[j] <- max(min(cola_max, COLA_target), cola_min)
# }
#******************************************
#   __Investment income  ####
#******************************************
# Ia(j), Ib(j), Ic(j) Components of expected investment income
penSim$Ia[j] <- with(penSim,  MA[j] * i[j])
penSim$Ib[j] <- with(penSim,  B[j] * i[j])
penSim$Ic[j] <- with(penSim,  C[j] * i[j])
# I.e(j) Expected investment income
penSim$I.e[j] <- with(penSim, i[j] * (MA[j] + C[j] - B[j] ))
# I.r(j) Actual investment income
penSim$I.r[j] <- with(penSim, i.r[j] *( MA[j] + C[j] - B[j]))   # C[j] should be multiplied by i.r if assuming contribution is made at year end.
# I.dif(j) = I.r(j) - I.e(j):  Difference between expected and actual investment incomes (for asset smoothing)
penSim$I.dif[j] <- with(penSim, I.r[j] - I.e[j])
}
as.data.frame(penSim)
}
stopCluster(cl)
penSim_results
#*****************************************************************************
#  Combining results into a data frame.   ####
#*****************************************************************************
# penSim_results %>% filter(sim == 3)
penSim_results <- bind_rows(penSim_results) %>%
mutate(
## Standard output variables
sim     = rep(-2:nsim, each = nyear),
sim_name = sim_paramlist_$sim_name,
val_name = sim_paramlist_$val_name,
# tier_Mode = tier_Mode,
# singleTier_select = singleTier_select,
# return_scenario = return_scenario,
# NOTE(review): exp(log(AL)) is mathematically just AL (for AL > 0);
# presumably a leftover from debugging — candidate for simplification.
FR_AA    = 100 * AA / exp(log(AL)),
FR_MA    = 100 * MA / exp(log(AL)),
NC_PR    = 100 * NC / PR,
SC_PR    = 100 * SC / PR,
ERC_PR   = 100 * ERC / PR,
EEC_PR   = 100 * EEC / PR,
C_PR     = 100 * C / PR,
B_PR     = 100 * B / PR,
ExF      = C - B,
ExF_PR   = 100 * ExF / PR,
ExF_MA   = 100 * ExF / MA,
PR.growth = ifelse(year > 1, 100 * (PR / lag(PR) - 1), NA)
## Plan specific variables
#
## Optional ratios
# UAAL_PR  = 100 * UAAL / PR,
# MA_PR    = 100 * MA / PR,
# AA_PR    = 100 * AA / PR,
# AL_PR    = 100 * AL / PR,
# AL.act_PR    = 100 * AL.act / PR,
# AL.la_PR     = 100 * AL.la / PR,
# # AL.ca_PR     = 100 * AL.ca / PR,
# AL.term_PR   = 100 * AL.term / PR,
# #AL._PR      = 100 * AL.Ben / PR,
# ADC_PR   = 100 * ADC / PR,
# NC.laca_PR    = 100 * NC.laca / PR,
# NC.v_PR   = 100 * NC.v / PR
) %>%
relocate(sim_name, val_name, sim, year)
return(penSim_results)
}
# penSim_results %>%
# filter(sim == 0) %>%
# select(year, NC_PR, ERC_PR, PVFB.tot, PVFS, PVFEEC, AA) %>%
# mutate(
# NCrate_AGG = 100* (PVFB.tot - AA)/PVFS,
# ERCrate_AGG = 100* (PVFB.tot - PVFEEC - AA)/PVFS )
#
|
70e4880293c5024c62714497fcf77fad84b4ff3e
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/SEHmodel/R/SEHmodel-package.R
|
3ea63d62ede77a30f3d9569887210cf290bcb54b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,266
|
r
|
SEHmodel-package.R
|
# SEHmodel-package.R
# Part of the SEHmodel package.
#
# Copyright (C) 2015 Melen Leclerc <melen.leclerc@rennes.inra.fr>
# Jean-Francois Rey <jean-francois.rey@paca.inra.fr>
# Samuel Soubeyrand <Samuel.Soubeyrand@avignon.inra.fr>
# Emily Walker <emily.walker@avignon.inra.fr>
# INRA - BioSP Site Agroparc - 84914 Avignon Cedex 9
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#' @encoding UTF-8
#' @title Spatial Exposure-Hazard Model for Exposure and Impact Assessment on Exposed Individuals
#' @description A model coupling polygon and point processes for assessing risk due to contaminant sources and their impact on exposed individuals
#' @aliases SEHmodel-package SEHmodel
#'
#' @author Emily Walker \email{emily.walker@@avignon.inra.fr}
#' @author Jean-Francois Rey \email{jean-francois.rey@@paca.inra.fr}
#' @author Melen Leclerc \email{melen.leclerc@@rennes.inra.fr}
#' @author Samuel Soubeyrand \email{Samuel.Soubeyrand@@avignon.inra.fr}
#' @author Marc Bourotte \email{marc.bourotte@@avignon.inra.fr}
#'
#' Maintainer: Jean-Francois REY \email{jean-francois.rey@@paca.inra.fr}
#' @docType package
#' @name SEHmodel-package
#' @details \tabular{ll}{
#' Package: \tab SEHmodel\cr
#' Type: \tab Package\cr
#' Version: \tab 0.0.11\cr
#' Date: \tab 2016-02-04\cr
#' License: \tab GPL (>=2)\cr
#' }
#'
#' The SEHmodel package contains functions and methods for quantifying spatio-temporal variation in contamination risk
#' around known polygon sources of contaminants, and for quantifying the impact of the contaminants on the surrounding population of individuals
#' who are located in habitat areas and are susceptible to the contaminants.
#'
#' The package implements an exposure-hazard model based on (i) tools of stochastic geometry (marked polygon and point processes)
#' for structuring the landscape and describing the location of exposed individuals,
#' (ii) a method based on a dispersal kernel describing the dissemination of contaminant particles from polygon sources,
#' and (iii) an ecotoxicological equation describing how contaminants affect individuals of the exposed population.
#'
#' @references Emily Walker and al. , SEHmodel: Spatially-explicit model for exposure and impact assessment. Submitted.
#' @keywords model spatial survival
#' @seealso \code{\link{demo.pollen.run}}
#' @examples
#' ## Run a simulation
#' \dontrun{
#' library("SEHmodel")
#' demo.pollen.run()
#' }
#' @import methods
NULL
|
f04395349382f0b457e83a1a261a31877accc966
|
687f406146fb5e9ab34fd5b7e3ed31efba195ed5
|
/PointsCleanUp.R
|
ca06142578554046168132dd3fc6535f87a4ca69
|
[] |
no_license
|
darwilliams/RandomForests
|
9065addecf83a777ef2acf0616e9a6be5983ac82
|
43cf200fffbd684bd867eff4e12679028f54bdf9
|
refs/heads/master
| 2021-01-22T02:39:18.489351
| 2016-12-06T20:17:41
| 2016-12-06T20:17:41
| 68,651,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62,780
|
r
|
PointsCleanUp.R
|
# Read in raw points and clean up column names and values, then export again.
# Note that you'll need to fix truncated column names on reimport.
#### Load Package ######
# All packages this script depends on; any that are missing are installed on
# the fly, then every one of them is attached.
list.of.packages <- c(
  "caret",
  "raster",
  "rgeos",
  "rgdal",
  "sp",
  "spdep",
  "spatstat",
  "gplots",
  "ggplot2",
  "plyr",
  "dplyr",  ## to be loaded before foreach to avoid "assertion failed" errors
  "magrittr",
  "rlist",
  "lazyeval",
  "randomForest",
  "rgl",
  "vegan",
  "snow",
  "lubridate",
  "doParallel",
  "foreach",
  "data.table",
  "tidyverse",
  "forcats",
  "stringr"
)
# Install only the packages that are not already present locally.
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[, "Package"])]
if (length(new.packages) > 0) {
  install.packages(new.packages)
}
# Attach every required package.
for (pack in list.of.packages) {
  library(pack, character.only = TRUE)
}
#### Read in Data #####
# Ground-truth training/validation points for Metro Vancouver, stored as a
# shapefile and loaded into a SpatialPointsDataFrame via rgdal::readOGR().
points.path <- "E:\\MetroVancouverData\\Training_Validation_Points"
points.filename <- "MetroVan_gt_Bins1_16"
points.raw <- readOGR(dsn=points.path, layer=points.filename)
# change column names to be meaningful for points and objects
# The CSV holds one full-length field name per row (DBF names are truncated
# to 10 characters); col_names = FALSE so the first row is treated as data.
new_names <- read_csv("E:\\MetroVancouverData\\Training_Validation_Points\\Bufs_joined\\Bin1point_buf_joinfieldnames.csv",col_names = FALSE)
new_names$X1
names(points.raw) <- new_names$X1
# drop previous spatial join info
names(points.raw)
drops2 <- c("CID.x","ORIG_FID","Shape_Leng","distance","CID.y","coords.x1","coords.x2")
points.raw.short <- points.raw[,!(names(points.raw) %in% drops2)]
names(points.raw.short)
head(points.raw.short@data)
# remove NA rows
# Keep only points that were actually interpreted at the 1 m Class 1 level.
indices <- !is.na(points.raw.short@data$One_m_Class_1_1st_choice)
indices
points.short <- (points.raw.short[which(indices),])
dim((points.raw.short))#should be 7506
dim((points.short)) #should be equal for Class 1
#note that for clouds/ice and shadow only class 1 exists.
#These values will have to be duplicated into class 2 and 3 slots for use in RF
#fix any mispelled class names for the ground truth points
#the fact that the attribute table data are imported as factors is really helpful
#I can see all the different permutations of the same entries in the dataset
(str(points.short@data))
summary(points.short)
# Inspect the raw (messy) factor levels before the cleanup sections below.
levels(points.short@data$One_m_Class_1_1st_choice)
levels(points.short@data$One_m_Class_2_1st_choice)
levels(points.short@data$One_m_Class_3_1st_choice)
##### ID unique values and tidy #####
##### One_m_Class_1_1st_choice ####
# Normalise the Level-1 (1 m) class labels: interpreters typed the same
# classes with inconsistent case and spelling, so collapse every variant
# onto the canonical label before re-factoring the column.
unique(points.short@data$One_m_Class_1_1st_choice)
# Ordered pattern -> replacement table.  ORDER MATTERS: later rules operate
# on the output of earlier ones, so do not re-sort this vector.
class1.1m.fix <- c(
  "bare"       = "Bare",
  "bare_$"     = "Bare",   # unreachable after "bare" -> "Bare"; kept to mirror the original rule order
  "Bare_"      = "Bare",
  "built-up"   = "Built-up",
  "Built_up"   = "Built-up",
  "Vegeation"  = "Vegetation",
  "vegetation" = "Vegetation",
  "^Veg$"      = "Vegetation",
  "^veg$"      = "Vegetation",
  "shadow"     = "Shadow",
  "^Ice$"      = "Clouds/Ice"
)
# gsub() coerces the factor column to character on the first pass.
for (i in seq_along(class1.1m.fix)) {
  points.short@data$One_m_Class_1_1st_choice <-
    gsub(names(class1.1m.fix)[i], class1.1m.fix[[i]],
         points.short@data$One_m_Class_1_1st_choice)
}
rm(class1.1m.fix)
# Special case: "non-photosynthetic_veg" is Level-1 Vegetation whose detail
# belongs at Level 2, so push the detail down a level before renaming.
npv.idx <- which(points.short@data$One_m_Class_1_1st_choice == "non-photosynthetic_veg")
points.short@data[npv.idx,]
levels(points.short@data$One_m_Class_2_1st_choice)
# NOTE(review): One_m_Class_2_1st_choice is still a factor at this point, so
# this assignment silently becomes NA unless "Non-photosynthetic" is already
# one of the levels printed above -- confirm.
points.short@data$One_m_Class_2_1st_choice[npv.idx] <- "Non-photosynthetic"
points.short@data$One_m_Class_2_1st_choice[npv.idx]
points.short@data[npv.idx,]
points.short@data$One_m_Class_1_1st_choice <-
  gsub("non-photosynthetic_veg", "Vegetation",
       points.short@data$One_m_Class_1_1st_choice)
# Mirror the same Level-2 fix in the 5 m columns for those points.
levels(points.short@data$Five_m_Class_2_1st_choice)
points.short@data$Five_m_Class_2_1st_choice[npv.idx] <- "Non-photosynthetic"
points.short@data$Five_m_Class_2_1st_choice[npv.idx]
points.short@data[npv.idx,]
points.short@data$Five_m_Class1_1st_choice <-
  gsub("non-photosynthetic_veg", "Vegetation",
       points.short@data$Five_m_Class1_1st_choice)
points.short@data[npv.idx,]
rm(npv.idx)
unique(points.short@data$One_m_Class_1_1st_choice)
# Re-factor now that the labels are canonical.
points.short@data$One_m_Class_1_1st_choice <- as.factor(points.short@data$One_m_Class_1_1st_choice)
levels(points.short@data$One_m_Class_1_1st_choice)
##### One_m_Class_2_1st_choice ####
# Normalise the Level-2 (1 m) class labels using the same data-driven
# approach: an ordered pattern table applied with gsub().
levels(points.short@data$One_m_Class_2_1st_choice)
unique(points.short@data$One_m_Class_2_1st_choice)
# First pass: case variants of "Barren".
class2.1m.fix.a <- c(
  "barren" = "Barren",
  "BArren" = "Barren"
)
for (i in seq_along(class2.1m.fix.a)) {
  points.short@data$One_m_Class_2_1st_choice <-
    gsub(names(class2.1m.fix.a)[i], class2.1m.fix.a[[i]],
         points.short@data$One_m_Class_2_1st_choice)
}
rm(class2.1m.fix.a)
# Check that the remaining "Bare" entries are not a class-hierarchy mix-up
# before folding them into "Barren".
bare.idx <- which(points.short@data$One_m_Class_2_1st_choice == "Bare")
points.short@data[bare.idx,]
rm(bare.idx)
# looks like it can be changed to Barren
# Ordered pattern -> replacement table.  ORDER MATTERS: e.g. "Building" ->
# "Buildings" turns an already-correct "Buildings" into "Buildingss", which
# the later "Buildingss" rule then repairs.  Do not re-sort.
class2.1m.fix.b <- c(
  "Bare"               = "Barren",
  "Building"           = "Buildings",
  "BUilding"           = "Buildings",
  "Buildingss"         = "Buildings",
  "building"           = "Buildings",
  "^Grass$"            = "Grass-herb",
  "Grass-Hern"         = "Grass-herb",
  "Grass-Herb"         = "Grass-herb",
  "Grass_Herb"         = "Grass-herb",
  "grass-herb"         = "Grass-herb",
  "Non-photosynthic"   = "Non-photosynthetic",
  "Non-photostnthetic" = "Non-photosynthetic",
  "Non_photosynthetic" = "Non-photosynthetic",
  "Non-photsynthetic"  = "Non-photosynthetic",
  "paved"              = "Paved",
  "Paved_"             = "Paved",
  "PAved"              = "Paved",
  "SHrub"              = "Shrub",
  "shrub"              = "Shrub",
  "Shrubs"             = "Shrub",
  "shurb"              = "Shrub",
  "SOil"               = "Soil",
  "soil"               = "Soil",
  "Tree_Canopy"        = "Tree_canopy",
  "tree_canopy"        = "Tree_canopy",
  "Tree_canopy"        = "Trees"
)
for (i in seq_along(class2.1m.fix.b)) {
  points.short@data$One_m_Class_2_1st_choice <-
    gsub(names(class2.1m.fix.b)[i], class2.1m.fix.b[[i]],
         points.short@data$One_m_Class_2_1st_choice)
}
rm(class2.1m.fix.b)
points.short@data$One_m_Class_2_1st_choice <- as.factor(points.short@data$One_m_Class_2_1st_choice)
levels(points.short@data$One_m_Class_2_1st_choice)
##### One_m_Class_3_1st_choice ####
# Normalise the Level-3 (1 m) class labels.
levels(points.short@data$One_m_Class_3_1st_choice)
unique(points.short@data$One_m_Class_3_1st_choice)
# Ordered pattern -> replacement table.  ORDER MATTERS: several rules mangle
# intermediate values (e.g. "Natural_Barre" turns "Natural_Barren" into
# "Natural_barrenn") and rely on a later rule to repair them.  Do not
# re-sort this vector; some rules are unreachable but are kept so the
# replacement sequence matches the original script exactly.
class3.1m.fix <- c(
  "Natual_Barren"       = "Natural_barren",
  "Natural_Barre"       = "Natural_barren",
  "Natural_barrenn"     = "Natural_barren",
  "Modifeid_G-H"        = "Modified_G-H",
  "modified_G-H"        = "Modified_G-H",
  "Modified_G_H"        = "Modified_G-H",
  "modified_grass-herb" = "Modified_G-H",
  "Mofified_G-H"        = "Modified_G-H",
  "Modified_G-H`"       = "Modified_G-H",
  "^Linear_Paved"       = "Linear_paved",
  "^linear_paved"       = "Linear_paved",
  "^Linear_Paved_"      = "Linear_paved",
  "^Linear_paved_"      = "Linear_paved",
  "Non_linear_Paved"    = "Non-linear_paved",
  "Non_Linear_paved"    = "Non-linear_paved",
  "Non_linear_paved"    = "Non-linear_paved",
  "Non-Linear_paved"    = "Non-linear_paved",
  "non-linear_Paved"    = "Non-linear_paved",
  "non-Linear_paved"    = "Non-linear_paved",
  "Non-linear_Paved"    = "Non-linear_paved",
  "Non-linear_paved"    = "Non-linear_paved",
  "Non-Linear_Paved"    = "Non-linear_paved",
  "non-linear_paved"    = "Non-linear_paved",
  "Modifiied_Barren"    = "Modified_barren",
  "Modified_Barren"     = "Modified_barren",
  "Modified_BArren"     = "Modified_barren",
  "Modifeid_Soil"       = "Modified_soil",
  "Modified_Soil"       = "Modified_soil",
  "modified_soil"       = "Modified_soil",
  "Modified_soil"       = "Modified_soil",
  "modified_Soil"       = "Modified_soil",
  "Unpaved_Linear"      = "Linear_unpaved",
  "UnPaved_Linear"      = "Linear_unpaved",
  "Unpaved_linear"      = "Linear_unpaved",
  "unpaved_linear"      = "Linear_unpaved",
  "linear_unpaved"      = "Linear_unpaved",
  "unpaved_Linear"      = "Linear_unpaved",
  "Linear_pavedelev"    = "Linear_paved_elevated",
  "Decidous"            = "Deciduous",
  "deciduous"           = "Deciduous",
  "Confierous"          = "Coniferous",
  "^Mixed$"             = "Mixed_tree"
)
for (i in seq_along(class3.1m.fix)) {
  points.short@data$One_m_Class_3_1st_choice <-
    gsub(names(class3.1m.fix)[i], class3.1m.fix[[i]],
         points.short@data$One_m_Class_3_1st_choice)
}
rm(class3.1m.fix)
# Ambiguous leftovers were inspected row by row before recoding:
# "Non-linear" / "Non-Linear" -> Non-linear_paved
chk.idx <- which(points.short@data$One_m_Class_3_1st_choice == "Non-linear")
points.short@data[chk.idx,]
points.short@data$One_m_Class_3_1st_choice <-
  gsub("^Non-linear$", "Non-linear_paved", points.short@data$One_m_Class_3_1st_choice)
chk.idx <- which(points.short@data$One_m_Class_3_1st_choice == "Non-Linear")
points.short@data[chk.idx,]
points.short@data$One_m_Class_3_1st_choice <-
  gsub("^Non-Linear$", "Non-linear_paved", points.short@data$One_m_Class_3_1st_choice)
# "Natural" -> Natural_G-H
chk.idx <- which(points.short@data$One_m_Class_3_1st_choice == "Natural")
points.short@data[chk.idx,]
points.short@data$One_m_Class_3_1st_choice <-
  gsub("^Natural$", "Natural_G-H", points.short@data$One_m_Class_3_1st_choice)
# "Canopy" -> Coniferous (recode chosen after inspecting these rows)
chk.idx <- which(points.short@data$One_m_Class_3_1st_choice == "Canopy")
points.short@data[chk.idx,]
rm(chk.idx)
points.short@data$One_m_Class_3_1st_choice <-
  gsub("^Canopy$", "Coniferous", points.short@data$One_m_Class_3_1st_choice)
unique(points.short@data$One_m_Class_3_1st_choice)
points.short@data$One_m_Class_3_1st_choice <- as.factor(points.short@data$One_m_Class_3_1st_choice)
levels(points.short@data$One_m_Class_3_1st_choice)
#### Five_m_Class1_1st_choice #####
# Normalise the Level-1 (5 m) class labels, mirroring the 1 m cleanup.
unique(points.short@data$Five_m_Class1_1st_choice)
# Ordered pattern -> replacement table; do not re-sort (later rules clean up
# the output of earlier ones, and some are intentionally-kept dead rules).
class1.5m.fix <- c(
  "bare"       = "Bare",
  "bare_$"     = "Bare",   # unreachable after "bare" -> "Bare"; kept to mirror the original order
  "Bare_"      = "Bare",
  "built-up"   = "Built-up",
  "Built_up"   = "Built-up",
  "Built-up_"  = "Built-up",
  "Built_-up"  = "Built-up",
  "Vegeation"  = "Vegetation",
  "vegetation" = "Vegetation",
  "Vegetaion"  = "Vegetation",
  "^Veg$"      = "Vegetation",
  "^Vef$"      = "Vegetation",
  "^veg$"      = "Vegetation",
  "shadow"     = "Shadow",
  "^Ice$"      = "Clouds/Ice"
)
# gsub() coerces the factor column to character on the first pass.
for (i in seq_along(class1.5m.fix)) {
  points.short@data$Five_m_Class1_1st_choice <-
    gsub(names(class1.5m.fix)[i], class1.5m.fix[[i]],
         points.short@data$Five_m_Class1_1st_choice)
}
rm(class1.5m.fix)
unique(points.short@data$Five_m_Class1_1st_choice)
points.short@data$Five_m_Class1_1st_choice <- as.factor(points.short@data$Five_m_Class1_1st_choice)
levels(points.short@data$Five_m_Class1_1st_choice)
#### Five_m_Class_2_1st_choice ####
# Normalise the Level-2 (5 m) class labels.
unique(points.short@data$Five_m_Class_2_1st_choice)
levels(points.short@data$Five_m_Class_2_1st_choice)
# Ordered pattern -> replacement table.  ORDER MATTERS (e.g. the "Building"
# rule creates "Buildingss" artefacts that the later "Buildingss" rule
# repairs).  Duplicate and dead rules from the original script are kept so
# the replacement sequence -- and therefore the result -- is unchanged.
class2.5m.fix <- c(
  "barren"              = "Barren",
  "BArren"              = "Barren",
  "Bareen"              = "Barren",
  "Barren_"             = "Barren",
  "Buid"                = "Buildings",
  "Building"            = "Buildings",
  "building"            = "Buildings",
  "Buildings_"          = "Buildings",
  "Buildingss"          = "Buildings",
  "grass-herb"          = "Grass-herb",
  "Grass-Herb"          = "Grass-herb",
  "Grass_Herb"          = "Grass-herb",
  "Non-_Photosynthetic" = "Non-photosynthetic",
  "Non-Photosynthetic"  = "Non-photosynthetic",
  "Non-photsynthetic"   = "Non-photosynthetic",
  "Non_photostnthetic"  = "Non-photosynthetic",
  "Non_photosynthetic"  = "Non-photosynthetic",
  "other_built"         = "Other_built",
  "Other_Built"         = "Other_built",
  "paved"               = "Paved",
  "Paved_"              = "Paved",
  "Paved_"              = "Paved",
  "Paved__"             = "Paved",
  "PAved"               = "Paved",
  "shrub"               = "Shrub",
  "Shrubs"              = "Shrub",
  "soil"                = "Soil",
  "tree_Canopy"         = "Trees",
  "Tree_Canopy"         = "Trees",
  "Tree_Canopy_"        = "Trees",
  "Tree_canopy_"        = "Trees",
  "Tree_canopy"         = "Trees",
  "Trees_"              = "Trees"
)
for (i in seq_along(class2.5m.fix)) {
  points.short@data$Five_m_Class_2_1st_choice <-
    gsub(names(class2.5m.fix)[i], class2.5m.fix[[i]],
         points.short@data$Five_m_Class_2_1st_choice)
}
rm(class2.5m.fix)
points.short@data$Five_m_Class_2_1st_choice <- as.factor(points.short@data$Five_m_Class_2_1st_choice)
levels(points.short@data$Five_m_Class_2_1st_choice)
#### For all 1st choice shrub entries, assign shrub to Class 3 and turn Class 2 shrubs into "Trees" ####
# In the final hierarchy, Shrub belongs under Trees at Level 2 with Shrub as
# the Level-3 detail.  Do the swap at 1 m first...
a <- points.short
a <- which(a@data$One_m_Class_2_1st_choice == "Shrub")
points.short@data[a,]
levels(points.short@data$One_m_Class_3_1st_choice)
# NOTE(review): One_m_Class_3_1st_choice is a factor here (re-factored in the
# Level-3 cleanup above); if "Shrub" is not among the levels printed on the
# previous line this assignment silently produces NA -- confirm.
points.short@data$One_m_Class_3_1st_choice[a] <- "Shrub"
points.short@data$One_m_Class_3_1st_choice[a]
head(points.short@data[a,])
points.short@data$One_m_Class_2_1st_choice[a] <- "Trees"
head(points.short@data[a,])
# ...then repeat the same swap for the 5 m interpretation.
a <- points.short
a <- which(a@data$Five_m_Class_2_1st_choice == "Shrub")
points.short@data[a,]
levels(points.short@data$Five_m_Class_3_1st_choice)
# NOTE(review): Five_m_Class_3_1st_choice has not been cleaned yet at this
# point and is still the raw imported factor -- same NA caveat as above.
points.short@data$Five_m_Class_3_1st_choice[a] <- "Shrub"
points.short@data$Five_m_Class_3_1st_choice[a]
head(points.short@data[a,])
points.short@data$Five_m_Class_2_1st_choice[a] <- "Trees"
head(points.short@data[a,])
#### Five_m_Class_3_1st_choice ####
levels(points.short@data$Five_m_Class_3_1st_choice)
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Confierous", replacement = "Coniferous")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Coniferious", replacement = "Coniferous")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Decidious", replacement = "Deciduous")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Decidous", replacement = "Deciduous")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Deciduoud", replacement = "Deciduous")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "deciduous", replacement = "Deciduous")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "^linear_paved", replacement = "Linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "^Linear_Paved", replacement = "Linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "^Linear_Paved_", replacement = "Linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "^Linear_paved_", replacement = "Linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "linear_unpaved", replacement = "Linear_unpaved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "unpaved_linear", replacement = "Linear_unpaved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Unpaved_linear", replacement = "Linear_unpaved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Unpaved_Linear", replacement = "Linear_unpaved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "UnPaved_Linear", replacement = "Linear_unpaved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "^Mixed$", replacement = "Mixed_tree")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modfied_Barren", replacement = "Modified_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modified_Barren", replacement = "Modified_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Barren_Modified", replacement = "Modified_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modifeid_G-H", replacement = "Modified_G-H")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modifid_G-H", replacement = "Modified_G-H")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modified_G-h", replacement = "Modified_G-H")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "modified_grass-herb", replacement = "Modified_G-H")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modified_grass-herb", replacement = "Modified_G-H")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modified_Grass_Herb", replacement = "Modified_G-H")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modifies_G-H", replacement = "Modified_G-H")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "modified_soil", replacement = "Modified_soil")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modified_Soil", replacement = "Modified_soil")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modified_SOil", replacement = "Modified_soil")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Modified_soil", replacement = "Modified_soil")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "modified_Soil", replacement = "Modified_soil")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Natural_Bare", replacement = "Natural_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Natural_Baren", replacement = "Natural_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Natural_Barre", replacement = "Natural_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Natural_Barren", replacement = "Natural_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Natural_BArren", replacement = "Natural_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Natural_Barrren", replacement = "Natural_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Naural_Barren", replacement = "Natural_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Natural_barrenn", replacement = "Natural_barren")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Non-linear_Paved", replacement = "Non-linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Non-Linear_paved", replacement = "Non-linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Non-Linear_Paved", replacement = "Non-linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Non-linear_paveed", replacement = "Non-linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Non_linear_Paved", replacement = "Non-linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Non_Linear_paved", replacement = "Non-linear_paved")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Linear_paved_elev", replacement = "Linear_paved_elevated")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Linear_pavedelev", replacement = "Linear_paved_elevated")
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "Other_Built", replacement = "Other_built")
# Labels that need a manual decision rather than a simple respelling:
#to check
# "Linear"
# "Non-linear"
#"Water"
#"Water" appears twice in the original checklist:
#"Non-linear"
# Inspect rows labelled exactly "Non-linear", then recode with an anchored
# regex so longer labels such as "Non-linear_paved" are left untouched.
a <- points.short
a <- which(a@data$Five_m_Class_3_1st_choice == "Non-linear")
points.short@data[a,]
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "^Non-linear$", replacement = "Non-linear_paved")
#"Linear"
# Same treatment for rows labelled exactly "Linear" (anchored, so
# "Linear_paved_elevated" etc. are not touched).
a <- points.short
a <- which(a@data$Five_m_Class_3_1st_choice == "Linear")
points.short@data[a,]
points.short@data$Five_m_Class_3_1st_choice <- points.short@data$Five_m_Class_3_1st_choice %>%
gsub(pattern = "^Linear$", replacement = "Linear_paved")
#"Water"
# "Water" does not belong at class level 3: record it in the 2nd-choice
# class-1/class-2 columns and clear the 1st-choice class-2/class-3 entries.
a <- points.short
a <- which(a@data$Five_m_Class_3_1st_choice == "Water")
points.short@data[a,]
points.short@data$Five_m_Class1_2nd_choice[a] <- "Water"
points.short@data$Five_m_Class_2_1st_choice[a] <- NA
points.short@data$Five_m_Class_2_2nd_choice[a] <- "Water"
points.short@data$Five_m_Class_3_1st_choice[a] <- NA
# Final visual check of the cleaned labels, then convert back to factor.
unique(points.short@data$Five_m_Class_3_1st_choice)
points.short@data$Five_m_Class_3_1st_choice <- as.factor(points.short@data$Five_m_Class_3_1st_choice)
levels(points.short@data$Five_m_Class_3_1st_choice)
#### 2nd choice variables ####
#### One_m_Class_1_2nd_choice ####
# Inspect the raw values/levels before cleaning.
unique(points.short@data$One_m_Class_1_2nd_choice)
levels(points.short@data$One_m_Class_1_2nd_choice)
# A stray backtick was entered as a value -> treat it as missing.
a <- points.short
a <- which(a@data$One_m_Class_1_2nd_choice == "`")
points.short@data[a,]
points.short@data$One_m_Class_1_2nd_choice[a] <- NA
# Capitalisation fixes (each gsub coerces the column to character; it is
# converted back to factor at the end of this section).
points.short@data$One_m_Class_1_2nd_choice <- points.short@data$One_m_Class_1_2nd_choice %>%
gsub(pattern = "bare", replacement = "Bare")
points.short@data$One_m_Class_1_2nd_choice <- points.short@data$One_m_Class_1_2nd_choice %>%
gsub(pattern = "vegetation", replacement = "Vegetation")
points.short@data$One_m_Class_1_2nd_choice <- points.short@data$One_m_Class_1_2nd_choice %>%
gsub(pattern = "^Veg$", replacement = "Vegetation")
points.short@data$One_m_Class_1_2nd_choice <- points.short@data$One_m_Class_1_2nd_choice %>%
gsub(pattern = "shadow", replacement = "Shadow")
points.short@data$One_m_Class_1_2nd_choice <- points.short@data$One_m_Class_1_2nd_choice %>%
gsub(pattern = "water", replacement = "Water")
# "non-photosynthetic_veg" is too specific for class 1: push the detail down
# to One_m_Class_2_2nd_choice, then generalise class 1 to "Vegetation".
a <- points.short
a <- which(a@data$One_m_Class_1_2nd_choice == "non-photosynthetic_veg")
points.short@data[a,]
levels(points.short@data$One_m_Class_2_2nd_choice)
points.short@data$One_m_Class_2_2nd_choice[a] <- "Non-photosynthetic"
points.short@data$One_m_Class_2_2nd_choice[a]
points.short@data[a,]
points.short@data$One_m_Class_1_2nd_choice <- points.short@data$One_m_Class_1_2nd_choice %>%
gsub(pattern = "non-photosynthetic_veg", replacement = "Vegetation")
# Back to factor now that the labels are consistent.
points.short@data$One_m_Class_1_2nd_choice <- as.factor(points.short@data$One_m_Class_1_2nd_choice)
#### One_m_Class_2_2nd_choice ####
# Inspect current levels before cleaning.
levels(points.short@data$One_m_Class_2_2nd_choice)
# Normalise typo/case variants.  Pairs are applied sequentially with gsub(),
# preserving the order of the original chain of piped gsub() calls (order
# matters: "Tree_Canopy" -> "Tree_canopy" -> "Trees" is a deliberate
# two-step recoding).
# FIX: "Building" is now anchored ("^Building$").  Unanchored, it also
# matches inside an already-correct "Buildings" entry, turning it into
# "Buildingss".
onem.c2.second.fixes <- c(
  "Barren__"           = "Barren",
  "^Building$"         = "Buildings",
  "grass-herb"         = "Grass-herb",
  "Grass-Herb"         = "Grass-herb",
  "Grass_herb"         = "Grass-herb",
  "Non-photosythetic"  = "Non-photosynthetic",
  "Non_photosynthetic" = "Non-photosynthetic",
  "Other_Built"        = "Other_built",
  "soil"               = "Soil",
  "Tree_Canopy"        = "Tree_canopy",
  "Tree_canopy"        = "Trees"
)
for (patt in names(onem.c2.second.fixes)) {
  points.short@data$One_m_Class_2_2nd_choice <-
    gsub(patt, onem.c2.second.fixes[[patt]],
         points.short@data$One_m_Class_2_2nd_choice)
}
# Visual check, then back to factor.
unique(points.short@data$One_m_Class_2_2nd_choice)
points.short@data$One_m_Class_2_2nd_choice <- as.factor(points.short@data$One_m_Class_2_2nd_choice)
levels(points.short@data$One_m_Class_2_2nd_choice)
#### One_m_Class_3_2nd_choice ####
# Inspect current levels before cleaning.
levels(points.short@data$One_m_Class_3_2nd_choice)
# Tidy the level-3 second-choice labels: the same pattern -> replacement
# pairs as before, applied sequentially with gsub() in the same order, but
# driven by a lookup table instead of a long chain of piped gsub() calls.
onem.c3.second.fixes <- c(
  "deciduous"           = "Deciduous",
  "^Linear_Paved"       = "Linear_paved",
  "Unpaved_linear"      = "Linear_unpaved",
  "Modified_Barren"     = "Modified_barren",
  "modified_grass-herb" = "Modified_G-H",
  "modified_soil"       = "Modified_soil",
  "Modified_Soil"       = "Modified_soil",
  "Non-linear_Paved"    = "Non-linear_paved"
)
for (patt in names(onem.c3.second.fixes)) {
  points.short@data$One_m_Class_3_2nd_choice <-
    gsub(patt, onem.c3.second.fixes[[patt]],
         points.short@data$One_m_Class_3_2nd_choice)
}
# Back to factor once the labels are consistent.
points.short@data$One_m_Class_3_2nd_choice <- as.factor(points.short@data$One_m_Class_3_2nd_choice)
#### Five_m_Class1_2nd_choice ####
# Inspect raw levels, then fix capitalisation/typo variants with a chain of
# piped gsub() calls (each call coerces the factor column to character).
levels(points.short@data$Five_m_Class1_2nd_choice)
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "bare", replacement = "Bare")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "BAre", replacement = "Bare")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "Bare_", replacement = "Bare")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "built-up", replacement = "Built-up")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "Built_up", replacement = "Built-up")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "^Clouds$", replacement = "Clouds/Ice")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "^veg$", replacement = "Vegetation")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "^Veg$", replacement = "Vegetation")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "Vegatation", replacement = "Vegetation")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "vegetation", replacement = "Vegetation")
points.short@data$Five_m_Class1_2nd_choice <- points.short@data$Five_m_Class1_2nd_choice %>%
gsub(pattern = "shadow", replacement = "Shadow")
# Values that need a manual decision rather than a respelling:
#to check
# "Barren"
# "Grass-Herb"
# "non-photosynthetic_veg"
# "Barren"
# "Barren" is a class-2 term; at class 1 it should be "Bare".
a <- points.short
a <- which(a@data$Five_m_Class1_2nd_choice == "Barren")
points.short@data[a,]
points.short@data$Five_m_Class1_2nd_choice[a] <- "Bare"
points.short@data[a,]
#"Grass-Herb"
# "Grass-Herb" is too specific for class 1: generalise to "Vegetation" and
# push the detail down into the class-2 / class-3 2nd-choice columns.
a <- points.short
a <- which(a@data$Five_m_Class1_2nd_choice == "Grass-Herb")
points.short@data[a,]
points.short@data$Five_m_Class1_2nd_choice[a] <- "Vegetation"
points.short@data$Five_m_Class_2_2nd_choice[a] <- "Grass-herb"
points.short@data$Five_m_Class_3_2nd_choice[a] <- "Modified_G-H"
# "non-photosynthetic_veg"
# Same idea: class 1 becomes "Vegetation", class 2 keeps the detail.
a <- points.short
a <- which(a@data$Five_m_Class1_2nd_choice == "non-photosynthetic_veg")
points.short@data[a,]
levels(points.short@data$Five_m_Class_2_2nd_choice)
points.short@data$Five_m_Class1_2nd_choice[a] <- "Vegetation"
points.short@data$Five_m_Class_2_2nd_choice[a] <- "Non-photosynthetic"
# Visual check, then back to factor.
unique(points.short@data$Five_m_Class1_2nd_choice)
points.short@data$Five_m_Class1_2nd_choice <- as.factor(points.short@data$Five_m_Class1_2nd_choice)
#### Five_m_Class_2_2nd_choice ####
# Inspect current levels before cleaning.
levels(points.short@data$Five_m_Class_2_2nd_choice)
# Normalise typo/case variants, applied sequentially with gsub() in the
# order listed (order matters for the multi-step recodings, e.g.
# "shrubs" -> "Shrubs" -> "Shrub").
# FIX 1: "Building" is anchored ("^Building$") so that an already-correct
#        "Buildings" entry is not corrupted to "Buildingss" by a substring
#        match.
# FIX 2: "Tree_Canopy_" is now applied BEFORE "Tree_Canopy".  In the
#        original order "Tree_Canopy" fired first, turning "Tree_Canopy_"
#        into "Trees_", which the later "Tree_Canopy_" pattern could then
#        never match — leaving a stray "Trees_" label.
# NOTE(review): "paved" is an unanchored substring pattern, as in the
#        original; if this column ever contains an "unpaved"-style label it
#        would become "unPaved" — confirm against the raw data.
fivem.c2.second.fixes <- c(
  "barren"       = "Barren",
  "^Building$"   = "Buildings",
  "grass-herb"   = "Grass-herb",
  "Grass-Herb"   = "Grass-herb",
  "^pav$"        = "Paved",
  "paved"        = "Paved",
  "shrub"        = "Shrub",
  "Shrubs"       = "Shrub",
  "soil"         = "Soil",
  "tree_canopy"  = "Trees",
  "Tree_canopy"  = "Trees",
  "Tree_Canopy_" = "Trees",
  "Tree_Canopy"  = "Trees"
)
for (patt in names(fivem.c2.second.fixes)) {
  points.short@data$Five_m_Class_2_2nd_choice <-
    gsub(patt, fivem.c2.second.fixes[[patt]],
         points.short@data$Five_m_Class_2_2nd_choice)
}
# Values that need a manual decision:
#to check
"_"
"p"
# A bare "_" carries no information -> treat as missing.
a <- points.short
a <- which(a@data$Five_m_Class_2_2nd_choice == "_")
points.short@data[a,]
points.short@data$Five_m_Class_2_2nd_choice[a] <- NA
# A lone "p" is a truncated "Paved".
a <- points.short
a <- which(a@data$Five_m_Class_2_2nd_choice == "p")
points.short@data[a,]
points.short@data$Five_m_Class_2_2nd_choice[a] <- "Paved"
# Back to factor once the labels are consistent.
points.short@data$Five_m_Class_2_2nd_choice <- as.factor(points.short@data$Five_m_Class_2_2nd_choice)
levels(points.short@data$Five_m_Class_2_2nd_choice)
#### For all 2nd choice shrub entries, assign shrub to Class 3 and turn Class 2 shrubs into "Trees" ####
# 1 m scale: find rows whose class-2 2nd choice is "Shrub".
a <- points.short
a <- which(a@data$One_m_Class_2_2nd_choice == "Shrub")
points.short@data[a,]
levels(points.short@data$One_m_Class_3_2nd_choice)
# Convert class 3 to character first so "Shrub" can be assigned even though
# it is not yet a factor level; converted back to factor below.
points.short@data$One_m_Class_3_2nd_choice <- as.character(points.short@data$One_m_Class_3_2nd_choice)
points.short@data$One_m_Class_3_2nd_choice[a] <- "Shrub"
points.short@data$One_m_Class_3_2nd_choice[a]
head(points.short@data[a,])
# Class 2 then takes the broader "Trees" label ("Trees" is already a level
# after the earlier Tree_canopy -> Trees recoding).
points.short@data$One_m_Class_2_2nd_choice[a] <- "Trees"
head(points.short@data[a,])
points.short@data$One_m_Class_3_2nd_choice <- as.factor(points.short@data$One_m_Class_3_2nd_choice)
# 5 m scale: same reassignment.
a <- points.short
a <- which(a@data$Five_m_Class_2_2nd_choice == "Shrub")
points.short@data[a,]
levels(points.short@data$Five_m_Class_3_2nd_choice)
points.short@data$Five_m_Class_3_2nd_choice <- as.character(points.short@data$Five_m_Class_3_2nd_choice)
points.short@data$Five_m_Class_3_2nd_choice[a] <- "Shrub"
points.short@data$Five_m_Class_3_2nd_choice[a]
head(points.short@data[a,])
points.short@data$Five_m_Class_2_2nd_choice[a] <- "Trees"
head(points.short@data[a,])
points.short@data$Five_m_Class_3_2nd_choice <- as.factor(points.short@data$Five_m_Class_3_2nd_choice)
#### Five_m_Class_3_2nd_choice ####
# Inspect current levels before cleaning.
levels(points.short@data$Five_m_Class_3_2nd_choice)
# Normalise typo/case variants, applied sequentially with gsub() in the
# original order.  Two redundant entries from the original chain were
# dropped because they cannot change the result:
#   * a duplicated "Natural_Barern" pair (the identical line appeared twice)
#   * a no-op pair whose pattern equalled its replacement ("Modified_G-H")
# TODO(review): "Paved_Linear" is mapped to "Linear_unpaved" (kept as-is to
# preserve behaviour), but every other variant maps paved->paved and
# unpaved->unpaved — confirm this was not meant to be "Linear_paved".
fivem.c3.second.fixes <- c(
  "Conifeous"           = "Coniferous",
  "Deciduosu"           = "Deciduous",
  "deciduous"           = "Deciduous",
  "^linear_paved"       = "Linear_paved",
  "^Linear_Paved"       = "Linear_paved",
  "linear_unpaved"      = "Linear_unpaved",
  "Paved_Linear"        = "Linear_unpaved",
  "unpaved_linear"      = "Linear_unpaved",
  "unPaved_linear"      = "Linear_unpaved",
  "Unpaved_linear"      = "Linear_unpaved",
  "Unpaved_Linear"      = "Linear_unpaved",
  "unpaved_Linear"      = "Linear_unpaved",
  "Modifeid_G-H"        = "Modified_G-H",
  "Modified_G_H"        = "Modified_G-H",
  "modified_grass-herb" = "Modified_G-H",
  "Modified_grass-herb" = "Modified_G-H",
  "Modifies_G-H"        = "Modified_G-H",
  "Modifid_soil"        = "Modified_soil",
  "modified_soil"       = "Modified_soil",
  "Modified_Soil"       = "Modified_soil",
  "Modfied_Barren"      = "Modified_barren",
  "Natural_Barern"      = "Natural_barren",
  "Natural_Barren"      = "Natural_barren",
  "Non-linear_Paved"    = "Non-linear_paved"
)
for (patt in names(fivem.c3.second.fixes)) {
  points.short@data$Five_m_Class_3_2nd_choice <-
    gsub(patt, fivem.c3.second.fixes[[patt]],
         points.short@data$Five_m_Class_3_2nd_choice)
}
# Back to factor once the labels are consistent.
points.short@data$Five_m_Class_3_2nd_choice <- as.factor(points.short@data$Five_m_Class_3_2nd_choice)
#remove classifier notes column
points.short@data$Classifier_notes <- NULL
##### write out cleaned up points ####
points.path <- "E:\\MetroVancouverData\\Training_Validation_Points"
points.filename <- "MetroVan_gt_Bins1_16_tidy"
# Save the full column names to CSV before writing the shapefile —
# presumably because ESRI shapefiles truncate field names to 10 characters,
# so the names are restored from this CSV after reading back (verify).
points_variables <- as.data.frame(names(points.short))
write_csv(points_variables,paste0(points.path,"\\points_variables_tidy.csv"),col_names = FALSE)
writeOGR(points.short, points.path, points.filename, driver="ESRI Shapefile", overwrite_layer=TRUE)
#load tidy data
points.path <- "E:\\MetroVancouverData\\Training_Validation_Points"
points.filename <- "MetroVan_gt_Bins1_16_tidy"
#### read in cleaned up points ####
points.clean <- readOGR(dsn=points.path, layer=points.filename)
# change column names to be meaningful for points and objects
names (points.clean)
# Restore the full (untruncated) column names saved above.
points.names <- read_csv(file.path(points.path,"points_variables_tidy.csv"),col_names = FALSE)
names (points.clean) <- points.names$X1
names(points.clean)
##### fill na values ####
#### one_m ####
head(points.clean@data)
which(is.na(points.clean@data$One_m_Class_1_1st_choice)) #check for na's in row 1
# Find class-2 NAs and see what their class-1 label is.
onem.class2.na.index <- which(is.na(points.clean@data$One_m_Class_2_1st_choice))
points.clean@data$One_m_Class_1_1st_choice[onem.class2.na.index]
#looks like a couple Built-up entries didn't get class 2 inputs
# NOTE(review): rows 5450/5573 are hard-coded from an interactive session;
# they are only valid for this exact input file.
points.clean@data[c(5450,5573),]
points.clean@data$One_m_Class_2_1st_choice[c(5450,5573)] <- "Paved"
points.clean@data[c(5450,5573),]
#add shadow to class 2 and 3
# fct_expand() adds the new level first so the assignment below does not
# produce NAs with an "invalid factor level" warning.
onem.class2.na.index <- which(is.na(points.clean@data$One_m_Class_2_1st_choice))
onem.shadow.index <- which(points.clean@data$One_m_Class_1_1st_choice == "Shadow")
points.clean@data[onem.shadow.index,]
levels(points.clean@data$One_m_Class_2_1st_choice)
points.clean@data$One_m_Class_2_1st_choice <- forcats::fct_expand(points.clean@data$One_m_Class_2_1st_choice, "Shadow")
points.clean@data$One_m_Class_2_1st_choice[onem.shadow.index] <- "Shadow"
levels(points.clean@data$One_m_Class_3_1st_choice)
points.clean@data$One_m_Class_3_1st_choice <- forcats::fct_expand(points.clean@data$One_m_Class_3_1st_choice, "Shadow")
points.clean@data$One_m_Class_3_1st_choice[onem.shadow.index] <- "Shadow"
#add clouds/ice to class 2 and 3
onem.cloudice.index <- which(points.clean@data$One_m_Class_1_1st_choice == "Clouds/Ice")
points.clean@data[onem.cloudice.index,]
levels(points.clean@data$One_m_Class_2_1st_choice)
points.clean@data$One_m_Class_2_1st_choice <- forcats::fct_expand(points.clean@data$One_m_Class_2_1st_choice, "Clouds/Ice")
points.clean@data$One_m_Class_2_1st_choice[onem.cloudice.index] <- "Clouds/Ice"
levels(points.clean@data$One_m_Class_3_1st_choice)
points.clean@data$One_m_Class_3_1st_choice <- forcats::fct_expand(points.clean@data$One_m_Class_3_1st_choice, "Clouds/Ice")
points.clean@data$One_m_Class_3_1st_choice[onem.cloudice.index] <- "Clouds/Ice"
#look at Bare w/ NAs
# Manual completion of two Bare rows across both scales (hard-coded rows).
points.clean@data[c(3717,3814),]
points.clean@data$One_m_Class_2_1st_choice[c(3717,3814)] <- "Barren"
points.clean@data$One_m_Class_2_2nd_choice[c(3717,3814)] <- NA
points.clean@data$One_m_Class_3_1st_choice[c(3717,3814)] <- "Natural_barren"
points.clean@data$One_m_Class_3_2nd_choice[c(3717,3814)] <- NA
points.clean@data$Five_m_Class1_1st_choice[c(3717,3814)] <- "Bare"
points.clean@data$Five_m_Class_2_1st_choice[c(3717,3814)] <- "Barren"
points.clean@data$Five_m_Class_3_1st_choice[c(3717,3814)] <- "Natural_barren"
points.clean@data$Five_m_Class_3_2nd_choice[c(3717,3814)] <- NA
#look at Water w/ NAs
# Water propagates unchanged down the class hierarchy.
points.clean@data[c(3757,3854),]
points.clean@data$One_m_Class_2_1st_choice[c(3757,3854)] <- "Water"
levels(points.clean@data$One_m_Class_3_1st_choice)
points.clean@data$One_m_Class_3_1st_choice <- forcats::fct_expand(points.clean@data$One_m_Class_3_1st_choice, "Water")
points.clean@data$One_m_Class_3_1st_choice[c(3757,3854)] <- "Water"
#Class 3 na's
which(is.na(points.clean@data$One_m_Class_3_1st_choice))
onem.class3.na.index <- which(is.na(points.clean@data$One_m_Class_3_1st_choice))
unique(points.clean@data$One_m_Class_2_1st_choice[onem.class3.na.index])
#figure out what those trees are doing in there
whichtrees <- which(points.clean@data$One_m_Class_2_1st_choice[onem.class3.na.index] == "Trees")
onem.class3.na.index[whichtrees]
points.clean@data[c(2543,3043),]
points.clean@data$One_m_Class_3_1st_choice[c(2543,3043)] <- "Coniferous"
points.clean@data$One_m_Class_3_2nd_choice[c(2543,3043)] <- NA
#set other nas to class 2 values
# Remaining class-3 NAs inherit their class-2 label (factor-to-factor
# assignment matches by level name).
# NOTE(review): the level added here is "Other_Built" but the cleaned data
# uses "Other_built" elsewhere — if the class-2 label is "Other_built" this
# assignment would yield NA with a warning; confirm the correct case.
levels(points.clean@data$One_m_Class_3_1st_choice)
onem.class3.na.index <- which(is.na(points.clean@data$One_m_Class_3_1st_choice))
points.clean@data$One_m_Class_3_1st_choice <- forcats::fct_expand(points.clean@data$One_m_Class_3_1st_choice, "Buildings","Non-photosynthetic","Other_Built")
points.clean@data$One_m_Class_3_1st_choice[onem.class3.na.index] <- points.clean@data$One_m_Class_2_1st_choice[onem.class3.na.index]
##### five_m ####
# Fill NA values for the 5 m classification, mirroring the one_m section.
which(is.na(points.clean@data$Five_m_Class1_1st_choice)) #check for na's in row 1
fivem.class1.na.index <- which(is.na(points.clean@data$Five_m_Class1_1st_choice))
points.clean@data$Five_m_Class1_1st_choice[fivem.class1.na.index]
points.clean@data[fivem.class1.na.index,]
# Rows with no class-1 label at all are unusable -> drop them.
# FIX: guard against an empty index.  With zero NAs the original
# points.clean[-integer(0), ] would silently delete EVERY row.
if (length(fivem.class1.na.index) > 0) {
  points.clean <- points.clean[-fivem.class1.na.index,]
}
points.clean
#class2
which(is.na(points.clean@data$Five_m_Class_2_1st_choice))
fivem.class2.na.index <- which(is.na(points.clean@data$Five_m_Class_2_1st_choice))
points.clean@data$Five_m_Class_2_1st_choice[fivem.class2.na.index]
points.clean@data$Five_m_Class1_1st_choice[fivem.class2.na.index]
#add shadow to class 2 and 3
# fct_expand() adds the new level first so the assignment does not produce
# NAs with an "invalid factor level" warning.
fivem.class2.na.index <- which(is.na(points.clean@data$Five_m_Class_2_1st_choice))
fivem.shadow.index <- which(points.clean@data$Five_m_Class1_1st_choice == "Shadow")
points.clean@data[fivem.shadow.index,]
levels(points.clean@data$Five_m_Class_2_1st_choice)
points.clean@data$Five_m_Class_2_1st_choice <- forcats::fct_expand(points.clean@data$Five_m_Class_2_1st_choice, "Shadow")
points.clean@data$Five_m_Class_2_1st_choice[fivem.shadow.index] <- "Shadow"
levels(points.clean@data$Five_m_Class_3_1st_choice)
points.clean@data$Five_m_Class_3_1st_choice <- forcats::fct_expand(points.clean@data$Five_m_Class_3_1st_choice, "Shadow")
points.clean@data$Five_m_Class_3_1st_choice[fivem.shadow.index] <- "Shadow"
#add clouds/ice to class 2 and 3
fivem.cloudice.index <- which(points.clean@data$Five_m_Class1_1st_choice == "Clouds/Ice")
points.clean@data[fivem.cloudice.index,]
levels(points.clean@data$Five_m_Class_2_1st_choice)
points.clean@data$Five_m_Class_2_1st_choice <- forcats::fct_expand(points.clean@data$Five_m_Class_2_1st_choice, "Clouds/Ice")
points.clean@data$Five_m_Class_2_1st_choice[fivem.cloudice.index] <- "Clouds/Ice"
levels(points.clean@data$Five_m_Class_3_1st_choice)
points.clean@data$Five_m_Class_3_1st_choice <- forcats::fct_expand(points.clean@data$Five_m_Class_3_1st_choice, "Clouds/Ice")
points.clean@data$Five_m_Class_3_1st_choice[fivem.cloudice.index] <- "Clouds/Ice"
#look at Bare w/ NAs
# Manual completion of four Bare rows (hard-coded, interactive-session rows).
which(is.na(points.clean@data$Five_m_Class_2_1st_choice))
fivem.class2.na.index <- which(is.na(points.clean@data$Five_m_Class_2_1st_choice))
points.clean@data$Five_m_Class_2_1st_choice[fivem.class2.na.index]
points.clean@data$Five_m_Class1_1st_choice[fivem.class2.na.index]
points.clean@data[c(3720,3721,3815,3816),]
points.clean@data$Five_m_Class_2_1st_choice[c(3720,3721,3815,3816)] <- "Barren"
points.clean@data$Five_m_Class_2_2nd_choice <- forcats::fct_expand(points.clean@data$Five_m_Class_2_2nd_choice, "Clouds/Ice")
points.clean@data$Five_m_Class_2_2nd_choice[c(3720,3721,3815,3816)] <- "Clouds/Ice"
points.clean@data$Five_m_Class_3_1st_choice[c(3720,3721,3815,3816)] <- "Natural_barren"
points.clean@data$Five_m_Class_3_2nd_choice <- forcats::fct_expand(points.clean@data$Five_m_Class_3_2nd_choice, "Clouds/Ice")
points.clean@data$Five_m_Class_3_2nd_choice[c(3720,3721,3815,3816)] <- "Clouds/Ice"
#look at Water w/ NAs
# Water propagates unchanged down the class hierarchy.
points.clean@data[c(3750,3845),]
points.clean@data$Five_m_Class_2_1st_choice[c(3750,3845)] <- "Water"
levels(points.clean@data$Five_m_Class_3_1st_choice)
points.clean@data$Five_m_Class_3_1st_choice <- forcats::fct_expand(points.clean@data$Five_m_Class_3_1st_choice, "Water")
points.clean@data$Five_m_Class_3_1st_choice[c(3750,3845)] <- "Water"
#class 3
which(is.na(points.clean@data$Five_m_Class_3_1st_choice))
fivem.class3.na.index <- which(is.na(points.clean@data$Five_m_Class_3_1st_choice))
unique(points.clean@data$Five_m_Class_2_1st_choice[fivem.class3.na.index])
#figure out what those barren points are doing in there
whichbarren <- which(points.clean@data$Five_m_Class_2_1st_choice[fivem.class3.na.index] == "Barren")
fivem.class3.na.index[whichbarren]
points.clean@data[c(2174,2674),]
points.clean@data$Five_m_Class_3_1st_choice[c(2174,2674)] <- "Natural_barren"
points.clean@data$Five_m_Class_3_2nd_choice[c(2174,2674)] <- NA
#set other nas to class 2 values
# Remaining class-3 NAs inherit their class-2 label (factor-to-factor
# assignment matches by level name).
# NOTE(review): the level added here is "Other_built" while the one_m
# section adds "Other_Built" — confirm which case the class-2 data uses;
# a mismatch would yield NA with an "invalid factor level" warning.
which(is.na(points.clean@data$Five_m_Class_3_1st_choice))
fivem.class3.na.index <- which(is.na(points.clean@data$Five_m_Class_3_1st_choice))
unique(points.clean@data$Five_m_Class_2_1st_choice[fivem.class3.na.index])
levels(points.clean@data$Five_m_Class_3_1st_choice)
points.clean@data$Five_m_Class_3_1st_choice <- forcats::fct_expand(points.clean@data$Five_m_Class_3_1st_choice, "Buildings","Non-photosynthetic","Other_built")
points.clean@data$Five_m_Class_3_1st_choice[fivem.class3.na.index] <- points.clean@data$Five_m_Class_2_1st_choice[fivem.class3.na.index]
##### write out cleaned up points with no NAs ####
points.path <- "E:\\MetroVancouverData\\Training_Validation_Points"
points.filename <- "MetroVan_gt_Bins1_16_tidy_noNA"
# Save full column names alongside the shapefile (shapefile field names are
# truncated).  NOTE(review): this reuses "points_variables_tidy.csv" and
# overwrites the file written for the tidy (with-NA) set — harmless only if
# the column names are identical; confirm.
points_variables <- as.data.frame(names(points.clean))
write_csv(points_variables,paste0(points.path,"\\points_variables_tidy.csv"),col_names = FALSE)
writeOGR(points.clean, points.path, points.filename, driver="ESRI Shapefile", overwrite_layer=TRUE)
#### add column for points falling in urban landuse areas? ####
# Dormant draft: overlay points on the 2011 land-use polygons and keep only
# built-up land-use classes.  Left commented out, as in the original.
# lu.path <- "E:\\MetroVancouverData\\LandUse"
# lu.filename <- "LandUse2011"
# lu <- readOGR(dsn=lu.path, layer=lu.filename)
#
# lu_unique <- unique(lu@data$LU_CodeDes)
# built_up_lu <- lu_unique[c(3,5,7,8,10:23)]
#
# points.lu <- over(points.short,lu)
# str(points.lu)
# points.lu <- points.lu[LU_CodeDes %in% built_up_lu,]
#
# data.subset <- points.lu@data %>%
# filter(LU_CodeDes %in% built_up_lu)
#### END ####
|
3a40c00eff1ce258a5cc70af3c047d8e0667e8e9
|
1c91d71b270acebefc918a40ce12799317d58522
|
/man/DM1.Rd
|
dfd9d93d6f7d22a8f68b9812631be03c21cf8dfd
|
[] |
no_license
|
cran/StatPerMeCo
|
6ab2a03e6b85c4064d050fa1b26a5f582a332885
|
e8735fa477bb8fc9e85f07719724bcf2ef09be96
|
refs/heads/master
| 2021-01-19T16:50:06.018743
| 2017-04-14T17:05:37
| 2017-04-14T17:05:37
| 88,291,278
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 641
|
rd
|
DM1.Rd
|
\name{dM1}
\alias{dM1}
\title{
Distance measure defined in Eq. (3.1) of Li et al. (2016)
}
\description{
Compute the distance measure defined in Eq. (3.1) of Li et al. (2016), used to compare factor loading matrices in their Monte Carlo experiments.
}
\usage{
dM1(A, Ahat)
}
\arguments{
\item{A}{
The original factor loading matrix A
}
\item{Ahat}{
The estimated factor loading matrix A}
}
\references{
Li, W., Gao, J., Li, K., & Yao, Q. (2016). Modeling Multivariate Volatilities via Latent Common Factors. Journal of Business & Economic Statistics, 34(4), 564-573.
}
\author{
Carlos Trucios
}
\keyword{Factor loading matrix}
\keyword{DM1}
|
d19ec191b11db92812327ae19ac0eee41da34f8f
|
b49dbb013041ed2d62aeb9129ce07b4659ddd43e
|
/data-raw/cog_com_2019.R
|
af32947ecc8bc27895b43347caf02e843e6e6799
|
[
"MIT"
] |
permissive
|
InseeFrLab/DoReMIFaSolData
|
46db6f9b60e9338cb4d6d47e72d57a389a6acc11
|
d67c97adbd0210f572e0ef5e1466967b514e4dc8
|
refs/heads/main
| 2023-06-24T17:12:23.393165
| 2021-08-26T09:08:55
| 2021-08-26T09:08:55
| 306,734,369
| 3
| 5
|
MIT
| 2023-06-08T14:50:53
| 2020-10-23T19:55:45
|
R
|
UTF-8
|
R
| false
| false
| 1,128
|
r
|
cog_com_2019.R
|
#' Code Officiel Géographique 2019
#'
#' Liste des communes au 1er janvier 2019
#'
#' @format Une table avec 37 930 lignes et 11 variables
#' \describe{
#' \item{TYPCOM}{Type de commune}
#' \item{COM}{Code commune}
#' \item{REG}{Code région}
#' \item{DEP}{Code département}
#' \item{ARR}{Code arrondissement}
#' \item{TNCC}{Type de nom en clair}
#' \item{NCC}{Nom en clair (majuscules)}
#' \item{NCCENR}{Nom en clair (typographie riche)}
#' \item{LIBELLE}{Nom en clair (typographie riche) avec article}
#' \item{CAN}{Code canton. Pour les communes « multi-cantonales » code décliné de 99 à 90 (pseudo-canton) ou de 89 à 80 (communes nouvelles) }
#' \item{COMPARENT}{Code de la commune parente pour les arrondissements municipaux et les communes associées ou déléguées. }
#' }
#'
#' @source \url{https://www.insee.fr/fr/information/3720946}
# Download the 2019 commune-level COG table; all 11 columns read as
# character ("c" x 11).  (telechargerDonnees is the package's download
# helper — presumably fetching from insee.fr per the @source above.)
cog_com_2019 <- telechargerDonnees(donnees = "COG_COMMUNE", date = "2019", col_types = "ccccccccccc")
# Mark the name columns as UTF-8 so accented characters render correctly
# on non-UTF-8 locales (e.g. Windows latin1).
Encoding(cog_com_2019$ncc) <- "UTF-8"
Encoding(cog_com_2019$nccenr) <- "UTF-8"
Encoding(cog_com_2019$libelle) <- "UTF-8"
# Save as package data (data/cog_com_2019.rda), replacing any previous copy.
usethis::use_data(cog_com_2019, overwrite = TRUE)
|
ae90a3639053d961639df420d17d11611f771c31
|
7270049faff20a227119f8196f3779a0f9c21b8c
|
/R_code_temp_interpolation.r
|
4a03b9dd0895d75b4f4d692fcabbc4fa30e20aed
|
[] |
no_license
|
albertoruggeri4/monitoring
|
fe6c44f04ed9bec9a0ca16e515b0a386c773db4c
|
1ac0bbca428217296bab7a8262c9263041692168
|
refs/heads/master
| 2022-12-17T15:14:09.531066
| 2020-09-14T14:30:17
| 2020-09-14T14:30:17
| 252,198,364
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,153
|
r
|
R_code_temp_interpolation.r
|
#### Spatial interpolation of plot measurements (soil chemistry, vegetation) ####
# Spatial statistics: purely spatial interpolation, in the spirit of species
# distribution modelling — "how dense are the points in this area?".
# FIX: attach() removed (masking-prone anti-pattern); columns are referenced
# explicitly via df$column.  head=T replaced with the full, spelled-out
# header = TRUE.
setwd("C:/Lab")
library(spatstat) # planar point patterns (ppp) and kernel smoothing

## Part 1: Casentino Forest plots (beech forest) --------------------------
inp <- read.table("dati_plot55_LAST3.csv", sep = ";", header = TRUE)
head(inp)
# Spatial distribution of the sampling points.
plot(inp$X, inp$Y)
# summary() shows the min/max of X and Y, needed for the observation window.
summary(inp)
# Planar point pattern: coordinates plus their window ranges.
# (Built once; the original computed the same ppp twice.  Printing the
# object reproduces the output of the original bare ppp() call.)
inppp <- ppp(x = inp$X, y = inp$Y, c(716000, 718000), c(4859000, 4861000))
inppp
names(inppp) # components of the ppp object
# Attach canopy cover as the mark and interpolate it to a surface.
marks(inppp) <- inp$Canopy.cov
canopy <- Smooth(inppp)
plot(canopy)
points(inppp, col = "green")
# Canopy density is lower in the north than in the south part.
# Repeat for mean lichen cover (lichens -- trees -- air pollution).
marks(inppp) <- inp$cop.lich.mean
lichs <- Smooth(inppp)
plot(lichs)
points(inppp)
# Canopy and lichen surfaces are not congruent; show them side by side.
# (A stack() of the two images would be a more elegant alternative.)
par(mfrow = c(1, 2))
plot(canopy)
points(inppp)
plot(lichs)
points(inppp)
# Direct (non-spatial) relationship between the two variables.
plot(inp$Canopy.cov, inp$cop.lich.mean, col = "red", pch = 19, cex = 2)

## Part 2: dune species — psammophilous vegetation ------------------------
inp.psam <- read.table("dati_psammofile.csv", sep = ";", header = TRUE)
# Organic carbon (C_org) in the soil.
head(inp.psam)
# As seen in other point patterns, this is a clumped dispersion.
plot(inp.psam$E, inp.psam$N)
summary(inp.psam) # coordinate min/max for the window
inp.psam.ppp <- ppp(x = inp.psam$E, y = inp.psam$N,
                    c(356450, 372240), c(5059800, 5064150))
# Use organic carbon as the mark and estimate values where none were measured.
marks(inp.psam.ppp) <- inp.psam$C_org
C <- Smooth(inp.psam.ppp)
plot(C)
points(inp.psam.ppp)
|
5fac5f63c896a436ddbccf5974d28b90fe2a88a7
|
c3cf36095797ad2538e98091cec1610ed39a180f
|
/Data Visualization/Project 1/02 Visualizations/ggplot_filterPrice.R
|
213a333d60c6d220e06ca304eaadcea6aa237c5e
|
[] |
no_license
|
evanaj12/R_and_other_examples
|
4f1440dfb4989e7503e8631650619beba1eae34c
|
baa2a39a2b5b9a4cec5034d8e54a6a277a5ca134
|
refs/heads/master
| 2021-01-09T21:46:58.137191
| 2017-02-14T18:13:08
| 2017-02-14T18:13:08
| 48,967,571
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 174
|
r
|
ggplot_filterPrice.R
|
# Scatter of order date vs. shipped date, coloured by unit price and facetted
# by customer state.  Assumes `dfjoined` (with ORDER_DATE, SHIPPED_DATE,
# UNIT_PRICE, CUSTOMER_STATE columns) and ggplot2 are already in scope.
price <- ggplot(dfjoined, aes(x=as.Date(ORDER_DATE, "%Y-%m-%d"), y=as.Date(SHIPPED_DATE, "%Y-%m-%d"), color= factor(UNIT_PRICE))) + geom_point()+ facet_wrap(~CUSTOMER_STATE)
|
fcd4ac95e639bd2e0ba81e18273086cce5f1077f
|
3581b1fb06e1455db0cbafd1c43e2ac71a250734
|
/R/NlmeSetupVarFunction.R
|
b23bcbe2d40d0fa792295b91f3d335d201b9e8bb
|
[] |
no_license
|
cran/mixlow
|
a4f85e942ef212bfbed81551c2d68b9197e2dc99
|
779a43f75afd02322ade30e575a45a2d1bd81e79
|
refs/heads/master
| 2016-09-05T13:24:41.248119
| 2012-03-20T00:00:00
| 2012-03-20T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,894
|
r
|
NlmeSetupVarFunction.R
|
`NlmeSetupVarFunction` <-
function(ord, dat1)
{
  ## Set up the candidate variance functions, fixed-effect formulas and the
  ## random-effect formulas used when calling nlme.
  ##
  ## Args:
  ##   ord:  drug vector; length(ord) > 1 selects the multi-drug set-up,
  ##         length(ord) == 1 the single-drug set-up.
  ##   dat1: data frame containing (at least) the factor column `drg`.
  ##
  ## Returns a list with fixedEffects, randomEffects and the variance
  ## functions var.function.m1..m4 (multi-drug) or var.function.s1..s4
  ## (single-drug); the unused set stays NULL.
  var.function.m1 <- NULL
  var.function.m2 <- NULL
  var.function.m3 <- NULL
  var.function.m4 <- NULL
  var.function.s1 <- NULL
  var.function.s2 <- NULL
  var.function.s3 <- NULL
  var.function.s4 <- NULL

  # Multi-drug case.
  if (length(ord) > 1)
  {
    # Fixed power of 0.5 for every drug level, as a named list
    # ("1", "2", ...) in the form expected by varPower(fixed = ...).
    # FIX: replaces the original eval(parse(text = ...)) loop, which also
    # misbehaved for zero drug levels via 1:length(...).
    n.drg <- nlevels(dat1$drg)
    flist <- setNames(rep(list(0.5), n.drg), seq_len(n.drg))

    var.function.m1 <- NULL
    var.function.m2 <- varIdent(form = ~1|drg)
    var.function.m3 <- varPower(form = ~(fitted(.))|drg, fixed= flist)
    var.function.m4 <- varPower(form = ~(fitted(.))|drg)
    # Fixed effects: one g and one p parameter per drug (no intercept), common u.
    fixedEffects <- list(0)
    fixedEffects[[1]] <- as.formula(g~drg-1)
    fixedEffects[[2]] <- as.formula(p~drg-1)
    fixedEffects[[3]] <- as.formula(u~1)
  }

  # Single-drug case.
  if (length(ord) == 1)
  {
    var.function.s1 <- NULL
    var.function.s2 <- varPower(form = ~fitted(.), fixed= 0.5)
    var.function.s3 <- varPower(form = ~fitted(.))
    var.function.s4 <- varConstPower(form = ~fitted(.), fixed= list(power = 0.5) )
    # Fixed effects: single g, p and u parameters.
    fixedEffects <- list(0)
    fixedEffects[[1]] <- as.formula(g~1)
    fixedEffects[[2]] <- as.formula(p~1)
    fixedEffects[[3]] <- as.formula(u~1)
  }

  # Random effects: a random u intercept per tray.  Built directly instead of
  # the original list(0)/append/[-1] dance.
  randomEffects <- list(tray = list(as.formula(u ~ 1)))

  return (list(fixedEffects=fixedEffects, var.function.m1=var.function.m1, var.function.m2=var.function.m2,
    var.function.m3=var.function.m3, var.function.m4=var.function.m4, var.function.s1=var.function.s1,
    var.function.s2=var.function.s2, var.function.s3=var.function.s3, var.function.s4=var.function.s4, randomEffects=randomEffects))
}
|
05a1b513cd767b86fdaab7ade64202d2884e31f9
|
1f3a93bde4e7c51370361d82a5d9debc61b69a90
|
/inst/doc/Sensitivity_analysis.R
|
9f436229d7774458468f935600e9edefcc4d7c70
|
[] |
no_license
|
cran/multirich
|
81229988d50d14a402aebc409a0f2a4176f24d55
|
3cf0674f0e8fe0c25ca1503e2d47660fa439328b
|
refs/heads/master
| 2021-06-04T10:25:42.730839
| 2021-05-14T22:30:11
| 2021-05-14T22:30:11
| 30,429,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,231
|
r
|
Sensitivity_analysis.R
|
## -----------------------------------------------------------------------------
# Load package for multivariate richness calculation & sensitivity analysis tools
library(multirich)
# Labels for species, communities and traits
sp.lbl = sprintf("sp%s",seq(1,15,1))
com.lbl = c("pool","com1","com2","com3")
tr.lbl = c("tr1","tr2")
# Trait values and the species x trait matrix (15 species, 2 traits)
tr1 = c(1,1,1,1,1,2,2,2,2,3,3,3,4,4,5)
tr2 = c(1,2,3,4,5,1,2,3,4,1,2,3,1,2,1)
in.mat = matrix(c(tr1,tr2),ncol = 2, dimnames = list(sp.lbl,tr.lbl))
# Community x species presence/absence matrix; "pool" has every species present
pool = rep(1,15)
com1 = c(1,0,0,0,1,0,0,0,0,0,0,1,0,0,1)
com2 = c(1,1,0,0,0,1,1,0,0,0,0,0,0,0,0)
com3 = c(1,0,0,0,0,0,1,0,0,0,0,0,0,0,0)
in.com = matrix(c(pool,com1,com2,com3),nrow = 4,byrow = T,dimnames = list(com.lbl,sp.lbl))
# Identical break points for both traits (values span 1..5)
tr1.breaks = tr2.breaks = get.breaks(1,5)
breaks = list(tr1.breaks, tr2.breaks)
out.pdf = "none" # Specifying a file here will save the result to file.
# Trait space here is less than the whole grid because of a "known" trade-off
# between the two traits
in.traitspaces = c(3,6,10,15)
# The results object can be used for further manipulation of the output data
results = sensitivity.analysis(in.mat, in.com, breaks, out.pdf, in.traitspaces = in.traitspaces)
|
f3b226d7c06ac67710c94e53aa96d34f99a569ac
|
5f66254ed2531989ee3851525e457ba00ae0ed4c
|
/r_sripts/05-logit_probit.R
|
66b039443f1260e67b044d898a0917f99a99bb39
|
[] |
no_license
|
graf-anapaula/mushrooms
|
4fffcd2ef7fa34c9d0eadc24eea1ce3901b74af2
|
c0a223334d86cb5ffeb511f8d857fbae895c7531
|
refs/heads/master
| 2023-06-30T23:52:57.823319
| 2021-07-19T03:58:03
| 2021-07-19T03:58:03
| 383,635,520
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 687
|
r
|
05-logit_probit.R
|
# Libraries ====
library(tidyverse)
library(tidymodels)
library(lmtest)

# Read data ====
mushrooms <- read_csv("data/mushrooms_dummy.csv")

# Train/test split ====
set.seed(20210709)
# FIX: initial_split() takes `prop`, not `split`; the original `split = 0.8`
# was silently swallowed by `...` and the default prop = 3/4 was used.
split <- initial_split(mushrooms, prop = 0.8)
train <- training(split)
test <- testing(split)

# Probit model: class regressed on all remaining columns.
probit <- glm(data = train,
              class ~ .,
              family = binomial(link = "probit"))
summary(probit)

# Logit model (canonical link).
logit <- glm(data = train,
             class ~ .,
             family = "binomial")
summary(logit)

# Test-set predictions ====
class_test <- test %>% select(class)
# FIX: predict.glm() takes `newdata`, not `new_data`; the original call ignored
# the argument and silently returned fitted values for the training data.
pred_test_probit <- predict(probit, newdata = test %>% select(-class))
pred_test_probit <- bind_cols(pred_test_probit, class_test)
|
41415db9b6fdc3a7561217b90c1f08aab6df8074
|
53b2358c6089be2c51ac2768a77fc303d563550d
|
/assembly-scripts/tsv-ase-bin.R
|
d7f03a7025b4b50faddc8eb12d7ca620419d01e0
|
[] |
no_license
|
bethsheets/Population-Genomics-via-RNAseq
|
684e7cd5a667a335f7b3e1111e1ccd6eb85533c6
|
3cb3ee912f855e8a9981874f4ff160551f8b8db3
|
refs/heads/docs
| 2020-04-12T05:42:48.038672
| 2019-07-12T17:24:37
| 2019-07-12T17:24:37
| 60,875,099
| 2
| 10
| null | 2017-04-14T15:16:23
| 2016-06-10T20:29:39
|
Python
|
UTF-8
|
R
| false
| false
| 2,211
|
r
|
tsv-ase-bin.R
|
# Allele-specific expression (ASE) permutation analysis over heterozygous sites.
library('permute')
library('data.table')
# HETS.tsv: per-sample allele counts at heterozygous SNPs.
# (AO.1 / RO.1 are allele observation counts -- presumably alternate/reference,
# as in freebayes output; confirm upstream.)
dat<-read.delim('HETS.tsv')
dat<-dat[,c('CHROM','POS','SAMPLE','AO.1','RO.1')]
dat$AIsign<-dat$AO.1/(dat$RO.1+dat$AO.1) # signed imbalance: AO fraction of total
dat$AI<-abs(0.5-dat$AIsign)              # unsigned deviation from 0.5
dat$DP.1<-dat$RO.1+dat$AO.1              # total depth at the site
dat$SNP<-paste(dat$CHROM,dat$POS,sep='-')
dat$LOCUS<-paste(dat$SAMPLE, dat$SNP,sep='-')
# Shuffle the `response` column of data.table `dt` by permutation vector `vec`,
# then return the per-`group` means of the shuffled values as a plain vector.
shufTab<-function(dt,vec,response,group){
dt$shuff<-dt[,get(response)][vec]
return(dt[,mean(shuff),by=group][,get('V1')])
}
# Depth bins used as permutation strata: width 10 up to 100, width 100 from
# 200 to 1000, then a single open-ended bin above 1000.
dat$DPbin=cut(dat$DP.1,breaks=c(seq(10,100,10),seq(200,1000,100),Inf))
# Permutation test of `response` per `group` (default: per SNP), shuffling
# within depth strata.
#
# Args:
#   currdat  - data frame prepared above (needs DP.1, CHROM, POS plus the
#              `response`, `group` and `strata` columns)
#   n        - number of permutations
#   response - column whose per-group mean is tested
#   group    - grouping column
#   strata   - column defining permutation blocks
#   minGroup - minimum observations per group for the group to be kept
#   minObs   - minimum total observations required to run the test
#   two.tail - if TRUE, two-tailed p-values
#   minDP    - minimum read depth per observation
#
# Returns a data.table with per-group means, p and BH-adjusted p values, or
# list(res=NULL, data=...) when there is not enough data.
locTest<-function(currdat,n=100,response='AIsign',group='SNP',strata='DP.1',minGroup=2,minObs=15,two.tail=F,minDP=10){
# Depth filter, then drop groups with fewer than minGroup observations.
currdat<-currdat[currdat$DP.1>=minDP,]
currdat<-(currdat[currdat[,group]%in%names(table(currdat[,group]))[table(currdat[,group])>=minGroup],])
print(dim(currdat))
if(nrow(currdat)<minObs|length(unique(currdat[,group]))<2) return(list(res=NULL,data=currdat))
currdat$key<-1:nrow(currdat)
currdat<-data.table(currdat)
setkey(currdat,key)
# Observed per-group means of the response (5th column renamed to `response`).
agg<-currdat[,list(CHROM=CHROM[1],POS=POS[1],DP.1=mean(DP.1),mean(get(response))),by=group]
setnames(agg,5,response)
print('shuffling within blocks...')
# Permutations restricted to blocks defined by `strata`.
perm<-shuffleSet(nrow(currdat),n,control=how(within=Within(type='free'),blocks=currdat[,get(strata)]))
if(nrow(perm)<n | min(dim(perm))<=1) return(list(res=NULL,data=currdat))
print('shuffling data...')
testmat<-apply(perm,1,function(vec) shufTab(currdat,vec,response,group))
print('concatenating matrices...')
# After dropping the first 4 columns: column 1 = observed means, the
# remaining n columns = permuted means.
testmat<-as.matrix(cbind(agg,testmat))[,-c(1:4)]
if(two.tail==F) agg$p<-apply(testmat,1,function(v) length(which(v[2:length(v)]>=v[1]))/n)
else agg$p<-apply(testmat,1,function(v) min(min(length(which(v[2:length(v)]>=v[1]))/n,length(which(v[2:length(v)]<=v[1]))/n)*2,1))
agg$p.adj<-p.adjust(agg$p,method='BH')
return(agg)
}
# Run the test per SNP with 5000 permutations, stratified by depth bin, and
# write out the contigs and SNPs with significant (BH-adjusted p < 0.05)
# allelic imbalance.
resPOS<-locTest(dat,n=5000,response='AIsign',group='SNP',strata='DPbin',minGroup=4,two.tail=T,minDP=20)
write.table(unique(resPOS[resPOS$p.adj<0.05,]$CHROM),quote=F,row.names=F,col.names=F,file='ASEcontigs.txt')
write.table(unique(resPOS[resPOS$p.adj<0.05,]$SNP),quote=F,row.names=F,col.names=F,file='ASEsnps.txt')
save(resPOS,dat,file='resPOS.Rdata')
print(table(resPOS$p.adj<0.05))
|
0b1e1f9716f89b779bb08de3aad077c88b7cc4d8
|
22c5e08e719342636aecd8fdbab7d8545d1d38a1
|
/code 19-23aug.R
|
db7cf0fdad1a9f7169d681bebb9422375fd5620c
|
[] |
no_license
|
robin93/Stat_struc_Data
|
dce7575846318a54a003b08780a4ceb0f5ed74b4
|
1f5ad157463721c5765530cbb08efc734a7b21da
|
refs/heads/master
| 2020-12-24T14:00:52.709412
| 2015-11-19T20:02:44
| 2015-11-19T20:02:44
| 41,617,380
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,019
|
r
|
code 19-23aug.R
|
n = 300
x = rnorm(n)
# Empirical distribution function, drawn as a step function
x = sort(x)
y = (1:(n-1))/n
#par(mfrow=c(1,1))
plot(c(rep(x,each=2)),c(0,rep(y,each=2),1),type="l")
lines(x,dnorm(x)) # theoretical N(0,1) density at the sorted sample points
lines(x,pnorm(x)) # theoretical N(0,1) CDF at the sorted sample points
# Normal plot (QQ plot against the normal distribution)
qqnorm(x)
qqline(x)
# QQ plot against a chi-square(3) reference
z = rchisq(300, df = 3)
qqplot(qchisq(ppoints(300), df = 3), z)
qqline(z, distribution = function(p) qchisq(p, df = 3))
# PP plot: theoretical CDF values vs. empirical proportions
z = sort(z)
plot(pchisq(z,df=3),(1:n)/n,xlim=c(0,1),ylim=c(0,1)); lines(c(0,1),c(0,1))
plot(pchisq(x,df=3),(1:n)/n,xlim=c(0,1),ylim=c(0,1)); lines(c(0,1),c(0,1))
# Kolmogorov-Smirnov test based on max separation between EDF & theoretical CDF
n = 30
x = rnorm(n)
ks.test(x,pnorm)
# Empirical distribution with the KS test p-value annotated on the plot
edf = ecdf(x)
gridpt = min(x) + (max(x) - min(x))*(0:100000)*.00001
plot(gridpt,edf(gridpt),type="l",xlab="value",ylab="distribution function")
lines(gridpt,pnorm(gridpt))
pval = ks.test(x,pnorm)$p; pval = floor(1000*pval+.5)*0.001 # round to 3 decimals
text(fivenum(x)[2],0.9,paste("KS test p-value = ",pval))
# Shapiro-Wilk normality test
n = 30
x = rnorm(n,0,2)
shapiro.test(x)
# Normal plot annotated with both the KS and SW test p-values
qqnorm(x)
lines(c(-3,3),c(-3,3),col=4)
psw = shapiro.test(x)$p; psw = floor(1000*psw+.5)*0.001
pks = ks.test(x,pnorm)$p; pks = floor(1000*pks+.5)*0.001
text(fivenum(x)[2],fivenum(x)[5]-.5,paste("SW test p-value = ",psw))
text(fivenum(x)[2],fivenum(x)[5]-1,paste("KS test p-value = ",pks))
# Smooth density estimate shown next to the histogram
par(mfrow=c(1,2))
hist(x,probability=T)
plot(density(x),ylim=c(0,0.42),main="density estimate")
# Chi-square goodness-of-fit test for discrete data
n = 1000
x = rbinom(n,5,.5)
obscount = hist(x,breaks=(c(0:6)-0.5),plot=F)$counts
prob = dbinom((0:5),5,.45) # NOTE(review): reference uses p = .45 while data use p = .5 -- presumably deliberate, to illustrate rejection
chisq.test(obscount, p = prob)
# Another example: discrete uniform on 0..9
x = floor(10*runif(1000))
obscount = hist(x,breaks=(c(0:10)-0.5),plot=F)$counts
prob = rep(1/length(obscount), length(obscount))
chisq.test(obscount, p = prob)
|
5bd7a53165d4baa38aab55763239213f1f9e45b4
|
4e6b153694b33cb33d7c057e3d03cfd390a7cbf9
|
/R/hello.R
|
0743825658219bceed26eb46f3e2738d16c06a69
|
[] |
no_license
|
Tutuchan/gettext
|
d87f12f36984a751bea74894bad03b25932e71de
|
ad9102853da546f3038f42df125116a50e8a224c
|
refs/heads/master
| 2021-01-13T16:42:57.512798
| 2016-12-29T14:31:04
| 2016-12-29T14:54:56
| 77,619,092
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
r
|
hello.R
|
#' hello
#'
#' Returns the greeting "Hello, world!", translated via \code{gettext()}
#' when a translation catalogue is available.
#'
#' @return A length-one character vector.
#' @export
hello <- function() {
  greeting <- gettext("Hello, world!")
  greeting
}
#' hello2
#'
#' Same greeting as \code{hello()}, but looks the translation up explicitly
#' in the "R-gettext" message domain.
#'
#' @return A length-one character vector.
#' @export
hello2 <- function() {
  translated <- gettext("Hello, world!", domain = "R-gettext")
  translated
}
#' app
#'
#' Launches the example Shiny application shipped in the package's
#' \code{inst/app} directory.
#'
#' @import shiny
#' @export
app <- function() {
  location <- system.file("app", package = "gettext")
  shiny::runApp(location, display.mode = "normal")
}
|
45ff69e5f174a95a6b0e6d217f40ddaf2562858a
|
714ecbdc38da1d141c2f7913e7f9ecf8011e3e61
|
/plot2.R
|
5a7a8ee15cfa684bd918b8c2e72fe71306aaa331
|
[] |
no_license
|
nsulikowski/ExData_Plotting1
|
f185828fdb341c02bacb852c0cdea27dcf0d8265
|
d38d232e86054fbb427b4b77ac0fad6dd41c1c9e
|
refs/heads/master
| 2020-12-25T20:08:15.457189
| 2016-04-09T12:31:03
| 2016-04-09T12:31:03
| 55,818,552
| 0
| 0
| null | 2016-04-09T00:55:30
| 2016-04-09T00:55:30
| null |
UTF-8
|
R
| false
| false
| 831
|
r
|
plot2.R
|
library(dplyr)
library(readr)
library(lubridate)

# UCI household power consumption file; "?" marks missing values.
data <- read_delim(
  file="C:/Users/Nestor/Documents/Coursera/CourseProject1/household_power_consumption.txt",
  col_names = TRUE,
  col_types = "ccnnnnnnn",
  na = c("?"),
  delim=";"
)

# Build a POSIXct timestamp and keep only 2007-02-01 and 2007-02-02.
# FIX: the original filter and plot mixed `DateTime` with an undefined
# variable `Date1`, and the axis.Date() call referenced an undefined `d`;
# the script errored as written.
data1 <-
  mutate(data, DateTime=as.POSIXct(strptime(paste(Date,Time), format="%d/%m/%Y %H:%M:%S")) ) %>%
  filter( DateTime>=as.POSIXct("2007-02-01", tz="GMT") & DateTime < as.POSIXct("2007-02-03", tz="GMT") )
str(data1)

#png("plot2.png",width=480,height=480,units="px")
with(data1, plot(DateTime, Global_active_power) )
# NOTE(review): the original broken axis.Date() customization (undefined `d`)
# was removed; POSIXct x-values get a sensible default time axis.
#dev.off() col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)"
|
d7922cc5f29fd5ec72b2636fb19c7e03ed996c2f
|
7b2f2d82d958351d4b714b98beb9aeb287869260
|
/man/Waterfall.Rd
|
9b2ae7c24d656a2a0648e8e611def9fbbd06a624
|
[] |
no_license
|
Displayr/flipStartup
|
5fa3dc7e136b444c7def95084afadebdb969f343
|
1c9557ea91b472dac99b10e4b9580aa6e2badc49
|
refs/heads/master
| 2023-07-29T16:29:21.282415
| 2023-07-14T05:50:15
| 2023-07-14T05:50:15
| 60,380,987
| 2
| 1
| null | 2023-07-14T05:50:17
| 2016-06-03T22:18:00
|
R
|
UTF-8
|
R
| false
| true
| 547
|
rd
|
Waterfall.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/waterfall.R
\name{Waterfall}
\alias{Waterfall}
\title{Waterfall}
\usage{
Waterfall(x, periods = NULL)
}
\arguments{
\item{x}{A RevenueGrowthAccounting object.}
\item{periods}{A vector of \code{character} indicating the period(s) to plot
(relative to the previous period). If NULL, the total, aggregated across all periods, is shown.}
}
\description{
Creates a waterfall table showing the source(s) of change in sales for one period,
relative to the previous period.
}
|
f8d5a5377239da6d5e733f734e1f0026bc7df228
|
9f07de54ad532abecda1db51c4204fee79a2c3dd
|
/man/bioparameters.Rd
|
92a3b2bff48e32391644ba9a5da3febc4ac9765f
|
[
"MIT"
] |
permissive
|
alfcrisci/rAedesSim
|
c0feb85870f1838f43e056271830ae253f655177
|
2437af7e4fa13e23ce89bc30614c9fa24ca17e54
|
refs/heads/master
| 2021-01-17T13:35:56.955121
| 2017-12-04T13:41:30
| 2017-12-04T13:41:30
| 88,733,291
| 1
| 1
| null | 2017-07-07T13:18:08
| 2017-04-19T10:41:36
|
R
|
UTF-8
|
R
| false
| true
| 1,798
|
rd
|
bioparameters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bioparameters.r
\name{bioparameters}
\alias{bioparameters}
\title{bioparameters}
\usage{
bioparameters(alfa_l = 1, alfa_a = 0, exp_lar = 1, l_density = 100,
sex_ratio = 0.5, egn = 16, ef = 0.83, inib = 0, sspp = "Albopictus",
genus_sspp = "Aedes", order_sspp = "Diptera", geo_area = "Tuscany",
name_location = NA)
}
\arguments{
\item{alfa_l}{numeric Coefficient of competition between individuals at larval stage.}
\item{alfa_a}{numeric Coefficient of competition between individuals at adult stage.}
\item{exp_lar}{numeric Exponential Gompertz parameter for competition modulation. Default=1.}
\item{l_density}{numeric Expected larval density. Default is 100.}
\item{egn}{numeric Mean number of eggs laid per oviposition cycle. Default is 16.}
\item{inib}{numeric Inibition rate parameter ( 0-1) Default is 0.}
\item{sspp}{character Name of the species. Default is "Albopictus".}
\item{genus_sspp}{character Name of the genus of the species. Default is "Aedes".}
\item{order_sspp}{character Name of the order of the species. Default is "Diptera".}
\item{geo_area}{character Name of geographic area. Default is "Tuscany".}
\item{name_location}{character Name of location.}
\item{sex_ratio}{numeric Sex ratio of the population. Default is 0.5.}
% NOTE(review): a duplicated \item{name_location} entry was commented out here; its
% text read "numeric mean elevation of container location. Default is missing."
% and appears to describe a different (elevation) argument -- confirm the
% intended argument name before restoring it.
}
\value{
S3 object Bioparameters object
}
\description{
Bioparameters is a function to instantiate an S3 object containing biological parameters used in simulation for rAedesSim .
}
\author{
Istituto di Biometeorologia Firenze Italy Alfonso crisci \email{a.crisci@ibimet.cnr.it} ASL LUCCA Marco Selmi \email{marco.selmi@uslnordovest.toscana.it }
}
\keyword{bioparameters}
|
a6bc63f6075d79d45e986be480e93981f9c964c8
|
5d17b85808e9dac3dcad3d0caa223cf6c3ae1221
|
/man/inertCoordSysVarOut.Rd
|
72df3da5fd535f7b8fe4f180cc78aa8c6b628b47
|
[] |
no_license
|
ebmtnprof/qid
|
3c818f3ece373d61562bd0b4c428fa04643a9181
|
648fa6eeaf3d7ebb9e7fb94f41507d63b84ff685
|
refs/heads/master
| 2020-04-16T09:02:31.772340
| 2019-01-14T00:13:14
| 2019-01-14T00:13:14
| 151,756,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,991
|
rd
|
inertCoordSysVarOut.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inertCoord.R
\name{inertCoordSysVarOut}
\alias{inertCoordSysVarOut}
\title{Compares variants of the inertia-coordination model for predicting the system variable from the dynamic parameters.}
\usage{
inertCoordSysVarOut(basedata, sysVarType, dist0name, dist1name, sysVarName,
coVar = FALSE)
}
\arguments{
\item{basedata}{A dataframe containing the inertia-coordination parameter estimates produced by the "indivInertCoord" function.}
\item{sysVarType}{Whether the system variable is "dyad", which means both partners have the same socre, or "indiv" which means the partners can have different scores}
\item{dist0name}{A name for the level-0 of the distinguishing variable (e.g., "Women").}
\item{dist1name}{A name for the level-1 of the distinguishing variable (e.g., "Men").}
\item{sysVarName}{A name for the system variable being predicted (e.g., "Satisfaction").}
\item{coVar}{Defaults to \code{FALSE}. Presumably controls whether a covariate is included in the models (the name suggests so) -- confirm against the function source.}
}
\value{
The function returns a list including: 1) the lm or lme objects containing the full results for each model (called "models"), and 2) adjusted R^2 information for each model (called "R2"). The function also displays histograms of the residuals and plots of the predicted values against observed values for each model.
}
\description{
The dynamic parameters used in these models come from a set including both people's inertia (inert0 and inert1) and coordination (coord0 and coord1) estimates. The 4 models compared are a baseline intercept-only, inertia-only (inert0 + inert1), coordination-only (coord0 + coord1) and full inertia-coordination (inert0 + inert1 + coord0 + coord1) models. The system variable can be either dyadic (sysVarType = "dyad"), where both partners have the same score (e.g., relationship length) or individual (sysVarType = "indiv"), where the partners can have different scores (e.g., age). If it is individual then both actor and partner effects of the dynamic parameters are included.
}
|
ca25959a828e6787dce0299078b9ac892aa39c19
|
8cd2b32bad1ab4205131eee63aeda637ab9f8a8f
|
/3 way ANOVA code.R
|
8dae3b0043c18af4953775898031f40165ecd510
|
[] |
no_license
|
pathomas18/2016_REU_Thomas
|
3cafd448dce6352342b1158a41fafdc35b3278d9
|
f62fc55f5f6dd49d7f6ec12db9095adb923ef3f8
|
refs/heads/master
| 2021-01-24T22:06:57.560167
| 2016-08-23T14:43:31
| 2016-08-23T14:43:31
| 63,876,372
| 0
| 0
| null | 2016-07-21T18:25:13
| 2016-07-21T14:28:47
| null |
UTF-8
|
R
| false
| false
| 1,408
|
r
|
3 way ANOVA code.R
|
##########################################################
# 3 way ANOVA
# Growth Chamber Experiment
# data from: Documents< IU Biodiversity< GCH_CO2.csv
#########################################################
setwd("~/Documents/IU BioDiversity")

# FIX(review): removed the `rm(list=ls())` workspace wipe and the pasted-in
# console output (lines beginning with ">" or "[1]"), which made the script
# un-sourceable.  Also: the original built the factors as free-standing
# variables while aov(..., data = CO2) used the untouched *numeric* columns
# of CO2; the factors are now stored back into CO2 so the ANOVAs use them.
CO2 <- read.csv("GCH_CO2.csv", header = TRUE)
summary(CO2)

# Recode the three design variables as labelled factors inside CO2.
CO2$rpf   <- factor(CO2$rpf,   labels = c("rpf+", "rpf-"))
CO2$soil  <- factor(CO2$soil,  labels = c("sterile", "live"))
CO2$plant <- factor(CO2$plant, labels = c("plant", "without plant"))

# Standalone copies, as in the original script.
rpf <- CO2$rpf
soil <- CO2$soil
plant <- CO2$plant
levels(rpf)
levels(soil)
levels(plant)

# One 3-way ANOVA per week (weeks not yet combined in a single model).
# The significance notes below record what the original runs reported.
week.1aov <- aov(week.1 ~ rpf * soil * plant, data = CO2)
summary(week.1aov)
# week.1: only soil and the soil:plant interaction significant (< 0.05)

week.2aov <- aov(week.2 ~ rpf * soil * plant, data = CO2)
summary(week.2aov)
# week.2: only soil significant

week.3aov <- aov(week.3 ~ rpf * soil * plant, data = CO2)
summary(week.3aov)
# week.3: soil and plant significant

week.4aov <- aov(week.4 ~ rpf * soil * plant, data = CO2)
summary(week.4aov)
# week.4: rpf:soil:plant interaction significant

week.5aov <- aov(week.5 ~ rpf * soil * plant, data = CO2)
summary(week.5aov)
# week.5: plant significant
|
7337d364c47d66a189268981ad216ad9dbf0ac58
|
c571b69e85a9ca90cd9ba02bc40fd2bb2391be23
|
/code/5_inla_mesh.R
|
beda6d96c0b82a4ec833be4e2e65b5dfad173119
|
[] |
no_license
|
ric70x7/ElectricityAfrica
|
9de27a1de3a23e4b0b4ec66b066eedf00913f84a
|
291c2141a62f4de4d9bcdc5181efdbc789c5955a
|
refs/heads/master
| 2022-11-17T09:54:35.703626
| 2022-11-10T18:48:13
| 2022-11-10T18:48:13
| 62,674,968
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,515
|
r
|
5_inla_mesh.R
|
# INLA preliminaries
# ------------------
# Builds the temporal and spatial meshes plus the metadata list used by the
# later INLA model scripts.
#
# Edited: September 27, 2016
library(INLA)
library(raster)
library(maptools)
graphics.off()
rm(list = ls()) # NOTE(review): wiping the workspace inside a script is discouraged
set.seed(100)
# df_model comes from the .RData file; afri_main is the Africa country shapefile
load("code_output/df_model.RData")
afri_main <- rgdal::readOGR("data/Africa_main_country/Africa_main_country.shp")
# Split data: 70% of the obfuscated rows for training, the remainder as test
# set 1; non-obfuscated rows form test set 2
df_model <- subset(df_model, year <= 2013)
num.train <- round(sum(df_model$obfuscated)*.7)
ix <- sample(1:sum(df_model$obfuscated))
df.train <- subset(df_model, obfuscated)[ix[1:num.train],]
df.test1 <- subset(df_model, obfuscated)[ix[(1 + num.train):sum(df_model$obfuscated)],]
df.test2 <- subset(df_model, !obfuscated)
# Define a list to store metadata (point locations, counts, index mappings)
meta <- list()
meta$points <- list()
meta$num <- list()
meta$ix <- list()
# Temporal mesh over the observed training years
mesh.t <- inla.mesh.1d(loc = df.train$year, interval = c(min(df.train$year), max(df.train$year)))
# Spatial mesh: dissolve the country polygons into one border, convert it to
# an INLA segment and triangulate
afri_border <- unionSpatialPolygons(afri_main, rep(1, nrow(afri_main)))
afri_segment <- inla.sp2segment(afri_border)
mesh.s <- inla.mesh.2d(boundary = afri_segment, max.edge = 1.5, cutoff = 1.4)
afr.spde <- inla.spde2.matern(mesh = mesh.s, alpha = 2)
# Indices associated to the observations
meta$num$data <- nrow(df.train)
meta$points$time <- df.train$year
meta$points$span.period <- range(mesh.t$loc)
meta$ix$time.order <- meta$points$time - min(meta$points$span.period) + 1
meta$num$time.knots <- max(meta$points$span.period) - min(df.train$year) + 1
meta$points$spatial <- df.train[, c("lon", "lat")]
# Index of observations (initialised empty here)
meta$ix$stack <- list()
|
859fd1323b31bfe36fbfab9b7557419a69a472f7
|
fed543ff068a5b1ae51c19e95b2471952d72adb7
|
/R/help.R
|
b8a6ae451ade2901812ac8cc32a56eb6e926f011
|
[] |
no_license
|
githubmpc/marimba2
|
848627c80de60e4ef53d623a3f6e7960c5afcbc4
|
332c283592d9cad4ca1a4ee4cc652709580b62f3
|
refs/heads/master
| 2021-01-20T02:19:13.442176
| 2017-10-18T14:51:17
| 2017-10-18T14:51:17
| 101,313,331
| 0
| 0
| null | 2017-10-18T14:52:26
| 2017-08-24T15:59:52
|
R
|
UTF-8
|
R
| false
| false
| 884
|
r
|
help.R
|
#' CNP analysis for parent-offspring trios
#'
#' Package-level documentation stub.  The roxygen tags below declare the
#' package imports; the description is still a placeholder -- TODO: replace
#' with a real overview of what marimba does.
#'
#' @docType package
#' @name marimba
#' @import methods
#' @import abind
#' @importFrom Hmisc rMultinom
#' @importFrom reshape2 dcast melt
#' @importFrom gtools rdirichlet
#' @importFrom ggplot2 ggplot geom_line aes facet_wrap geom_density xlab ylab geom_hline geom_histogram geom_polygon scale_color_manual scale_y_sqrt scale_fill_manual guides guide_legend geom_jitter
#' @importFrom matrixStats rowMaxs
#' @importFrom magrittr "%>%" set_colnames
#' @importFrom tidyr gather
#' @importFrom dplyr left_join mutate select filter arrange group_by summarize n starts_with bind_rows bind_cols
#' @importFrom tibble as.tibble tibble
#' @importFrom mclust Mclust mclustBIC
#' @importFrom HWEBayes DirichSampHWE
#' @importFrom coda effectiveSize gelman.diag mcmc mcmc.list
#' @importFrom purrr map map_dbl map_chr
NULL
|
4da819252e5eb7356a044fa947ddba4c00a78bfb
|
296c938a755f774a5ac571c6a2ced0e9f55bcbce
|
/lec/ex4.1.R
|
86e5e919914843a2f0842b4d23f61dc62dfeb866
|
[] |
no_license
|
peach07up/stat426
|
d2ce04244252a08a1d2b433f478b9745b44c1d5e
|
279c3247e0742311e7145e4c293b3750de0d309b
|
refs/heads/master
| 2020-03-09T01:17:30.595311
| 2018-04-07T08:34:57
| 2018-04-07T08:34:57
| 128,510,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 762
|
r
|
ex4.1.R
|
### Example 4.1: Psych Data (Logistic Regression)
psych <- read.table("psych.txt", header=TRUE)
head(psych)

### Logistic Regression: Separate X variables for the 5 questions
psychfit1 <- glm(ill ~ x1 + x2 + x3 + x4 + x5, family=binomial, data=psych)
# canonical logit link used by default
summary(psychfit1)
# effects have same direction, but none appears significant
# (possible collinearity?)

### Logistic Regression: One X variable = sum of all question scores
# IMPROVED: rowSums() replaces apply(..., 1, sum) -- same result, vectorised,
# and it avoids coercing the data frame to a matrix.
xsum <- rowSums(psych[, 2:6])
psychfit2 <- glm(ill ~ xsum, family=binomial, data=psych)
summary(psychfit2)
# significant fit

### Plot fitted probabilities versus total score
plot(xsum, fitted(psychfit2), xlab="Total Score",
     ylab="Fitted Probability of Illness")
|
d5b8d0dd6406f1aebb3388bc62f19d2dac81ab68
|
40c65fff3847662ce46d2afd73acf8b68b785107
|
/man/enw_formula_as_data_list.Rd
|
d565ee02f0107ba6a37368136e3d2f4e272da226
|
[
"MIT"
] |
permissive
|
epinowcast/epinowcast
|
b4d4562603938e9a184d3450d9387f92908cd6bc
|
98ec6dbe3c84ecbe3d55ce988e30f8e7cc6b776d
|
refs/heads/main
| 2023-09-05T18:19:10.985900
| 2023-09-05T12:13:49
| 2023-09-05T12:13:49
| 422,611,952
| 23
| 5
|
NOASSERTION
| 2023-09-14T09:57:09
| 2021-10-29T14:47:06
|
R
|
UTF-8
|
R
| false
| true
| 1,988
|
rd
|
enw_formula_as_data_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-tools.R
\name{enw_formula_as_data_list}
\alias{enw_formula_as_data_list}
\title{Format formula data for use with stan}
\usage{
enw_formula_as_data_list(formula, prefix, drop_intercept = FALSE)
}
\arguments{
\item{formula}{The output of \code{\link[=enw_formula]{enw_formula()}}.}
\item{prefix}{A character string indicating variable
label to use as a prefix.}
\item{drop_intercept}{Logical, defaults to \code{FALSE}. Should the
intercept be included as a fixed effect or excluded. This is used internally
in model modules where an intercept must be present/absent.}
}
\value{
A list defining the model formula. This includes:
\itemize{
\item \verb{prefix_fintercept:} Is an intercept present for the fixed effects design
matrix.
\item \code{prefix_fdesign}: The fixed effects design matrix
\item \code{prefix_fnrow}: The number of rows of the fixed design matrix
\item \code{prefix_findex}: The index linking design matrix rows to observations
\item \code{prefix_fnindex}: The length of the index
\item \code{prefix_fncol}: The number of columns (i.e effects) in the fixed effect
design matrix (minus 1 if \code{drop_intercept = TRUE}).
\item \code{prefix_rdesign}: The random effects design matrix
\item \code{prefix_rncol}: The number of columns (i.e random effects) in the random
effect design matrix (minus 1 as the intercept is dropped).
}
}
\description{
Format formula data for use with stan
}
\examples{
f <- enw_formula(~ 1 + (1 | cyl), mtcars)
enw_formula_as_data_list(f, "mtcars")
# A missing formula produces the default list
enw_formula_as_data_list(prefix = "missing")
}
\seealso{
Functions used to help convert models into the format required for stan
\code{\link{enw_model}()},
\code{\link{enw_priors_as_data_list}()},
\code{\link{enw_replace_priors}()},
\code{\link{enw_sample}()},
\code{\link{remove_profiling}()},
\code{\link{write_stan_files_no_profile}()}
}
\concept{modeltools}
|
883979549690da2a3c056da5200153a3bfc37d5e
|
28c3fb6c82f93c5090a5c7b3b672fc4c4324853d
|
/R-EstudioSeVaAntes/R-Estudio/estudioSeVaAntes.R
|
4dc6aea36fa87f5acef890c68d9e1b2a294007aa
|
[] |
no_license
|
IgnacioGarrido/gitst_optimization_bscthesis
|
d0cef95dd7450d72a0e0ff25213c2cf7f5730965
|
2a0617a0076c414a823abc558ecaa58578dd66ef
|
refs/heads/master
| 2020-09-09T18:22:51.258566
| 2019-11-25T00:22:30
| 2019-11-25T00:22:30
| 221,525,514
| 1
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 19,779
|
r
|
estudioSeVaAntes.R
|
library(readr)
library("ggplot2")
# Hourly energy prices for four representative days (columns appear to be one
# per season: p_invierno etc. -- confirm against the CSV header)
precio_energia <- read.csv("~/Universidad/4/SegundoSemestre/TFG/___AuxiliarParaToquetearSinCagarla/R-EstudioSeVaAntes/precioEnergía4días.csv", header = T, sep = "\t")
# Per-season car ("coche") result files; inv/pri/ver/oto = winter/spring/
# summer/autumn, parsed line-by-line further below
coche_inv <- read.delim("~/Universidad/4/SegundoSemestre/TFG/___AuxiliarParaToquetearSinCagarla/R-EstudioSeVaAntes/Escenario VI/coche_inv.txt", header = F, sep = "\t")
coche_pri <- read.delim("~/Universidad/4/SegundoSemestre/TFG/___AuxiliarParaToquetearSinCagarla/R-EstudioSeVaAntes/Escenario VI/coche_pri.txt", header = F, sep = "\t")
coche_ver <- read.delim("~/Universidad/4/SegundoSemestre/TFG/___AuxiliarParaToquetearSinCagarla/R-EstudioSeVaAntes/Escenario VI/coche_ver.txt", header = F, sep = "\t")
coche_oto <- read.delim("~/Universidad/4/SegundoSemestre/TFG/___AuxiliarParaToquetearSinCagarla/R-EstudioSeVaAntes/Escenario VI/coche_oto.txt", header = F, sep = "\t")
# Arrange several ggplot objects on one page using a grid layout.
#
# Plots are supplied via `...` and/or `plotlist`.  When no `layout` matrix is
# given, a column-filled matrix with `cols` columns is built; an entry k in
# `layout` places plot k at that cell.  (`file` is accepted but unused, as in
# the original cookbook version.)
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)

  # Collect all plots into a single list.
  all_plots <- c(list(...), plotlist)
  n_plots <- length(all_plots)

  # Default layout: fill column-by-column, cols wide.
  if (is.null(layout)) {
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq(1, cols * n_rows), ncol = cols, nrow = n_rows)
  }

  if (n_plots == 1) {
    # Single plot: no grid machinery needed.
    print(all_plots[[1]])
  } else {
    # Open a fresh page and push a viewport with the requested grid.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))

    for (k in 1:n_plots) {
      # Cell(s) of `layout` holding index k -> row/column of plot k.
      cell <- as.data.frame(which(layout == k, arr.ind = TRUE))
      print(all_plots[[k]],
            vp = viewport(layout.pos.row = cell$row,
                          layout.pos.col = cell$col))
    }
  }
}
# Parse the four per-season text outputs into per-car hourly vectors.
# Each line in `coche` appears to carry a car id as its first token, an hour
# as ".<hour>" in the third token, and a numeric value before an "INF" marker;
# the value is stored in cocheN_vector[hour].  (Format inferred from the
# parsing code below -- confirm against the solver output files.)
df_coche = data.frame(coche_inv,coche_pri, coche_ver, coche_oto)
for(t in 1:4){
# Season t: 1 = invierno, 2 = primavera, 3 = verano, 4 = otoño
coche <- df_coche[,t]
horacoche <- 1:24
coche1_vector <- 1:24
coche2_vector <- 1:24
coche3_vector <- 1:24
coche4_vector <- 1:24
coche5_vector <- 1:24
coche6_vector <- 1:24
for(i in 1:length(coche)){
# Dispatch on the car id (first token of the line)
if(strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][1] == 1){
coche1_vector[as.integer(sub(".","", strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][3]))] = as.double(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]][length(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]])-1])
} else if(strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][1] == 2){
coche2_vector[as.integer(sub(".","", strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][3]))] = as.double(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]][length(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]])-1])
} else if(strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][1] == 3){
coche3_vector[as.integer(sub(".","", strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][3]))] = as.double(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]][length(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]])-1])
} else if(strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][1] == 4){
coche4_vector[as.integer(sub(".","", strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][3]))] = as.double(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]][length(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]])-1])
} else if(strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][1] == 5){
coche5_vector[as.integer(sub(".","", strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][3]))] = as.double(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]][length(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]])-1])
} else if(strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][1] == 6){
coche6_vector[as.integer(sub(".","", strsplit(strsplit(toString(coche[i]), split =" ")[[1]][1], split=" ")[[1]][3]))] = as.double(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]][length(strsplit(strsplit(toString(coche[i]), split ="INF")[[1]][1], split=" ")[[1]])-1])
}
}
# Entry-hour values patched by hand from specific file lines (hard-coded row
# numbers -- presumably the lines the generic parser misses; verify if the
# input files change)
coche1_vector[8] = strsplit(toString(coche[8]), split =" ")[[1]][3]
coche2_vector[9] = strsplit(toString(coche[33]), split =" ")[[1]][3]
coche3_vector[9] = strsplit(toString(coche[57]), split =" ")[[1]][3]
coche4_vector[9] = strsplit(toString(coche[81]), split =" ")[[1]][3]
coche5_vector[6] = strsplit(toString(coche[102]), split =" ")[[1]][3]
coche6_vector[15] = strsplit(toString(coche[135]), split =" ")[[1]][3]
# Blank out the hours outside each car's presence window
for(i in 1:24){
if(i < 8 || i > 19){
coche1_vector[i] <- NA
}
if(i < 9 || i > 16){
coche2_vector[i] <- NA
}
if(i < 9 || i > 19){
coche3_vector[i] <- NA
}
if(i < 9 || i > 17){
coche4_vector[i] <- NA
}
if(i < 6 || i > 15){
coche5_vector[i] <- NA
}
if(i < 15 || i > 21){
coche6_vector[i] <- NA
}
}
# Store the parsed vectors in the per-season data frame and convert any
# factor columns back to numeric
if(t == 1){
df_coche_inv = data.frame(horacoche, coche1_vector, coche2_vector, coche3_vector, coche4_vector, coche5_vector, coche6_vector)
indx <- sapply(df_coche_inv, is.factor)
df_coche_inv[indx] <- lapply(df_coche_inv[indx], function(x) as.numeric(as.character(x)))
}else if(t==2){
df_coche_pri = data.frame(horacoche, coche1_vector, coche2_vector, coche3_vector, coche4_vector, coche5_vector, coche6_vector)
indx <- sapply(df_coche_pri, is.factor)
df_coche_pri[indx] <- lapply(df_coche_pri[indx], function(x) as.numeric(as.character(x)))
}else if(t==3){
df_coche_ver = data.frame(horacoche, coche1_vector, coche2_vector, coche3_vector, coche4_vector, coche5_vector, coche6_vector)
indx <- sapply(df_coche_ver, is.factor)
df_coche_ver[indx] <- lapply(df_coche_ver[indx], function(x) as.numeric(as.character(x)))
}else if(t==4){
df_coche_oto = data.frame(horacoche, coche1_vector, coche2_vector, coche3_vector, coche4_vector, coche5_vector, coche6_vector)
indx <- sapply(df_coche_oto, is.factor)
df_coche_oto[indx] <- lapply(df_coche_oto[indx], function(x) as.numeric(as.character(x)))
}
}
# Entry and exit hours for each of the six cars (index k = car k)
hora_entrada <- c(8,9,9,9,6,15)
hora_salida <- c(19,16,19,17,15,21)
# Vectors accumulating the money gained/lost per hour, one per car
seVa_1 <- rep(0,24)
seVa_2 <- rep(0,24)
seVa_3 <- rep(0,24)
seVa_4 <- rep(0,24)
seVa_5 <- rep(0,24)
seVa_6 <- rep(0,24)
#Bucle invierno
for(i in 1:24){
if(i <= hora_entrada[1] || i > hora_salida[1]){
seVa_1[i] <- NA
}else{
for(n in (hora_entrada[1]+1):i){
seVa_1[i] <- seVa_1[i] + (df_coche_inv$coche1_vector[n]-df_coche_inv$coche1_vector[n-1])*precio_energia$p_invierno[n]
}
}
if(i <= hora_entrada[2] || i > hora_salida[2]){
seVa_2[i] <- NA
}else{
for(n in (hora_entrada[2]+1):i){
seVa_2[i] <- seVa_2[i] + (df_coche_inv$coche2_vector[n]-df_coche_inv$coche2_vector[n-1])*precio_energia$p_invierno[n]
}
}
if(i <= hora_entrada[3] || i > hora_salida[3]){
seVa_3[i] <- NA
}else{
for(n in (hora_entrada[3]+1):i){
seVa_3[i] <- seVa_3[i] + (df_coche_inv$coche3_vector[n]-df_coche_inv$coche3_vector[n-1])*precio_energia$p_invierno[n]
}
}
if(i <= hora_entrada[4] || i > hora_salida[4]){
seVa_4[i] <- NA
}else{
for(n in (hora_entrada[4]+1):i){
seVa_4[i] <- seVa_4[i] + (df_coche_inv$coche4_vector[n]-df_coche_inv$coche4_vector[n-1])*precio_energia$p_invierno[n]
}
}
if(i <= hora_entrada[5] || i > hora_salida[5]){
seVa_5[i] <- NA
}else{
for(n in (hora_entrada[5]+1):i){
seVa_5[i] <- seVa_5[i] + (df_coche_inv$coche5_vector[n]-df_coche_inv$coche5_vector[n-1])*precio_energia$p_invierno[n]
}
}
if(i <= hora_entrada[6] || i > hora_salida[6]){
seVa_6[i] <- NA
}else{
for(n in (hora_entrada[6]+1):i){
seVa_6[i] <- seVa_6[i] + (df_coche_inv$coche6_vector[n]-df_coche_inv$coche6_vector[n-1])*precio_energia$p_invierno[n]
}
}
df_SeVaAntes_inv <- data.frame(seVa_1, seVa_2, seVa_3, seVa_4, seVa_5, seVa_6)
}
seVa_1 <- rep(0,24)
seVa_2 <- rep(0,24)
seVa_3 <- rep(0,24)
seVa_4 <- rep(0,24)
seVa_5 <- rep(0,24)
seVa_6 <- rep(0,24)
#Bucle primavera
for(i in 1:24){
if(i <= hora_entrada[1] || i > hora_salida[1]){
seVa_1[i] <- NA
}else{
for(n in (hora_entrada[1]+1):i){
seVa_1[i] <- seVa_1[i] + (df_coche_pri$coche1_vector[n]-df_coche_pri$coche1_vector[n-1])*precio_energia$p_primavera[n]
}
}
if(i <= hora_entrada[2] || i > hora_salida[2]){
seVa_2[i] <- NA
}else{
for(n in (hora_entrada[2]+1):i){
seVa_2[i] <- seVa_2[i] + (df_coche_pri$coche2_vector[n]-df_coche_pri$coche2_vector[n-1])*precio_energia$p_primavera[n]
}
}
if(i <= hora_entrada[3] || i > hora_salida[3]){
seVa_3[i] <- NA
}else{
for(n in (hora_entrada[3]+1):i){
seVa_3[i] <- seVa_3[i] + (df_coche_pri$coche3_vector[n]-df_coche_pri$coche3_vector[n-1])*precio_energia$p_primavera[n]
}
}
if(i <= hora_entrada[4] || i > hora_salida[4]){
seVa_4[i] <- NA
}else{
for(n in (hora_entrada[4]+1):i){
seVa_4[i] <- seVa_4[i] + (df_coche_pri$coche4_vector[n]-df_coche_pri$coche4_vector[n-1])*precio_energia$p_primavera[n]
}
}
if(i <= hora_entrada[5] || i > hora_salida[5]){
seVa_5[i] <- NA
}else{
for(n in (hora_entrada[5]+1):i){
seVa_5[i] <- seVa_5[i] + (df_coche_pri$coche5_vector[n]-df_coche_pri$coche5_vector[n-1])*precio_energia$p_primavera[n]
}
}
if(i <= hora_entrada[6] || i > hora_salida[6]){
seVa_6[i] <- NA
}else{
for(n in (hora_entrada[6]+1):i){
seVa_6[i] <- seVa_6[i] + (df_coche_pri$coche6_vector[n]-df_coche_pri$coche6_vector[n-1])*precio_energia$p_primavera[n]
}
}
df_SeVaAntes_pri <- data.frame(seVa_1, seVa_2, seVa_3, seVa_4, seVa_5, seVa_6)
}
seVa_1 <- rep(0,24)
seVa_2 <- rep(0,24)
seVa_3 <- rep(0,24)
seVa_4 <- rep(0,24)
seVa_5 <- rep(0,24)
seVa_6 <- rep(0,24)
#Bucle verano
for(i in 1:24){
if(i <= hora_entrada[1] || i > hora_salida[1]){
seVa_1[i] <- NA
}else{
for(n in (hora_entrada[1]+1):i){
seVa_1[i] <- seVa_1[i] + (df_coche_ver$coche1_vector[n]-df_coche_ver$coche1_vector[n-1])*precio_energia$p_verano[n]
}
}
if(i <= hora_entrada[2] || i > hora_salida[2]){
seVa_2[i] <- NA
}else{
for(n in (hora_entrada[2]+1):i){
seVa_2[i] <- seVa_2[i] + (df_coche_ver$coche2_vector[n]-df_coche_ver$coche2_vector[n-1])*precio_energia$p_verano[n]
}
}
if(i <= hora_entrada[3] || i > hora_salida[3]){
seVa_3[i] <- NA
}else{
for(n in (hora_entrada[3]+1):i){
seVa_3[i] <- seVa_3[i] + (df_coche_ver$coche3_vector[n]-df_coche_ver$coche3_vector[n-1])*precio_energia$p_verano[n]
}
}
if(i <= hora_entrada[4] || i > hora_salida[4]){
seVa_4[i] <- NA
}else{
for(n in (hora_entrada[4]+1):i){
seVa_4[i] <- seVa_4[i] + (df_coche_ver$coche4_vector[n]-df_coche_ver$coche4_vector[n-1])*precio_energia$p_verano[n]
}
}
if(i <= hora_entrada[5] || i > hora_salida[5]){
seVa_5[i] <- NA
}else{
for(n in (hora_entrada[5]+1):i){
seVa_5[i] <- seVa_5[i] + (df_coche_ver$coche5_vector[n]-df_coche_ver$coche5_vector[n-1])*precio_energia$p_verano[n]
}
}
if(i <= hora_entrada[6] || i > hora_salida[6]){
seVa_6[i] <- NA
}else{
for(n in (hora_entrada[6]+1):i){
seVa_6[i] <- seVa_6[i] + (df_coche_ver$coche6_vector[n]-df_coche_ver$coche6_vector[n-1])*precio_energia$p_verano[n]
}
}
df_SeVaAntes_ver <- data.frame(seVa_1, seVa_2, seVa_3, seVa_4, seVa_5, seVa_6)
}
seVa_1 <- rep(0,24)
seVa_2 <- rep(0,24)
seVa_3 <- rep(0,24)
seVa_4 <- rep(0,24)
seVa_5 <- rep(0,24)
seVa_6 <- rep(0,24)
#Bucle otoño
for(i in 1:24){
if(i <= hora_entrada[1] || i > hora_salida[1]){
seVa_1[i] <- NA
}else{
for(n in (hora_entrada[1]+1):i){
seVa_1[i] <- seVa_1[i] + (df_coche_oto$coche1_vector[n]-df_coche_oto$coche1_vector[n-1])*precio_energia$p_otoño[n]
}
}
if(i <= hora_entrada[2] || i > hora_salida[2]){
seVa_2[i] <- NA
}else{
for(n in (hora_entrada[2]+1):i){
seVa_2[i] <- seVa_2[i] + (df_coche_oto$coche2_vector[n]-df_coche_oto$coche2_vector[n-1])*precio_energia$p_otoño[n]
}
}
if(i <= hora_entrada[3] || i > hora_salida[3]){
seVa_3[i] <- NA
}else{
for(n in (hora_entrada[3]+1):i){
seVa_3[i] <- seVa_3[i] + (df_coche_oto$coche3_vector[n]-df_coche_oto$coche3_vector[n-1])*precio_energia$p_otoño[n]
}
}
if(i <= hora_entrada[4] || i > hora_salida[4]){
seVa_4[i] <- NA
}else{
for(n in (hora_entrada[4]+1):i){
seVa_4[i] <- seVa_4[i] + (df_coche_oto$coche4_vector[n]-df_coche_oto$coche4_vector[n-1])*precio_energia$p_otoño[n]
}
}
if(i <= hora_entrada[5] || i > hora_salida[5]){
seVa_5[i] <- NA
}else{
for(n in (hora_entrada[5]+1):i){
seVa_5[i] <- seVa_5[i] + (df_coche_oto$coche5_vector[n]-df_coche_oto$coche5_vector[n-1])*precio_energia$p_otoño[n]
}
}
if(i <= hora_entrada[6] || i > hora_salida[6]){
seVa_6[i] <- NA
}else{
for(n in (hora_entrada[6]+1):i){
seVa_6[i] <- seVa_6[i] + (df_coche_oto$coche6_vector[n]-df_coche_oto$coche6_vector[n-1])*precio_energia$p_otoño[n]
}
}
df_SeVaAntes_oto <- data.frame(seVa_1, seVa_2, seVa_3, seVa_4, seVa_5, seVa_6)
}
# Persist the per-season gain/loss tables as CSV files
invisible(Map(
  write.csv,
  list(df_SeVaAntes_inv, df_SeVaAntes_pri, df_SeVaAntes_ver, df_SeVaAntes_oto),
  c("seVaAntes_inv.csv", "seVaAntes_pri.csv", "seVaAntes_ver.csv", "seVaAntes_oto.csv")
))
# Plots
# Colour assigned to each vehicle (EV 1..EV 6)
colores_ve <- c("green", "blue", "red", "purple", "black", "orange")
# Build the plot for one season: the hourly price (grey, scaled x10 so it
# fits the -2..2 euro axis) plus one line + points per vehicle showing the
# money gained/lost at each hour.
#
# df_seva: data frame with columns seVa_1..seVa_6 (24 rows, one per hour)
# precios: vector of 24 hourly prices for the season
# titulo:  plot title (e.g. "INVIERNO")
# Returns a ggplot object.
grafica_estacion <- function(df_seva, precios, titulo) {
  p <- ggplot() +
    geom_line(data = data.frame(hora = 1:24, precio = precios * 10),
              aes(x = hora, y = precio), colour = "grey")
  for (v in seq_along(colores_ve)) {
    d <- data.frame(hora = 1:24, euros = df_seva[[paste0("seVa_", v)]])
    p <- p +
      geom_line(data = d, aes(x = hora, y = euros), colour = colores_ve[v]) +
      geom_point(data = d, aes(x = hora, y = euros), colour = colores_ve[v])
  }
  # NOTE(review): the manual colour legend (scale_colour_manual for
  # "VE 1".."VE 6") was commented out in the original code; it stays
  # disabled here to keep the output unchanged.
  p + ylim(-2, 2) + xlab("Hora") + ylab("€") + ggtitle(titulo) +
    theme(plot.title = element_text(hjust = 0.5))
}
p1 <- grafica_estacion(df_SeVaAntes_inv, precio_energia$p_invierno, "INVIERNO")
print(p1)
p2 <- grafica_estacion(df_SeVaAntes_pri, precio_energia$p_primavera, "PRIMAVERA")
print(p2)
p3 <- grafica_estacion(df_SeVaAntes_ver, precio_energia$p_verano, "VERANO")
print(p3)
p4 <- grafica_estacion(df_SeVaAntes_oto, precio_energia$p_otoño, "OTOÑO")
print(p4)
# Save the four seasonal plots in a 2x2 grid to a Windows metafile.
# win.metafile() is Windows-only; multiplot() is not defined in this file
# — presumably the Cookbook for R helper, loaded elsewhere (TODO confirm).
win.metafile("grafcoche.wmf")
multiplot(p1, p3, p2, p4, cols=2)
dev.off()
|
4131c5c5cbba3cdc2ab43853937024f7cf0a2f1f
|
e84a6b95403e9c9aecd8870819a21d93b30498b7
|
/man/getRoadWorks.Rd
|
721d9e452bfdf5e00cc812d4d5996351a8ded1d5
|
[] |
no_license
|
00mjk/ltaer
|
d5ecf83d18738ca0c50e5575ac063a636126ffd2
|
572fb1d9676b514de850e89ed984267ee7f5746f
|
refs/heads/master
| 2022-12-10T01:04:38.781394
| 2020-08-31T13:12:46
| 2020-08-31T13:12:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 519
|
rd
|
getRoadWorks.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getRoadWorks.R
\name{getRoadWorks}
\alias{getRoadWorks}
\title{Road Works}
\usage{
getRoadWorks(api_key)
}
\arguments{
\item{api_key}{API key for LTA's Datamall}
}
\value{
A dataframe containing the details for each current or planned road work, such as the start date, end date, and road name
}
\description{
Returns all road works currently being carried out or scheduled to be carried out.
}
\examples{
\donttest{
getRoadWorks(mykey)
}
}
|
178c11ad5867ccc0f757ccec2e5c3f5d905421ca
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.storage/man/backup_list_copy_jobs.Rd
|
9ecd21ebdc1559e71c631b9747e75e087e4af8b8
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 3,109
|
rd
|
backup_list_copy_jobs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backup_operations.R
\name{backup_list_copy_jobs}
\alias{backup_list_copy_jobs}
\title{Returns metadata about your copy jobs}
\usage{
backup_list_copy_jobs(NextToken, MaxResults, ByResourceArn, ByState,
ByCreatedBefore, ByCreatedAfter, ByResourceType, ByDestinationVaultArn,
ByAccountId)
}
\arguments{
\item{NextToken}{The next item following a partial list of returned items. For example,
if a request is made to return maxResults number of items, NextToken
allows you to return more items in your list starting at the location
pointed to by the next token.}
\item{MaxResults}{The maximum number of items to be returned.}
\item{ByResourceArn}{Returns only copy jobs that match the specified resource Amazon Resource
Name (ARN).}
\item{ByState}{Returns only copy jobs that are in the specified state.}
\item{ByCreatedBefore}{Returns only copy jobs that were created before the specified date.}
\item{ByCreatedAfter}{Returns only copy jobs that were created after the specified date.}
\item{ByResourceType}{Returns only backup jobs for the specified resources:
\itemize{
\item \code{DynamoDB} for Amazon DynamoDB
\item \code{EBS} for Amazon Elastic Block Store
\item \code{EC2} for Amazon Elastic Compute Cloud
\item \code{EFS} for Amazon Elastic File System
\item \code{RDS} for Amazon Relational Database Service
\item \verb{Storage Gateway} for AWS Storage Gateway
}}
\item{ByDestinationVaultArn}{An Amazon Resource Name (ARN) that uniquely identifies a source backup
vault to copy from; for example,
\code{arn:aws:backup:us-east-1:123456789012:vault:aBackupVault}.}
\item{ByAccountId}{The account ID to list the jobs from. Returns only copy jobs associated
with the specified account ID.}
}
\value{
A list with the following syntax:\preformatted{list(
CopyJobs = list(
list(
AccountId = "string",
CopyJobId = "string",
SourceBackupVaultArn = "string",
SourceRecoveryPointArn = "string",
DestinationBackupVaultArn = "string",
DestinationRecoveryPointArn = "string",
ResourceArn = "string",
CreationDate = as.POSIXct(
"2015-01-01"
),
CompletionDate = as.POSIXct(
"2015-01-01"
),
State = "CREATED"|"RUNNING"|"COMPLETED"|"FAILED",
StatusMessage = "string",
BackupSizeInBytes = 123,
IamRoleArn = "string",
CreatedBy = list(
BackupPlanId = "string",
BackupPlanArn = "string",
BackupPlanVersion = "string",
BackupRuleId = "string"
),
ResourceType = "string"
)
),
NextToken = "string"
)
}
}
\description{
Returns metadata about your copy jobs.
}
\section{Request syntax}{
\preformatted{svc$list_copy_jobs(
NextToken = "string",
MaxResults = 123,
ByResourceArn = "string",
ByState = "CREATED"|"RUNNING"|"COMPLETED"|"FAILED",
ByCreatedBefore = as.POSIXct(
"2015-01-01"
),
ByCreatedAfter = as.POSIXct(
"2015-01-01"
),
ByResourceType = "string",
ByDestinationVaultArn = "string",
ByAccountId = "string"
)
}
}
\keyword{internal}
|
9a049f7ca6c3729e2df984d17290834505b2d106
|
3bae8fce0fd6baddfb7c5d00f74ebb1cad0566d5
|
/plot3.R
|
c9fec909d90978f51f01c90a6347269da6f54a44
|
[] |
no_license
|
withbiviz/datasciencecoursera
|
debd0eb71fc1c28ea1ba1d1a34bc392d7bc50bd7
|
c9c3233c2fea11c14a27f2b69a7c84b39a63dd3b
|
refs/heads/master
| 2021-01-21T22:05:25.106071
| 2017-09-30T21:48:50
| 2017-09-30T21:48:50
| 95,158,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 887
|
r
|
plot3.R
|
# plot3.R — plot the three energy sub-metering series for 2007-02-01/02
# from the UCI household power consumption dataset into plot3.png.
# Fixes vs the previous version:
#  - na.strings = "?" so the numeric columns are parsed as numeric; the
#    old code read them as factors and plotted factor level codes, which
#    is why it needed the spurious "/10" rescale on Sub_metering_2
#  - the extra series are drawn with lines() on the same coordinate
#    system instead of overplotting whole plots with par(new = TRUE)
#  - legend colours now match the plotted series (black/red/blue)
#  - "Enerny" -> "Energy" axis-label typo
f <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
d <- as.Date(f$Date, "%d/%m/%Y")
s <- f[d >= "2007-02-01" & d <= "2007-02-02", ]
datetime <- strptime(paste(s$Date, s$Time), "%d/%m/%Y %H:%M:%S")
png("plot3.png", width = 480, height = 480)
plot(datetime, s$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(datetime, s$Sub_metering_2, col = "red")
lines(datetime, s$Sub_metering_3, col = "blue")
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
|
a58bf6cb02d5f03bf336906528a7bb46471426ed
|
7e2482a8dafe5968140797d1cc9d4cfd079f6d2e
|
/applicazione/readme.rd
|
962b3a1ec7b6220d9a724faa4fce0d71f5be1ace
|
[] |
no_license
|
redelmondo77/util
|
fca242cafc0c4f6eb72c9de53a107ac33461129c
|
4d56c3f7429b7d9bcc2ab762b411dd9dfb00a508
|
refs/heads/master
| 2020-03-19T08:54:21.066845
| 2018-06-29T11:42:55
| 2018-06-29T11:42:55
| 136,220,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,777
|
rd
|
readme.rd
|
INSTALL
install
https://spring.io/tools/sts/all
open the Git perspective
clone repository git: redelmondo77/util
import maven project /applicazione
spring
maven>update project
HOME
md C:\dev\
md C:\dev\home\
md C:\dev\home\applicazione
md C:\dev\home\applicazione\config
md C:\dev\home\applicazione\sessionStore
md C:\dev\home\applicazione\storage
md C:\dev\home\applicazione\tomcat
md C:\dev\home\applicazione\tomcatLogging
echo #HOMECONFIG > C:\dev\home\applicazione\config\application.yml
override application properties with application.yml
WEB TEST
run as > spring boot app
http://localhost/applicazione
DEPLOY
## spring boot as fat jar
run as>maven install
[INFO] Installing .... to C:\dev\.m2\repository\it\applicazione\applicazione\0.0.1-SNAPSHOT\applicazione-0.0.1-SNAPSHOT.war
copy C:\dev\.m2\repository\it\applicazione\applicazione\0.0.1-SNAPSHOT\applicazione-0.0.1-SNAPSHOT.war C:\dev\home\applicazione
RUN
java -jar C:\dev\home\applicazione\applicazione-0.0.1-SNAPSHOT.war --spring.config.location=C:/dev/home/applicazione/config/application.yml
DB CONSOLE
http://localhost/applicazione/h2
LINKS
https://getbootstrap.com/docs/4.0/
https://fontawesome.com/icons?d=gallery
https://www.thymeleaf.org/doc/articles/standarddialect5minutes.html
${...} : Variable expressions.
*{...} : Selection expressions.
#{...} : Message (i18n) expressions.
@{...} : Link (URL) expressions.
~{...} : Fragment expressions.
use scss
https://getbootstrap.com/docs/4.0/getting-started/theming/
https://github.com/warmuuh/libsass-maven-plugin
https://www.w3schools.com/html/html_layout.asp
https://www.w3schools.com/bootstrap/default.asp
|
54341803ad12381a26227595938e0e3fa216d31f
|
885a1f38eb89a4d5ad3853c58ee6674a34d83f15
|
/CEN180_in_t2t-col.20210610/quantiles/CEN180_quantile_frequency_chrProfilesPlot_circlize_v02.R
|
6376e0513f30117b201609692c31e6880b36be46
|
[] |
no_license
|
ajtock/repeats
|
62164d160ad553b0ad366e2269c1dea3953c8ea8
|
e4c7b9d505bcaa7957f559ffd5c7f1db3e1002e4
|
refs/heads/master
| 2021-12-29T07:24:15.739614
| 2021-12-15T15:54:58
| 2021-12-15T15:54:58
| 162,921,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,884
|
r
|
CEN180_quantile_frequency_chrProfilesPlot_circlize_v02.R
|
#!/applications/R/R-4.0.0/bin/Rscript
# author: Andy Tock
# contact: ajt200@cam.ac.uk
# date: 17.06.2021
# Plot windowed CEN180 frequencies for CEN180 sequences within orderingFactor quantiles
# Usage:
# /applications/R/R-4.0.0/bin/Rscript CEN180_quantile_frequency_chrProfilesPlot_circlize_v02.R 'Chr1,Chr2,Chr3,Chr4,Chr5' 4 10000 perchrom 220721
#chrName <- unlist(strsplit("Chr1,Chr2,Chr3,Chr4,Chr5",
# split = ","))
#quantiles <- 4
#genomeBinSize <- 10000
#quantileDef <- "genomewide"
#quantileDef <- "perchrom"
#date <- "170621"
# Parse command-line arguments:
#   args[1]: comma-separated chromosome names (e.g. "Chr1,Chr2,Chr3,Chr4,Chr5")
#   args[2]: number of quantiles
#   args[3]: genome window (bin) size in bp
#   args[4]: quantile definition mode ("genomewide" or "perchrom")
#   args[5]: date string used in output file naming
args <- commandArgs(trailingOnly = T)
chrName <- unlist(strsplit(args[1],
split = ","))
quantiles <- as.integer(args[2])
genomeBinSize <- as.integer(args[3])
quantileDef <- args[4]
date <- as.character(args[5])
# Build a human-readable bin-size label from the digit count of
# genomeBinSize: up to 3 digits -> "bp", 4-6 digits -> "kb", >= 7 -> "Mb"
if(floor(log10(genomeBinSize)) + 1 < 4) {
genomeBinName <- paste0(genomeBinSize, "bp")
} else if(floor(log10(genomeBinSize)) + 1 >= 4 &
floor(log10(genomeBinSize)) + 1 <= 6) {
genomeBinName <- paste0(genomeBinSize/1e3, "kb")
} else if(floor(log10(genomeBinSize)) + 1 >= 7) {
genomeBinName <- paste0(genomeBinSize/1e6, "Mb")
}
# Keep strings as character on read and avoid scientific notation in output
options(stringsAsFactors = F)
options(scipen=999)
library(parallel)
library(GenomicRanges)
library(circlize)
library(ComplexHeatmap)
library(gridBase)
library(viridis)
# Output directory for plots; created via the shell if it does not exist
plotDir <- paste0("plots/")
system(paste0("[ -d ", plotDir, " ] || mkdir -p ", plotDir))
# Define quantile colours (Q1..Q4)
quantileColours <- c("red", "orange", "dodgerblue", "navy")
# Convert colour names/hex codes to semi-transparent hex colours.
# `alpha` is on the 0-255 scale (default 210); returns one "#RRGGBBAA"
# string per input colour.
makeTransparent <- function(thisColour, alpha = 210) {
  channels <- col2rgb(thisColour)
  vapply(
    seq_len(ncol(channels)),
    function(j) {
      rgb(channels[1, j], channels[2, j], channels[3, j],
          alpha = alpha, maxColorValue = 255)
    },
    character(1)
  )
}
# Apply alpha transparency to the quantile colours
quantileColours <- makeTransparent(quantileColours)
# Genomic definitions
# Read the FASTA index (chromosome names in V1, lengths in V2) and subset
# to the chromosomes requested on the command line.
# NOTE(review): absolute path — assumes the original analysis host.
fai <- read.table("/home/ajt200/analysis/nanopore/t2t-col.20210610/t2t-col.20210610.fa.fai", header = F)
chrs <- fai$V1[which(fai$V1 %in% chrName)]
chrLens <- fai$V2[which(fai$V1 %in% chrName)]
# Whole-chromosome ranges for the selected chromosomes
regionGR <- GRanges(seqnames = chrs,
ranges = IRanges(start = 1,
end = chrLens),
strand = "*")
# Hard-coded centromere coordinates for Chr1-Chr5 of the t2t-col.20210610
# assembly, subset to the selected chromosomes
CENstart <- c(14840750, 3724530, 13597090, 4203495, 11783990)[which(fai$V1 %in% chrName)]
CENend <- c(17558182, 5946091, 15733029, 6977107, 14551874)[which(fai$V1 %in% chrName)]
CENGR <- GRanges(seqnames = chrs,
ranges = IRanges(start = CENstart,
end = CENend),
strand = "*")
# Load per-quantile windowed CEN180 frequency tables for the three ordering
# factors (CENH3 in bodies, HOR lengths sum, weighted SNVs).
# "genomewide": quantiles were defined across all selected chromosomes, so
# one combined table per ordering factor is read.
if(quantileDef == "genomewide") {
# Load table of windowed feature frequencies for each quantile
CENH3_in_bodies_CEN180 <- read.table(paste0("quantiles_by_CENH3_in_bodies/", paste0(chrName, collapse = "_"), "/",
"CEN180_frequency_per_", genomeBinName,
"_", quantiles, "quantiles_",
"_by_CENH3_in_bodies",
"_of_CEN180_in_t2t-col.20210610_",
paste0(chrName, collapse = "_"), "_smoothed.tsv"),
header = T, sep = "\t")
HORlengthsSum_CEN180 <- read.table(paste0("quantiles_by_HORlengthsSum/", paste0(chrName, collapse = "_"), "/",
"CEN180_frequency_per_", genomeBinName,
"_", quantiles, "quantiles_",
"_by_HORlengthsSum",
"_of_CEN180_in_t2t-col.20210610_",
paste0(chrName, collapse = "_"), "_smoothed.tsv"),
header = T, sep = "\t")
wSNV_CEN180 <- read.table(paste0("quantiles_by_wSNV/", paste0(chrName, collapse = "_"), "/",
"CEN180_frequency_per_", genomeBinName,
"_", quantiles, "quantiles_",
"_by_wSNV",
"_of_CEN180_in_t2t-col.20210610_",
paste0(chrName, collapse = "_"), "_smoothed.tsv"),
header = T, sep = "\t")
# "perchrom": quantiles were defined per chromosome, so one table per
# chromosome is read and the results row-bound into a single data frame.
} else if(quantileDef == "perchrom") {
# Load table of windowed feature frequencies for each quantile
CENH3_in_bodies_CEN180List <- lapply(seq_along(chrName), function(x) {
read.table(paste0("quantiles_by_CENH3_in_bodies/", chrName[x], "/",
"CEN180_frequency_per_", genomeBinName,
"_", quantiles, "quantiles_",
"_by_CENH3_in_bodies",
"_of_CEN180_in_t2t-col.20210610_",
chrName[x], "_smoothed.tsv"),
header = T, sep = "\t")
})
if(length(chrName) > 1) {
CENH3_in_bodies_CEN180 <- do.call(rbind, CENH3_in_bodies_CEN180List)
} else {
CENH3_in_bodies_CEN180 <- CENH3_in_bodies_CEN180List[[1]]
}
HORlengthsSum_CEN180List <- lapply(seq_along(chrName), function(x) {
read.table(paste0("quantiles_by_HORlengthsSum/", chrName[x], "/",
"CEN180_frequency_per_", genomeBinName,
"_", quantiles, "quantiles_",
"_by_HORlengthsSum",
"_of_CEN180_in_t2t-col.20210610_",
chrName[x], "_smoothed.tsv"),
header = T, sep = "\t")
})
if(length(chrName) > 1) {
HORlengthsSum_CEN180 <- do.call(rbind, HORlengthsSum_CEN180List)
} else {
HORlengthsSum_CEN180 <- HORlengthsSum_CEN180List[[1]]
}
wSNV_CEN180List <- lapply(seq_along(chrName), function(x) {
read.table(paste0("quantiles_by_wSNV/", chrName[x], "/",
"CEN180_frequency_per_", genomeBinName,
"_", quantiles, "quantiles_",
"_by_wSNV",
"_of_CEN180_in_t2t-col.20210610_",
chrName[x], "_smoothed.tsv"),
header = T, sep = "\t")
})
if(length(chrName) > 1) {
wSNV_CEN180 <- do.call(rbind, wSNV_CEN180List)
} else {
wSNV_CEN180 <- wSNV_CEN180List[[1]]
}
}
# Load table of windowed log2(ChIP/input) coverage for CENH3 ChIP-seq
# NOTE(review): absolute paths below — assumes the original analysis host.
CENH3 <- read.table(paste0("/home/ajt200/analysis/CENH3_seedlings_Maheshwari_Comai_2017_GenomeRes/",
"snakemake_ChIPseq_t2t-col.20210610/mapped/both/tsv/log2ChIPcontrol/",
"WT_CENH3_Rep1_ChIP_SRR4430537_WT_CENH3_Rep1_input_SRR4430555_MappedOn_t2t-col.20210610_lowXM_both_sort_norm_binSize",
genomeBinName, "_smoothed.tsv"),
header = T, sep = "\t")
# Load table of windowed superfam TE frequencies
Gypsy <- read.table(paste0("/home/ajt200/analysis/nanopore/t2t-col.20210610/annotation/TEs_EDTA/t2t-col.20210610_TEs_Gypsy_LTR_frequency_per_",
genomeBinName, "_smoothed.tsv"),
header = T, sep = "\t")
# Load table of windowed CEN180 frequencies
CEN180freq <- read.table(paste0("/home/ajt200/analysis/nanopore/t2t-col.20210610/annotation/CEN180/t2t-col.20210610_CEN180_frequency_per_",
genomeBinName, "_smoothed.tsv"),
header = T, sep = "\t")
# Convert to BED-like format for use with circlize
# (0-based start, end = start + bin size; value1..value4 hold the four
# per-quantile frequency columns 3..6 of each table)
CENH3_in_bodies_CEN180_bed <- data.frame(chr = CENH3_in_bodies_CEN180$chr,
start = CENH3_in_bodies_CEN180$window-1,
end = CENH3_in_bodies_CEN180$window-1+genomeBinSize,
value1 = CENH3_in_bodies_CEN180[,3],
value2 = CENH3_in_bodies_CEN180[,4],
value3 = CENH3_in_bodies_CEN180[,5],
value4 = CENH3_in_bodies_CEN180[,6])
HORlengthsSum_CEN180_bed <- data.frame(chr = HORlengthsSum_CEN180$chr,
start = HORlengthsSum_CEN180$window-1,
end = HORlengthsSum_CEN180$window-1+genomeBinSize,
value1 = HORlengthsSum_CEN180[,3],
value2 = HORlengthsSum_CEN180[,4],
value3 = HORlengthsSum_CEN180[,5],
value4 = HORlengthsSum_CEN180[,6])
wSNV_CEN180_bed <- data.frame(chr = wSNV_CEN180$chr,
start = wSNV_CEN180$window-1,
end = wSNV_CEN180$window-1+genomeBinSize,
value1 = wSNV_CEN180[,3],
value2 = wSNV_CEN180[,4],
value3 = wSNV_CEN180[,5],
value4 = wSNV_CEN180[,6])
# Single-value tracks: CENH3 log2(ChIP/input), Gypsy and CEN180 frequencies
CENH3_bed <- data.frame(chr = CENH3$chr,
start = CENH3$window-1,
end = CENH3$window-1+genomeBinSize,
value1 = CENH3[,6])
Gypsy_bed <- data.frame(chr = Gypsy$chr,
start = Gypsy$window-1,
end = Gypsy$window-1+genomeBinSize,
value1 = Gypsy[,4])
CEN180freq_bed <- data.frame(chr = CEN180freq$chr,
start = CEN180freq$window-1,
end = CEN180freq$window-1+genomeBinSize,
value1 = CEN180freq[,4])
# Redefine end coordinate of last window to match chrLens for each chromosome
# (otherwise the final bin would extend past the chromosome end)
for(x in seq_along(chrs)) {
CENH3_in_bodies_CEN180_bed[CENH3_in_bodies_CEN180_bed$chr == chrs[x],][dim(CENH3_in_bodies_CEN180_bed[CENH3_in_bodies_CEN180_bed$chr == chrs[x],])[1],]$end <- chrLens[x]
HORlengthsSum_CEN180_bed[HORlengthsSum_CEN180_bed$chr == chrs[x],][dim(HORlengthsSum_CEN180_bed[HORlengthsSum_CEN180_bed$chr == chrs[x],])[1],]$end <- chrLens[x]
wSNV_CEN180_bed[wSNV_CEN180_bed$chr == chrs[x],][dim(wSNV_CEN180_bed[wSNV_CEN180_bed$chr == chrs[x],])[1],]$end <- chrLens[x]
CENH3_bed[CENH3_bed$chr == chrs[x],][dim(CENH3_bed[CENH3_bed$chr == chrs[x],])[1],]$end <- chrLens[x]
Gypsy_bed[Gypsy_bed$chr == chrs[x],][dim(Gypsy_bed[Gypsy_bed$chr == chrs[x],])[1],]$end <- chrLens[x]
CEN180freq_bed[CEN180freq_bed$chr == chrs[x],][dim(CEN180freq_bed[CEN180freq_bed$chr == chrs[x],])[1],]$end <- chrLens[x]
}
# Define feature density heatmap colour scale interpolation
#rich8to6equal <- c("#000041", "#0000CB", "#0081FF", "#FDEE02", "#FFAB00", "#FF3300")
# Map Gypsy density onto a 6-step viridis ramp anchored at the upper-half
# quantiles of the observed values (everything below the median shares the
# lowest colour).
Gypsy_col_fun <- colorRamp2(quantile(Gypsy_bed$value1,
                                     c(0.5, 0.6, 0.7, 0.8, 0.9, 1.0),
                                     na.rm = T),
                            viridis_pal()(6))
# CEN180 frequency uses a tighter ramp over only the top decile of values.
CEN180freq_col_fun <- colorRamp2(quantile(CEN180freq_bed$value1,
                                          c(0.90, 0.92, 0.94, 0.96, 0.98, 1.0),
                                          na.rm = T),
                                 viridis_pal()(6))
# Define corresponding heatmap legends
lgd_Gypsy <- Legend(col_fun = Gypsy_col_fun, title = "GYPSY", title_gp = gpar(fontface = "bold.italic"), title_position = "leftcenter-rot")
lgd_CEN180freq <- Legend(col_fun = CEN180freq_col_fun, title = "CEN180", title_gp = gpar(fontface = "bold.italic"), title_position = "leftcenter-rot")
# Colour-scale legends packed together (drawn bottom-right of the plot).
lgd_list1 <- packLegend(lgd_Gypsy, lgd_CEN180freq)
# Line legends for the quantile tracks: one colour per quantile (Q1-Q4),
# colours taken from the shared quantileColours vector.
lgd_CENH3_in_bodies_CEN180 <- Legend(at = c("Q1", "Q2", "Q3", "Q4"), type = "lines", legend_gp = gpar(col = quantileColours, lwd = 1.5),
                                     background = NULL, title = "CENH3", title_position = "leftcenter-rot")
lgd_HORlengthsSum_CEN180 <- Legend(at = c("Q1", "Q2", "Q3", "Q4"), type = "lines", legend_gp = gpar(col = quantileColours, lwd = 1.5),
                                   background = NULL, title = "HORs", title_position = "leftcenter-rot")
lgd_wSNV_CEN180 <- Legend(at = c("Q1", "Q2", "Q3", "Q4"), type = "lines", legend_gp = gpar(col = quantileColours, lwd = 1.5),
                          background = NULL, title = "SNVs", title_position = "leftcenter-rot")
# Quantile-line legends packed together (drawn bottom-left of the plot).
lgd_list2 <- packLegend(lgd_CENH3_in_bodies_CEN180, lgd_HORlengthsSum_CEN180, lgd_wSNV_CEN180)
# CEN180 repeat coordinates: keep only chr/start/end (first 3 columns);
# the remaining BED columns are skipped via colClasses = "NULL".
CEN180_bed <- read.table(paste0("/home/ajt200/analysis/nanopore/t2t-col.20210610/annotation/CEN180/CEN180_in_t2t-col.20210610_",
                                paste0(chrName, collapse = "_"), ".bed"),
                         header = F, colClasses = c(rep(NA, 3), rep("NULL", 5)))
colnames(CEN180_bed) <- c("chr", "start", "end")
# Centromeric Athila element coordinates, same chr/start/end treatment.
CENAthila_bed <- read.table(paste0("/home/ajt200/analysis/nanopore/t2t-col.20210610/annotation/CENAthila/CENAthila_in_t2t-col.20210610_",
                                   paste0(chrName, collapse = "_"), ".bed"),
                            header = F, colClasses = c(rep(NA, 3), rep("NULL", 2)))
colnames(CENAthila_bed) <- c("chr", "start", "end")
## circlize
# Per-chromosome zoom window; these bounds are used below to subset every
# track to the pericentromeric region of each chromosome.
genomeDF <- data.frame(chr = chrs,
                       start = c(12e6, 2e6, 11e6, 2e6, 10e6),
                       end = c(20e6, 8e6, 18e6, 10e6, 17e6))
# Initialize circular layout
# Draw the full circular (circlize) plot over the zoom regions in genomeDF:
# an outer genomic axis with CEN labels, Gypsy and CEN180 density heatmaps,
# a CENAthila rainfall track, three per-quantile line tracks (CENH3 in CEN180
# bodies, HOR lengths, SNVs) and a filled CENH3 signal track.
# Reads globals: genomeDF, chrs, CENstart, CENend, the *_bed data frames,
# the *_col_fun colour ramps and quantileColours. Returns nothing; draws on
# the active graphics device and resets circlize state via circos.clear().
circlize_plot <- function() {
gapDegree <- 6
# Track geometry; one enlarged gap after the last sector opens the ring,
# and start.degree centres that opening at the top.
circos.par(track.height = 0.15,
           canvas.xlim = c(-1.1, 1.1),
           canvas.ylim = c(-1.1, 1.1),
           gap.degree = c(rep(1, length(chrs)-1), gapDegree),
           start.degree = 90-(gapDegree/2))
circos.genomicInitialize(data = genomeDF,
                         plotType = NULL,
                         tickLabelsStartFromZero = FALSE)
# Outer grey band carrying the genomic axis.
circos.track(ylim = c(0, 1),
             bg.col = "grey70",
             bg.border = NA,
             track.height = 0.05,
             panel.fun = function(x, y) {
               circos.genomicAxis(h = "top",
                                  labels.facing = "clockwise",
                                  tickLabelsStartFromZero = FALSE)
             })
# Label each sector "CEN1".."CEN5" at the midpoint of its centromere.
sapply(seq_along(chrs), function(x) {
  circos.text(x = (CENstart[x]+CENend[x])/2,
              y = 0.5,
              sector.index = chrs[x],
              track.index = 1,
              labels = paste0("CEN", x),
              niceFacing = TRUE,
              cex = 0.8,
              col = "black",
              font = 4)
})
# Gypsy heatmap
# Each genomic track below is fed only the windows falling inside the
# per-chromosome zoom region defined in genomeDF.
circos.genomicHeatmap(bed = do.call(rbind, lapply(seq_along(chrs), function(x) {
  Gypsy_bed[Gypsy_bed$chr == chrs[x] &
            Gypsy_bed$start >= genomeDF$start[x] &
            Gypsy_bed$end <= genomeDF$end[x],] } )),
                      col = Gypsy_col_fun,
                      border = NA,
                      side = "inside",
                      heatmap_height = 0.05,
                      connection_height = NULL)
# CEN180 heatmap
circos.genomicHeatmap(bed = do.call(rbind, lapply(seq_along(chrs), function(x) {
  CEN180freq_bed[CEN180freq_bed$chr == chrs[x] &
                 CEN180freq_bed$start >= genomeDF$start[x] &
                 CEN180freq_bed$end <= genomeDF$end[x],] } )),
                      col = CEN180freq_col_fun,
                      border = NA,
                      side = "inside",
                      heatmap_height = 0.05,
                      connection_height = NULL)
# CENAthila rainfall plot
circos.genomicRainfall(data = do.call(rbind, lapply(seq_along(chrs), function(x) {
  CENAthila_bed[CENAthila_bed$chr == chrs[x] &
                CENAthila_bed$start >= genomeDF$start[x] &
                CENAthila_bed$end <= genomeDF$end[x],] } )),
                       bg.border = NA,
                       track.height = 0.05,
                       pch = 16,
                       cex = 0.4,
                       col = c("#0000FF80"))
set_track_gap(gap = 0.005)
# Plot windowed CEN180 frequency for each quantile
# (one line per quantile, coloured by quantileColours).
circos.genomicTrack(data = do.call(rbind, lapply(seq_along(chrs), function(x) {
  CENH3_in_bodies_CEN180_bed[CENH3_in_bodies_CEN180_bed$chr == chrs[x] &
                             CENH3_in_bodies_CEN180_bed$start >= genomeDF$start[x] &
                             CENH3_in_bodies_CEN180_bed$end <= genomeDF$end[x],] } )),
                    panel.fun = function(region, value, ...) {
                      circos.genomicLines(region,
                                          value,
                                          col = quantileColours,
                                          lwd = 1.5,
                                          lty = 1,
                                          area = FALSE,
                                          ...)
                    }, bg.border = NA)
# y-axis drawn only in the first sector to avoid clutter.
circos.yaxis(side = "left", sector.index = get.all.sector.index()[1], labels.cex = 0.3, tick.length = convert_x(0.5, "mm"), lwd = 0.5)
set_track_gap(gap = 0.005)
# HOR lengths per quantile.
circos.genomicTrack(data = do.call(rbind, lapply(seq_along(chrs), function(x) {
  HORlengthsSum_CEN180_bed[HORlengthsSum_CEN180_bed$chr == chrs[x] &
                           HORlengthsSum_CEN180_bed$start >= genomeDF$start[x] &
                           HORlengthsSum_CEN180_bed$end <= genomeDF$end[x],] } )),
                    panel.fun = function(region, value, ...) {
                      circos.genomicLines(region,
                                          value,
                                          col = quantileColours,
                                          lwd = 1.5,
                                          lty = 1,
                                          area = FALSE,
                                          ...)
                    }, bg.border = NA)
circos.yaxis(side = "left", sector.index = get.all.sector.index()[1], labels.cex = 0.3, tick.length = convert_x(0.5, "mm"), lwd = 0.5)
set_track_gap(gap = 0.005)
# SNVs per quantile.
circos.genomicTrack(data = do.call(rbind, lapply(seq_along(chrs), function(x) {
  wSNV_CEN180_bed[wSNV_CEN180_bed$chr == chrs[x] &
                  wSNV_CEN180_bed$start >= genomeDF$start[x] &
                  wSNV_CEN180_bed$end <= genomeDF$end[x],] } )),
                    panel.fun = function(region, value, ...) {
                      circos.genomicLines(region,
                                          value,
                                          col = quantileColours,
                                          lwd = 1.5,
                                          lty = 1,
                                          area = FALSE,
                                          ...)
                    }, bg.border = NA)
circos.yaxis(side = "left", sector.index = get.all.sector.index()[1], labels.cex = 0.3, tick.length = convert_x(0.5, "mm"), lwd = 0.5)
set_track_gap(gap = 0.005)
# Innermost track: CENH3 signal drawn as a filled area from baseline 0.
circos.genomicTrack(data = do.call(rbind, lapply(seq_along(chrs), function(x) {
  CENH3_bed[CENH3_bed$chr == chrs[x] &
            CENH3_bed$start >= genomeDF$start[x] &
            CENH3_bed$end <= genomeDF$end[x],] } )),
                    panel.fun = function(region, value, ...) {
                      circos.genomicLines(region,
                                          value,
                                          col = "purple",
                                          area = TRUE,
                                          baseline = 0,
                                          border = NA,
                                          ...)
                    }, bg.border = NA)
circos.yaxis(side = "left", at = seq(0, 4, by = 2), sector.index = get.all.sector.index()[1], labels.cex = 0.3, tick.length = convert_x(0.5, "mm"), lwd = 0.5)
# Reset graphic parameters and internal variables
circos.clear()
}
# Render the circular plot to PDF, then overlay the two packed legend
# groups: quantile line legends bottom-left, heatmap colour scales
# bottom-right.
pdf(paste0(plotDir,
           "CEN180_frequency_per_", genomeBinName,
           "_", quantileDef, "_", quantiles, "quantiles",
           "_of_CEN180_in_t2t-col.20210610_",
           paste0(chrName, collapse = "_"), "_circlize_zoom_v", date, ".pdf"))
circlize_plot()
draw(lgd_list2, x = unit(4, "mm"), y = unit(4, "mm"), just = c("left", "bottom"))
draw(lgd_list1, x = unit(1, "npc") - unit(2, "mm"), y = unit(4, "mm"), just = c("right", "bottom"))
dev.off()
|
d1e8d9c283151b5c86532b0c8df1f8255d5c97c3
|
6fac254783d83faeb0c20f2bf4f4ea4c39b93fe5
|
/kick/kick_ordered_logistic_regression.R
|
377aafe25b317e5b673e9bdc65ad5f576e389dbb
|
[] |
no_license
|
KamonohashiPerry/stan100knocks
|
29b7f8c9b5353300ef9ab9396629066f4dca68b8
|
0c606276484ac994e69323bf2affe75adf654eec
|
refs/heads/master
| 2020-04-24T18:27:50.341869
| 2019-08-17T09:44:26
| 2019-08-17T09:44:26
| 172,180,297
| 0
| 1
| null | 2019-03-11T12:04:21
| 2019-02-23T06:31:01
|
R
|
UTF-8
|
R
| false
| false
| 1,980
|
r
|
kick_ordered_logistic_regression.R
|
library(tidyverse)
library(rstan)
library(GGally)
library(shinystan)
# Cache compiled Stan models on disk and run MCMC chains in parallel.
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# Red-wine quality data (semicolon-separated CSV fetched over HTTP).
wine_dataset <- read.csv("http://ieor.berkeley.edu/~ieor265/homeworks/winequality-red.csv", sep=";" )
# Shift the integer quality score down by 2 so the ordinal outcome starts
# at 1 (assumes the minimum observed quality is 3 -- TODO confirm), as
# required by Stan's ordered_logistic.
y <- wine_dataset$quality - 2
# Predictor matrix: all columns except quality, standardised.
x <- as.matrix(wine_dataset %>% select(-quality))
x <- scale(x)
# Visualization -----------------------------------------------------------
# Pairwise scatterplot matrix of all wine-quality variables.
ggpairs(wine_dataset)
# Estimation --------------------------------------------------------------
# Ordered logistic regression, following:
# https://mc-stan.org/docs/2_18/stan-users-guide/ordered-logistic-section.html
N <- nrow(x)           # number of observations
K <- length(table(y))  # number of ordinal outcome levels
D <- ncol(x)           # number of predictors
# Data list matching the declarations in the Stan model file.
stan_data <- list(N = N,
                  D = D,
                  K = K,
                  y = y,
                  x = x)
fit <- stan(
  file = "model/ordered_logistic_regression.stan",
  data = stan_data,
  iter = 1000,
  chains = 4
)
summary(fit)
traceplot(fit)
# Convergence Check -------------------------------------------------------
# Interactive MCMC diagnostics in the browser.
launch_shinystan(fit)
# Result Plot -------------------------------------------------------------
source('common.R')  # provides data.frame.quantile.mcmc()
ms <- rstan::extract(fit)
N_mcmc <- length(ms$lp__)
# One column per predictor coefficient, plus an mcmc draw index.
param_names <- c('mcmc', colnames(wine_dataset %>% select(-quality)))
d_est <- data.frame(1:N_mcmc, ms$b)
colnames(d_est) <- param_names
# Posterior quantiles (median + 95% interval) per coefficient.
d_qua <- data.frame.quantile.mcmc(x = param_names[-1], y_mcmc = d_est[, -1])
d_melt <- reshape2::melt(d_est, id = c('mcmc'), variable.name = 'X')
# Reverse factor order so coord_flip() lists coefficients top-to-bottom.
d_melt$X <- factor(d_melt$X, levels = rev(levels(d_melt$X)))
# Violin of each coefficient's posterior with median and 95% interval.
p <- ggplot() +
  theme_bw(base_size = 18) +
  coord_flip() +
  geom_violin(data = d_melt, aes(x = X, y = value), fill = 'white',
              color = 'grey80', size = 2, alpha = 0.3, scale = 'width') +
  geom_pointrange(data = d_qua, aes(x = X, y = p50, ymin = p2.5, ymax = p97.5),
                  size = 1) +
  labs(x = 'parameter', y = 'value') +
  scale_y_continuous(breaks = seq(from = -2, to = 6, by = 2))
p
|
4329bc98a555b58af2521d7e1d770b9868225af8
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.management/man/organizations_describe_organizational_unit.Rd
|
89ea23168a95345149a53e4dcb1560f614e4684e
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,141
|
rd
|
organizations_describe_organizational_unit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/organizations_operations.R
\name{organizations_describe_organizational_unit}
\alias{organizations_describe_organizational_unit}
\title{Retrieves information about an organizational unit (OU)}
\usage{
organizations_describe_organizational_unit(OrganizationalUnitId)
}
\arguments{
\item{OrganizationalUnitId}{[required] The unique identifier (ID) of the organizational unit that you want
details about. You can get the ID from the
\code{\link[=organizations_list_organizational_units_for_parent]{list_organizational_units_for_parent}}
operation.
The \href{https://en.wikipedia.org/wiki/Regex}{regex pattern} for an
organizational unit ID string requires "ou-" followed by from 4 to 32
lowercase letters or digits (the ID of the root that contains the OU).
This string is followed by a second "-" dash and from 8 to 32 additional
lowercase letters or digits.}
}
\description{
Retrieves information about an organizational unit (OU).
See \url{https://www.paws-r-sdk.com/docs/organizations_describe_organizational_unit/} for full documentation.
}
\keyword{internal}
|
abf78c5bab6710525644a5da6de16c0103f7ace9
|
6d0d99414cd9ff9cd2fd744372b4c88cd7975944
|
/RScript_Cluster.R
|
8a1931dd30b766888c33f6003c0377acd4a85882
|
[] |
no_license
|
snandi/Project_Stat760
|
bbee2e76018eb6197afd4de409aeb18b182df5c6
|
4c0a3677640dadbb6f1dd4383da35815332f05c6
|
refs/heads/master
| 2021-01-17T14:07:16.894395
| 2016-06-14T23:55:35
| 2016-06-14T23:55:35
| 27,285,955
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 106
|
r
|
RScript_Cluster.R
|
# Cluster the clusterSim example dataset `data_ratio` with k-means, seeding
# the algorithm with 10 starting centres chosen by
# clusterSim's initial.Centers() instead of random initialisation.
library(clusterSim)
data(data_ratio)
kmeans(data_ratio,data_ratio[initial.Centers(data_ratio, 10),])
|
8e30bc12cd80a3f506cb17a27865c39eae78801a
|
d73ed13605f35ff1b779e69eb394d41ca26f4a2b
|
/06_Corpus/chap6_poetryCorpus.R
|
090c7cdd50ab6a04a802400174d2cddd5d8c0eef
|
[
"MIT"
] |
permissive
|
piperandrew/enumerations
|
41df8c15ee108da30defa0ef2e6b739949d9e247
|
9b6678f10fcc8d3ef18f115865be917c9fb56813
|
refs/heads/master
| 2021-01-25T14:32:39.031961
| 2018-09-07T16:11:59
| 2018-09-07T16:11:59
| 123,704,006
| 22
| 2
|
MIT
| 2018-12-02T02:09:38
| 2018-03-03T15:23:45
|
R
|
UTF-8
|
R
| false
| false
| 137,111
|
r
|
chap6_poetryCorpus.R
|
### Chap 6 Corpus ###
#These scripts can be used to understand the shape of a poet's corpus and career
#Section 1 provides transformations necessary for the models used
#Section 2 concerns measures to understand "vulnerability".
#Section 3 concerns understanding "late style"
library("tm")
library("SnowballC")
library("proxy")
library("corpcor")
library("reshape2")
library("lsa")
library("zoo")
library("koRpus")
library("ggplot2")
library("scales")
library("ggrepel")
library("igraph")
library("splitstackshape")
library("dendextend")
#### Section One: Transformations ####
#in order build the models described in this chapter, you will need to transform
#poems into 4 different representations:
#1. lexical
#2. semantic
#3. part of speech
#4. phonetic
#in the accompanying data all transformations are available for download
#you can also use code below and accompanying python scripts to transform your own data
#1 and #2 are performed during each model.
#3 is contained below
#4 is contained in the python script library under "Phoneme Translator"
#3
### POS Transformation ###
#this script transforms a directory of text files into their part of speech representation
#it only works on German and English, not French
### Step 0: Prepare POS Directories ###
#unload all previous packages as they will conflict with NLP packages.
require("NLP")
library("openNLP")
library("openNLPdata")
# Apache OpenNLP maximum-entropy annotators for sentence splitting, word
# tokenisation and POS tagging.
# NOTE(review): language = "de" presumably needs the openNLPmodels.de model
# package installed -- confirm, otherwise these constructors fail.
sent_token_annotator <- Maxent_Sent_Token_Annotator(language = "de")
word_token_annotator <- Maxent_Word_Token_Annotator(language = "de")
pos_tag_annotator <- Maxent_POS_Tag_Annotator(language = "de")
setwd("~/Sites/Topologies - Data/Topologies - Data (Poetry)/PoetryCorpusesAuthor/")
# One sub-directory per author, each containing that author's poems as .txt.
filenames<-list.files("PoetryAuthors_German2", full.names=FALSE)
for (i in 1:length(filenames)) {
setwd("~/Sites/Topologies - Data/Topologies - Data (Poetry)/PoetryCorpusesAuthor/PoetryAuthors_German2")
filenames2<-list.files(filenames[i], pattern="*.txt", full.names=FALSE)
dir<-paste("~/Sites/Topologies - Data/Topologies - Data (Poetry)/PoetryCorpusesAuthor/PoetryAuthors_German2/", filenames[i], sep="")
setwd(dir)
for (j in 1:length(filenames2)){
setwd(dir)
# Read the poem, strip digits, and collapse to one string for annotation.
work<-scan(filenames2[j], what="character", quote="")
work.clean<- gsub("\\d", "", work)
text.whole<-paste(work.clean, collapse=" ") # collapse into single chunk
text.char<-as.String(text.whole)
# Sentence and word annotations must exist before POS tagging can run.
a2 <- annotate(text.char, list(sent_token_annotator, word_token_annotator))
a3 <- annotate(text.char, pos_tag_annotator, a2)
a3w <- subset(a3, type == "word")
tags <- sapply(a3w$features, `[[`, "POS")
tags.final<-paste(tags, collapse = " ")
# Write the space-separated tag sequence to the mirrored *_POS directory,
# keeping the original file name.
dir2<-paste("~/Sites/Topologies - Data/Topologies - Data (Poetry)/PoetryCorpusesAuthor/PoetryAuthors_German2_POS", "/", filenames[i], "_POS", sep="")
setwd(dir2)
write(tags.final, file = filenames2[j])
}
}
##### Section Two: Vulnerabilities #####
#6.1
#This script produces a hierarchical tree based on the similarities between the different
#editions of Leaves of Grass. It uses four primary features: lexical, semantic, part of speech and phonemes.
#it takes as input four separate representations of a poet's work:
#1. lexical representation of a poet's work using tfidf
#2. syntactic representation of a poet's work (POS) using tfidf
#3. phonetic representation of a poet's work (PHON) using tfidf
#4. semantic representation of a poet's work using Latent Semantic Analysis (LSA)
#it then combines similarity matrices based on these features into a single representation
#and then runs hierarchical clustering on that matrix
#master dir
dir0<-paste("[INPUT DIRECTORY]")
setwd(dir0)
#ingest poems
# Standard tm preprocessing: whitespace, lowercase, punctuation, digits,
# then English stemming.
corpus1 <- VCorpus(DirSource("Whitman_LeavesOfGrass_Editions_Full"), readerControl=list(language="English"))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1 <- tm_map(corpus1, stemDocument, language = "english")
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
# Per-edition word counts, computed BEFORE stopword removal so they reflect
# full text length.
scaling1<-rowSums(corpus1.matrix)
wordcount.df<-data.frame(scaling1)
#remove stopwords
corpus1 <- tm_map(corpus1, removeWords, stopwords("English"))
#remake dtm
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#tfidf
corpus.tfidf<-weightTfIdf(corpus1.dtm, normalize = TRUE)
corpus.tfidf.mat<-as.matrix(corpus.tfidf, stringsAsFactors=F)
#LSA
# Build a latent semantic space over the editions: terms that are pure
# numbers (1:5000) are dropped, the matrix is log-tf * idf weighted, and the
# SVD dimensionality is chosen by dimcalc_share().
data(stopwords_en)
nums<-c(1:5000)
# FIX: the corpus is English (stemming = "english", and stopwords_en is the
# list loaded above), but the original passed the German list stopwords_de,
# apparently copied from the German pipeline. Use the English stopword list.
myMatrix<-textmatrix("Whitman_LeavesOfGrass_Editions_Full", stemming=TRUE, language="english", minWordLength=2, maxWordLength=FALSE, minDocFreq=1, maxDocFreq=FALSE, minGlobFreq=FALSE, maxGlobFreq=FALSE, stopwords=stopwords_en, vocabulary=NULL, phrases=NULL, removeXML=FALSE, removeNumbers=FALSE)
myMatrix<-myMatrix[!row.names(myMatrix) %in% nums,]
myMatrix<-lw_logtf(myMatrix) * gw_idf(myMatrix)
myLSAspace<-lsa(myMatrix, dims=dimcalc_share())
# Document-document cosine similarity in LSA space, scaled to 0-100.
cosine.dist2<-simil(t(as.textmatrix(myLSAspace)), method="cosine")
cosine.matrix2<-as.matrix(cosine.dist2, stringsAsFactors=F)*100
#POS
# Part-of-speech representation of the same editions; "$" characters are
# stripped from the tag stream (presumably because tags such as PRP$ would
# otherwise be split oddly by the tokenizer -- TODO confirm).
corpus2 <- VCorpus(DirSource("Whitman_LeavesOfGrass_Editions_Full_POS"), readerControl=list(language="English"))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("\\$", "", x)))
#take 1-2 POSgrams
BigramTokenizer <- function(x) unlist(lapply(ngrams(words(x), 1:2), paste, collapse = " "), use.names = FALSE)
corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(tokenize = BigramTokenizer, wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
corpus2.tfidf<-weightTfIdf(corpus2.dtm, normalize = TRUE)
corpus2.tfidf.mat<-as.matrix(corpus2.tfidf, stringsAsFactors=F)
#PHONEMES
# Phonetic representation; punctuation and case are preserved because the
# phoneme encoding uses them.
corpus3 <- VCorpus(DirSource("Whitman_LeavesOfGrass_Editions_Full_PHON"))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf), removePunctuation = FALSE, stopwords = FALSE, tolower=FALSE))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
#tfidf
corpus3.tfidf<-weightTfIdf(corpus3.dtm, normalize = TRUE)
corpus3.tfidf.mat<-as.matrix(corpus3.tfidf, stringsAsFactors=F)
#add POS & PHONEMES to LEXICAL DTM
#this creates a single large feature space of POS, Phonemes, and words
all.tfidf<-cbind(corpus.tfidf.mat, corpus2.tfidf.mat, corpus3.tfidf.mat)
#then create similarity matrix for all poems based on these features
cosine.dist1<-simil(all.tfidf, method = "cosine")
cosine.matrix1<-as.matrix(cosine.dist1, stringsAsFactors=F)*100
#combine LSA with LEX/POS/PHON similarity matrices
#this effectively weights the semantic representation the same as all other 3 combined
#see the appendix in the book for a full discussion and chap 6 for a validation
#### this is only one way to build the model which I tested on numerous cases ####
#### there are many other ways one could do this -- more testing needed!! ####
cosine.matrix1[is.na(cosine.matrix1)]<-0 #Lex/POS Matrix #change 0 to 1 for heat map
cosine.matrix2[is.na(cosine.matrix2)]<-0 #LSA Matrix #change 0 to 1 for heat map
cosine.matrix<-(cosine.matrix1+cosine.matrix2)/2
#prepare for clustering exercise
d<-cosine.matrix
# The 0 -> NA -> 0 round trip keeps the diagonal (whose similarity was NA
# and became 0 above) at distance 0 after the 100-d inversion.
# NOTE(review): a genuine zero off-diagonal similarity would also map to
# distance 0 -- confirm that cannot occur here.
d[d==0]<-NA
#turn into distance matrix
d<-100-d
d[is.na(d)]<-0
d<-d/100
row.names(d)<-c("1855", "1856", "1860", "1867", "1871", "1881", "1891")
#run hierarchical clustering
d.dist<-as.dist(d)
fit <- hclust(d.dist, method="ward.D")
plot(fit, horiz=T)
#predict optimal number of clusters
# FIX: d.dist is a "dist" object, for which nrow() returns NULL, so the
# original krange = 2:(nrow(d.dist)-1) errors at runtime. The number of
# observations in a dist object is stored in its "Size" attribute.
dend_k<-find_k(fit, krange = 2:(attr(d.dist, "Size")-1))
### Fig. 6.1 ###
dend<-as.dendrogram(fit)
plot(dend, xlab="", ylab="", horiz=T)
title(main="The Editions of Leaves of Grass", sub="Fig. 6.1 Stylistic affinities of the different editions of Walt Whitman's Leaves of Grass\n\nSource: Andrew Piper, Enumerations: Data and Literary Study (2018)")
#6.2
#Network of the relationship of pages to each other in the different editions of Leaves of Grass
#the figure shows the 1891 edition based on the four features above
#this script creates a network of relationships between the pages of a given volume of poetry
#it is assumed that pages have been normalized to be roughly the same length (i.e. chunked)
#each page is considered a node
#an edge is drawn between a page and its "most similar pages", where similarity is determined using the
#four features described above
#it does so in an evolutionary fashion meaning that a page can only be connected to a page that has come
#before it. This will make more sense when considering poets' careers (where a poem can only be similar
#to poems that the poet has already written).
#the script takes as input three directories:
#1. lexical representation of a poet's works
#2. syntactic representation of a poet's works (POS)
#3. phonetic representation of a poet's works (PHON)
#it outputs a directory of edge lists for every work, where nodes = pages of that work and edges = similarity
#it uses a cut-off to establish whether two pages are "similar" (see script for details)
#here "works" are considered volumes of poetry ("Leaves of Grass"). Later in the chapter,
#a poet's career is considered the "work" and the individual poems are equivalent to "pages" in this model
#create directories
#master dir
dir0<-paste("[INSERT DIRECTORY]")
#lexical directories
dir1<-paste(dir0, "/Whitman_LeavesOfGrass_Chunked_300/", sep="")
#POS directories
dir2<-paste(dir0,"/Whitman_LeavesOfGrass_Chunked_300_POS/", sep="")
#PHONEME directories
dir3<-paste(dir0,"/Whitman_LeavesOfGrass_Chunked_300_PHON/",sep="")
#start
setwd(dir0)
# Parallel listings: one sub-directory per edition in each of the lexical,
# POS and phoneme representations (indices must correspond across the three).
filenames0<-list.files("Whitman_LeavesOfGrass_Chunked_300", full.names=FALSE)
filenames1<-list.files("Whitman_LeavesOfGrass_Chunked_300_POS", full.names=FALSE)
filenames2<-list.files("Whitman_LeavesOfGrass_Chunked_300_PHON", full.names=FALSE)
# Accumulator for one row of network statistics per edition.
final.df<-NULL
for (m in 7:length(filenames0)) {
print(m)
#LEXICAL
setwd(dir1)
corpus1 <- VCorpus(DirSource(filenames0[m]), readerControl=list(language="English"))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1 <- tm_map(corpus1, stemDocument, language = "english")
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
scaling1<-rowSums(corpus1.matrix)
wordcount.df<-data.frame(scaling1)
#remove stopwords
corpus1 <- tm_map(corpus1, removeWords, stopwords("English"))
#remake dtm
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#tfidf
corpus.tfidf<-weightTfIdf(corpus1.dtm, normalize = TRUE)
corpus.tfidf.mat<-as.matrix(corpus.tfidf, stringsAsFactors=F)
#LSA
data(stopwords_en)
dir4<-paste(dir1, filenames0[m], sep="")
nums<-c(1:5000)
myMatrix<-textmatrix(dir4, stemming=TRUE, language="english", minWordLength=2, maxWordLength=FALSE, minDocFreq=1, maxDocFreq=FALSE, minGlobFreq=FALSE, maxGlobFreq=FALSE, stopwords=stopwords_de, vocabulary=NULL, phrases=NULL, removeXML=FALSE, removeNumbers=FALSE)
myMatrix<-myMatrix[!row.names(myMatrix) %in% nums,]
myMatrix<-lw_logtf(myMatrix) * gw_idf(myMatrix)
myLSAspace<-lsa(myMatrix, dims=dimcalc_share())
cosine.dist2<-simil(t(as.textmatrix(myLSAspace)), method="cosine")
cosine.matrix2<-as.matrix(cosine.dist2, stringsAsFactors=F)*100
#POS
setwd(dir2)
corpus2 <- VCorpus(DirSource(filenames1[m]), readerControl=list(language="English"))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("\\$", "", x)))
#take 1-2 POSgrams
BigramTokenizer <- function(x) unlist(lapply(ngrams(words(x), 1:2), paste, collapse = " "), use.names = FALSE)
corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(tokenize = BigramTokenizer, wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
corpus2.tfidf<-weightTfIdf(corpus2.dtm, normalize = TRUE)
corpus2.tfidf.mat<-as.matrix(corpus2.tfidf, stringsAsFactors=F)
#PHONEMES
setwd(dir3)
corpus3 <- VCorpus(DirSource(filenames2[m]))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf), removePunctuation = FALSE, stopwords = FALSE, tolower=FALSE))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
#tfidf
corpus3.tfidf<-weightTfIdf(corpus3.dtm, normalize = TRUE)
corpus3.tfidf.mat<-as.matrix(corpus3.tfidf, stringsAsFactors=F)
#add POS & PHONEMES to LEXICAL DTM
all.tfidf<-cbind(corpus.tfidf.mat, corpus2.tfidf.mat, corpus3.tfidf.mat)
#similarity matrix
cosine.dist1<-simil(all.tfidf, method = "cosine")
cosine.matrix1<-as.matrix(cosine.dist1, stringsAsFactors=F)*100
#combine LSA with LEX/POS
cosine.matrix1[is.na(cosine.matrix1)]<-0 #Lex/POS Matrix #change 0 to 1 for heat map
cosine.matrix2[is.na(cosine.matrix2)]<-0 #LSA Matrix #change 0 to 1 for heat map
cosine.matrix<-(cosine.matrix1+cosine.matrix2)/2
#setwd("[SET DIRECTORY]")
cosine.file<-paste(filenames0[m], "_SIM.csv", sep = "")
#write.csv(cosine.matrix, file=cosine.file)
#generate statistical significance threshold
test<-sm2vec(cosine.matrix)
global.cut<-mean(test)+(1.96*sd(test))
sim.v<-vector()
for (z in 1:nrow(cosine.matrix)){
sub<-sort(cosine.matrix[z,], decreasing=T)[1:100]
sim.v<-append(sim.v, sub)
}
sim.mean<-mean(sim.v)
sim.sd<-sd(sim.v)
### Step 2: Make Edge List ###
edge.list<-data.frame()
for (i in 3:nrow(cosine.matrix)) {
#get word count of page
count.source<-wordcount.df[row.names(wordcount.df) == row.names(cosine.matrix)[i],]
#subset matrix by all poems up to target poem
corp.select<-cosine.matrix[1:i,1:i]
#OR subset matrix by most recent 100 pages (to control for differing lengths of editions)
# if (i > 100){
# corp.select<-cosine.matrix[(i-99):i,(i-99):i]
# }else{
# corp.select<-cosine.matrix[1:i,1:i]
# }
#sort poems in decreasing order
corp.test<-corp.select
corp.all<-sort(corp.test[(max(nrow(corp.test))),], decreasing=T)
corp.all.df<-data.frame(corp.all)
corp.all.df$names<-row.names(corp.all.df)
#take the top closest poems based on significance
#COMMENT NEXT LINES OUT until ALTERNATIVE if using alternative
# #calc threshold based on statistical significance
# #this takes 2sd above the poem's overall mean similarity to other poems
# cut<-mean(corp.all)+(1.96*sd(corp.all))
# #if that amount is below the global mean + sd of similarity, then it uses this global mean
# if (cut < global.cut){
# cut<-global.cut
# }
# corp.top<-subset(corp.all.df, corp.all.df[,1] > cut)
# if (nrow(corp.top) == 0) {
# corp.top<-data.frame(corp.all.df[1,1], row.names(corp.all.df)[1])
# row.names(corp.top)<-as.character(corp.top[,2])
# }
#### ALTERNATIVE: take differential and cut when distance between 2 obs is greater than the next one, i.e. looking for a significant drop
#COMMENT lines above if you use this alternative
if (length(corp.all) > 2){
if (diff(corp.all)[[1]] < diff(corp.all)[[2]]){
corp.top<-corp.all.df[1,]
} else {
for (n in 1:(length(corp.all)-2)){
if (diff(corp.all)[[n+1]] < diff(corp.all)[[n]]){
corp.top<-corp.all.df[1:(n+1),]
break
} else {
corp.top<-corp.all.df
}
}
}
} else {
corp.top<-corp.all.df
}
###COMMENT TO HERE###
corp.df<-corp.top
#create edge list
for (j in 1:nrow(corp.df)) {
source<-row.names(cosine.matrix)[i]
target<-row.names(corp.df)[j]
weight<-corp.top[j,1]
count.target<-wordcount.df[row.names(wordcount.df) == row.names(corp.df)[j],]
count.diff<-(count.source-count.target)/min(count.source, count.target)
abs.diff<-abs(count.diff)
page.diff<-which(row.names(cosine.matrix) == as.character(source))-which(row.names(cosine.matrix) == as.character(target))
newRow<-data.frame(source, target, weight, count.source, count.target, count.diff, abs.diff, page.diff, stringsAsFactors = F)
edge.list<-rbind(edge.list, newRow)
}
}
#setwd("[SET DIRECTORY]")
#file.final<-paste(filenames0[m], "_EdgeList.csv", sep="")
#write.csv(edge.list, file=file.final)
#### Step 3: Network Analysis ####
author.edge<-edge.list[,1:3]
author.nodes<-append(edge.list$source, edge.list$target)
author.nodes<-data.frame(author.nodes[!duplicated(author.nodes)])
colnames(author.nodes)<-c("name")
author.split<-cSplit(author.nodes, "name", sep="_", type.convert=FALSE)
author.split<-cbind(author.nodes, author.split)
g<-graph.data.frame(author.edge, directed=TRUE, vertices=NULL)
#to measure diameter distance use inverse of similarity score
author.edge2<-data.frame(source=author.edge[,1], target=author.edge[,2], weight=((1/author.edge[,3])*100))
g.dist<-graph.data.frame(author.edge2, directed=TRUE, vertices=NULL)
#create undirected graph as well
g3<-graph.data.frame(author.edge, directed=FALSE, vertices=NULL)
#summary calculations
book.length<-nrow(cosine.matrix)
#number nodes
no.nodes<-length(V(g))
#number edges
no.edges<-length(E(g))
mean.degree<-mean(degree(g))
sd.degree<-sd(degree(g))
#percent of nodes w 1 connection
per.1.degree<-degree_distribution(g)[2]
#percent 1-3 connection
per.1.to.3.degree<-sum(degree_distribution(g)[2:4])
#percent 1-5 connection
per.1.to.5.degree<-sum(degree_distribution(g)[2:6])
#+5 connections
per.plus.five.degree<-sum(degree_distribution(g)[7:length(degree_distribution(g))])
trans.score<-transitivity(g, type=c("undirected"), vids=NULL, weights=NULL, isolates=c("NaN"))
#calculate avg. page distance between nodes for every edge
avg.page.diff<-mean(edge.list$page.diff)
sd.page.diff<-sd(edge.list$page.diff)
#normalized page difference
norm.avg.page.diff<-avg.page.diff/book.length
per.page.diff.less6<-length(which(edge.list$page.diff < 6))/length(edge.list$page.diff) #percent of poems spanning more than a decade
per.page.diff.greater20<-length(which(edge.list$page.diff > 20))/length(edge.list$page.diff) #percent of poems spanning more than a decade
avg.poem.similarity<-mean(E(g)$weight)
sd.poem.similarity<-sd(E(g)$weight)
#clean community detection and just scrape modularity + no. communities for all comm methods
comm.louvain<-cluster_louvain(g3, weights = E(g)$weight)
groups.louvain<-max(comm.louvain$membership)
mod.louvain<-modularity(comm.louvain)
####walktrap
#based on the random walk theory that you will get trapped more in a community than not.
comm.walk<-walktrap.community(g, weights = E(g)$weight, steps = 4, merges = TRUE, modularity = TRUE, membership = TRUE)
groups.walk<-max(comm.walk$membership)
mod.walk<-modularity(comm.walk)
###fastgreedy
comm.fast<-cluster_fast_greedy(g3, weights=E(g)$weight)
groups.fast<-max(comm.fast$membership)
mod.fast<-modularity(comm.fast)
#robustness = random deletion of nodes
robustness_vector<-vector()
for (j in 1:1000) {
#random.nodes<-sample(V(g), length(V(g)), replace = FALSE, prob = NULL)
g.robust<-as.undirected(g)
for (i in 1:length(V(g))) {
random.node<-sample(1:length(V(g.robust)), 1, replace = FALSE)
g.robust<-delete.vertices(g.robust, v=random.node)
if (is.connected(g.robust)==FALSE)
break
}
robustness_vector<-append(robustness_vector, i)
}
robust.mean<-mean(robustness_vector)
robust.sd<-sd(robustness_vector)
#vulnerability = targeted deletion of nodes via descending order of degree
#controlling for top 100 nodes
#vulnerability1 = % nodes deleted when the remaining graph is less than half the size of the original graph
deg<-names(sort(degree(g),TRUE))
g.vulnerable<-as.undirected(g)
#trim the graph to the 100 highest-degree nodes before the attack
#NOTE(review): deg[101:length(V(g.vulnerable))] assumes the graph has more than
#100 nodes; for smaller graphs 101:N runs backwards — confirm input size
g.vulnerable<-delete.vertices(g.vulnerable, v=as.character(deg[101:length(V(g.vulnerable))]))
for (i in 1:length(V(g.vulnerable))) {
g.vulnerable<-delete.vertices(g.vulnerable, v=as.character(deg[[i]]))
#components(g.vulnerable)$csize
#components(g.vulnerable)$no
if (max(components(g.vulnerable)$csize) < (length(V(g.vulnerable))/2))
break
}
#NOTE(review): the denominator here is the node count AFTER trimming/deletion;
#the later "Global Vulnerability" section divides by the original |V(g)| —
#confirm which normalization is intended
vulnerability1<-i/length(V(g.vulnerable))
#vulnerability2 = when the largest remaining component is no longer 50% bigger than the next largest
deg<-names(sort(degree(g),TRUE))
g.vulnerable<-as.undirected(g)
g.vulnerable<-delete.vertices(g.vulnerable, v=as.character(deg[101:length(V(g.vulnerable))]))
for (i in 1:length(V(g.vulnerable))) {
g.vulnerable<-delete.vertices(g.vulnerable, v=as.character(deg[[i]]))
#components(g.vulnerable)$csize
#components(g.vulnerable)$no
#stop once the graph splits and the largest component is no longer
#at least twice the size of the second largest
if (components(g.vulnerable)$no > 1){
if ((.5*max(components(g.vulnerable)$csize)) < sort(components(g.vulnerable)$csize, decreasing=T)[2])
break
}
}
vulnerability2<-i/length(V(g.vulnerable))
#diameter measured on g.dist, the graph weighted by inverse similarity
diam<-diameter(g.dist)
#combine all network scores
author.name<-as.character(filenames0[m])
#assemble this edition's scores into one results row
#fix: removed the stray "+" tokens at the end of each continuation line
#(console-paste artifacts); they turned the following argument into a
#unary-plus expression, which mangled the auto-generated column names
#(e.g. "X.per.1.degree") and broke the Fig. 6.3 reader that expects
#columns such as norm.avg.page.diff
temp.df<-data.frame(author.name,book.length, no.nodes, no.edges, mean.degree, sd.degree,
                    per.1.degree, per.1.to.3.degree, per.1.to.5.degree, per.plus.five.degree,
                    trans.score, avg.page.diff, sd.page.diff, norm.avg.page.diff, per.page.diff.less6, per.page.diff.greater20,
                    avg.poem.similarity, sd.poem.similarity, groups.louvain,
                    mod.louvain,groups.walk,mod.walk,groups.fast,mod.fast, robust.mean, robust.sd,
                    diam, vulnerability1, vulnerability2, sim.mean, sim.sd)
#append this edition's row to the running results table
final.df<-rbind(final.df, temp.df)
}
#write.csv(final.df, file="Whitman_Results.csv")
#### Fig. 6.2 ####
#The network plot used was generated using the edge lists in the directory "Whitman_LeavesOfGrass_EdgeLists_Diff"
#It was generated in Gephi using the gephi file "Whitman.gephi"
#### Fig. 6.3 ####
#Figure three takes as input the output from Script 6.2 data called "Whitman_Results.csv"
#read the per-edition network measures produced by the script above
a <- read.csv("Whitman_Results.csv")
tit.size <- 14
#shared look for every panel: centered titles, no grid lines
panel.theme <- theme(plot.title = element_text(hjust = 0.5, size = tit.size),
                     plot.caption = element_text(hjust = 0.5, size = 10),
                     panel.grid.minor = element_blank(),
                     panel.grid.major = element_blank())
#helper: one point-and-line panel plotting a precomputed y vector against edition
make.panel <- function(yvals, panel.title, y.lab) {
  ggplot(a, aes(x = author.name, y = yvals)) +
    theme_bw() +
    panel.theme +
    geom_point(shape = 15, size = 2) +
    geom_line() +
    labs(title = panel.title, x = "Date", y = y.lab)
}
p1 <- make.panel(a$norm.avg.page.diff * 100, "(a) Page Distance", "% of Work")
p2 <- make.panel(a$per.page.diff.less6 * 100, "(b) Pages less than 5 away", "% of Pages")
p3 <- make.panel(a$per.page.diff.greater20 * 100, "(c) Pages more than 20 away", "% of Pages")
p4 <- make.panel(a$sd.poem.similarity / 100, "(d) SD of Page Similarity", "Similarity")
p5 <- make.panel((1 - a$vulnerability2) * 100, "(e) Vulnerability", "% of Pages")
library(gridExtra)
#lay the five panels out on a 2-column grid
grid.arrange(p1, p2, p3, p4, p5, ncol = 2)
#6.3
### Model Comparison ###
#this script allows you to observe the effects of using different features and feature weights
#it takes one poet at a time as input
#here I use the example of Muriel Rukeyser
#input directory names
#requires 3 representations of every poet's work (lexical, syntactic, phonetic)
#the three representations of the same poet's corpus (one directory each)
filenames0<-c("MurielRukeyser")
filenames1<-c("MurielRukeyser_POS")
filenames2<-c("MurielRukeyser_PHON")
#what follows are the different features. these can be turned off/on depending on how you want the model to work
#currently all features are included.
#LEXICAL
corpus1 <- VCorpus(DirSource(filenames0), readerControl=list(language="English"))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1 <- tm_map(corpus1, stemDocument, language = "english")
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
scaling1<-rowSums(corpus1.matrix)
wordcount.df<-data.frame(scaling1)
#remove stopwords
corpus1 <- tm_map(corpus1, removeWords, stopwords("English"))
#remake dtm
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#tfidf
corpus.tfidf<-weightTfIdf(corpus1.dtm, normalize = TRUE)
corpus.tfidf.mat<-as.matrix(corpus.tfidf, stringsAsFactors=F)
#LSA (semantic feature space)
data(stopwords_en)
#numeric tokens 1..5000 are stripped from the term rows below
nums<-c(1:5000)
#fix: this section processes an English corpus (language="english",
#data(stopwords_en) loaded above) but the original passed stopwords_de;
#use the English stopword list
myMatrix<-textmatrix(filenames0, stemming=TRUE, language="english", minWordLength=2, maxWordLength=FALSE, minDocFreq=1, maxDocFreq=FALSE, minGlobFreq=FALSE, maxGlobFreq=FALSE, stopwords=stopwords_en, vocabulary=NULL, phrases=NULL, removeXML=FALSE, removeNumbers=FALSE)
myMatrix<-myMatrix[!row.names(myMatrix) %in% nums,]
#log-tf * idf weighting before building the latent semantic space
myMatrix<-lw_logtf(myMatrix) * gw_idf(myMatrix)
myLSAspace<-lsa(myMatrix, dims=dimcalc_share())
#make first similarity matrix based on semantic representation
cosine.dist2<-simil(t(as.textmatrix(myLSAspace)), method="cosine")
cosine.matrix2<-as.matrix(cosine.dist2, stringsAsFactors=F)*100
#POS (syntactic feature space): the corpus files contain part-of-speech tags
corpus2 <- VCorpus(DirSource(filenames1), readerControl=list(language="English"))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
#strip "$" from tags (e.g. Penn tags like PRP$)
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("\\$", "", x)))
#take 1-2 POSgrams
BigramTokenizer <- function(x) unlist(lapply(ngrams(words(x), 1:2), paste, collapse = " "), use.names = FALSE)
corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(tokenize = BigramTokenizer, wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
corpus2.tfidf<-weightTfIdf(corpus2.dtm, normalize = TRUE)
corpus2.tfidf.mat<-as.matrix(corpus2.tfidf, stringsAsFactors=F)
#PHONEMES (phonetic feature space); case carries meaning here, so no lowercasing
corpus3 <- VCorpus(DirSource(filenames2))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf), removePunctuation = FALSE, stopwords = FALSE, tolower=FALSE))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
#tfidf
corpus3.tfidf<-weightTfIdf(corpus3.dtm, normalize = TRUE)
corpus3.tfidf.mat<-as.matrix(corpus3.tfidf, stringsAsFactors=F)
#combine POS & PHONEMES & LEXICAL Tables
all.tfidf<-cbind(corpus.tfidf.mat, corpus2.tfidf.mat, corpus3.tfidf.mat)
#make similarity matrix based on combined feature space
cosine.dist1<-simil(all.tfidf, method = "cosine")
cosine.matrix1<-as.matrix(cosine.dist1, stringsAsFactors=F)*100
#adjust for length: damp similarity for long poems by dividing by log word count
cosine.matrix1<-((1/log(scaling1))*cosine.matrix1)*10
#clean: NA similarities (e.g. from empty rows) become 0
cosine.matrix1[is.na(cosine.matrix1)]<-0
cosine.matrix2[is.na(cosine.matrix2)]<-0
#Option 1: Combine all 4 features
#combine LSA similarity matrix with LEX/POS/PHON and take *average* similarity
cosine.matrix<-(cosine.matrix1+cosine.matrix2)/2
#Option 2: only LSA
#cosine.matrix<-cosine.matrix2
#Option 3: only LEX/POS/PHON
#cosine.matrix<-cosine.matrix1
#Option 4: only LEX
#cosine.dist1<-simil(corpus.tfidf.mat, method = "cosine")
#cosine.matrix.lex<-as.matrix(cosine.dist1, stringsAsFactors=F)*100
#cosine.matrix.lex[is.na(cosine.matrix.lex)]<-0
#cosine.matrix<-cosine.matrix.lex
### Step 2: Make Edge List ###
#this outputs a table of most similar poems for a given poem based on the model used above
#it takes as input the table "cosine.matrix" and finds the most similar poems to a given poem
#it outputs an edge list where source is the original poem and target is the poem that is most similar to it
#poems can only be similar to poems that were written *before* them
#data needs to be in chronological order in the directory for this script to work
edge.list<-data.frame()
#start at poem 3 so there are at least two earlier poems to compare against
for (i in 3:nrow(cosine.matrix)) {
#first truncate the similarity table by poems that precede the i'th poem
count.source<-wordcount.df[row.names(wordcount.df) == row.names(cosine.matrix)[i],]
corp.select<-cosine.matrix[1:i,1:i]
#criteria 1: remove poems beyond 2x above/below its word length
#defaults to all poems if the selection criteria can't be met
#if poem is greater than 1000 words, then it can be similar to any poem > 500 words
if (count.source < 999){
wordcount.sub<-wordcount.df[row.names(wordcount.df) %in% row.names(corp.select),]
over<-which(wordcount.sub > 2*count.source)
under<-which(wordcount.sub < count.source/2)
comb<-append(over, under)
if (length(comb) != 0){
corp.test<-corp.select[-comb,-comb]
} else {corp.test<-corp.select
}
} else {
wordcount.sub<-wordcount.df[row.names(wordcount.df) %in% row.names(corp.select),]
over<-which(wordcount.sub > 500)
corp.test<-corp.select[over,over]
}
#revert to original if criteria aren't met
#(filtering collapsed the matrix to a vector, or left too few poems)
if (is.null(nrow(corp.test))){
corp.test<-corp.select
}
if (nrow(corp.test) == 2){
corp.test<-corp.select
}
#if the source poem has zero similarity to every remaining candidate, fall back
#to the unfiltered selection
zero.test<-corp.test[max(nrow(corp.test)),][1:(nrow(corp.test)-1)]
if(sum(zero.test) == 0){
corp.test<-corp.select
}
#sort the poems in decreasing order of similarity to the source poem
#(the source poem is the last row of the truncated matrix)
corp.all<-sort(corp.test[(max(nrow(corp.test))),], decreasing=T)
corp.all.df<-data.frame(corp.all)
corp.all.df$names<-row.names(corp.all.df)
#take the top closest poems based on statistical significance
#COMMENT NEXT LINES OUT until ALTERNATIVE if using alternative
# #calc threshold based on statistical significance
# #this takes 2sd above the poem's overall mean similarity to other poems
# cut<-mean(corp.all)+(1.96*sd(corp.all))
# #if that amount is below the global mean + sd of similarity, then it uses this global mean
# if (cut < global.cut){
# cut<-global.cut
# }
# corp.top<-subset(corp.all.df, corp.all.df[,1] > cut)
# if (nrow(corp.top) == 0) {
# corp.top<-data.frame(corp.all.df[1,1], row.names(corp.all.df)[1])
# row.names(corp.top)<-as.character(corp.top[,2])
# }
#### ALTERNATIVE: take differential and cut when distance between 2 obs is greater than the next one, i.e. looking for a significant drop
#COMMENT lines above if you use this alternative
#keep only those poems for which the distance between them is not greater than the one above it
#in other words, this cuts at the point at which the difference between two poems' similarity is greater than the previous poem's difference
#it thus keeps the most similar poem and any others whose similarity is closer to the one kept than the next one not kept
#it favors keeping fewer poems with the exception of when the dissimilarity of the next poem is not considerably greater than the one before it
if (length(corp.all) > 2){
if (diff(corp.all)[[1]] < diff(corp.all)[[2]]){
corp.top<-corp.all.df[1,]
} else {
for (n in 1:(length(corp.all)-2)){
if (diff(corp.all)[[n+1]] < diff(corp.all)[[n]]){
corp.top<-corp.all.df[1:(n+1),]
break
} else {
corp.top<-corp.all.df
}
}
}
} else {
corp.top<-corp.all.df
}
###COMMENT TO HERE###
#this is the top most similar poem(s)
corp.df<-corp.top
#create edge list
#this conjoins the source poem w its most similar poem(s)
#if there is more than one most similar then it creates a new row
#it records the word counts of each poem to assess how different they are in length
for (j in 1:nrow(corp.df)) {
source<-row.names(cosine.matrix)[i]
target<-row.names(corp.df)[j]
weight<-corp.top[j,1]
#word count of source and target poems to assess their similarity in terms of size
count.target<-wordcount.df[row.names(wordcount.df) == row.names(corp.df)[j],]
count.diff<-(count.source-count.target)/min(count.source, count.target)
abs.diff<-abs(count.diff)
#how far apart are the poems in the corpus (simple representation of poem#)
#a better representation would take year difference encoded in filenames
poem.diff<-which(row.names(cosine.matrix) == as.character(source))-which(row.names(cosine.matrix) == as.character(target))
#NOTE: growing edge.list with rbind inside the loop is O(n^2); acceptable at
#corpus scale here
newRow<-data.frame(source, target, weight, count.source, count.target, count.diff, abs.diff, poem.diff, stringsAsFactors = F)
edge.list<-rbind(edge.list, newRow)
}
}
#"edge.list" is the output to be inspected
#source and target are the poems being compared, i.e. it searches for the most
#similar poem to the source poem
#weight = cosine similarity *100
#count.source = word count of the source poem
#count.target = word count of the target poem [choices are limited based on word count similarity]
#count.diff = ratio of the two word counts with positive being how much longer
#the source is than the target
#abs.diff = same using absolute values
#poem.diff = how far apart they are in the corpus
#remember, you can only be similar backwards so those numbers grow as the poet ages
#more could be done to secure ordering of poems for this measure
#with each edge.list table for each model you can then compare the choices that the model makes
### Assess which words overlap between any two poems in a corpus ###
#this takes corpus.tfidf.mat from above as an input
#find intersect of two poems based on their tfidf words
#pick two poems by filename and list the tfidf terms they share
poem1<-c("1948_RukeyserMuriel_018_SUMMERTHESACRAMENTO_0000001.txt")
poem2<-c("1976_RukeyserMuriel_008_BLUESPRUCE_0000001.txt")
#NOTE(review): this second assignment overwrites poem2 above — presumably a
#leftover from switching comparison pairs interactively; confirm which pair
#is intended before relying on the output
poem2<-c("1957_RukeyserMuriel_062_Aredbridgefasteningthiscitytotheforest_0000001.txt")
df1<-corpus.tfidf.mat[which(row.names(corpus.tfidf.mat) == poem1), ]
df2<-corpus.tfidf.mat[which(row.names(corpus.tfidf.mat) == poem2), ]
#keep only terms with nonzero weight in each poem
df1<-df1[which(df1 != 0)]
df2<-df2[which(df2 != 0)]
intersect(names(df1), names(df2))
############# Section Two: Vulnerability and Periodization ###############
#6.4
###### Global Vulnerability ######
#this script creates a network of relationships between the works of a given poet
#each work is considered a node
#an edge is drawn between a work and its "most similar works", where similarity is determined using the
#four features described above
#it does so in an evolutionary fashion meaning that a work can only be connected to a work that has temporally come
#before it.
#the script takes as input three directories that each contain directories of:
#1. lexical representation of a poet's works
#2. syntactic representation of a poet's works (POS)
#3. phonetic representation of a poet's works (PHON)
#it requires that files are named in the following way:
#DATE_AUTHOR_TITLE.txt
#this allows the script to calculate the length of the poet's career and the yearly distances between similar poems
#it outputs a table of network measures related to the poet's career
#"Global Vulnerability" corresponds to "Vulnerability 2" below
#it can also output edge lists for every poet so these can be further studied or visualized
#they are formatted for use in Gephi
#load stopword lists for the LSA section
#load all three stopword lists shipped with the lsa package
data(stopwords_de)
data(stopwords_en)
data(stopwords_fr)
#establish language of your corpus
#make sure to change the LSA stopword manually (this is a bug in the function)
#NOTE(review): German is active here, but the directory/metadata selections
#below activate the ENGLISH corpora, the LSA call uses stopwords_en, and the
#final output file is named "Global_Vulnerability_English.csv" — as written,
#English poems would be stemmed/stopworded as German. Confirm which run was
#intended and toggle consistently (the Droste section at the end expects the
#German setting).
language1<-c("German")
language2<-c("german")
#language1<-c("English")
#language2<-c("english")
#language1<-c("French")
#language2<-c("french")
#remove bad words
#for German
#NOTE(review): this list is German-specific ("drum"/"sei" are also English
#words); when running the English or French corpora, prefer the empty vector
#alternative below — confirm before a production run
problems<-c("drum", "habt", "hast", "ichs", "ists", "sei", "wär", "weimar", "zwei", "seite", "apparat", "datumsangaben")
#or set as empty
#problems<-vector()
#set directories
#there should be one home directory which contains the 3 different representations of each poet
#each representation directory should contain directories of each poet (so one directory per poet)
#NOTE: hard-coded user path; adjust per machine
homedir<-paste("~/Documents/2. Books/Enumerations/Enumerations - Data and Code/Data/txtlab_POETRY_CW")
#lexdir<-paste("PoetryAuthors_German")
#posdir<-paste("PoetryAuthors_German_POS")
#phondir<-paste("PoetryAuthors_German_PHON")
lexdir<-paste("PoetryAuthors_English")
posdir<-paste("PoetryAuthors_English_POS")
phondir<-paste("PoetryAuthors_English_PHON")
#lexdir<-paste("PoetryAuthors_French")
#posdir<-paste("PoetryAuthors_French_POS")
#phondir<-paste("PoetryAuthors_French_PHON")
#set working directory
setwd(homedir)
#get metadata
#meta<-read.csv("PoetryAuthors_German_Meta.csv")
meta<-read.csv("PoetryAuthors_English_Meta.csv")
#meta<-read.csv("PoetryAuthors_French_Meta.csv")
#get names of poets
#each poet's works should be a separate directory
filenames0<-list.files(lexdir, full.names=FALSE)
filenames1<-list.files(posdir, full.names=FALSE)
filenames2<-list.files(phondir, full.names=FALSE)
#run: one iteration per poet; results accumulate in final.df
final.df<-NULL
for (m in 1:length(filenames0)) {
print(m)
#LEXICAL
dir<-paste(homedir, lexdir, sep = "/")
setwd(dir)
corpus1 <- VCorpus(DirSource(filenames0[m]), readerControl=list(language=language1))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1 <- tm_map(corpus1, removeWords, problems)
corpus1 <- tm_map(corpus1, stemDocument, language = language2)
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#word counts per poem taken BEFORE stopword removal (used for filtering and scaling)
scaling1<-rowSums(corpus1.matrix)
wordcount.df<-data.frame(scaling1)
#remove stopwords
corpus1 <- tm_map(corpus1, removeWords, stopwords(language1))
#remake dtm
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#tfidf
corpus.tfidf<-weightTfIdf(corpus1.dtm, normalize = TRUE)
corpus.tfidf.mat<-as.matrix(corpus.tfidf, stringsAsFactors=F)
#LSA (semantic feature space for this poet's directory)
dir<-paste(homedir, lexdir, filenames0[m], sep = "/")
nums<-c(1:5000)
#change language manually!!!
#NOTE(review): stopwords_en is hard-wired here; it matches only when the
#English corpora/language settings are active
myMatrix<-textmatrix(dir, stemming=TRUE, language=language2, minWordLength=2, maxWordLength=FALSE, minDocFreq=1, maxDocFreq=FALSE, minGlobFreq=FALSE, maxGlobFreq=FALSE, stopwords=stopwords_en, vocabulary=NULL, phrases=NULL, removeXML=FALSE, removeNumbers=FALSE)
myMatrix<-myMatrix[!row.names(myMatrix) %in% nums,]
myMatrix<-myMatrix[!row.names(myMatrix) %in% problems,]
myMatrix<-lw_logtf(myMatrix) * gw_idf(myMatrix)
myLSAspace<-lsa(myMatrix, dims=dimcalc_share())
cosine.dist2<-simil(t(as.textmatrix(myLSAspace)), method="cosine")
cosine.matrix2<-as.matrix(cosine.dist2, stringsAsFactors=F)*100
#POS (syntactic feature space)
dir<-paste(homedir, posdir, sep = "/")
setwd(dir)
corpus2 <- VCorpus(DirSource(filenames1[m]), readerControl=list(language=language1))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
#strip "$" from tags (e.g. Penn tags like PRP$)
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("\\$", "", x)))
#take 1-2 POSgrams
BigramTokenizer <- function(x) unlist(lapply(ngrams(words(x), 1:2), paste, collapse = " "), use.names = FALSE)
corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(tokenize = BigramTokenizer, wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
corpus2.tfidf<-weightTfIdf(corpus2.dtm, normalize = TRUE)
corpus2.tfidf.mat<-as.matrix(corpus2.tfidf, stringsAsFactors=F)
#PHONEMES (phonetic feature space); case carries meaning, so no lowercasing
dir<-paste(homedir, phondir, sep = "/")
setwd(dir)
corpus3 <- VCorpus(DirSource(filenames2[m]))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf), removePunctuation = FALSE, stopwords = FALSE, tolower=FALSE))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
#tfidf
corpus3.tfidf<-weightTfIdf(corpus3.dtm, normalize = TRUE)
corpus3.tfidf.mat<-as.matrix(corpus3.tfidf, stringsAsFactors=F)
#add POS & PHONEMES to LEXICAL DTM
all.tfidf<-cbind(corpus.tfidf.mat, corpus2.tfidf.mat, corpus3.tfidf.mat)
#similarity matrix
cosine.dist1<-simil(all.tfidf, method = "cosine")
cosine.matrix1<-as.matrix(cosine.dist1, stringsAsFactors=F)*100
#adjust for length: damp similarity for long poems via log word count
cosine.matrix1<-((1/log(scaling1))*cosine.matrix1)*10
#combine LSA with LEX/POS
cosine.matrix1[is.na(cosine.matrix1)]<-0
cosine.matrix2[is.na(cosine.matrix2)]<-0
cosine.matrix<-(cosine.matrix1+cosine.matrix2)/2
#generate statistical significance threshold
#(global.cut is used by the commented-out threshold selection below)
test<-sm2vec(cosine.matrix)
global.cut<-mean(test)+(1.96*sd(test))
#summary stats over each poem's 100 nearest neighbors
sim.v<-vector()
for (z in 1:nrow(cosine.matrix)){
sub<-sort(cosine.matrix[z,], decreasing=T)[1:100]
sim.v<-append(sim.v, sub)
}
sim.mean<-mean(sim.v)
sim.sd<-sd(sim.v)
### Step 2: Make Edge List ###
#same evolutionary edge-list construction as in the model-comparison section:
#each poem may only link back to earlier poems of comparable length
edge.list<-data.frame()
for (i in 3:nrow(cosine.matrix)) {
count.source<-wordcount.df[row.names(wordcount.df) == row.names(cosine.matrix)[i],]
corp.select<-cosine.matrix[1:i,1:i]
#remove poems beyond 2x above/below its word length
#if conditions default to all poems if the selection criteria can't be met
if (count.source < 999){
wordcount.sub<-wordcount.df[row.names(wordcount.df) %in% row.names(corp.select),]
over<-which(wordcount.sub > 2*count.source)
under<-which(wordcount.sub < count.source/2)
comb<-append(over, under)
if (length(comb) != 0){
corp.test<-corp.select[-comb,-comb]
} else {corp.test<-corp.select
}
#if poem is greater than 1000 words, then it can be similar to any poem > 500 words
} else {
wordcount.sub<-wordcount.df[row.names(wordcount.df) %in% row.names(corp.select),]
over<-which(wordcount.sub > 500)
corp.test<-corp.select[over,over]
}
#fall back to the unfiltered selection when filtering collapsed the matrix
if (is.null(nrow(corp.test))){
corp.test<-corp.select
}
if (nrow(corp.test) == 2){
corp.test<-corp.select
}
#fall back as well when the source has zero similarity to every candidate
zero.test<-corp.test[max(nrow(corp.test)),][1:(nrow(corp.test)-1)]
if(sum(zero.test) == 0){
corp.test<-corp.select
}
#sort candidates by similarity to the source (last row of the truncated matrix)
corp.all<-sort(corp.test[(max(nrow(corp.test))),], decreasing=T)
corp.all.df<-data.frame(corp.all)
corp.all.df$names<-row.names(corp.all.df)
#take the top closest poems based on significance
#COMMENT NEXT LINES OUT until ALTERNATIVE if using alternative
# #calc threshold based on statistical significance
# #this takes 2sd above the poem's overall mean similarity to other poems
# cut<-mean(corp.all)+(1.96*sd(corp.all))
# #if that amount is below the global mean + sd of similarity, then it uses this global mean
# if (cut < global.cut){
# cut<-global.cut
# }
# corp.top<-subset(corp.all.df, corp.all.df[,1] > cut)
# if (nrow(corp.top) == 0) {
# corp.top<-data.frame(corp.all.df[1,1], row.names(corp.all.df)[1])
# row.names(corp.top)<-as.character(corp.top[,2])
# }
#### ALTERNATIVE: take differential and cut when distance between 2 obs is greater than the next one, i.e. looking for a significant drop
#COMMENT lines above if you use this alternative
#cut at the first point where the similarity drop between consecutive poems
#exceeds the previous drop; keeps the most similar poem plus any near-ties
if (length(corp.all) > 2){
if (diff(corp.all)[[1]] < diff(corp.all)[[2]]){
corp.top<-corp.all.df[1,]
} else {
for (n in 1:(length(corp.all)-2)){
if (diff(corp.all)[[n+1]] < diff(corp.all)[[n]]){
corp.top<-corp.all.df[1:(n+1),]
break
} else {
corp.top<-corp.all.df
}
}
}
} else {
corp.top<-corp.all.df
}
###COMMENT TO HERE###
#corp.df holds the selected most-similar poem(s) for this source poem
corp.df<-corp.top
#create edge list
for (j in 1:nrow(corp.df)) {
source<-row.names(cosine.matrix)[i]
target<-row.names(corp.df)[j]
weight<-corp.top[j,1]
#word counts are recorded to document how different the two poems are in size
count.target<-wordcount.df[row.names(wordcount.df) == row.names(corp.df)[j],]
count.diff<-(count.source-count.target)/min(count.source, count.target)
abs.diff<-abs(count.diff)
#positional distance between the two works within the chronological corpus
work.diff<-which(row.names(cosine.matrix) == as.character(source))-which(row.names(cosine.matrix) == as.character(target))
newRow<-data.frame(source, target, weight, count.source, count.target, count.diff, abs.diff, work.diff, stringsAsFactors = F)
edge.list<-rbind(edge.list, newRow)
}
}
#to write the edge lists requires directory of directories for every author in your corpus
#setwd(homedir)
#file.final<-paste(filenames0[m], "_EdgeList.csv", sep="")
#write.csv(edge.list, file=file.final)
#### Step 3: Network Analysis ####
author.edge<-edge.list[,1:3]
#adjust 0 values and negative values (igraph weights must be positive)
author.edge$weight<-unlist(lapply(author.edge$weight, function(x) if (x<=0){x<-0.0001} else {x}))
#make node list
author.nodes<-append(edge.list$source, edge.list$target)
author.nodes<-data.frame(author.nodes[!duplicated(author.nodes)])
colnames(author.nodes)<-c("name")
#split filenames on "_" so name_1 holds the date component
author.split<-cSplit(author.nodes, "name", sep="_", type.convert=FALSE)
author.split<-cbind(author.nodes, author.split)
g<-graph.data.frame(author.edge, directed=TRUE, vertices=NULL)
#to measure diameter distance use inverse of similarity score
author.edge2<-data.frame(source=author.edge[,1], target=author.edge[,2], weight=((1/author.edge[,3])*100))
g.dist<-graph.data.frame(author.edge2, directed=TRUE, vertices=NULL)
#create undirected graph as well
g3<-graph.data.frame(author.edge, directed=FALSE, vertices=NULL)
#give attributes
#establish date for every poem drawn from filename
V(g)$date<-as.numeric(author.split$name_1)
#turn this into a decade
V(g)$decade<-as.numeric(substr(author.split$name_1, 1,3))
#summary calculations
career.length<-max(as.numeric(author.split$name_1))-min(as.numeric(author.split$name_1))
#number poems
no.nodes<-length(V(g))
#number connections
no.edges<-length(E(g))
#average connections per node
mean.degree<-mean(degree(g))
#standard deviation of connections per node
sd.degree<-sd(degree(g))
#percent of nodes w 1 connection
per.1.degree<-degree_distribution(g)[2]
#percent 1-3 connection
per.1.to.3.degree<-sum(degree_distribution(g)[2:4])
#percent 1-5 connection
per.1.to.5.degree<-sum(degree_distribution(g)[2:6])
#+5 connections
per.plus.five.degree<-sum(degree_distribution(g)[7:length(degree_distribution(g))])
#transitivity = # closed loops (triangles)
trans.score<-transitivity(g, type=c("undirected"), vids=NULL, weights=NULL, isolates=c("NaN"))
#calculate avg. temporal distance between nodes for every edge
#(column 2 of author.split is the date parsed from the filename)
date.v<-vector()
for(i in 1:nrow(author.edge)){
date.dist<-as.numeric(author.split[as.character(author.edge[i,1]) == as.character(author.split$name),2])-as.numeric(author.split[as.character(author.edge[i,2]) == as.character(author.split$name),2])
date.v<-append(date.v, date.dist)
}
#average distance between connected poems
avg.date.diff<-mean(date.v)
#standard deviation of distance between connected poems
sd.date.diff<-sd(date.v)
#percent of career avg. poems span
norm.avg.date.diff<-mean(date.v)/career.length
#percent of poems spanning more than a decade
per.non.decade<-length(which(date.v > 9))/length(date.v)
#average poem similarity
avg.poem.similarity<-mean(E(g)$weight)
sd.poem.similarity<-sd(E(g)$weight)
#assortativity by date measures the extent to which poems are connected to poems closer to them in time
assort.date<-assortativity(g, V(g)$date, directed=TRUE)
#number communities
#louvain method
comm.louvain<-cluster_louvain(g3, weights = E(g)$weight)
groups.louvain<-max(comm.louvain$membership)
mod.louvain<-modularity(comm.louvain)
#walktrap method
#based on the random walk theory that you will get trapped more in a community than not.
#comm.walk<-walktrap.community(g, weights = E(g)$weight, steps = 4, merges = TRUE, modularity = TRUE, membership = TRUE)
#groups.walk<-max(comm.walk$membership)
#mod.walk<-modularity(comm.walk)
###fastgreedy
#comm.fast<-cluster_fast_greedy(g3, weights=E(g)$weight)
#groups.fast<-max(comm.fast$membership)
#mod.fast<-modularity(comm.fast)
#robustness = random deletion of nodes
#this currently runs 100x and takes the average -- could change to 1,000 for more reliable result
robustness_vector<-vector()
for (j in 1:100) {
g.robust<-as.undirected(g)
for (i in 1:length(V(g))) {
random.node<-sample(1:length(V(g.robust)), 1, replace = FALSE)
g.robust<-delete.vertices(g.robust, v=random.node)
if (is.connected(g.robust)==FALSE)
break
}
robustness_vector<-append(robustness_vector, i)
}
#average robustness across all runs
robust.mean<-mean(robustness_vector)
robust.sd<-sd(robustness_vector)
#vulnerability = targeted deletion of nodes via descending order of degree
#vulnerability1 = when the remaining graph is less than half the size of the original graph
#deg<-names(sort(degree(g),TRUE))
#g.vulnerable<-as.undirected(g)
#for (i in 1:length(V(g))) {
# g.vulnerable<-delete.vertices(g.vulnerable, v=as.character(deg[[i]]))
# if (max(components(g.vulnerable)$csize) < (length(V(g))/2))
# break
#}
#vulnerability1<-i/length(V(g))
#vulnerability2 = when the largest remaining component is no longer 50% bigger than the next largest
deg<-names(sort(degree(g),TRUE))
g.vulnerable<-as.undirected(g)
for (i in 1:length(V(g))) {
g.vulnerable<-delete.vertices(g.vulnerable, v=as.character(deg[[i]]))
#components(g.vulnerable)$csize
#components(g.vulnerable)$no
if (components(g.vulnerable)$no > 1){
if ((.5*max(components(g.vulnerable)$csize)) < sort(components(g.vulnerable)$csize, decreasing=T)[2])
break
}
}
#share of (original) nodes deleted before the breakpoint; this is the
#"Global Vulnerability" score reported for the author
vulnerability<-i/length(V(g))
#add date differential to edge list and write
edge.list$date.diff<-date.v
setwd(homedir)
file.final<-paste(filenames0[m], "_EdgeList.csv", sep="")
write.csv(edge.list, file=file.final)
#combine all network scores
author.name<-as.character(filenames0[m])
#mean words per poem (pre-stopword-removal counts)
avg.length<-mean(scaling1)
#NOTE(review): assumes meta rows are ordered identically to filenames0 — confirm
gender<-as.character(meta$gender[m])
century<-meta$century[m]
#number of deletions reached in the vulnerability loop above
vuln.nodes<-i
#assemble this author's scores into one results row
#fix: removed the stray "+" tokens at the end of each continuation line
#(console-paste artifacts); they turned the following argument into a
#unary-plus expression and mangled the resulting column names in the CSV
temp.df<-data.frame(author.name, gender, century, avg.length,career.length, no.nodes, no.edges, mean.degree, sd.degree,
                    per.1.degree, per.1.to.3.degree, per.1.to.5.degree, per.plus.five.degree,
                    trans.score, avg.date.diff, sd.date.diff, norm.avg.date.diff, per.non.decade,
                    avg.poem.similarity, sd.poem.similarity, assort.date, groups.louvain,
                    mod.louvain, robust.mean, robust.sd, vulnerability, vuln.nodes)
#append this author's row and move to the next poet
final.df<-rbind(final.df, temp.df)
}
setwd(homedir)
write.csv(final.df, file="Global_Vulnerability_English.csv")
#### Fig. 6.4 ####
#the data for the figures was generated using gephi and can be found
#in the two gephi files for reproducing the illustrations in Fig6.4_Data
#Droste_Reduced.gephi = Fig. 6.4
##Droste_All.gephi = the full network for comparison (not shown in book)
#inherit edge list from above
#reload the Droste edge list written by the pipeline above
a<-read.csv("DrosteHuelshoffPoetry_EdgeList.csv")
#make graph object (columns 2:4 = source, target, weight; column 1 is the CSV index)
author.edge<-a[,2:4]
g<-graph.data.frame(author.edge, directed=TRUE, vertices=NULL)
#calculate vulnerability: remove nodes in descending degree order until the
#largest remaining component is no longer twice the size of the second largest
deg<-names(sort(degree(g),TRUE))
g.vulnerable<-as.undirected(g)
for (i in 1:length(V(g))) {
g.vulnerable<-delete.vertices(g.vulnerable, v=as.character(deg[[i]]))
#components(g.vulnerable)$csize
#components(g.vulnerable)$no
if (components(g.vulnerable)$no > 1){
if ((.5*max(components(g.vulnerable)$csize)) < sort(components(g.vulnerable)$csize, decreasing=T)[2])
break
}
}
#find breakpoint (number of deletions; printed interactively)
i
#create node list: every poem appearing as a source or target becomes a node
author.nodes<-append(author.edge$source, author.edge$target)
author.nodes<-data.frame(author.nodes[!duplicated(author.nodes)])
#fix: name the single auto-generated column "name", exactly as the main
#pipeline does; without this line author.nodes$name below is NULL, so the
#label assignment (and the $name lookups further down) silently fail
colnames(author.nodes)<-c("name")
#label by pre-break point
#identify top connected nodes prior to the breakpoint
#NOTE(review): 17 is hard-coded — presumably the breakpoint i found above; confirm
deg.sub<-deg[1:17]
#label = 1 for nodes in the pre-break high-degree subset, 0 otherwise
#(vectorized %in% replaces the original lapply-with-assignment construction)
author.nodes$label<-as.integer(as.character(author.nodes$name) %in% as.character(deg.sub))
#observe nodes in the two main components
#list all components and their sizes
components(g.vulnerable)$csize
#get membership of 2 largest components
#NOTE(review): component ids 3 and 5 are hard-coded for this particular corpus
#and breakpoint; re-check against the csize listing above after any change
comp.a<-data.frame(which(components(g.vulnerable)$membership == 3))
comp.b<-data.frame(which(components(g.vulnerable)$membership == 5))
comp.a$poems<-row.names(comp.a)
comp.b$poems<-row.names(comp.b)
write.csv(comp.a, file="Droste_A.csv")
write.csv(comp.b, file="Droste_B.csv")
#write edge list minus pre-break nodes (top degree nodes)
node.sub<-author.nodes[author.nodes$label != 1,]
node.remove<-author.nodes[author.nodes$label == 1,]
edge.sub<-author.edge[!as.character(author.edge$source) %in% as.character(node.remove$name), ]
edge.sub<-edge.sub[!as.character(edge.sub$target) %in% as.character(node.remove$name), ]
write.csv(node.sub, file="Droste_Nodes_Reduced.csv")
write.csv(edge.sub, file="Droste_Edges_Reduced.csv")
write.csv(author.nodes, file="Droste_Nodes_All.csv")
write.csv(node.remove, file="Droste_Nodes_Removed.csv")
#observe vocabulary differences between the two large components
#NOTE(review): relies on language2/problems from the Global Vulnerability
#section above being set to German for this German corpus — confirm
corpus1 <- VCorpus(DirSource("DrosteHuelshoffPoetry"), readerControl=list(language="German"))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1 <- tm_map(corpus1, removeWords, problems)
corpus1 <- tm_map(corpus1, stemDocument, language = language2)
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#divide into two components
a<-corpus1.matrix[row.names(corpus1.matrix) %in% row.names(comp.a),]
b<-corpus1.matrix[row.names(corpus1.matrix) %in% row.names(comp.b),]
#run distinctive words test
H = function(k) {N = sum(k); return(sum(k/N*log(k/N+(k==0))))}
#Dunning log-likelihood (G2) test for words distinctive of component A vs B.
#For each term, builds a 2x2 contingency table of (count in A, rest of A) vs
#(count in B, rest of B), then computes G2 and a Fisher exact test.
word1<-colSums(a)
word2<-colSums(b)
all1<-sum(word1)
all2<-sum(word2)
#preallocate the results frame; G2/fisher columns are filled in the loop
results <- data.frame(word = colnames(a),
group1=word1,
group2=word2,
G2 = 0,
fisher.OR = 0,
fisher.p = 0)
for (j in 1:ncol(a)){
cont.table<-data.frame(c(word1[j], all1-word1[j]), c(word2[j], all2-word2[j]))
fish<-fisher.test(cont.table)
#G2 = 2*N*(H(table) - H(row sums) - H(col sums)) — mutual-information form
LLR = 2*sum(cont.table)*(H(cont.table)-H(rowSums(cont.table))-H(colSums(cont.table)))
results$G2[j] = LLR
results$fisher.OR[j] = fish$estimate
results$fisher.p[j] = fish$p.value
}
dunning.df<-results[order(-results$G2),] #sort by G2
dunning.df<-dunning.df[dunning.df$fisher.p < 0.05,] #remove non-significant words
dunning.sort<-dunning.df
#sign the G2 score by which group the word favors (raw count difference)
dunning.sort$diff<-dunning.sort$group1-dunning.sort$group2
G2_Sort.v<-vector()
for (i in 1:nrow(dunning.sort)){
if (dunning.sort$diff[i] <= 0){
G2_Sort<--dunning.sort$G2[i]
} else {
G2_Sort<-dunning.sort$G2[i]
}
G2_Sort.v<-append(G2_Sort.v, G2_Sort)
}
#this gives you the distinctive words for group A (negative values represent words distinctive of group B)
dunning.sort<-cbind(dunning.sort, G2_Sort.v)
dunning.sort<-dunning.sort[order(-dunning.sort$G2_Sort.v),]
#6.5
#### Local Vulnerability ####
#this script takes as input a collection of poets' corpuses represented in 3 forms: lexical, POS, Phonetic
#currently it can work in 3 languages (German, French, English)
#it expects filenames in the following format:
#DATE_AnyOtherInformationAboutPoem.txt #it will use the date prior to underscore to calculate period dates
#it generates a similarity matrix for all poems across four domains: the 3 above and a semantic dimension using LSA
#it then identifies: moments (i.e. poems) in the poet's career that diverge significantly from the expected
#stylistic range up to that point in his or her career -- these are called moments of vulnerability
#where significance = values that occur less than 1% of the time in N random permutations of the data.
#this script outputs:
#1. a directory of vulnerability tables
#these are tables which list which poems fall below the expected stylistic range of the poet's career up to that point
#2. a directory of vulnerability graphs
#these allow you to observe the moments (poems) where that vulnerability is happening
#3. an output table called vulnerability.local that stores two different vulnerability measures
#described in the script and whether the final quarter of the career has the highest amount of vulnerability
#stopword lists shipped with the lsa package
data(stopwords_de)
data(stopwords_en)
data(stopwords_fr)
#language settings: language1 feeds tm, language2 feeds the stemmer/LSA
language1<-c("French")
language2<-c("french")
#remove bad words
#problems<-c("drum", "habt", "hast", "ichs", "ists", "sei", "wär", "weimar", "zwei", "seite", "apparat", "datumsangaben")
problems<-vector()
#set directories
#NOTE(review): placeholder path — must be set to the local data directory before running
homedir<-paste("INSERT DIRECTORY")
lexdir<-paste("PoetryAuthors_French")
phondir<-paste("PoetryAuthors_French_PHON")
posdir<-paste("PoetryAuthors_French_POS")
vulndir<-paste("PoetryAuthors_French_Vulnerability")
vuln.graph.dir<-paste("PoetryAuthors_French_Vulnerability_Graphs")
setwd(homedir)
#one subdirectory per poet, parallel across the three representations
filenames0<-list.files(lexdir, full.names=FALSE)
filenames1<-list.files(posdir, full.names=FALSE)
filenames2<-list.files(phondir, full.names=FALSE)
#ingest metadata about poets' birth and death dates
meta<-read.csv("PoetryAuthors_French_Meta.csv")
#run
#accumulators filled by the per-poet loop below
results.df<-NULL
vulnerability.local<-NULL
periods.df<-NULL
#Per-poet local-vulnerability loop. For each poet: build a combined
#LEX/POS/PHON tf-idf similarity matrix plus an LSA similarity matrix, average
#them, then bootstrap an expected similarity band and flag poems falling
#outside it. Writes one graph + one table per poet and appends summary scores
#to vulnerability.local.
#Fixes: seq_along() instead of 1:length() (safe on empty dirs); TRUE/FALSE
#instead of T/F; plot/lines type "l" instead of the invalid type="line"
#(which R warns about and truncates to its first character).
for (m in seq_along(filenames0)) {
print(m)
author<-as.character(meta$author[m])
#LEXICAL
dir<-paste(homedir, lexdir, sep = "/")
setwd(dir)
corpus1 <- VCorpus(DirSource(filenames0[m]), readerControl=list(language=language1))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1 <- tm_map(corpus1, removeWords, problems)
corpus1 <- tm_map(corpus1, stemDocument, language = language2)
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#poem lengths (pre-stopword-removal) used later to normalize similarity
scaling1<-rowSums(corpus1.matrix)
wordcount.df<-data.frame(scaling1)
#remove stopwords
corpus1 <- tm_map(corpus1, removeWords, stopwords(language1))
#remake dtm
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#tfidf
corpus.tfidf<-weightTfIdf(corpus1.dtm, normalize = TRUE)
corpus.tfidf.mat<-as.matrix(corpus.tfidf, stringsAsFactors=F)
#LSA
#SET STOPWORDS MANUALLY
dir<-paste(homedir, lexdir, filenames0[m], sep = "/")
#row names that are pure numbers 1..5000 are dropped from the term matrix
nums<-c(1:5000)
myMatrix<-textmatrix(dir, stemming=TRUE, language=language2, minWordLength=2, maxWordLength=FALSE, minDocFreq=1, maxDocFreq=FALSE, minGlobFreq=FALSE, maxGlobFreq=FALSE, stopwords=stopwords_fr, vocabulary=NULL, phrases=NULL, removeXML=FALSE, removeNumbers=FALSE)
myMatrix<-myMatrix[!row.names(myMatrix) %in% nums,]
myMatrix<-myMatrix[!row.names(myMatrix) %in% problems,]
myMatrix<-lw_logtf(myMatrix) * gw_idf(myMatrix)
myLSAspace<-lsa(myMatrix, dims=dimcalc_share())
#semantic (LSA) document-document similarity, scaled to 0-100
cosine.dist2<-simil(t(as.textmatrix(myLSAspace)), method="cosine")
cosine.matrix2<-as.matrix(cosine.dist2, stringsAsFactors=F)*100
#POS
dir<-paste(homedir, posdir, sep = "/")
setwd(dir)
corpus2 <- VCorpus(DirSource(filenames1[m]), readerControl=list(language="English"))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
#strip "$" from Penn-style tags so they survive tokenization
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("\\$", "", x)))
#take 1-2 POSgrams
BigramTokenizer <- function(x) unlist(lapply(ngrams(words(x), 1:2), paste, collapse = " "), use.names = FALSE)
corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(tokenize = BigramTokenizer, wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
corpus2.tfidf<-weightTfIdf(corpus2.dtm, normalize = TRUE)
corpus2.tfidf.mat<-as.matrix(corpus2.tfidf, stringsAsFactors=F)
#PHONEMES
dir<-paste(homedir, phondir, sep = "/")
setwd(dir)
corpus3 <- VCorpus(DirSource(filenames2[m]))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf), removePunctuation = FALSE, stopwords = FALSE, tolower=FALSE))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
corpus3.tfidf<-weightTfIdf(corpus3.dtm, normalize = TRUE)
corpus3.tfidf.mat<-as.matrix(corpus3.tfidf, stringsAsFactors=F)
#add POS & PHONEMES to LEXICAL DTM
all.tfidf<-cbind(corpus.tfidf.mat, corpus2.tfidf.mat, corpus3.tfidf.mat)
#similarity matrix
cosine.dist1<-simil(all.tfidf, method = "cosine")
cosine.matrix1<-as.matrix(cosine.dist1, stringsAsFactors=F)*100
#adjust for length
cosine.matrix1<-((1/log(scaling1))*cosine.matrix1)*10
#combine LSA with LEX/POS/PHON
cosine.matrix1[is.na(cosine.matrix1)]<-0 #Lex/POS/PHON Matrix
cosine.matrix2[is.na(cosine.matrix2)]<-0 #LSA Matrix
cosine.matrix<-(cosine.matrix1+cosine.matrix2)/2
#get lower triangle
cormat<-cosine.matrix
get_lower_tri<-function(cormat){
cormat[upper.tri(cormat)] <- NA
return(cormat)
}
lower_tri <- get_lower_tri(cormat)
lower_tri[lower_tri == 0] <- NA
#these are the observed values
#this takes the average similarity between a poem and every other poem prior to it
obs.v<-vector()
for (i in 2:nrow(lower_tri)){
sub<-lower_tri[i,]
sub<-sub[!is.na(sub)]
mean.work<-mean(sub)
obs.v<-append(obs.v, mean.work)
}
#smooth using rolling mean, window of ten poems
obs.roll<-rollmean(obs.v, k=10, na.pad = TRUE) #set k for window, here 10
#then permute matrix n times
#this allows you to test for significance, i.e. when is a change in similarity significant
#by permuting the actual matrix we see the extent to which an observed value exceeds the expected range of values up to that point
#here I use a bootstrapping approach that samples with replacement
#I sample from the entire matrix because it makes it harder for a poem to be identified as vulnerable
#the idea is that given the overall variability throughout the poet's entire career, which moments look more vulnerable
final.df<-NULL
perm.df<-NULL
for (k in 2:nrow(lower_tri)){
sub<-lower_tri
#alternative = subset original matrix until poem k
#sub<-lower_tri[1:k, 1:k]
#turn all values into a single vector
sub.v<-unlist(as.data.frame(sub), use.names=FALSE)
sub.v<-sub.v[!is.na(sub.v)]
#sample from those values with replacement N times
mean.v<-vector()
for (j in 1:200){
#sample values with replacement and keep as many as there are poems prior to poem at time k
sample.v<-sample(sub.v, (k-1), replace = TRUE)
#take the mean similarity for this sample (i.e. an imaginary poem)
mean.sample<-mean(sample.v)
#store in a vector
mean.v<-append(mean.v, mean.sample)
}
#append this vector to table
#this table represents the permuted similarity scores of every poem
#i.e. it represents the range of possible values that a poet has available at that time in his/her career
perm.df<-cbind(perm.df, mean.v)
}
#find the values in the 99th and 1st percentile for each poem at time k
perm.high<-apply(perm.df, 2, function(x) quantile(x, c(.99)))
perm.low<-apply(perm.df, 2, function(x) quantile(x, c(.01)))
#smooth
high.roll<-rollmean(perm.high, k=10, na.pad = TRUE)
low.roll<-rollmean(perm.low, k=10, na.pad = TRUE)
#create data frame; all vectors have length nrow-1 (poem 1 has no predecessor)
poem.ids<-row.names(cosine.matrix)[2:nrow(cosine.matrix)]
final.df<-data.frame(high.roll, low.roll, obs.roll, poem.ids)
#export graph as separate file
#requires pre-existing directory
file.name<-paste(filenames0[m], "_Vulnerability_Graph.pdf", sep="")
dir<-paste(homedir, vuln.graph.dir, sep="/")
setwd(dir)
#graph (type "l" = line plot; "line" was invalid and only worked via truncation)
pdf(file.name,width=8,height=6)
plot(final.df$obs.roll, type="l", main = author, xlab = "Poems", ylab="Similarity")
lines(final.df$high.roll, lty=2)
lines(final.df$low.roll, lty=2)
dev.off()
#export table of values
#this looks for those moments that are above the expected ranges
#it outputs a table of values above and below the significance bands
#this allows you to observe where the moments of vulnerability are
#all positive values are above/below the sig bands
final.df$diff.high<-final.df$obs.roll-final.df$high.roll
final.df$diff.low<-final.df$low.roll-final.df$obs.roll
#this requires a new directory; see above under "vulndir"
file.name<-paste(filenames0[m], "_Vulnerability_Table.csv", sep="")
dir<-paste(homedir, vulndir, sep="/")
setwd(dir)
write.csv(final.df, file=file.name)
#vuln.score1 = % of poems that have less than 1% chance of being w/in normal range of similarity to rest of poems
#in other words = % poems that are significantly dissimilar from the rest of the corpus up to that point
vuln.score1<-length(which(final.df$diff.low > 0))/nrow(final.df)
#vuln.score2 = ratio of similar to dissimilar moments (X has 1.4x as many overly similar moments to overly dissimilar ones)
vuln.score2<-length(which(final.df$diff.high > 0))/length(which(final.df$diff.low > 0))
#vuln.final.quart = percentage of vulnerable poems that fall in final quarter of all poems
#this allows you to observe extent to which vulnerability is a late phenomenon
all.vuln1<-which(final.df$diff.low > 0)
vuln.final.quart<-length(all.vuln1[all.vuln1>nrow(final.df)-(nrow(final.df)/4)])/length(all.vuln1)
gender<-meta$gender[m]
century<-meta$century[m]
birth<-as.numeric(meta$birth.date[m])
death<-as.numeric(meta$death.date[m])
temp.df<-data.frame(author, birth, death, gender, century, vuln.score1, vuln.score2, vuln.final.quart)
vulnerability.local<-rbind(vulnerability.local, temp.df)
}
setwd(homedir)
write.csv(vulnerability.local, file="Vulnerability_Local_French.csv", row.names = FALSE)
### Fig. 6.5 ####
#Local vulnerability in four poets
#input are local vulnerability tables produced by Script 3.2
library(gridExtra)
library(ggplot2)
#one per-poet vulnerability table per plot panel
#NOTE(review): `c` masks base::c for the rest of this session — works here but
#fragile; any later bare use of c(...) still resolves to the function, but the
#name collision is easy to trip over.
a<-read.csv("Vulnerability_Table_Hugo.csv")
b<-read.csv("Vulnerability_Table_Droste.csv")
c<-read.csv("Vulnerability_Table_Rich.csv")
d<-read.csv("Vulnerability_Table_Auden.csv")
#Four similarity-over-time panels (observed rolling mean plus dotted 99%/1%
#permutation bands), arranged in a 2x2 grid.
#Fix: columns are referenced bare inside aes() instead of via `a$...` etc.;
#ggplot2 documentation warns against `$` inside aes() (it bypasses the layer's
#data). Behavior here is identical since each layer uses the plot's own data.
tit.size<-14
p1<-ggplot(a, aes(x=X, y=obs.roll/100)) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5, size=tit.size), plot.caption = element_text(hjust = 0.5, size=10), panel.grid.minor = element_blank(), panel.grid.major = element_blank()) +
geom_line() +
geom_line(aes(x=X, y=high.roll/100), linetype="dotted") +
geom_line(aes(x=X, y=low.roll/100), linetype="dotted") +
labs(title = "Victor Hugo", x="Poems over time", y="Similarity")
p2<-ggplot(b, aes(x=X, y=obs.roll/100)) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5, size=tit.size), plot.caption = element_text(hjust = 0.5, size=10), panel.grid.minor = element_blank(), panel.grid.major = element_blank()) +
geom_line() +
geom_line(aes(x=X, y=high.roll/100), linetype="dotted") +
geom_line(aes(x=X, y=low.roll/100), linetype="dotted") +
labs(title = "Droste-Hülshoff", x="Poems over time", y="Similarity")
p3<-ggplot(c, aes(x=X, y=obs.roll/100)) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5, size=tit.size), plot.caption = element_text(hjust = 0.5, size=10), panel.grid.minor = element_blank(), panel.grid.major = element_blank()) +
geom_line() +
geom_line(aes(x=X, y=high.roll/100), linetype="dotted") +
geom_line(aes(x=X, y=low.roll/100), linetype="dotted") +
labs(title = "Adrienne Rich", x="Poems over time", y="Similarity")
p4<-ggplot(d, aes(x=X, y=obs.roll/100)) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5, size=tit.size), plot.caption = element_text(hjust = 0.5, size=10), panel.grid.minor = element_blank(), panel.grid.major = element_blank()) +
geom_line() +
geom_line(aes(x=X, y=high.roll/100), linetype="dotted") +
geom_line(aes(x=X, y=low.roll/100), linetype="dotted") +
#ylim(c(0, 0.22)) +
labs(title = "W.H. Auden", x="Poems over time", y="Similarity")
#library(gridExtra)
grid.arrange(p1, p2, p3, p4, ncol=2)
#ratio of local to global vulnerability in all poets
#Align the two summary tables by (language, author), z-score both measures,
#and derive combined/contrast scores for the four career-type quadrants.
a<-read.csv("Vulnerability_Local_All.csv")
b<-read.csv("Vulnerability_Global_All.csv")
#sort both tables identically so rows line up poet-for-poet
a<-a[order(as.character(a$language), as.character(a$author)),]
b<-b[order(as.character(b$language), as.character(b$author.name)),]
#take inverse of global vulnerability
#in original measure higher = less vulnerable, i.e. more of the network needs to be removed before splitting in two
glob<-1-b$vulnerability
#standardize both axes so they are comparable (scale() returns a 1-col matrix)
glob<-scale(glob, center = T, scale = T)
loc<-scale(a$vuln.score1, center = T, scale = T)
author<-a$author
label<-b$label2
#label<-b$author.name
#gender<-b$gender
all.df<-data.frame(author, label, glob, loc)
#combined = overall vulnerability; low.high/high.low = quadrant contrasts
all.df$combined<-all.df$glob+all.df$loc
all.df$low.high<-all.df$loc-all.df$glob
all.df$high.low<--all.df$loc+all.df$glob
#top-5 poets in each quadrant of the local/global plane
low.low<-all.df[order(all.df$combined)[1:5],]
low.high<-all.df[order(all.df$low.high)[1:5],]
high.low<-all.df[order(all.df$high.low)[1:5],]
high.high<-all.df[order(-all.df$combined)[1:5],]
#### Fig. 6.6 ####
### Career Types by two types of vulnerability ###
#Scatter of poets in the (local, global) vulnerability plane, axes crossing at
#the standardized mean. Fix: typo in the printed caption ("vulnerbality" ->
#"vulnerability").
#library("ggplot2")
ggplot(all.df, aes(x=loc, y=glob)) +
theme_bw() +
theme(plot.caption = element_text(hjust = 0.5, size=10), panel.grid.minor = element_blank(), panel.grid.major = element_blank()) +
geom_point(size=1) +
#stat_ellipse(linetype="dotted") +
#theme(legend.position="none") +
geom_hline(yintercept=0) +
geom_vline(xintercept=0) +
geom_text(aes(label=label), size=3, hjust=0.5, vjust=-0.5) +
scale_x_continuous(limits=c(-3, 3)) +
scale_y_continuous(limits=c(-3, 3)) +
labs(x="Local Vulnerability", y="Global Vulnerability", caption="\nFig. 6.6 Career types based on two types of poetic vulnerability.\n\nSource: Andrew Piper, Enumerations: Data and Literary Study (2018)")
#labs(x="Local Vulnerability", y="Global Vulnerability")
#6.6
#### Foote Novelty ####
#this script detects moments of significant change within a poet's corpus that are akin to "periods"
#rather than observe single moments that exceed expectation as in the vulnerability score above
#this script looks for larger windows when the amount of similarity decreases significantly
#stopword lists shipped with the lsa package
data(stopwords_de)
data(stopwords_en)
data(stopwords_fr)
#language settings: language1 feeds tm, language2 feeds the stemmer/LSA
language1<-c("French")
language2<-c("french")
#remove bad words
#problems<-c("drum", "habt", "hast", "ichs", "ists", "sei", "wär", "weimar", "zwei", "seite", "apparat", "datumsangaben")
problems<-vector()
#set directories
homedir<-paste("~/Documents/2. Books/Enumerations/Enumerations - Data and Code/Data/txtlab_POETRY_CW")
lexdir<-paste("PoetryAuthors_French")
phondir<-paste("PoetryAuthors_French_PHON")
posdir<-paste("PoetryAuthors_French_POS")
foote.dir<-paste("PoetryAuthors_French_FooteNovelty_Graphs")
setwd(homedir)
#one subdirectory per poet, parallel across the three representations
filenames0<-list.files(lexdir, full.names=FALSE)
filenames1<-list.files(posdir, full.names=FALSE)
filenames2<-list.files(phondir, full.names=FALSE)
#ingest metadata about poets' birth and death dates
meta<-read.csv("PoetryAuthors_French_Meta.csv")
#accumulators filled by the per-poet loop below
results.df<-NULL
periods.df<-NULL
#Per-poet Foote-novelty loop. Builds the same combined similarity matrix as
#the local-vulnerability loop, slides a checkerboard kernel along its diagonal
#to score windowed dissimilarity ("novelty"), permutes the matrix to get a
#significance band, and derives period statistics per poet.
#Fixes: (1) the data.frame() call at the bottom used stray trailing "+" signs
#as line continuations, turning the following arguments into unary-plus
#expressions and mangling the output column names (e.g. "X.year.first.period"
#instead of "year.first.period"); (2) the shortest-period test used
#which(x == min(x)), which yields a length>1 condition (an error in R >= 4.2)
#when periods tie — replaced with which.min(); (3) seq_along() instead of
#1:length().
for (m in seq_along(filenames0)) {
print(m)
final.df<-NULL
author<-as.character(meta$author[m])
#LEXICAL
dir<-paste(homedir, lexdir, sep = "/")
setwd(dir)
corpus1 <- VCorpus(DirSource(filenames0[m]), readerControl=list(language=language1))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1 <- tm_map(corpus1, removeWords, problems)
corpus1 <- tm_map(corpus1, stemDocument, language = language2)
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#poem lengths (pre-stopword-removal) used later to normalize similarity
scaling1<-rowSums(corpus1.matrix)
wordcount.df<-data.frame(scaling1)
#remove stopwords
corpus1 <- tm_map(corpus1, removeWords, stopwords(language1))
#remake dtm
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#tfidf
corpus.tfidf<-weightTfIdf(corpus1.dtm, normalize = TRUE)
corpus.tfidf.mat<-as.matrix(corpus.tfidf, stringsAsFactors=F)
#LSA
#SET STOPWORDS MANUALLY
dir<-paste(homedir, lexdir, filenames0[m], sep = "/")
nums<-c(1:5000)
myMatrix<-textmatrix(dir, stemming=TRUE, language=language2, minWordLength=2, maxWordLength=FALSE, minDocFreq=1, maxDocFreq=FALSE, minGlobFreq=FALSE, maxGlobFreq=FALSE, stopwords=stopwords_fr, vocabulary=NULL, phrases=NULL, removeXML=FALSE, removeNumbers=FALSE)
myMatrix<-myMatrix[!row.names(myMatrix) %in% nums,]
myMatrix<-myMatrix[!row.names(myMatrix) %in% problems,]
myMatrix<-lw_logtf(myMatrix) * gw_idf(myMatrix)
myLSAspace<-lsa(myMatrix, dims=dimcalc_share())
cosine.dist2<-simil(t(as.textmatrix(myLSAspace)), method="cosine")
cosine.matrix2<-as.matrix(cosine.dist2, stringsAsFactors=F)*100
#POS
dir<-paste(homedir, posdir, sep = "/")
setwd(dir)
corpus2 <- VCorpus(DirSource(filenames1[m]), readerControl=list(language="English"))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("\\$", "", x)))
#take 1-2 POSgrams
BigramTokenizer <- function(x) unlist(lapply(ngrams(words(x), 1:2), paste, collapse = " "), use.names = FALSE)
corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(tokenize = BigramTokenizer, wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
corpus2.tfidf<-weightTfIdf(corpus2.dtm, normalize = TRUE)
corpus2.tfidf.mat<-as.matrix(corpus2.tfidf, stringsAsFactors=F)
#PHONEMES
dir<-paste(homedir, phondir, sep = "/")
setwd(dir)
corpus3 <- VCorpus(DirSource(filenames2[m]))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf), removePunctuation = FALSE, stopwords = FALSE, tolower=FALSE))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
corpus3.tfidf<-weightTfIdf(corpus3.dtm, normalize = TRUE)
corpus3.tfidf.mat<-as.matrix(corpus3.tfidf, stringsAsFactors=F)
#add POS & PHONEMES to LEXICAL DTM
all.tfidf<-cbind(corpus.tfidf.mat, corpus2.tfidf.mat, corpus3.tfidf.mat)
#similarity matrix
cosine.dist1<-simil(all.tfidf, method = "cosine")
cosine.matrix1<-as.matrix(cosine.dist1, stringsAsFactors=F)*100
#adjust for length
cosine.matrix1<-((1/log(scaling1))*cosine.matrix1)*10
#combine LSA with LEX/POS/PHON
cosine.matrix1[is.na(cosine.matrix1)]<-0 #Lex/POS/PHON Matrix
cosine.matrix2[is.na(cosine.matrix2)]<-0 #LSA Matrix
cosine.matrix<-(cosine.matrix1+cosine.matrix2)/2
#run foote novelty
cormat<-cosine.matrix
cormat[cormat==0]<-100
#scale the values
cormat.scale <- apply(cormat, 2, function(x) (x-min(x))/(max(x)-min(x)))
#set window of poems to detect "period", here 20
win<-20
#make the matrix that slides across the similarity matrix to calculate windows of dissimilarity
#(checkerboard kernel: +1 quadrants on the diagonal, -1 off the diagonal)
a.pos<-rep(1, win/2)
a.neg<-rep(-1, win/2)
a<-append(a.pos, a.neg)
b<-append(a.neg, a.pos)
a.mat<-matrix(rep(a, win/2), ncol=win, byrow=T)
b.mat<-matrix(rep(b, win/2), ncol=win, byrow=T)
foote.m<-rbind(a.mat, b.mat)
foote.win<-win-1
#observed values
foote.obs<-vector()
for (i in 1:(ncol(cormat.scale)-foote.win)){
cormat.sub<-cormat.scale[i:(i+foote.win), i:(i+foote.win)]
comb.m<-cormat.sub*foote.m
foote.score<-sum(comb.m)
foote.obs<-append(foote.obs, foote.score)
}
#smooth the foote novelty values
foote.roll<-rollmean(foote.obs, k=20, na.pad=TRUE)
foote.roll<-append(rep(NA,9), foote.roll) #make same length as original poem collection
foote.roll<-append(foote.roll, rep(NA,10))
#permute n times
#here again we want to establish an outer bound above which we can say that the degree of dissimilarity
#is significantly more than random variation within the corpus
#this time we permute all values in the table but maintain the symmetrical structure
#first construct lower triangle as above
cormat<-cosine.matrix
get_lower_tri<-function(cormat){
cormat[upper.tri(cormat)] <- NA
return(cormat)
}
lower_tri <- get_lower_tri(cormat)
lower_tri[lower_tri == 0] <- NA
#then permute N times
perm.vec<-vector()
for (i in 1:200){
lower_tri_perm<-NULL
#permute every column of the similarity matrix
for (j in 2:ncol(lower_tri)){
#extract column
col.v<-unname(lower_tri[j:nrow(lower_tri), (j-1)])
if (length(col.v) > 1){
#permute the column
col.v<-sample(col.v)
}
#reconstruct the original matrix
#add NA to missing values
add.v<-rep(NA, (j-1))
col.v<-append(add.v, col.v)
#rebuild original matrix
lower_tri_perm<-cbind(lower_tri_perm,col.v)
}
#copy lower_tri_perm to upper triangle to make symmetrical matrix
lower_tri_perm[upper.tri(lower_tri_perm)]<-lower_tri_perm[lower.tri(lower_tri_perm)]
lower_tri_perm[is.na(lower_tri_perm)]<-100
#scale
perm.scale <- apply(lower_tri_perm, 2, function(x) (x-min(x))/(max(x)-min(x)))
#run foote novelty measure
#this calculates the foote novelty score for every time k in the poet's career
#it then stores those values in a single vector
#then take the 90th percentile score to create upper significance band
#amounts above this level only occur less than 10% of the time in all permutations
perm.obs<-vector()
for (k in 1:(ncol(perm.scale)-foote.win)){
perm.sub<-perm.scale[k:(k+foote.win), k:(k+foote.win)]
perm.sub.m<-perm.sub*foote.m
foote.score<-sum(perm.sub.m)
perm.obs<-append(perm.obs, foote.score)
}
perm.vec<-append(perm.vec, perm.obs)
}
#calc significance band
perm.high<-quantile(perm.vec, c(.90))[[1]]
foote.prep<-foote.roll[is.na(foote.roll) != TRUE]
#smooth again to remove minor peaks
foote2<-rollmean(foote.prep, k=floor(length(foote.obs)/10))
foote2.prep<-foote2
foote2.prep<-append(rep(NA, (length(foote.roll)-length(foote2))/2), foote2.prep)
foote2.prep<-append(foote2.prep, rep(NA, (length(foote.roll)-length(foote2))/2))
#find significant peaks
#note, peaks cannot be within 20 poems of each other. A period is pre-defined as 20+ poems
#this avoids minor variations and takes the largest peak
sig<-which(foote2.prep > perm.high)
if (length(sig) > 0){
diff.sig<-which(diff(sig) > 19)
diff.sig<-append(diff.sig, length(sig))
change.v<-vector()
if (length(diff.sig) > 0) {
start<-1
for (l in 1:(length(diff.sig))){
sub<-foote2.prep[start:sig[diff.sig[l]]]
sub[is.na(sub)]<-0
change.p<-(which(sub == max(sub))+(start-1))
change.v<-append(change.v, change.p)
start<-sig[diff.sig[l]]+1
}
}
#export plot
#requires pre-existing directory
#file.name<-paste(filenames0[m], "_FooteNovelty_Graph.pdf", sep="")
#dir<-paste(homedir, foote.dir, sep="/")
#setwd(dir)
#graph
#pdf(file.name,width=8,height=6)
#plot(foote2.prep, type="l", main = author, xlab = "Poems", ylab="Change") #rolling mean of this rolling mean
#abline(h=perm.high, lty=2)
#abline(v=change.v)
#dev.off()
#calculate measures related to inferred periods
#no. periods
periods<-length(change.v)+1
#filenames start with "DATE_...", so splitting on "_" puts the year in dates_1
author.meta<-data.frame(row.names(cormat))
colnames(author.meta)<-c("dates")
author.split<-cSplit(author.meta, "dates", sep="_", type.convert=FALSE)
periods.sub<-author.split[change.v,]
periods.temp<-data.frame(row.names(cormat)[change.v])
periods.temp<-cbind(author, periods.temp)
#this table lists poems at the peaks for further analysis
periods.df<-rbind(periods.df, periods.temp)
period.marks<-as.numeric(periods.sub$dates_1)
period.marks<-append(as.numeric(author.split$dates_1)[1], period.marks)
period.marks<-append(period.marks,as.numeric(author.split$dates_1)[length(author.split$dates_1)])
period.lengths<-diff(period.marks)
#calculate the poet's longest period
longest.period<-max(period.lengths)
#is the shortest period the final period?
#which.min() returns the FIRST index of the minimum, giving a scalar condition
#even when period lengths tie (which(x == min(x)) could return a vector)
if (which.min(period.lengths) == length(period.lengths)){
when.shortest.period<-c("final")
} else {
when.shortest.period<-c("not_final")
}
#length of the first period
duration.first.period<-as.numeric(periods.sub$dates_1[1])-as.numeric(author.split$dates_1[1])
year.first.period<-as.numeric(periods.sub$dates_1[1])
year.last.period<-as.numeric(periods.sub$dates_1[length(periods.sub$dates_1)])
#length of the last period
duration.last.period<-as.numeric(author.split$dates_1[length(author.split$dates_1)])-as.numeric(periods.sub$dates_1[length(periods.sub$dates_1)])
birth<-as.numeric(meta$birth.date[m])
death<-as.numeric(meta$death.date[m])
#age of poet when first period ends
age.first.period<-year.first.period-birth
#age of poet when last period starts
age.last.period<-year.last.period-birth
} else {
#no significant peaks: record zeroed/placeholder period statistics
periods<-0
duration.first.period<-0
year.first.period<-0
year.last.period<-0
duration.last.period<-0
birth<-as.numeric(meta$birth.date[m])
death<-as.numeric(meta$death.date[m])
age.first.period<-0
age.last.period<-0
when.shortest.period<-c("none")
longest.period<-0
}
gender<-meta$gender[m]
century<-meta$century[m]
#plain comma continuations here (the former trailing "+" signs silently became
#unary-plus expressions and corrupted the output column names)
temp.df<-data.frame(author, birth, death, gender, century, periods,
                    year.first.period, age.first.period, duration.first.period,
                    year.last.period, age.last.period, duration.last.period,
                    longest.period, when.shortest.period)
results.df<-rbind(results.df, temp.df)
}
#return to the data root and write the two summary tables
setwd(homedir)
#this table lists all of the features related to period detection and local vulnerability
write.csv(results.df, file="Foote_French_Results.csv")
#this table lists the poems that occur at the moment of period change for further analysis
write.csv(periods.df, file="Foote_French_Poems.csv")
#### Fig. 6.7 #####
#example of period detection using Foote novelty on the poetry of J.W. Goethe
#this is identical to Script 6.6 but takes a single poet as input and only runs period detection
#this takes as input a poet's collected works in the 3 representations used here (LEX, POS, PHON)
#make sure to set language settings throughout (stemming and stopwords)
library(proxy)
#includes a list of words to be removed
#input directory names
#requires 3 representations of every poet's work (lexical, syntactic, phonetic)
filenames0<-c("Goethe_LEX")
filenames1<-c("Goethe_POS")
filenames2<-c("Goethe_PHON")
#corpus-specific noise terms (OCR/edition artifacts) stripped before analysis
problems<-c("drum", "habt", "hast", "ichs", "ists", "sei", "wär", "weimar", "zwei", "seite", "apparat", "datumsangaben")
#LEXICAL
#standard tm pipeline: normalize, strip noise terms, stem (German)
corpus1 <- VCorpus(DirSource(filenames0), readerControl=list(language="German"))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1 <- tm_map(corpus1, removeWords, problems)
corpus1 <- tm_map(corpus1, stemDocument, language = "german")
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#poem lengths (pre-stopword-removal) used later to normalize similarity
scaling1<-rowSums(corpus1.matrix)
wordcount.df<-data.frame(scaling1)
#remove stopwords
corpus1 <- tm_map(corpus1, removeWords, stopwords("German"))
#remake dtm
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#tfidf
corpus.tfidf<-weightTfIdf(corpus1.dtm, normalize = TRUE)
corpus.tfidf.mat<-as.matrix(corpus.tfidf, stringsAsFactors=F)
#LSA
data(stopwords_de)
#row names that are pure numbers 1..5000 are dropped from the term matrix
nums<-c(1:5000)
myMatrix<-textmatrix(filenames0, stemming=TRUE, language="german", minWordLength=2, maxWordLength=FALSE, minDocFreq=1, maxDocFreq=FALSE, minGlobFreq=FALSE, maxGlobFreq=FALSE, stopwords=stopwords_de, vocabulary=NULL, phrases=NULL, removeXML=FALSE, removeNumbers=FALSE)
myMatrix<-myMatrix[!row.names(myMatrix) %in% nums,]
myMatrix<-myMatrix[!row.names(myMatrix) %in% problems,]
myMatrix<-lw_logtf(myMatrix) * gw_idf(myMatrix)
myLSAspace<-lsa(myMatrix, dims=dimcalc_share())
#make first similarity matrix based on semantic representation
cosine.dist2<-simil(t(as.textmatrix(myLSAspace)), method="cosine")
cosine.matrix2<-as.matrix(cosine.dist2, stringsAsFactors=F)*100
#POS
corpus2 <- VCorpus(DirSource(filenames1), readerControl=list(language="English"))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
#strip "$" from Penn-style tags so they survive tokenization
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("\\$", "", x)))
#take 1-2 POSgrams
BigramTokenizer <- function(x) unlist(lapply(ngrams(words(x), 1:2), paste, collapse = " "), use.names = FALSE)
#POS 1-2 gram document-term matrix and tf-idf weighting
corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(tokenize = BigramTokenizer, wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
corpus2.tfidf<-weightTfIdf(corpus2.dtm, normalize = TRUE)
corpus2.tfidf.mat<-as.matrix(corpus2.tfidf, stringsAsFactors=F)
#PHONEMES
#no lowercasing/punctuation removal: phoneme symbols are case-sensitive
corpus3 <- VCorpus(DirSource(filenames2))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf), removePunctuation = FALSE, stopwords = FALSE, tolower=FALSE))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
#tfidf
corpus3.tfidf<-weightTfIdf(corpus3.dtm, normalize = TRUE)
corpus3.tfidf.mat<-as.matrix(corpus3.tfidf, stringsAsFactors=F)
#combine POS & PHONEMES & LEXICAL Tables
all.tfidf<-cbind(corpus.tfidf.mat, corpus2.tfidf.mat, corpus3.tfidf.mat)
#make similarity matrix based on combined feature space
cosine.dist1<-simil(all.tfidf, method = "cosine")
cosine.matrix1<-as.matrix(cosine.dist1, stringsAsFactors=F)*100
#adjust for length
#down-weights similarity for long poems via 1/log(word count)
cosine.matrix1<-((1/log(scaling1))*cosine.matrix1)*10
#clean
cosine.matrix1[is.na(cosine.matrix1)]<-0
cosine.matrix2[is.na(cosine.matrix2)]<-0
#combine LSA similarity matrix with LEX/POS/PHON and take average similarity
cosine.matrix<-(cosine.matrix1+cosine.matrix2)/2
### foote novelty ###
#this script detects moments of significant change within a poet's corpus that are akin to "periods"
#rather than observe single moments that exceed expectation as in the vulnerability score above
#this script looks for larger windows when the amount of similarity decreases significantly
cormat<-cosine.matrix
# diagonal/identical entries were zeroed above; restore them to max similarity (100)
cormat[cormat==0]<-100
#scale the values
# min-max scale each column to [0, 1]
cormat.scale <- apply(cormat, 2, function(x) (x-min(x))/(max(x)-min(x)))
#set window of poems to detect "period", here 20
win<-20
#make the matrix that slides across the similarity matrix to calculate windows of dissimilarity
# checkerboard kernel (Foote 2000): +1 quadrants on the diagonal blocks,
# -1 quadrants off-diagonal, so the dot product is high when the two halves
# of the window are internally similar but dissimilar to each other
a.pos<-rep(1, win/2)
a.neg<-rep(-1, win/2)
a<-append(a.pos, a.neg)
b<-append(a.neg, a.pos)
a.mat<-matrix(rep(a, win/2), ncol=win, byrow=T)
b.mat<-matrix(rep(b, win/2), ncol=win, byrow=T)
foote.m<-rbind(a.mat, b.mat)
foote.win<-win-1
#observed values
# slide the kernel down the diagonal; one novelty score per window position
foote.obs<-vector()
for (i in 1:(ncol(cormat.scale)-foote.win)){
cormat.sub<-cormat.scale[i:(i+foote.win), i:(i+foote.win)]
comb.m<-cormat.sub*foote.m
foote.score<-sum(comb.m)
foote.obs<-append(foote.obs, foote.score)
}
#smooth the foote novelty values
foote.roll<-rollmean(foote.obs, k=20, na.pad=TRUE)
# pad so the smoothed series aligns index-for-index with the poem sequence
foote.roll<-append(rep(NA,9), foote.roll) #make same length as original poem collection
foote.roll<-append(foote.roll, rep(NA,10))
#permute n times
#here again we want to establish an outer bound above which we can say that the degree of dissimilarity
#is significantly more than random variation within the corpus
#this time we permute all values in the table but maintain the symmetrical structure
#first construct lower triangle as above
# start from the unscaled combined similarity matrix
cormat<-cosine.matrix
# Mask the upper triangle of a square similarity matrix with NA, returning a
# copy that keeps only the lower triangle (diagonal included). Used so the
# permutation below shuffles each pairwise value exactly once.
get_lower_tri <- function(cormat) {
  masked <- cormat
  masked[upper.tri(masked)] <- NA
  masked
}
# keep only the lower triangle and treat zeros (missing comparisons) as NA
lower_tri <- get_lower_tri(cormat)
lower_tri[lower_tri == 0] <- NA
#then permute N times
# Null model: shuffle the similarity values within each column of the lower
# triangle, rebuild a symmetrical matrix, and recompute Foote novelty scores.
# perm.vec accumulates the novelty scores from all permutations.
perm.vec<-vector()
for (i in 1:200){
print(i)
lower_tri_perm<-NULL
#permute every column of the similarity matrix
for (j in 2:ncol(lower_tri)){
#extract column
col.v<-unname(lower_tri[j:nrow(lower_tri), (j-1)])
if (length(col.v) > 1){
#permute the column
col.v<-sample(col.v)
}
#reconstruct the original matrix
#add NA to missing values
add.v<-rep(NA, (j-1))
col.v<-append(add.v, col.v)
#rebuild original matrix
lower_tri_perm<-cbind(lower_tri_perm,col.v)
}
#copy lower_tri_perm to upper triangle to make symmetrical matrix
#BUGFIX: the column loop above rebuilds only ncol-1 columns, so lower_tri_perm
#was n x (n-1); the old mirror step (m[upper.tri(m)] <- m[lower.tri(m)]) then
#assigned mismatched index lengths (recycling warnings) and in any case does
#not transpose values across the diagonal. Square the matrix with a final NA
#column, then mirror the lower triangle properly via t().
lower_tri_perm<-cbind(lower_tri_perm, rep(NA, nrow(lower_tri)))
lower_tri_perm[upper.tri(lower_tri_perm)]<-t(lower_tri_perm)[upper.tri(lower_tri_perm)]
# remaining NAs are the diagonal/self-comparisons: restore max similarity
lower_tri_perm[is.na(lower_tri_perm)]<-100
#scale
perm.scale <- apply(lower_tri_perm, 2, function(x) (x-min(x))/(max(x)-min(x)))
#run foote novelty measure
#this calculates the foote novelty score for every time k in the poet's career
#it then stores those values in a single vector
#then take the 90th percentile score to create upper significance band
#amounts above this level only occur less than 10% of the time in all permutations
perm.obs<-vector()
for (k in 1:(ncol(perm.scale)-foote.win)){
perm.sub<-perm.scale[k:(k+foote.win), k:(k+foote.win)]
perm.sub.m<-perm.sub*foote.m
foote.score<-sum(perm.sub.m)
perm.obs<-append(perm.obs, foote.score)
}
perm.vec<-append(perm.vec, perm.obs)
}
#calc significance band
# 90th percentile of the permuted novelty scores = upper significance bound
perm.high<-quantile(perm.vec, c(.90))[[1]]
foote.prep<-foote.roll[is.na(foote.roll) != TRUE]
#smooth again to remove minor peaks
# second smoothing pass with a window of ~10% of the career length
foote2<-rollmean(foote.prep, k=floor(length(foote.obs)/10))
foote2.prep<-foote2
# re-pad symmetrically so indices line up with the full poem sequence again
foote2.prep<-append(rep(NA, (length(foote.roll)-length(foote2))/2), foote2.prep)
foote2.prep<-append(foote2.prep, rep(NA, (length(foote.roll)-length(foote2))/2))
#find significant peaks
#note, peaks cannot be within 20 poems of each other. A period is pre-defined as 20+ poems
#this avoids minor variations and takes the largest peak
sig<-which(foote2.prep > perm.high)
if (length(sig) > 0){
# gaps of 20+ poems between significant indices separate distinct peaks
diff.sig<-which(diff(sig) > 19)
diff.sig<-append(diff.sig, length(sig))
change.v<-vector()
if (length(diff.sig) > 0) {
start<-1
for (l in 1:(length(diff.sig))){
# within each significant stretch, take the index of the maximum as the change point
sub<-foote2.prep[start:sig[diff.sig[l]]]
sub[is.na(sub)]<-0
change.p<-(which(sub == max(sub))+(start-1))
change.v<-append(change.v, change.p)
start<-sig[diff.sig[l]]+1
}
}
}
#plot Fig. 6.7
# smoothed novelty curve with the permutation significance band (dashed) and
# the detected change points (vertical lines, labeled with their year)
tit.size=14
foote.df<-data.frame(foote2.prep)
foote.df$index<-c(1:length(foote2.prep))
# NOTE(review): aes() refers to foote.df$foote2.prep rather than the bare
# column name; works here but is fragile ggplot2 style — confirm before reuse
ggplot(foote.df, aes(x=index, y=foote.df$foote2.prep/100)) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5, size=tit.size), plot.caption = element_text(hjust = 0.5, size=10), panel.grid.minor = element_blank(), panel.grid.major = element_blank()) +
geom_line() +
geom_hline(yintercept=perm.high/100, linetype="dashed") +
geom_vline(xintercept=change.v) +
# labels take the 4-digit year prefix of each change-point poem's filename
annotate("text", x=(change.v[1]-10), y = 21/100, srt=90, label=substring(row.names(cosine.matrix)[change.v][1], 1, 4))+
annotate("text", x=(change.v[2]-10), y = 21/100, srt=90, label=substring(row.names(cosine.matrix)[change.v][2], 1, 4))+
annotate("text", x=(change.v[3]-10), y = 21/100, srt=90, label=substring(row.names(cosine.matrix)[change.v][3], 1, 4))+
annotate("text", x=(change.v[4]-10), y = 21/100, srt=90, label=substring(row.names(cosine.matrix)[change.v][4], 1, 4))+
#labs(x="Poems over time", y="Change")
labs(x="Poems", y="Change", caption="\nFig. 6.7 Period predictions for J.W. Goethe's collected poems\n\nSource: Andrew Piper, Enumerations: Data and Literary Study (2018")
#6.7
### Table 6.1 ###
## Period Statistics ##
# summary statistics over the Foote-novelty results for all poets;
# column semantics inferred from names — verify against the CSV writer
a<-read.csv("FooteNovelty_Results_All.csv")
#% of poets w no period
length(which(a$periods == 0))/nrow(a)
#avg age when poet undergoes first period
mean(a$age.first.period)
median(a$age.first.period)
#mode of most common decade
# first digit of the age = decade of life (2x, 3x, ...)
table(substr(a$age.first.period, 1,1))
#duration of first period
mean(a$duration.first.period)
median(a$duration.first.period)
sort(a$duration.first.period)
#duration of longest period
mean(a$X.longest.period)
hist(a$X.longest.period)
t.test(a$X.longest.period)
#what % of poets have shortest period at end of life (if they have 2 or more periods)
sub<-a[a$periods > 1,]
length(which(sub$when.shortest.period == "final"))/nrow(sub)
#are late periods usually longer or shorter than early periods?
wilcox.test(a$duration.first.period, a$duration.last.period)
#what % of poets have majority of their vulnerable poems occurring late in life?
b<-read.csv("Vulnerability_Local_All.csv")
length(which(b$vuln.final.quart > .5))/nrow(b)
############# Section 3: Late Style ##################
#this set of scripts examines poets' late periods to understand the way aging and creativity intersect
#6.8
## Difficulty ##
#this script implements 3 different readability scores
#to assess the degree of difficulty over the course of a poet's career
#Make sure to change language below accordingly!!!!!
library(koRpus)
homedir<-paste("INSERT DIRECTORY")
lexdir<-paste("PoetryAuthors_German")
setwd(homedir)
filenames0<-list.files(lexdir, full.names=FALSE)
final.df<-NULL
# outer loop: one subdirectory per author
for (m in 1:length(filenames0)){
author<-filenames0[m]
language<-lexdir
dir1<-paste(homedir, lexdir,sep = "/")
dir2<-paste(homedir, lexdir,filenames0[m],sep = "/")
setwd(dir1)
filenames1<-list.files(filenames0[m], full.names = FALSE)
setwd(dir2)
# inner loop: one readability score per poem file
for (j in 1:length(filenames1)) {
title<-filenames1[j]
work<-scan(filenames1[j], what="character", quote="")
# strip digits (line numbers / dates embedded in the text)
work.clean<- gsub("\\d", "", work)
text.whole<-paste(work.clean, collapse=" ")
text.char<-as.String(text.whole)
tag.doc<-tokenize(text.char, format = "obj", lang="de") #CHANGE LANGUAGE!!!!!
#flesch.mod<-flesch(tag.doc)
# Tuldava readability measure (koRpus); Flesch/Fucks variants kept commented out
tuldava.mod<-tuldava(tag.doc, quiet=T)
#fucks.mod<-fucks(tag.doc)
#flesch.score<-flesch.mod @ Flesch $RE
tuldava.score<-tuldava.mod @ Tuldava $Tuldava
#fucks.score<-fucks.mod @ Fucks $Fucks
#temp.df<-data.frame(language, author, title, flesch.score, tuldava.score, fucks.score)
temp.df<-data.frame(language, author, title, tuldava.score)
final.df<-rbind(final.df, temp.df)
}
}
#write.csv(final.df, file="Difficulty_German_All.csv")
#Significance Test
# randomization test per author: is the late period (final quarter of poems)
# significantly more or less difficult than the rest of the career?
a<-read.csv("Difficulty_All.csv")
difficulty.df<-NULL
for (i in 1:nlevels(a$author)){
a.sub<-a[a$author == levels(a$author)[i],]
#define late period: here defined as final quarter of poet's output
final.quart<-round(nrow(a.sub)/4)
#create the actual two groups: late, notlate
diff.notlate<-a.sub[1:(nrow(a.sub)-final.quart),]
diff.late<-a.sub[(nrow(a.sub)-(final.quart-1)):nrow(a.sub),]
#calculate test statistic
# positive test = late period more difficult (higher Tuldava) than early
test<-median(diff.late$tuldava.score)-median(diff.notlate$tuldava.score)
#run randomization test
pred.v<-vector()
for (j in 1:1000){
#create fictional career by randomly shuffling the order of the poems (rows)
permute.m<-a.sub[sample(nrow(a.sub), nrow(a.sub)),]
#subset it by the same sample sizes of late, notlate in the actual data
late.m<-permute.m[1:final.quart,]
notlate.m<-permute.m[(final.quart+1):nrow(permute.m),]
#calculate test statistic
test.p<-median(late.m$tuldava.score)-median(notlate.m$tuldava.score)
#append to vector
pred.v<-append(pred.v, test.p)
}
#for each author label significance using a two-tailed test
#if test statistic is above the 97.5 percentile, then it is significantly high for the late period
if (test > quantile(pred.v, c(.975))[[1]]){
difficulty<-c("late")
#if test statistic is below the 2.5 percentile, then it is significantly low for the late period
} else if (test < quantile(pred.v, c(.025))[[1]]){
difficulty<-c("early")
#if neither of the above then there is no significant difference between late and early period in terms of the test statistic
} else {
difficulty<-c("neither")
}
#store the test statistic
late.tuldava.difference<-test
#what % of the career is the most difficulty located across a window of 20 poems
# position (as fraction of career) of the 20-poem window with maximal difficulty
max.difficulty.window.tuldava<-which(rollmean(a.sub$tuldava.score, k=20) == max(rollmean(a.sub$tuldava.score, k=20)))[[1]]/nrow(a.sub)
author<-levels(a$author)[i]
language<-as.character(a.sub$language[1])
temp.df<-data.frame(author, language, difficulty, late.tuldava.difference, max.difficulty.window.tuldava)
difficulty.df<-rbind(difficulty.df, temp.df)
}
#write.csv(difficulty.df, file="Difficulty_Results_All.csv")
#calculate scores based on difficulty results
a<-read.csv("Difficulty_Results_All.csv")
#what % of poets have significantly higher or lower difficulty late in their career?
length(which(a$difficulty != "neither"))/nrow(a)
#of this group, how many get more difficult (and not less)
length(which(a$difficulty == "late"))/nrow(a)
length(which(a$difficulty == "early"))/nrow(a)
#6.9
## Syntactic Irregularity ##
#this script takes as input poems rendered as parts of speech
#it calculates total tfidf score for each poem to measure the poems/windows where there
#is the greatest amount of syntactical irregularity/rarity
#in which documents do we see more frequent rare syntactical patterns?
#at what point in the career?
library(tm)
library(zoo)
language<-c("English")
filenames<-list.files("PoetryAuthors_English_POS", full.names = FALSE)
final.df<-NULL
for (m in 1:length(filenames)){
print(m)
#ingest the POS corpus
corpus2 <- VCorpus(DirSource(filenames[m]), readerControl=list(language="English")) #don't change language since using POS
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
# strip "$" from tags like PRP$ so they tokenize cleanly
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("\\$", "", x)))
#take 1-3 POSgrams
BigramTokenizer <- function(x) unlist(lapply(ngrams(words(x), 1:3), paste, collapse = " "), use.names = FALSE)
corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(tokenize = BigramTokenizer, wordLengths=c(1,Inf)))
# a poem's row-sum of tf-idf weights = how much rare syntax it contains
corpus2.tfidf<-weightTfIdf(corpus2.dtm, normalize = TRUE)
corpus2.tfidf.mat<-as.matrix(corpus2.tfidf, stringsAsFactors=F)
#separate by late period, defined as last quartile of all poems
final.quart<-round(nrow(corpus2.tfidf.mat)/4)
corpus.notlate<-corpus2.tfidf.mat[1:(nrow(corpus2.tfidf.mat)-final.quart),]
corpus.late<-corpus2.tfidf.mat[(nrow(corpus2.tfidf.mat)-(final.quart-1)):nrow(corpus2.tfidf.mat),]
#calculate the test statistic
test<-median(rowSums(corpus.late))-median(rowSums(corpus.notlate))
#run randomization test
pred.v<-vector()
for (j in 1:1000){
#create fictional career by randomly shuffling the order of the poems (rows)
permute.m<-corpus2.tfidf.mat[sample(nrow(corpus2.tfidf.mat), nrow(corpus2.tfidf.mat)),]
#subset it by the same sample sizes of late, notlate in the actual data
late.m<-permute.m[1:final.quart,]
notlate.m<-permute.m[(final.quart+1):nrow(permute.m),]
#calculate test statistic
test.p<-median(rowSums(late.m))-median(rowSums(notlate.m))
#append to vector
pred.v<-append(pred.v, test.p)
}
#for each author label significance using a two-tailed test
#if test statistic is above the 97.5 percentile, then it is significantly high for the late period
if (test > quantile(pred.v, c(.975))[[1]]){
irregularity<-c("late")
#if test statistic is below the 2.5 percentile, then it is significantly low for the late period
} else if (test < quantile(pred.v, c(.025))[[1]]){
irregularity<-c("early")
#if neither of the above then there is no significant difference between late and early period in terms of the test statistic
} else {
irregularity<-c("neither")
}
#store test statistic
late.irregularity.difference<-test
#what % of the career is the most syntactical diversity located across a window of 20 poems
all.df<-rowSums(corpus2.tfidf.mat)
max.irregularity.window<-which(rollmean(all.df, k=20) == max(rollmean(all.df, k=20)))[[1]]/nrow(corpus2.tfidf.mat)
author<-filenames[m]
temp.df<-data.frame(author, language, irregularity, late.irregularity.difference, max.irregularity.window)
final.df<-rbind(final.df, temp.df)
}
# run once per language, stashing each result before rebinding
#french<-final.df
#german<-final.df
english<-final.df
all<-rbind(french, german, english)
#write.csv(all, file="Syntactic_Irregularity_All.csv")
#create measures for syntactic irregularity
a<-read.csv("Syntactic_Irregularity_All.csv")
#what % of poets have significant change in syntactic patterns late in their career
length(which(a$irregularity != "neither"))/nrow(a)
#what % of poets increase / decrease
length(which(a$irregularity == "late"))/nrow(a)
length(which(a$irregularity == "early"))/nrow(a)
#how much more likely is the max window of irregularity to be in the late period than any other?
# Count poets whose maximal-irregularity window falls in each career quartile.
# BUGFIX: the original used strict < and > on both bounds, so values exactly at
# 0.25 / 0.5 / 0.75 fell into no quartile; the bins now partition (0, 1].
quart4<-length(which(a$max.irregularity.window > 0.75))
quart3<-length(which(a$max.irregularity.window <= 0.75 & a$max.irregularity.window > 0.5))
quart2<-length(which(a$max.irregularity.window <= 0.5 & a$max.irregularity.window > 0.25))
quart1<-length(which(a$max.irregularity.window <= 0.25))
# ratio of final-quartile count to the largest of the other quartiles
quart4/max(quart3, quart2, quart1)
#6.10
#### Vocabulary Richness #####
#whose vocabularies begin to decline in their late career?
# measures type-token ratio (TTR) on 500-word samples from the early vs late
# career and tests the difference with a randomization test
library("SnowballC")
library("koRpus")
lang.stem<-c("english")
lang<-c("PoetryAuthors_English")
dir0<-paste("INSERT DIRECTORY")
dir1<-paste("INSERT DIRECTORY", lang, sep="")
setwd(dir0)
filenames0<-list.files(lang, full.names=FALSE)
setwd(dir1)
# sample window: beg:(beg+s.size) yields 500 words per sample
s.size<-499
ttr.df<-NULL
for (i in 1:length(filenames0)) {
print(i)
setwd(dir1)
dir2<-paste(dir1, filenames0[i], sep="/")
filenames1<-list.files(filenames0[i], full.names=FALSE)
setwd(dir2)
final.quart<-round(length(filenames1)/4)
#create word vector of all early poems
early<-vector()
for (j in 1:(length(filenames1)-final.quart)){
work<-scan(filenames1[j], what="character", quote="", quiet=T)
early<-append(early, work)
}
# normalize: drop digits, lowercase, split on non-word chars, drop blanks, stem
early.clean<- gsub("\\d", "", early)
early.clean<- tolower(early.clean)
early.clean<-strsplit(early.clean, "\\W")
early.clean<-unlist(early.clean)
not.blanks<-which(early.clean!="")
early.v<-early.clean[not.blanks]
early.v<-wordStem(early.v, language=lang.stem)
#create word vector for late poems
late<-vector()
for (j in ((length(filenames1)-final.quart)+1):length(filenames1)){
work<-scan(filenames1[j], what="character", quote="", quiet=T)
late<-append(late, work)
}
late.clean<- gsub("\\d", "", late)
late.clean<- tolower(late.clean)
late.clean<-strsplit(late.clean, "\\W")
late.clean<-unlist(late.clean)
not.blanks<-which(late.clean!="")
late.v<-late.clean[not.blanks]
late.v<-wordStem(late.v, language=lang.stem)
#take 100 random contiguous samples from each and calculate TTR ratio
early.ttr.v<-vector()
for (k in 1:100) {
beg<-sample(1:(length(early.v)-s.size),1)
test<-early.v[beg:(beg+s.size)]
ttr.sample<-length(unique(test))/length(test)
early.ttr.v<-append(early.ttr.v, ttr.sample)
}
late.ttr.v<-vector()
for (k in 1:100) {
beg<-sample(1:(length(late.v)-s.size),1)
test<-late.v[beg:(beg+s.size)]
ttr.sample<-length(unique(test))/length(test)
late.ttr.v<-append(late.ttr.v, ttr.sample)
}
#calculate the test statistic
# positive = richer vocabulary (higher TTR) in the late period
test.statistic<-median(late.ttr.v)-median(early.ttr.v)
#run randomization test
#combine early and late vectors
all.v<-append(early.v, late.v)
#then rerun random samples on fictional corpus
pred.v<-vector()
for (m in 1:1000) {
#create fictional early samples
early.ttr.v<-vector()
for (k in 1:100) {
beg<-sample(1:(length(all.v)-s.size),1)
test<-all.v[beg:(beg+s.size)]
ttr.sample<-length(unique(test))/length(test)
early.ttr.v<-append(early.ttr.v, ttr.sample)
}
#create fictional late samples
late.ttr.v<-vector()
for (k in 1:100) {
beg<-sample(1:(length(all.v)-s.size),1)
test<-all.v[beg:(beg+s.size)]
ttr.sample<-length(unique(test))/length(test)
late.ttr.v<-append(late.ttr.v, ttr.sample)
}
#calculate test statistic
perm.test<-median(late.ttr.v)-median(early.ttr.v)
#store for every permutation
pred.v<-append(pred.v, perm.test)
}
#if test statistic is above the 97.5 percentile, then it is significantly high for the late period
if (test.statistic > quantile(pred.v, c(.975))[[1]]){
richness<-c("late")
#if test statistic is below the 2.5 percentile, then it is significantly low for the late period
} else if (test.statistic < quantile(pred.v, c(.025))[[1]]){
richness<-c("early")
#if neither of the above then there is no significant difference between late and early period in terms of the test statistic
} else {
richness<-c("neither")
}
#store test statistic
late.richness.difference<-test.statistic
author<-filenames0[i]
language<-lang.stem
temp.df<-data.frame(author, language, richness, late.richness.difference)
ttr.df<-rbind(ttr.df, temp.df)
}
# run once per language, stashing each result before rebinding
#german<-ttr.df
#french<-ttr.df
#english<-ttr.df
all.df<-rbind(german, french, english)
#write.csv(all.df, "TTR_All_Late.csv")
#Examine all values
a<-read.csv("TTR_All_Late.csv")
#what % of poets have significantly higher or lower vocabulary richness late in their career?
length(which(a$richness != "neither"))/nrow(a)
#of this group, how many expand their vocabularies
sub<-a[a$richness != "neither",]
length(which(sub$richness == "late"))/nrow(a)
length(which(sub$richness == "early"))/nrow(a)
#6.11
## As a whole career (ignoring late v. early) ##
# single career-wide TTR estimate per poet: mean/median/sd of the TTR over
# 1000 random 500-word contiguous samples drawn from all poems pooled
lang.stem<-c("english")
lang<-c("PoetryAuthors_English")
dir0<-paste("INSERT DIRECTORY")
dir1<-paste("INSERT DIRECTORY", lang, sep="")
setwd(dir0)
filenames0<-list.files(lang, full.names=FALSE)
setwd(dir1)
s.size<-499
ttr.df<-NULL
for (i in 1:length(filenames0)) {
setwd(dir1)
dir2<-paste(dir1, filenames0[i], sep="/")
filenames1<-list.files(filenames0[i], full.names=FALSE)
setwd(dir2)
all<-vector()
for (j in 1:length(filenames1)){
work<-scan(filenames1[j], what="character", quote="")
all<-append(all, work)
}
# same normalization pipeline as in 6.10: digits out, lowercase, split, stem
all.clean<- gsub("\\d", "", all)
all.clean<- tolower(all.clean)
all.clean<-strsplit(all.clean, "\\W")
all.clean<-unlist(all.clean)
not.blanks<-which(all.clean!="")
all.v<-all.clean[not.blanks]
all.v<-wordStem(all.v, language=lang.stem)
ttr.v<-vector()
for (k in 1:1000) {
beg<-sample(1:(length(all.v)-s.size),1)
test<-all.v[beg:(beg+s.size)]
ttr.sample<-length(unique(test))/length(test)
ttr.v<-append(ttr.v, ttr.sample)
}
ttr.avg<-mean(ttr.v)
ttr.med<-median(ttr.v)
ttr.sd<-sd(ttr.v)
ttr.temp<-data.frame(filenames0[i], ttr.avg, ttr.med, ttr.sd)
ttr.df<-rbind(ttr.df, ttr.temp)
}
#write.csv(ttr.df, file="TTR_English.csv")
#6.12
### Concreteness Score ###
library(tm)
library(SnowballC)
# this measures the extent to which the ratio between things and abstractions is
# higher / lower in the late period
# it takes as input files that have been transformed in the following 2 steps:
#1. only nouns are kept
#2. those nouns are translated into their hypernym representations using the first most common word sense
#it compares hypernyms represented as "physical entities" or "abstractions" and compares their ratio in the early and late period
#the more physical entities there are relative to abstractions the more "concrete" a poem is said to be
#thus for each period we get a single ratio of things to abstractions
#we then use a permutation to test whether the difference between these ratios in the late period is random fluctuation or signals a significant difference either high or low
#change languages accordingly
#English
language1<-c("English")
language2<-c("english")
# hypernym tokens marking the two poles of the concreteness ratio
physical.word<-c("physical_entity")
abstraction.word<-c("abstraction")
#French
# language1<-c("French")
# language2<-c("french")
# physical.word<-c("chose")
# abstraction.word<-c("abstraction")
#German
#language1<-c("German")
#because the German wordnet is differently configured I compare "Objekt" and any form of "kognitiv"
#this is a very close approximation to physical entity and abstraction (all abstractions usually contain a label for feeling and/or cognition, and all feelings in German also contain the stem for kognitiv)
#the code must be changed specifically in the script below; marked accordingly.
#select appropriate directory
filenames<-list.files("PoetryAuthors_English_Hypernyms", full.names = FALSE)
#run
final.df<-NULL
for (m in 1:length(filenames)){
print(m)
corpus3 <- VCorpus(DirSource(filenames[m]), readerControl=list(language=language1))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3 <- tm_map(corpus3, content_transformer(tolower))
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf)))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
#divide periods
final.quart<-round(nrow(corpus3.matrix)/4)
rest<-nrow(corpus3.matrix)-final.quart
#find observed score
early.act<-corpus3.matrix[1:rest,]
keep1<-which(colSums(early.act) > 0)
early.act<-early.act[,keep1]
late.act<-corpus3.matrix[(nrow(corpus3.matrix)-final.quart+1):nrow(corpus3.matrix),]
keep2<-which(colSums(late.act) > 0)
late.act<-late.act[,keep2]
#for late period
# ratio = count of "physical_entity" tokens / count of "abstraction" tokens
late.ratio.actual<-unname(colSums(late.act)[which(colnames(late.act) == physical.word)])/unname(colSums(late.act)[which(colnames(late.act) == abstraction.word)])
#for German
#late.ratio.actual<-unname(colSums(late.act)[which(colnames(late.act) == "objekt")])/sum(unname(colSums(late.act)[grep("kognitiv", colnames(late.act))]))
#for early period
early.ratio.actual<-unname(colSums(early.act)[which(colnames(early.act) == physical.word)]/colSums(early.act)[which(colnames(early.act) == abstraction.word)])
#for German
#early.ratio.actual<-unname(colSums(early.act)[which(colnames(early.act) == "objekt")])/sum(unname(colSums(early.act)[grep("kognitiv", colnames(early.act))]))
#test statistic
test<-late.ratio.actual-early.ratio.actual
#ratio of late to early
#the higher the ratio the more the late period increases its use of things rather than abstractions
#it does not mean that there are absolutely more things (its possible to use more things relatively while still using fewer things overall)
#in order to test for significance we will permute the overall matrix N times with replacement
#do we see this degree of change between early and late in less than 5% of cases?
#if so, then we conclude that there is significant difference in the concreteness in the late period
#permute
pred.v<-vector()
for (i in 1:1000){
#permute the whole matrix
permute.m<-corpus3.matrix[sample(nrow(corpus3.matrix), nrow(corpus3.matrix)),]
#subset it to the same size as the actual periods
early.m<-permute.m[(final.quart+1):nrow(permute.m),]
late.m<-permute.m[1:final.quart,]
#calculate a predicted concrete/abstraction ratio
late.ratio.pred<-colSums(late.m)[which(colnames(late.m) == physical.word)][[1]]/colSums(late.m)[which(colnames(late.m) == abstraction.word)][[1]]
early.ratio.pred<-colSums(early.m)[which(colnames(early.m) == physical.word)][[1]]/colSums(early.m)[which(colnames(early.m) == abstraction.word)][[1]]
#for German
#late.ratio.pred<-unname(colSums(late.m)[which(colnames(late.m) == "objekt")])/sum(unname(colSums(late.m)[grep("kognitiv", colnames(late.m))]))
#early.ratio.pred<-unname(colSums(early.m)[which(colnames(early.m) == "objekt")])/sum(unname(colSums(early.m)[grep("kognitiv", colnames(early.m))]))
#test statistic
test.p<-late.ratio.pred-early.ratio.pred
#store
pred.v<-append(pred.v, test.p)
}
#test for significance
#question 1: does the late period have a significantly higher ratio of things to abstractions than by chance?
if (test > quantile(pred.v, c(.975))[[1]]){
concreteness<-c("late")
#question 2: does the late period have a significantly lower ratio of things to abstractions than by chance?
} else if (test < quantile(pred.v, c(.025))[[1]]){
concreteness<-c("early")
#or is there no difference between late and early period in terms of the thing/abstraction ratio
} else {
concreteness<-c("neither")
}
late.concreteness.diff<-test
author<-filenames[m]
language<-language1
temp.df<-data.frame(author, language, concreteness, late.concreteness.diff)
final.df<-rbind(final.df, temp.df)
}
# run once per language, stashing each result before rebinding
#german<-final.df
#french<-final.df
english<-final.df
all<-rbind(german,french,english)
write.csv(all, file="Concreteness_All.csv", row.names = F)
#make calculations about all poets
a<-read.csv("Concreteness_All.csv")
#% of poets who have significant change in their concreteness in late period
length(which(a$concreteness != "neither"))/nrow(a)
#% of poets who have significant increase in late period
length(which(a$concreteness == "late"))/nrow(a)
#% of poets who have significant decrease
length(which(a$concreteness == "early"))/nrow(a)
#6.13
### Generality ###
#This looks at whether late writing is more general/particular than the earlier writing
#what % of words in the late period (group A) are hypernyms of words in the rest of the poetry (group B?
#i.e. how much more general is A than B?
#this counts all words in A that are hypernyms of B and sums
#to do so it loads regular words in A and the hypernyms of B and then subsets A %in% B
#it then compares the distributions of A IN B and B IN A to see which corpus is more
#general than the other.
#Version 1 = Winner Takes All
#if A has 20 "furnitures" and B has 1 "chair->hypernym furniture" this counts as 20
#because A has 20 words that are a hypernym of a word in B
#we want to observe how much generality (or specificity) a text has
#subset 1 according to hypernyms in 2
library("tm")
library("SnowballC")
library("RWeka")
root<-c("DIRECTORY")
setwd(root)
# group1 = plain-word corpus; group2 = its hypernym-translated counterpart
group1<-c("PoetryAuthors_English")
group2<-c("PoetryAuthors_English_Hypernyms")
dir1<-paste("DIRECTORY", group1, sep="")
dir2<-paste("DIRECTORY", group2, sep="")
filenames1<-list.files(group1, full.names = FALSE)
filenames2<-list.files(group2, full.names=FALSE)
language1<-c("English")
final.df<-NULL
for (i in 1:length(filenames1)){
print(i)
#load regular words
setwd(dir1)
corpus1 <- VCorpus(DirSource(filenames1[i]), readerControl=list(language=language1))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#load hypernyms
setwd(dir2)
corpus2 <- VCorpus(DirSource(filenames2[i]), readerControl=list(language=language1))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
corpus2 <- tm_map(corpus2, content_transformer(tolower))
# multiword hypernyms are underscore-joined in the source files
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("_", " ", x)))
corpus2.dtm<-DocumentTermMatrix(corpus2, control=list(wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
#divide into early and late
final.quart<-round(nrow(corpus2.matrix)/4) #this is the definition of the late period
#early hypernyms (no this is not mislabeled)
late.hyp<-corpus2.matrix[(nrow(corpus2.matrix)-(final.quart-1)):nrow(corpus2.matrix),]
late.hyp<-late.hyp[,colSums(late.hyp) != 0]
early.reg<-corpus1.matrix[1:(nrow(corpus1.matrix)-final.quart),]
early.reg<-early.reg[,colSums(early.reg) != 0]
#late hypernyms (ditto)
late.reg<-corpus1.matrix[(nrow(corpus1.matrix)-(final.quart-1)):nrow(corpus1.matrix),]
late.reg<-late.reg[,colSums(late.reg) != 0]
early.hyp<-corpus2.matrix[1:(nrow(corpus2.matrix)-final.quart),]
early.hyp<-early.hyp[,colSums(early.hyp) != 0]
#calculate test statistic = late hypernymy minus early hypernymy
#hypernymy = % reg words of corpus A in hypernyms of corpus B
late.actual<-sum(late.reg[,which(colnames(late.reg) %in% colnames(early.hyp))])/sum(late.reg)
early.actual<-sum(early.reg[,which(colnames(early.reg) %in% colnames(late.hyp))])/sum(early.reg)
test<-late.actual-early.actual
#run randomization test
pred.v<-vector()
for (j in 1:1000){
#permute both samples
# NOTE(review): the two matrices are shuffled independently, so a permuted
# "poem" pairs regular words and hypernyms from different poems — confirm
# this is the intended null model
hyp.perm<-corpus2.matrix[sample(nrow(corpus2.matrix), nrow(corpus2.matrix)),]
reg.perm<-corpus1.matrix[sample(nrow(corpus1.matrix), nrow(corpus1.matrix)),]
#early hypernyms (no this is not mislabeled)
late.hyp<-hyp.perm[(nrow(hyp.perm)-(final.quart-1)):nrow(hyp.perm),]
late.hyp<-late.hyp[,colSums(late.hyp) != 0]
early.reg<-reg.perm[1:(nrow(reg.perm)-final.quart),]
early.reg<-early.reg[,colSums(early.reg) != 0]
#late hypernyms (ditto)
late.reg<-reg.perm[(nrow(reg.perm)-(final.quart-1)):nrow(reg.perm),]
late.reg<-late.reg[,colSums(late.reg) != 0]
early.hyp<-hyp.perm[1:(nrow(hyp.perm)-final.quart),]
early.hyp<-early.hyp[,colSums(early.hyp) != 0]
#calculate test statistic = late hypernymy minus early hypernymy
#hypernymy = % reg words of corpus A in hypernyms of corpus B
late.p<-sum(late.reg[,which(colnames(late.reg) %in% colnames(early.hyp))])/sum(late.reg)
early.p<-sum(early.reg[,which(colnames(early.reg) %in% colnames(late.hyp))])/sum(early.reg)
test.p<-late.p-early.p
pred.v<-append(pred.v,test.p)
}
if (test > quantile(pred.v, c(.975))[[1]]){
generality<-c("late")
#question 2: does the late period have a significantly lower ratio of things to abstractions than by chance?
} else if (test < quantile(pred.v, c(.025))[[1]]){
generality<-c("early")
#or is there no difference between late and early period in terms of the thing/abstraction ratio
} else {
generality<-c("neither")
}
late.generality.diff<-test
author<-filenames1[i]
language<-language1
temp.df<-data.frame(author, language, generality, late.generality.diff)
final.df<-rbind(final.df, temp.df)
}
# run once per language, stashing each result before rebinding
#german<-final.df
#french<-final.df
english<-final.df
all<-rbind(german, french, english)
#write.csv(all, file="Generality_All.csv")
#make comparisons
a<-read.csv("Generality_All.csv")
#% of poets who show significant kind of change in their late career
length(which(a$generality != "neither"))/nrow(a)
#% increase
length(which(a$generality == "late"))/nrow(a)
#% decrease
length(which(a$generality == "early"))/nrow(a)
#6.14
### Fig. 6.8 ####
### Comparing poetic careers by all variables of challengingness
#load tables
a<-read.csv("Difficulty_Results_All.csv")
b<-read.csv("Syntactic_Irregularity_All.csv")
c<-read.csv("TTR_All_Late.csv")
d<-read.csv("Generality_All.csv")
e<-read.csv("Concreteness_All.csv")
# align all five tables on author before cbinding their columns
a<-a[order(a$author),]
b<-b[order(b$author),]
c<-c[order(c$author),]
d<-d[order(d$author),]
e<-e[order(e$author),]
# sanity check: should return integer(0) if the author orders match
which(a$author != c$author)
#combine late period designations
all.late<-data.frame(a$author, a$difficulty, b$irregularity, c$richness, d$generality, e$concreteness)
late.score<-vector()
for (i in 1:nrow(all.late)){
sub<-all.late[i,]
# "late" counts for difficulty/irregularity/richness/generality; for
# concreteness the challenging direction is "early" (more abstract late)
late.s1<-length(which(sub[1,2:5] == "late"))
late.s2<-length(which(sub[1,6] == "early"))
late.s<-late.s1+late.s2
late.score<-append(late.score, late.s)
}
all.late$late.score<-late.score
# % of poets with 2+ significant late-style signals
length(which(all.late$late.score > 1))/nrow(all.late)
#combine scores
# concreteness diff is negated so that higher = more abstract (challenging)
all<-data.frame(a$author, a$late.tuldava.difference, b$late.irregularity.difference, c$late.richness.difference, d$late.generality.diff, -e$late.concreteness.diff)
row.names(all)<-all$a.author
all<-all[,-1]
colnames(all)<-c("Difficulty", "Irregularity", "Richness", "Generality", "Abstraction")
#scale scores
all.norm<-apply(all, 2, function (x) scale(x, center=T, scale=T))
row.names(all.norm)<-row.names(all)
#subset by those who have significant late changes
#all.sub<-all.norm[row.names(all.norm) %in% all.late$a.author[all.late$late.score > 1],]
all.sub<-read.csv("BiPlot.csv")
#names removed names for visualization
row.names(all.sub)<-all.sub$name.limit
all.sub<-all.sub[,-1]
all.sub<-all.sub[,-1]
pca<-princomp(all.sub)
biplot(pca, col=c("black", "gray50"), xlim=c(-0.6, 0.6), xlab="PC1", ylab="PC2")
biplot(pca, col=c("black", "gray50"), xlim=c(-0.6, 0.6), xlab="", ylab="")
title(sub="\n\nFig. 6.8 Relationship of poets late periods to features of stylistic challengingness\n\nSource: Andrew Piper, Enumerations: Data and Literary Study (2018)")
#library(ggfortify)
#autoplot(prcomp(all.sub), data = all.sub, loadings = TRUE, loadings.label = TRUE, lty = 3,
#         label=T, shape=F, loadings.colour = "#000000", loadings.label.colour = "#606060")
### Fig. 6.9 ###
#Wanda Coleman Local Vulnerability Graph
#this is drawn from the outputs of Script 3.2 above
a<-read.csv("WandaColeman_Vulnerability_Table.csv")
ggplot(a, aes(x=X, y=obs.roll/100)) +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5), plot.caption = element_text(hjust = 0.5, size=10), panel.grid.minor = element_blank(), panel.grid.major = element_blank()) +
geom_line() +
geom_line(aes(x=X, y=a$high.roll/100), linetype="dotted") +
geom_line(aes(x=X, y=a$low.roll/100), linetype="dotted") +
geom_vline(xintercept = 596) +
geom_vline(xintercept = 766) +
ylim(0.025, 0.08) +
#labs(x="Poems over time", y="Similarity")
labs(x="Poems over time", y="Similarity", caption="\nFig. 6.9 Local vulnerability in the poetry of Wanda Coleman.\nVertical bars represent the collection Bathwater Wine (1998)\n\nSource: Andrew Piper, Enumerations: Data and Literary Study (2018)")
### Wanda Coleman Measures ###
#6.15
### Sentence Length ###
#comparing average lengths of sentences in Coleman's vulnerable and non-vulnerable work.
#NOTE: If this breaks, quit and restart R -- packages above will mask features for NLP packages and have them not work
require("NLP")
library("openNLP")
library("openNLPdata")
#Step 1: Run measure
sent_token_annotator <- Maxent_Sent_Token_Annotator(language = "en")
#word_token_annotator <- Maxent_Word_Token_Annotator(language = "en")
#pos_tag_annotator <- Maxent_POS_Tag_Annotator(language = "en")
setwd("~/Documents/2. Books/Enumerations/Enumerations - Chap 6 (Corpus)/Corpus - Graphs and Tables")
filenames<-list.files("WandaColeman", pattern="*.txt", full.names=FALSE)
setwd("~/Documents/2. Books/Enumerations/Enumerations - Chap 6 (Corpus)/Corpus - Graphs and Tables/WandaColeman")
#establish function to split sentences into words to assess sentence length
# Count the whitespace-delimited words in a sentence.
#
# @param x A character string (one sentence).
# @return Integer count of non-empty, space-separated tokens.
#
# IMPROVED: the original returned its value via the invisible result of a
# final assignment (`x<-length(x)`); the count is now the explicit last
# expression, and the repeated reuse of `x` is removed.
splitSentence <- function(x) {
  words <- unlist(strsplit(x, " "))
  # Drop empty tokens produced by leading/trailing or repeated spaces.
  words <- words[words != ""]
  length(words)
}
#run across all poems
# Compute the mean sentence length (in words) of every poem file.
# NOTE(review): rbind-in-a-loop grows length.df quadratically; acceptable for
# a corpus of this size but a list + do.call(rbind, ...) would scale better.
length.df<-NULL
for (i in 1:length(filenames)) {
work<-scan(filenames[i], what="character", quote="")
work.clean<- gsub("\\d", "", work)
text.whole<-paste(work.clean, collapse=" ") # collapse into single chunk
text.char<-as.String(text.whole)
# openNLP sentence annotator: a1 holds the sentence spans.
a1 <- annotate(text.char, sent_token_annotator)
sentences<-text.char[a1]
length.v<-unlist(lapply(sentences, splitSentence))
mean.len<-mean(length.v)
poem<-filenames[i]
temp.df<-data.frame(poem, mean.len)
length.df<-rbind(length.df, temp.df)
}
#Step 2: Divide corpus in vulnerable and non-vulnerable poems and compare
a<-read.csv("WandaColeman_Vulnerability_Table.csv")
#find all poems that fall below the confidence interval for vulnerability
# diff.low > 0 marks "vulnerable" poems; diff.low < 0 marks the rest.
a.vuln<-a[a$diff.low > 0,]
a.vuln<-a.vuln[complete.cases(a.vuln),]
a.not<-a[a$diff.low < 0,]
a.not<-a.not[complete.cases(a.not),]
#subset length table by the vulnerable poems and not vulnerable poems and compare
length.vuln<-length.df[as.character(length.df$poem) %in% as.character(a.vuln$poems),]
length.not<-length.df[as.character(length.df$poem) %in% as.character(a.not$poems),]
median(length.not$mean.len)
median(length.vuln$mean.len)
#Step 3: Significance Test using bootstrapping
#take 1,000 samples of the not vulnerable work -- in how many case do you see a median value lower than the actual one?
med.v<-vector()
for (i in 1:1000){
# Resample the non-vulnerable poems to the size of the vulnerable set.
not.samp<-length.not[sample(nrow(length.not), nrow(length.vuln), replace = T), ]
med<-median(not.samp$mean.len)
med.v<-append(med.v, med)
}
# Compare the observed vulnerable median against the bootstrap 1st percentile.
quantile(med.v, c(0.01))
median(length.vuln$mean.len)
#6.16
#Punctuation per poem
# Measures punctuation marks per word for each poem.
library(stringr)
filenames<-list.files("WandaColeman", pattern="*.txt", full.names=FALSE)
punct.df<-NULL
for (i in 1:length(filenames)) {
work<-scan(filenames[i], what="character", quote="")
if (length(work) > 0){
# str_extract returns NA for tokens without one of the listed marks;
# NOTE(review): this counts at most one punctuation mark per token.
work.punct<-str_extract(work, "\\?|\\!|\\.|\\,|\\;|\\:|\\(|\\)")
punct<-which(!is.na(work.punct))
#calculate total words
work.lower<-tolower(work) # all lower case
work.words<-strsplit(work.lower, "\\W") # turn into a list of words
work.word.vector<-unlist(work.words) #turn into a vector
work.word.vector<-gsub("\\d", "", work.word.vector) #remove numbers
work.word.vector<-work.word.vector[work.word.vector != ""]
total.words<-length(work.word.vector)
# Punctuation marks per word.
punct.per<-length(punct)/total.words
poems<-filenames[i]
temp.df<-data.frame(poems, punct.per)
punct.df<-rbind(punct.df, temp.df)
}
}
#Step 2: Divide corpus in vulnerable and non-vulnerable poems and compare
a<-read.csv("WandaColeman_Vulnerability_Table.csv")
#find all poems that fall below the confidence interval for vulnerability
a.vuln<-a[a$diff.low > 0,]
a.vuln<-a.vuln[complete.cases(a.vuln),]
a.not<-a[a$diff.low < 0,]
a.not<-a.not[complete.cases(a.not),]
#subset length table by the vulnerable poems and not vulnerable poems and compare
punct.vuln<-punct.df[as.character(punct.df$poem) %in% as.character(a.vuln$poems),]
punct.not<-punct.df[as.character(punct.df$poem) %in% as.character(a.not$poems),]
# Ratio of median punctuation density, vulnerable vs. non-vulnerable.
median(punct.vuln$punct.per)/median(punct.not$punct.per)
#Step 3: Significance Test using bootstrapping
#take 1,000 samples of the not vulnerable work -- in how many case do you see a median value lower than the actual one?
med.v<-vector()
for (i in 1:1000){
not.samp<-punct.not[sample(nrow(punct.not), nrow(punct.vuln), replace = T), ]
med<-median(not.samp$punct.per)
med.v<-append(med.v, med)
}
quantile(med.v, c(0.99))
max(med.v)
median(punct.vuln$punct.per)
#6.17
#Vocabulary innovation
#what percentage of words appear in vulnerable poems that do not appear in non-vulnerable poems?
library(tm)
library(SnowballC)
# Build a stemmed, cleaned document-term matrix over the whole corpus.
corpus3 <- VCorpus(DirSource("WandaColeman"), readerControl=list(language="English"))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3 <- tm_map(corpus3, content_transformer(tolower))
corpus3 <- tm_map(corpus3, content_transformer(removePunctuation))
corpus3 <- tm_map(corpus3, content_transformer(removeNumbers))
corpus3 <- tm_map(corpus3, stemDocument, language = "english")
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf)))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
#Step 2: Divide corpus in vulnerable and non-vulnerable poems and compare
a<-read.csv("WandaColeman_Vulnerability_Table.csv")
#find all poems that fall below the confidence interval for vulnerability
a.vuln<-a[a$diff.low > 0,]
a.vuln<-a.vuln[complete.cases(a.vuln),]
a.not<-a[a$diff.low < 0,]
a.not<-a.not[complete.cases(a.not),]
#subset matrix by vuln and not vuln
# After subsetting rows, drop term columns that no longer occur in the subset.
vuln.m<-corpus3.matrix[row.names(corpus3.matrix) %in% as.character(a.vuln$poems),]
remove<-unname(which(colSums(vuln.m) == 0))
vuln.m<-vuln.m[,-remove]
not.m<-corpus3.matrix[row.names(corpus3.matrix) %in% as.character(a.not$poems),]
remove<-unname(which(colSums(not.m) == 0))
not.m<-not.m[,-remove]
#percent of words in vulnerable poems not in rest
actual<-length(which(!colnames(vuln.m) %in% colnames(not.m)))/ncol(vuln.m)
#number of new words in vulnerable poems
length(which(!colnames(vuln.m) %in% colnames(not.m)))
#number of new words as percentage of poems
length(which(!colnames(vuln.m) %in% colnames(not.m)))/nrow(vuln.m)
#permute
# Null distribution: shuffle poem rows, split into groups of the same sizes,
# and recompute the novelty share each time.
novelty.v<-vector()
for (i in 1:1000){
permute.m<-corpus3.matrix[sample(nrow(corpus3.matrix), nrow(corpus3.matrix)),]
early.m<-permute.m[1:nrow(not.m),]
remove1<-unname(which(colSums(early.m) == 0))
early.m<-early.m[,-remove1]
# NOTE(review): the -10 trims the permuted "late" group; presumably to match
# group sizes — confirm against nrow(vuln.m).
late.m<-permute.m[(nrow(not.m)+1):(nrow(permute.m)-10),]
remove2<-unname(which(colSums(late.m) == 0))
late.m<-late.m[,-remove2]
novelty.score<-length(which(!colnames(late.m) %in% colnames(early.m)))/ncol(late.m)
novelty.v<-append(novelty.v, novelty.score)
}
#actual
length(which(!colnames(vuln.m) %in% colnames(not.m)))/ncol(vuln.m)
#predicted
quantile(novelty.v, c(0.99))
max(novelty.v)
#6.18
#POS Analysis
#What parts of speech are distinctive of the vulnerable poems?
# The _POS directory holds part-of-speech-tagged versions of the poems.
corpus2 <- VCorpus(DirSource("WandaColeman_POS"), readerControl=list(language="English"))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("\\$", "", x)))
#take 1-2 POSgrams
#BigramTokenizer <- function(x) unlist(lapply(ngrams(words(x), 1), paste, collapse = " "), use.names = FALSE)
#corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(tokenize = BigramTokenizer, wordLengths=c(1,Inf)))
corpus2.dtm<-DocumentTermMatrix(corpus2, control = list(wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
#remove punctuation
corpus2.sub<-corpus2.matrix[,-grep("[[:punct:]]", colnames(corpus2.matrix))]
#subset by vuln and not vuln
a<-read.csv("WandaColeman_Vulnerability_Table.csv")
#find all poems that fall below the confidence interval for vulnerability
a.vuln<-a[a$diff.low > 0,]
a.vuln<-a.vuln[complete.cases(a.vuln),]
a.not<-a[a$diff.low < 0,]
a.not<-a.not[complete.cases(a.not),]
#subset matrix by vuln and not vuln
vuln.m<-corpus2.sub[row.names(corpus2.sub) %in% as.character(a.vuln$poems),]
not.m<-corpus2.sub[row.names(corpus2.sub) %in% as.character(a.not$poems),]
#run distinctive POS test
# Fisher's exact test per POS tag: 2x2 table of tag count vs. all other
# counts in each group. Odds ratio > 1 means overrepresented in vuln group.
word1<-colSums(vuln.m)
word2<-colSums(not.m)
all1<-sum(vuln.m)
all2<-sum(not.m)
fisher.df<-data.frame(word=colnames(vuln.m), group1=word1, group2=word2, fish.odds=0, fish.p=0)
for (i in 1:ncol(vuln.m)){
cont.table<-data.frame(c(word1[i], all1-word1[i]), c(word2[i], all2-word2[i]))
fish<-fisher.test(cont.table)
fisher.df[i, c("fish.odds","fish.p")]<-c(fish$estimate[[1]], fish$p.value)
}
#remove insignificant words
# NOTE(review): p-values are not corrected for multiple comparisons here.
fisher.final<-fisher.df[fisher.df$fish.p < 0.05,]
fisher.final<-fisher.final[order(-fisher.final$fish.odds),]
#6.19
#Abstraction Score
# Ratio of WordNet "physical_entity" to "abstraction" hypernym counts,
# compared between vulnerable and non-vulnerable poems.
language1<-c("English")
# NOTE(review): language2 appears unused in this section.
language2<-c("english")
physical.word<-c("physical_entity")
abstraction.word<-c("abstraction")
# The _HYP directory holds hypernym-expanded versions of the poems.
corpus3 <- VCorpus(DirSource("WandaColeman_HYP"), readerControl=list(language=language1))
corpus3 <- tm_map(corpus3, content_transformer(stripWhitespace))
corpus3 <- tm_map(corpus3, content_transformer(tolower))
corpus3.dtm<-DocumentTermMatrix(corpus3, control=list(wordLengths=c(1,Inf)))
corpus3.matrix<-as.matrix(corpus3.dtm, stringsAsFactors=F)
#divide vuln not vuln
a<-read.csv("WandaColeman_Vulnerability_Table.csv")
#find all poems that fall below the confidence interval for vulnerability
a.vuln<-a[a$diff.low > 0,]
a.vuln<-a.vuln[complete.cases(a.vuln),]
a.not<-a[a$diff.low < 0,]
a.not<-a.not[complete.cases(a.not),]
#subset matrix by vuln and not vuln
vuln.hyp<-corpus3.matrix[row.names(corpus3.matrix) %in% as.character(a.vuln$poems),]
not.hyp<-corpus3.matrix[row.names(corpus3.matrix) %in% as.character(a.not$poems),]
#for vuln poems
vuln.ratio.actual<-unname(colSums(vuln.hyp)[which(colnames(vuln.hyp) == physical.word)])/unname(colSums(vuln.hyp)[which(colnames(vuln.hyp) == abstraction.word)])
#for non-vuln
not.ratio.actual<-unname(colSums(not.hyp)[which(colnames(not.hyp) == physical.word)]/colSums(not.hyp)[which(colnames(not.hyp) == abstraction.word)])
#ration of vuln to not vuln
#the higher the ratio the more the late period increases its use of things rather than abstractions
#it does not mean that there are absolutely more things (its possible to use more things relatively while still using fewer things overall)
vuln.not.ratio<-vuln.ratio.actual/not.ratio.actual
#randomization test: build a null distribution for the observed ratio-of-ratios
# (vuln.not.ratio) by repeatedly permuting poems across the two groups.
pred.v <- numeric(1000)  # preallocated instead of growing with append()
for (i in 1:1000){
  #permute the entire matrix
  permute.m <- corpus3.matrix[sample(nrow(corpus3.matrix), nrow(corpus3.matrix)),]
  #subset it to the same sizes as the vulnerable and non-vulnerable poems
  vuln.m <- permute.m[1:nrow(vuln.hyp),]
  not.m <- permute.m[(nrow(vuln.hyp)+1):nrow(permute.m),]
  #physical/abstraction ratio within each permuted group
  vuln.ratio.pred <- colSums(vuln.m)[which(colnames(vuln.m) == physical.word)][[1]]/colSums(vuln.m)[which(colnames(vuln.m) == abstraction.word)][[1]]
  not.ratio.pred <- colSums(not.m)[which(colnames(not.m) == physical.word)][[1]]/colSums(not.m)[which(colnames(not.m) == abstraction.word)][[1]]
  test.statistic <- vuln.ratio.pred/not.ratio.pred
  # BUG FIX: store the ratio-of-ratios test statistic. The original appended
  # vuln.ratio.pred, which is not comparable to the observed vuln.not.ratio
  # that this null distribution is checked against below.
  pred.v[i] <- test.statistic
}
#actual
vuln.not.ratio
#predicted
max(pred.v)
quantile(pred.v, c(0.99))
#6.20
#Generality - Coleman
# Compares how often each group's words appear among the *other* group's
# hypernyms (a proxy for relative generality/specificity of vocabulary).
corpus1 <- VCorpus(DirSource("WandaColeman"), readerControl=list(language="English"))
corpus1 <- tm_map(corpus1, content_transformer(stripWhitespace))
corpus1 <- tm_map(corpus1, content_transformer(tolower))
corpus1 <- tm_map(corpus1, content_transformer(removePunctuation))
corpus1 <- tm_map(corpus1, content_transformer(removeNumbers))
corpus1.dtm<-DocumentTermMatrix(corpus1, control=list(wordLengths=c(1,Inf)))
corpus1.matrix<-as.matrix(corpus1.dtm, stringsAsFactors=F)
#load hypernyms
corpus2 <- VCorpus(DirSource("WandaColeman_HYP"), readerControl=list(language="English"))
corpus2 <- tm_map(corpus2, content_transformer(stripWhitespace))
corpus2 <- tm_map(corpus2, content_transformer(tolower))
corpus2 <- tm_map(corpus2, content_transformer(function(x) gsub("_", " ", x)))
corpus2.dtm<-DocumentTermMatrix(corpus2, control=list(wordLengths=c(1,Inf)))
corpus2.matrix<-as.matrix(corpus2.dtm, stringsAsFactors=F)
#subset by vuln and not vuln
a<-read.csv("WandaColeman_Vulnerability_Table.csv")
#find all poems that fall below the confidence interval for vulnerability
a.vuln<-a[a$diff.low > 0,]
a.vuln<-a.vuln[complete.cases(a.vuln),]
a.not<-a[a$diff.low < 0,]
a.not<-a.not[complete.cases(a.not),]
#subset matrix by vuln and not vuln
vuln.hyp<-corpus2.matrix[row.names(corpus2.matrix) %in% as.character(a.vuln$poems),]
not.hyp<-corpus2.matrix[row.names(corpus2.matrix) %in% as.character(a.not$poems),]
vuln.reg<-corpus1.matrix[row.names(corpus1.matrix) %in% as.character(a.vuln$poems),]
not.reg<-corpus1.matrix[row.names(corpus1.matrix) %in% as.character(a.not$poems),]
#test statistic
#what % of vuln words are not-vuln hypernyms
vuln.hypernymy<-sum(vuln.reg[,which(colnames(vuln.reg) %in% colnames(not.hyp))])/sum(vuln.reg)
#what % of not vuln words are hypernyms of vuln words
not.hypernymy<-sum(not.reg[,which(colnames(not.reg) %in% colnames(vuln.hyp))])/sum(not.reg)
#actual
actual<-vuln.hypernymy-not.hypernymy
#randomization test
pred.v<-vector()
for (j in 1:1000){
print(j)
#permute original matrices
# NOTE(review): corpus1 and corpus2 are permuted independently, so a poem's
# word row and its hypernym row end up in different groups — presumably
# intentional for this null model, but worth confirming.
corpus1.perm<-corpus1.matrix[sample(nrow(corpus1.matrix), nrow(corpus1.matrix)),]
corpus2.perm<-corpus2.matrix[sample(nrow(corpus2.matrix), nrow(corpus2.matrix)),]
#subset hyp an
vuln.hyp<-corpus2.perm[1:nrow(a.vuln),]
not.hyp<-corpus2.perm[(nrow(a.vuln)+1):nrow(corpus2.perm),]
vuln.reg<-corpus1.perm[1:nrow(a.vuln),]
# NOTE(review): uses nrow(corpus2.perm) to bound rows of corpus1.perm;
# harmless only if both corpora have identical row counts — confirm.
not.reg<-corpus1.perm[(nrow(a.vuln)+1):nrow(corpus2.perm),]
#test statistic
#what % of vuln words are not-vuln hypernyms
vuln.hypernymy<-sum(vuln.reg[,which(colnames(vuln.reg) %in% colnames(not.hyp))])/sum(vuln.reg)
#what % of not vuln words are hypernyms of vuln words
not.hypernymy<-sum(not.reg[,which(colnames(not.reg) %in% colnames(vuln.hyp))])/sum(not.reg)
#actual
pred<-vuln.hypernymy-not.hypernymy
pred.v<-append(pred.v, pred)
}
#actual
# NOTE(review): vuln.hypernymy/not.hypernymy were overwritten inside the loop,
# so this recomputed difference reflects the *last permutation*, not the
# observed data; use the `actual` variable saved above instead.
vuln.hypernymy-not.hypernymy
#predicted
max(pred.v)
quantile(pred.v, c(0.99))
min(pred.v)
|
e15376c4fd46f18490202d97f32403582b123ff5
|
d2f39a2258dbe6253bc28fd00717a67b131751f4
|
/R/frequency_fns.R
|
57636fb2925ae005aa21e67f136329c14c5f3293
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
andrewzm/MVST
|
6e5d9d5c84ba0d28e38fdb69b12cfa8ba1bcc45f
|
2bf0835e66e04e120f78fe8673afe3dd9d6f42c0
|
refs/heads/master
| 2022-09-29T23:40:39.048820
| 2022-09-15T21:37:50
| 2022-09-15T21:37:50
| 20,478,703
| 10
| 9
| null | 2018-10-18T14:50:36
| 2014-06-04T10:13:03
|
R
|
UTF-8
|
R
| false
| false
| 12,256
|
r
|
frequency_fns.R
|
#' @title RBF filter
#' @description Takes a 2-D set of disaggregated points and smooths them out by placing a radial basis function at each point and using
#' least-squares estimation for the ensuing weights. The constant trend is added on at the end.
#' @param data a data frame with fields \code{x}, \code{y} and an output variable.
#' @param si data frame containing fields \code{x} and \code{y} which denote the desired coordinates of the output field (typically a grid).
#' @param varname the label of the column in \code{data} to use as the output variable.
#' @param smooth_var the variance of the radial basis function.
#' @return the smoothed field values at locations in \code{si}
#' @export
#' @examples
#' data <- data.frame(x = c(1,1,2,2),y = c(1,2,1,2), z = c(1,2,3,4))
#' si <- as.data.frame(expand.grid(seq(0,3,by=0.1),seq(0,3,by=0.1)))
#' names(si) <- c("x","y")
#' si$z <- RBF_filter(data,si,varname="z",smooth_var=1)
RBF_filter <- function(data, si, varname = "z", smooth_var = 800^2) {
  kernel_smooth_var <- smooth_var
  # Pairwise distances between the observation points.
  D <- rdist(data[c("x", "y")], data[c("x", "y")])
  # Gram matrix of the RBF kernel evaluated at the observation points.
  A <- my_RBF(D, mu = matrix(0, 1, 2), A = 1, sigma2 = kernel_smooth_var)
  z <- data[varname][, 1]
  # IMPROVED: solve the linear system A w = (z - mean(z)) directly. The
  # original formed the explicit inverse via solve(A) %*% (z - mean(z));
  # solve(A, b) gives identical results but is faster and more numerically
  # stable.
  data$RBFw <- solve(A, z - mean(z))
  si[varname] <- 0
  # Accumulate each observation's weighted basis function over the target grid.
  for (i in 1:nrow(data)) {
    r <- rdist(cbind(si$x, si$y), matrix(c(data$x[i], data$y[i]), 1, 2))
    si[varname] <- si[varname] + my_RBF(r, A = data$RBFw[i], sigma2 = kernel_smooth_var)
  }
  # Re-add the constant (mean) trend that was removed before fitting.
  si[varname] <- si[varname] + mean(z)
  return(si[varname][, 1])
}
#' @title Find length scales by fitting a Matern
#' @description Takes a spatial or spatio-temporal data set and returns spatial and temporal length scales by finding the maximum likelihood
#' on a pre-specified grid. If the data is spatio-temporal a separable covariance structure is assumed.
#' @param data a data frame with fields \code{x}, \code{y} and an output variable \code{z}. The time coordinate \code{t} is optional.
#' @param rho an array of spatial practical ranges to consider.
#' @param nu an array of smoothness parameters to consider.
#' @param var an array of marginal variances to consider
#' @param theta an array of first-order auto-regressive parameters to consider in the model \eqn{x_{t+1} = \theta x_{t} + e_{t}}.
#' @export
#' @examples
#' var_true <- 1
#' kappa_true <- kappa_from_l(l=1700,nu=2)
#' X <- data.frame(x = 3000*runif(100), y = 3000*runif(100))
#' dd<- fields::rdist(X,X)
#' K <- Matern(r=dd,nu=2,var=var_true,kappa=kappa_true)
#' X$z <- t(chol(K)) %*% rnorm(nrow(X))
#' var_marg <- var(X["z"])
#' var_search <- 10^(seq(log10(var_marg/100),log10(var_marg*100),length=100))
#' rho_search=seq(100,4000,200)
#' lk_fit <- lscale_from_Matern(X,rho=rho_search,var=var_search,nu=c(2))
#' print(lk_fit$spat_df)
lscale_from_Matern <- function(data,rho=100,nu=3/2,var=1,theta = seq(-0.99,0.99,0.33)) {
# Grid of spatial log-likelihood values: rows index rho, columns index var.
marg_spat <- matrix(0,length(rho),length(var))
marg_temp <- NULL
spat_result_frame <- temp_result_frame <- NULL
dist_space <- list()
dist_time <- list()
# Purely spatial data: give every point the same dummy time stamp.
if(!("t" %in% names(data))) {
data$t = 0
}
# Per-time-slice spatial distance matrices (1-D or 2-D coordinates).
dist_space <- dlply(data,"t",function(df) {
if ("y" %in% names(df)) {
dd<- rdist(df[c("x","y")],df[c("x","y")])
} else {
dd<- rdist(df["x"],df["x"])
}
return(dd)
})
# NOTE(review): dist_time is computed but never used below.
dist_time <- dlply(data,.("x","y"),function(df) {
dd<- rdist(df["t"],df["t"])
return(dd)
})
t_axis <- seq_along(unique(data$t))
tt <- unique(data$t)
L_norm <- marg1_norm <- marg2_norm <- list()
# Grid search over smoothness nu, practical range rho, and variance var.
for(k in 1:length(nu)) {
nu_sel <- nu[k]
# NOTE(review): marg_spat is not reset between successive nu values, so for
# k > 1 the likelihood surfaces accumulate — confirm this is intended.
for(i in 1:length(rho)) {
kappa = kappa_from_l(rho[i],nu_sel)
# Per-time-slice quadratic form and log-determinant of the unit-variance
# Matern covariance; the variance enters analytically in the next loop.
for(h in t_axis) {
sub_data <- subset(data, t == tt[h])
K_norm <- Matern(r=dist_space[[h]],nu=nu_sel,var=1,kappa=kappa)
diag(K_norm) <- 1
L_norm[[h]] <- chol(K_norm) # Cholesky of normalised covariance matrix
Kinv_norm <- chol2inv(L_norm[[h]]) # normalised precision matrix
marg1_norm[[h]] <- t(sub_data$z)%*%Kinv_norm%*%sub_data$z
marg2_norm[[h]] <- logdet(L_norm[[h]])
}
for(j in 1:length(var)) {
#Kinv <- Kinv_norm/var[j]
#marg[i,j] <- -0.5*t(data$z)%*%Kinv%*%data$z - 0.5*logdet(sqrt(var[j])*L_norm)
for(h in t_axis)
marg_spat[i,j] <- marg_spat[i,j] -0.5*marg1_norm[[h]]/var[j] - 0.5*(nrow(L_norm[[h]]))*log(var[j]) - 0.5*marg2_norm[[h]]
}
#  for(j in 1:length(var))
#   for(h in t_axis) {
#    sub_data <- subset(data, t == tt[h])
#     K <- var[j]*Matern(r=dist_space[[h]],nu=nu_sel,var=1,kappa=kappa)
#     diag(K) <- diag(K)*2
#     L <- chol(K) # Cholesky of normalised covariance matrix
#     Kinv <- chol2inv(L) # normalised precision matrix
#     marg1_norm <- t(sub_data$z)%*%Kinv%*%sub_data$z
#     marg2_norm <- logdet(L)
#     marg[i,j] <- marg[i,j] -0.5*marg1_norm - 0.5*marg2_norm
#   }
}
# Locate the maximising (rho, var) cell and warn when it sits on the edge of
# the search grid (the true optimum may lie outside the grid).
rho_max_ind <- which.max(apply(marg_spat,1,max))
var_max_ind <- which.max(apply(marg_spat,2,max))
if((rho_max_ind %in% c(1,length(rho)) )| (var_max_ind %in% c(1,length(var))) ) {
cat(paste("Warning: reached search boundary for nu = ",nu_sel,sep=""),sep="\n"); flush.console()
}
spat_result_frame <- rbind(spat_result_frame,data.frame(lscale = rho[rho_max_ind],var = var[var_max_ind], nu = nu_sel, lk = max(marg_spat)))
}
# Temporal part: AR(1)-style grid search, only for genuinely temporal data.
if(length(t_axis)>1) {
marg_temp <- matrix(0,length(theta),length(var)+2)
for (i in 1:length(theta)) {
# Sum per-location likelihood contributions; ddply adds the x/y id columns
# which are stripped two lines below with [,-(1:2)].
marg_temp[i,] <- as.vector(colSums(ddply(data,c("x","y"), function(df) {
time_dist <- rdist(df$t,df$t)
diag(time_dist) <- 0
K_norm <- theta[i]^time_dist
L_norm <- chol(K_norm)
Kinv_norm <- chol2inv(L_norm)
marg1_norm <- t(df$z)%*%Kinv_norm%*%df$z
marg2_norm <- logdet(L_norm)
X <- rep(0,length(var))
for(j in 1:length(var)) {
X[j] <- -0.5*marg1_norm/var[j] - 0.5*nrow(L_norm)*log(var[j]) - 0.5*marg2_norm
}
return(t(matrix(X)))
})))
}
marg_temp <- marg_temp[,-(1:2)]
theta_max_ind <- which.max(apply(marg_temp,1,max))
var_max_ind <- which.max(apply(marg_temp,2,max))
if((theta_max_ind %in% c(1,length(theta)) )| (var_max_ind %in% c(1,length(var))) ) {
cat(paste("Warning: reached search boundary for temporal correlation",sep=""),sep="\n"); flush.console()
}
# Attach on the temporal bit
temp_result_frame <- rbind(temp_result_frame,data.frame(lscale = theta[theta_max_ind],var = var[var_max_ind], nu = NA, lk = max(marg_temp)))
}
return(list(spat_df = spat_result_frame,temp_df = temp_result_frame,marg_spat=marg_spat,marg_temp = marg_temp))
}
# Frequency analysis of a 2-D spatial field modelled as a Matern process:
# fits Matern parameters to (locs, x), forms the kernel's power spectrum on
# the lag grid d, kriges the field, and returns the fit augmented with a
# spectral cutoff frequency and a practical range estimate.
FreqAnal2D <- function(locs,x,theta.grid=0,smoothness = 5/2,d = seq(-5,5,0.1),dither = T,plotit=F) {
# NOTE(review): class(x) == "cls" comparisons are fragile; inherits() is the
# robust idiom (kept as-is here to preserve behavior).
if (class(locs) == "list") {
locs <- cbind(locs[[1]],locs[[2]])
}
if (class(x) == "numeric") x <- matrix(x)
# Halve the data repeatedly until at most 200 points remain ("dithering").
if ((dim(x)[1] > 200)&(dither==T)) {
cat("Dithering: too much data",sep="\n")
while(dim(x)[1] > 200) {
x <- matrix(x[seq(1,length(x),2)])
locs <- locs[seq(1,dim(locs)[1],2),]
}
}
cat('Using a Matern field to model data',sep="\n")
dd <- mean(diff(d)) # In km
fs_rho = 1/dd # In cycles per km
d1 <- d2 <- d
N_rho1 = length(d1)
N_rho2 = length(d2)
f_rho1 = fs_rho * (0 : (N_rho1-1)) / N_rho1 # Frequency axis for correlation function
f_rho2 = fs_rho * (0 : (N_rho2-1)) / N_rho2
#D <- meshgrid(d1,d2)
#D <- cbind(c(D$x),c(D$y))
D <- as.matrix(expand.grid(d1,d2))
r <- apply(D,1,function(x) {sqrt(x[1]^2 + x[2]^2) } ) # find radial distance from origin of every point
x <- apply(x,2,function(x) {x - mean(x)}) # detrend signal
if (length(theta.grid) == 0) {
fit <- MLE.Matern.fast(locs,x,smoothness=smoothness)
} else {
fit <- MLE.Matern.fast(locs,x,smoothness=smoothness,theta.grid=theta.grid) # estimate Matern parameters
# In MLE.Matern.fast theta is the range, rho is the sill and sigma2 is the nugget
}
# Matern correlation evaluated on the radial lag grid, reshaped to the grid.
rho <- matrix(Matern(abs(r),range=fit$par['theta'],nu=smoothness,phi=fit$par['rho']),N_rho2,N_rho1)
# In Matern range is theta and phi is the marginal variance (rho)
RHO <- fft(rho*dd)
PS = abs(RHO)
prediction <- Krig(locs,x,Covariance="Matern",smoothness=smoothness,theta=fit$par['theta'],rho=fit$par['rho'],sigma2=fit$par['sigma']^2)
out<- predict.surface(prediction)
if (plotit) {
image(d1,d2,t(rho),xlab='s1 (km)',ylab='s2 (km)')
title('Matern Kernel')
image(f_rho1,f_rho2,t(PS),xlab='f1 (cycles per unit)',ylab='f2 (cycles per unit)')
title('Power Spectrum')
surface( out, type="C") # option "C" our favorite
title("Kriged Field")
}
# First frequency at which the spectrum drops below 10% of its DC value.
fit$cutoff <- f_rho1[min(which(abs(RHO)[1,] < 0.1*abs(RHO)[1,1]))]
# NOTE(review): this first rho_0_1 (from fit$pars) is overwritten a few lines
# below by the grid-search estimate; also note the fit$par vs fit$pars mix.
fit$rho_0_1 <- as.numeric(fit$pars["theta"])*sqrt(8*smoothness) #Lindgren
out2=0
theta_axis = seq(1,50,0.5)
info<- list( x=locs,y=x,smoothness=smoothness, ngrid=1)
# NOTE(review): i takes non-integer values (1.5, 2.5, ...) and out2[i]
# truncates the index, so successive half-step results overwrite each other;
# out2 ends up with length 50, not length(theta_axis) — confirm intent.
for(i in theta_axis) out2[i] <- MLE.objective.fn2(log(i), info, value=T,lambda.grid=1e3)
theta = theta_axis[which.min(out2)]
fit$rho_0_1 <- as.numeric(theta)*sqrt(8*smoothness) #Lindgren
return(fit)
}
## Two-dimensional power spectrum of a field on a regular grid.
## s1, s2: coordinate axes (s1 assumed evenly spaced); x: matrix of field
## values. Returns the frequency axes f1/f2 (cycles per unit) and the power
## spectrum PS = |DFT(x * ds)|^2.
PS2D <- function(s1, s2, x) {
  spacing <- mean(diff(s1))      # grid step (assumed uniform)
  samp_freq <- 1 / spacing       # sampling frequency
  n1 <- length(s1)
  n2 <- length(s2)
  # DFT of the step-weighted field; squared modulus gives the power spectrum.
  spectrum <- fft(x * spacing)
  freq1 <- samp_freq * (seq_len(n1) - 1) / n1
  freq2 <- samp_freq * (seq_len(n2) - 1) / n2
  list(f1 = freq1, f2 = freq2, PS = abs(spectrum)^2)
}
# Profile-likelihood objective for the Matern range parameter (on log scale).
# For a fixed range theta = exp(ltheta), profiles out lambda (noise-to-signal)
# via golden-section search and rho/sigma analytically, after projecting the
# data onto the complement of a second-order polynomial trend.
# ltheta: log range; info: list with x (locations), y (observations),
# smoothness, ngrid; value: if TRUE return only the minimised negative
# log profile likelihood, else a list with the MLEs and search trace.
MLE.objective.fn2 <- function (ltheta, info, value = TRUE,lambda.grid=100)
{
y <- as.matrix(info$y)
x <- info$x
smoothness <- info$smoothness
ngrid <- info$ngrid
M <- ncol(y)
# Project out a degree-2 polynomial trend: Q2 spans the null space of the
# trend's QR decomposition, so ys are trend-free contrasts.
Tmatrix <- fields.mkpoly(x, 2)
qr.T <- qr(Tmatrix)
N <- nrow(y)
Q2 <- qr.yq2(qr.T, diag(1, N))
ys <- t(Q2) %*% y
N2 <- length(ys)
theta <- exp(ltheta)
K <- Matern(rdist(x, x)/theta, smoothness = smoothness)
# Eigendecomposition of the projected covariance diagonalises the problem.
Ke <- eigen(t(Q2) %*% K %*% Q2, symmetric = TRUE)
u2 <- t(Ke$vectors) %*% ys
u2.MS <- c(rowMeans(u2^2))
D2 <- Ke$values
N2 <- length(D2)
ngrid <- min(ngrid, N2)
# lambda.grid <- 100
trA <- minus.pflike <- rep(NA, ngrid)
# Negative log profile likelihood as a function of log(lambda).
temp.fn <- function(llam, info) {
lam.temp <- exp(llam)
# NOTE(review): info$u2.MS is NULL (the list below names it u2, not u2.MS);
# the references to u2.MS below actually resolve to the enclosing
# function's u2.MS via lexical scoping, so this works only by accident.
u2 <- info$u2.MS
D2 <- info$D2
N2 <- length(u2.MS)
rho.MLE <- (sum((u2.MS)/(lam.temp + D2)))/N2
lnDetCov <- sum(log(lam.temp + D2))
-1 * M * (-N2/2 - log(2 * pi) * (N2/2) - (N2/2) * log(rho.MLE) -
(1/2) * lnDetCov)
}
info <- list(D2 = D2, u2 = u2.MS, M = M)
out <- golden.section.search(f = temp.fn, f.extra = info,
gridx = log(lambda.grid), tol = 1e-07)
minus.LogProfileLike <- out$fmin
lambda.MLE <- exp(out$x)
# Analytic MLEs for the remaining parameters at the optimal lambda.
rho.MLE <- (sum((u2.MS)/(lambda.MLE + D2)))/N2
sigma.MLE <- sqrt(lambda.MLE * rho.MLE)
trA <- sum(D2/(lambda.MLE + D2))
pars <- c(rho.MLE, theta, sigma.MLE, trA)
names(pars) <- c("rho", "theta", "sigma", "trA")
if (value) {
return(minus.LogProfileLike)
}
else {
return(list(minus.lPlike = minus.LogProfileLike, lambda.MLE = lambda.MLE,
pars = pars, mle.grid = out$coarse.search))
}
}
# Empirical-variogram-based length-scale estimate.
#
# data: matrix/data frame with coordinates in columns 1:2 and the observed
#   variable in column 3.
# lim: maximum pair separation distance considered.
# plotit: if TRUE, plot the (negated) binned variogram in a new device.
# Returns the distance at which the binned variogram first exceeds
# min + 0.9 * range of the binned values (a 90%-of-sill practical range).
lscale_from_variogram <- function(data, lim = 500, plotit = TRUE) {
  pair_dist <- rdist(data[, 1:2], data[, 1:2])
  # BUG FIX: the original stored this matrix in a local named `diff`, which
  # shadowed base::diff and made the later call diff(range(...)) fail with
  # "attempt to apply non-function".
  val_diff <- rdist(data[, 3], data[, 3])
  # BUG FIX: use `=` (not `<-`) inside data.frame() so the columns really are
  # named d and var; the original's `<-` produced mangled column names and
  # leaked d/var into the function environment.
  D <- data.frame(d = c(pair_dist), var = c(val_diff)^2)
  D <- subset(D, d < lim)
  break_diff <- lim / 50
  breaks <- seq(0, lim * 2, break_diff)
  # Bin pairs by separation distance and average the squared differences.
  D$group <- cut(D$d, breaks)
  D_mean <- ddply(D, "group", function(x) mean = mean(x$var))
  if (plotit) {
    dev.new()
    plot(D_mean[, 1], -D_mean[, 2])
  }
  # Threshold at 90% of the variogram's range above its minimum.
  th <- min(D_mean[, 2]) + diff(range(D_mean[, 2])) * 0.9
  return(which(D_mean[, 2] > th)[1] * break_diff)
}
|
0ae7edc569576e9a2eb253c18398b7be3985fd7f
|
176c89d7658008231b2d5f9f87677da1bfe5aba9
|
/xfl pilot plot.R
|
c77d51e771f8798bb6d596f7b082ae982d6eaf35
|
[] |
no_license
|
naymikm/XFL-public
|
fb0880e5e3068b9131091034867bce3b96a20dba
|
6002fb24cb9562784f43ccbcd8d3057c515b8e36
|
refs/heads/master
| 2021-01-08T07:45:12.832011
| 2020-02-20T07:11:33
| 2020-02-20T07:11:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,922
|
r
|
xfl pilot plot.R
|
library(ggplot2)
library(ggimage)
# NOTE(review): hard-coded absolute path ties this script to one machine.
setwd('C:/Users/Owner/Documents/GitHub/XFL')
teams_df <- read.csv('teams.csv', stringsAsFactors=F)
PFF_df <- read.csv('epa/PFF grades.csv', stringsAsFactors=F)
# Stack all post-EPA play-by-play files into one data frame.
pbp_df <- do.call(rbind, lapply(dir('epa/post-epa',full=T), function(i) read.csv(i, stringsAsFactors=F)))
# Offensive EPA: total plays and EPA per team over pass/rush plays only.
off_epa <- aggregate(cbind(plays=1,epa) ~ ClubCode, data = pbp_df, FUN = sum, subset = PlayType == 'Pass' | PlayType == 'Rush')
off_epa$epa_per_play <- off_epa$epa / off_epa$plays
def_epa <- off_epa
# NOTE(review): defensive attribution is done by hard-coding each team's
# single week-1 opponent in alphabetical-pair order; this only works for one
# week of a fixed schedule — confirm before reusing for later weeks.
def_epa$ClubCode <- c('STL','SEA','LA','HOU','TB','DC','DAL','NY')
off_epa <- merge(off_epa, teams_df, by.x = 'ClubCode', by.y = 'Abbr', all.x=T)
off_epa <- merge(off_epa, PFF_df[which(PFF_df$Type=='Offense'),], by.x = 'Full', by.y = 'Team', all.x=T)
off_epa$logos <- paste0('logos/',off_epa$ClubCode,'.png')
def_epa <- merge(def_epa, teams_df, by.x = 'ClubCode', by.y = 'Abbr', all.x=T)
def_epa <- merge(def_epa, PFF_df[which(PFF_df$Type=='Defense'),], by.x = 'Full', by.y = 'Team', all.x=T)
def_epa$logos <- paste0('logos/',def_epa$ClubCode,'.png')
# Offense: PFF grade vs. EPA/play, teams drawn as logos.
ggplot(data = off_epa,aes(y = Grade, x = epa_per_play)) +
geom_image(aes(image = logos), size = 0.15) +
labs(y = 'PFF Team Offensive Grade',
x = 'Offensive EPA / Play',
caption = 'By Anthony Reinhard\nData from XFL.com and PFF\nEPA model from @nflscrapeR', title = 'XFL Team Offense',
subtitle = 'Through Week 1 of 2020') +
theme_bw() +
theme(
#text = element_text(family='Bahnschrift', color='darkblue'),
text = element_text(color='darkblue'),
plot.background = element_rect(fill = 'grey95'),
panel.border = element_rect(color = 'darkblue'),
axis.ticks = element_line(color = 'darkblue'),
axis.title = element_text(size = 10),
axis.text = element_text(size = 8, color = 'darkblue'),
plot.title = element_text(size = 14),
plot.subtitle = element_text(size = 8),
plot.caption = element_text(size = 5)
)
ggsave('offense.png',dpi = 1000)
# Defense: same layout; x axis reversed so "better defense" reads rightward.
ggplot(data = def_epa,aes(y = Grade, x = epa_per_play)) +
geom_image(aes(image = logos), size = 0.15) +
scale_x_reverse() +
labs(y = 'PFF Team Defensive Grade',
x = 'Defensive EPA / Play',
caption = 'By Anthony Reinhard\nData from XFL.com and PFF\nEPA model from @nflscrapeR',
title = 'XFL Team Defense',
subtitle = 'Through Week 1 of 2020') +
theme_bw() +
theme(
#text = element_text(family='Bahnschrift', color='darkblue'),
text = element_text(color='darkblue'),
plot.background = element_rect(fill = 'grey95'),
panel.border = element_rect(color = 'darkblue'),
axis.ticks = element_line(color = 'darkblue'),
axis.title = element_text(size = 10),
axis.text = element_text(size = 8, color = 'darkblue'),
plot.title = element_text(size = 14),
plot.subtitle = element_text(size = 8),
plot.caption = element_text(size = 5)
)
ggsave('defense.png',dpi = 1000)
|
656640a88cc30306725a0f0200cc4337fce5cc06
|
9d8061086e288b4a2ce5671301d67667351d44ff
|
/libraries.R
|
16d059288910bff54dfad4ede395063a63ed4b84
|
[] |
no_license
|
maxsal/biostat629_project1
|
f5944c1017dfb43a31624405917be7ea9efde5bc
|
9b4c75bb353b73d3b8b9f7f7f07d5081544a1c47
|
refs/heads/main
| 2023-03-17T07:31:40.200831
| 2021-03-17T16:28:29
| 2021-03-17T16:28:29
| 348,234,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 410
|
r
|
libraries.R
|
# Project dependency loader: attaches all packages used by the analysis,
# silencing startup messages. lme4 is superseded by lmerTest (which loads
# lme4's fitting machinery with added p-values), hence the commented line.
message("*beep boop* loading packages...")
suppressMessages(library(tidyverse))
suppressMessages(library(vroom))
suppressMessages(library(janitor))
suppressMessages(library(glue))
suppressMessages(library(RColorBrewer))
# suppressMessages(library(lme4))
suppressMessages(library(lmerTest))
suppressMessages(library(lattice))
suppressMessages(library(geepack))
message("*beep boop* packages loaded!!")
|
92a136cd051599a2ba95a66f7dd30f3edd144787
|
deb874f9d6934c81fe058f9b4edf7aba85065b52
|
/Demo.R
|
7e6e1221a2ba6abf8ee914a7f584f30b5d6ec714
|
[] |
no_license
|
melissadale/R-Demo-workshop
|
69f65db05d63c436008c7e773c73f5909daaf699
|
0f7e3ef32752a9ae7a3d992afd09b6e921757828
|
refs/heads/master
| 2020-09-08T09:14:28.658072
| 2019-11-11T23:50:43
| 2019-11-11T23:50:43
| 221,090,667
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14
|
r
|
Demo.R
|
sample(1, 10)
|
87e0a2777003b79b6fd1b4b9201dc22ffaf5c4f9
|
4b4a5da77bed2708f54b281c66c7ba0fd5deb036
|
/RCT_WinRatio_Death_LVEF.R
|
996b732026b76992b7e3c6cbf661bc88710d60f8
|
[] |
no_license
|
bizatheo/RCT-Simulation-v1
|
f79351b330c4c9876fecec040afdae01db82f2ff
|
39ea84491f10589391db3a961d39634bd4ecc022
|
refs/heads/main
| 2023-09-05T01:08:18.541334
| 2021-10-22T13:29:40
| 2021-10-22T13:29:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,130
|
r
|
RCT_WinRatio_Death_LVEF.R
|
library(truncnorm)
library(survival)
library(BuyseTest)
options(scipen=10)
trial_simulation <- function(nSims, nPerGroup, maxtime, scale0, scale1, trt_lvef, seed) {
set.seed(seed)
trial<-numeric(nSims)
wr<-numeric(nSims)
wr_lcl<-numeric(nSims)
wr_ucl<-numeric(nSims)
wr_pvalue<-numeric(nSims)
wr_success<-numeric(nSims)
for(i in 1:nSims){
trial[i]=i
pid <- seq(1, by=1, len=nPerGroup*2)
treatment=rep(0:1, nPerGroup)
bl_lvef <- rtruncnorm(nPerGroup*2, a=0, b=0.35, mean=0.25, sd=0.1)
survtimes0 <- rweibull(nPerGroup*2,1,scale0)
survtimes1 <- rweibull(nPerGroup*2,1,scale1)
survivaltime <- numeric(nPerGroup*2)
survivaltime <- ifelse(treatment==0, survtimes0, survtimes1)
death <- ifelse(survivaltime <= maxtime, 1, 0)
deathtime<-ifelse(death == 1, survivaltime, maxtime)
ch_lvef <- rtruncnorm(nPerGroup*2, a=0, b=0.30, mean=0.15, sd=0.1) + treatment*trt_lvef
fu_lvef <- ifelse(death == 1, NA, bl_lvef + ch_lvef)
sampletrial <- data.frame(cbind(pid, treatment, bl_lvef, death, deathtime, fu_lvef))
# Win Ratio for Death & LVEF
Setup <- treatment ~ tte(deathtime, status="death") + cont(fu_lvef)
BT <- BuyseTest(Setup, data=sampletrial, trace=0)
winratio <- summary(BT, statistic="WinRatio", print=FALSE)
wr[i] <- winratio$table.print$Delta[2]
wr_lcl[i] <- winratio$table$CIinf.Delta[3]
wr_ucl[i] <- winratio$table$CIsup.Delta[3]
wr_pvalue[i] <- winratio$table$p.value[3]
wr_success[i] <- ifelse(wr[i] > 1 & wr_pvalue[i] < 0.05, 1, 0)
}
trialresults <- as.data.frame(cbind(trial,
wr, wr_lcl, wr_ucl, wr_pvalue, wr_success))
return(trialresults)
}
#maxtime=180 -> assuming LVEF assessed at 6 months
#scale0=2000 -> assigns a 6-month event rate ~8.6% for patients with treatment=0 based on pweibull(180,1,2000)
#scale1=5000 -> assigns a 6-month event rate ~3.5% for patients with treatment=1 based on pweibull(180,1,5000)
#trt_lvef=0.05 -> assigns the "treatment effect" of LVEF improvement being +0.05 for treatment=1 vs treatment=0
# Run 1000 simulated trials with 90 patients per arm, inspect the first 10
# rows, and tabulate the success flag (the empirical power of the design).
trialresults1 <- trial_simulation(nSims=1000, nPerGroup=90, maxtime=180, scale0=2000, scale1=5000, trt_lvef=0.05, seed=1)
head(trialresults1,n=10)
table(trialresults1$wr_success)
|
b0514b5104427658a0928b23b22ce4ab96889770
|
477b91dacef6bd139e467a5e55c3fdc6586c7454
|
/MTS.R
|
be4b9bd2c12619d63bd10c73a9222ecdba822f0c
|
[] |
no_license
|
chanhyeoni/MTS_project
|
b83ef78b5d2138efb798e92cc2bf9080ac509333
|
18fd4eb270d8d698d320b3662d10c8ddd4204f44
|
refs/heads/master
| 2016-09-06T00:42:18.614679
| 2014-08-17T01:20:00
| 2014-08-17T01:20:00
| 21,401,060
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,754
|
r
|
MTS.R
|
# the Mahalanobis-Taguchi System module
# Chang Hyun Lee
# created 2014.06.24
# This file contains the necessary modules in order to run
# the Mahalanobis-Taguchi System (MTS)
# necessary package should be installed
library(ggplot2)
library(data.table)
library(xlsx)
library(MASS)
mean_matrix <- function(dataFrame){
  # Build a matrix of the same dimensions as `dataFrame` in which every row
  # holds the per-column means, ready for elementwise subtraction during
  # normalization.
  col_means <- apply(dataFrame, 2, mean)
  matrix(col_means,
         nrow = nrow(dataFrame),
         ncol = ncol(dataFrame),
         byrow = TRUE)
}
sd_matrix <- function(dataFrame){
  # Build a matrix of the same dimensions as `dataFrame` in which every row
  # repeats the per-column standard deviations (the z-score denominator).
  col_sds <- apply(dataFrame, 2, sd)
  matrix(col_sds,
         nrow = nrow(dataFrame),
         ncol = ncol(dataFrame),
         byrow = TRUE)
}
normalize <- function(normal_data, data){
  # Z-score `data` using the per-column mean and standard deviation of the
  # reference ("normal") group, as required by the MTS methodology.
  #
  # Args:
  #   normal_data: reference-group observations (rows x variables); defines
  #                the centering/scaling parameters.
  #   data:        observations to standardize; same columns as normal_data.
  # Returns:
  #   Numeric matrix with the dimensions (and dimnames) of `data`, each
  #   column centered and scaled by the reference-group statistics.
  #
  # Fix: the previous version replicated the reference means/sds into a
  # matrix with nrow(normal_data) rows and subset it with seq(1:nrow(data)),
  # which errored whenever `data` had more rows than `normal_data`.
  # sweep() applies the column statistics to any number of rows.
  ref_mean <- apply(normal_data, 2, mean)
  ref_sd <- apply(normal_data, 2, sd)
  centered <- sweep(as.matrix(data), 2, ref_mean, "-")
  sweep(centered, 2, ref_sd, "/")
}
mahalanobis_dist <- function(data, corr){
  # Scaled Mahalanobis distance MD_i = x_i' C^{-1} x_i / k for each row x_i
  # of `data`, where C is the reference-group correlation matrix and k the
  # number of variables (the standard MTS scaling).
  #
  # ginv() (Moore-Penrose pseudo-inverse, MASS) is kept so near-singular
  # correlation matrices still yield a usable inverse.
  #
  # Fix: the previous version formed the full n x n matrix
  # (data/k) %*% inv %*% t(data) and then kept only its diagonal -- O(n^2)
  # time and memory. Computing just the needed quadratic forms with
  # rowSums() returns identical values in O(n * k^2).
  k <- dim(data)[2] # the number of variables
  x <- as.matrix(data)
  inv_corr <- ginv(corr)
  rowSums((x %*% inv_corr) * x) / k
}
# Scatter plot of the Mahalanobis distances of the reference (healthy) group
# and the outside (abnormal) group so their separation can be inspected
# visually. Called for its side effect (printing the ggplot); the return
# value is ignored by callers.
plot_result<- function(ref_group, outside_group){
# plot the data in order to show the classification result
g <- ggplot(data = data.table(ref_group),
aes(x = seq(1, length(ref_group)), y = ref_group, colour = "the reference group")) + geom_point()
g <- g + geom_point(data = data.table(outside_group),
aes(x = seq(1, length(outside_group)), y = outside_group, colour = "the outside group"))
# Okabe-Ito colour-blind-safe blue / vermillion for the two groups.
cb <- c("#0072B2", "#D55E00")
# NOTE(review): "Referece" is a typo in the rendered title text; left as-is
# because this edit only touches comments.
g <- g + ggtitle("The Mahalanobis Distances for the Referece and the Outside Groups") +
xlab("the data") + ylab("MD") + scale_colour_manual(name = "Group", values=cb)
print(g)
}
make_ortho_arr<- function(orthoarray_filename, nVariables){
  # Load a Taguchi orthogonal array from a header-less CSV file and keep only
  # the first `nVariables` columns (one column per variable under study).
  # The full array's dimensions are printed as a progress/debug aid.
  print ("the dimension of the orthogonal array")
  full_array <- as.matrix(read.csv(orthoarray_filename, header = FALSE))
  print (dim(full_array))
  run_count <- nrow(full_array)
  full_array[seq(1, run_count), seq(1, nVariables)]
}
generate_runs <- function(ortho_arr, normal, abnormal) {
  # For every run (row) of the orthogonal array, keep only the variables whose
  # cell equals 1, re-estimate the reference correlation on that subset of the
  # normal data, and score every abnormal observation with the Mahalanobis
  # distance. Returns a matrix with one row per abnormal observation and one
  # column per run.
  # MUST: ortho_arr, normal and abnormal must share the same column dimension.
  run_results <- matrix(nrow = dim(abnormal)[1], ncol = dim(ortho_arr)[1])
  # TRUE where the run includes the variable (cells coded 1), FALSE elsewhere.
  included <- (ortho_arr == 1)
  for (run_idx in seq_len(nrow(included))) {
    keep <- included[run_idx, ]
    normal_sub <- as.matrix(normal[, keep])
    abnormal_sub <- as.matrix(abnormal[, keep])
    sub_corr <- cor(normal_sub, normal_sub)
    run_results[, run_idx] <- mahalanobis_dist(abnormal_sub, sub_corr)
  }
  run_results
}
SN_ratio <- function(runs) {
  # Larger-the-better Taguchi signal-to-noise ratio per run:
  #   SN_j = -10 * log10( mean_i( 1 / MD_ij^2 ) )
  # computed for every column of the Mahalanobis-distance matrix `runs`
  # (observations in rows, orthogonal-array runs in columns).
  #
  # Returns an n_runs x 1 matrix, the shape expected by the downstream
  # use/don't-use computation.
  #
  # Fix: removed the dead commented-out return and the redundant double
  # transpose t(t(mat)); as.matrix() produces the same one-column matrix.
  n_obs <- nrow(runs)
  sn <- -10 * log10(colSums(1 / (runs * runs)) / n_obs)
  as.matrix(sn)
}
create_use_dont_use_matrix<- function(ortho_arr, sn_ratio){
  # Average SN ratio per variable, weighted by a 0/1 indicator matrix:
  # replicate the per-run SN ratios into a matrix shaped like `ortho_arr`,
  # zero out the runs where the variable is switched off, then average over
  # runs column by column.
  n_vars <- dim(ortho_arr)[2]
  sn_expanded <- matrix(rep(sn_ratio, n_vars), ncol = n_vars)
  weighted <- ortho_arr * sn_expanded
  colMeans(weighted)
}
avr_SN_ratio<- function(runs_matrices, ortho_arr, var_names){
  # Per-variable mean SN ratio when the variable is ON vs OFF in the Taguchi
  # design, plus their difference `delta` (the variable's gain).
  #
  # `ortho_arr` uses the orthogonal-array file's 1/2 coding:
  # 1 = variable included in the run, 2 = excluded.
  sn_ratio <- SN_ratio(runs_matrices)
  ##### ON (use) side #####
  # Recode to a 0/1 indicator: 1 where the variable is used in the run.
  on_indicator <- ortho_arr
  on_indicator[on_indicator == 2] <- 0
  use_matrix <- create_use_dont_use_matrix(on_indicator, sn_ratio)
  ##### OFF (don't use) side #####
  # The complement marks the runs where the variable is excluded.
  off_indicator <- 1 - on_indicator
  dont_use_matrix <- create_use_dont_use_matrix(off_indicator, sn_ratio)
  # Combine into one table, one row per variable.
  result <- data.frame(ON = use_matrix, OFF = dont_use_matrix, row.names = var_names)
  result$delta <- use_matrix - dont_use_matrix
  result
}
# Bar-style plot of each variable's SN-ratio gain (delta), sorted in
# decreasing order so the most informative variables appear first.
# Called for its side effect (printing the plot).
graph_SN_ratio <- function(avr_sn_ratio){
sn_ratio_ordered <- avr_sn_ratio[order(-avr_sn_ratio$delta), ]
p <- qplot(x = rownames(sn_ratio_ordered), y = sn_ratio_ordered$delta, data = sn_ratio_ordered,
main = "the Signal-to-Noise", xlab = "variables", ylab = "delta")
print (p)
}
get_ordred_sn_ratio <- function(avr_sn_ratio){
  # Sort the SN-ratio table by decreasing delta (most informative variables
  # first), echo it to the console for the user, and return it.
  # (The historical misspelling in the function name is kept: callers use it.)
  by_delta_desc <- order(-avr_sn_ratio$delta)
  ratio_ordered <- avr_sn_ratio[by_delta_desc, ]
  print(ratio_ordered)
  ratio_ordered
}
dim_reduction<- function(data, ratio_ordered, nVariables){
  # Keep the top `nVariables` variables from the delta-ordered SN table and
  # return the corresponding columns of `data`. The selected variable names
  # are echoed so the user can see which ones survived the reduction.
  top_rows <- ratio_ordered[seq(1, nVariables), ]
  selected_vars <- rownames(top_rows)
  print(selected_vars)
  data[, selected_vars]
}
MTS <- function(normal, abnormal, ortho_filename){
# Run the full Mahalanobis-Taguchi System workflow interactively:
#  1. score both groups with the Mahalanobis distance and plot them,
#  2. screen variables with a Taguchi orthogonal array / SN-ratio analysis,
#  3. ask the user how many variables to keep and reduce both datasets,
#  4. re-score and re-plot with the reduced variable set.
#
# Args:
#   normal, abnormal: datasets WITHOUT label columns, same variables/order.
#   ortho_filename:   CSV file holding the orthogonal array (1/2 coded).
# Side effects: plots, console printing, and readline() prompts.
################ MAHALANOBIS DISTANCE ################
# correlations of the reference (normal) group
corr <- cor(normal, normal)
# estimate the mahalanobis distances of both groups
ref_group <- mahalanobis_dist(normal, corr)
outside_group <- mahalanobis_dist(abnormal, corr)
# Drop the largest abnormal distance, treated as an extreme outlier.
# NOTE(review): the equality test removes *all* observations tied with the
# maximum, not just one -- presumably intended; confirm before changing.
outside_group <- outside_group[-which(outside_group == max(outside_group))]
# plot the result
plot_result(ref_group, outside_group)
readline(prompt = "Hit Enter to contiue ")
################ TAGUCHI ARRAY ################
# build an orthogonal array sized to the number of variables
nVariables <- dim(abnormal)[2]
ortho_arr <- make_ortho_arr(ortho_filename, nVariables)
nCols <- seq(1, dim(ortho_arr)[2])
var_names <- colnames(normal[, nCols])
# compute the signal-to-noise ratio for every run of the array
runs <- generate_runs(ortho_arr, normal[, nCols], abnormal[, nCols])
avr_sn_ratio <- avr_SN_ratio(runs, ortho_arr, var_names)
graph_SN_ratio(avr_sn_ratio)
readline(prompt = "Hit Enter to contiue ")
# let the user pick the number of variables, then reduce both datasets
ratio_ordered <- get_ordred_sn_ratio(avr_sn_ratio)
nVars <- as.integer(readline(prompt = "choose the number of variables : "))
# BUG FIX: was `ratio_orderd` (undefined name), which aborted the run here.
normal <- dim_reduction(normal, ratio_ordered, nVars)
abnormal <- dim_reduction(abnormal, ratio_ordered, nVars)
################ MAHALANOBIS DISTANCE (the second) ################
# recompute correlations and distances on the reduced variable set
corr <- cor(normal, normal)
ref_group <- mahalanobis_dist(normal, corr)
outside_group <- mahalanobis_dist(abnormal, corr)
outside_group <- outside_group[-which(outside_group == max(outside_group))]
# plot the result for the reduced set
plot_result(ref_group, outside_group)
}
|
c89c08ed27295a4605c6081c7d4d063159be69af
|
ae4b017ae346e94f62aeb5511c4b2bf624c0c7ed
|
/testy/GSO funkcje.R
|
f9036f9eea2606927e7a1ed0cf5a0f44c94139cb
|
[] |
no_license
|
JKulpinski/Artificial-Intelligence
|
08e579a30fbc7de15d073e71af56dd710922fc66
|
a28c432ee3e6652937b8c923b010c6b712bbe1da
|
refs/heads/master
| 2020-04-23T22:54:03.347285
| 2019-02-19T18:05:38
| 2019-02-19T18:05:38
| 171,517,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,060
|
r
|
GSO funkcje.R
|
ackley.gso = function(x, y) {
  # Negated 2-D Ackley benchmark (negated so a maximizer such as GSO finds
  # the usual minimum); optimum value 0 at (0, 0).
  radial_term <- -20 * exp(-0.2 * sqrt(0.5 * (x^2 + y^2)))
  cosine_term <- -exp(0.5 * (cos(2 * pi * x) + cos(2 * pi * y)))
  -(radial_term + cosine_term + exp(1) + 20)
}
# Standard 2-D benchmark objectives for Glowworm Swarm Optimization (GSO).
# Each function is NEGATED relative to its textbook form because the GSO
# implementation maximizes; the trailing comment on each definition gives
# the usual search domain and the location/value of the true optimum.
rastrigin.gso <- function(x,y) # x[-5.12,5.12] min 0 at (0,0)
-(20 + x^2 + y^2 - 10*(cos(2*pi*x) + cos(2*pi*y)))
beale.gso = function(x,y) # [-4.5,4.5] min 0 at (3,0.5)
-((1.5-x+x*y)^2 + (2.25-x+x*y^2)^2 +(2.625-x+x*y^3)^2)
goldstein.gso = function(x,y) # [-2,2] min 3 at (0,-1)
-((1+(x+y+1)^2 * (19-14*x+3*x^2 - 14*y+6*x*y+3*y^2))*(30+(2*x-3*y)^2 * (18-32*x+12*x^2 + 48*y-36*x*y+27*y^2)))
booth.gso = function(x,y) # [-10,10] min 0 at (1,3)
-((x+ 2*y-7)^2 + (2*x+y-5)^2)
bukin.gso = function(x,y) # x1[-15,-5] x2[-3,3] min 0 at (-10,1)
-(100*sqrt(abs(y-0.01*x^2))+0.01*abs(x+10))
matyas.gso = function(x,y) # [-10,10] min 0 at (0,0)
-(0.26*(x^2+y^2)-0.48*x*y)
levin13.gso = function(x,y) # [-10,10] min 0 at (1,1)
-(sin(3*pi*x)^2+(x-1)^2 * (1+sin(3*pi*y)^2)+(y-1)^2 * (1+sin(2*pi*y)^2))
three.hump.camel.gso = function(x,y) # [-5,5] min 0 at (0,0)
-(2*x^2 - 1.05*x^4 + (x^6)/6 + x*y + y^2)
easom.gso = function(x,y) # [-100,100] min -1 at (3.14,3.14)
-(-cos(x)*cos(y)*exp(-((x-pi)^2 + (y-pi)^2)))
schaffer.gso = function(x,y) # [-100,100] min 0 at (0,0)
-(0.5+(sin(x^2 - y^2)^2 - 0.5)/(1+0.001*(x^2 + y^2))^2)
eggholder.gso = function(x,y) # [-512,512] min -959 at (512,404)
-(-(y+47)*sin(sqrt(abs(y+x/2+47)))-x*sin(sqrt(abs(x-(y+47)))))
# More GSO benchmark objectives; negated like the group above. Comments were
# translated from Polish where needed.
six.hump.camel.gso <- function(x,y) #x1[-3,3] x2[-2,2] min -1.03 at (0.089,-0.712) or (-0.089, 0.71)
-((4-2.1*x^2+(x^4)/3)*x^2+x*y+(-4+4*y^2)*y^2)
gramacy_lee.gso = function(x,y) ## 1-dimensional benchmark [0.5,2.5]; y is unused
-((((sin(10*pi*x))/(2*x))+(x-1)^4))
drop_wave.gso = function(x,y)# [-5.12,5.12] min -1 at (0,0)
-(-(1+cos(12*sqrt(x^2+y^2)))/(0.5*(x^2+y^2)+2))
mccormick.gso = function(x,y) # x1[-1.5,4] x2[-3,4] min -1.91 at (-0.54,-1.54)
-(sin(x+y)+(x-y)^2-(1.5*x)+(2.5*y)+1)
holder.table.gso <- function(x,y) #x[-10,10] min -19 at (8.05,9.66) and at the sign-flipped copies
-(-abs(sin(x)*cos(y)*exp(abs(1 - sqrt(x^2+y^2)/pi))))
cross_in_tray.gso = function(x,y) # x[-10,10] min -2.06 at (1.34,1.34) and at the sign-flipped copies
-(-0.0001*(abs(sin(x)*sin(y)*exp(abs(100-((sqrt(x^2+y^2))/pi))))+1)^0.1)
test_tube_1.gso = function(x,y) ## not taken from the benchmark reference page
-(-4*abs(sin(x)*cos(y)*exp(1)^abs(cos(((x^2)+(y^2))/200))))
adjiman.gso = function(x,y) # x1[-1,2] x2[-1,1] min -2.02 at (2,0.1)
-(cos(x)*sin(y) - (x)/(y^2 +1))
bartels.conn.gso = function(x,y) # [-500,500] min 1 at (0,0)
-(abs(x^2 + y^2 + x*y)+abs(sin(x))+abs(cos(y)))
bohachevsky.gso = function(x,y) # [-100,100] min 0 at (0,0)
-(x^2 + 2*y^2 - 0.3*cos(3*pi*x) - 0.4*cos(4*pi*y)+0.7)
brent.gso = function(x,y) #[-10,10] min 0 at (0,0), but this formula evaluates to 201 there
-((x+10)^2 + (y+10)^2 + exp(1)^(-x^2 -y^2))
leon.gso = function(x,y) # [-1.2,1.2] min 0 at (1,1)
-(100*(y-x^2)^2 + (1-x)^2)
venter.gso = function(x,y) # [-50,50] min -400 at (0,0)
-(x^2 - 100*cos(x)^2-100*cos(x^2/30)+y^2-100*cos(y)^2-100*cos(y^2/30))
zirilli.gso = function(x,y) #[-10,10] min -0.35 at (-1.04,0)
-(0.25*x^4 - 0.5*x^2 + 0.1*x + 0.5*y^2)
|
bebd0bb0efffaa12670390214e1aad357206d971
|
44ffa5ec0467f425b2ce4f55e3795abd1b082ef5
|
/R/conv.factor.2.char.R
|
442ac9f25e8a1bd4b9ba24b3b08f352457025caa
|
[] |
no_license
|
MikeMorris89/rmm
|
147e2b8b08745a17c5075f7dd3786d1a2ba1fad1
|
03e6a8bfd5c3bf572c3cd3e6869368f6ab9ca5ac
|
refs/heads/master
| 2021-01-12T06:37:26.595215
| 2017-02-11T14:19:08
| 2017-02-11T14:19:08
| 77,397,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
conv.factor.2.char.R
|
#' conv.factor.2.char
#'
#' Convert all factor columns of a data frame to character columns, leaving
#' every other column untouched.
#' @param df dataframe to run conversion on
#' @return dataframe with all factor columns converted to character.
#' @export
conv.factor.2.char<-function(df){
  # Compute the factor-column mask once (the original called sapply() twice)
  # and drop the spurious `drop = F` argument, which as.character() ignores.
  # vapply() pins the result type, unlike sapply().
  is_fac <- vapply(df, is.factor, logical(1))
  df[is_fac] <- lapply(df[is_fac], as.character)
  return(df)
}
|
6960a72fd607d96dcb1d074652722d095422eb92
|
c42e4faa9a5a546fc8fbe7cc367383a14c2a7d8a
|
/그래프(미완성).R
|
991a6d0b69e01d62ea40fba43bfc1e1878e216bb
|
[] |
no_license
|
wisdom009/R
|
4be82b65a9d1b79085cf40ced11173421f405548
|
202dce45c6faac6bd4563f96a40a09b09ae416ed
|
refs/heads/master
| 2020-06-03T19:03:28.896231
| 2019-06-17T08:30:42
| 2019-06-17T08:30:42
| 189,162,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,419
|
r
|
그래프(미완성).R
|
# Exploratory ggplot2/dplyr practice script (the file name translates to
# "Graphs (unfinished)"). Several expressions below are incomplete or will
# not parse; NOTE(review) comments flag those spots. Korean comments have
# been translated to English.
mtcars
str(mtcars) # compact structure summary
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point()
ggplot(mtcars,aes(x=hp, y=disp)) +
geom_point()
ggplot(mtcars,aes(x=hp, y=mpg)) +
geom_point(color="red") +
geom_line()
# Plots a `kor` data set (name = student name, score); `kor` is defined
# elsewhere -- presumably in an earlier session.
ggplot(kor,aes(x=이름,y=점수)) + geom_point()
# NOTE(review): `sort` shadows base::sort here (function calls still work,
# but it is confusing); also `score` must exist in the session.
sort=arrange(score,이름,과목)
sort
sort2 = sort %>%
group_by(이름)
# Mean city mpg for the suv and compact classes.
mpg %>%
group_by(class) %>%
select(cty,class) %>%
filter(class %in% c('suv','compact')) %>%
summarise_each(funs(mean),cty)
View(mpg)
# -1-
ggplot(mpg, aes(x=cty, y=hwy)) + geom_point()
# -2-
ggplot(midwest, aes(x=poptotal, y=popasian)) + geom_point()
# NOTE(review): `mw` is not defined anywhere in this script.
mw
# -3-
# NOTE(review): summarise(mean_cty) references an undefined name and
# arrange(desc(class)) refers to a column dropped by summarise -- this
# pipeline will error as written.
mpg3=mpg %>%
filter(class =='suv') %>%
group_by(manufacturer) %>%
summarise(mean_cty) %>%
arrange(desc(class))
mpg3
#-4-
mpg1=mpg %>%
group_by(model) %>%
select(model,class) %>%
filter(class %in% c('suv')) %>%
summarise_each(funs(n())) %>%
arrange(desc(class))
mpg1
# NOTE(review): the trailing `+` after theme(...) makes the parser consume
# the next expression (`economics`) as part of this plot -- likely an
# unfinished chain.
ggplot(mpg1,aes(x=model, y=class)) +
geom_bar(stat = "identity") +
theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust = 1,colour = "black",size = 10)) +
#---5--
economics
ggplot(economics, aes(x=date, y=psavert))+
geom_line()
# --6--
mpg
mpg2 = mpg %>%
group_by(class) %>%
select(cty,class) %>%
filter(class %in% c('compact','subcompact','suv'))
mpg2
# NOTE(review): another dangling `+`; the next expression (str(diamonds))
# gets swallowed by the parser.
ggplot(mpg2, aes(x = class, y = cty)) + geom_boxplot(fill=c("blue","green","red")) +
#--7-1-
str(diamonds)
# NOTE(review): the next line is a stray note ("how many observations per
# level?") and is not valid R; it stops the script if sourced.
돗수가 몇개인지
# NOTE(review): `%iin%` is a typo for `%in%`, the filter() call is malformed,
# and `Very Good` is an unquoted name with a space -- this block cannot parse.
d1=diamonds %>%
group_by(cut) %>%
filter(cut, %iin% c('Fair','Good','Very Good','Premium','ideal')) %>%
summarise_each(list(mean),Fair,Good,Very Good,Premium,ideal)
#----
ggplot(diamonds, aes(x=cut, fill=cut)) +
geom_bar() # correct answer
#--7-2-
ggplot(diamonds, aes(x=cut, y=price)) +
geom_bar(stat = "identity",fill="blue") +
theme(axis.text.x = element_text(angle = 45, hjust = 1,vjust = 1,colour = "black",size = 10),
axis.text.y = element_text(angle = 30, hjust = 1,vjust = 1,size = 10))
# ---
dd= diamonds %>%
group_by(cut) %>%
summarise(mean_price = mean(price))
ggplot(dd, aes(x=cut, y=mean_price, fill=cut)) +
geom_col() # correct answer
#(can be simplified using the 7-1 answer: ggplot(diamonds, aes(x=cut, fill=cut)) + geom_bar())
#--7-3-
# NOTE(review): filter(cut,color) filters on non-logical columns and the
# summarise() on the next line is detached from the pipe -- unfinished.
dd1=diamonds%>%
group_by(color) %>%
select(cut, color, price) %>%
filter(cut,color)
summarise(mean_price = mean(price))
# NOTE(review): `goem_histogram` is a typo for geom_histogram, and
# facet_wrap(cut) presumably needs a formula, i.e. facet_wrap(~cut) -- confirm.
ggplot(diamonds, aes(x=price)) +
goem_histogram(bins=10) +
facet_wrap(cut) # shorthand attempt
|
5071a5a3b71aec831060e5199b50bbab6d913d56
|
8c455002b17bced05e00ab992da04b4ed23baa07
|
/05.1-Esti_UF-2000.R
|
5f915677bffc89e0d45ae55a2140c7d674863533
|
[] |
no_license
|
Marcelquintela/Dissertacao
|
7569b0b91017dd986de5c85264597064aac778d4
|
286d69cd9090091ed9041b0002413825b2589820
|
refs/heads/master
| 2022-11-05T23:53:18.626948
| 2020-06-22T15:32:53
| 2020-06-22T15:32:53
| 274,173,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,941
|
r
|
05.1-Esti_UF-2000.R
|
## *************************************************************************##
# POVERTY IN THE BRAZILIAN NORTHEAST                                        #
# Student MARCEL DANTAS DE QUINTELA  Reg. 2012303131-27                     #
# Program: ESTIMATES                                                        #
## *************************************************************************##
#
# For each state (UF) of the Brazilian Northeast (codes 21-29), compute
# survey-based estimates of the Multidimensional Poverty Index (MPI) and
# related indicators from the 2000 household files, then write the results
# to an Excel workbook. Comments translated from Portuguese.
library(survey)
library(xlsx)
# NOTE(review): setwd() with an absolute path makes this script
# machine-specific; consider relative paths or a project root.
setwd("D:\\Mestrado-ENCE.2012\\Dropbox\\00.Dissertacao")
# State names in the order of the state codes 21..29 used below.
UF <-c("Maranhão","Piauí","Ceará","Rio Grande do Norte",
"Paraíba","Pernambuco","Alagoas","Sergipe","Bahia")
# Result accumulator; the initial all-NA row is dropped after the loop.
Y <- data.frame(matrix(NA,1,27,byrow=T))
names(Y) <- c("UF",
"MPI" ,"CV.MPI",
"Pobres (%)","CV.Pobre (%)",
"NaoPobres (%)","CV.NaoPobre (%)",
"Vulneraveis (%)","CV.Vulneraveis (%)",
"Pobres Graves (%)","CV.Pobre Graves (%)",
"Intensidade.Priva(%)","CV.Intensidade(%)",
"Contrib.D_1 (%)","CV.D_1 (%)",
"Contrib.D_2 (%)","CV.D_2 (%)",
"Contrib.D_3 (%)","CV.D_3 (%)",
"Contrib.D_4 (%)","CV.D_4 (%)",
"Contrib.D_5 (%)","CV.D_5 (%)",
"Pobre LP-2PPC(%)","CV.Pobre LP-2ppc(%)",
"Pobre LP-MDS(%)","CV.Pobre LP-MDS(%)")
# State codes to iterate over.
# NOTE(review): `c` shadows base::c as a variable; calls to c() still work,
# but a name like `uf_codes` would be clearer.
c <- c(21:29)
for (j in 1:length(c)){
# Loads `dados.Dom` (household-level data) for the j-th state.
load(paste("04.Banco.de.Dados/Banco/2000_Dom",c[j],".rda",sep=""))
# ------------------- Deprivation scores ----------------------------------
# Five equally-weighted dimensions (1/5 each), with indicators weighted
# equally within each dimension.
dados.Dom <- transform(dados.Dom, SD1 = ((D11+D12+D13+D14 )*1/4*1/5),
SD2 = ((D21+D22+D23+D24+D25 )*1/5*1/5),
SD3 = ((D31+D32+D33+D34+D35+D36)*1/6*1/5),
SD4 = ((D41+D42+D43+D44 )*1/4*1/5),
SD5 = ((D51+D52+D53+D54 )*1/4*1/5))
# Total deprivation score C and poverty-status indicators by cutoff.
dados.Dom <- transform(dados.Dom, C = SD1+SD2+SD3+SD4+SD5)
dados.Dom <- transform(dados.Dom, NPob = ifelse( C<0.20,1,0),
Vul = ifelse((C>=0.20 & C<0.33),1,0),
Pob = ifelse( C>=0.33,1,0),
PobG = ifelse( C>=0.50,1,0))
# ------------------- Incidence - H ------------------------------------
# Proportion of the population that is multidimensionally poor: H = q/n,
# where q is the number of multidimensionally poor people and n the total
# population per domain (= municipality).
# Since the file is aggregated by household, expansion to persons uses:
# V0401 - number of people living in the household
dados.Dom <- transform(dados.Dom, q = ifelse(Pob==1,V7100,0))
dados.Dom <- transform(dados.Dom, SumC = ifelse(Pob==1,C*V7100,0),
SumC.1 = ifelse(Pob==1,SD1*V7100,0),
SumC.2 = ifelse(Pob==1,SD2*V7100,0),
SumC.3 = ifelse(Pob==1,SD3*V7100,0),
SumC.4 = ifelse(Pob==1,SD4*V7100,0),
SumC.5 = ifelse(Pob==1,SD5*V7100,0))
# Stratified survey design: strata = weighting areas, with finite
# population correction and household weights.
AES <- svydesign (id = ~1,
strata = ~AREAP,
fpc = ~N_Dom.AP,
weights = ~P001,
data = dados.Dom)
# Incidence of poor and vulnerable households - H.
# Also includes the per-capita household income indicator for comparison
# with the national poverty line and the UN line (2 US$ PPP/day); in 2000
# those lines are equivalent (R$ 70.8).
# The 2000 line is the same as the 2010 line deflated by the INPC.
# (divides by the municipality's total number of households)
H <- svymean(~Pob+NPob+Vul+PobG+D11a+D11,
design = AES,
na.rm = TRUE)
rownames(H)<-NULL
# Intensity (A): average deprivation score among the poor.
A <- svyratio(numerator = ~SumC,
denominator = ~q, # denominator: number of poor people
design = AES,
na.rm = TRUE)
rownames(A)<-NULL
# MPI = H * A, estimated directly as a ratio over the whole population.
MPI <- svyratio(numerator = ~SumC,
denominator = ~V7100,
design = AES,
na.rm = TRUE)
rownames(MPI)<-NULL
# Contribution of each dimension to the overall deprivation of the poor.
C.i <- svyratio(numerator = ~SumC.1+SumC.2+SumC.3+SumC.4+SumC.5,
denominator = ~SumC,
design = AES,
na.rm = TRUE)
rownames(C.i)<-NULL
coef(C.i)
# Assemble the one-row result for this state.
# NOTE(review): the cv(H) indices look suspicious -- [2] paired with H[1]
# and [1] with H[2], and [3]/[4] reused for H[5]/H[6] at the bottom;
# presumably these should align one-to-one with H -- confirm before reuse.
X <- data.frame(UF[j],
coef(MPI) , round(cv(MPI)*100,4) ,
H[1]*100 , round(cv(H)[2]*100,4),
H[2]*100 , round(cv(H)[1]*100,4),
H[3]*100 , round(cv(H)[3]*100,4),
H[4]*100 , round(cv(H)[4]*100,4),
coef(A)*100 , round(cv(A)*100,4) ,
coef(C.i)[1]*100 , round(cv(C.i)[1]*100,4),
coef(C.i)[2]*100 , round(cv(C.i)[2]*100,4),
coef(C.i)[3]*100 , round(cv(C.i)[3]*100,4),
coef(C.i)[4]*100 , round(cv(C.i)[4]*100,4),
coef(C.i)[5]*100 , round(cv(C.i)[5]*100,4),
H[5]*100 , round(cv(H)[3]*100,4),
H[6]*100 , round(cv(H)[4]*100,4))
names(X) <- c("UF",
"MPI" ,"CV.MPI",
"Pobres (%)","CV.Pobre (%)",
"NaoPobres (%)","CV.NaoPobre (%)",
"Vulneraveis (%)","CV.Vulneraveis (%)",
"Pobres Graves (%)","CV.Pobre Graves (%)",
"Intensidade.Priva(%)","CV.Intensidade(%)",
"Contrib.D_1 (%)","CV.D_1 (%)",
"Contrib.D_2 (%)","CV.D_2 (%)",
"Contrib.D_3 (%)","CV.D_3 (%)",
"Contrib.D_4 (%)","CV.D_4 (%)",
"Contrib.D_5 (%)","CV.D_5 (%)",
"Pobre LP-2PPC(%)","CV.Pobre LP-2ppc(%)",
"Pobre LP-MDS(%)","CV.Pobre LP-MDS(%)")
row.names(X)<-NULL
Y <- rbind(Y,X)
}
# Drop the all-NA seed row and export.
Y <- Y[-1,]
row.names(Y)<-NULL
write.xlsx(Y, "07.Programa.R/saidas/Ind_Estado/MPI_UF2000.xlsx")
|
30d1ef7cfbacd57e8a03fb9b573ac4152c4cd563
|
a3864f60b8dc3a3b9af5a42547c25470e38f7bf6
|
/man/print.mvdalab.Rd
|
3812e9171f0909198ba00708cc2e7684a8d9dc79
|
[] |
no_license
|
cwmiller21/mvdalab
|
c29ffb52a41a969a1c2701ccab77901afed2fbd9
|
e9f1bea960cdf7dd2d0472581f9fe97c30bd6d4f
|
refs/heads/master
| 2021-01-18T05:24:46.133643
| 2016-02-29T10:54:45
| 2016-02-29T10:54:45
| 52,802,389
| 1
| 0
| null | 2016-02-29T15:38:22
| 2016-02-29T15:38:22
| null |
UTF-8
|
R
| false
| false
| 818
|
rd
|
print.mvdalab.Rd
|
\name{print.mvdalab}
\alias{print.mvdareg}
\title{Print Methods for mvdalab Objects}
\description{Summary and print methods for mvdalab objects.}
\usage{
\method{print}{mvdareg}(x, ...)
}
\arguments{
\item{x}{ an mvdalab object }
\item{\dots}{ additional arguments. Currently ignored. }
}
\details{
\code{print.mvdalab} Is a generic function used to print mvdalab objects, such as \code{print.empca} for \code{imputeEM}, \code{print.mvdapca} for \code{mvdapca} objects, and \code{summary.mvdareg} for \code{mvdareg} objects.
}
\author{Nelson Lee Afanador (\email{nelson.afanador@mvdalab.com})}
\examples{
data(Penta)
mod1 <- plsFit(log.RAI ~., scale = TRUE, data = Penta[, -1], ncomp = 3, contr = "contr.none",
method = "bidiagpls", validation = "oob")
print(mod1, ncomp = 3)
summary(mod1, ncomp = 3)
}
|
ab7f4058bb7f43d977ae8001d37138c01a59b150
|
5ca4fee7458cf53b14b05f20c6908d7e1a2e947b
|
/Xt20.R
|
0854cea8110e3056a3ff5171dc7ff68d7b32bdd6
|
[] |
no_license
|
zwangcode/R_at_UC
|
5c259de5cb40a6902016dc1338be1b933ba35849
|
c1ae937673e404e61999c3e8c6b47061c11827c7
|
refs/heads/master
| 2021-01-18T15:06:04.688011
| 2016-03-18T19:10:56
| 2016-03-18T19:10:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 59
|
r
|
Xt20.R
|
11.5
11.6
11.6
11.7
11.5
11.6
11.5
11.6
11.5
11.5
11.5
11.6
|
1f9f3aac9672d73b10ed32296fcf3480a6b4340c
|
09176f958c9f3ac9a3273d6a16e426d379a0ae40
|
/importing_held_data.R
|
85cf5805fbdc13c80382152d7d8594a3c107433b
|
[] |
no_license
|
jbakerr/CIS2017-18
|
bb8d202bb06c0673610f3a78defd857631e957d1
|
0051ee2cf2284583bb80a9e9fb6664649ab59ed7
|
refs/heads/master
| 2020-03-09T12:31:15.687034
| 2018-07-24T18:44:39
| 2018-07-24T18:44:39
| 128,788,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,343
|
r
|
importing_held_data.R
|
# Import quarter-3 grades (sheet 7) and Shepard attendance (sheet 1) from the
# student-metric workbook and merge them into the pre-existing `progress`
# data frame (defined earlier in the session).
grades <- readWorksheetFromFile('Student_Metric_Import_Template_Final copy 2.xlsx', sheet=7, header = T, startRow = 1)
shepard_attendance <- readWorksheetFromFile('Student_Metric_Import_Template_Final copy 2.xlsx', sheet=1, header = T, startRow = 1)
# Normalize course names into the metric labels used by `progress`.
grades[grades$Core.Course == "Eng/Lang Arts/Reading/Writing", 1] <- "ELA"
grades[grades$Core.Course == "Math 1", 1] <- "Math"
quarter <- c("Q1", "Q2", "Q3", "Q4")
# grades <- spread(grades, Grading.period, Numeric.Grade)
# progress <- spread(progress[, ! colnames(progress) %in% c("Metric", "Period")], quartersubject, Value)
# Rename key columns and drop the unused trailing columns; presumably
# column 3 holds the Q3 numeric grade -- confirm against the workbook layout.
colnames(grades)[1] <- "Metric"
colnames(grades)[3] <- "Q3"
grades[4:12] <- NULL
# Attendance rate (%) = 1 - unexcused absences / days in report period.
shepard_attendance$Q3 <- 0
shepard_attendance$Q3 <- (1 - (shepard_attendance$X..of.unexcused / shepard_attendance$X..of.days.in.report.period))*100
shepard_attendance$Metric <- "Attendance Rate"
shepard_attendance[2:12] <- NULL
# Outer-join grades onto progress, then fill missing Q3.y from Q3.x and
# collapse back to a single Q3 column.
# NOTE(review): colnames(progress)[22] hard-codes the Q3.y position; this
# silently mislabels a column if the schema of `progress` ever changes.
progress <- merge(progress, grades, by = c("Student.ID", "Metric"), all = T)
progress$Q3.y[ is.na(progress$Q3.y) ] <- progress$Q3.x[ is.na(progress$Q3.y) ]
colnames(progress)[22] <- "Q3"
progress$Q3.x <- NULL
#
# Repeat the same merge/fill/rename dance for the attendance metric.
progress <- merge(progress, shepard_attendance, by = c("Student.ID", "Metric"), all = T)
#
#
progress$Q3.y[ is.na(progress$Q3.y) ] <- progress$Q3.x[ is.na(progress$Q3.y) ]
colnames(progress)[22] <- "Q3"
progress$Q3.x <- NULL
|
fb248e3c3c0542471b75f2c292a0b3e249cd0dda
|
2ee4b1f2ac564f9bc894e4f5e36dc88360d7fe9a
|
/R/Visualization.R
|
61b0923f3b5af36cda136e8cde673ee7692d4a40
|
[
"MIT"
] |
permissive
|
sjanssen2/MetaLonDA
|
9db9a5315875c7f1b6e5afdc1b1eda6f539b40c5
|
2ad94285d2a4cc7ca6af749f3539db37b4f1d35e
|
refs/heads/master
| 2020-07-14T00:18:52.912013
| 2019-04-13T09:07:44
| 2019-04-13T09:07:44
| 205,187,456
| 0
| 0
|
NOASSERTION
| 2019-08-29T14:54:17
| 2019-08-29T14:54:17
| null |
UTF-8
|
R
| false
| false
| 15,553
|
r
|
Visualization.R
|
#' Visualize Longitudinal Feature
#'
#' Visualize Longitudinal Feature
#'
#' @param df dataframe has the Count, Group, ID, Time
#' @param text feature name
#' @param group.levels The two level's name
#' @param unit time interval unit
#' @param col two color to be used for the two groups (eg., c("red", "blue")).
#' @param ylabel text to be shown on the y-axis of all generated figures (default: "Normalized Count")
#' @param prefix prefix to be used to create directory for the analysis results
#' @import ggplot2
#' @import grDevices
#' @import graphics
#' @references
#' Ahmed Metwally (ametwall@stanford.edu)
#' @examples
#' data(metalonda_test_data)
#' dir.create(file.path("Test"))
#' n.sample = 5
#' n.timepoints = 10
#' n.group = 2
#' Group = factor(c(rep(0, n.sample*n.timepoints), rep(1, n.sample*n.timepoints)))
#' Time = rep(rep(1:n.timepoints, times = n.sample), 2)
#' ID = factor(rep(1:(2*n.sample), each = n.timepoints))
#' points = seq(1, 10, length.out = 10)
#' aggregate.df = data.frame(Count = metalonda_test_data[1,], Time = Time, Group = Group, ID = ID)
#' visualizeFeature(aggregate.df, text = rownames(metalonda_test_data)[1], Group)
#' @export
visualizeFeature = function (df, text, group.levels, unit = "days", ylabel = "Normalized Count",
col = c("blue", "firebrick"), prefix = "Test")
{
# Plot one longitudinal feature: one point/line trajectory per subject (ID),
# colored by group, and save the figure as <prefix>/Feature_<text>.jpg.
# `df` must carry Count, Time, Group ("0"/"1") and ID columns; `prefix`
# must name an existing directory (ggsave does not create it).
cat("Visualizing Feature = ", text, "\n")
Count=0; Time=0; ID=0; Group=0 ## This line is just to pass the CRAN checks for the aes in ggplot2
# One trajectory per (Group, ID) combination.
p = ggplot(df, aes(Time, Count, colour = Group, group = interaction(Group, ID)))
p = p + geom_point(size = 1, alpha = 0.5) + geom_line(size = 1, alpha = 0.7) + theme_bw() +
ggtitle(paste("Feature = ", text, sep = "")) + labs(y = ylabel, x = sprintf("Time (%s)", unit)) +
# Map the internal "0"/"1" group codes to the caller-supplied labels/colors.
scale_colour_manual(values = col, breaks = c("0", "1"),
labels = c(group.levels[1], group.levels[2])) +
theme(axis.text.x = element_text(colour="black", size=12, angle=0, hjust=0.5, vjust=0.5, face="bold"),
axis.text.y = element_text(colour="black", size=12, angle=0, hjust=0.5, vjust=0.5, face="bold"),
axis.title.x = element_text(colour="black", size=15, angle=0, hjust=.5, vjust=0.5, face="bold"),
axis.title.y = element_text(colour="black", size=15, angle=90, hjust=.5, vjust=.5, face="bold"),
legend.text=element_text(size=15, face="plain"), legend.title = element_blank(),
plot.title = element_text(hjust = 0.5)) +
theme(legend.position="top") + scale_x_continuous(breaks = waiver())
#print("Prefix = ", prefix)
#ggsave(filename=paste("Feature_", text, ".jpg", sep=""), dpi = 1200, height = 10, width = 15, units = 'cm')
# ggsave writes the most recently created plot (p) to disk.
ggsave(filename=paste(prefix, "/", "Feature_", text, ".jpg", sep=""), dpi = 1200, height = 10, width = 15, units = 'cm')
}
#' Visualize the feature trajectory with the fitted Splines
#'
#' Plot the longitudinal features along with the fitted splines
#'
#' @param df dataframe has the Count , Group, ID, Time
#' @param model the fitted model
#' @param method The fitting method (negative binomial, LOWESS)
#' @param group.levels The two level's name
#' @param text feature name
#' @param unit time unit used in the Time vector (hours, days, weeks, months, etc.)
#' @param col two color to be used for the two groups (eg., c("red", "blue")).
#' @param ylabel text to be shown on the y-axis of all generated figures (default: "Normalized Count")
#' @param prefix prefix to be used to create directory for the analysis results
#' @import ggplot2
#' @import grDevices
#' @import graphics
#' @references
#' Ahmed Metwally (ametwall@stanford.edu)
#' @export
visualizeFeatureSpline = function (df, model, method, text, group.levels, unit = "days", ylabel = "Normalized Count",
                                   col = c("blue", "firebrick"), prefix = "Test")
{
  # Plot the raw per-subject trajectories of one feature together with the two
  # fitted group curves taken from `model` (model$dd.0 and model$dd.1), and
  # save the figure to <prefix>/Feature_<text>_CurveFitting_<method>.jpg.
  #
  # df:     data.frame with columns Count, Time, Group, ID (raw observations)
  # model:  fitted model object carrying data frames dd.0 / dd.1 (fitted curves)
  # method: fitting-method name used in the output file name
  cat("Visualizing Splines of Feature = ", text, "\n")
  Count=0;Time=0;ID=0;Group=0;lnn=0 ## This line is just to pass the CRAN checks for the aes in ggplot2
  dd.0 = model$dd.0
  dd.1 = model$dd.1
  # NOTE: the original also extracted model$dd.null, but it was never used, so
  # that dead assignment has been removed.
  # Line-type and size vectors cover raw data rows followed by both fits.
  ln = factor(c(rep("longdash", nrow(df)), rep("longdash", nrow(dd.0)), rep("longdash", nrow(dd.1))))
  size = c(rep(1, nrow(df)), rep(1, nrow(dd.0)), rep(1, nrow(dd.1)))
  dm = rbind(df[,c("Time", "Count", "Group", "ID")], dd.0, dd.1)
  dm$lnn=ln
  dm$sz= size
  p = ggplot(dm, aes(Time, Count, colour = Group, group = interaction(Group, ID)))
  p = p + theme_bw() + geom_point(size=1, alpha=0.5) + geom_line(aes(linetype=lnn), size=1, alpha=0.5) +
    ggtitle(paste("Feature = ", text, sep = "")) + labs(y = ylabel, x = sprintf("Time (%s)", unit)) +
    scale_colour_manual(values = c(col, col),
                        breaks = c("0", "1", "fit.0", "fit.1"),
                        labels = c(group.levels[1], group.levels[2], paste(group.levels[1], ".fit", sep=""), paste(group.levels[2], ".fit", sep=""))) +
    theme(axis.text.x = element_text(colour="black", size=12, angle=0, hjust=0.5, vjust=0.5, face="bold"),
          axis.text.y = element_text(colour="black", size=12, angle=0, hjust=0.5, vjust=0.5, face="bold"),
          axis.title.x = element_text(colour="black", size=15, angle=0, hjust=.5, vjust=0.5, face="bold"),
          axis.title.y = element_text(colour="black", size=15, angle=90, hjust=.5, vjust=.5, face="bold"),
          legend.text=element_text(size=15, face="plain"), legend.title = element_blank(),
          plot.title = element_text(hjust = 0.5)) +
    theme(legend.position="top") + scale_x_continuous(breaks = waiver()) + guides(linetype=FALSE, size =FALSE)
  # BUG FIX: pass the plot explicitly; ggsave()'s default last_plot() is not
  # reliable inside a function where the plot is never printed.
  ggsave(filename=paste(prefix, "/", "Feature_", text, "_CurveFitting_", method, ".jpg", sep=""), plot = p,
         dpi = 1200, height = 10, width = 15, units = 'cm')
}
#' Visualize significant time interval
#'
#' Visualize significant time interval
#'
#' @param aggregate.df Dataframe has the Count, Group, ID, Time
#' @param model.ss The fitted model
#' @param method Fitting method (negative binomial or LOWESS)
#' @param start Vector of the start points of the time intervals
#' @param end Vector of the end points of the time intervals
#' @param text Feature name
#' @param group.levels Level's name
#' @param unit time unit used in the Time vector (hours, days, weeks, months, etc.)
#' @param col two color to be used for the two groups (eg., c("red", "blue")).
#' @param ylabel text to be shown on the y-axis of all generated figures (default: "Normalized Count")
#' @param prefix prefix to be used to create directory for the analysis results
#' @import ggplot2
#' @import grDevices
#' @import graphics
#' @references
#' Ahmed Metwally (ametwall@stanford.edu)
#' @export
visualizeArea = function(aggregate.df, model.ss, method, start, end, text, group.levels, unit = "days",
                         ylabel = "Normalized Count", col = c("blue", "firebrick"), prefix = "Test")
{
  # Plot the two fitted group curves and shade each significant time interval
  # (the area between the curves), then save the figure to
  # <prefix>/Feature_<text>_SignificantInterval_<method>.jpg.
  #
  # aggregate.df: raw data (kept for interface compatibility; not plotted here)
  # model.ss:     fitted model with data frames dd.0 / dd.1 (fitted curves)
  # start, end:   parallel vectors delimiting the significant intervals
  cat("Visualizing Significant Intervals of Feature = ", text, "\n")
  Time=0; Count=0; Group=0; ID=0; ymin.fit=0 ## This line is just to pass the CRAN checks for the aes in ggplot2
  dd.0 = model.ss$dd.0
  dd.1 = model.ss$dd.1
  dm = rbind(dd.0, dd.1)
  p = ggplot(dm, aes(Time, Count, colour = Group, group = interaction(Group, ID))) +
    theme_bw() + geom_point(size = 1, alpha = 0.5) + geom_line(size = 1, alpha = 0.5) +
    ggtitle(paste("Feature = ", text, sep = "")) + labs(y = ylabel, x = sprintf("Time (%s)", unit)) +
    scale_colour_manual(values = col,
                        breaks = c("fit.0", "fit.1"),
                        labels = c(paste(group.levels[1], ".fit", sep = ""), paste(group.levels[2], ".fit", sep = ""))) +
    theme(axis.text.x = element_text(colour = "black", size = 12, angle = 0, hjust = 0.5, vjust = 0.5, face = "bold"),
          axis.text.y = element_text(colour = "black", size = 12, angle = 0, hjust = 0.5, vjust = 0.5, face = "bold"),
          axis.title.x = element_text(colour = "black", size = 15, angle = 0, hjust = 0.5, vjust = 0.5, face = "bold"),
          axis.title.y = element_text(colour = "black", size = 15, angle = 90, hjust = 0.5, vjust = 0.5, face = "bold"),
          legend.text = element_text(size = 15, face="plain"), legend.title = element_blank(),
          plot.title = element_text(hjust = 0.5)) +
    theme(legend.position = "top") + scale_x_continuous(breaks = waiver())
  # IMPROVEMENT: add one geom_ribbon layer per significant interval directly,
  # instead of assembling ggplot code as strings and eval(parse())-ing it.
  # The lower bound (the other group's fitted curve) is precomputed into the
  # layer's own data frame, so no loop variable is captured lazily by aes().
  for (i in seq_along(start)) {
    ribbon = subset(dd.0, Time >= start[i] & Time <= end[i])
    # assumes dd.0 and dd.1 share the same Time grid, as the original code did
    ribbon$ymin.fit = subset(dd.1, Time >= start[i] & Time <= end[i])$Count
    p = p + geom_ribbon(data = ribbon, aes(ymin = ymin.fit, ymax = Count),
                        colour = "grey3", fill = "grey69", alpha = 0.6)
  }
  # BUG FIX: pass the plot explicitly; ggsave()'s default last_plot() is not
  # reliable inside a function where the plot is never printed.
  ggsave(filename=paste(prefix, "/", "Feature_", text, "_SignificantInterval_", method, ".jpg", sep=""), plot = p,
         dpi = 1200, height = 10, width = 15, units = 'cm')
}
#' Visualize all significant time intervals for all tested features
#'
#' Visualize all significant time intervals for all tested features
#'
#' @param interval.details Dataframe has infomation about significant interval (feature name, start, end, dominant, p-value)
#' @param prefix prefix for the output figure
#' @param unit time unit used in the Time vector (hours, days, weeks, months, etc.)
#' @param col two color to be used for the two groups (eg., c("red", "blue")).
#' @import ggplot2
#' @import grDevices
#' @import graphics
#' @references
#' Ahmed Metwally (ametwall@stanford.edu)
#' @export
visualizeTimeIntervals = function(interval.details, prefix = "Test", unit = "days",
                                  col = c("blue", "firebrick"))
{
  # Summary figure: one horizontal segment per feature spanning each
  # significant time interval, coloured by the dominant group. Saved to
  # <prefix>/<prefix>_MetaLonDA_TimeIntervals.jpg.
  #
  # interval.details: data.frame with columns feature, start, end, dominant, pvalue
  feature=0;dominant=0;ID=0;Group=0;lnn=0;start=0;end=0 ## This line is just to pass the CRAN checks for the aes in ggplot2
  interval.details$dominant = as.factor(interval.details$dominant)
  interval.details$pvalue = as.numeric((interval.details$pvalue))
  interval.details = interval.details[order(interval.details$feature), ]
  # BUG FIX: capture the plot in `p` and hand it to ggsave(). The original
  # built the ggplot as a discarded (never printed) expression inside the
  # function, so ggsave()'s default plot = last_plot() would save a stale
  # plot instead of this one.
  p = ggplot(interval.details, aes(ymin = start , ymax = end, x = feature, xend = feature)) +
    geom_linerange(aes(color = dominant), size = 1) +
    coord_flip() + scale_colour_manual(values = col) +
    labs(x = "Feature", y = sprintf("Time (%s)", unit), colour="Dominant") +
    theme(axis.text.x = element_text(colour = "black", size = 10, angle = 0, hjust = 0.5, vjust = 0.5, face = "bold"),
          axis.text.y = element_text(colour = "black", size = 8, angle = 0, vjust = 0.5, face = "bold"),
          axis.title.x = element_text(colour = "black", size = 15, angle = 0, hjust = 0.5, vjust = 0.5, face = "bold"),
          axis.title.y = element_text(colour = "black", size = 15, angle = 90, hjust = 0.5, vjust = 0.5, face = "bold"),
          legend.text = element_text(size = 15, face = "plain")) +
    theme(panel.grid.minor = element_blank(),
          panel.grid.major.y = element_line(colour = "white", size = 6),
          panel.grid.major.x = element_line(colour = "white",size = 0.75)) +
    theme(legend.position="top", panel.border = element_rect(colour = "black", fill = NA, size = 2))
  ggsave(filename = paste(prefix, "/", prefix, "_MetaLonDA_TimeIntervals.jpg", sep=""), plot = p,
         dpi = 1200, height = 30, width = 20, units = 'cm')
}
#' Visualize Area Ratio (AR) empirical distribution
#'
#' Visualize Area Ratio (AR) empirical distribution for each time interval
#'
#' @param permuted Permutation of the permuted data
#' @param text Feature name
#' @param method fitting method
#' @import ggplot2
#' @import grDevices
#' @import graphics
#' @param prefix prefix to be used to create directory for the analysis results
#' @references
#' Ahmed Metwally (ametwall@stanford.edu)
#' @export
visualizeARHistogram = function(permuted, text, method, prefix = "Test"){
  # Draw a near-square grid of histograms (one per time interval, i.e. one per
  # column of `permuted`) of the permuted AR-ratio values, saved as
  # <prefix>/Feature_<text>_AR_distribution_<method>.jpg.
  cat("Visualizing AR Distribution for Feature = ", text, "\n")
  n = ncol(permuted)
  # lay the panels out on an r x c grid just large enough for n panels
  r = ceiling(sqrt(n))
  c = ceiling(sqrt(n))
  xx = paste(prefix, "/", "Feature_", text, "_AR_distribution_", method, ".jpg", sep = "")
  jpeg(filename = xx, res = 1200, height = r*5, width = c*5, units = 'cm')
  # ROBUSTNESS: close the device even if plotting fails part-way, so a
  # half-open jpeg device is not left dangling for later plots.
  on.exit(dev.off(), add = TRUE)
  par(mfrow=c(r,c))
  for( i in seq_len(ncol(permuted))){
    hist(permuted[,i], xlab = "AR Ratio", ylab = "Frequency",
         breaks = 10, col = "yellow", border = "red",
         main = paste("Interval # ", i, sep=""), xlim = c(0,1))
  }
}
#' Visualize log2 fold-change and significance of each interval as volcano plot
#'
#' Visualize log2 fold-change and significance of each interval as volcano plot
#'
#' @param df Dataframe has a detailed summary about feature's significant intervals
#' @param text Feature name
#' @import ggplot2
#' @import grDevices
#' @import graphics
#' @param prefix prefix to be used to create directory for the analysis results
#' @references
#' Ahmed Metwally (ametwall@stanford.edu)
#' @export
visualizeVolcanoPlot = function(df, text, prefix = "Test"){
  # Volcano plot: log2 fold-change (x) vs. -log10 adjusted p-value (y) for each
  # interval, coloured by significance class, saved to
  # <prefix>/Feature_<text>_VolcanoPlot.jpg.
  #
  # df: data.frame with columns adjusted.pvalue and log2FoldChange
  adjusted.pvalue_pseudo=0; Significance=0; log2FoldChange=0 ## This line is just to pass the CRAN checks
  cat("Visualizing Volcano Plot of Feature = ", text, "\n")
  # Replace exact-zero p-values with a small pseudo-value so -log10() is finite.
  df$adjusted.pvalue_pseudo = df$adjusted.pvalue
  df$adjusted.pvalue_pseudo[which(df$adjusted.pvalue == 0)] = 0.00001
  # Classify: NS, fold-change only (FC), FDR only, or both (FC_FDR).
  # Highlight features that have an absolute log2 fold change > 1 and p-value < 0.05
  df$Significance <- "NS"
  df$Significance[(abs(df$log2FoldChange) > 1)] <- "FC"
  df$Significance[(df$adjusted.pvalue_pseudo<0.05)] <- "FDR"
  df$Significance[(df$adjusted.pvalue_pseudo<0.05) & (abs(df$log2FoldChange)>1)] <- "FC_FDR"
  df$Significance <- factor(df$Significance, levels=c("NS", "FC", "FDR", "FC_FDR"))
  # NOTE: a dead `table(df$Significance)` call (result discarded, never printed
  # inside a function) has been removed.
  p = ggplot(data=df, aes(x=log2FoldChange, y=-log10(adjusted.pvalue_pseudo), colour=Significance)) +
    geom_point(alpha=0.4, size=1.75) + theme_bw() +
    ggtitle(paste("Feature = ", text, sep = "")) +
    xlab("log2 fold change") + ylab("-log10 p-value") +
    theme(axis.text.x = element_text(colour="black", size=12, angle=0, hjust=0.5, vjust=0.5, face="bold"),
          axis.text.y = element_text(colour="black", size=12, angle=0, hjust=0.5, vjust=0.5, face="bold"),
          axis.title.x = element_text(colour="black", size=12, angle=0, hjust=.5, vjust=0.5, face="bold"),
          axis.title.y = element_text(colour="black", size=12, angle=90, hjust=.5, vjust=.5, face="bold"),
          legend.text=element_text(size=10, face="plain"), legend.title = element_blank(),
          plot.title = element_text(hjust = 0.5), legend.position="bottom") +
    scale_color_manual(values=c(NS="grey30", FC="forestgreen", FDR="royalblue", FC_FDR="red2"),
                       labels=c(NS="NS", FC=paste("Log2FC>|", 1, "|", sep=""), FDR=paste("FDR Q<", 0.05, sep=""),
                                FC_FDR=paste("FDR Q<", 0.05, " & Log2FC>|", 1, "|", sep=""))) +
    # Label the significant points directly (overlapping labels suppressed).
    geom_text(data=subset(df, adjusted.pvalue_pseudo<0.05 & abs(log2FoldChange)>=1),
              aes(label=rownames(subset(df, adjusted.pvalue_pseudo<0.05 & abs(log2FoldChange)>= 1))),
              size=2.25,
              check_overlap=TRUE,
              vjust=1.0) +
    geom_vline(xintercept=c(-1,1), linetype="dotted") + geom_hline(yintercept=-log10(0.05), linetype="dotted")
  # BUG FIX: the original chained "+ ggsave(...)" onto the plot, which made
  # ggsave() run with its default plot = last_plot() (a stale plot) and then
  # tried to add ggsave()'s return value to the ggplot object. Save the plot
  # explicitly instead.
  ggsave(filename = paste(prefix, "/", "Feature_", text, "_VolcanoPlot.jpg", sep=""), plot = p,
         dpi = 1200, height = 12, width = 12, units = 'cm')
}
|
73b4151e37627644c049ed0b4172306555ce0606
|
2c6bdee82bc3df0a9ddf65e55e2dc4019bd22521
|
/SlowAssoc/ui.R
|
c6b638c44b09df2ff8a67b88f5896c095bfdad42
|
[] |
no_license
|
homerhanumat/shinyGC
|
0eb1b55bcc8373385ea1091c36ccc1d577dc72fb
|
dc580961877af2459db8a69f09d539f37fd6e2ee
|
refs/heads/master
| 2021-07-11T18:20:53.862578
| 2021-06-21T19:18:32
| 2021-06-21T19:18:32
| 37,830,825
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,101
|
r
|
ui.R
|
library(shiny)
library(shinythemes)
library(rhandsontable)

# no more than this many sims at any one time
simLimit <- 10000

# Shiny UI for a chi-square test-for-association teaching app.
# The sidebar is driven by a server-reported state machine
# (output.state: 'tableSetup' -> 'simSetup' -> 'simulating').
navbarPage(
  title = "Chi-Square Test for Association",
  theme = shinytheme("cerulean"),
  tabPanel(
    title = "The Test",
    fluidPage(
      sidebarPanel(
        # Stage 1: define the dimensions and labels of the two-way table.
        conditionalPanel(
          condition = "output.state == 'tableSetup'",
          numericInput(inputId = "rows", "Number of rows", min = 2, max = 5,
                       step = 1, value = 2),
          helpText("Enter row names (separated by commas):"),
          textInput(inputId = "rowNames", "Row Names",
                    value = "cool,warm"),
          numericInput(inputId = "cols", "Number of columns", min = 2, max = 5,
                       step = 1, value = 2),
          helpText("Enter column names (separated by commas):"),
          textInput(inputId = "colNames", "Column Names",
                    value = "baiting,polite"),
          actionButton(inputId = "submitTable", "Submit the Table")
        ),
        # Stage 2: choose how the null distribution is simulated.
        conditionalPanel(
          condition = "output.state == 'simSetup'",
          helpText("Select a method for simulation (see About Tab for details)."),
          radioButtons(inputId = "simMethod", "Simulation Method",
                       choices = c("row/col sums fixed" = "rcFix",
                                   "row sums fixed" = "rFix",
                                   "neither fixed" = "nFix"))
        ),
        # Simulation controls, shown once a table has been submitted.
        conditionalPanel(
          condition = "output.state != 'tableSetup'",
          helpText("How many simulations do you want the machine to perform at once?","
               (Upper limit is 10000)"),
          numericInput(inputId = "numberSims", "Number of Simulations",
                       min = 1, max = simLimit, value = 1),
          actionButton(inputId = "sim", "Simulate Now"),
          actionButton(inputId = "reset", "Reset (Same Table)")
        ),
        conditionalPanel(
          condition = "output.state != 'tableSetup'",
          actionButton(inputId = "newTable", "Make New Table")
        ),
        width = 2),
      mainPanel(
        # Table-entry view (before the table has been submitted).
        conditionalPanel(
          condition = "input.submitTable == 0 || output.state == 'tableSetup'",
          # TYPO FIXES in the user-facing help text:
          # "You use" -> "You can use", "usng" -> "using",
          # "sideboard" -> "sidebar".
          HTML(paste0("<h3>Enter your two-way table</h3>",
                      "<p>You can use the table below (see the About Tab for a description),",
                      " or you can enter your own table using the input widgets in the ",
                      "sidebar.</p><hr>")),
          rHandsontableOutput("cross", height = 160),
          HTML("<hr><p>When you are ready, press the button to submit your table.</p>")
        ),
        # Observed-data view with expected counts and contributions.
        conditionalPanel(
          condition = "output.state == 'simSetup'",
          fluidRow(
            column(4,
                   radioButtons(inputId = "barmosInit",
                                label = "Graph Type:",
                                choices = c("Bar Chart" = "bar",
                                            "Mosaic Plot" = "mosaic"),
                                selected = "bar", inline = TRUE)
            ),
            column(6,
                   h5(textOutput("remarksInitial"))
            )
          ),
          plotOutput("mosaicInitial", height = 350),
          fluidRow(
            column(3,
                   h5("Observed"),
                   tableOutput("obsTable")
            ),
            column(3, offset = 1,
                   h5("Expected by Null"),
                   tableOutput("expTable")
            ),
            column(4, offset = 1,
                   h5("Contributions"),
                   tableOutput("contrTable")
            )
          )
        ),
        # Simulation results: latest draw, density of the sim statistics,
        # and the theoretical chi-square curve.
        conditionalPanel(
          condition = "output.state == 'simulating'",
          tabsetPanel(
            tabPanel(
              title = "Latest Simulation",
              fluidRow(
                column(4,
                       radioButtons(inputId = "barmosLatest",
                                    label = "Graph Type:",
                                    choices = c("Bar Chart" = "bar",
                                                "Mosaic Plot" = "mosaic"),
                                    selected = "bar", inline = TRUE)
                ),
                column(6,
                       p(textOutput("remarksLatest1"))
                )
              ),
              plotOutput("mosaicLatest", height = 350),
              fluidRow(
                column(4,
                       h5("Simulated Table"),
                       tableOutput("latestTable")
                ),
                column(4, offset = 2,
                       h5("Expected Table"),
                       tableOutput("latestExpTable")
                )
              ),
              tableOutput("summary1"),
              p(textOutput("remarksProbBar"))
            ),
            tabPanel(
              title = "Density Plot",
              plotOutput("densityplot"),
              p(textOutput("remarksLatest2")),
              tableOutput("summary2"),
              p(textOutput("remarksProbDensity"))
            ),
            tabPanel(
              title = "Curve",
              plotOutput("chisqCurve"),
              br(),
              splitLayout(
                checkboxInput("compareDen",
                              HTML("Compare with simulated <br>chi-square distribution")),
                checkboxInput("yates", "Use Yates correction")
              ),
              p(textOutput("remarksProb"))
            ),
            id = "tabs"
          )
        )
      )
    )
  ),# end tabPanel "The Test"
  tabPanel(
    title = "About",
    includeHTML("informationFiles/about.html")
  )
) # end navbarPage
|
9cb5302d9877b0b9a602942b190c179f651a55b0
|
e4ee3ea6955325370c2e95703483baa75009c8a5
|
/functies.R
|
4c7bb0732f8d72cf46754dd1b33221caef0bfe3a
|
[] |
no_license
|
FrieseWoudloper/steekproefgrootte
|
823cb825cb7a6db97d1d520c7a4de0f48766de64
|
30a30df6d19fe6d43450a06e4bd5268d6e838c40
|
refs/heads/master
| 2022-11-14T14:48:21.424489
| 2020-07-06T09:54:01
| 2020-07-06T09:54:01
| 277,503,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,494
|
r
|
functies.R
|
bereken_resultaat <- function(p = 0.5,
                              conf_level = 0.95,
                              gewenste_nauwkeurigheid = 0.05,
                              populatie = 1000000){
  # Compute the sample size needed to estimate a proportion `p` with half-width
  # `gewenste_nauwkeurigheid` at confidence level `conf_level`, for a population
  # of size `populatie`.
  #
  # Returns an object of class "resultaat": a list with the input parameters,
  # the formula used ("Benaderingsformule" or "Uitgebreide formule"), a data
  # frame of confidence bounds per candidate sample size, and the optimal
  # sample size `n_steekproef_optimaal`.
  z_laag <- qnorm((1 - conf_level) / 2, mean = 0, sd = 1)
  z_hoog <- qnorm(conf_level + (1 - conf_level)/2, mean = 0, sd = 1)
  # Confidence-interval formula from: Buijs, A., "Statistiek om mee te werken"
  # (8th ed., p. 246). This approximation is strictly valid only for n >= 200.
  n_steekproef <- ceiling(z_laag^2 * p * (1 - p) / (gewenste_nauwkeurigheid^2))
  # Apply the finite-population correction factor when the sample is at least
  # 10% of the population (scalar condition, so plain if/else, not ifelse()).
  bool_eindigepopulatie <- n_steekproef / populatie >= 0.1
  factor <- if (bool_eindigepopulatie) {
    sqrt((populatie - n_steekproef) / (populatie - 1))
  } else {
    1
  }
  # Extend the x-axis domain a little past n_steekproef for the plot
  # (rounded up to the next multiple of 500).
  max_steekproef <- ceiling(n_steekproef / 500) * 500
  n <- c(20:max_steekproef)
  df_resultaat <- data.frame(n)
  if (max_steekproef < 200){
    # Exact (Wilson-style) formula from Buijs (8th ed., p. 240).
    # NOTE(review): max_steekproef is always a positive multiple of 500, so
    # this branch looks unreachable as written — confirm whether that is
    # intended.
    formule <- "Uitgebreide formule"
    df_resultaat$ondergrens <- factor * (p + z_laag^2/(2*df_resultaat$n) + z_laag * sqrt(p*(1-p)/df_resultaat$n + z_laag^2/(4*df_resultaat$n^2)))/(1 + z_laag^2/df_resultaat$n)
    df_resultaat$bovengrens <- factor * (p + z_hoog^2/(2*df_resultaat$n) + z_hoog * sqrt(p*(1-p)/df_resultaat$n + z_hoog^2/(4*df_resultaat$n^2)))/(1 + z_hoog^2/df_resultaat$n)
  } else {
    formule <- "Benaderingsformule"
    df_resultaat$ondergrens <- factor * (p + z_laag * sqrt(p*(1-p)/df_resultaat$n))
    df_resultaat$bovengrens <- factor * (p + z_hoog * sqrt(p*(1-p)/df_resultaat$n))
  }
  df_resultaat$breedte <- df_resultaat$bovengrens - df_resultaat$ondergrens
  df_resultaat$verschil_met_gewenst <- (df_resultaat$breedte - gewenste_nauwkeurigheid*2)^2
  if (formule == "Uitgebreide formule") {
    # BUG FIX: which.min() returns a *row index*, but the n column starts at
    # 20 — the index must be mapped back to the corresponding sample size
    # (the original returned the index itself, 19 too small).
    n_steekproef <- df_resultaat$n[which.min(df_resultaat$verschil_met_gewenst)]
  }
  obj <- list(p = p, conf_level = conf_level, gewenste_nauwkeurigheid = gewenste_nauwkeurigheid,
              formule = formule, data = df_resultaat, n_steekproef_optimaal = n_steekproef)
  class(obj) <- c("list", "resultaat")
  return(obj)
}
plot_resultaat <- function(r, ...){
  # Plot the lower/upper confidence bounds against candidate sample size and
  # mark the optimal sample size with a dashed vertical line.
  #
  # r: an object of class "resultaat" produced by bereken_resultaat()
  if (!inherits(r, "resultaat")) stop("Het argument van de functie plot_resultaat() moet van de klasse resultaat zijn.")
  legend_values <- c("ondergrens" = "darkred",
                     "bovengrens" = "darkgreen",
                     "optimale steekproefgrootte" = "purple")
  legend_labels <- c("bovengrens",
                     "ondergrens",
                     "optimale steekproefgrootte")
  base_plot <- ggplot(data = r$data, mapping = aes(x = n), ...)
  base_plot +
    geom_line(aes(y = ondergrens, color = "ondergrens"), size = 2) +
    geom_line(aes(y = bovengrens, color = "bovengrens"), size = 2) +
    geom_vline(aes(xintercept = r$n_steekproef_optimaal, color = "optimale steekproefgrootte"), linetype = "dashed", size = 1) +
    geom_label(aes(x = r$n_steekproef_optimaal, y = r$p, label = r$n_steekproef_optimaal)) +
    labs(x = "steekproefgrootte", y = "kans", title = glue("Steekproefgrootte berekend met {tolower(r$formule)}")) +
    scale_color_manual(name = "legenda",
                       values = legend_values,
                       labels = legend_labels)
}
beschrijf_resultaat <- function(r){
  # Produce a one-sentence (Dutch) textual summary of the computed sample size.
  #
  # r: an object of class "resultaat" produced by bereken_resultaat()
  if (!inherits(r, "resultaat")) {
    stop("Het argument van de functie beschrijf_resultaat() moet van de klasse resultaat zijn.")
  }
  kans_laag <- r$p - r$gewenste_nauwkeurigheid
  kans_hoog <- r$p + r$gewenste_nauwkeurigheid
  glue("{r$formule}: wanneer de kans op succes in de totale populatie {r$p} is,",
       "dan kun je met een betrouwbaarheid van {r$conf_level}",
       "zeggen dat in een steekproef van {r$n_steekproef_optimaal} stuks",
       "de kans op succes tussen {kans_laag}",
       "en {kans_hoog} ligt.", .sep = " ")
}
# Testen:
# r <- bereken_resultaat()
# plot_resultaat(r)
# beschrijf_resultaat(r)
|
06ecff1444fe33d4f166335bac018b9c1ed1e6a0
|
cd85a75e0b179c4736a63bc1d870eced7fd3a53f
|
/02_figures.R
|
2962327411120291bd79e821ddd663af85f75b73
|
[] |
no_license
|
jennybc/chicken-wings
|
ddaa9e2eef2c9bbf4bb32e6ba239198d81543511
|
63455a7c3626a8f1d0376e1ceebbaaa13022c20b
|
refs/heads/master
| 2020-04-08T21:12:03.018199
| 2018-11-30T19:42:54
| 2018-11-30T19:42:54
| 159,734,172
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 657
|
r
|
02_figures.R
|
library(tidyverse)
# prefer dplyr::lag over stats::lag for the lag() calls below
conflicted::conflict_prefer("lag", "dplyr")
wings <- read_csv("chicken-wings.csv")
# wing_cost: per-wing price of each order size
# marginal_wing_cost: price of the extra wings gained by moving up from the
# next-smaller order size (lag defaults of 0 anchor the first row at the
# origin)
wings <- wings %>%
mutate(
wing_cost = price/count,
marginal_wing_cost =
(price - lag(price, default = 0)) / (count - lag(count, default = 0))
)
# sanity check
ggplot(wings, aes(x = count, y = price)) +
geom_point()
# cost per wing against total wings bought
# https://twitter.com/dataandme/status/1056343232735571969
ggplot(wings, aes(x = count, y = wing_cost)) +
geom_path()
# marginal wing price
# https://twitter.com/alistaire/status/1056531627445231621
ggplot(wings, aes(x = count, y = marginal_wing_cost)) +
geom_path()
|
5f330df8103e12ce1785a783a04b97e7bccb46bd
|
8dff28544e8dc1ba210cfdb22a4e82ce97a5d30b
|
/Course W3.R
|
475b9fae6faf222823eb754dbd6ba7fee1d97b9a
|
[] |
no_license
|
GaelleLouise/GettingCleaningData
|
f5f4e56b5c1e07c16ffba184f3afce66aca5b8fa
|
954f27bacac24bede45e2b756ebfef4366b4d348
|
refs/heads/master
| 2023-01-29T07:36:53.811404
| 2020-12-10T14:56:19
| 2020-12-10T14:56:19
| 319,390,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,863
|
r
|
Course W3.R
|
# Download data on the restaurants in Baltimore:
if(!file.exists("./data")) {dir.create("./data")}
fileURL <- "https://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
download.file(fileURL, destfile = "./data/Restaurants.csv")
dateDownload <- date()
# store the data in a data frame:
restData <- read.csv("./data/Restaurants.csv")
# first pass over the data:
head(restData)
tail(restData)
summary(restData)
str(restData)
quantile(restData$councilDistrict, na.rm = TRUE)
quantile(restData$councilDistrict, probs = c(0.5, 0.75, 0.9))
table(restData$zipCode == "21212")
table(restData$zipCode %in% c("21212","21213"))
restData[restData$zipCode %in% c("21212","21213"),]
object.size(restData)
# split the data by quantile to categorize it:
# e.g. spread the zip codes over the 0-25%, 25-50%, 50-75% and 75-100% ranges of zip-code values:
restData$zipGroups = cut(restData$zipCode, breaks = quantile(restData$zipCode))
table(restData$zipGroups) # to see the groups that were created
table(restData$zipGroups,restData$zipCode) # to see how zip codes are spread over the different groups
# to split the zip codes into 4 groups (even simpler, using cut2 from the Hmisc package):
install.packages("Hmisc")
library(Hmisc)
restData$zipGroups = cut2(restData$zipCode, g = 4)
table(restData$zipGroups)
# melting data frames:
install.packages("reshape2")
library(reshape2)
mtcars$carname <- rownames(mtcars) # copy each row name into a column (otherwise that information is not available as a variable)
# next, declare which variables are identifiers and which are measures:
carMelt <- melt(mtcars, id = c("carname", "gear", "cyl"), measure.vars = c("mpg", "hp"))
head(carMelt, 5)
tail(carMelt, 5)
# using the dplyr package:
install.packages("dplyr")
library(dplyr)
|
50e2ee98fe536a493da8cb9c370ceb1cdcfbf75e
|
284503d62ba7ead77260f2e119861d0a8359262c
|
/R_Programming/Assignment_3/best.R
|
f031ed634bf7c2f10af70f0cb564a2b8b217b9d3
|
[] |
no_license
|
kunjzk/datasciencecoursera
|
846098ff61dba0536e300de4bed9da8ef036351c
|
787169c25312b92524234b31bcdefcaa6bdd1015
|
refs/heads/master
| 2022-12-02T19:54:34.735777
| 2020-08-11T16:08:05
| 2020-08-11T16:08:05
| 263,391,605
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,116
|
r
|
best.R
|
best <- function(state, outcome){
  # Return the name(s) of the hospital(s) in `state` with the lowest 30-day
  # mortality rate for the given outcome ("heart attack", "heart failure",
  # or "pneumonia"). Errors on an unknown outcome or state.
  hosp_name_column <- 2
  state_column <- 7
  # map the outcome name to its column in the outcomes file; the unnamed
  # final argument of switch() is the default, evaluated only on no match
  outcome_column <- switch(outcome,
                           "heart attack" = 11,
                           "heart failure" = 17,
                           "pneumonia" = 23,
                           stop("invalid outcome"))
  # read the data, coerce the outcome column to numeric (suppressing the
  # NA-coercion warnings), and keep only complete rows / needed columns
  raw <- read.csv("rprog_data_ProgAssignment3-data/outcome-of-care-measures.csv")
  suppressWarnings(raw[, outcome_column] <- as.numeric(raw[, outcome_column]))
  complete_rows <- complete.cases(raw[, outcome_column])
  hospitals <- raw[complete_rows, c(hosp_name_column, state_column, outcome_column)]
  rm(raw)
  # restrict to the requested state, if the name is valid
  if(! state %in% hospitals$State){
    stop("invalid state")
  }
  hospitals <- hospitals[hospitals$State == state, ]
  # return every hospital name that attains the minimum mortality
  lowest_rate <- min(hospitals[,3])
  hospitals[hospitals[,3] == lowest_rate, ]$Hospital.Name
}
|
f4b28a2e45d34eb2ca42d63efbcb877f198a2445
|
0f5c873832940a95e25026801109ada880049c91
|
/cachematrix.R
|
bb561176b57b77fb7ef957b3fe368d622d6609c3
|
[] |
no_license
|
t-young/ProgrammingAssignment2
|
ef57ef17d4b3d6d3946699048bc7ec66e43b7bf2
|
3379c822df7fbcccfb1b11d95285b4e92aa1756c
|
refs/heads/master
| 2020-12-03T05:13:55.988835
| 2016-05-28T07:05:55
| 2016-05-28T07:05:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,348
|
r
|
cachematrix.R
|
## cachematrix.R
##
## File contains two functions that are used for computing the
## inverse of a given matrix and caching its results.
## Function creates a list that contains accessor/mutator functions
## for the given matrix and its inverse.
##
## Parameters:
## x - Matrix to have its inverse cached.
##
## Return:
## A list that contains accessor/mutator functions for the given
## matrix and its inverse.
##
makeCacheMatrix <- function( x = matrix() )
{
  # Wrap a matrix in a list of accessor/mutator closures, together with a
  # cached slot for its inverse. Replacing the matrix via set() invalidates
  # the cached inverse.
  cached_inverse <- NULL
  list(
    # replace the stored matrix and clear any stale cached inverse
    set = function(new_matrix) {
      x <<- new_matrix
      cached_inverse <<- NULL
    },
    # return the stored matrix
    get = function() x,
    # store a computed inverse in the cache
    setInverse = function(value) cached_inverse <<- value,
    # return the cached inverse (NULL if not yet computed)
    getInverse = function() cached_inverse
  )
}
## Functions takes a list that was created with the 'makeCacheMatrix'
## function and computes the inverse of the matrix that is an item
## within the given list and caches it within the list. If the inverse
## has already been computed then it will just return the cached value.
##
## Paramter:
## x - The list that contains the matrix to have its inverse computed.
##
## Return:
## The inverse of the given matrix.
##
cacheSolve <- function(x, ...)
{
  # Return the inverse of the matrix held by `x` (a list created by
  # makeCacheMatrix). Reuses the cached inverse when available; otherwise
  # computes it with solve(), stores it in the cache, and returns it.
  # Extra arguments (...) are forwarded to solve().
  cached <- x$getInverse()
  if (is.null(cached))
  {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  }
  cached
}
|
2b24cd811dc840eb48c013d618cf95062df2978d
|
bef05db616ba8d3825e2fcd2835fce0b56e355c1
|
/man/hw08.Rd
|
f220fc274c6f437f89dc60a0124d9f6e065207d3
|
[] |
no_license
|
namanpaul/hw08
|
6962be9f31f0d560b39fe16a3206c812a72821c4
|
2b080713a09813f3e37d7670e920cbe0f63b2754
|
refs/heads/master
| 2021-01-10T06:56:07.913538
| 2015-11-23T00:19:33
| 2015-11-23T00:19:33
| 46,464,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 202
|
rd
|
hw08.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/hw08-package.r
\docType{package}
\name{hw08}
\alias{hw08}
\alias{hw08-package}
\title{hw08.}
\description{
hw08.
}
|
e8aafce747b3ff38da0791fec57b9632716c309b
|
f76222d220a09d2141914c95ece50a7c6f707089
|
/10th_act/0718/Logistic Classification/Code.R
|
29b7b38c4797c5bc74d981fae881cebd01439e3e
|
[] |
no_license
|
kynk94/Tobigs
|
a06629b138c04230548626ab2c1e3a682641b791
|
ebe60431bb9db6bf3cf369f371fc7f372427c1df
|
refs/heads/master
| 2020-05-16T19:41:39.426349
| 2019-06-04T06:39:03
| 2019-06-04T06:39:03
| 177,414,620
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,205
|
r
|
Code.R
|
# Logistic-regression classification exercise (Tobigs week 10 activity).
setwd("C:/Users/Owner/Dropbox/tobigs10/1주차 수업")
rm(list=ls())
#1
library(boot)
data(nodal)
str(nodal)
?nodal
table(nodal$m)
rd = nodal[,-1] # drop column m since it is 1 for every row
str(rd)
table(rd$r)
model = glm(r~., data = rd, family = binomial)
summary(model)
predict(model) # output is on the logit (log-odds) scale
sigmoid = function(x) {
return(exp(x)/(1+exp(x)))
}
sigmoid(predict(model)) # apply the sigmoid to obtain the desired probabilities
predict(model, type = "response") # output has already been passed through the sigmoid
#2
# Data for predicting whether a client will subscribe to a bank deposit
bank = read.csv("bank-additional.csv", sep = ";")
str(bank)
#Feature Selection by Hand
select1 = colnames(bank)[c(1,2,3,6,7,8:10,12,15,17:19,21)]
select11 = colnames(bank)[c(1,2,3,6,7,8:10,12,15,17:19)]
formula1 = formula(paste("y~",paste(select11, collapse=" + ")))
bank = bank[select1]
bank$y = as.factor(ifelse(bank$y == "no",0,1))
str(bank)
#train/test partition
library(caret)
idx = createDataPartition(bank$y, p = 0.7, list = F)
banktrain = bank[idx,]
banktest = bank[-idx,]
##Model1: logistic regression with a 0.5 decision threshold
model.glm1 = glm(formula1, banktrain, family = binomial)
pred.glm1 = as.numeric(predict(model.glm1, banktest, type = "response") > 0.5)
confusionMatrix(as.factor(pred.glm1),as.factor(banktest$y))
table(pred.glm1)
##Model2: same model, lower threshold (0.3) to catch more positives
model.glm2 = glm(formula1, banktrain, family = binomial)
pred.glm2 = as.numeric(predict(model.glm2, banktest, type = "response") > 0.3)
confusionMatrix(as.factor(pred.glm2),as.factor(banktest$y))
table(pred.glm2)
#Upsample the minority class to balance the training set
table(banktrain$y)
banktrain_up = upSample(subset(banktrain, select=-y), banktrain$y)
table(banktrain_up$Class)
formula2 = formula(paste("Class~",paste(select11, collapse=" + ")))
##Model3: logistic regression on the upsampled training data
model.glm3 = glm(formula2, banktrain_up, family = binomial)
pred.glm3 = as.numeric(predict(model.glm3, banktest, type = "response") > 0.5)
confusionMatrix(as.factor(pred.glm3),banktest$y)
table(pred.glm3)
#ROC curve for Model3
library(ROCR)
pred_glm <- prediction(as.numeric(pred.glm3),as.numeric(banktest$y))
perf_glm <- performance(pred_glm, measure = "tpr", x.measure = "fpr")
plot(perf_glm, main = "ROC curve for GLM", col = "blue", lwd = 2)
#AUC
auc_glm = performance(pred_glm, measure = "auc")
auc_glm@y.values[[1]]
|
4da94687dceaaf17713fc77eb45185309d50789e
|
fb21cc60eb492ecf70c6a74f63eeaf8be96c6239
|
/man/ons_download.Rd
|
5cc025e66bd7e1c6985d54cd24c29f54b6448409
|
[
"MIT"
] |
permissive
|
Lextuga007/monstR
|
5740bd78d64074e8807a73b5df4e728e09edae05
|
3444c73711c79be1ae04eb1c03b005d1f444813b
|
refs/heads/master
| 2023-05-26T23:18:39.778873
| 2020-11-16T22:07:13
| 2020-11-16T22:07:13
| 284,761,064
| 1
| 0
|
MIT
| 2020-08-03T17:15:59
| 2020-08-03T17:15:59
| null |
UTF-8
|
R
| false
| true
| 380
|
rd
|
ons_download.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ons.R
\name{ons_download}
\alias{ons_download}
\title{Download}
\usage{
ons_download(metadata, format = "csv")
}
\arguments{
\item{metadata}{data describing the download}
\item{format}{a valid format for the download}
}
\description{
\code{ons_download} retrieves the data described by the given df
}
|
102b0aaa9b6b3c0670aba069dbe9e0918c90be2c
|
d23f973a6d637a3778eca36788a1c257d6501479
|
/utility_simulation_app/app.R
|
0d2108b2e6bf47a2cdb3efe49f48568f5ef52933
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
bbest/consmap-prep
|
41a2b04ccc9ed208a835cbf83166a819a3e0bcb9
|
6de80d8c40f6075de26e1f2186d0495b36510815
|
refs/heads/master
| 2021-01-01T17:43:16.402391
| 2019-03-08T04:38:23
| 2019-03-08T04:38:23
| 42,673,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,709
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# setwd('~/github/consmap-prep/utility_simulation_app')
suppressPackageStartupMessages({
library(shiny)
library(shinydashboard)
library(markdown)
library(dplyr)
library(DT)
library(scales)
library(RColorBrewer)
library(ggplot2)
})
source('functions.R')
load('d.Rdata')
# Define UI for application that draws a histogram
ui = dashboardPage(
dashboardHeader(
title='Conservation Mapping'),
dashboardSidebar(
sidebarMenu(
menuItem(
'Routing', tabName = 'routing', icon = icon('road')),
menuItem(
'Siting', tabName = 'siting', icon = icon('map-marker'), selected=T))),
dashboardBody(
tabItems(
tabItem(
tabName = 'siting',
fluidRow( # boxes need to be put in a row (or column)
box(
title = 'Tradeoff Plot', status = 'primary', collapsible=T, width=6,
plotOutput(
'utility_plot', height = 500)),
box(
title = 'Controls', status = 'primary', collapsible=T,
withMathJax(helpText(
'$$
u = a * W - (1 -a ) * B
$$')),
sliderInput(
'a_range', 'Range of alpha:',
min = 0, max = 1, value = c(0, 1), dragRange=T) ))),
tabItem(
tabName = 'routing',
p('To be ported here: ', a(href='http://shiny.env.duke.edu/bbest/consmap/', 'shiny.env.duke.edu/bbest/consmap')))))
)
# Define server logic required to draw a histogram
# Server logic: simulate utility u = a*wind - (1-a)*bird across the
# user-selected range of alpha weights, then render the tradeoff plot.
# Note: shinyServer() is deprecated; shinyApp() accepts a plain function.
server <- function(input, output) {

  # Reactive: per-site average utility over `n` alpha values sampled from
  # the slider range, joined with the tangent-line parameters at each
  # site's utility-maximizing alpha.
  get_d_sum <- reactive({
    req(input$a_range)
    n <- 11  # number of alpha samples; odd so a median alpha exists
    # simulate_utility() comes from functions.R; `d` from d.Rdata.
    d_sim <- simulate_utility(
      d, x = 'B_ssi', y = 'I_usm',
      a_vec = seq(input$a_range[1], input$a_range[2], length.out = n),
      fxn = lazyeval::lazy(a * y - (1 - a) * x),
      fxn_slope = lazyeval::lazy(-1 * (1 - a) / a),
      fxn_intercept = lazyeval::lazy(u[u_max] / a))
    # Summarize sites by average utility across alphas, rank best-first,
    # and attach slope/intercept of the iso-utility line where each site
    # maximized utility. Sites that never maximize get u_max = FALSE.
    d_sim %>%
      group_by(i, x, y) %>%
      summarize(
        u_avg = sum(u_sim) / n) %>%
      ungroup() %>%
      arrange(desc(u_avg)) %>%
      mutate(
        rank = row_number(),
        pct_rank = percent_rank(u_avg)) %>%
      left_join(
        d_sim %>%
          filter(u_max == TRUE) %>%
          select(i, a_sim, u_max, u_slope, u_yintercept),
        by = 'i') %>%
      mutate(
        u_max = !is.na(u_max))
  })

  output$utility_plot <- renderPlot({
    d_sum <- get_d_sum()
    # Axis tick labels: map normalized 0-1 plot coordinates back to the
    # original B_ssi / I_usm values via linear interpolation.
    brks <- c(0, 0.25, 0.5, 0.75, 1)
    lbls_B <- sprintf('%0.3f',
                      approx(x = d$x, y = d$B_ssi, xout = brks, rule = c(2, 2))$y)
    lbls_I <- sprintf('%0.1f',
                      approx(x = d$y, y = d$I_usm, xout = brks, rule = c(2, 2))$y)
    # TODO: add contour around points
    # Axis titles are set once here. The original also set conflicting
    # scale names ('Bird Sensitivity', misspelled 'Wind Profitablity')
    # that trailing xlab()/ylab() calls silently overrode; the displayed
    # labels are unchanged.
    ggplot(d_sum, aes(x, y, colour = u_avg)) +
      geom_point() +
      coord_equal() +
      scale_x_continuous(
        name = 'Bird Conservation', breaks = brks, labels = lbls_B,
        trans = 'reverse') +
      scale_y_continuous(
        name = 'Wind Profitability ($NPV)', breaks = brks, labels = lbls_I) +
      scale_colour_gradientn(colours = brewer.pal(9, 'YlGnBu')) +
      # Label the five top-ranked sites.
      geom_text(aes(x, y, label = rank), data = filter(d_sum, rank <= 5),
                colour = 'purple', size = 4, hjust = 0, vjust = 0) +
      # Iso-utility tangent lines: solid at the median simulated alpha,
      # dotted at the min/max alpha (vertical when the slope is infinite,
      # i.e. alpha == 0).
      with(filter(d_sum, a_sim == median(a_sim, na.rm = TRUE)),
           geom_abline(slope = u_slope, intercept = u_yintercept,
                       linetype = 1)) +
      with(filter(d_sum, a_sim == min(a_sim, na.rm = TRUE)),
           {if (is.infinite(u_slope)) geom_vline(
              xintercept = x, linetype = 3) else geom_abline(
              slope = u_slope, intercept = u_yintercept, linetype = 3)}) +
      with(filter(d_sum, a_sim == max(a_sim, na.rm = TRUE)),
           {if (is.infinite(u_slope)) geom_vline(
              xintercept = x, linetype = 3) else geom_abline(
              slope = u_slope, intercept = u_yintercept, linetype = 3)})
  })
}
# Launch the app with the UI and server defined above.
shinyApp(ui = ui, server = server)
|
bf443c7d1b75c58c9fd5bf2dac25778e143d2961
|
fdab0c18eab28477d0980723c5ac5b4ba10c506f
|
/pictures/COPIES1HQ/test.r
|
8f72f499a3f6b7fe039e226f6170d77a96be3016
|
[
"MIT"
] |
permissive
|
MIT-Informatics/PreservationSimulation
|
58b53595841c39e1fe00a05241be43ed0bcf6430
|
38c6641a25108022ce8f225a352f566ad007b0f3
|
refs/heads/master
| 2021-08-25T10:35:46.066554
| 2021-08-24T20:17:13
| 2021-08-24T20:17:13
| 17,369,426
| 9
| 0
|
NOASSERTION
| 2021-03-20T02:55:37
| 2014-03-03T15:03:30
|
R
|
UTF-8
|
R
| false
| false
| 304
|
r
|
test.r
|
# Prepare plot-ready data: the midmean document-loss percentage for each
# simulation parameter combination, restricted to the five-copy runs.
# fndfGetGiantDataRaw() and midmean() are project helpers.
alldat <- fndfGetGiantDataRaw("")

newdat <- alldat %>%
  group_by(copies, lifem, auditfrequency, audittype,
           auditsegments, glitchfreq, glitchimpact) %>%
  summarize(
    mdmlosspct = round(midmean(lost / docstotal) * 100.0, digits = 2),
    n = n()) %>%
  filter(copies == 5)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.