blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d78a6838578a72dcc83abf2a7af93577a4cd24fa
|
6704f3fb2a8fe3b4ccd951c902cf0e0d84bb59ca
|
/archive/simulation.R
|
b1fe3581afabc6af9e17ea185b0bfbc49813ea28
|
[] |
no_license
|
tm-pham/covid-19_nosocomialtransmission
|
f5d04f987530d556c33a37eed7b98b017271eb7d
|
67878b9feaf487404760dffac817025e7f4d05a8
|
refs/heads/master
| 2022-12-05T07:42:01.994047
| 2020-08-24T06:56:48
| 2020-08-24T06:56:48
| 265,420,009
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,751
|
r
|
simulation.R
|
# =============================================================================#
# NOSOCOMIAL TRANSMISSION
# Simulate data
# =============================================================================#
# Libraries
library("TailRank") # for the beta-binomial distribution (rbb)
# ============================#
# Parameters
# ============================#
# NOTE(review): 'T' shadows R's built-in shortcut for TRUE. It is kept because
# the whole script refers to it, but avoid using bare T/F as logicals below.
T <- 3*30            # Study period (days)
delta <- rep(0.2, T) # Probability that a patient infected s days ago is discharged, isolated or dies
gamma <- rep(0.1, T) # Probability that a HCW infected s days ago recovers (and is then immune)
gen_shape <- 2.826   # Shape parameter for the generation-time distribution (Ferretti et al)
gen_scale <- 5.665   # Scale parameter for the generation-time distribution (Ferretti et al)
disp_inf <- 0.01     # Dispersion parameter for the beta-binomial infection process
disp_obs <- 0.01     # Dispersion parameter for the beta-binomial observation process
p_p_obs <- 1/3       # Proportion of observed patient infections (CO-CIN)
alpha_obs <- disp_obs/(1-p_p_obs) # alpha parameter of the observation beta-binomial
beta_obs <- disp_obs/p_p_obs      # beta parameter of the observation beta-binomial
# Transmission parameters (assumed known in the simulation)
f_hcw_p <- 0.001   # known infected patient -> HCW
f_hcw_pp <- 0.001  # unknown infected patient -> HCW
f_hcw_hcw <- 0.001 # HCW -> HCW
f_p_p <- 0.001     # known infected patient -> patient
f_p_pp <- 0.001    # unknown infected patient -> patient
f_p_hcw <- 0.001   # HCW -> susceptible patient
# Probability distribution of the incubation period (log-normal)
p1 <- 1.621
p2 <- 0.418
cum_prob_inc <- plnorm(1:T, p1, p2)
inc_distr <- cum_prob_inc - c(0, cum_prob_inc[1:(T-1)]) # daily probabilities
# Probability distribution of the length of stay (exponential, mean 7 days)
meanlos <- 7
cum_prob_los <- pexp(1:T, 1/meanlos)
prob_los <- cum_prob_los - c(0, cum_prob_los[1:(T-1)])
# Delay distribution
# First entry in prob_delay corresponds to delay=0
cum_prob_delay <- pgamma(1:T, shape = 0.811, rate = 0.064)
prob_delay <- cum_prob_delay - c(0, cum_prob_delay[1:(T-1)])
# BUG FIX: the original passed 'prob_inc', which is never defined anywhere in
# this script; the incubation-period distribution computed above is 'inc_distr'.
# NOTE(review): distr.onset.to.discharge() is not defined in this file --
# presumably provided elsewhere in the project; confirm its argument order.
onset_to_discharge_cum_distr <- distr.onset.to.discharge(prob_los, inc_distr)$cum_distr
len_delay <- min(length(prob_delay), length(onset_to_discharge_cum_distr))
# Counterfactual delay distribution (delay from symptom onset to study enrolment)
# that would have occurred if there was no discharge
cf_delay_distr <- prob_delay[1:len_delay]/(1-onset_to_discharge_cum_distr[1:len_delay])
cf_delay_distr <- cf_delay_distr/sum(cf_delay_distr)
# ============================#
# Data
# ============================#
# Matrices below are T x T: rows index time-since-infection s, columns index day t.
# Health-care workers
S_hcw <- rep(30, T) # Number of susceptible HCWs at time t
I_hcwU <- matrix(c(10,rep(0,T*T-1)), ncol=T) # Unknown infected HCWs at time t who got infected s-1 days ago
I_hcwS <- matrix(c(1,rep(0,T*T-1)), ncol=T)  # Unknown infected HCWs who got infected s-1 days ago and develop symptoms at time t
I_hcwR <- matrix(c(2,rep(0,T*T-1)), ncol=T)  # Symptomatic HCWs who got infected s-1 days ago and recover at time t
R_hcw <- rep(2,T)   # Number of immune HCWs at time t
# Patients
S_p <- rep(100, T)  # Number of susceptible patients at time t
I_pU <- matrix(c(20,rep(0, T*T-1)), ncol=T) # Unknown (unisolated) infected patients at time t who were infected s days ago
I_pUS <- matrix(rep(1, T*T), ncol=T) # Unisolated infected patients who were infected s days ago and developed symptoms
I_pUD <- matrix(rep(1, T*T), ncol=T) # Unknown infected patients eligible for detection at time t who got infected s-1 days ago
# (accounts for the delay from symptom onset until detection)
I_p <- rep(10, T)   # Number of (isolated) infected patients in hospital at time t
# initialised with the number of severely infected patients arriving from the community
# Number of non-cohorted patients at time t.
# BUG FIX: the original used rep(S_p+I_pU[1,T], T), which (a) yields a vector
# of length T*T rather than T and (b) adds I_pU[1,T] (zero at initialisation)
# instead of each day's new unknown infections. The update in the loop below
# computes N_ncp[t] = S_p[t] + I_pU[1,t], so initialise consistently:
N_ncp <- S_p + I_pU[1, ]
# Probability of infection
p_hcw <- rep(0,T)   # Probability of infection for HCWs at time t
p_p <- rep(0,T)     # Probability of infection for patients at time t
inf_hcw <- rep(0,T) # Infectivity from HCWs (density dependent)
inf_p <- rep(0,T)   # Infectivity from patients (density dependent)
# Generation time distribution (Ferretti et al, 2020)
gen_time <- dweibull(seq(1,T,by=1), shape=gen_shape, scale=gen_scale)
# Forward simulation over days t = 2..T.
for(t in 2:T){
  # Cumulative infectivity from unknown infected HCWs, weighted by generation time
  inf_hcw[t] <- sum(gen_time*I_hcwU[,t-1])
  # Infectivity from unknown infected patients
  inf_p[t] <- sum(gen_time*I_pU[,t-1])
  # Probability of infection for HCWs at time t
  p_hcw[t] <- 1-exp(-f_hcw_hcw*inf_hcw[t] - f_hcw_pp*inf_p[t] - f_hcw_p*I_p[t])
  # Probability of infection for patients at time t
  p_p[t] <- 1-exp(-f_p_hcw*inf_hcw[t] - f_p_pp*inf_p[t] - f_p_p*I_p[t])
  # Number of newly infected HCWs at time t (beta-binomial draw)
  # BUG FIX: the original computed alpha and beta from the whole vector p_hcw
  # instead of the current day's scalar p_hcw[t], and used the same formula
  # disp_inf/(1-p) for BOTH parameters. Following the alpha_obs/beta_obs
  # parameterisation defined above, beta uses disp_inf/p.
  alpha_hcw <- disp_inf/(1-p_hcw[t]) # alpha parameter of the infection beta-binomial
  beta_hcw <- disp_inf/p_hcw[t]      # beta parameter of the infection beta-binomial
  I_hcwU[1,t] <- rbb(1, N=S_hcw[t], u=alpha_hcw, v=beta_hcw)
  # Alternative: Binomial: I_hcw[1,t] = rbinom(1, size=S_hcw[t], prob=p_hcw[t])
  # Number of newly infected patients at time t (beta-binomial draw; same fix)
  alpha_p <- disp_inf/(1-p_p[t]) # alpha parameter
  beta_p <- disp_inf/p_p[t]      # beta parameter
  I_pU[1,t] <- rbb(1, N=S_p[t], u=alpha_p, v=beta_p)
  # Alternative: Binomial: I_pU[1,t] = rbinom(1, size=S_p[t], prob=p_p[t])
  for(s in 2:t){
    # (note that R starts counting at 1)
    # HEALTH-CARE WORKERS
    # Unknown infected HCWs who got infected s-1 days ago and develop symptoms at time t
    hcw_symp <- rbinom(1, size=I_hcwU[s-1,t-1], prob=inc_distr[s])
    I_hcwS[s,t] <- hcw_symp
    # Remaining unknown infected HCWs at time t who got infected s-1 days ago
    I_hcwU[s,t] <- I_hcwU[s-1,t-1] - hcw_symp
    # Symptomatic HCWs who got infected s-1 days ago and recover at time t
    # (assumed to be isolated immediately)
    hcw_recover <- rbinom(1, size=I_hcwS[s-1,t-1], prob=gamma[s-1])
    I_hcwR[s,t] <- hcw_recover
    I_hcwS[s,t] <- I_hcwS[s,t] - hcw_recover
    # PATIENTS
    # Unknown infected patients at time t who got infected s-1 days ago
    # = yesterday's unknown infected minus those who recover, are discharged, or die
    unknown_infected <- rbinom(1, size=I_pU[s-1,t-1], prob=1-delta[s-1])
    I_pU[s,t] <- unknown_infected
    # Unknown infected patients that develop symptoms at time t, infected s-1 days ago
    I_pUS[s,t] <- rbinom(1, size=I_pU[s-1,t-1]-unknown_infected, prob=inc_distr[s])
    # Unknown infected patients eligible for detection at time t, infected s-1 days ago
    # (models the delay from symptom onset until detection)
    I_pUD[s,t] <- rbinom(1, size=I_pUS[s-1,t-1], prob=cf_delay_distr[s])
  }
  # Number of known (isolated) infected patients at time t
  # (beta-binomial observation process)
  I_p[t] <- I_p[t] + rbb(1, N=sum(I_pUD[1:t,t-1]), u=alpha_obs, v=beta_obs)
  # Number of immune HCWs at time t
  R_hcw[t] <- R_hcw[t-1] + sum(I_hcwR[2:t,t])
  # Number of non-cohorted patients
  N_ncp[t] <- S_p[t] + I_pU[1,t]
}
|
c0f2d749490ec8267b0e00cb2d562a954f76b816
|
226a24b0674571473ab80837987aaf7919682aaf
|
/interarrival.R
|
3c6f31ec466463e96cce343026b7dad276e66c5d
|
[] |
no_license
|
GlennMatias/ProgrammingAssignment2
|
3dd62d8b71f1f7d2fa78ee0ed8e1006f8a5ec734
|
c8ffb5c3b23b53b39cd0fca8c18735ab6517ed37
|
refs/heads/master
| 2020-05-04T22:13:56.726249
| 2019-04-04T14:11:44
| 2019-04-04T14:11:44
| 179,504,341
| 0
| 0
| null | 2019-04-04T13:36:45
| 2019-04-04T13:36:44
| null |
UTF-8
|
R
| false
| false
| 935
|
r
|
interarrival.R
|
# Fit exponential distributions to simulated queueing data and generate random
# interarrival/service times via inverse-transform sampling.
simulationdata <- read.csv("Simulationdata.csv")
hist(simulationdata$Interarrival.Times, main = "Histogram of Interarrival Times")
# BUG FIX: title typo "Historgram" -> "Histogram"
hist(simulationdata$Machine.1, main = "Histogram of Machine 1 service times")
hist(simulationdata$Machine.2, main = "Histogram of Machine 2 Service Times")
# Install fitdistrplus only when missing, instead of unconditionally on every run
if (!requireNamespace("fitdistrplus", quietly = TRUE)) {
  install.packages("fitdistrplus")
}
library(fitdistrplus)
# Maximum-likelihood exponential fits; $estimate holds the fitted rate
Interarrival.Times.Param <- fitdist(simulationdata$Interarrival.Times, "exp")
Machine.1.Param <- fitdist(simulationdata$Machine.1, "exp")
Machine.2.Param <- fitdist(simulationdata$Machine.2, "exp")
Interarrival.Times.Param$estimate
Machine.1.Param$estimate
Machine.2.Param$estimate
# NOTE(review): "swirl" is never used in this script -- confirm this is needed
if (!requireNamespace("swirl", quietly = TRUE)) {
  install.packages("swirl")
}
bWidgetNumber <- 1:100
set.seed(1)
rIAT <- runif(100)
# Inverse-transform sampling: X = -log(1 - U) / rate
# (log() defaults to base e, so the explicit exp(1) base was redundant)
RandIAT <- (-1/0.4611669)*log(1-rIAT)
AT <- cumsum(RandIAT) # cumulative arrival times
set.seed(2)
rMach1 <- runif(100)
RandMachServ1 <- (-1/1.38255)*log(1-rMach1)
set.seed(3)
rMach2 <- runif(100)
RandMachServ2 <- (-1/0.8488159)*log(1-rMach2)
|
45693224550aae7f82c514bbd1a918cdeea0123d
|
da3a582e8ae13954765e5e020a499b82589a82c0
|
/fastr-3.6.1-oracle-geospatial/scripts/check_packages.R
|
e84ce4367df938e27d2a2b6d6cb4c97d26892e9c
|
[] |
no_license
|
ismailsunni/dockeRs
|
0296b91b83650570e11dd89c11f0a7ccf6e9a419
|
9294b415ae895a6f5439782346f1bb40abdf2f31
|
refs/heads/master
| 2021-07-02T06:01:52.030239
| 2020-03-15T19:42:44
| 2020-03-15T19:42:44
| 213,689,521
| 2
| 1
| null | 2019-12-23T10:57:44
| 2019-10-08T16:01:55
|
R
|
UTF-8
|
R
| false
| false
| 1,711
|
r
|
check_packages.R
|
print("Checking Spatial and SpatioTemporal packages completeness")

# Report the installation status of one group of required packages.
#   label:     group name used in the printed messages (e.g. "Spatial")
#   required:  character vector of required package names
#   installed: character vector of currently installed package names
report_packages <- function(label, required, installed) {
  not_installed <- setdiff(required, installed)
  if (length(not_installed) == 0) {
    print(paste0("All ", label, " R packages are installed."))
  } else {
    print(paste0("Missing ", label, " R packages are:"))
    print(not_installed)
  }
  number_installed <- length(required) - length(not_installed)
  print(paste("Installed packages:", number_installed))
  print(paste("Not installed packages:", length(not_installed)))
  print(paste("Total packages:", length(required)))
  # round(x * 10000) / 100 keeps two decimal places of the percentage
  print(paste("Installed percentage:",
              round(number_installed / length(required) * 10000) / 100, "%"))
}

# Get the lists of required packages (one package name per line)
spatial_packages <- readLines("./spatial_packages.txt")
spatiotemporal_packages <- readLines("./spatiotemporal_packages.txt")
# Get the currently installed packages (first column = package name)
installed_packages <- installed.packages()[, 1]

report_packages("Spatial", spatial_packages, installed_packages)
print("")
report_packages("SpatioTemporal", spatiotemporal_packages, installed_packages)
|
98060e52233080b582a2d774a378289eccbec2af
|
c4582ce899f30c1c094895bff9e6173106fd34a1
|
/pairwise_correlation.R
|
0efbdc923b890d3633cb5b592fdc8db623280f35
|
[] |
no_license
|
jcaguerri/media_representations_mena
|
4f6ed3324139a49bf15b6d07d50f712e79d91019
|
fc83faaab45b4dfa496bdbbe7160c40aa7f7527f
|
refs/heads/master
| 2022-11-24T16:24:56.040644
| 2020-07-04T09:31:57
| 2020-07-04T09:31:57
| 277,078,426
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,449
|
r
|
pairwise_correlation.R
|
# PAIRWISE CORRELATIONS
# Word correlations across news items.
# Tokenize and filter stopwords.
# Stopwords: Spanish base list plus additional filler words
# NOTE(review): news_1719, stopwords() and the tidytext/widyr verbs are defined
# or loaded elsewhere in the project -- not in this file.
stop_word <-stopwords(kind = "spanish")
stop_word <- c(stop_word, "según", "sin", "so", "sobre", "tras", "si", "ser", "dos")
# Tokenize; normalise the plural "menas" to "mena"
general_words <- news_1719 %>%
unnest_tokens(word, text) %>%
filter(!word %in% stop_word) %>%
mutate(word = str_replace(word, "menas", "mena"))
# Count words co-occurring within news items
gen_word_pairs <- general_words %>%
pairwise_count(word, url, sort = TRUE)
# MENA: top co-occurrences with the word "mena"
gen_word_pairs %>%
filter(item1 == "mena") %>% head(20)
# Correlation between words (restricted to words appearing > 100 times):
gen_word_cors <- general_words %>%
group_by(word) %>%
filter(n()>100)%>%
pairwise_cor(word, url, sort = TRUE) # tried frequency cut-offs of 20 and 50;
# so far 50 worked well for everything.
set.seed(2016)
# Figure: correlation network over all news items
# (edges shown for phi > 0.30; threshold chosen by trial and error)
gen_word_cors %>%
filter(correlation > .30) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), repel = TRUE) +
theme_void()
# Figure: correlations with the word "mena" only (phi > 0.11)
gen_word_cors %>%
filter(item1== "mena") %>%
filter(correlation > .11) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), repel = TRUE) +
theme_void()
# PAIRWISE CORRELATIONS, ONLY FOR NEWS ITEMS WHERE THE WORD "MENA" IS USED
# Tokenize
mena_general_words <- news_1719 %>% filter(mena_word == "si") %>%
  unnest_tokens(word, text) %>%
  filter(!word %in% stop_word)
# Count word co-occurrences within news items
mena_word_pairs <- mena_general_words %>%
  pairwise_count(word, url, sort = TRUE)
mena_word_pairs %>% head(20)
# Correlation between words (restricted to words appearing at least 88 times):
mena_word_cors <- mena_general_words %>%
  group_by(word) %>%
  filter(n()>=88)%>%
  pairwise_cor(word, url, sort = TRUE)
# Figure: correlation network (edges for phi > 0.3)
fig_mena_cor <- mena_word_cors %>%
  filter(correlation > .3) %>%
  graph_from_data_frame() %>%
  ggraph(layout = "fr") +
  geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
  geom_node_point(color = "lightblue", size = 5) +
  geom_node_text(aes(label = name), repel = TRUE) +
  theme_void()+ labs(
    title = "Red de correlaciones en las piezas donde se usa el término MENA",
    subtitle = "Correlaciones con un coeficiente de phi > 0.3 encontradas entre las 150 palabras
más mencinadas",
    caption = "Fuente: elaboración propia"
  )
# BUG FIX: ggsave() defaults to saving the *last displayed* plot, but
# fig_mena_cor was assigned and never printed, so the wrong plot (or none)
# would be written -- pass the plot object explicitly.
ggsave("fig_mena_cor.jpg", plot = fig_mena_cor, width = 15, height = 17, units = "cm")
# Alternative figure with weighted, coloured edges (displayed, not saved)
#figure2 <-
mena_word_cors %>%
filter(correlation >= 0.3) %>% # trying correlation thresholds
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes( edge_alpha = correlation, edge_width = correlation), edge_colour = "cyan4") +
geom_node_point(size = 3) +
geom_node_text(aes(label = name), repel = TRUE,
point.padding = unit(0.2, "lines")) +
theme_void()
# PAIRWISE CORRELATIONS, ONLY FOR NEWS ITEMS WHERE THE WORD "MENA" IS NOT USED
# Tokenize
nomena_general_words <- news_1719 %>% filter(mena_word == "no") %>%
  unnest_tokens(word, text) %>%
  filter(!word %in% stop_word)
# Count word co-occurrences within news items
nomena_word_pairs <- nomena_general_words %>%
  pairwise_count(word, url, sort = TRUE)
nomena_word_pairs %>% head(20)
# Correlation between words; frequency cut-off 72 is 2.2 times lower than the
# 88 used for the "mena" subset, because there are ~2.2 times more news items here
nomena_word_cors <- nomena_general_words %>%
  group_by(word) %>%
  filter(n()>72)%>%
  pairwise_cor(word, url, sort = TRUE)
# Figure: correlation network (edges for phi > 0.3)
fig_nomena_cor <- nomena_word_cors %>%
  filter(correlation > .3) %>%
  graph_from_data_frame() %>%
  ggraph(layout = "fr") +
  geom_edge_link(aes(edge_alpha = correlation), show.legend = FALSE) +
  geom_node_point(color = "lightblue", size = 5) +
  geom_node_text(aes(label = name), repel = TRUE) +
  theme_void() +
  labs(
    title = "Red de correlaciones en las piezas donde NO se usa el término MENA",
    subtitle = "Correlaciones con un coeficiente de phi > 0.3 encontradas entre las 150 palabras
más mencinadas",
    caption = "Fuente: elaboración propia"
  )
# BUG FIX: pass the plot explicitly; ggsave() otherwise saves the last
# displayed plot, and fig_nomena_cor was never printed.
ggsave("fig_nomena_cor.jpg", plot = fig_nomena_cor, width = 15, height = 17, units = "cm")
|
80ee2c3eface157731ff7294fe1bb59bd4137e2f
|
08cf8d05d798064a861d99fade04249f2f2988c9
|
/ui.R
|
ab0bdf1d5054282cf179b4ab420cbcd216d113ee
|
[] |
no_license
|
Parasdeepkohli/cs424-Project3
|
20a8f4acc26397018e9d59c44a967da9c3c58b60
|
d71e4befa091654f3ff4a20f4c556e1fedbf6ba5
|
refs/heads/main
| 2023-04-25T08:07:13.992153
| 2021-04-30T17:33:27
| 2021-04-30T17:33:27
| 361,116,267
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,243
|
r
|
ui.R
|
# Shiny UI for the Chicago 2010 power-usage dashboard (CS 424 project 3).
library(shiny)
library(leaflet)
library(mapview)
library(ggplot2)
library(DT)
# Community-area names, one per line, sorted for the dropdown inputs.
communities <- sort(readLines("Communities.txt"))
# Page layout: an "About" tab, a "Near West Side" detail tab, and a
# "Compare Communities" tab with two independently filtered maps.
fluidPage(title="Chicago power usage",
  navbarPage(
    title = "Navigation",
    id = "nav",
    position = "static-top",
    collapsible = TRUE,
    selected = "About",
    # --- About tab: static description of the app and its data ---
    tabPanel(
      title = "About",
      tags$h1("Welcome to Project 3 of CS 424!", `style` = "text-align:center"),
      tags$h4("Created by: Parasdeep (Spring 2021)", `style` = "text-align:right"),
      tags$u(tags$h3("Purpose:", `style` = "font-weight:bold")),
      tags$ul(tags$li("Visualize the energy and gas used by various types of buildings in the Near West Side community area of Chicago", `style` = "font-size:20px"),
              tags$li("Compare the energy and gas used between two community areas within the city of Chicago (Compare Communities)",`style` = "font-size:20px"),
              tags$li("Geographically visualize variance in power usage across the entire city (Compare communities)", `style` = "font-size:20px")),
      tags$u(tags$h3("The Data:", `style` = "font-weight:bold")),
      tags$ul(tags$li("An excel file detailing the power usage by community area in the city of Chicago for the year of 2010", `style` = "font-size:20px"),
              tags$li("The file provides totals for both gas and energy, as well as providing monthly values for both attributes.",`style` = "font-size:20px"),
              tags$li("Please find the link to the data source here:", tags$a(`href` = "https://www.kaggle.com/chicago/chicago-energy-usage-2010", "Source"), `style` = "font-size:20px")),
      tags$u(tags$h3("Notes and tips:", `style` = "font-weight:bold")),
      tags$ul(tags$li("Please use the navbar above to navigate the app", `style` = "font-size:20px"),
              tags$li("The application will either show a blank slate, or an error, in case your filters do not match any data points", `style` = "font-size:20px"),
              tags$li("Select the source Total in the Month drop-down inputs to see the data for all months. It is selected by default in each visualization.", `style` = "font-size:20px"),
              tags$li("The application takes some time to start up as the data files to be downloaded are large.", `style` = "font-size:20px"),
              tags$li("Select the Community 'All of Chicago' to see the data for the entire city. Note that this may take up to a minute to load due to the data size", `style` = "font-size:20px"),
              tags$li("Switching from block level to tract level takes several seconds, so please be patient!", `style` = "font-size:20px")),
      tags$u(tags$h3("Known Issues", `style` = "font-weight:bold")),
      tags$ul(tags$li("Sometimes, the maps will fail to load and display a gray screen. Changing the selected community from the dropdown fixes this.", `style` = "font-size:20px"))
    ),
    # --- Near West Side tab: one map plus electricity/gas charts and a table ---
    tabPanel("Near West Side",
      sidebarLayout(
        sidebarPanel(
          width = 2,
          # Pin the NWS map height relative to the viewport
          tags$head(tags$style("#NWS{height:40vh !important;}")),
          selectInput(
            inputId = "SourcesNWS",
            label = "Pick a view",
            choices = c("Electricity", "Gas", "Avg Building Age", "Avg Building Height", "Total population"),
            selected = "Electricity"
          ),
          selectInput(
            inputId = "TypeNWS",
            label = "Pick a building type",
            choices = c("All", "Commercial", "Industrial", "Residential"),
            selected = "All"
          ),
          # The month filter is only meaningful for the two power sources
          conditionalPanel(condition = "input.SourcesNWS == 'Electricity' || input.SourcesNWS == 'Gas'",
            selectInput(
              inputId = 'MonthNWS',
              label = 'Select a month',
              choices = c("Total", "January", "February", "March", "April", "May", "June", "July", "August", "September", "October",
                          "November", "December"),
              selected = "Total"
            )
          ),
          actionButton("reset_button", "Reset view")
        ),
        mainPanel(
          width = 10,
          title = "Near West Side",
          leafletOutput('NWS'),
          splitLayout(
            cellWidths = c("33%", "33%", "33%"),
            plotOutput('NWSElec', height = "500px"),
            plotOutput('NWSGas', height = "500px"),
            dataTableOutput("NWSTable")
          )
        )
      )
    ),
    # --- Compare Communities tab: top/bottom maps with independent filters ---
    tabPanel("Compare Communities",
      fluidRow(
        column(width = 2,
          sidebarLayout(
            sidebarPanel(width = 12,
              radioButtons(
                inputId = 'BorT',
                label = "Pick the view",
                choices = c('Blocks', 'Tracts'),
                selected = 'Blocks'
              ),
              selectInput(
                inputId = "Communities1",
                label = "Top Community",
                choices = c('All of Chicago', communities),
                selected = "Near West Side"
              ),
              selectInput(
                inputId = "Sources1",
                label = "Top view",
                choices = c("Electricity", "Gas", "Avg Building Age", "Avg Building Height", "Total population"),
                selected = "Electricity"
              ),
              selectInput(
                inputId = "Type1",
                label = "Top building type",
                choices = c("All", "Commercial", "Industrial", "Residential"),
                selected = "All"
              ),
              # BUG FIX: this panel must track the TOP view selector
              # (input.Sources1); it previously tested input.SourcesNWS,
              # which belongs to the Near West Side tab.
              conditionalPanel(condition = "input.Sources1 == 'Electricity' || input.Sources1 == 'Gas'",
                selectInput(
                  inputId = 'Month1',
                  label = 'Top month',
                  choices = c("Total", "January", "February", "March", "April", "May", "June", "July", "August", "September", "October",
                              "November", "December"),
                  selected = "Total"
                )
              ),
              selectInput(
                inputId = "Communities2",
                label = "Bottom Community",
                choices = c('All of Chicago', communities),
                selected = "Loop"
              ),
              selectInput(
                inputId = "Sources2",
                label = "Bottom view",
                choices = c("Electricity", "Gas", "Avg Building Age", "Avg Building Height", "Total population"),
                selected = "Electricity"
              ),
              selectInput(
                inputId = "Type2",
                label = "Bottom building type",
                choices = c("All", "Commercial", "Industrial", "Residential"),
                selected = "All"
              ),
              # BUG FIX: likewise, track the BOTTOM view selector (input.Sources2).
              conditionalPanel(condition = "input.Sources2 == 'Electricity' || input.Sources2 == 'Gas'",
                selectInput(
                  inputId = 'Month2',
                  label = 'Bottom month',
                  choices = c("Total", "January", "February", "March", "April", "May", "June", "July", "August", "September", "October",
                              "November", "December"),
                  selected = "Total"
                )
              ),
              selectInput(
                inputId = "Colors",
                label = "Pick a color scheme",
                choices = c("Viridis", "Heat", "SunsetDark"),
                selected = "Viridis"
              ),
              actionButton("reset_button1", "Reset top view"),
              # BUG FIX: dropped the trailing comma that followed this call;
              # it passed an empty (missing) argument to sidebarPanel().
              actionButton("reset_button2", "Reset bot view")
            ),
            mainPanel()
          )
        ),
        column(width = 6,
          # BUG FIX: the #map2 CSS rule was missing its closing brace.
          tags$head(tags$style("#map1{height:43vh !important;}
#map2{height:43vh !important;}")),
          leafletOutput("map1"),
          br(),
          leafletOutput("map2")
        ),
        column(width = 4,
          sidebarLayout(
            sidebarPanel(width = 0,
              plotOutput('Elec1', height = "210px"),
              plotOutput('Gas1', height = "210px"),
              plotOutput('Elec2', height = "210px"),
              # BUG FIX: trailing comma removed here as well.
              plotOutput('Gas2', height = "210px")
            ),
            mainPanel()
          )
        )
      )
    )
  )
)
|
86123370af02acc4092c5f766080ad992f16ea0b
|
a8bd4a8d687b7a923bc82763a9d2e84a3235b186
|
/man/CovarFARIMA.Rd
|
8e2fc01073dbd4a4975f9a5efff483dd4d09df83
|
[] |
no_license
|
crbaek/lwglasso
|
08e08c9d6091f5a7839ad50b7df36293c56c264f
|
0be595bb514c498b3cd7028eeaee2a0b195b45b1
|
refs/heads/master
| 2022-12-06T03:51:29.511529
| 2022-12-01T03:46:49
| 2022-12-01T03:46:49
| 266,767,907
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 453
|
rd
|
CovarFARIMA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gencode.R
\name{CovarFARIMA}
\alias{CovarFARIMA}
\title{Covariance function in mlw paper (21)}
\usage{
CovarFARIMA(N, d, delta, Sigmae)
}
\arguments{
\item{N}{sample size}

\item{d}{LRD parameter}

\item{delta}{delta parameter}

\item{Sigmae}{error covariance matrix (Sigma_e); used in the usage and example but previously undocumented}
}
\description{
CovarFARIMA () Function
}
\examples{
CovarFARIMA(N,d,delta,Sigmae)
}
\keyword{Gaussian}
\keyword{Process}
\keyword{generation}
|
28e56d75596452010d99db761ee696955da469f9
|
8f3d6701c7a6c2d4e0e248c15d778ab6cd0801d0
|
/tests/testthat/test_group.R
|
dc13cf9ed30d4787074954396d4b830e3b08fad3
|
[
"MIT"
] |
permissive
|
han-tun/groupdata2
|
b9f00701832971bf9f491d81b3683119a5585a2a
|
b10aa53848a7b3558598f1965ca542b608ef93ae
|
refs/heads/master
| 2022-10-11T04:12:02.155276
| 2020-06-15T18:50:32
| 2020-06-15T18:50:32
| 274,775,956
| 1
| 0
|
NOASSERTION
| 2020-06-24T21:48:02
| 2020-06-24T21:48:02
| null |
UTF-8
|
R
| false
| false
| 18,702
|
r
|
test_group.R
|
library(groupdata2)
context("group()")
# Needs testing of vector and factor as input
# group() should append one grouping column and, with force_equal = TRUE,
# drop rows so that all groups have equal size.
test_that("dimensions of data frame with group()", {
xpectr::set_test_seed(1)
# 12-row toy data frame with 3 columns
df <- data.frame(
"x" = c(1:12),
"species" = factor(rep(c("cat", "pig", "human"), 4)),
"age" = c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98)
)
# The added grouping factor means we should get an extra column
expect_equal(ncol(group(df, 5)), 4)
# We expect the same amount of rows
expect_equal(nrow(group(df, 5)), 12)
# Rows returned with force_equal = TRUE
# (12 rows / 5 groups -> 5 equal groups of 2 = 10 rows, etc.)
expect_equal(nrow(group(df, 5, force_equal = TRUE)), 10)
expect_equal(nrow(group(df, 7, force_equal = TRUE)), 7)
expect_equal(nrow(group(df, 4, force_equal = TRUE)), 12)
})
# Expected means below are tied to the exact seeded draw order, so the code is
# deliberately left untouched.
test_that("mean age of groups made with group()", {
xpectr::set_test_seed(1)
# Create df 3x12
df <- data.frame(
"x" = c(1:12),
"species" = factor(rep(c("cat", "pig", "human"), 4)),
"age" = c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98)
)
# Helper: split df into n groups with the given method and return each
# group's mean age truncated to integer.
int_mean_age <- function(df, n, method) {
df_means <- group(df, n, method = method)
df_means <- dplyr::summarise(df_means, mean_age = mean(age))
return(as.integer(df_means$mean_age))
}
# group(df, 5, method = 'n_fill')
expect_equal(int_mean_age(df, 5, "n_dist"), c(35, 44, 36, 44, 83))
expect_equal(int_mean_age(df, 5, "n_fill"), c(34, 46, 44, 44, 92))
expect_equal(int_mean_age(df, 5, "n_last"), c(35, 44, 43, 44, 68))
expect_equal(int_mean_age(df, 7, "n_dist"), c(5, 49, 43, 54, 44, 44, 92))
expect_equal(int_mean_age(df, 7, "n_fill"), c(35, 44, 43, 44, 44, 87, 98))
expect_equal(int_mean_age(df, 7, "n_last"), c(5, 65, 34, 54, 32, 54, 60))
# For n_rand, only test how many groups have been made (contents are random)
expect_equal(length(int_mean_age(df, 5, "n_rand")), 5)
expect_equal(length(int_mean_age(df, 7, "n_rand")), 7)
})
test_that("error messages work in group()", {
xpectr::set_test_seed(1)
# Create df 3x12
df <- data.frame(
"x" = c(1:12),
"species" = factor(rep(c("cat", "pig", "human"), 4)),
"age" = c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98)
)
# n larger than the number of rows must raise an assertion error
expect_error(
xpectr::strip_msg(group(df, 13)),
xpectr::strip("Assertion on 'nrow(data) >= n' failed: Must be TRUE."),
fixed = TRUE)
# n = 0 must error unless allow_zero = TRUE is passed
expect_error(
xpectr::strip_msg(group(df, 0)),
xpectr::strip(paste0("1 assertions failed:\n * 'n' was 0. If this is on purpose, ",
"set 'allow_zero' to 'TRUE'.")),
fixed = TRUE)
})
test_that("allow_zero works in group()", {
xpectr::set_test_seed(1)
# Create df 3x12
df <- data.frame(
"x" = c(1:12),
"species" = factor(rep(c("cat", "pig", "human"), 4)),
"age" = c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98)
)
# Helper: call group() with n = 0 and allow_zero = TRUE
group_zero <- function(force_equal = FALSE) {
return(group(df, 0,
allow_zero = TRUE,
force_equal = force_equal
))
}
# Helper: the .groups column produced when n = 0 is allowed
na_col <- function() {
grouped_df <- group(df, 0, allow_zero = TRUE)
return(grouped_df$.groups)
}
# Check that the .groups column contains NAs
expect_equal(na_col(), c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA))
# We should still get the added grouping factor
expect_equal(ncol(group_zero()), 4)
# We should still have the same amount of rows
expect_equal(nrow(group_zero()), 12)
# The same with force_equal as there are no group sizes to force equal
expect_equal(ncol(group_zero(force_equal = TRUE)), 4)
expect_equal(nrow(group_zero(force_equal = TRUE)), 12)
})
test_that("col_name can be set correctly in group()", {
  xpectr::set_test_seed(1)
  # 12-row toy data frame with 3 columns
  df <- data.frame(
    "x" = c(1:12),
    "species" = factor(rep(c("cat", "pig", "human"), 4)),
    "age" = c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98)
  )
  # Group the data with a custom column name and return the name of the
  # fourth (i.e. the added grouping) column.
  grouped_col_name <- function(data) {
    grouped_data <- group(data, 5, col_name = ".cats")
    return(colnames(grouped_data[4]))
  }
  # The appended grouping column should carry the requested name
  expect_equal(grouped_col_name(df), ".cats")
})
# With n = "auto" and method = "l_starts", the expectations below show a new
# group beginning at every value change in starts_col (including changes
# to or from NA).
test_that("l_starts can take n = auto", {
xpectr::set_test_seed(1)
df <- data.frame(
"x" = c(1:12),
"x2" = c(1, 1, 1, 2, NA, 2, 2, 3, NA, NA, 6, 6),
"species" = rep(c("cat", "cat", "human", "human"), 3),
"age" = c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98),
stringsAsFactors = FALSE
)
# Groups follow the cat/cat/human/human alternation
expect_equal(
group(df,
n = "auto", method = "l_starts",
starts_col = "species"
)$.groups,
factor(c(1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6))
)
# NAs and post-NA values each start a new group
expect_equal(
group(df,
n = "auto", method = "l_starts",
starts_col = "x2"
)$.groups,
factor(c(1, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 7))
)
# Character n and starts_col are only valid with method = "l_starts"
expect_error(
xpectr::strip_msg(group(df,
n = "auto", method = "l_sizes",
starts_col = "species")),
xpectr::strip(paste0("2 assertions failed:\n * 'n' can only be character when met",
"hod is 'l_starts'.\n * when method is not 'l_starts', 'start",
"s_col' must be 'NULL'.")),
fixed = TRUE)
})
# starts_col = "index" / ".index" lets the given start positions refer to row
# numbers; when the data itself has columns with those names, group() warns
# about which column is actually used.
test_that("l_starts can take starts_col = index / .index", {
xpectr::set_test_seed(1)
df <- data.frame(
"x" = c(1:12),
stringsAsFactors = FALSE
)
# index
expect_equal(
group(df, c(1, 4, 7),
method = "l_starts",
starts_col = "index"
)$.groups,
factor(c(1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3))
)
# .index
expect_equal(
group(df, c(1, 4, 7),
method = "l_starts",
starts_col = ".index"
)$.groups,
factor(c(1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3))
)
# A data frame that also HAS columns literally named "index" and ".index"
df2 <- data.frame(
"x" = c(1:12),
"index" = c(2:13),
".index" = c(3:14),
stringsAsFactors = FALSE
)
# ".index" keeps meaning row numbers; the clashing column is ignored with a warning
expect_warning(expect_equal(
group(df2, c(2, 7, 11),
method = "l_starts",
starts_col = ".index"
)$.groups,
factor(c(1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4))
),
"data contains column named \'.index\' but this is ignored.",
fixed = TRUE
)
# "index" resolves to the data column instead, again with a warning
expect_warning(expect_equal(
group(df2, c(2, 7, 11),
method = "l_starts",
starts_col = "index"
)$.groups,
factor(c(1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3))
),
"'data' contains column named 'index'. This is used as starts_col instead",
fixed = TRUE
)
})
# Fuzz test for group(): xpectr-generated cases that vary one argument
# at a time from the baseline call group(df, n = 3, method = "n_dist",
# return_factor = FALSE, col_name = ".groups"), pinning both the
# successful outputs and the error side effects of NULL arguments.
# NOTE: the expectations below are snapshot-style and depend on the
# exact seeds set via xpectr::set_test_seed(); do not reorder.
test_that("simple fuzz test of group()", {
  xpectr::set_test_seed(1)
  # NOTE: Most things already tested in group_factor,
  # so we just make a simple set of arg values
  df <- data.frame(
    "x" = c(1:12),
    "x2" = c(1, 1, 1, 2, NA, 2, 2, 3, NA, NA, 6, 6),
    "species" = rep(c("cat", "cat", "human", "human"), 3),
    "age" = c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98),
    stringsAsFactors = FALSE
  )
  xpectr::set_test_seed(3)
  # Generator call kept (commented) for regenerating the tests below:
  # xpectr::gxs_function(group,
  # args_values = list(
  # "data" = list(df, df$x),
  # "n" = list(3),
  # "method" = list("n_dist", "n_rand"),
  # "return_factor" = list(FALSE, TRUE),
  # "col_name" = list(".groups", "myGroups")
  # ), indentation = 2)
  ## Testing 'group' ####
  ## Initially generated by xpectr
  # Testing different combinations of argument values
  # Testing group(data = df, n = 3, method = "n_dist", ret...
  xpectr::set_test_seed(42)
  # Assigning output
  output_11680 <- group(data = df, n = 3, method = "n_dist", return_factor = FALSE, col_name = ".groups")
  # Testing class
  expect_equal(
    class(output_11680),
    c("grouped_df", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_11680[["x"]],
    c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),
    tolerance = 1e-4)
  expect_equal(
    output_11680[["x2"]],
    c(1, 1, 1, 2, NA, 2, 2, 3, NA, NA, 6, 6),
    tolerance = 1e-4)
  expect_equal(
    output_11680[["species"]],
    c("cat", "cat", "human", "human", "cat", "cat", "human", "human",
      "cat", "cat", "human", "human"),
    fixed = TRUE)
  expect_equal(
    output_11680[["age"]],
    c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98),
    tolerance = 1e-4)
  # "n_dist" with n = 3 splits the 12 rows into 3 equal-size groups
  expect_equal(
    output_11680[[".groups"]],
    structure(c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L), .Label = c("1",
      "2", "3"), class = "factor"))
  # Testing column names
  expect_equal(
    names(output_11680),
    c("x", "x2", "species", "age", ".groups"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_11680),
    c("integer", "numeric", "character", "numeric", "factor"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_11680),
    c("integer", "double", "character", "double", "integer"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_11680),
    c(12L, 5L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_11680)),
    ".groups",
    fixed = TRUE)
  # Testing group(data = df, n = 3, method = "n_dist", ret...
  # Changed from baseline: col_name
  xpectr::set_test_seed(42)
  # Assigning output
  output_18075 <- group(data = df, n = 3, method = "n_dist", return_factor = FALSE, col_name = "myGroups")
  # Testing class
  expect_equal(
    class(output_18075),
    c("grouped_df", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_18075[["x"]],
    c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),
    tolerance = 1e-4)
  expect_equal(
    output_18075[["x2"]],
    c(1, 1, 1, 2, NA, 2, 2, 3, NA, NA, 6, 6),
    tolerance = 1e-4)
  expect_equal(
    output_18075[["species"]],
    c("cat", "cat", "human", "human", "cat", "cat", "human", "human",
      "cat", "cat", "human", "human"),
    fixed = TRUE)
  expect_equal(
    output_18075[["age"]],
    c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98),
    tolerance = 1e-4)
  # Same grouping as baseline, but under the custom column name
  expect_equal(
    output_18075[["myGroups"]],
    structure(c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L), .Label = c("1",
      "2", "3"), class = "factor"))
  # Testing column names
  expect_equal(
    names(output_18075),
    c("x", "x2", "species", "age", "myGroups"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_18075),
    c("integer", "numeric", "character", "numeric", "factor"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_18075),
    c("integer", "double", "character", "double", "integer"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_18075),
    c(12L, 5L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_18075)),
    "myGroups",
    fixed = TRUE)
  # Testing group(data = df, n = 3, method = "n_dist", ret...
  # Changed from baseline: col_name
  xpectr::set_test_seed(42)
  # Testing side effects
  # NULL col_name must be rejected by the argument checks
  expect_error(
    xpectr::strip_msg(group(data = df, n = 3, method = "n_dist", return_factor = FALSE, col_name = NULL)),
    xpectr::strip(paste0("1 assertions failed:\n * Variable 'col_name': Must be of ty",
                         "pe 'string', not 'NULL'.")),
    fixed = TRUE)
  # Testing group(data = df$x, n = 3, method = "n_dist", r...
  # Changed from baseline: data
  xpectr::set_test_seed(42)
  # Assigning output
  # A bare vector input is wrapped into a one-column tibble named "data"
  output_13277 <- group(data = df$x, n = 3, method = "n_dist", return_factor = FALSE, col_name = ".groups")
  # Testing class
  expect_equal(
    class(output_13277),
    c("grouped_df", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_13277[["data"]],
    c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),
    tolerance = 1e-4)
  expect_equal(
    output_13277[[".groups"]],
    structure(c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L), .Label = c("1",
      "2", "3"), class = "factor"))
  # Testing column names
  expect_equal(
    names(output_13277),
    c("data", ".groups"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_13277),
    c("integer", "factor"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_13277),
    c("integer", "integer"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_13277),
    c(12L, 2L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_13277)),
    ".groups",
    fixed = TRUE)
  # Testing group(data = NULL, n = 3, method = "n_dist", r...
  # Changed from baseline: data
  xpectr::set_test_seed(42)
  # Testing side effects
  expect_error(
    xpectr::strip_msg(group(data = NULL, n = 3, method = "n_dist", return_factor = FALSE, col_name = ".groups")),
    xpectr::strip("1 assertions failed:\n * 'data' cannot be 'NULL'"),
    fixed = TRUE)
  # Testing group(data = df, n = 3, method = "n_rand", ret...
  # Changed from baseline: method
  xpectr::set_test_seed(42)
  # Assigning output
  output_16043 <- group(data = df, n = 3, method = "n_rand", return_factor = FALSE, col_name = ".groups")
  # Testing class
  expect_equal(
    class(output_16043),
    c("grouped_df", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_16043[["x"]],
    c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),
    tolerance = 1e-4)
  expect_equal(
    output_16043[["x2"]],
    c(1, 1, 1, 2, NA, 2, 2, 3, NA, NA, 6, 6),
    tolerance = 1e-4)
  expect_equal(
    output_16043[["species"]],
    c("cat", "cat", "human", "human", "cat", "cat", "human", "human",
      "cat", "cat", "human", "human"),
    fixed = TRUE)
  expect_equal(
    output_16043[["age"]],
    c(5, 65, 34, 54, 32, 54, 23, 65, 23, 65, 87, 98),
    tolerance = 1e-4)
  # With seed 42, "n_rand" happens to produce the same split as "n_dist"
  expect_equal(
    output_16043[[".groups"]],
    structure(c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L), .Label = c("1",
      "2", "3"), class = "factor"))
  # Testing column names
  expect_equal(
    names(output_16043),
    c("x", "x2", "species", "age", ".groups"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_16043),
    c("integer", "numeric", "character", "numeric", "factor"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_16043),
    c("integer", "double", "character", "double", "integer"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_16043),
    c(12L, 5L))
  # Testing group keys
  expect_equal(
    colnames(dplyr::group_keys(output_16043)),
    ".groups",
    fixed = TRUE)
  # Testing group(data = df, n = 3, method = NULL, return_...
  # Changed from baseline: method
  xpectr::set_test_seed(42)
  # Testing side effects
  expect_error(
    xpectr::strip_msg(group(data = df, n = 3, method = NULL, return_factor = FALSE, col_name = ".groups")),
    xpectr::strip(paste0("1 assertions failed:\n * Variable 'method': Must be of type",
                         " 'string', not 'NULL'.")),
    fixed = TRUE)
  # Testing group(data = df, n = NULL, method = "n_dist", ...
  # Changed from baseline: n
  xpectr::set_test_seed(42)
  # Testing side effects
  expect_error(
    xpectr::strip_msg(group(data = df, n = NULL, method = "n_dist", return_factor = FALSE, col_name = ".groups")),
    xpectr::strip("1 assertions failed:\n * 'n' cannot be 'NULL'"),
    fixed = TRUE)
  # Testing group(data = df, n = 3, method = "n_dist", ret...
  # Changed from baseline: return_factor
  xpectr::set_test_seed(42)
  # Assigning output
  # return_factor = TRUE returns just the grouping factor, not a tibble
  output_15776 <- group(data = df, n = 3, method = "n_dist", return_factor = TRUE, col_name = ".groups")
  # Testing is factor
  expect_true(
    is.factor(output_15776))
  # Testing values
  expect_equal(
    as.character(output_15776),
    c("1", "1", "1", "1", "2", "2", "2", "2", "3", "3", "3", "3"),
    fixed = TRUE)
  # Testing names
  expect_equal(
    names(output_15776),
    NULL,
    fixed = TRUE)
  # Testing length
  expect_equal(
    length(output_15776),
    12L)
  # Testing number of levels
  expect_equal(
    nlevels(output_15776),
    3L)
  # Testing levels
  expect_equal(
    levels(output_15776),
    c("1", "2", "3"),
    fixed = TRUE)
  # Testing group(data = df, n = 3, method = "n_dist", ret...
  # Changed from baseline: return_factor
  xpectr::set_test_seed(42)
  # Testing side effects
  expect_error(
    xpectr::strip_msg(group(data = df, n = 3, method = "n_dist", return_factor = NULL, col_name = ".groups")),
    xpectr::strip(paste0("1 assertions failed:\n * Variable 'return_factor': Must be ",
                         "of type 'logical flag', not 'NULL'.")),
    fixed = TRUE)
  ## Finished testing 'group' ####
  #
})
# Verifies that group() respects an existing dplyr grouping: with the
# data frame grouped by `s`, group(n = 2) splits the rows *within* each
# `s` group, and the result carries both `s` and `.groups` as group
# keys. The expected row order shows rows come back ordered by `s`.
test_that("group() works with group_by()", {
  xpectr::set_test_seed(42)
  df <- data.frame(
    "n" = c(1, 2, 3, 4, 2, 1, 5, 2, 1, 9),
    "s" = c(4, 4, 4, 4, 7, 7, 7, 7, 1, 1),
    "c" = as.character(c(4, 4, 6, 6, 7, 7, 7, 8, 8, 1)),
    "f" = as.factor(as.character(c(4, 4, 6, 6, 7, 7, 7, 8, 8, 1))),
    stringsAsFactors = FALSE
  )
  ## Testing 'xpectr::suppress_mw( df %>% dplyr::group_by(...' ####
  ## Initially generated by xpectr
  xpectr::set_test_seed(42)
  # Assigning output
  # suppress_mw() silences messages/warnings emitted by the pipeline
  output_19148 <- xpectr::suppress_mw(
    df %>%
      dplyr::group_by(s) %>%
      group(n = 2)
  )
  # Testing class
  expect_equal(
    class(output_19148),
    c("grouped_df", "tbl_df", "tbl", "data.frame"),
    fixed = TRUE)
  # Testing column values
  expect_equal(
    output_19148[["n"]],
    c(1, 9, 1, 2, 3, 4, 2, 1, 5, 2),
    tolerance = 1e-4)
  # Rows are returned ordered by the original grouping variable `s`
  expect_equal(
    output_19148[["s"]],
    c(1, 1, 4, 4, 4, 4, 7, 7, 7, 7),
    tolerance = 1e-4)
  expect_equal(
    output_19148[["c"]],
    c("8", "1", "4", "4", "6", "6", "7", "7", "7", "8"),
    fixed = TRUE)
  expect_equal(
    output_19148[["f"]],
    structure(c(5L, 1L, 2L, 2L, 3L, 3L, 4L, 4L, 4L, 5L), .Label = c("1",
      "4", "6", "7", "8"), class = "factor"))
  # .groups restarts at 1 within each `s` group (grouping is per-group)
  expect_equal(
    output_19148[[".groups"]],
    structure(c(1L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 2L, 2L), .Label = c("1",
      "2"), class = "factor"))
  # Testing column names
  expect_equal(
    names(output_19148),
    c("n", "s", "c", "f", ".groups"),
    fixed = TRUE)
  # Testing column classes
  expect_equal(
    xpectr::element_classes(output_19148),
    c("numeric", "numeric", "character", "factor", "factor"),
    fixed = TRUE)
  # Testing column types
  expect_equal(
    xpectr::element_types(output_19148),
    c("double", "double", "character", "integer", "integer"),
    fixed = TRUE)
  # Testing dimensions
  expect_equal(
    dim(output_19148),
    c(10L, 5L))
  # Testing group keys
  # Both the pre-existing key `s` and the new `.groups` key are kept
  expect_equal(
    colnames(dplyr::group_keys(output_19148)),
    c("s", ".groups"),
    fixed = TRUE)
  ## Finished testing 'xpectr::suppress_mw( df %>% dplyr::group_by(...' ####
})
|
6f1cadbc0e29d74480ce0767eff3945301437abe
|
afbc14ca3d89dbc736d2a984c905155e28b3bb11
|
/BMI551_Homework_3_support.r
|
078f8f1765419edda2c43a67894c2fe88c08f9b8
|
[] |
no_license
|
dannhek/BMI551
|
c9188b9b7824d7d10aea2b3989e1f33481660256
|
1d96508762e0e8d5bdf53b3bb5eef67fab190b6a
|
refs/heads/master
| 2020-08-22T02:11:34.264367
| 2019-10-31T03:32:01
| 2019-10-31T03:32:01
| 216,296,870
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,724
|
r
|
BMI551_Homework_3_support.r
|
# Build a lookup table of binomial probabilities for n = 20 trials over
# a fine grid of candidate success probabilities pi: point masses
# P(k = j) and cumulative tails P(k <= j) for j = 0..6. The table is
# used to locate confidence bounds on pi by eye from the rendered kable.
#
# Fix: the original spelled out seven dbinom columns and six cumulative
# sums by hand; they are now built programmatically (same values, far
# less copy-paste surface). The cumulatives are still running sums of
# the *rounded* point probabilities, matching the original exactly,
# rather than pbinom().
library(Hmisc)
library(knitr)

n_trials <- 20L
max_k <- 6L
probability_range <- seq(0.000, 1.000, 0.001)

# Point probabilities P(k = j), rounded to 6 decimals for display.
point_probs <- sapply(0:max_k, function(k) {
  round(dbinom(k, n_trials, probability_range), 6)
})
colnames(point_probs) <- paste0("P(k=", 0:max_k, ")")

# Cumulative probabilities P(k <= j) as running row sums of the rounded
# point masses; drop the j = 0 column (identical to P(k=0)).
cum_probs <- t(apply(point_probs, 1, cumsum))[, -1, drop = FALSE]
colnames(cum_probs) <- paste0("P(k<=", 1:max_k, ")")

possible_bounds <- data.frame(
  possible_pi = probability_range,
  point_probs,
  cum_probs,
  check.names = FALSE
)
kable(possible_bounds)
|
52d2491ac7e8718417f0826516927744e03fb66d
|
140fb0b962691c7fbf071587e2b950ce3da121be
|
/docs/Code/comparative_analysis/clusters_heatmap/cluster_s_535.R
|
b8be753ae698a708032ca3eec2da5152229ad1f4
|
[] |
no_license
|
davidbmorse/SEEP
|
cb0f8d33ca01a741db50ebf21e0f56ab3c5bb372
|
357645a6beb428e4c091f45f278c5cdb4d4d3745
|
refs/heads/main
| 2023-04-09T10:55:01.267306
| 2023-03-24T02:53:21
| 2023-03-24T02:53:21
| 305,464,980
| 0
| 0
| null | 2020-10-31T16:15:55
| 2020-10-19T17:38:20
|
R
|
UTF-8
|
R
| false
| false
| 31,981
|
r
|
cluster_s_535.R
|
#generate a matrix and data frame that can be used to generate a heatmap showing correlation between clusters
library(RColorBrewer)
library(lattice)
library(latticeExtra)
library(grid)
library(gridExtra)
library(Seurat)
library(DT)
library(ComplexHeatmap)
library(tidyverse)
# Load a single object from an .rda/.RData file and return it directly,
# so the caller can bind it to any name (instead of inheriting the name
# it was saved under).
#
# Fix: the original loaded into the function's own frame and filtered
# ls() by the literal name "fileName", which breaks if the saved object
# is itself named "fileName" (and is fragile to any new local variable).
# Loading into a fresh, empty environment avoids all name collisions.
loadRData <- function(fileName){
  env <- new.env(parent = emptyenv())
  # load() returns the names of the restored objects.
  loaded <- load(fileName, envir = env)
  # Files used here contain a single object; return the first one.
  get(loaded[1], envir = env)
}
#load data ------
# Each section below loads one biological model (spheroid, organoid,
# biopsy), computes per-cluster deviations of mean log-expression from
# the overall gene mean, and snapshots the section's GSEA result
# objects under suffixed names (_s/_o/_b) before the next section's
# data() calls overwrite the shared globals (difs, means, gsea_0..6).
# NOTE(review): order matters -- setwd()/source()/data() mutate global
# state, so these sections cannot be reordered or run independently.
# The functions calc.logMean and the data sets come from the sourced
# 01GSEA_pGSEAfunction.R scripts -- TODO confirm their definitions.
#spheroid
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/Codes/01GSEA_pGSEAfunction.R")
data("pGSEA")
data("PrerankSphere")
data("scaledClusterdDataGSEA")
dx=spheroid_CIOS_GSEA@assays$RNA@data
# Per-cluster mean log expression (rows = genes, cols = clusters)
mm=t(apply(dx,1,tapply, spheroid_CIOS_GSEA@active.ident, calc.logMean))
# Overall per-gene mean; dd_* = cluster means centered on it
mms = apply(dx,1,calc.logMean)
dd_spheroid = mm - mms
# MSigDB gene-set collections (relative to the setwd() above)
S_subsets <- loadRData("MSigDB/MSigDB_subsets.rda")
#give unique names
difs_s <- difs
means_s <- means
gsea_0s <- gsea_0
gsea_1s <- gsea_1
gsea_2s <- gsea_2
gsea_3s <- gsea_3
gsea_4s <- gsea_4
gsea_5s <- gsea_5
gsea_6s <- gsea_6
#organoid
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/Codes/01GSEA_pGSEAfunction.R")
data("pGSEA")
data("PrerankOrg")
data("scaledClusterdDataGSEA")
dx=Organoid_GSEA@assays$RNA@data
mm=t(apply(dx,1,tapply, Organoid_GSEA@active.ident, calc.logMean))
mms = apply(dx,1,calc.logMean)
dd_organoid = mm - mms
O_subsets <- loadRData("MSigDB/MSigDB_subsets.rda")
#give unique names
difs_o <- difs
means_o <- means
gsea_0o <- gsea_0
gsea_1o <- gsea_1
gsea_2o <- gsea_2
gsea_3o <- gsea_3
gsea_4o <- gsea_4
gsea_5o <- gsea_5
gsea_6o <- gsea_6
#biopsy
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/Codes/01GSEA_pGSEAfunction.R")
data("pGSEA")
data("PrerankBiopsy")
data("scaledClusterdDataGSEA")
dx=biopsy_GSEA@assays$RNA@data
mm=t(apply(dx,1,tapply, biopsy_GSEA@active.ident, calc.logMean))
mms = apply(dx,1,calc.logMean)
dd_biop = mm - mms
B_subsets <- loadRData("MSigDB/MSigDB_subsets.rda")
#give unique names
difs_b <- difs
means_b <- means
gsea_0b <- gsea_0
gsea_1b <- gsea_1
gsea_2b <- gsea_2
gsea_3b <- gsea_3
gsea_4b <- gsea_4
gsea_5b <- gsea_5
# presumably the biopsy data has clusters 0-5 only (no gsea_6) -- verify
#gsea_6b <- gsea_6
#Run loaded data through loops over all 'interesting' gene sets from all gene collections -----
surface_clusters535_matrices <- list()
for (collection in c("h.all", "c5.bp", "c2.cgp", "c2.cp", "c4.cm", "c4.cgn")) {
if (collection == "h.all"){
hallmark_matrices <- list()
for (pathway in c("HALLMARK_MYC_TARGETS_V1",
"HALLMARK_DNA_REPAIR",
"HALLMARK_MYC_TARGETS_V2",
"HALLMARK_E2F_TARGETS",
"HALLMARK_G2M_CHECKPOINT")) {
#Spheroid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
# point to correct 'gsea_x' object!!
fid=which(gsea_5s$h.all$pathway==pathway)
gseaRes=gsea_5s$h.all[fid, ]
nameGset = gseaRes$pathway
gset = S_subsets[[collection]][[nameGset]]
ranks = difs_s$'5'
names(ranks) = rownames(difs_s)
LE_sphere = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_sphere = dd_spheroid[LE_sphere,]
#Heatmap(mat_sphere)
#Organoid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 3
fid=which(gsea_3o$h.all$pathway==pathway)
gseaRes=gsea_3o$h.all[fid, ]
nameGset = gseaRes$pathway
gset = O_subsets[[collection]][[nameGset]]
ranks = difs_o$'3'
names(ranks) = rownames(difs_o)
LE_organoid = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_organoid=dd_organoid[LE_organoid,]
#Heatmap(mat_organoid)
#Biopsy ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
fid=which(gsea_5b$h.all$pathway==pathway)
gseaRes=gsea_5b$h.all[fid, ]
nameGset = gseaRes$pathway
gset = B_subsets[[collection]][[nameGset]]
ranks = difs_b$'5'
names(ranks) = rownames(difs_b)
LE_biopsy = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_biopsy=dd_biop[LE_biopsy,]
#Heatmap(mat_biopsy)
#find LE overlap and combine in a matrix --------
Common_LE <- intersect(LE_sphere, LE_organoid)
Common_LE <- intersect(Common_LE, LE_biopsy)
#next if Common_LE is too short
if (length(Common_LE) < 3) {
next
}
#create sub-matricies for plotting
mat_sphere=t(dd_spheroid[Common_LE,])
mat_organoid=t(dd_organoid[Common_LE,])
mat_biopsy=t(dd_biop[Common_LE,])
#rename layers by biology
rownames(mat_sphere) <- paste(rownames(mat_sphere), "s", sep = "_")
rownames(mat_organoid) <- paste(rownames(mat_organoid), "o", sep = "_")
rownames(mat_biopsy) <- paste(rownames(mat_biopsy), "b", sep = "_")
#bind new matrix
BIND.MTX <- rbind(mat_sphere, mat_organoid, mat_biopsy)
BIND.MTX <- t(BIND.MTX)
hallmark_matrices[[pathway]] <- BIND.MTX
}
surface_clusters535_matrices[[collection]] <- hallmark_matrices
}
else if (collection == "c5.bp"){
GObp_matrices <- list()
for (pathway in c("GO_ANAPHASE_PROMOTING_COMPLEX_DEPENDENT_CATABOLIC_PROCESS",
"GO_CENTROMERE_COMPLEX_ASSEMBLY",
"GO_HISTONE_EXCHANGE",
"GO_DNA_REPLICATION_INDEPENDENT_NUCLEOSOME_ORGANIZATION",
"GO_POSITIVE_REGULATION_OF_LIGASE_ACTIVITY",
"GO_REGULATION_OF_PROTEIN_UBIQUITINATION_INVOLVED_IN_UBIQUITIN_DEPENDENT_PROTEIN_CATABOLIC_PROCESS",
"GO_REGULATION_OF_LIGASE_ACTIVITY",
"GO_PROTEIN_HETEROTETRAMERIZATION",
"GO_ATP_DEPENDENT_CHROMATIN_REMODELING",
"GO_DNA_REPLICATION_DEPENDENT_NUCLEOSOME_ORGANIZATION",
"GO_REGULATION_OF_MEGAKARYOCYTE_DIFFERENTIATION",
"GO_DNA_BIOSYNTHETIC_PROCESS",
"GO_DNA_STRAND_ELONGATION_INVOLVED_IN_DNA_REPLICATION",
"GO_MITOTIC_RECOMBINATION",
"GO_INTERSTRAND_CROSS_LINK_REPAIR",
"GO_NEGATIVE_REGULATION_OF_PROTEIN_MODIFICATION_BY_SMALL_PROTEIN_CONJUGATION_OR_REMOVAL",
"GO_TELOMERE_ORGANIZATION",
"GO_SPLICEOSOMAL_SNRNP_ASSEMBLY",
"GO_TELOMERE_MAINTENANCE_VIA_RECOMBINATION",
"GO_DNA_STRAND_ELONGATION")) {
#Spheroid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
# point to correct 'gsea_x' object!!
fid=which(gsea_5s$c5.bp$pathway==pathway)
gseaRes=gsea_5s$c5.bp[fid, ]
nameGset = gseaRes$pathway
gset = S_subsets[[collection]][[nameGset]]
ranks = difs_s$'5'
names(ranks) = rownames(difs_s)
LE_sphere = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_sphere = dd_spheroid[LE_sphere,]
#Heatmap(mat_sphere)
#Organoid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 3
fid=which(gsea_3o$c5.bp$pathway==pathway)
gseaRes=gsea_3o$c5.bp[fid, ]
nameGset = gseaRes$pathway
gset = O_subsets[[collection]][[nameGset]]
ranks = difs_o$'3'
names(ranks) = rownames(difs_o)
LE_organoid = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_organoid=dd_organoid[LE_organoid,]
#Heatmap(mat_organoid)
#Biopsy ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
fid=which(gsea_5b$c5.bp$pathway==pathway)
gseaRes=gsea_5b$c5.bp[fid, ]
nameGset = gseaRes$pathway
gset = B_subsets[[collection]][[nameGset]]
ranks = difs_b$'5'
names(ranks) = rownames(difs_b)
LE_biopsy = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_biopsy=dd_biop[LE_biopsy,]
#Heatmap(mat_biopsy)
#find LE overlap and plot as combined matrices-------
Common_LE <- intersect(LE_sphere, LE_organoid)
Common_LE <- intersect(Common_LE, LE_biopsy)
#next if Common_LE is too short
if (length(Common_LE) < 3) {
next
}
#create sub-matricies for plotting
mat_sphere=t(dd_spheroid[Common_LE,])
mat_organoid=t(dd_organoid[Common_LE,])
mat_biopsy=t(dd_biop[Common_LE,])
#rename layers by biology
rownames(mat_sphere) <- paste(rownames(mat_sphere), "s", sep = "_")
rownames(mat_organoid) <- paste(rownames(mat_organoid), "o", sep = "_")
rownames(mat_biopsy) <- paste(rownames(mat_biopsy), "b", sep = "_")
#bind new matrix
BIND.MTX <- rbind(mat_sphere, mat_organoid, mat_biopsy)
BIND.MTX <- t(BIND.MTX)
GObp_matrices[[pathway]] <- BIND.MTX
}
surface_clusters535_matrices[[collection]] <- GObp_matrices
}
else if (collection == "c2.cgp"){
CGP_matrices <- list()
for (pathway in c("SOTIRIOU_BREAST_CANCER_GRADE_1_VS_3_UP",
"GRAHAM_NORMAL_QUIESCENT_VS_NORMAL_DIVIDING_DN",
"GRAHAM_CML_DIVIDING_VS_NORMAL_QUIESCENT_UP",
"ZHOU_CELL_CYCLE_GENES_IN_IR_RESPONSE_24HR",
"RHODES_UNDIFFERENTIATED_CANCER",
"CROONQUIST_IL6_DEPRIVATION_DN",
"MORI_LARGE_PRE_BII_LYMPHOCYTE_UP",
"CROONQUIST_NRAS_SIGNALING_DN",
"WINNEPENNINCKX_MELANOMA_METASTASIS_UP",
"BENPORATH_PROLIFERATION",
"WONG_EMBRYONIC_STEM_CELL_CORE",
"GAVIN_FOXP3_TARGETS_CLUSTER_P6",
"TARTE_PLASMA_CELL_VS_PLASMABLAST_DN",
"FERREIRA_EWINGS_SARCOMA_UNSTABLE_VS_STABLE_UP",
"MORI_PRE_BI_LYMPHOCYTE_UP",
"LY_AGING_OLD_DN",
"YU_MYC_TARGETS_UP",
"FERRANDO_T_ALL_WITH_MLL_ENL_FUSION_DN",
"CONCANNON_APOPTOSIS_BY_EPOXOMICIN_DN",
"BURTON_ADIPOGENESIS_PEAK_AT_24HR",
"CHANG_CORE_SERUM_RESPONSE_UP",
"MORI_MATURE_B_LYMPHOCYTE_DN",
"GARCIA_TARGETS_OF_FLI1_AND_DAX1_DN",
"CHICAS_RB1_TARGETS_LOW_SERUM",
"PAL_PRMT5_TARGETS_UP")) {
#Spheroid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
# point to correct 'gsea_x' object!!
fid=which(gsea_5s$c2.cgp$pathway==pathway)
gseaRes=gsea_5s$c2.cgp[fid, ]
nameGset = gseaRes$pathway
gset = S_subsets[[collection]][[nameGset]]
ranks = difs_s$'5'
names(ranks) = rownames(difs_s)
LE_sphere = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_sphere = dd_spheroid[LE_sphere,]
#Heatmap(mat_sphere)
#Organoid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 3
fid=which(gsea_3o$c2.cgp$pathway==pathway)
gseaRes=gsea_3o$c2.cgp[fid, ]
nameGset = gseaRes$pathway
gset = O_subsets[[collection]][[nameGset]]
ranks = difs_o$'3'
names(ranks) = rownames(difs_o)
LE_organoid = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_organoid=dd_organoid[LE_organoid,]
#Heatmap(mat_organoid)
#Biopsy ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
fid=which(gsea_5b$c2.cgp$pathway==pathway)
gseaRes=gsea_5b$c2.cgp[fid, ]
nameGset = gseaRes$pathway
gset = B_subsets[[collection]][[nameGset]]
ranks = difs_b$'5'
names(ranks) = rownames(difs_b)
LE_biopsy = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_biopsy=dd_biop[LE_biopsy,]
#Heatmap(mat_biopsy)
#find LE overlap and plot as combined matrices-------
Common_LE <- intersect(LE_sphere, LE_organoid)
Common_LE <- intersect(Common_LE, LE_biopsy)
#next if Common_LE is too short
if (length(Common_LE) < 3) {
next
}
#create sub-matricies for plotting
mat_sphere=t(dd_spheroid[Common_LE,])
mat_organoid=t(dd_organoid[Common_LE,])
mat_biopsy=t(dd_biop[Common_LE,])
#rename layers by biology
rownames(mat_sphere) <- paste(rownames(mat_sphere), "s", sep = "_")
rownames(mat_organoid) <- paste(rownames(mat_organoid), "o", sep = "_")
rownames(mat_biopsy) <- paste(rownames(mat_biopsy), "b", sep = "_")
#bind new matrix
BIND.MTX <- rbind(mat_sphere, mat_organoid, mat_biopsy)
BIND.MTX <- t(BIND.MTX)
CGP_matrices[[pathway]] <- BIND.MTX
}
surface_clusters535_matrices[[collection]] <- CGP_matrices
}
else if (collection == "c2.cp"){
CanonPW_matrices <- list()
for (pathway in c("REACTOME_REGULATION_OF_MITOTIC_CELL_CYCLE",
"REACTOME_DNA_REPLICATION",
"REACTOME_CELL_CYCLE_CHECKPOINTS",
"REACTOME_CELL_CYCLE",
"REACTOME_MITOTIC_M_M_G1_PHASES",
"REACTOME_APC_C_CDC20_MEDIATED_DEGRADATION_OF_MITOTIC_PROTEINS",
"REACTOME_CELL_CYCLE_MITOTIC",
"REACTOME_APC_C_CDH1_MEDIATED_DEGRADATION_OF_CDC20_AND_OTHER_APC_C_CDH1_TARGETED_PROTEINS_IN_LATE_MITOSIS_EARLY_G1",
"REACTOME_SYNTHESIS_OF_DNA",
"REACTOME_G1_S_TRANSITION",
"REACTOME_M_G1_TRANSITION",
"REACTOME_S_PHASE",
"REACTOME_FORMATION_OF_TUBULIN_FOLDING_INTERMEDIATES_BY_CCT_TRIC",
"REACTOME_MITOTIC_G1_G1_S_PHASES",
"REACTOME_ASSEMBLY_OF_THE_PRE_REPLICATIVE_COMPLEX",
"REACTOME_CHROMOSOME_MAINTENANCE",
"REACTOME_DEPOSITION_OF_NEW_CENPA_CONTAINING_NUCLEOSOMES_AT_THE_CENTROMERE",
"REACTOME_ORC1_REMOVAL_FROM_CHROMATIN",
"REACTOME_SCFSKP2_MEDIATED_DEGRADATION_OF_P27_P21",
"REACTOME_AUTODEGRADATION_OF_CDH1_BY_CDH1_APC_C")) {
#Spheroid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
# point to correct 'gsea_x' object!!
fid=which(gsea_5s$c2.cp$pathway==pathway)
gseaRes=gsea_5s$c2.cp[fid, ]
nameGset = gseaRes$pathway
gset = S_subsets[[collection]][[nameGset]]
ranks = difs_s$'5'
names(ranks) = rownames(difs_s)
LE_sphere = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_sphere = dd_spheroid[LE_sphere,]
#Heatmap(mat_sphere)
#Organoid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 3
fid=which(gsea_3o$c2.cp$pathway==pathway)
gseaRes=gsea_3o$c2.cp[fid, ]
nameGset = gseaRes$pathway
gset = O_subsets[[collection]][[nameGset]]
ranks = difs_o$'3'
names(ranks) = rownames(difs_o)
LE_organoid = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_organoid=dd_organoid[LE_organoid,]
#Heatmap(mat_organoid)
#Biopsy ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
fid=which(gsea_5b$c2.cp$pathway==pathway)
gseaRes=gsea_5b$c2.cp[fid, ]
nameGset = gseaRes$pathway
gset = B_subsets[[collection]][[nameGset]]
ranks = difs_b$'5'
names(ranks) = rownames(difs_b)
LE_biopsy = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_biopsy=dd_biop[LE_biopsy,]
#Heatmap(mat_biopsy)
#find LE overlap and plot as combined matrices-------
Common_LE <- intersect(LE_sphere, LE_organoid)
Common_LE <- intersect(Common_LE, LE_biopsy)
#next if Common_LE is too short
if (length(Common_LE) < 3) {
next
}
#create sub-matricies for plotting
mat_sphere=t(dd_spheroid[Common_LE,])
mat_organoid=t(dd_organoid[Common_LE,])
mat_biopsy=t(dd_biop[Common_LE,])
#rename layers by biology
rownames(mat_sphere) <- paste(rownames(mat_sphere), "s", sep = "_")
rownames(mat_organoid) <- paste(rownames(mat_organoid), "o", sep = "_")
rownames(mat_biopsy) <- paste(rownames(mat_biopsy), "b", sep = "_")
#bind new matrix
BIND.MTX <- rbind(mat_sphere, mat_organoid, mat_biopsy)
BIND.MTX <- t(BIND.MTX)
CanonPW_matrices[[pathway]] <- BIND.MTX
}
surface_clusters535_matrices[[collection]] <- CanonPW_matrices
}
else if (collection == "c4.cm"){
CancerMod_matrices <- list()
for (pathway in c("MODULE_54",
"MODULE_219",
"MODULE_91",
"MODULE_158",
"MODULE_28",
"MODULE_125",
"MODULE_388",
"MODULE_102",
"MODULE_61",
"MODULE_299")) {
#Spheroid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
# point to correct 'gsea_x' object!!
fid=which(gsea_5s$c4.cm$pathway==pathway)
gseaRes=gsea_5s$c4.cm[fid, ]
nameGset = gseaRes$pathway
gset = S_subsets[[collection]][[nameGset]]
ranks = difs_s$'5'
names(ranks) = rownames(difs_s)
LE_sphere = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_sphere = dd_spheroid[LE_sphere,]
#Heatmap(mat_sphere)
#Organoid ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/Codes/01GSEA_pGSEAfunction.R")
#cluster 3
fid=which(gsea_3o$c4.cm$pathway==pathway)
gseaRes=gsea_3o$c4.cm[fid, ]
nameGset = gseaRes$pathway
gset = O_subsets[[collection]][[nameGset]]
ranks = difs_o$'3'
names(ranks) = rownames(difs_o)
LE_organoid = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_organoid=dd_organoid[LE_organoid,]
#Heatmap(mat_organoid)
#Biopsy ---------
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/")
source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/Codes/01GSEA_pGSEAfunction.R")
#cluster 5
fid=which(gsea_5b$c4.cm$pathway==pathway)
gseaRes=gsea_5b$c4.cm[fid, ]
nameGset = gseaRes$pathway
gset = B_subsets[[collection]][[nameGset]]
ranks = difs_b$'5'
names(ranks) = rownames(difs_b)
LE_biopsy = sort(unlist(gseaRes$leadingEdge))
#create sub-matrix for plotting
mat_biopsy=dd_biop[LE_biopsy,]
#Heatmap(mat_biopsy)
#find LE overlap and plot as combined matrices-------
Common_LE <- intersect(LE_sphere, LE_organoid)
Common_LE <- intersect(Common_LE, LE_biopsy)
#next if Common_LE is too short
if (length(Common_LE) < 3) {
next
}
#create sub-matricies for plotting
mat_sphere=t(dd_spheroid[Common_LE,])
mat_organoid=t(dd_organoid[Common_LE,])
mat_biopsy=t(dd_biop[Common_LE,])
#rename layers by biology
rownames(mat_sphere) <- paste(rownames(mat_sphere), "s", sep = "_")
rownames(mat_organoid) <- paste(rownames(mat_organoid), "o", sep = "_")
rownames(mat_biopsy) <- paste(rownames(mat_biopsy), "b", sep = "_")
#bind new matrix
BIND.MTX <- rbind(mat_sphere, mat_organoid, mat_biopsy)
BIND.MTX <- t(BIND.MTX)
CancerMod_matrices[[pathway]] <- BIND.MTX
}
surface_clusters535_matrices[[collection]] <- CancerMod_matrices
}
  else if (collection == "c4.cgn"){
    # c4.cgn = MSigDB "cancer gene neighborhoods" collection.
    # For each selected gene set, pull the GSEA leading edge from all three
    # model systems (spheroid cluster 5, organoid cluster 3, biopsy cluster 5),
    # keep only the genes shared by all three leading edges, and store one
    # combined genes x samples matrix (samples suffixed _s/_o/_b) for heatmaps.
    CancerGeneNet_matrices <- list()
    for (pathway in c("GNF2_RRM1",
                      "GNF2_RAN",
                      "GNF2_RFC4",
                      "GNF2_PA2G4",
                      "MORF_PCNA",
                      "GNF2_CKS1B",
                      "GNF2_MCM4",
                      "GNF2_CKS2",
                      "GNF2_RFC3",
                      "MORF_BUB3",
                      "MORF_FEN1",
                      "GNF2_ESPL1",
                      "MORF_FBL",
                      "GNF2_BUB1",
                      "MORF_CSNK2B",
                      "MORF_UNG",
                      "MORF_RAD23A",
                      "MORF_HAT1",
                      "MORF_MAP2K2",
                      "MORF_ANP32B")) {
      # Spheroid ---------
      # NOTE(review): setwd()/source() inside a loop repeatedly reloads the
      # GSEA helper and mutates the working directory - fragile, but kept as-is.
      setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/")
      source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/PEO1_spheroids/1712_GSEA_scRNA_spheroid_clusters/Codes/01GSEA_pGSEAfunction.R")
      # cluster 5
      # point to correct 'gsea_x' object!!
      fid=which(gsea_5s$c4.cgn$pathway==pathway)
      gseaRes=gsea_5s$c4.cgn[fid, ]
      nameGset = gseaRes$pathway
      gset = S_subsets[[collection]][[nameGset]]
      # Rank vector for this cluster (named by gene), used by the GSEA helper.
      ranks = difs_s$'5'
      names(ranks) = rownames(difs_s)
      # Leading-edge genes of this gene set in the spheroid model.
      LE_sphere = sort(unlist(gseaRes$leadingEdge))
      # create sub-matrix for plotting
      mat_sphere = dd_spheroid[LE_sphere,]
      #Heatmap(mat_sphere)
      # Organoid ---------
      setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/")
      source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190926_GSEA_scRNA_organoid_clusters/Codes/01GSEA_pGSEAfunction.R")
      # cluster 3
      fid=which(gsea_3o$c4.cgn$pathway==pathway)
      gseaRes=gsea_3o$c4.cgn[fid, ]
      nameGset = gseaRes$pathway
      gset = O_subsets[[collection]][[nameGset]]
      ranks = difs_o$'3'
      names(ranks) = rownames(difs_o)
      LE_organoid = sort(unlist(gseaRes$leadingEdge))
      # create sub-matrix for plotting
      mat_organoid=dd_organoid[LE_organoid,]
      #Heatmap(mat_organoid)
      # Biopsy ---------
      setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/")
      source("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/biopsy2_3/R_scripts/4th_MERGE_mouses/SubtractMouseReads/GSEA_biopsy_clusters_042320/Codes/01GSEA_pGSEAfunction.R")
      # cluster 5
      fid=which(gsea_5b$c4.cgn$pathway==pathway)
      gseaRes=gsea_5b$c4.cgn[fid, ]
      nameGset = gseaRes$pathway
      gset = B_subsets[[collection]][[nameGset]]
      ranks = difs_b$'5'
      names(ranks) = rownames(difs_b)
      LE_biopsy = sort(unlist(gseaRes$leadingEdge))
      # create sub-matrix for plotting
      mat_biopsy=dd_biop[LE_biopsy,]
      #Heatmap(mat_biopsy)
      # Find the leading-edge overlap shared by all three models --------
      Common_LE <- intersect(LE_sphere, LE_organoid)
      Common_LE <- intersect(Common_LE, LE_biopsy)
      # Skip gene sets whose shared leading edge is too small to plot.
      if (length(Common_LE) < 3) {
        next
      }
      # Create sub-matrices (transposed: samples x genes) for the shared genes.
      mat_sphere=t(dd_spheroid[Common_LE,])
      mat_organoid=t(dd_organoid[Common_LE,])
      mat_biopsy=t(dd_biop[Common_LE,])
      # Rename layers by biology so sample origin is visible after binding.
      rownames(mat_sphere) <- paste(rownames(mat_sphere), "s", sep = "_")
      rownames(mat_organoid) <- paste(rownames(mat_organoid), "o", sep = "_")
      rownames(mat_biopsy) <- paste(rownames(mat_biopsy), "b", sep = "_")
      # Bind the three models, then transpose back to genes x samples.
      BIND.MTX <- rbind(mat_sphere, mat_organoid, mat_biopsy)
      BIND.MTX <- t(BIND.MTX)
      CancerGeneNet_matrices[[pathway]] <- BIND.MTX
    }
    surface_clusters535_matrices[[collection]] <- CancerGeneNet_matrices
  }
  else{
    # Unrecognized collection name: nothing to do.
    print("no loop found for this pathway")
  }
}
# Save the per-collection list of leading-edge matrices as RDS -------
saveRDS(surface_clusters535_matrices, file = "/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/comparative_analysis_filt_biop/clusters_heatmap/comparison_data/surface535.rds")
# Unlist and bind into one large genes x samples matrix called surface_535.
grouped_families <- list()
for (i in seq_along(surface_clusters535_matrices)) {
  # Stack all gene-set matrices within one collection.
  grouped_families[[i]] <- do.call(rbind, surface_clusters535_matrices[[i]])
}
surface_535 <- do.call(rbind, grouped_families)
# Drop duplicate gene rows that appear in several gene sets.
surface_535 <- unique(surface_535)
Heatmap(surface_535)
Heatmap(unique(grouped_families[[1]]))
# First collection (hallmark) kept separately for its own heatmap.
hallmark_genes <- grouped_families[[1]]
hallmark_genes2 <- unique(grouped_families[[1]])
Heatmap(hallmark_genes2)
all_genes_unique_surface535 <- surface_535
saveRDS(all_genes_unique_surface535, file = "/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/comparative_analysis_filt_biop/clusters_heatmap/comparison_data/all_genes_unique_surface535.rds")
# Make a logical data frame: one column per gene set, TRUE where the gene
# belongs to that gene set's shared leading edge.
surface_535_annotation <- data.frame(row.names = rownames(surface_535))
n=0
for (i in seq_along(surface_clusters535_matrices)) {
  for (j in seq_along(surface_clusters535_matrices[[i]])) {
    n=n+1
    surface_535_annotation[[n]] <- rownames(surface_535_annotation) %in% rownames(surface_clusters535_matrices[[i]][[j]])
    colnames(surface_535_annotation)[n] <- names(surface_clusters535_matrices[[i]][j])
  }
}
n=0
saveRDS(surface_535_annotation, file = "/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/comparative_analysis_filt_biop/clusters_heatmap/comparison_data/surface_535_annotation.rds")
# Try to plot heatmap with layers (sample origins) clustered.
# Manual biology labels, one per column of surface_535 (order-dependent!):
# c = cancer/biopsy, m = organoid ("mini"), s = spheroid - TODO confirm mapping.
cluster_positions <- data.frame(row.names = colnames(surface_535))
cluster_positions$biology <- c("m", "m", "m", "c", "m", "s", "s", "c", "s", "m", "s", "c", "m", "s", "c", "s", "m", "c", "s", "s")
col.biology <- list("biology" = c("c" = "blue", "m" = "green3", "s" = "red"))
Heatmap(hallmark_genes2, name = '535',
        column_split = factor(cluster_positions$biology, levels = c("s", "m", "c")),
        row_km = 4,
        cluster_column_slices = FALSE)
# Reorder columns by k-means clustering of the samples.
# NOTE(review): kmeans is not seeded, so cluster labels are not reproducible;
# consider set.seed() before this call.
kclus <- kmeans(t(surface_535), 2)
kclus$cluster
# Custom ordering of the two k-means slices.
split <- factor(paste0("Cluster\n", kclus$cluster), levels=c("Cluster\n2","Cluster\n1"))
#reorder.hmap <- Heatmap(surface_535, column_split=split)
Heatmap(surface_535, name = '535',
        column_split=split,
        row_km = 4,
        bottom_annotation = HeatmapAnnotation(df = cluster_positions, col = col.biology))
Heatmap(surface_535, name = '535',
        #column_split=split,
        column_km = 3,
        row_km = 1,
        #row_split = surface_535_annotation[1:2],
        #right_annotation = HeatmapAnnotation(df = surface_535_annotation[5], which = "row"),
        bottom_annotation = HeatmapAnnotation(df = cluster_positions, col = col.biology))
# end-----
|
4088922f6375b9b2d7fccc91f637d34bff94eee2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MLDS/examples/simu.6pt.Rd.R
|
7915e22e536b4cb5e6aa1c4e781c57ae80826a13
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 330
|
r
|
simu.6pt.Rd.R
|
# Example script extracted from the MLDS package's simu.6pt help page.
library(MLDS)
### Name: simu.6pt
### Title: Perform Bootstrap Test on 6-point Likelihood for MLDS FIT
### Aliases: simu.6pt
### Keywords: datagen misc

### ** Examples

# Load the example difference-scaling data set shipped with MLDS.
data(kk1)
# Fit a difference scale after standardizing the trial ordering.
x.mlds <- mlds(SwapOrder(kk1))
# nsim should be near 10,000 for stability,
# but this will take a little time (100 is used here to keep it quick).
simu.6pt(x.mlds, 100, nrep = 1)
|
850b8536a02d4faf3ee317d495b776fd4a022a9a
|
c6a6b77f3b71ea68f1281b043dd60f17dd85381c
|
/inst/NEWS.Rd
|
d2c51539187b6fb80a8a2a61af95b53760ba3664
|
[] |
no_license
|
benilton/oligoClasses
|
df76a4ee4d755342ae32b07c9acb5355153e3f4f
|
be0e1088c52ee8827c86f061e80ffe9b44982a88
|
refs/heads/master
| 2021-01-10T21:40:35.903511
| 2019-11-23T12:22:08
| 2019-11-23T12:22:08
| 1,779,156
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,898
|
rd
|
NEWS.Rd
|
\name{NEWS}
\title{News for Package 'oligoClasses'}
\section{Changes in version 1.18}{
\subsection{New class: GenomeAnnotatedDataFrame}{
\itemize{
\item GenomeAnnotatedDataFrame extends AnnotatedDataFrame but requires
columns 'isSnp', 'chromosome', and 'position'.
\item Accessors for the SNP indicator, chromosome, and position are
defined for the GenomeAnnotatedDataFrame class
\item The assayData elements must all be integers. Copy number or
relative copy number should be multipled by 100 and stored as an
integer. Use integerMatrix(x, 100), where x is a matrix of copy number
on the original scale. B allele frequencies should be multipled by
1000 and saved as an integer. Use integerMatrix(b, 1000), where 'b' is
a matrix of B allele frequencies.
\item The featureData in these classes should have the class
"GenomeAnnotatedDataFrame".
}
}
}
\section{Changes in version 1.14}{
\subsection{NEW FEATURES}{
\itemize{
\item ocSamples() and ocProbesets() propagate the value they are
set to to a cluster (if the cluster is set);
}
}
}
\section{Changes in version 1.9}{
\subsection{USER VISIBLE CHANGES}{
\itemize{
\item Major refactoring of classes used by SNP/CNP algorithms.
\enumerate{
\item New class: CNSet, AlleleSet, SnpSuperSet
\item Removed classes: Snp*QSet, Snp*CallSet, Snp*CallSetPlus
\item Genotyping algorithms will use SnpSet instead of SnpCallSet
}
\item Replaced thetaA, thetaB, senseThetaA, antisenseThetaA,
senseThetaB and antisenseThetaB methods by allele.
\item Exported DBPDInfo, ExpressionPDInfo, FeatureSet, TilingPDInfo
\item Added ff utilities used by both oligo and crlmm packages
}
}
\subsection{NEW FEATURES}{
\itemize{
\item bothStrands() methods for AlleleSet and SnpFeatureSet.
}
}
}
|
5004e32ed49998962e79cd4c26c3ac13fde27266
|
eca592d4066d28063fa9c1e92347a4f270e5548f
|
/plot1.R
|
824675e2829dfae453145fdcae75a1d5441b7737
|
[] |
no_license
|
llself/ExData_Plotting1
|
7cb95a728d5903cd9eb2c3e3c9522587cc9cb377
|
c829a2f6809b19a07c446f6c07d14808a45d1233
|
refs/heads/master
| 2020-12-02T16:35:31.848401
| 2014-12-04T21:56:23
| 2014-12-04T21:56:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,576
|
r
|
plot1.R
|
# Exploratory Data Analysis Class through Coursera
# Leslie Self
# December 2, 2014
# Plot1.R - creates a plot - histogram that is red displaying the frequency of the Global Active Power
# with main title "Global Active Power" and x-axis label as "Global Active Power(kilowatts)
library(sqldf)
library(lubridate)

# BUG FIX: 'fileURL' was used below but never defined, so download.file()
# aborted with "object 'fileURL' not found".  This is the course's zipped
# UCI "Individual household electric power consumption" data set.
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"

# Download the archive and extract only what is needed for the assignment:
# data for 1 Feb 2007 and 2 Feb 2007.
td = tempdir()                               # create a temporary directory
tf = tempfile(tmpdir = td, fileext = ".zip") # create the placeholder file
download.file(fileURL, tf)                   # download into the placeholder file
fname = unzip(tf, list = TRUE)$Name[1]       # name of the first (only) file in the zip
unzip(tf, files = fname, exdir = getwd(), overwrite = TRUE) # unzip to working directory

# Pull in only rows for 1/2/2007 and 2/2/2007 (dates are stored d/m/Y),
# filtering at read time via SQL so the full 2M-row file is never loaded.
powerDS <- read.csv.sql("household_power_consumption.txt",
                        sql = "select * from file where Date = '1/2/2007' or Date= '2/2/2007'",
                        header = TRUE,
                        sep = ";")

# Create a POSIXct column combining Date and Time.
powerDS$DateTime <- as.POSIXct(paste(powerDS$Date, powerDS$Time), format="%d/%m/%Y %H:%M:%S")

# Create the plot using the base plotting system.
# NOTE(review): windows() only opens a device on Windows; use x11()/quartz()
# (or skip the screen device and plot straight to png()) on other platforms.
windows()
hist(powerDS$Global_active_power, col = "red",
     xlab = "Global Active Power(kilowatts)",
     main = "Global Active Power")

# Copy the plot to a png file.
# The png device defaults to 480x480 pixels, so no size change is needed.
dev.copy(png, file = "plot1.png")
dev.off()
|
0873c80f2eef7be1862b70c884ed986d570293cc
|
2852308e9a4b8790bcfaa0d2dc34682125747f1d
|
/UI/WW_Codon_Adapter_ui.R
|
1d8e31b7dd1602d15a9f03fbfd48b6e7d8783bf6
|
[
"MIT"
] |
permissive
|
HallemLab/Wild_Worm_Codon_Adapter
|
c2bb05781704b6dd57ef45ab34c00095582c7470
|
b1d92adb91aa2e9fa31145afe75df469f0426324
|
refs/heads/master
| 2023-04-06T23:14:59.268081
| 2022-07-29T23:36:13
| 2022-07-29T23:36:13
| 265,046,341
| 1
| 0
|
MIT
| 2021-04-22T00:18:46
| 2020-05-18T19:52:25
|
R
|
UTF-8
|
R
| false
| false
| 19,266
|
r
|
WW_Codon_Adapter_ui.R
|
# Header ----
navbarPage(h3("Wild Worm Codon Adapter"),
windowTitle = "WWCA",
theme = shinytheme("flatly"),
collapsible = F,
id = "tab",
# Optimize Sequence Mode Tab ----
tabPanel(h4("Optimize Sequences"),
value = "optimization",
fluidRow(
column(width = 3,
panel(
heading = tagList(h5(shiny::icon("fas fa-sliders-h"),"Inputs & Options")),
status = "primary",
h5('Step 1: Upload Sequence', class = 'text-danger', style = "margin: 0px 0px 5px 0px"),
p(tags$em('Please input a cDNA or single-letter amino acid sequence for optimization. Alternatively, upload a gene sequence file (.gb, .fasta, or .txt files accepted).', style = "color: #7b8a8b")),
p(tags$em(tags$b('Note: Please hit the Clear button if switching between typing and uploading inputs.', style = "color: #F39C12"))),
### Sequence (text box)
textAreaInput('seqtext',
h6('Sequence (DNA or AA)'),
rows = 5,
resize = "vertical"),
### Upload list of sequences
uiOutput('optimization_file_upload'),
### Option to pick what species the sequence will be codon optimized for
h5('Step 2: Select Optimization Rule', class = 'text-danger', style = "margin: 0px 0px 5px 0px"),
p(tags$em('Select the codon usage pattern to apply. To apply a custom codon usage pattern, select the "Custom" option, then use the file loader to upload a list of optimal codons.', style = "color: #7b8a8b")),
div(id = "ruleDiv",
selectInput('sp_Opt',
h6('Select rule'),
choices = list("Strongyloides",
"Nippostrongylus",
"Pristionchus",
"Brugia",
"C. elegans",
"Custom"),
selected = "Strongyloides")
),
### Upload custom optimal codon table
uiOutput('custom_lut_upload'),
tags$br(),
h5('Step 3: Pick Intron Options', class = 'text-danger', style = "margin: 0px 0px 5px 0px"),
p(tags$em('Users may choose between three sets of built-in intron sequences, the canonical Fire lab set, PATC-rich introns, or native Pristionchus pacificus intron sequences. Alternatively, insert custom introns by selecting the "Custom" option, then using the file loader to upload a FASTA file containing custom introns.', style = "color: #7b8a8b")),
### Option to pick intron sequences (pulldown)
selectInput('type_Int',
h6('Built-in sequence source'),
choices = list("Canonical (Fire lab)",
"PATC-rich",
"Pristionchus",
"Custom"),
selected = "Canonical (Fire lab)"),
### Upload custom intron file (file loader)
uiOutput('custom_intron_upload'),
### Option to pick number of introns (pulldown)
selectInput('num_Int',
h6('Number of introns'),
choices = 0:3,
selected = 3),
## Option to pick intron insertion strategy (radio)
radioButtons('mode_Int',
h6('Intron insertion mode'),
choiceNames = list("Canonincal invertebrate exon splice junction (AG^A or AG^G)",
"Equidistantly along sequence length (Fire lab strategy)"),
choiceValues = list("Canon",
"Equidist")),
actionButton('goButton',
'Submit',
#width = '40%',
class = "btn-primary",
icon = icon("fas fa-share")),
actionButton('resetOptimization', 'Clear',
icon = icon("far fa-trash-alt"))
)
),
column(width = 9,
conditionalPanel(condition = "input.goButton",
panel(heading = tagList(h5(shiny::icon("fas fa-dna"),
"Optimized Sequences")),
status = "primary",
uiOutput("tabs"))
)),
column(width = 4,
uiOutput("seqinfo")
)
)
),
# Analysis Mode Tab ----
tabPanel(h4("Analyze Sequences"),
value = "analysis",
fluidRow(
column(width = 3,
panel(heading = tagList(h5(shiny::icon("fas fa-sliders-h"),"Inputs & Options")),
width = NULL,
status = "primary",
## GeneID Upload
h5('Analyze Transgene', class = 'text-danger', style = "margin: 0px 0px 5px 0px"),
p(tags$em('To measure the codon bias of an individual transgene, supply the cDNA sequence.', style = "color: #7b8a8b")),
### Sequence direct input
textAreaInput('cDNAtext',
h6('Transgene sequence'),
rows = 2,
resize = "vertical"),
h5('Analyze Native Sequences', class = 'text-danger', style = "margin: 5px 0px 5px 0px"),
p(tags$em('To perform analysis of native coding sequences, list sequence IDs as: WormBase gene IDs (prefix: WB), species-specific gene or transcript IDs (prefixes: SSTP, SRAE, SPAL, SVEN, Bma, Ppa, NBR); C. elegans gene names with a "Ce-" prefix (e.g. Ce-ttx-1); or C. elegans transcript IDs. For individual analyses use textbox input; for bulk analysis upload gene/transcript IDs as a single-column CSV file. If using the text box, please separate search terms by a comma.', style = "color: #7b8a8b")),
p(tags$em('Alternatively, users may directly provide coding sequences for analysis, either as a 2-column CSV file listing sequence names and coding sequences, or a FASTA file containing named coding sequences.', style = "color: #7b8a8b")),
p(tags$em('Example CSV files can be downloaded using the Data Availability panel in the About tab', style = "color: #7b8a8b")),
p(tags$em(tags$b('Note: Please hit the Clear button if switching between typing and uploading inputs.', style = "color: #F39C12"))),
### GeneID (text box)
textAreaInput('idtext',
h6('Gene/Transcript IDs'),
rows = 2,
resize = "vertical"),
uiOutput('analysis_file_upload'),
actionButton('goAnalyze',
'Submit',
class = "btn-primary",
icon = icon("fas fa-share")),
actionButton('resetAnalysis', 'Clear',
icon = icon("far fa-trash-alt"))
)
),
column(width = 6,
conditionalPanel(condition = "input.goAnalyze",
panel(heading = tagList(h5(shiny::icon("fas fa-calculator"),
"Sequence Info")),
status = "primary",
DTOutput("info_analysis")
)
)
),
column(width = 3,
conditionalPanel(condition ="input.goAnalyze != 0 && output.info_analysis",
panel(heading = tagList(h5(shiny::icon("fas fa-file-download"),
"Download Options")),
status = "primary",
prettyCheckboxGroup("download_options",
'Select Values to Download',
status = "default",
icon = icon("check"),
choiceNames = c("GC ratio",
"Sr_CAI values",
"Ce_CAI values",
"Bm_CAI values",
"Nb_CAI values",
"Pp_CAI values",
"Coding sequences"
),
choiceValues = c("GC",
"Sr_CAI",
"Ce_CAI",
"Bm_CAI",
"Nb_CAI",
"Pp_CAI",
"coding sequence"),
selected = c("GC",
"Sr_CAI",
"Ce_CAI",
"Bm_CAI",
"Nb_CAI",
"Pp_CAI",
"coding sequence")),
uiOutput("downloadbutton_AM")
))
)
)
),
## About Tab ----
tabPanel(h4("About (v1.2.0)"),
value = "about",
fluidRow(
column(8,
panel(heading = tagList(h5(shiny::icon("fas fa-question-circle"),
"App Overview")),
status = "primary",
id = "About_Overview",
includeMarkdown('UI/README/README_Features.md')
)
),
column(4,
panel( heading = tagList(h5(shiny::icon("fas fa-drafting-compass"),
"Authors and Release Notes")),
status = "primary",
id = "About_Updates",
includeMarkdown('UI/README/README_Updates.md')
)
)
),
fluidRow(
column(8,
panel(heading = tagList(h5(shiny::icon("fas fa-chart-line"),
"Optimization Methods")),
status = "primary",
id = "About_Analysis_Methods",
tabsetPanel(
type = "pills",
tabPanel(
title = "Codon Adaptation Index",
includeMarkdown('UI/README/README_Methods_CAI.md')
),
tabPanel(
title = "GC Ratio",
includeMarkdown('UI/README/README_Methods_GC.md')
),
tabPanel(
title = "Artificial Introns",
includeMarkdown('UI/README/README_Methods_Introns.md')
)
)
)
),
column(4,
panel(heading = tagList(h5(shiny::icon("fas fa-cloud-download-alt"),
"Data Availability")),
status = "primary",
p('The following datasets used can be
downloaded using the dropdown menu and download button below:',
tags$ol(
tags$li('Multi-species codon frequency/relative adaptiveness table (.csv)'),
tags$li('Multi-species optimal codon lookup table (.csv)'),
tags$li('Example custom preferred codon table (.csv)'),
tags$li('Example geneID list (.csv)'),
tags$li('Example 2-column geneID/sequence list (.csv)'),
tags$li('Example custom intron list (.fasta)')
)),
pickerInput("which.Info.About",
NULL,
choices = c('Multi-species codon frequency table',
"Multi-species optimal codon table",
"Example custom preferred codon table",
"Example geneID list",
"Example 2-column geneID/sequence list",
"Example custon intron list"),
options = list(style = 'btn btn-primary',
title = "Select a file to download")),
uiOutput("StudyInfo.panel.About")
)
)
)
)
)
|
042c711dd79b390f6fff8ea15fab51c53a53533e
|
65ca8a525fc395d99648233b8f05d51b519404e9
|
/R/runScrape.R
|
b41a641c308b858a419de78e9d1d587e74718edb
|
[] |
no_license
|
isaactpetersen/ffanalytics
|
c3c21cd9e7209f0e511011eb1df4a62c4525c4a1
|
b65b7c700d5c273bf6960f56be7858fdb78de746
|
refs/heads/master
| 2021-09-20T18:33:06.526322
| 2018-08-14T03:48:58
| 2018-08-14T03:48:58
| 85,698,645
| 6
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,379
|
r
|
runScrape.R
|
#' Scrape Projections
#'
#' Executes a scrape of players' fantasy football projections based on the selected
#' season, week, analysts, and positions. If no inputs are specified, the user is prompted.
#'
#' @note The function has the ability to include subscription based sources,
#' but you will need to either download subscription projections separately or
#' provide a user name and password for those sites.
#' Scraping past seasons/weeks is nearly impossible because very few if any sites
#' make their historical projections available. An attempt to scrape historical
#' projections will likely produce current projections in most cases.
#' @param season The season of projections to scrape (e.g., 2015).
#' @param week The week number of projections to scrape (e.g., 16).
#' Week number should be an integer between 0 and 21.
#' Week number 0 reflects seasonal projections.
#' Week number between 1 and 17 reflects regular season projections.
#' Week number between 18 and 21 reflects playoff projections.
#' @param analysts An integer vector of analystIds specifying which analysts' projections to
#' scrape. See \link{analysts} data set for values of analystIds.
#' @param positions A character vector of position names specifying which positions
#' to scrape: \code{c("QB", "RB", "WR", "TE", "K", "DST", "DL", "LB", "DB")}.
#' @return list of \link{dataResults}. One entry per position scraped.
#' @examples
#' runScrape(season = 2016, week = 0, ## Scrape 2016 season data for all
#' analysts = 99, positions = "all") ## available analysts and positions
#'
#' runScrape(season = 2016, week = 1, ## Scrape 2016 week 1 data for
#' analysts = c(-1, 5), ## CBS Average and NFL.com
#' positions = c("QB", "RB", "WR", "TE",)) ## and offensive positions
#' @export runScrape
runScrape <- function(season = NULL, week = NULL,
                      analysts = NULL, positions = NULL,
                      fbgUser = NULL, fbgPwd = NULL, updatePlayers = TRUE){
  # Scrapes fantasy football projections for the requested season/week,
  # analysts and positions, prompting interactively for anything omitted.
  # Side effects: assigns 'playerData' in the global environment when
  # updatePlayers = TRUE, and shows a progress bar while scraping.
  # Returns a named list of dataResult objects (one per position) plus
  # 'period' and 'analysts' entries.
  # NOTE: fbgPwd now defaults to NULL (like fbgUser) instead of being a
  # required-but-lazily-evaluated argument.

  # Request input from user to determine period to scrape
  if(is.null(week) & is.null(season)){
    scrapeSeason <- as.numeric(readline("Enter season year to scrape: "))
    scrapeWeek <- as.numeric(readline("Enter week to scrape (use 0 for season): "))
  } else {
    scrapeWeek <- ifelse(is.null(week), 0, week)
    scrapeSeason <- ifelse(is.null(season), as.POSIXlt(Sys.Date())$year + 1900, season)
  }

  scrapePeriod <- dataPeriod()
  if(!is.na(scrapeWeek))
    scrapePeriod["weekNo"] <- scrapeWeek
  if(!is.na(scrapeSeason))
    scrapePeriod["season"] <- scrapeSeason
  scrapeType <- periodType(scrapePeriod)

  # Request input from user to select the analysts to scrape for
  selectAnalysts <- analystOptions(scrapePeriod)
  if(is.null(analysts)){
    scrapeAnalysts <- selectAnalysts[select.list(names(selectAnalysts),
                                                 title = "Select Analysts to Scrape",
                                                 multiple = TRUE)]
    # An empty selection falls back to scraping every available analyst.
    if(max(nchar(scrapeAnalysts)) == 0)
      scrapeAnalysts <- selectAnalysts
  } else {
    if(any(analysts == 99)){
      # analystId 99 is the "all analysts" sentinel.
      scrapeAnalysts <- selectAnalysts
    } else {
      scrapeAnalysts <- analysts
    }
  }

  # Positions the chosen analysts actually project for.
  selectPositions <- analystPositions$position[analystPositions$analystId %in% scrapeAnalysts]

  # Request input from user to select the positions to scrape for
  if(is.null(positions)){
    scrapePosition <- select.list(position.name, multiple = TRUE,
                                  title = "Select positions to scrape")
    if(max(nchar(scrapePosition)) == 0)
      scrapePosition <- selectPositions
  } else {
    if(any(tolower(positions) == "all")){
      # BUG FIX: this previously assigned 'scrapePositions' (trailing "s"),
      # a variable that was never read, so positions = "all" later failed
      # with "object 'scrapePosition' not found".
      scrapePosition <- selectPositions
    } else {
      scrapePosition <- positions
    }
  }

  urlTable <- getUrls(scrapeAnalysts, scrapeType, scrapePosition)
  if(nrow(urlTable) == 0){
    stop("Nothing to scrape. Please try again with different selection.", call. = FALSE)
  }

  if(updatePlayers){
    cat("Retrieving player data \n")
    # Global assignment on purpose: downstream helpers read 'playerData'.
    playerData <<- getPlayerData(season = scrapeSeason, weekNo = scrapeWeek,
                                 pos = scrapePosition)
  }

  # Pick the platform-appropriate progress-bar implementation.
  if(.Platform$OS.type == "windows"){
    progress_bar <- winProgressBar
    set_progress_bar <- setWinProgressBar
    get_progress_bar <- getWinProgressBar
  } else {
    progress_bar <- tcltk::tkProgressBar
    set_progress_bar <- tcltk::setTkProgressBar
    get_progress_bar <- tcltk::getTkProgressBar
  }

  numUrls <- nrow(urlTable)
  pb_value <- 0
  pb <- progress_bar(title = "Scraping Data ...", label = "Starting Data Scrape",
                     0, 1)

  # Scrape one analyst/position URL at a time, advancing the progress bar.
  scrapeResults <- apply(urlTable, 1, function(urlInfo){
    scrapeSrc <- createObject("sourceTable", as.list(urlInfo))
    srcId <- as.numeric(urlInfo["analystId"])
    analystName <- names(selectAnalysts)[selectAnalysts == srcId]
    info <- paste("Scraping", analystName,
                  urlInfo[["sourcePosition"]])
    set_progress_bar(pb, get_progress_bar(pb), title = "Scraping Data ...", label = info)
    scraped <- retrieveData(scrapeSrc, scrapePeriod, fbgUser, fbgPwd)
    pb_value <- get_progress_bar(pb) + 1/numUrls
    set_progress_bar(pb, pb_value, title = "Scraping Data ...", label = info)
    return(scraped)
  })
  close(pb)

  # Per-position record of which analysts succeeded/failed.
  scrapeSummary <- data.table::data.table(pos = as.character(),
                                          success = as.character(),
                                          failure = as.character())

  returnData <- lapply(intersect(position.name, urlTable$sourcePosition), function(pos){
    # Combine all analysts' tables for this position.
    resData <- data.table::rbindlist(
      lapply(scrapeResults[which(urlTable$sourcePosition == pos)],
             function(sr)sr@resultData), fill = TRUE)
    resData[, position := pos]
    expectedAnalysts <- as.numeric(unique(urlTable$analystId[urlTable$sourcePosition == pos]))
    names(expectedAnalysts) <- unique(urlTable$analystName[urlTable$sourcePosition == pos])
    actualAnalysts <- as.numeric(unique(resData$analyst))
    missingAnalysts <- as.numeric(setdiff(expectedAnalysts, actualAnalysts))
    pos.summary <- data.table::data.table("pos" = pos,
                                          success = paste(names(expectedAnalysts)[which(expectedAnalysts %in% actualAnalysts)],
                                                          collapse = ", "))
    if(length(missingAnalysts) > 0)
      pos.summary[, failure := paste(names(expectedAnalysts)[which(expectedAnalysts %in% missingAnalysts)], collapse = ", ")]
    # '<<-' reaches the function-scope scrapeSummary defined above.
    scrapeSummary <<- data.table::rbindlist(list(scrapeSummary, pos.summary), fill = TRUE)
    if(!is.character(resData$position))
      resData$position <- as.character(resData$position)
    return(dataResult(resultData = resData, position = pos))
  })

  cat("=================\nScrape Summary:\n")
  for(p in unique(urlTable$sourcePosition)){
    cat("\t", p, ":\n")
    cat("\t\tSuccessfully:", scrapeSummary[pos == p]$success, "\n")
    cat("\t\tFailed:", scrapeSummary[pos == p]$failure, "\n")
  }

  names(returnData) <- intersect(position.name, urlTable$sourcePosition)
  # Kickers need field-goal columns normalized across sources.
  if(any(names(returnData) == "K"))
    returnData[["K"]]@resultData <- updateFieldGoals(data.table::copy(returnData[["K"]]@resultData))

  # Append rows for players eligible at more than one position.
  dualData <- dualPositionData(returnData)
  for(pos in names(dualData)){
    table.list <- list(returnData[[pos]]@resultData, dualData[[pos]])
    returnData[[pos]]@resultData <- data.table::rbindlist(table.list, fill = TRUE)
  }

  returnData$period <- scrapePeriod
  returnData$analysts <- scrapeAnalysts
  return(returnData)
}
#' Analyst options for a period
#'
#' Find the analysts that are projecting stats for the provided period
#' @export
analystOptions <- function(period){
  # Returns a named integer vector of analystIds available for 'period':
  # values are analystIds, names are display labels ("Site: Analyst" when a
  # site has several analysts). Reads the package-level 'analysts', 'sites'
  # and 'siteUrls' data.table objects.
  if(periodType(period) == "Season"){
    periodAnalysts <- analysts[season == 1]
  }
  if(periodType(period) == "Week"){
    periodAnalysts <- analysts[weekly == 1]
  }
  # Keep only analysts whose site actually has scrape URLs configured.
  periodAnalysts <- periodAnalysts[siteId %in% siteUrls$siteId]
  periodAnalysts <- merge(periodAnalysts, sites, by = "siteId")
  # Disambiguate with "Site: Analyst" labels when a site appears more than once.
  periodAnalysts[siteId %in% periodAnalysts$siteId[duplicated(siteId)],
                 listName := paste0(siteName, ": ", analystName)]
  periodAnalysts[is.na(listName), listName := analystName]
  periodAnalysts <- periodAnalysts[order(siteId, analystId)]
  analystList <- periodAnalysts$analystId
  names(analystList) <- periodAnalysts$listName
  return(analystList)
}
|
278267856d5525db72a9b7441c3cd71843e4021f
|
685245043fd77c6a2f9a5fef80be742ed8022f29
|
/cachematrix.R
|
3cea0d13e23332ff6c1b8ae6617d256e87b89d87
|
[] |
no_license
|
bostrovsky/ProgrammingAssignment2
|
22c3aecbba8607e8bb5c3ef666592ea2deb9189d
|
bf5bf7bcb0b43f3b22cbfaafc147e87f2b8159f5
|
refs/heads/master
| 2021-01-21T19:28:03.131648
| 2014-10-21T19:32:09
| 2014-10-21T19:32:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,567
|
r
|
cachematrix.R
|
## The assignment was to write two functions: one to cache the inverse of a matrix (makeCacheMatrix)
## and one to invert a matrix unless it has already been done (cacheSolve). If it has already
## been solved, cacheSolve should retrieve the already inverted matrix.
## makeCacheMatrix does the following:
## 1. sets the value of the matrix
## 2. gets the value of the matrix
## 3. sets the inverse of the matrix
## 4. gets the inverse of the matrix
## It is the same function as the example given for the vector, modified for a matrix.
## cacheSolve checks for an already calculated matrix inverse.
## If it finds one it retrieves it. If not, it calculates it.
## Again, it is the same function as the example given for the vector, modified for a matrix.
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache-aware matrix": a list of four closures sharing the matrix
  # 'x' and a cached inverse. Setting a new matrix invalidates the cache.
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and clear any stale inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse (argument name kept for compatibility).
    setInv = function(solve) cached_inverse <<- solve,
    # Return the cached inverse, or NULL if none has been stored yet.
    getInv = function() cached_inverse
  )
}
## cacheSolve calculates the inverse of an invertable matrix created in the above function
## unless it has already been inverted. If it has, cacheSolve retrieves that solution.
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by 'x' (a makeCacheMatrix
  # object). On a cache miss the inverse is computed with solve() and stored;
  # on a cache hit a message is emitted and the stored inverse is returned.
  cached <- x$getInv()
  if (is.null(cached)) {
    inverse <- solve(x$get(), ...)
    x$setInv(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
|
22ef4bc8f65a857647ac5ffdbbde8722cdcf754d
|
d4451c85cbd4b4538aede88be9c2a1ec22ede530
|
/man/graft.vs.host.Rd
|
c3bece21f4c1fb73ec8bbf647d1c69e24f89fedd
|
[] |
no_license
|
cran/ISwR
|
db7d2287ba64cda7e72c41d1597bd729b2b0048d
|
39c697c78bd35df51b1e4f96aeb20dda89c1b0d1
|
refs/heads/master
| 2021-06-02T22:10:53.526489
| 2020-01-20T08:50:02
| 2020-01-20T08:50:02
| 17,679,985
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,519
|
rd
|
graft.vs.host.Rd
|
\name{graft.vs.host}
\alias{graft.vs.host}
\title{Graft versus host disease}
\description{
  The \code{graft.vs.host} data frame has 37 rows and 9 columns.
It contains data from patients receiving a nondepleted allogenic bone
marrow transplant with the purpose of finding variables associated with
the development of acute graft-versus-host disease.
}
\usage{graft.vs.host}
\format{
This data frame contains the following columns:
\describe{
\item{\code{pnr}}{
a numeric vector patient number.
}
\item{\code{rcpage}}{
a numeric vector, age of recipient (years).
}
\item{\code{donage}}{
a numeric vector, age of donor (years).
}
\item{\code{type}}{
a numeric vector, type of leukaemia coded 1: AML, 2: ALL, 3: CML
for acute myeloid, acute lymphatic, and chronic myeloid leukaemia.
}
\item{\code{preg}}{
a numeric vector code indicating whether donor has been pregnant. 0: no, 1: yes.
}
\item{\code{index}}{
a numeric vector giving an index of mixed epidermal cell-lymphocyte
reactions.
}
\item{\code{gvhd}}{
a numeric vector code, graft-versus-host disease, 0: no, 1: yes.
}
\item{\code{time}}{a numeric vector, follow-up time}
\item{\code{dead}}{a numeric vector code, 0: no (censored), 1: yes}
}
}
\source{
D.G. Altman (1991), \emph{Practical Statistics for Medical Research},
Exercise 12.3, Chapman & Hall.
}
\examples{
plot(jitter(gvhd,0.2)~index,data=graft.vs.host)
}
\keyword{datasets}
|
e9cdc4383cec5e678b99f382ae858f24f5d733dd
|
1e0f4661062ee00a7b4b74301f3b672d8963a9bd
|
/Lecture_2_advanced_R/loop_server.R
|
022e171956320a117ef8667f9164a5e9ae845bd6
|
[] |
no_license
|
jasonqiangguo/econ5170
|
290e59a1cb233e52ea7e3676ef28354d5e3408b5
|
ee97466f3af7455aa248a044fbdb138d77501e56
|
refs/heads/master
| 2020-12-25T00:06:40.497804
| 2016-03-20T15:37:45
| 2016-03-20T15:37:45
| 60,931,462
| 1
| 0
| null | 2016-06-11T23:00:48
| 2016-06-11T23:00:48
| null |
UTF-8
|
R
| false
| false
| 1,113
|
r
|
loop_server.R
|
library(plyr)
library(foreach)
library(doParallel)
# prepare the functions
mu = 2
# Approximate 95% confidence interval for the mean of a numeric vector.
# Returns a list with elements `lower` and `upper`.
CI <- function(x) {
  centre <- mean(x)
  half_width <- 1.96 * sd(x) / sqrt(length(x))
  list(lower = centre - half_width, upper = centre + half_width)
}
# One Monte Carlo replication: draw a Poisson sample and report whether the
# 95% CI covers the true mean. Relies on the globals `sample_size` and `mu`;
# the argument `i` is only a replication index and is ignored.
capture <- function(i){
  bounds <- CI(rpois(sample_size, mu))
  (bounds$lower <= mu) & (mu <= bounds$upper)
}
############ implementation ###############
# Number of Monte Carlo replications and the Poisson sample size per draw.
Rep = 200
sample_size = 5000000
# --- serial run ---
pts0 = Sys.time() # check time
out = ldply(.data = 1:Rep, .fun = capture, .parallel = FALSE)
# Fraction of replications whose CI covered the true mean; ~0.95 expected.
cat( "empirical coverage probability = ", mean(out$V1), "\n") # empirical size
pts1 = Sys.time() - pts0 # check time elapse
print(pts1)
# compare to the parallel version
registerDoParallel(16) # opens other CPUs
pts0 = Sys.time() # check time
# .export copies every global (mu, sample_size, CI, ...) to the workers so
# `capture` can resolve them inside each parallel process.
out = ldply(.data = 1:Rep, .fun = capture, .parallel = TRUE,
.paropts = list(.export = ls(envir=globalenv() )) )
cat( "empirical coverage probability = ", mean(out$V1), "\n") # empirical size
pts1 = Sys.time() - pts0 # check time elapse
print(pts1)
|
b084492ad015c866a0f455f3f1487500cecb84b0
|
ff65c1db4ef1db242fb24da4cc52e8359ca35a2f
|
/inst/app/make_in_situ_plot.R
|
0b51f1c97957ea56e542dec3b8924e0f0a9c9d63
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
whtns/plaeApp
|
02ed8ab41978b2cf0ac51600719831f6eb2c0302
|
c0b61494407ad763bddbc3e332ed0f6d18b608e5
|
refs/heads/master
| 2023-01-06T17:31:57.061373
| 2020-11-03T14:05:43
| 2020-11-03T14:05:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,305
|
r
|
make_in_situ_plot.R
|
## Read one pre-rendered retina layer image from www/insitu_layers/.
## `file` is the bare layer name, e.g. "rods" -> www/insitu_layers/ret_rods.png
get_image <- function(file) {
  image_read(file.path("www/insitu_layers", paste0("ret_", file, ".png")))
}
## Overlay `color` on a cell layer at 75% opacity. When no colour is supplied
## (length-zero vector, i.e. the cell type had no expression value) the layer
## is returned unchanged.
recolor <- function(ret_layer, color){
  if (length(color) == 0) {
    return(ret_layer)
  }
  image_colorize(ret_layer, 75, color)
}
## Gather per-cell-type expression stats for the selected gene.
## Returns a tibble with Gene, CellType_predict, cell_exp_ct, Count, `%`
## (fraction of cells expressing, in percent) and Expression (mean CPM
## scaled by that fraction), sorted by decreasing Expression.
##
## The original had the whole pipeline duplicated in both branches of the
## filter check; the branches differed only in one filter() step, so the
## shared tail is now written once.
get_insitu_table <- function(input, db, meta_filter) {
  ### Pull the data for the gene of interest
  gene <- input$insitu_Gene
  grouping_features <- "CellType_predict"
  if (input$insitu_filter_cat != ''){
    validate(
      need(input$insitu_filter_on != '', "Please select at least one feature to filter on")
    )}
  ### Start the lazy query; splice in the user filter only when one is active
  expr <- db %>% tbl('grouped_stats') %>%
    filter(Gene == gene)
  if (input$insitu_filter_cat != ''){
    filt_cat <- input$insitu_filter_cat
    filt_on <- input$insitu_filter_on
    expr <- expr %>% filter(!!as.symbol(filt_cat) %in% filt_on)
  }
  ### Aggregate to one row per cell type, join in total cell counts, and
  ### derive the percent-expressing and scaled-expression columns
  full_table <- expr %>%
    group_by_at(vars(one_of(c('Gene', grouping_features)))) %>%
    summarise(cpm = sum(cpm * cell_exp_ct) / sum(cell_exp_ct),
              cell_exp_ct = sum(cell_exp_ct, na.rm = TRUE)) %>%
    as_tibble() %>%
    tidyr::drop_na() %>%
    full_join(., meta_filter %>%
                group_by_at(vars(one_of(grouping_features))) %>%
                summarise(Count = n())) %>%
    mutate(cell_exp_ct = ifelse(is.na(cell_exp_ct), 0, cell_exp_ct)) %>%
    mutate(`%` = round((cell_exp_ct / Count) * 100, 2),
           Expression = round(cpm * (`%` / 100), 2)) %>%
    select_at(vars(one_of(c('Gene', grouping_features, 'cell_exp_ct', 'Count', '%', 'Expression')))) %>%
    arrange(-Expression)
  full_table
}
make_insitu_plot <- function(input, scEiaD_2020_v01, meta_filter){
### Load unedited images
amacrine <- get_image('amacrine')
artery <- get_image('artery')
astrocyte <- get_image('astrocyte')
axons <- get_image('axons')
layer_labels <- get_image('background')
bipolar <- get_image('bipolar')
bruch <- get_image('bruch')
choriocap <- get_image('choriocap')
cones <- get_image('cones')
horizontal <- get_image('horizontal')
cell_labels <- get_image('labels')
melanocytes <- get_image('melanocytes')
microglia <- get_image('microglia')
muller <- get_image('muller')
rgc <- get_image('rgc')
rods <- get_image('rods')
rpe <- get_image('rpe')
sclera <- get_image('sclera')
vein <- get_image('vein')
full_table <- get_insitu_table(input, scEiaD_2020_v01, meta_filter)
### Create mini table with color codes
p <- full_table %>%
select(CellType_predict,Expression) %>%
tidyr::drop_na() %>%
arrange(Expression)
### Convert expression to color scale
p$col <- viridis(length(p$Expression))
### Generate legend plot
leg_lab <- seq(min(p$Expression),max(p$Expression),l=5)
leg_lab[] <- lapply(leg_lab, round,2)
legend <- image_graph(width = 300, height = 600, res = 96)
legend_image <- as.raster(matrix(rev(p$col), ncol=1))
plot(c(0,3),c(0,1),type = 'n', axes = F,xlab = '', ylab = '', main = expression(bold("Log"[2] * "(CPM + 1)")), cex.main=1.5)
text(x=2, y = seq(0,1,l=5), labels = leg_lab[], cex=1.5)
rasterImage(legend_image, 0, 0, 1,1)
dev.off()
### Recolor each layer based on expression
amacrine <- recolor(amacrine, p$col[which(p$CellType_predict == "Amacrine Cells")])
artery <- recolor(artery, p$col[which(p$CellType_predict == "Artery")])
astrocyte <- recolor(astrocyte, p$col[which(p$CellType_predict == "Astrocytes")])
axons <- recolor(axons, p$col[which(p$CellType_predict == "Axons")])
bipolar <- recolor(bipolar, p$col[which(p$CellType_predict == "Bipolar Cells")])
bruch <- recolor(bruch, p$col[which(p$CellType_predict == "Bruch Membrane")])
cones <- recolor(cones, p$col[which(p$CellType_predict == "Cones")])
choriocap <- recolor(choriocap, p$col[which(p$CellType_predict == "Choriocapillaris")])
horizontal <- recolor(horizontal, p$col[which(p$CellType_predict == "Horizontal Cells")])
melanocytes <- recolor(melanocytes, p$col[which(p$CellType_predict == "Melanocytes")])
microglia <- recolor(microglia, p$col[which(p$CellType_predict == "Microglia")])
muller <- recolor(muller, p$col[which(p$CellType_predict == "Muller Glia")])
rpe <- recolor(rpe, p$col[which(p$CellType_predict == "RPE")])
rgc <- recolor(rgc, p$col[which(p$CellType_predict == "Retinal Ganglion Cells")])
rods <- recolor(rods, p$col[which(p$CellType_predict == "Rods")])
sclera <- recolor(sclera, p$col[which(p$CellType_predict == "Sclera")])
vein <- recolor(vein, p$col[which(p$CellType_predict == "Vein")])
### Merge the recolored layers into single image
retina_insitu <- c(layer_labels, amacrine, artery, astrocyte,choriocap, bipolar, bruch, cones, horizontal, melanocytes, microglia, muller, axons, rpe, rgc, rods, sclera, vein, cell_labels)
ret_img <- retina_insitu %>%
image_mosaic() %>%
image_flatten()
### Append the legend to the side and write a temporary file with the complete image
tmpfile <- image_append(c(ret_img,legend), stack=FALSE) %>%
image_write(tempfile(fileext='png'), format = 'png')
### Return the location of the temporary file
return(list(src = tmpfile,
height = input$insitu_height,
contentType = "image/png"))
}
|
8b6eca09d10746e4ff5b7c645945dec14daf23c2
|
0c8d30f3ef99f0c68d21fb89bbee657e2114f7f0
|
/FISH 552_Intro to R for Biologists/552_Homework1/fishPassage work.R
|
3dfcf46defbef91bce974084e316d21c85f9a89e
|
[] |
no_license
|
atchin/reference-scripts
|
73654c53c9a145cf0fd7dc910df465ace4ab9227
|
5d0943ecdc596455435f6073dac1fbccd10456f6
|
refs/heads/master
| 2023-04-25T00:54:44.975789
| 2021-05-05T23:48:33
| 2021-05-05T23:48:33
| 298,167,442
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,695
|
r
|
fishPassage work.R
|
# Name: Andrew Chin
# Homework 1, Part 2
fishPassage <- read.csv("fishPassage.csv", header = TRUE)
labels(fishPassage)

## Max and min wild steelhead passing Bonneville Dam, and the year of each.
# Subset the Bonneville observations first.
BON.fishPassage <- fishPassage[fishPassage$Dam == "BON", ]
head(BON.fishPassage)
# NOTE: which.max()/which.min() already skip NAs and return positions in the
# ORIGINAL vector, so they can index Year directly. The original script
# indexed Year with positions computed on an NA-stripped copy of
# Wild.Steelhead, which mis-aligns whenever an NA precedes the extreme value.
max(BON.fishPassage$Wild.Steelhead, na.rm = TRUE)
BON.fishPassage$Year[which.max(BON.fishPassage$Wild.Steelhead)]
# maximum wild steelhead passage was in 2001, with 149,582 fish.
min(BON.fishPassage$Wild.Steelhead, na.rm = TRUE)
BON.fishPassage$Year[which.min(BON.fishPassage$Wild.Steelhead)]
# 12 fish passed over Bonneville dam in 1996, the lowest on record.

## Total number of fish counted at Bonneville in 2007 (species columns 3:11).
BON.fishPassage[BON.fishPassage$Year == "2007", 3:11]
sum(BON.fishPassage[BON.fishPassage$Year == "2007", 3:11])
# 3,454,997 total fish, comprised of 8 species, passed over Bonneville in 2007.

## Data frame of Bonneville observations from 1995 onwards.
fishPassage1995BON <- BON.fishPassage[BON.fishPassage$Year >= 1995, ]
fishPassage1995BON

## Matrix of coho adult counts, coho jack counts, and jack:adult ratios.
# Row count taken from the data instead of the hard-coded 13.
cohoPassage <- matrix(
  c(fishPassage1995BON$Coho.Adult,
    fishPassage1995BON$Coho.Jack,
    fishPassage1995BON$Coho.Jack / fishPassage1995BON$Coho.Adult),
  nrow = nrow(fishPassage1995BON), ncol = 3
)
colnames(cohoPassage) <- c("coho adult", "coho jack", "jack:adult")
cohoPassage

## Column means, rounded to two decimal places (the rounding step asked for
## in the assignment was missing from the original script).
colMeans(cohoPassage)
round(colMeans(cohoPassage), 2)
|
7b56575f5e7525ef6023dd949a78ea6081bbd3f3
|
82967c8f2b6b219c5623713cbf466009055b2b6c
|
/code/genes/AlleleProportionsByCluster.R
|
4377c20ed08f7acd53df45e082a1aeb46ad66d23
|
[
"MIT"
] |
permissive
|
ejcorn/neuropathcluster
|
f6c4517ba7e4c82913ade3b065af6a8e97205009
|
02dc1f07e6523cf6a3ad9e79e7d88a7d76891409
|
refs/heads/master
| 2022-11-30T12:34:23.599889
| 2020-08-03T21:50:29
| 2020-08-03T21:50:29
| 184,345,803
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,918
|
r
|
AlleleProportionsByCluster.R
|
rm(list = setdiff(ls(), c("params")))
homedir <- params$homedir
setwd(homedir)
savedir <- paste(params$resultsdir,'genecluster/',sep='')
dir.create(savedir,recursive=T)
source('code/misc/fxns.R')
source('code/misc/plottingfxns.R')
microSample <- read.csv(paste(params$opdir,'processed/microSample.csv',sep=''),stringsAsFactors=F)[,-(1:2)] # Get rid of index column and INDDIDs
patientSample <- read.csv(paste(params$opdir,'processed/patientSample.csv',sep=''),stringsAsFactors=F)[,-(1:2)] # Get rid of index column and INDDIDs
INDDIDs <- read.csv(paste(params$opdir,'processed/microSample.csv',sep=''),stringsAsFactors=F)[,2]
# Abort if any subject ID appears more than once: every downstream step
# assumes one row per INDDID. The original used `break` here, which is an
# error at top level in R ("no loop for break/next") rather than a clean halt.
if(sum(duplicated(INDDIDs)) > 0){
  stop("Duplicate INDDIDs found in microSample.csv")
}
#####################
### Load clusters ###
#####################
load(file = paste(params$resultsdir,'analyzecluster/subjLouvainPartitionReordered.RData',sep=''))
INDDIDs <- remove.Disconnected.Subjects(INDDIDs,DisconnectedSubjects)
microSample <- remove.Disconnected.Subjects(microSample,DisconnectedSubjects)
patientSample <- remove.Disconnected.Subjects(patientSample,DisconnectedSubjects)
clusterColors <- getClusterColors(k)
load(file=paste(savedir,'AlleleTablesCluster.RData',sep=''))
##########################################
### Plot allele proportions by cluster ###
##########################################
G.color.inds <- list(APOE=c(1,3,7),MAPTHaplotype=c(4:5))
ClusterProportion.byAllele <- lapply(Allele.Tables, function(A)
sapply(clusterNames, function(k.i) colSums(A[partitionSample == k.i,]) / colSums(A)))
AlleleProportion.byCluster <- list()
for(g.i in names(Allele.Tables)){
AlleleProportion.byCluster[[g.i]] <- sapply(clusterNames, function(k.i) colSums(Allele.Tables[[g.i]][partitionSample == k.i,]) / (2*sum(partitionSample == k.i)))
Allele.Proportions <- AlleleProportion.byCluster[[g.i]]
df <- data.frame(y=as.vector(Allele.Proportions),
g=as.vector(sapply(clusterNames, function(i) matrix(rownames(Allele.Proportions),ncol=1))),
x=rep(clusterNames,each=nrow(Allele.Proportions)))
save(df,file=paste(savedir,'Fig4a-b_',g.i,'SourceData.RData',sep=''))
pal.g <- colorRampPalette(brewer.pal(name = 'Set3',n=12))
pal.g <- pal.g(12)[G.color.inds[[g.i]]]
p <- ggplot(data=df,aes(y=y,x=x,fill=g)) + geom_col(position=position_dodge(width = 0.9)) + theme_classic()+
scale_fill_manual(values=pal.g,name='') + scale_y_continuous(limits=c(0,1)) +
ylab('Proportion of Cluster') + xlab('') + ggtitle(g.i) +
theme(
#legend.position = c(0.32,0.8),
plot.margin = unit(c(0, 0, 0, 0), "cm"),
legend.key.size = unit(0.1,'in'),
plot.title = element_text(face='bold',size=8,hjust=0.5)) +
theme(text= element_text(size=8),axis.text.x = element_text(angle=90,vjust=0.5,hjust=1,color=clusterColors))
p
ggsave(filename = paste(savedir,g.i,'AlleleProportionsByCluster.pdf',sep=''),plot = p,
height = 5.5,width=7.5,units='cm')
}
|
fe5084f9efcaa6d6371e506a2f45381eb7d9df4a
|
e5ebddef173d10c4722c68f0ac090e5ecc626b8b
|
/KIR/bin/HLA.R
|
9e90a9e323157c89f1fd8211209be08875175cce
|
[] |
no_license
|
pontikos/PhD_Projects
|
1179d8f84c1d7a5e3c07943e61699eb3d91316ad
|
fe5cf169d4624cb18bdd09281efcf16ca2a0e397
|
refs/heads/master
| 2021-05-30T09:43:11.106394
| 2016-01-27T15:14:37
| 2016-01-27T15:14:37
| 31,047,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,434
|
r
|
HLA.R
|
setwd('~nikolas/stats-archive/Papers/KIR/Data')
print(load('qPCR/MI-10-datasets.RData'))
# HLA data
head(hla.data <- read.table('Data/HLA/cc-hla-2013-06-21.tab',header=T))
dim( hla.data )
hla.data$t1d <- hla.data$t1d-1
hla.data$dil_subjectid <- hla.data$dil_subject
dim( hla.data <- subset(hla.data, t1d %in% 0:1) )
hla.data$HLAA <- with(hla.data,paste(HLAA_Bw4_aa80_1, HLAA_Bw4_aa80_2, sep='-'))
hla.data$HLAB <- with(hla.data,paste(HLAB_Bw4_Bw6_aa80_1, HLAB_Bw4_Bw6_aa80_2, sep='-'))
hla.data$HLA_Bw <- ifelse(grepl('6N', with(hla.data, paste(HLAA,HLAB,sep='-'))), '6N', '0')
hla.data$HLA_Bw[grepl('4T', with(hla.data, paste(HLAA,HLAB,sep='-')))] <- '4T'
hla.data$HLA_Bw[grepl('4I', with(hla.data, paste(HLAA,HLAB,sep='-')))] <- '4I'
#drop column collection
head(hla.data <- hla.data[,-which(colnames(hla.data)=='collection')])
# 20,445 = 9,174 : 11,271
# Case-only chi-squared tests of association between KIR gene carriage and
# HLA-Bw4 motifs, using genotype columns selected by the name prefix `b`
# (i.e. columns "<b>.kir", "<b>.kir3ds1", "<b>.kir3dl1").
# NOTE(review): this function reads (and locally re-assigns) a global data
# frame `cases` that is never defined in this script, and calls xtable()
# without library(xtable) in view -- confirm both are supplied by the
# calling environment.
case.only.chisq.test <- function(b) {
  # Keep only cases with a non-missing <b>.kir genotype; dim() echoes the size.
  dim( cases <- subset(cases, !is.na(cases[,paste(b,'kir',sep='.')])) )
  # KIR presence/absence: genotype strings look like "x-y" copy counts; a
  # gene is treated as present when its copy number is > 0.
  # NOTE(review): the parentheses place `> 0` INSIDE as.numeric(), so a
  # character value is compared to 0 before coercion -- this works for
  # single-digit counts but looks unintended; verify against the coding.
  table( kir3ds1 <- ifelse(as.numeric(gsub('(.)-.','\\1',cases[,paste(b,'kir3ds1',sep='.')])>0), 'KIR3DS1+', 'KIR3DS1-') )
  table( kir3dl1 <- ifelse(as.numeric(gsub('.-(.)','\\1',cases[,paste(b,'kir3dl1',sep='.')])>0), 'KIR3DL1+', 'KIR3DL1-') )
  table( kir <- paste( kir3ds1, kir3dl1, sep='/' ) )
  # HLA-Bw4 presence/absence (any Bw4 motif, and the 80I subtype specifically).
  table( hlabw4 <- c('HLA-Bw4-','HLA-Bw4+')[1+as.numeric(grepl('4',cases$HLA_Bw))] )
  table( hlabw4I <- c('HLA-Bw4-80I-','HLA-Bw4-80I+')[1+as.numeric(grepl('4I',cases$HLA_Bw))] )
  # Contingency tables printed as LaTeX (xtable) plus chi-squared tests.
  # kir3dl1
  print(xtable(table(kir3dl1, hlabw4)))
  print(chisq.test(table(kir3dl1, hlabw4)))
  # kir3ds1
  print(xtable(table(kir3ds1, hlabw4I)))
  print(chisq.test(table(kir3ds1, hlabw4I)))
  # kir
  print(xtable(table(kir, hlabw4)))
  print(chisq.test(table(kir, hlabw4)))
  #xtable(table(kir, hlabw4I))
  #chisq.test(table(kir, hlabw4I))
}
# For each multiply-imputed dataset: keep T1D cases, attach the HLA data,
# and add presence/absence indicator columns for KIR3DS1/KIR3DL1 and the
# HLA-Bw4 motifs used in the downstream association tests.
mi <- lapply(imputations, function(i) {
  i <- subset(i, t1d==1)
  i <- merge(i, hla.data)
  # kir presence/absence: geno is an "x-y" copy-number string; a gene counts
  # as present when its copy number is > 0
  i$kir3ds1 <- ifelse(as.numeric(gsub('(.)-.','\\1',i$geno)>0), 'KIR3DS1+', 'KIR3DS1-')
  i$kir3dl1 <- ifelse(as.numeric(gsub('.-(.)','\\1',i$geno)>0), 'KIR3DL1+', 'KIR3DL1-')
  # hlabw4 presence/absence (any Bw4 motif, and the 80I subtype)
  i$hlabw4 <- c('HLA-Bw4-','HLA-Bw4+')[1+as.numeric(grepl('4',i$HLA_Bw))]
  i$hlabw4I <- c('HLA-Bw4-80I-','HLA-Bw4-80I+')[1+as.numeric(grepl('4I',i$HLA_Bw))]
  return(i)
})
|
ff770859f89df98e9991db23d9b2a0f2b33380de
|
3e473b1d46386499fc45c664fd5c3f9282636650
|
/plot4.R
|
b4eef763b71bce9adccc5b5b3a3ea8c7e4185c21
|
[] |
no_license
|
Danlu0217/ExData_Plotting1
|
7437d558b4c3523fd8660cf995032921bdf25876
|
b1b2be8a3cd425b81dfa102310c49924518b657f
|
refs/heads/master
| 2020-07-17T19:03:03.265464
| 2019-09-10T17:18:54
| 2019-09-10T17:18:54
| 206,078,374
| 0
| 0
| null | 2019-09-03T13:01:03
| 2019-09-03T13:01:03
| null |
UTF-8
|
R
| false
| false
| 1,745
|
r
|
plot4.R
|
# Plot 4: 2x2 panel of household power consumption for 2007-02-01/02.
# Download and read the UCI household power consumption data.
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", temp)
datafile <- unzip(temp)
# Columns (V1..V9): Date;Time;Global_active_power;Global_reactive_power;
# Voltage;Global_intensity;Sub_metering_1;Sub_metering_2;Sub_metering_3
data <- read.table(datafile, sep = ";")
# Parse the date column so we can subset on it (the header row parses to NA
# and is therefore dropped by the date filter below).
data$time <- as.Date(data$V1, format = "%d/%m/%Y")
# Keep only 1-2 Feb 2007.
data1 <- subset(data, data$time >= "2007-02-01" & data$time <= "2007-02-02")
# Global active power; factor must go through character before numeric.
xdata <- as.numeric(as.character(data1$V3))
# Combined date-time axis.
datetime <- paste(as.Date(data1$V1, format = "%d/%m/%Y"), data1$V2)
data1$Datetime <- as.POSIXct(datetime)
# Sub-metering 1, 2, 3 (columns V7, V8, V9). BUG FIX: s1 was plotted below
# but never defined in the original, so panel 3 failed with
# "object 's1' not found".
s1 <- as.numeric(as.character(data1$V7))
s2 <- as.numeric(as.character(data1$V8))
s3 <- as.numeric(as.character(data1$V9))
# Voltage and global reactive power ("react" avoids masking base::rep).
vol <- as.numeric(as.character(data1$V5))
react <- as.numeric(as.character(data1$V4))
# Make 2 by 2 panels
par(mfrow = c(2, 2))
# Make plots
plot(xdata ~ data1$Datetime, type = "l", xlab = "", ylab = "Global Active Power(kilowatts)")
plot(vol ~ data1$Datetime, xlab = "datetime", ylab = "Voltage", type = "l")
plot(s1 ~ data1$Datetime, type = "l", ylab = "Energy sub metering", xlab = "")
lines(s2 ~ data1$Datetime, type = "l", col = "red")
lines(s3 ~ data1$Datetime, type = "l", col = "blue")
legend(x = "topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = 1, cex = 0.7, bty = "n")
plot(react ~ data1$Datetime, xlab = "datetime", ylab = "Global_reactive_power", type = "l")
|
bd8c2e8f95a3b0570585dd764af757a9ea0364e8
|
90a4676f3d6994cfe02e06ce5899ab2fb222512d
|
/app.R
|
e7fa097973e5bfd5e49b490596f402bf13353557
|
[
"MIT"
] |
permissive
|
nuke504/ESA_Project_Electronic_Dice
|
8f21cd61cf458b1cf3571d2c28b24349cbc5b047
|
7429b563fc14c1da849239d2237e39b36ceb31c9
|
refs/heads/master
| 2020-12-28T06:16:46.618760
| 2020-02-04T13:15:13
| 2020-02-04T13:15:13
| 238,208,633
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,937
|
r
|
app.R
|
# Electronic Dice R Shiny App
library(shiny)
library(ggplot2)
library(stats)
library(bnlearn)
library(expm)
# Build the four dice PMFs used by the game, each as a data.frame(X, Y):
#  - standard:   fair six-sided die
#  - binomial:   normal(3.5, s) discretised and renormalised onto faces 1..6
#  - bimodal:    the same discretised normal with faces re-ordered so the
#                mass sits at the extremes
#  - martingale: two three-face clusters from a discretised exponential,
#                shifted down by 10 (so faces can be negative)
gen.pmf <- function(){
  # Probabilities of a normal(3.5, s) binned onto faces 1..6, renormalised.
  discretised_normal <- function(s){
    cell <- pnorm(seq(1.5, 6.5, by = 1), mean = 3.5, sd = s) -
      pnorm(seq(0.5, 5.5, by = 1), mean = 3.5, sd = s)
    cell / (pnorm(6.5, mean = 3.5, sd = s) - pnorm(0.5, mean = 3.5, sd = s))
  }
  # Unnormalised probabilities of an exponential(rate) binned onto the three
  # integer faces centred at `centre`.
  discretised_exp <- function(centre, rate){
    pexp(seq(centre - 1 + 0.5, centre + 1 + 0.5, by = 1), rate = rate) -
      pexp(seq(centre - 1 - 0.5, centre + 1 - 0.5, by = 1), rate = rate)
  }
  pmf <- list()
  pmf$standard <- data.frame(X = 1:6, Y = rep(1/6, 6))
  spread1 <- (6 - 3.5)/3 - 0.5 + 1.25*runif(1)
  pmf$binomial <- data.frame(X = 1:6, Y = discretised_normal(spread1))
  spread2 <- (6 - 3.5)/3 - 0.5 + 1.25*runif(1)
  pmf$bimodal <- data.frame(X = c(3, 2, 1, 6, 5, 4), Y = discretised_normal(spread2))
  low_centre <- sample(5:9, 1)
  high_centre <- sample(15:20, 1)
  exp_rate <- 0.032 + 0.1*runif(1)
  raw <- c(discretised_exp(low_centre, exp_rate),
           discretised_exp(high_centre, exp_rate))
  pmf$martingale <- data.frame(
    X = c(low_centre - 1, low_centre, low_centre + 1,
          high_centre - 1, high_centre, high_centre + 1) - 10,
    Y = raw/sum(raw)
  )
  pmf
}
# Draw the one-row data frame of random values used in the challenge
# questions: A is a die face, B a coin flip, F in 1..4, and C/D/E/G are
# uniforms on narrow bands; K is derived from C, D and E.
gen.values <- function(){
  a_val <- sample(1:6, 1)
  b_val <- sample(0:1, 1)
  c_val <- 0.6 + 0.05*runif(1)
  d_val <- 0.4 + 0.05*runif(1)
  e_val <- 0.7 + 0.05*runif(1)
  f_val <- sample(1:4, 1)
  g_val <- 0.3 + 0.05*runif(1)
  df <- data.frame(A = a_val, B = b_val, C = c_val, D = d_val,
                   E = e_val, "F" = f_val, G = g_val)
  df$K <- e_val/(e_val + c_val/2 + 3/4*d_val)
  df
}
# Values for challenges ----
# Challenge 1: true sum of the three padlock digits (shown to the player).
padlockSum <- sample(3:6,1)
# Challenge 2: prior P(officer in toilet), and the informant's truth rate
# (given in toilet) and lie rate (given not in toilet) for the Bayes question.
c2.x <- runif(1)
c2.y <- runif(1)
c2.z <- runif(1)
# Challenge 3: coin flip decides which route (dog vs scanner) gets the LOW
# detection probability; the other route gets the high one.
c3.d <- sample(0:1,1)
c3.p1 <- ifelse(c3.d == 1,0.1+0.5*runif(1),0.4+0.5*runif(1))
c3.p2 <- ifelse(c3.d == 0,0.1+0.5*runif(1),0.4+0.5*runif(1))
# Simulate the 3-day "secret records": mean number caught out of 10
# passers-by per day for each route (Binomial(10, p) sample means).
gen.c3.samplemean <- function(){
  df <- data.frame(Equipment=c('Guard Dog','Scanner'),Sample.Mean=c(mean(rbinom(3,10,c3.p1)),mean(rbinom(3,10,c3.p2))))
  colnames(df) <- c('Route','Sample Mean')
  return(df)
}
# Build the searchlight's 6x6 Markov transition matrix. From each state the
# light can jump to 3 randomly chosen OTHER squares (diagonal is always 0);
# the three transition probabilities are produced by splitting [0, 1] at two
# uniform cut points, so every row sums to 1.
gen.p.matrix <- function(){
  trans <- matrix(0, nrow = 6, ncol = 6)
  for(state in 1:6){
    targets <- sample((1:6)[-state], 3)
    cuts <- runif(2)
    probs <- c(max(cuts) - min(cuts), min(cuts), 1 - max(cuts))
    trans[state, targets] <- probs
  }
  trans
}
# Challenge 4: searchlight transition matrix, court adjacency matrix, and the
# light's random starting square. c4.am has 7 rows for 6 squares -- the extra
# row presumably encodes the player's legal entry squares; TODO confirm.
c4.PMatrix <- gen.p.matrix()
c4.am <- rbind(c(0,1,0,1,0,0),c(1,0,1,0,1,0),c(0,1,0,0,0,1),c(1,0,0,0,1,0),c(0,1,0,1,0,1),c(0,0,1,0,1,0),c(1,1,1,0,0,0))
c4.is <- sample(4:6,1)
# Final challenge: Bayesian-network model of the getaway car's alarm circuit
# (node O depends on A through the listed conditional structure).
car.circuit = model2network("[A][B][C|A:B][E|C][D|C][F|D:G][G][H|F][I|F][P|H][K|P:M:N][J|K][L|K][O|N:I][M][N]")
# UI Function
ui <- fluidPage(
titlePanel("Prison Break!"),
tabsetPanel(
tabPanel('Game Dice',
fluidRow(tags$hr()),
fluidRow(
column(2,
wellPanel(
actionButton(inputId = 'next.turn', label = tags$h4('Next Turn')),
tags$p('Restarts timer, generate new PMFs and random values'),
tags$h3(textOutput('timeleft'))
),
wellPanel(
radioButtons(inputId = 'dice.choice', label = 'Choose your dice', choiceNames = c('Dice 1','Dice 2','Dice 3'), choiceValues = c(1,2,3)),
actionButton(inputId = 'submit.dice.choice', label = tags$h4('Roll Dice!')),
tags$h3(textOutput('dice.roll'))
)
),
column(10,tags$h3('Dice PMFs')),
column(10,tags$p('Black line indicates fair dice probabilities')),
column(3,
plotOutput('dice1pmf')
),
column(3,
plotOutput('dice2pmf')
),
column(3,
plotOutput('dice3pmf')
),
column(10,offset = 2,tags$h4('Random Value Table for use in Questions')),
column(10,offset = 2,tableOutput('valueTable'))
),
fluidRow(tags$hr())
),
# Challenge 1
tabPanel('Breakout!',
fluidRow(
column(12,
tags$h2('Challenge 1'),
tags$hr(),
wellPanel(
tags$h4('You need to breakout of the cell!'),
tags$p('You have recently managed to pickpocket a guard to obtain information on unlocking the prison cell. It is given below. In addition, a fellow inmate has told you that the sum of the 3 numbers on the combination padlock is ',tags$b(as.character(padlockSum)),'.')
)
)
),
fluidRow(
column(12,actionButton(inputId = 'revealC1', label = tags$h3('Reveal the rest of the information!'), width = '100%'))
),
fluidRow(
column(12,tags$br())
),
fluidRow(
column(12,conditionalPanel(condition = 'input.revealC1 >= 1',tags$img(src = "CombiLock.png", width = '100%')))
),
fluidRow(
column(12,
tags$br(),
conditionalPanel(condition = 'input.revealC1 >= 1',
wellPanel(
tags$p(tags$b('What is the probability that you will open the lock on the first try?')),
tags$p('Note: Giving the wrong answer will lead to a penalty of losing a turn. Key in answer as numerator over denominator')
))
)
),
fluidRow(
column(8,
conditionalPanel(condition = 'input.revealC1 >= 1',
wellPanel(
numericInput("cellN", "Numerator:", 1, min = 1),
numericInput("cellD", "Denominator:", 1, min = 1),
actionButton(inputId = 'submitC1', label = tags$h4('Check Probability'), width = '100%'),
tags$h3(textOutput('time1Left')),
conditionalPanel(condition = 'output.result1Show == "Question By:"',tags$h3(textOutput('result1Text'))),
conditionalPanel(condition = 'output.reset1Show == "Lin Xiao Hao"',actionButton(inputId = 'restarttimer1', label = 'Try Again'))
)
)
)
),
fluidRow(
column(12,tags$hr())
),
fluidRow(
column(12,tags$p(textOutput('result1Show'),textOutput('reset1Show')))
)
),
# Challenge 2
tabPanel('Bypass Guard Room',
fluidRow(
column(12,
tags$h2('Challenge 2'),
tags$hr(),
wellPanel(
tags$h4('You need to bypass the patrol guards!'),
tags$p('Patrol officer tends to slack during patrol duty. The chance of a patrol officer being in the toilet is',tags$b(as.character(floor(c2.x*100))),'%. A fellow inmate knows exactly when the officer is in the toilet, and says that the officer is in the toilet',tags$b(as.character(floor(c2.y*100))),'% of the time when the officer is in the toilet (hence telling the truth) and',tags$b(as.character(floor(c2.z*100))),'% of the time when the officer is not in the toilet (hence lying). If the inmate tells you that the officer is in the toilet, what is the probability that the officer is really in the toilet? If the probability is more than 50%, it will benefit you if you take his advice.'),
tags$p("You may choose to take the inmate's information and risk crossing the danger zone in one step, but a failed attempt will result in missing a turn. If you get caught, you can try the danger crossing again on your next available turn or roll the dice.")
)
)
),
fluidRow(
column(12,actionButton(inputId = 'revealC2', label = tags$h3('Reveal math notes!'), width = '100%'))
),
fluidRow(
column(12,tags$br())
),
fluidRow(
column(12,conditionalPanel(condition = 'input.revealC2 >= 1',tags$img(src = "Bayes.png", width = '100%')))
),
fluidRow(
column(12,
tags$br()
)
),
fluidRow(
column(8,
conditionalPanel(condition = 'input.revealC2 >= 1',
wellPanel(
actionButton(inputId = 'submitC2', label = tags$h4('Attempt Danger Crossing!'), width = '100%'),
tags$h3(textOutput('time2Left')),
conditionalPanel(condition = 'output.result2Show == "Question By:"',tags$h3(textOutput('result2Text'))),
conditionalPanel(condition = 'output.reset2Show == "Lin Xiao Hao"',actionButton(inputId = 'restarttimer2', label = 'Try Danger Crossing Again'))
)
)
)
),
fluidRow(
column(12,tags$hr())
),
fluidRow(
column(12,tags$p(textOutput('result2Show'),textOutput('reset2Show')))
)
),
# Challenge 3
tabPanel('Avoid Security Check',
fluidRow(
column(12,
tags$h2('Challenge 3'),
tags$hr(),
wellPanel(
tags$h4('You need to bypass the guard room!'),
tags$p('There are two paths to bypass the guard room. You can choose to go pass the guard dogs or to go pass the scanner. You know when somebody passes by the dog or the scanner, the dog barks with probability p1 and the scanner will work with p2. You do not know what the probabilities are.'),
tags$p('However, you found ',tags$b('secret records'),' that detail the security lapses at the guard dog and scanner!'),
tags$p('The records show in 3 days, for 10 people that walked past the guard dog and scanner each day, the average number of people caught follows the table below:'),
tableOutput('c3SampleMean'),
tags$p('You remembered knowledge from 40.001 that can help you solve this problem…')
)
)
),
fluidRow(
column(12,actionButton(inputId = 'revealC3', label = tags$h3('Reveal 40.001 knowledge!'), width = '100%'))
),
fluidRow(
column(12,tags$br())
),
fluidRow(
column(12,conditionalPanel(condition = 'input.revealC3 >= 1',tags$img(src = "Binfer.png", width = '100%')))
),
fluidRow(
column(12,
tags$br()
)
),
fluidRow(
column(8,
conditionalPanel(condition = 'input.revealC3 >= 1',
wellPanel(
actionButton(inputId = 'submitC3.1', label = tags$h4('Sneak past the dog!')),
actionButton(inputId = 'submitC3.2', label = tags$h4('Sneak past the scanners!')),
tags$h3(textOutput('time3Left')),
conditionalPanel(condition = 'output.result3Show == "Question By:"',tags$h3(textOutput('result3Text'))),
conditionalPanel(condition = 'output.reset3Show == "Loh Zheng Yi"',actionButton(inputId = 'restarttimer3', label = 'Try bypassing again'))
)
)
)
),
fluidRow(
column(12,tags$hr())
),
fluidRow(
column(12,tags$p(textOutput('result3Show'),textOutput('reset3Show')))
)
),
# Challenge 4
tabPanel('Avoid Search Light',
fluidRow(
column(12,
tags$h2('Challenge 4'),
tags$hr(),
wellPanel(
tags$h4('You need to avoid the search light!'),
tags$p("Lucky for you, you managed to find some details on the search light's operations. Click the button below to reveal these details"),
tags$h5('Possible Player Movements'),
tags$p('Out of squares 1,2,3 on the basketball court, choose one square to move to (see diagram below).'),
tags$p('You can only move to squares adjacent to your current square. Diagonal movements are not allowed.'),
tags$p('If you are caught by the search light, restart at the square right before the basketball court.')
)
)
),
fluidRow(
column(12,actionButton(inputId = 'revealC4', label = tags$h3('Reveal search light details!'), width = '100%'))
),
fluidRow(
column(12,tags$br())
),
fluidRow(
column(12,conditionalPanel(condition = 'input.revealC4 >= 1',tags$img(src = "Markov.png", width = '100%')))
),
fluidRow(
column(12,
tags$br()
)
),
fluidRow(
column(6,
conditionalPanel(condition = 'input.revealC4 >= 1',
wellPanel(
tags$p('Initial Searchlight Location: ',tags$h4(c4.is)),
tags$p('Current Searchlight Location: ',tags$h4(textOutput('c4SlState'))),
tags$p('Current Player Location: ',tags$h4(textOutput('c4PState'))),
selectInput("nextMoveC4", "Next Player Location",c(1:3)),
actionButton(inputId = 'submitC4', label = tags$h4('Move!'), width = '100%'),
tags$h3(textOutput('time4Left')),
conditionalPanel(condition = 'output.result4Show == "Question By:"',tags$h3(textOutput('result4Text'))),
conditionalPanel(condition = 'output.reset4Show == "Lin Hao "',actionButton(inputId = 'restarttimer4a', label = 'Next Move')),
conditionalPanel(condition = 'output.reset4Show == "Lin Hao"',actionButton(inputId = 'restarttimer4', label = 'Restart at entrance'))
)
)
),
column(6,
conditionalPanel(condition = 'input.revealC4 >= 1',
tags$p('Transition Matrix (in %):'),
tableOutput(formatC(c4.PMatrix*100, digits = 3, format ='fg')),
tags$p('Calculate State Probabilities after n turns'),
numericInput("c4MatrixPower", "Turns", 1, min = 1),
tableOutput('c4StateProb')
)
)
),
fluidRow(
column(12,tags$hr())
),
fluidRow(
column(12,tags$p(textOutput('result4Show'),textOutput('reset4Show')))
)
),
# Challenge 5
tabPanel('Car Escape',
fluidRow(
column(12,
tags$h2('Final Challenge'),
tags$hr(),
wellPanel(
tags$h3('Instructions'),
tags$p('To complete your escape plan, you have to hotwire a getaway car. The car has a stochastic alarm system. As the car alarm is armed with a nuclear bomb, you have to figure out how to hotwire the car such that electricity does not flow from Node A (orange) to Node O (green). You found a manual in the car that provides you with tips on hotwiring.'),
tags$h4(tags$b('You can only hotwire 1 node!')),
tags$p('Note: Think of all possible paths from A to O. Each intersection of nodes is one of the following 3 possibilities. You just need to hotwire one node to break the electricity flow.'),
tags$p('Choosing the wrong node will lead to a penalty of losing a turn')
),
tags$hr()
)
),
fluidRow(
column(12,tags$img(src = "HotwireInstructions.png", width = '100%'))
),
fluidRow(
column(12,tags$br())
),
fluidRow(
column(12,tags$img(src = "HotwiringEg.png", width = '100%'))
),
fluidRow(
column(12,tags$hr())
),
fluidRow(
column(12,actionButton(inputId = 'revealCircuit', label = tags$h3('I am ready to escape! Show me the circuit!'), width = '100%'))
),
fluidRow(
column(12,tags$br())
),
fluidRow(
column(4,
conditionalPanel(condition = 'input.revealCircuit >= 1',
wellPanel(
tags$h4('Choose node to hotwire'),
selectInput("nodeChoice", "Node", c("B","C","D","E","F","G","H","I","J","K","L","M","N","P")),
actionButton(inputId = 'submitNodeChoice', label = tags$h4('Hotwire this node!')),
tags$h3(textOutput('time5left')),
conditionalPanel(condition = 'output.result5Show == "Circuit By:"',tags$h3(textOutput('result5Text'))),
conditionalPanel(condition = 'output.reset5Show == "Gladwin Lam"',actionButton(inputId = 'restarttimer5', label = 'Try Again'))
)
)
),
column(6,
conditionalPanel(condition = 'input.revealCircuit >= 1',
tags$img(src = "HotwireCircuit.png", width = "600px")
)
)
),
fluidRow(
column(12,tags$hr())
),
fluidRow(
column(12,tags$p(textOutput('result5Show'),textOutput('reset5Show')))
)
)
)
)
# Server Function
# Shiny server: drives the dice-rolling turn mechanic and the five escape-room
# challenges. Each challenge has its own countdown timer (a reactiveVal), an
# "active" flag, and a pair of sentinel strings (result*.show / reset*.show)
# that the UI's conditionalPanels match against.
# NOTE(review): several sentinel strings differ only by a trailing space
# ('Lin Hao ' vs 'Lin Hao') -- that space is load-bearing for the
# conditionalPanel matching; do not normalise it.
server <- function(input, output, session) {
  # Initialize timer variables (all durations in seconds)
  turnTime <- 1*60
  time1 <- 3*60
  time2 <- 3*60
  time3 <- 3*60
  time4 <- 60
  time5 <- 3*60
  turnTimer <- reactiveVal(turnTime)
  timer1 <- reactiveVal(time1)
  timer2 <- reactiveVal(time2)
  timer3 <- reactiveVal(time3)
  timer4 <- reactiveVal(time4)
  timer5 <- reactiveVal(time5)
  activeTurnTimer <- reactiveVal(F)
  activetimer1 <- reactiveVal(F)
  activetimer2 <- reactiveVal(F)
  activetimer3 <- reactiveVal(F)
  activetimer4 <- reactiveVal(F)
  activetimer5 <- reactiveVal(F)
  diceRolled <- reactiveVal(F)
  # Turn timer: observer that invalidates every second. If the timer is
  # active, decrease it by one; on expiry, reset the turn state and notify.
  observe({
    invalidateLater(1000, session)
    isolate({
      if (activeTurnTimer()) {
        turnTimer(turnTimer() - 1)
        if (turnTimer() < 1) {
          activeTurnTimer(F)
          diceRolled(F)
          turnTimer(turnTime)
          showModal(modalDialog(
            title = "Turn Ended",
            "You are out of time!"
          ))
        }
      }
    })
  })
  # Central mutable game state. gen.pmf(), gen.values(), gen.c3.samplemean(),
  # c4.is, c4.PMatrix, c4.am and padlockSum are defined elsewhere in the app.
  rv <- reactiveValues(choices = sample(1:4,3),
                       roll = 0,
                       pmf = gen.pmf(),
                       values.table = gen.values(),
                       c1.result = NA,
                       result1.show = 'Question By: ',
                       reset1.show = 'Lin Xiao Hao ',
                       c2.result = NA,
                       result2.show = 'Question By: ',
                       reset2.show = 'Lin Xiao Hao ',
                       c3.result = NA,
                       result3.show = 'Question By: ',
                       reset3.show = 'Loh Zheng Yi ',
                       c3.samplemean = gen.c3.samplemean(),
                       c4.result = NA,
                       c4.slstate = c4.is,
                       c4.pState = 7,
                       result4.show = 'Question By: ',
                       reset4.show = 'Lin Hao ',
                       c5.result = NA,
                       result5.show = 'Circuit By: ',
                       reset5.show = 'Gladwin Lam ')
  # Start a new turn: regenerate the dice PMFs / value table and restart the
  # turn timer.
  observeEvent(input$next.turn, {
    rv$pmf <- gen.pmf()
    rv$values.table <- gen.values()
    rv$choices <- sample(1:4,3)
    activeTurnTimer(T)
    diceRolled(F)
    turnTimer(turnTime)
  })
  # Roll the chosen die by inverse-CDF sampling from its PMF. Only one roll
  # is allowed per turn, and only while the turn timer is running.
  observeEvent(input$submit.dice.choice, {
    if(activeTurnTimer()&!diceRolled()){
      rv$roll <- rv$pmf[[rv$choices[as.integer(input$dice.choice)]]]$X[which(runif(1) <= cumsum(rv$pmf[[rv$choices[as.integer(input$dice.choice)]]]$Y))[1]]
      diceRolled(T)
      activeTurnTimer(F)
    }else if(diceRolled()){
      showModal(modalDialog(title = "Dice Rolled","You may not roll the dice more than once per turn"))
    }else{
      showModal(modalDialog(title = "Turn Ended","Please press 'Next Turn' button before rolling dice"))
    }
  })
  # Bar charts of each offered die's PMF, with the fair-die 1/6 reference line.
  output$dice1pmf <- renderPlot({
    ggplot(data = rv$pmf[[rv$choices[1]]], aes(x=as.factor(X),y=Y,label=formatC(Y, digits = 3, format ='fg'))) +
      geom_col(fill = 'pink', width = 1/1.618) + geom_text(nudge_y = 0.001) + geom_hline(aes(yintercept = 1/6)) +
      labs(x = 'Roll Outcome', y = 'P(X=x)', title = 'Dice 1 Probability Mass Function') + coord_cartesian(ylim=c(0,0.6)) +
      theme_minimal()
  })
  output$dice2pmf <- renderPlot({
    ggplot(data = rv$pmf[[rv$choices[2]]], aes(x=as.factor(X),y=Y,label=formatC(Y, digits = 3, format ='fg'))) +
      geom_col(fill = 'pink', width = 1/1.618) + geom_text(nudge_y = 0.001) + geom_hline(aes(yintercept = 1/6)) +
      labs(x = 'Roll Outcome', y = 'P(X=x)', title = 'Dice 2 Probability Mass Function') + coord_cartesian(ylim=c(0,0.6)) +
      theme_minimal()
  })
  output$dice3pmf <- renderPlot({
    ggplot(data = rv$pmf[[rv$choices[3]]], aes(x=as.factor(X),y=Y,label=formatC(Y, digits = 3, format ='fg'))) +
      geom_col(fill = 'pink', width = 1/1.618) + geom_text(nudge_y = 0.001) + geom_hline(aes(yintercept = 1/6)) +
      labs(x = 'Roll Outcome', y = 'P(X=x)', title = 'Dice 3 Probability Mass Function') + coord_cartesian(ylim=c(0,0.6)) +
      theme_minimal()
  })
  output$dice.roll <- renderText({
    paste('You have rolled: ',rv$roll)
  })
  output$valueTable <- renderTable({
    rv$values.table
  })
  # Output the time left.
  output$timeleft <- renderText({
    paste("Time left: ", turnTimer(),'s',sep='')
  })
  # ---- Events for challenge 1 (padlock fraction) ----
  observe({
    invalidateLater(1000, session)
    isolate({
      if (activetimer1()) {
        timer1(timer1() - 1)
        if (timer1() < 1) {
          activetimer1(F)
          timer1(time1)
          # BUG FIX: was `rv$c5.result`, so the challenge-1 timeout message
          # was written to challenge 5's result and never displayed here.
          rv$c1.result <- 'Time is out! Try again next turn!'
          rv$result1.show <- 'Question By:'
          rv$reset1.show <- 'Lin Xiao Hao'
        }
      }
    })
  })
  observeEvent(input$revealC1, {
    activeTurnTimer(F)
    activetimer1(T)
    timer1(time1)
  })
  observeEvent(input$restarttimer1, {
    activetimer1(T)
    timer1(time1)
    rv$result1.show <- 'Question By: '
    rv$reset1.show <- 'Lin Xiao Hao '
  })
  observeEvent(input$submitC1, {
    # FIX: tolerant numeric comparison instead of exact `==` -- floating-point
    # division can make mathematically equal fractions compare unequal.
    if (isTRUE(all.equal(input$cellN/input$cellD, 1/(sum(1:(padlockSum-2)))))) {
      rv$c1.result <- 'Success!'
    } else {
      rv$c1.result <- 'Wrong! Try again next turn!'
      rv$reset1.show <- 'Lin Xiao Hao'
    }
    rv$result1.show <- 'Question By:'
    activetimer1(F)
  })
  output$time1Left <- renderText({
    paste("Time left: ", timer1(),'s',sep='')
  })
  output$result1Show <- renderText({
    paste(rv$result1.show)
  })
  output$reset1Show <- renderText({
    paste(rv$reset1.show)
  })
  output$result1Text <- renderText({
    paste(rv$c1.result)
  })
  # ---- Events for challenge 2 (conditional-probability sneak) ----
  observe({
    invalidateLater(1000, session)
    isolate({
      if (activetimer2()) {
        timer2(timer2() - 1)
        if (timer2() < 1) {
          activetimer2(F)
          timer2(time2)
          rv$c2.result <- 'Time is out! Try again next turn!'
          rv$result2.show <- 'Question By:'
          rv$reset2.show <- 'Lin Xiao Hao'
        }
      }
    })
  })
  observeEvent(input$revealC2, {
    activeTurnTimer(F)
    activetimer2(T)
    timer2(time2)
  })
  observeEvent(input$restarttimer2, {
    activetimer2(T)
    timer2(time2)
    rv$result2.show <- 'Question By: '
    rv$reset2.show <- 'Lin Xiao Hao '
  })
  observeEvent(input$submitC2, {
    # Success with the Bayes posterior probability y*x / (x*y + z*(1-x)).
    if(runif(1)<=c2.y*c2.x/(c2.x*c2.y+c2.z*(1-c2.x))){
      rv$c2.result <- 'Success!'
    }else{
      rv$c2.result <- 'Caught! Lose a turn!'
      rv$reset2.show <- 'Lin Xiao Hao'
    }
    rv$result2.show <- 'Question By:'
    activetimer2(F)
  })
  output$time2Left <- renderText({
    paste("Time left: ", timer2(),'s',sep='')
  })
  output$result2Show <- renderText({
    paste(rv$result2.show)
  })
  output$reset2Show <- renderText({
    paste(rv$reset2.show)
  })
  output$result2Text <- renderText({
    paste(rv$c2.result)
  })
  # ---- Events for challenge 3 (choose route by sample means) ----
  observe({
    invalidateLater(1000, session)
    isolate({
      if (activetimer3()) {
        timer3(timer3() - 1)
        if (timer3() < 1) {
          activetimer3(F)
          # BUG FIX: was `timer2(time3)`, which reset challenge 2's timer
          # instead of this one.
          timer3(time3)
          rv$c3.result <- 'Time is out! Try again next turn!'
          rv$result3.show <- 'Question By:'
          rv$reset3.show <- 'Loh Zheng Yi'
        }
      }
    })
  })
  observeEvent(input$revealC3, {
    activeTurnTimer(F)
    activetimer3(T)
    timer3(time3)
  })
  observeEvent(input$restarttimer3, {
    activetimer3(T)
    timer3(time3)
    rv$result3.show <- 'Question By: '
    rv$reset3.show <- 'Loh Zheng Yi '
  })
  observeEvent(input$submitC3.1, {
    # Option 1 succeeds with probability 1 - c3.p1.
    if(runif(1)<=(1-c3.p1)){
      rv$c3.result <- 'Success!'
    }else{
      rv$c3.result <- 'Caught! Try again next turn!'
      rv$reset3.show <- 'Loh Zheng Yi'
    }
    rv$result3.show <- 'Question By:'
    activetimer3(F)
  })
  observeEvent(input$submitC3.2, {
    # Option 2 succeeds with probability 1 - c3.p2.
    if(runif(1)<=(1-c3.p2)){
      rv$c3.result <- 'Success!'
    }else{
      rv$c3.result <- 'Caught! Try again next turn!'
      rv$reset3.show <- 'Loh Zheng Yi'
    }
    rv$result3.show <- 'Question By:'
    activetimer3(F)
  })
  output$time3Left <- renderText({
    paste("Time left: ", timer3(),'s',sep='')
  })
  output$result3Show <- renderText({
    paste(rv$result3.show)
  })
  output$reset3Show <- renderText({
    paste(rv$reset3.show)
  })
  output$result3Text <- renderText({
    paste(rv$c3.result)
  })
  output$c3SampleMean <- renderTable({
    rv$c3.samplemean
  })
  # ---- Events for challenge 4 (Markov-chain searchlight) ----
  observe({
    invalidateLater(1000, session)
    isolate({
      if (activetimer4()) {
        timer4(timer4() - 1)
        if (timer4() < 1) {
          activetimer4(F)
          timer4(time4)
          rv$c4.result <- 'Time is out! Restart at Entrance!'
          rv$result4.show <- 'Question By:'
          rv$reset4.show <- 'Lin Hao'
        }
      }
    })
  })
  observeEvent(input$revealC4, {
    activeTurnTimer(F)
    activetimer4(T)
    timer4(time4)
  })
  observeEvent(input$restarttimer4, { # Hard reset: player returns to entrance (state 7)
    activetimer4(T)
    timer4(time4)
    rv$result4.show <- 'Question By: '
    rv$reset4.show <- 'Lin Hao '
    rv$c4.pState <- 7
  })
  observeEvent(input$restarttimer4a, { # Soft reset: keep player position
    activetimer4(T)
    timer4(time4)
    rv$result4.show <- 'Question By: '
    rv$reset4.show <- 'Lin Hao '
  })
  observeEvent(input$submitC4, {
    # Advance the searchlight one Markov step (inverse-CDF sample of its
    # transition row), then resolve the player's move. State 5 is the exit.
    rv$c4.slstate <- which(runif(1)<=cumsum(c4.PMatrix[rv$c4.slstate,]))[1]
    if(rv$c4.slstate!=as.integer(input$nextMoveC4) & as.integer(input$nextMoveC4)==5){
      rv$c4.pState <- as.integer(input$nextMoveC4)
      rv$c4.result <- 'Completed Searchlight Challenge!'
      rv$reset4.show <- 'Lin Hao '
    }else if(rv$c4.slstate!=as.integer(input$nextMoveC4) & as.integer(input$nextMoveC4)!=5){
      rv$c4.pState <- as.integer(input$nextMoveC4)
      rv$c4.result <- 'Evaded searchlight! Continue moving!'
      rv$reset4.show <- 'Lin Hao '
    }else if(rv$c4.slstate==as.integer(input$nextMoveC4)){
      rv$c4.pState <- as.integer(input$nextMoveC4)
      rv$c4.result <- 'Caught! Restart at entrance!'
      rv$reset4.show <- 'Lin Hao'
    }
    rv$result4.show <- 'Question By:'
    activetimer4(F)
  })
  output$time4Left <- renderText({
    paste("Time left: ", timer4(),'s',sep='')
  })
  # n-step state distribution: indicator row vector times P^n.
  output$c4StateProb <- renderTable({
    sapply(1:6, function(x) ifelse(x==rv$c4.slstate,1,0))%*%(c4.PMatrix%^%input$c4MatrixPower)
  })
  output$c4SlState <- renderText({
    paste(rv$c4.slstate)
  })
  output$c4PState <- renderText({
    paste(ifelse(rv$c4.pState==7,'Start',rv$c4.pState))
  })
  output$result4Show <- renderText({
    paste(rv$result4.show)
  })
  output$reset4Show <- renderText({
    paste(rv$reset4.show)
  })
  output$result4Text <- renderText({
    paste(rv$c4.result)
  })
  # Restrict the "next move" dropdown to nodes adjacent to the player's
  # current position (row of the adjacency matrix c4.am).
  observe({
    x <- which(c4.am[rv$c4.pState,]==1)
    updateSelectInput(session, "nextMoveC4",
                      label = "Next Player Location",
                      choices = x
    )
  })
  # ---- Events for challenge 5 (hotwire / d-separation) ----
  observe({
    invalidateLater(1000, session)
    isolate({
      if (activetimer5()) {
        timer5(timer5() - 1)
        if (timer5() < 1) {
          activetimer5(F)
          timer5(time5)
          rv$c5.result <- 'Nuclear bomb has exploded! Try again next turn!'
          rv$result5.show <- 'Circuit By:'
          rv$reset5.show <- 'Gladwin Lam'
        }
      }
    })
  })
  observeEvent(input$revealCircuit, {
    activeTurnTimer(F)
    activetimer5(T)
    timer5(time5)
  })
  observeEvent(input$restarttimer5, {
    activetimer5(T)
    timer5(time5)
    rv$result5.show <- 'Circuit By: '
    rv$reset5.show <- 'Gladwin Lam '
  })
  observeEvent(input$submitNodeChoice, {
    # A node is correct iff it d-separates A from O in the circuit graph.
    if(dsep(car.circuit, "A", "O", input$nodeChoice)){
      rv$c5.result <- 'Success'
    }else{
      rv$c5.result <- 'Nuclear bomb has exploded! Try again next turn!'
      rv$reset5.show <- 'Gladwin Lam'
    }
    rv$result5.show <- 'Circuit By:'
    activetimer5(F)
  })
  output$time5left <- renderText({
    paste("Time left: ", timer5(),'s',sep='')
  })
  output$result5Show <- renderText({
    paste(rv$result5.show)
  })
  output$reset5Show <- renderText({
    paste(rv$reset5.show)
  })
  output$result5Text <- renderText({
    paste(rv$c5.result)
  })
}
# Launch the app defined by the ui/server objects above.
shinyApp(ui, server)
# Scratch/debug snippets kept for reference (not executed):
# choices = sample(1:4,3)
# dice.choice = 3
# as.character(pmf[[choices[dice.choice = 3]]]$X[which(runif(1) <= cumsum(pmf[[choices[dice.choice = 3]]]$Y))[1]])
#
# ggplot(data = pmf[[1]], aes(x=as.factor(X),y=Y,label=formatC(Y, digits = 3, format ='fg'))) +
#   geom_col(fill = 'pink', width = 1/1.618) + geom_text(nudge_y = 0.001) + geom_hline(aes(yintercept = 1/6)) +
#   labs(x = 'Dice Number', y = 'P(X=x)', title = 'Dice 3 Probability Mass Function') + coord_cartesian(ylim=c(0,0.3)) +
#   theme_minimal()
# library(bnlearn)
|
4e4235bb56a3340ef093e9aa11f4fb7e04cfa9d7
|
cb9ee9a7732befaa609ad2b1ec031a40d53ff2da
|
/Analysis/Step3_analysis_Monocle_Droplet.R
|
7620eed9abf39e0e07c1aaaba995da8f5def7f60
|
[] |
no_license
|
rhondabacher/scSpatialReconstructCompare-Paper
|
b247a13109b06067f38003c5209bf7e250df6b3c
|
6fe728d40e50812f9284f43f3c1678b7b37f8ebc
|
refs/heads/master
| 2022-10-07T13:44:30.576357
| 2020-06-06T22:37:53
| 2020-06-06T22:37:53
| 186,037,949
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,045
|
r
|
Step3_analysis_Monocle_Droplet.R
|
# Monocle pseudotime analysis of the droplet liver data.
# Loads the preprocessed expression matrix, builds a CellDataSet, orders cells
# along a trajectory, and tests each gene for pseudotime-dependent expression.
setwd("scSpatialReconstructCompare-Paper")
load("RDATA/dataReady_DropletPaper.RData")
use.data <- droplet_liver_data
# Only keep genes that are not all zero for Monocle analysis:
use.data <- use.data[names(which(rowSums(use.data) > 0)),]
library(monocle)
# Phenotype and feature annotations keyed by cell / gene names.
pd <- new("AnnotatedDataFrame",
          data = data.frame(colnames(use.data),
                            row.names=colnames(use.data)))
fd <- new("AnnotatedDataFrame",
          data = data.frame(gene_short_name=rownames(use.data),
                            row.names=rownames(use.data)))
use.data.monocle.cds <- newCellDataSet(as.matrix(use.data),
                                       phenoData = pd,
                                       featureData = fd,
                                       expressionFamily=negbinomial.size())
# Set-up for trajectory analysis:
use.data.monocle.cds <- estimateSizeFactors(use.data.monocle.cds)
use.data.monocle.cds <- estimateDispersions(use.data.monocle.cds)
disp_table <- dispersionTable(use.data.monocle.cds)
# Choose cutoff for gene inclusion:
# NOTE(review): `hvg` is not defined in this script -- presumably it comes
# from the loaded .RData workspace; `disp_table` computed above is never
# used. Confirm which highly-variable-gene table is intended here.
use.hvg <- rownames(hvg[rev(order(hvg[,3])),])[1:200]
use.data.monocle.cds <- setOrderingFilter(use.data.monocle.cds, use.hvg)
plot_ordering_genes(use.data.monocle.cds)
############### Reduce Dim #################################
use.data.monocle.cds <- reduceDimension(use.data.monocle.cds,
                                        max_components = 2, norm_method="log")
############### Order Cells ################################
use.data.monocle.cds <- orderCells(use.data.monocle.cds, reverse = T)
## Format dataframe for plotting
# Column 3 of the phenoData is the Pseudotime assigned by orderCells();
# column 1 holds the cell names.
pt_data <- use.data.monocle.cds@phenoData@data[,3]
names(pt_data) <- use.data.monocle.cds@phenoData@data[,1]
pt_data.droplet <- pt_data
##############Significant Genes############################
# Likelihood-ratio test of each gene against a natural spline in pseudotime.
diff_test_res <- differentialGeneTest(use.data.monocle.cds, fullModelFormulaStr = "~sm.ns(Pseudotime)")
head(diff_test_res[,c("gene_short_name", "pval", "qval")])
de.droplet <- diff_test_res
save.image("RDATA/analysis_Monocle_Droplet.RData")
|
e00a802c14902dff7e0e157eeef385ffe277070f
|
65ad984f92fc48af3dec983d6996c5f24f6a43ec
|
/plot4.R
|
d3687f73591f2ab0bce7b9fd802e2961bfaae455
|
[] |
no_license
|
lpatzer/ExData_Plotting1
|
c2d13979d642f28d909e8f5b85dfb1cef3b86278
|
ee2177f8878127d3221112ed12b61db78d8383ed
|
refs/heads/master
| 2021-01-17T05:17:00.928488
| 2015-05-07T20:29:30
| 2015-05-07T20:29:30
| 35,229,454
| 0
| 0
| null | 2015-05-07T15:55:32
| 2015-05-07T15:55:32
| null |
UTF-8
|
R
| false
| false
| 1,664
|
r
|
plot4.R
|
# Plot 4 of the Exploratory Data Analysis project: a 2x2 panel of household
# power-consumption measures for 2007-02-01 and 2007-02-02, saved as plot4.png.
wd<-"C:/Users/patzerl/Documents/Coursera/EDA/Project 1/"
setwd(wd)
data<-read.table("household_power_consumption.txt", sep=";", header=T)
######## CLEAN COLUMNS ############
# as.numeric(paste(...)) coerces via character first -- presumably a guard
# against factor columns (the pre-R-4.0 read.table default); verify that
# missing-value markers ("?") become NA as intended.
data$Clean_Date<-as.Date(data$Date, "%d/%m/%Y")
data$Clean_Global_active_power<-as.numeric(paste(data$Global_active_power))
data$Clean_Sub_metering_1<-as.numeric(paste(data$Sub_metering_1))
data$Clean_Sub_metering_2<-as.numeric(paste(data$Sub_metering_2))
data$Clean_Sub_metering_3<-as.numeric(paste(data$Sub_metering_3))
data$TimeDate<-as.POSIXct(paste(data$Clean_Date, data$Time), format="%Y-%m-%d %H:%M:%S")
data$Clean_Voltage<-as.numeric(paste(data$Voltage))
data$Clean_Global_reactive_power<-as.numeric(paste(data$Global_reactive_power))
######## SUBSET DATA ##############
# Keep the two target days (Date >= Feb 1 and < Feb 3).
two.day<-data[data$Clean_Date>="2007-02-01" & data$Clean_Date <"2007-02-03",]
######### GRAPH 4 #################
# 2x2 layout, filled column-first: active power, sub-metering (top/bottom
# left), then voltage and reactive power (right column).
par(mfcol=c(2,2))
plot(two.day$TimeDate, two.day$Clean_Global_active_power, type="l",
     ylab="Global Active Power (kilowatts)", xlab="")
plot(two.day$TimeDate, two.day$Clean_Sub_metering_1, type="l", ylab="Energy Sub Metering", xlab="")
lines(two.day$TimeDate, two.day$Clean_Sub_metering_2, type="l", col="red")
lines(two.day$TimeDate, two.day$Clean_Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col=c("black", "red", "blue"), lty=1, bty="n")
plot(two.day$TimeDate, two.day$Clean_Voltage, type="l", ylab="Voltage", xlab="TimeDate")
plot(two.day$TimeDate, two.day$Clean_Global_reactive_power, type="l", ylab="Global_reactive_power", xlab="TimeDate")
# Copy the on-screen device to a 480x480 PNG, then close the PNG device.
dev.copy(png, "plot4.png", width=480, height=480)
dev.off()
|
4ac1b58bdac73cc413e57a94510a22a8208c5dc4
|
cd7962decce586dd068af18743a46705ede05660
|
/man/isInsideConvexHull.Rd
|
f2ad65533ba0694e2007972a6991be79d9fce250
|
[] |
no_license
|
oshea-patrick/RSpatial
|
a6e103ae4da87783a38f1cb846e97c7d8de98a38
|
22a65f50618f7972f9c5bc203e7b8db0ed3900d4
|
refs/heads/master
| 2020-06-01T02:59:36.566504
| 2019-08-08T17:49:42
| 2019-08-08T17:49:42
| 190,607,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,249
|
rd
|
isInsideConvexHull.Rd
|
\name{isInsideConvexHull}
\alias{isInsideConvexHull}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
isInsideConvexHull
}
\description{
Checks to see if a Point resides within a convex hull, which is presumably in clockwise order.
}
\usage{
isInsideConvexHull(hulls, checkx, checky, size)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{hulls}{
This is a dataframe consisting of two columns, "lon" and "lat", corresponding to latitude and longitude.
}
\item{checkx}{
The x coordinate of the point being checked.
}
\item{checky}{
The y coordinate of the point being checked.
}
\item{size}{
The size of the dataframe, hulls.
}
}
\details{
Written in C++.
}
\value{
Returns a boolean value, TRUE or FALSE.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Patrick J. O'Shea
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
x = 1
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
ef9e4f07a16eccf420952a8e5677fda85c1a2e41
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/10899_0/rinput.R
|
d6cccb24927d0b5b5af9d8b50cf943e59dec6667
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Read the phylogenetic tree in 10899_0.txt, remove its root, and write the
# unrooted tree to 10899_0_unrooted.txt (Newick format).
library(ape)
tree_rooted <- read.tree("10899_0.txt")
tree_unrooted <- unroot(tree_rooted)
write.tree(tree_unrooted, file = "10899_0_unrooted.txt")
|
c6a60559d9fa413288c3a024b38b8998ae22049f
|
d6f307076775df59956b79395fa546bca1be5510
|
/man/parse_salaries.Rd
|
a2ba59d59703796a3035b33dbc8b14ccf22e1664
|
[
"MIT"
] |
permissive
|
jimtheflash/dfstools
|
7f893bfe0356a88052081d3c49ff77ecb2e88b45
|
b76de925da20c1d26e8fd4784149b0eca166846b
|
refs/heads/master
| 2021-07-04T10:24:43.340369
| 2020-12-15T20:26:05
| 2020-12-15T20:26:05
| 208,372,779
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 706
|
rd
|
parse_salaries.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_salaries.R
\name{parse_salaries}
\alias{parse_salaries}
\title{Parse dfs player salary files}
\usage{
parse_salaries(
path = NULL,
sport = NULL,
platform = NULL,
remove_postponed_games = TRUE
)
}
\arguments{
\item{path}{character, path to salary file}
\item{sport}{character, which sports league? Supports nfl, nba, golf for draftkings}
\item{platform}{character, which dfs platform? Supports draftkings}
\item{remove_postponed_games}{logical, should games that are postponed be removed from the slate? default is TRUE}
}
\value{
data.frame of players with salary info
}
\description{
Parse dfs player salary files
}
|
427ebde1c4c3b6722e9ed712f9eb55d3ed132d43
|
2b0e7454e2c87076f4f97d35000bf3426b7d9aaa
|
/man/lcfs.update.Rd
|
d05a41c8e6cbd91472e32e6ccae31a3db6628854
|
[] |
no_license
|
raphael210/QDataGet
|
52df9d791d7d1d8933555dbdfa9d81e42558a5ee
|
83531020e180fe8d07fdfa4a75413fd2b95cd6b4
|
refs/heads/master
| 2020-04-12T06:29:33.198718
| 2019-02-01T07:50:14
| 2019-02-01T07:50:14
| 64,194,185
| 0
| 5
| null | 2017-03-16T03:29:45
| 2016-07-26T06:00:12
|
R
|
UTF-8
|
R
| false
| true
| 957
|
rd
|
lcfs.update.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pub03_DatabaseOperationFuncs.R
\name{lcfs.update}
\alias{lcfs.update}
\title{lcfs.update}
\usage{
lcfs.update(factorID, begT, endT, stockID, splitNbin = "month")
}
\arguments{
\item{factorID}{a single character of factorID}
\item{begT}{the begin date of the updating}
\item{endT}{the end date of the updating}
\item{stockID}{a vector of stockID}
}
\description{
update \bold{one} specific factorscore.
}
\examples{
# update a factorscore on all the time, of all the stocks
lcfs.update("F000008")
# update a factor on certain time
lcfs.update("F000008",20130322,20130330)
# update a factor of certain stocks
lcfs.update("F000008",20130322,20130330,c("EQ000001","EQ000002"))
# update a factorscore on certin time, of certain stocks
lcfs.update("F000008",20130322,20130330,c("EQ000001","EQ000002"))
}
\seealso{
\code{\link{lcdb.update.QT_FactorScore}}, \code{\link{lcfs.add}}
}
|
32ffb4217e59c36525b4ddedfcd18074379cd66a
|
1dad0d0cc3555ae6373dc876e18071a849bf11a9
|
/R/announced-prefixes.R
|
9cc3fa85193f0f559201fd009f911428d53b19f9
|
[] |
no_license
|
firebitsbr/ripestat
|
53dc9e9a2d87ff8154622f94a905b155da340c11
|
45f405f9021e08669bd7843e26936e8eadd35cfa
|
refs/heads/master
| 2020-04-08T00:23:18.488402
| 2018-05-14T03:33:27
| 2018-05-14T03:33:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,065
|
r
|
announced-prefixes.R
|
# Query the RIPEstat "announced-prefixes" endpoint for one ASN and return the
# parsed JSON response. Only the first element of `asn` is used (with a
# warning); the value is normalised to upper case with an "AS" prefix.
# `starttime`/`endtime` are validated/normalised to ISO-8601 when supplied.
.get_announced_prefixes <- function(asn, starttime=NULL, endtime=NULL, min_peers_seeing=3) {
  if (length(asn) > 1) {
    warning("asn length > 1; Using first element.")
    asn <- asn[1]
  }

  # Normalise the ASN string: trim, upper-case, ensure "AS" prefix.
  asn <- trimws(toupper(asn))
  if (!grepl("^AS", asn)) {
    asn <- sprintf("AS%s", asn)
  }

  if (!is.null(starttime)) {
    starttime <- parsedate::format_iso_8601(parsedate::parse_iso_8601(starttime))
    if (is.na(starttime)) {
      stop("'starttime' must be a valid R date/time object or a valid ISO-8601 time string.", call.=FALSE)
    }
  }

  if (!is.null(endtime)) {
    endtime <- parsedate::format_iso_8601(parsedate::parse_iso_8601(endtime))
    if (is.na(endtime)) {
      stop("'endtime' must be a valid R date/time object or a valid ISO-8601 time string.", call.=FALSE)
    }
  }

  res <- httr::GET(
    url = "https://stat.ripe.net/data/announced-prefixes/data.json",
    query = list(
      resource = asn,
      starttime = starttime,
      endtime = endtime,
      min_peers_seeing = min_peers_seeing
    ),
    httr::user_agent(RIPESTAT_PACKAGE_USER_AGENT)
  )

  # Raise an R error for any HTTP error status, then parse the JSON body.
  httr::stop_for_status(res)
  jsonlite::fromJSON(httr::content(res, as="text"))
}
#' Announced Prefixes
#'
#' This data call returns all announced prefixes for a given ASN.
#' The results can be restricted to a specific time period.
#'
#' @md
#' @param asn The Autonomous System Number for which to return prefixes.
#' Will auto-prefix with `AS` if just numeric.
#' @param starttime,endtime Start/end times for the query. When not `NULL` should be
#' an ISO-8601 or Unix timestamp. If `starttime` is `NULL` the query defaults to two weeks
#' before current date and time. If `endtime` is `NULL`, the query falls back to current date and time.
#' @param min_peers_seeing Minimum number of RIS peers seeing the prefix for it to be included in the results.
#' Excludes low-visibility/localized announcements. Default: `3`.
#' @export
#' @examples \dontrun{
#' get_announced_prefixes("AS3333")
#' }
get_announced_prefixes <- memoise::memoise(.get_announced_prefixes)
|
dd54262e355bb969fbf991e1b6ed53f0b8ebf60c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phylosim/examples/UNREST.Rd.R
|
9742565900077f0f0f7c021192bd9fd6f8c40ee8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 818
|
r
|
UNREST.Rd.R
|
# Example script extracted from the UNREST class documentation (phylosim):
# builds a fully general (unrestricted) nucleotide substitution process and
# runs a small two-taxon simulation with it.
library(phylosim)
### Name: UNREST
### Title: The UNREST class
### Aliases: UNREST
### ** Examples
# One rate per ordered nucleotide pair (12 rates = no reversibility constraint).
p<-UNREST(rate.list=list(
	"T->C"=1, "T->A"=2, "T->G"=3, "C->T"=4, "C->A"=1,
        "C->G"=2, "A->T"=3, "A->C"=4, "A->G"=1, "G->T"=2,
        "G->C"=3, "G->A"=4
))
# get a summary
summary(p)
# display a bubble plot
plot(p)
# The following code demonstrates how to use
# the process in a simulation.
# create a sequence, attach process p
s<-NucleotideSequence(length=20,processes=list(list(p)))
# sample states
sampleStates(s)
# make the first five positions invariable
setRateMultipliers(s,p,0,1:5)
# get rate multipliers
getRateMultipliers(s,p)
# create a simulation object
sim<-PhyloSim(root.seq=s,phylo=rcoal(2))
# run simulation
Simulate(sim)
# print alignment
sim$alignment
|
40b1e184554a2035c56eea8d4b37a729f9b07840
|
1e87d5db6b12a90e11b0cfccbce02a640d0f0023
|
/water_potential.R
|
32f76387dc11c12c8b485945b0f4549482050807
|
[] |
no_license
|
haleybranch/Mimulus2018
|
8620431ae374991fc776b75837decfdbadd6f47e
|
b31e3b42ca50e98c30f38d5acd18b81fcfd12420
|
refs/heads/master
| 2020-03-25T22:10:46.307341
| 2019-10-08T21:57:25
| 2019-10-08T21:57:25
| 144,209,166
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,882
|
r
|
water_potential.R
|
#### Water potential
# Exploratory analysis of July 2018 water-potential sensor readings:
# reshape to long format, build proper date-times, plot raw and daily-mean
# water potential per bench, then (work in progress) classify drought stages.
wpotential <- na.omit(data.frame(read.csv("waterpotentialJuly2018.csv"))) # red sensor
head(wpotential)
tail(wpotential)
# orient data longwise
wpotential <- wpotential %>%
  gather(Bench, WP, WP1:WP4)
head(wpotential)
# add AM/PM to time
wpotential <- wpotential %>%
  mutate(Time2 = paste(Time, AMPM),
         DateTime = paste(Measurement, Time2))
head(wpotential)
# change format to 24 hour, convert format for ggplot
wpotential$Time3 <- as.POSIXct(strptime(wpotential$DateTime, "%Y-%m-%d %I:%M:%S %p"))
wpotential$Day <- as.POSIXct(wpotential$Measurement)
head(wpotential, 20)
str(wpotential)
# remove old date and time columns,
wpotential <- wpotential %>%
  select(Day, DayTime = Time3, Bench, WP)
head(wpotential)
tail(wpotential)
# graph of raw WP by bench
ggplot(data=wpotential, aes(x=DayTime, y=WP)) +
  geom_point(aes(color=Bench))
# mean per day
wpotential.mean <- wpotential %>%
  group_by(Bench, Day) %>%
  summarise(WP.mean = mean(WP)) %>%
  ungroup() %>%
  select(Bench, Day, WP.mean)
head(wpotential.mean, 10)
# graph of mean wp per day
ggplot(data=wpotential.mean, aes(x=Day, y=WP.mean)) +
  geom_point(aes(color=Bench))
## amy stopped here
##### creating new subsets for days of interest
# NOTE(review): attempts 1-6 below are non-working scratch -- wpotential.mean
# has columns Bench/Day/WP.mean, so references to a `Measurement` column
# (and to `wpotential2.mean`, `replace.value`) will fail as written.
#attempt 1
wpotential.start <- wpotential.mean %>%
  group_by(Measurement)
wpotential.start
#attempt 2
wpotential.mean$Measurement <- as.numeric(wpotential.mean$Measurement)
drought.start <- wpotential.mean[wpotential$Measurement == c(2018-07-01, 2018-07-14),]
drought.start <- data.frame(Measurement = c(2018-07-01, 2018-07-14))
#attempt3
plot(wpotential.mean$Time[wpotential.mean$Measurement < 2018-07-14],
     wpotential.mean$WP[wpotential.mean$Measurement < 2018-07-14])
#attempt 4
drought.start <- subset(wpotential2.mean, wpotential2.mean$Measurement > 2018-07-01 & wpotential2.mean$Measurement < 2018-07-15)
#attempt 5
replace.value(wpotential.mean, Measurement, from = 2018-07-01, to=as.factor(1), verbose = FALSE)
wpotential.mean
#attempt 6
wpotential.mean$Measurement <- as.character(wpotential.mean$Measurement)
#attempt 7
# create a new column using mutate with an if else statement
# or split the measurement column into year, month, day -- and use if the day is less than 14 ...
# splitstring function?
#step 1 - separate YYYY-MM-DD into columns
wpotential.mean <- separate(wpotential.mean, "Measurement", c("Year", "Month", "Day"), sep = "-")
#step 2 - create new column with 3 stages: 1 = 3/4 water, 2 = 1/2 water, 3 = 0 water
wpot <- as.data.frame(wpotential.mean)
wpot$Day <- as.numeric(wpot$Day)
wpot$Stage[wpot$Day<=14]<-1
# NOTE(review): `wpot$population` does not exist -- this looks like a typo
# for a day-based bound (presumably wpot$Day < 30); confirm intended rule.
wpot$Stage[wpot$Day>14 & wpot$population<30] <-2
wpot$Stage[wpot$Day>=30] <-3
wpot$Stage <- as.character(wpot$Stage)
#graph the daily water potential during each stage
ggplot(data=wpot, aes(x=Time, y=WP.mean)) +
  geom_point(aes(color=Bench)) +
  facet_wrap(aes(group=Stage))
|
877d763153045bb40b92f1e3f46a8273b0e48756
|
bb176c57cf4372a02ce92fb2ddfbe391326c1d0e
|
/R/man/GMI.Rd
|
68d0b62ca815f59854a0757fc295403e26a086ce
|
[
"MIT"
] |
permissive
|
jbpauly/cgmquantify
|
da7cf310426ecf3332abf8c656ce6fe567076910
|
38772141e4fda9e452302874bd561433fcb61baa
|
refs/heads/master
| 2023-03-01T17:02:56.888616
| 2021-02-10T03:16:33
| 2021-02-10T03:16:33
| 337,473,687
| 0
| 0
|
MIT
| 2021-02-10T03:16:34
| 2021-02-09T16:55:52
| null |
UTF-8
|
R
| false
| true
| 329
|
rd
|
GMI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GMI.R
\name{GMI}
\alias{GMI}
\title{Compute Glycemic Management Indicator}
\usage{
GMI(df)
}
\arguments{
\item{df}{Data frame read through readfile}
}
\value{
A numeric value representing GMI
}
\description{
This function computes the estimated GMI
}
|
4cf9d859fa5c35dc4e3554ff0e8c43488da617d4
|
b6511f622e3f97ae7d75282bc697dd9c9e4cd089
|
/cachematrix.R
|
72006ed6889623f8bd40a56327197052f90c2be9
|
[] |
no_license
|
iamzog/ProgrammingAssignment2
|
be6d29550e2f1db381d2891ba4c94b7a1b0cce8e
|
67cd6c60502c5965d4ff5a074735476df4969281
|
refs/heads/master
| 2021-01-21T01:39:47.835395
| 2014-08-22T21:29:48
| 2014-08-22T21:29:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 904
|
r
|
cachematrix.R
|
## Constructor for a matrix wrapper that can cache its inverse.
## Returns a list of four accessors: set/get for the matrix itself and
## setsolve/getsolve for the cached inverse (NULL until one is stored).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setsolve = function(solved) inv <<- solved,
    getsolve = function() inv
  )
}
## Return the inverse of the cached matrix object `x` (as built by
## makeCacheMatrix). On a cache hit the stored inverse is returned (with a
## message); otherwise the inverse is computed with solve(), cached, and
## returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
# Demo of the caching workflow defined above:
x <- matrix(c(1,2,3,0,1,4,5,6,0), nrow = 3) #creates matrix
x #shows matrix
solve(x) #shows inverse of matrix
z <- makeCacheMatrix(x) #caches the matrix
cacheSolve(z) #solves inverse which matches solve(x)
cacheSolve(z) #shows that it doesn't recalculate
|
bd473c5659a2538fafef1cd67e6089172d75c480
|
a66d2fa34cb3543597d2a9e35b268df245230ede
|
/tests/testthat/helper-round_dbl.R
|
2e78a83c7eb2e00dc99404632fcd0d600f3bf61e
|
[
"MIT"
] |
permissive
|
2DegreesInvesting/r2dii.match
|
da6727f3da80ccd36110186c7ddf09bc10066484
|
87a10c8555bc0f7f6f667dfd132991ad2ac2c9b0
|
refs/heads/main
| 2023-03-16T13:04:21.769127
| 2022-09-07T11:37:46
| 2022-09-07T11:37:46
| 208,819,359
| 4
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 203
|
r
|
helper-round_dbl.R
|
# Round every double element/column of `data` to `digits` decimal places,
# leaving non-double columns (integers, characters, ...) untouched.
round_dbl <- function(data, digits = 4L) {
  dbl_cols <- detect_dbl(data)
  data[dbl_cols] <- lapply(data[dbl_cols], round, digits = digits)
  data
}

# Named logical vector flagging which elements of `data` hold doubles.
detect_dbl <- function(data) {
  unlist(lapply(data, is.double))
}
|
5db0c410d349e6c908aabfa36f96d598d0a7f260
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/roptim/examples/example1_rosen_nograd_bfgs.Rd.R
|
15050390b946bc3dcf722b0e368b00e7666b8cbe
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
example1_rosen_nograd_bfgs.Rd.R
|
# Example script from the roptim package: minimise the Rosenbrock banana
# function with BFGS (numerical gradient), first via base R optim(), then via
# the package's equivalent C++ implementation.
library(roptim)
### Name: example1_rosen_nograd_bfgs
### Title: Example 1: Minimize Rosenbrock function (with numerical
###   gradient) using BFGS
### Aliases: example1_rosen_nograd_bfgs
### ** Examples
fr <- function(x) {   ## Rosenbrock Banana function
  x1 <- x[1]
  x2 <- x[2]
  100 * (x2 - x1 * x1)^2 + (1 - x1)^2
}
# Base-R reference run; gr = NULL means optim uses a numerical gradient.
optim(c(-1.2,1), fr, NULL, method = "BFGS")
## corresponding C++ implementation:
example1_rosen_nograd_bfgs()
|
2ef0313b39aa433bb16621a60920e30c333c6d4d
|
3853c7c2aec2afda0e14bf57c76aae4408ee74d2
|
/man/missing_count.Rd
|
be46ea7e638caa7cc19121b028388bb07077f5da
|
[] |
no_license
|
lenamax2355/shinyr
|
9342a439046517f4dd836f0b38dcc41b2d3dd6e0
|
5f1194b9ca6f39a2446aed166c31b68196bec108
|
refs/heads/master
| 2023-06-09T01:53:26.610183
| 2021-06-23T04:38:34
| 2021-06-23T04:38:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 471
|
rd
|
missing_count.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/missing_count.R
\name{missing_count}
\alias{missing_count}
\title{Missing Count}
\usage{
missing_count(x)
}
\arguments{
\item{x}{vector}
}
\value{
Number of missing values in the given set of values
}
\description{
Count the number of missing values in a vector.
}
\details{
missing_count
}
\examples{
missing_count(c(1,2,3))
missing_count(c(NA, 1, NA, "NULL", ""))
}
\author{
Jayachandra N
}
|
7130a254edb229252d589c3fcb1d05d06a09c917
|
9de3d1c1975c4f9a1a730cd732547d462bb79350
|
/man/MSIGDB_Geneset_Small_Names.Rd
|
ac9ce9aa68f60cc8e6e25a98e5edc274abc6b49b
|
[] |
no_license
|
millerh1/correlationAnalyzeR
|
c0dc804bca7a6307e104df90c4aa7d4032ee9b6e
|
1869773b3b9370207667efa07953a80f0bf1b0fc
|
refs/heads/master
| 2021-06-16T18:12:45.244298
| 2021-03-30T02:53:08
| 2021-03-30T02:53:08
| 183,645,303
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 502
|
rd
|
MSIGDB_Geneset_Small_Names.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{MSIGDB_Geneset_Small_Names}
\alias{MSIGDB_Geneset_Small_Names}
\title{A vector of valid MSIGDB geneset names with fewer then 500 genes associated with them}
\format{
An object of class \code{character} of length 21940.
}
\source{
msigdbr()
}
\usage{
MSIGDB_Geneset_Small_Names
}
\description{
A vector of valid MSIGDB geneset names with fewer then 500 genes associated with them
}
\keyword{data}
|
ff4c87cd04379ffa04c94759069105c717af9590
|
326758331e3577e5256dacfc3d9c76b34810d0ec
|
/Server/error-srv.R
|
20f00a5853e3b75d560ad563f184c8de79f380b6
|
[] |
no_license
|
astrasb/Power_Analysis_App
|
96e2e6afd3c02077192e75b42af18e1810b194ca
|
c336e3496b95cf11d317bc46f723df8f535c819e
|
refs/heads/master
| 2022-12-16T10:45:00.096779
| 2020-09-11T18:55:31
| 2020-09-11T18:55:31
| 258,887,778
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 657
|
r
|
error-srv.R
|
# Error Messages
# Renders an HTML error panel when dataOutput() yields NULL (per the
# message text: the uploaded data's rows/columns do not fit the selected
# statistical test); renders nothing otherwise.
output$error <- renderUI({
result<-dataOutput()
if (is.null(result)){
str1 <-c('<h5>Number of data columns or rows does
not match the selected statistical test. </h5>
<h5>Please pick another file.</h5>')
str2 <-c('<p>
Instructions for correct formating of data inputs
can be found under the Application Instructions
section below. <p>')
HTML(paste(str1, str2,sep = '<br/>'))}
})
# Keep this output evaluated even while hidden so the message is ready
# the moment the UI reveals it.
outputOptions(output, 'error', suspendWhenHidden = FALSE)
|
3164c80cacfc622bb75964183b2e565c92e94277
|
6eb0c9e95e7dc19d762fcf37da0b92e27eb212a5
|
/DTU_ML_kursus/02450Toolbox_R/Scripts/ex5_1_7.R
|
2172c69cd4624b9887d446051e43cb0931fb6201
|
[] |
no_license
|
AnnaLHansen/projects
|
81b125e8789c2555c8a2b05c469193094e25610f
|
fb6fe1d268c81146fb819cf42722fe93f9af31f6
|
refs/heads/master
| 2021-07-19T15:29:46.507559
| 2020-09-04T12:23:31
| 2020-09-04T12:23:31
| 211,500,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 343
|
r
|
ex5_1_7.R
|
# exercise 5.1.7
source("Scripts/ex5_1_6.R") # get data and tree model (expected to provide mytree and attributeNames)
library(rpart)
# Define a new data object with the attributes given in the text
# (a single observation laid out as one row with the training columns).
x = data.frame(t(c(6.9, 1.09, .06, 2.1, .0061, 12, 31, .99, 3.5, .44, 12)))
colnames(x) <- attributeNames
# Evaluate the classification tree for the new data object
# NOTE(review): "newdat" relies on R's partial argument matching for
# predict()'s "newdata" argument - spell it out to be safe.
predict(mytree, newdat=x)
|
94b91140af8318cd2d3147419d9f73278cdf4c43
|
b57ef22a45544ea715e8c58a7bff4472e27ef47f
|
/powers-master/man/root_square.Rd
|
22c1c72939f0c85e01923fb2b3fdc10492e50019
|
[] |
no_license
|
STAT545-UBC-hw-2018-19/hw07-lh563
|
c1b3508f46c2bb3b4e8ac2962bb163ae38ca856d
|
96dba6315c1255275a8e443d7e98215a0c7c074b
|
refs/heads/master
| 2020-04-06T20:37:13.772110
| 2018-11-15T22:45:41
| 2018-11-15T22:45:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 216
|
rd
|
root_square.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/square.R
\name{root_square}
\alias{root_square}
\title{root_square}
\usage{
root_square(x, plot_it = FALSE)
}
\description{
root_square
}
|
1b7b978dabda0ccd4a82e924d0b7c8974eb4ffe5
|
a03534ec23df4282fa35a0e31dcf3e94734faa6d
|
/R/MeSH_DB_Update.R
|
91688f468a24a2f10427bf2086f44e13e0c1c3c9
|
[] |
no_license
|
liulihe954/EnrichKit
|
501beaf67c54b3d6bc94d261497ee58b140a4589
|
febbce860477032cd050db9eb60cf5fa00af63ce
|
refs/heads/master
| 2021-07-21T03:48:04.025631
| 2020-10-09T05:40:02
| 2020-10-09T05:40:02
| 222,389,799
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,973
|
r
|
MeSH_DB_Update.R
|
#' Update/download new database - Mesh.
#' New database will be gathered and stored locally.
#'
#' @param keyword Keyword used for naming. Default \code{keyword = "MeSH_DB"}
#' @param DB_location Store path of the new database. Default is current working directory.
#'
#' @return NA. New dataset will be packed and stored in .RData format.
#' @export
#' @import MeSH.db MeSH.Bta.eg.db dplyr
#' @examples MeSH_DB_Update()
#'
MeSH_DB_Update =function(keyword = "MeSH_DB",DB_location = '.'){
## download DB
# Start a timer so elapsed time can be reported after the download.
ptm <- proc.time();message(paste("Accessing Database..."))
message(paste("Database: ",keyword," download starts!"))
# All bovine (Bta) MeSH identifiers known to the annotation package.
key_Bta <- keys(MeSH.Bta.eg.db, keytype = "MESHID")
#key_Bta = key_Bta[1:5]
# MESHID -> MESHTERM lookup from the core MeSH database.
List = MeSHDbi::select(MeSH.db,keys = key_Bta,keytype = "MESHID",columns = c("MESHID","MESHTERM"))
# Keep only records in MeSH categories "D" and "G", attach the readable
# term for each ID, and drop the category column.
list_Bta = MeSHDbi::select(MeSH.Bta.eg.db,
keys = key_Bta,
columns = columns(MeSH.Bta.eg.db)[1:3],
keytype = "MESHID") %>%
dplyr::filter(MESHCATEGORY %in% c("D","G")) %>%
dplyr::left_join(List,by= c("MESHID" = "MESHID")) %>%
dplyr::select(-MESHCATEGORY)
message("Downloads finished! Time used: ")
print(proc.time() - ptm)
## parse into terms/records (for easier extraction)
# Drop rows with an empty second column, glue columns 2 and 3 into a
# single "term---description" string, and split into a per-record list.
# NOTE(review): split_tibble() is not defined in this file - it must be
# provided elsewhere in the package; confirm it is exported/available.
DB_List = list_Bta %>%
dplyr::filter(nchar(list_Bta[,2]) != 0) %>%
mutate(TermDescription = paste(.[,2],.[,3],sep = "---" )) %>%
dplyr::select(names(list_Bta)[1],TermDescription) %>%
split_tibble(column = 'TermDescription',keep = names(list_Bta)[1])
#
# Persist the parsed database to <DB_location>/<keyword>.rda.
MeSH_DB = DB_List
save(MeSH_DB,file = paste0(DB_location,'/',keyword,'.rda'))
#
file_name = paste0(keyword,'.rda')
message(paste("Totally ",length(DB_List),'records were updated on ',Sys.time()))
if (DB_location != '.'){
message(paste("Database was saved in ",DB_location," in the name of",file_name))
} else {
pwd = getwd();
message(paste("Database was saved in ",pwd," in the name of",file_name))
}
# NOTE(review): this load() only populates the function's local
# environment, which is discarded on return - it looks like a leftover
# sanity check; confirm whether it is still needed.
load(paste0(DB_location,'/',keyword,'.rda'))
}
|
571bdde7183db8ed77936f43549ffb5a0d04bba4
|
6a103070f0eff21ee7497559e3a96e26005f4961
|
/R/Salvagecashflowtablelibraries.R
|
7e0a2881bc62bec0b8c87fd290929afc979c0642
|
[
"MIT"
] |
permissive
|
tedfarry/spinozatest
|
f3719e2da0d8bc2b5371eeeb4e0c73154880801d
|
adaa0c98f10b435fc73805d785a460a25f687023
|
refs/heads/master
| 2021-06-12T12:35:32.113106
| 2021-03-26T01:05:08
| 2021-03-26T01:05:08
| 166,516,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 86
|
r
|
Salvagecashflowtablelibraries.R
|
# One-off setup script: installs the packages used by the salvage
# cash-flow table report.
# NOTE(review): unconditional install.packages() re-downloads on every
# run; consider guarding with requireNamespace() if this is sourced
# routinely.
install.packages("flextable")
install.packages("FinCal")
install.packages("rjson")
|
3e1b3a57688e0b0896e0b264f1d132465aa260f6
|
8e531193fd089f7a0c10ad49891fd3f07f42a544
|
/man/occdat_eg1.Rd
|
40da322b544077db1bb9de02defa757829e8bff8
|
[
"MIT"
] |
permissive
|
corersky/mapr
|
e25c3fdc3868f3c1310ca2f7ad0837990619de8a
|
8979429bd4a7a5ba8e4204accde5d38221d3a076
|
refs/heads/master
| 2020-12-28T09:20:38.564271
| 2019-12-09T13:32:01
| 2019-12-09T13:32:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 454
|
rd
|
occdat_eg1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapr-package.R
\docType{data}
\name{occdat_eg1}
\alias{occdat_eg1}
\title{Example dataset: output from call to \code{\link[spocc:occ]{spocc::occ()}}}
\format{A data frame with 25 rows and 62 variables}
\description{
A dataset with 25 rows, and 62 columns, from the query:
\code{spocc::occ(query='Accipiter striatus', from='gbif', limit=25, has_coords=T)}
}
\keyword{datasets}
|
4fbc4a1ca1c62c44c9be4d7e5690d8ea1baff641
|
e20dae6615dd906842fb6a7eeee3e41cb625cc51
|
/R/utility_functions.R
|
3a8c931c2a20a5444eee9e2486789a745d457b55
|
[] |
no_license
|
LenaCarel/nmfem
|
1ba1f330569ced860425a149de5423ae14dee26a
|
58b1b232277d597f0c24bfc415114a2aa30cf3f2
|
refs/heads/master
| 2020-05-02T22:08:32.791009
| 2019-12-01T21:39:48
| 2019-12-01T21:39:48
| 178,243,096
| 1
| 2
| null | 2019-04-01T14:02:31
| 2019-03-28T16:32:37
|
R
|
UTF-8
|
R
| false
| false
| 4,538
|
r
|
utility_functions.R
|
#' Extract log-likelihood from a mixture of multinomials
#'
#' @param X a matrix of dimension \code{N} (number of observation) \code{x M} (number of variables) containing multinomials observations.
#' @param Theta matrix of dimension \code{M x H}.
#' @param Lambda matrix of dimension \code{H x K}. Can be \code{NULL}.
#' @param p vector containing the proportions of each cluster. Must be of dimension \code{K} (or \code{H} if \code{Lambda} is \code{NULL}).
#' @return The function returns the log-likelihood of the data to the model
#' @examples
#' travelers <- travelers[ ,-1]
#' M <- ncol(travelers)
#' K <- 5
#'
#' Theta0 <- t(dplyr::sample_n(travelers, K))
#' Theta0 <- Theta0 / matrix(rep(apply(Theta0, 2, sum), M), nrow = M, ncol = K, byrow = TRUE)
#' travelers <- as.matrix(travelers)
#' p0 <- rep(1 / K, K)
#'
#' llh <- loglik_mult(travelers, Theta0, p = p0)
#' llh
#'
#' @export
loglik_mult <- function(X, Theta, Lambda = NULL, p){
  # Log-likelihood of the count matrix X under a mixture of multinomials
  # with component profiles Theta %*% Lambda and mixing proportions p.
  n_comp <- ncol(Theta)
  # Without Lambda, the components are the columns of Theta themselves.
  if (is.null(Lambda)) Lambda <- diag(nrow = n_comp, ncol = n_comp)
  log_profiles <- log(Theta %*% Lambda)
  # Zero probabilities give -Inf; zero them so 0 * log(0) contributes 0.
  log_profiles[log_profiles == -Inf] <- 0
  scores <- X %*% log_profiles
  # Log-sum-exp trick: shift each row by its maximum before
  # exponentiating to avoid underflow, then add the shifts back.
  row_max <- apply(scores, 1, max)
  shifted <- exp(scores - row_max)
  log_obs <- log(shifted %*% p)
  log_obs[log_obs == -Inf] <- 0
  sum(log_obs) + sum(row_max)
}
#' Logarithm of a factorial
#'
#' @param x an integer
#' @return log(factorial(x))
#' @keywords internal
lnfact <- function(x){
  # log(x!) computed via the log-gamma function: lgamma(x + 1) == log(x!)
  # for x >= 0.  This replaces the hand-rolled sum(log(1:x)) (which needed
  # a special case for x == 0 and accumulates rounding error term by term)
  # with the standard-library implementation; lgamma(1) is exactly 0, so
  # the x == 0 case needs no branch.
  lgamma(x + 1)
}
#' Log-likelihood function from M-step, when t is estimated
#'
#' @param Xtt matrix. Correspond to the matrix multiplication of t(X) and t.
#' @param Theta matrix of dimension \code{M x H}.
#' @param Lambda matrix of dimension \code{H x K}.
#' @return Returns the log-likelihood function from M-step, when t is estimated.
#' @keywords internal
Q <- function(Xtt, Theta, Lambda){
  # Expected complete-data log-likelihood term from the M-step:
  # sum(Xtt * log(Theta %*% Lambda)).
  expected <- Theta %*% Lambda
  # Floor zero/NA entries at 1e-12 so the logarithm stays finite.
  expected[expected == 0] <- 10^(-12)
  expected[is.na(expected)] <- 10^(-12)
  sum(Xtt * log(expected))
}
#' Update of lambda
#'
#' @param Xtt matrix. Correspond to the matrix multiplication of t(X) and t.
#' @param Theta matrix of dimension \code{M x H}.
#' @param Lambda matrix of dimension \code{H x K}.
#' @return Returns the updated value of Lambda
#' @keywords internal
lambda_update <- function(Xtt, Theta, Lambda){
H <- ncol(Theta)
K <- ncol(Lambda)
M <- nrow(Theta)
# Largest decimal exponent present in Theta; -Inf entries (from zeros)
# are mapped to 0 so they cannot dominate the max.
mpow <- floor(log10(Theta))
mpow[mpow == -Inf] <- 0
pow <- max(mpow)
# Rescale only when entries are astronomically large (>= 1e200) to avoid
# overflow in the matrix products below; the constant factor cancels in
# the final column renormalisation.
if(pow > 200) Theta <- Theta*10^(-pow)
# Ratio of observed to expected counts; 0/0 (NA) and x/0 (Inf) cells are
# zeroed so they contribute nothing to the update.
tmp <- Xtt / (Theta %*% Lambda)
tmp <- ifelse(is.na(tmp), 0, tmp)
tmp <- ifelse(tmp == Inf, 0, tmp)
# Same overflow guard applied to the ratio matrix.
mpow <- floor(log10(tmp))
mpow[mpow == -Inf] <- 0
pow <- max(mpow)
if(pow > 200) tmp <- tmp * 10 ^ (- pow)
# Column sums of Theta, replicated so every column of Lambda is divided
# by the matching column total of Theta.
sum_Theta <- matrix(rep(apply(Theta, 2, sum),K), nrow = H, ncol = K)
# Multiplicative EM update, then renormalise each column of Lambda to
# sum to one.
Lambda <- Lambda * (t(Theta) %*% tmp) / sum_Theta
Lambda <- Lambda / matrix(rep(apply(Lambda, 2, sum), H), nrow = H, ncol = K, byrow = TRUE)
return(Lambda)
}
#' Update of theta
#'
#' @param Xtt matrix. Correspond to the matrix multiplication of t(X) and t.
#' @param Theta matrix of dimension \code{M x H}.
#' @param Lambda matrix of dimension \code{H x K}.
#' @return Returns the updated value of Theta
#' @keywords internal
theta_update <- function(Xtt, Theta, Lambda){
  # Multiplicative EM update of Theta (M x H) given expected counts
  # Xtt (M x K) and the current mixing matrix Lambda (H x K).
  n_rows <- nrow(Theta)
  n_cols <- ncol(Theta)
  # Ratio of observed to expected counts; 0/0 (NA) and x/0 (Inf) cells
  # are zeroed so they contribute nothing.
  ratio <- Xtt / (Theta %*% Lambda)
  ratio[is.na(ratio)] <- 0
  ratio[ratio == Inf] <- 0
  # Row sums of Lambda replicated across every row of Theta.
  lambda_rowsums <- matrix(rowSums(Lambda), nrow = n_rows, ncol = n_cols, byrow = TRUE)
  Theta * (ratio %*% t(Lambda)) / lambda_rowsums
}
#' Update of t
#'
#' @param X a matrix of dimension \code{N} (number of observation) \code{x M} (number of variables) containing multinomials observations.
#' @param Theta matrix of dimension \code{M x H}.
#' @param Lambda matrix of dimension \code{H x K}.
#' @param p vector containing the proportions of each cluster. Must be of dimension \code{K}.
#' @return Returns the updated value of Theta
#' @keywords internal
t_update <- function(X, Theta, Lambda, p){
  # E-step: posterior cluster memberships for each of the n rows of X,
  # computed row by row on the log scale for numerical stability.
  n_obs <- nrow(X)
  n_clusters <- ncol(Lambda)
  posterior <- matrix(NA, nrow = n_obs, ncol = n_clusters)
  log_mix <- log(Theta %*% Lambda)
  # Replace -Inf (zero probabilities) by a very small finite log value.
  log_mix[log_mix == -Inf] <- log(1e-60)
  for (i in seq_len(n_obs)) {
    log_score <- log(p) + X[i, ] %*% log_mix
    # posterior_k = 1 / sum_l exp(score_l - score_k): a division-free
    # softmax that avoids exponentiating large scores directly.
    diff_mat <- exp(matrix(data = log_score, nrow = n_clusters, ncol = n_clusters, byrow = TRUE) -
                    matrix(data = log_score, nrow = n_clusters, ncol = n_clusters, byrow = FALSE))
    posterior[i, ] <- 1 / rowSums(diff_mat)
  }
  posterior
}
|
f9c4c061657a32a4eff304374245d4301a931e52
|
eb09acc1e170228d123eb713c79563c2f10d6f8d
|
/TwitterSentiment/R/waffle_func.R
|
62344014056717fc6d8278004925f075157c353a
|
[] |
no_license
|
PHP-2560/final-project-twittersentiment
|
16f6fd3c423d3fcc5f5ad38272766e7d84063308
|
0244f0a00880b62ae0f68078fdcdd152892df336
|
refs/heads/master
| 2020-04-11T04:47:53.675453
| 2018-12-18T03:50:57
| 2018-12-18T03:50:57
| 161,526,369
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,791
|
r
|
waffle_func.R
|
waffleFun <- function(data, lexicon = c("bing", "afinn", "nrc"), dropwords, num_Rows)
{
  # Build a waffle chart of sentiment counts for tokenized text data.
  #
  # data:      data frame of tokens with a `word` column (one token per row)
  # lexicon:   sentiment lexicon to join against ("bing", "afinn" or "nrc")
  # dropwords: words to exclude from the lexicon before joining
  # num_Rows:  number of rows in the waffle grid
  #
  # Returns the waffle ggplot object.
  lexicon <- match.arg(lexicon)
  # "afinn" scores words on a numeric scale; "bing"/"nrc" use categorical
  # labels.  Apart from the grouping column and the title, the two original
  # branches were line-for-line identical, so they are folded into one path.
  group_col <- if (lexicon == "afinn") "score" else "sentiment"
  chart_title <- if (lexicon == "afinn") "Afinn Sentiments" else paste(lexicon, "Sentiments")
  # Remove unwanted words from the lexicon, then keep only matching tokens.
  lex <- get_sentiments(lexicon) %>% filter(!word %in% dropwords)
  sent <- data %>% inner_join(lex)
  # Tally tokens per sentiment class (or per numeric score for afinn).
  grouped <- sent %>%
    group_by(.data[[group_col]]) %>%
    summarise(n = n())
  values <- grouped$n
  names <- grouped[[group_col]]
  # Label each slice as "name (percentage)".
  val_names <- sprintf("%s (%s)", names, percent(round(values / sum(values), 2)))
  names(values) <- val_names
  graph <-
    waffle(values, title = chart_title, rows = num_Rows,
           colors = c('gold', 'hotpink3', 'mediumturquoise', 'purple',
                      'midnightblue', 'mediumvioletred', 'plum', 'red',
                      'steelblue4', 'yellow')) +
    theme(plot.background = element_rect(fill = "snow1"))
  return(graph)
}
|
aca1869fe343dc91463d56cfb9fa5991df054ec3
|
a3c78700a65f10714471a0d307ab984e8a71644d
|
/models/linkages/man/read_restart.LINKAGES.Rd
|
5599ca9ee148a0ce992458f2aac67d014e0f61df
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PecanProject/pecan
|
e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f
|
ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c
|
refs/heads/develop
| 2023-08-31T23:30:32.388665
| 2023-08-28T13:53:32
| 2023-08-28T13:53:32
| 6,857,384
| 187
| 217
|
NOASSERTION
| 2023-09-14T01:40:24
| 2012-11-25T23:48:26
|
R
|
UTF-8
|
R
| false
| true
| 634
|
rd
|
read_restart.LINKAGES.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_restart.LINKAGES.R
\name{read_restart.LINKAGES}
\alias{read_restart.LINKAGES}
\title{read_restart.LINKAGES}
\usage{
read_restart.LINKAGES(
outdir,
runid,
stop.time,
settings,
var.names = NULL,
params = NULL
)
}
\arguments{
\item{outdir}{output directory}
\item{runid}{run ID}
\item{stop.time}{year that is being read}
\item{var.names}{var.names to be extracted}
\item{settings}{PEcAn settings object}
}
\value{
X.vec vector of forecasts
}
\description{
Read Restart for LINKAGES
}
\author{
Ann Raiho \email{araiho@nd.edu}
}
|
a71e25ecda3d801e1d65c77b918515854f87f1f5
|
96082676c6dfe8049111fb777229ca779272a411
|
/R_code/other_code/MelbourneThomas13PLoS_code/community.r
|
1f64007ce27ab78024216233f4a979aaf3e97c72
|
[] |
no_license
|
dietahanson/AustraliaLoopAnalysis
|
8724b54ad034b20f9a179964dc6ab9dc9758d12b
|
abc3c56ece5c7e3cbc717d7993eb2393f50ce62e
|
refs/heads/master
| 2023-07-06T18:03:25.613225
| 2023-06-21T22:41:51
| 2023-06-21T22:41:51
| 134,459,503
| 0
| 0
| null | 2023-06-21T22:41:52
| 2018-05-22T18:35:26
|
R
|
UTF-8
|
R
| false
| false
| 4,874
|
r
|
community.r
|
## Functions to process and analyse community matrices
library(utils)
## Extract node labels
node.labels <- function(edges) {
  # Node names are the factor levels of the edge list's From column.
  levels(edges[["From"]])
}
## Convert edge descriptions to an adjacency matrix. The
## required.groups argument determines which edge groups will appear
## in the matrix.
## Convert edge descriptions to an adjacency matrix.  Only edges whose
## Group is in required.groups contribute; all others become 0.  With
## labels = TRUE the matrix rows/columns are named after the nodes.
adjacency.matrix <- function(edges, labels = FALSE, required.groups = c(0)) {
  # `FALSE` instead of the reassignable shorthand `F` (behaviorally
  # identical; callers passing labels positionally are unaffected).
  keep <- ifelse(edges$Group %in% required.groups, 1, 0)
  n <- nlevels(edges$From)
  nms <- levels(edges$From)
  A <- matrix(0L, n, n, dimnames = if (labels) list(nms, nms))
  # Edge types: N = negative (-1), P = positive (+1), U = unknown (NA),
  # Z = zero.
  type <- c("N", "P", "U", "Z")
  weight <- c(-1, 1, NA, 0)
  # Entry A[to, from] holds the signed weight of the edge from -> to.
  A[cbind(edges$To, edges$From)] <- keep * weight[match(edges$Type, type)]
  A
}
## Add self loops to enforce self limitation
enforce.limitation <- function(edges) {
# Self loops already present in the edge list (To == From).
loops <- edges$To[edges$To==edges$From]
# Nodes that do not yet have a self loop.
# NOTE(review): `edges$From[loops]` indexes by the factor's integer
# codes rather than by row position; this only works if a self loop's
# node code points at a row whose From is that same node - confirm
# whether `edges$From[edges$To == edges$From]` was intended.
limit <- setdiff(levels(edges$From),edges$From[loops])
n <- length(limit)
# Append one negative (self-limiting) edge per missing node, all in
# group 0 with fresh Pair identifiers.
rbind(edges,
data.frame(From=factor(limit,levels=levels(edges$From)),
To=factor(limit,levels=levels(edges$From)),
Group=rep(0,n),
Type=factor(rep("N",n),levels(edges$Type)),
Pair=max(edges$Pair)+1:n))
}
## Create functions to generate random community matrices given the
## edge list describing the web topology. This returns a list of two
## functions, "community" draws a random community matrix, and
## "select" determines which optional edges will be retained in the
## web topology. The user can specify a list of the edge groups that are
## required to be retained in the model.
community.sampler <- function(edges,required.groups=c(0)) {
n.nodes <- nlevels(edges$From)
n.edges <- nrow(edges)
# Template matrix; the closures below refill it on every draw.
W <- matrix(0,n.nodes,n.nodes)
## Ranges and indices of non-zero matrix entries
# Sampling interval per edge type: N in [-1,0], P in [0,1], U in [-1,1],
# Z collapses to [0,0].
lower <- ifelse(edges$Type=="U" | edges$Type=="N",-1L,0L)
upper <- ifelse(edges$Type=="U" | edges$Type=="P",1L,0L)
# Column-major linear index of the (To, From) entry for each edge.
k.edges <- as.vector(unclass(edges$To)+(unclass(edges$From)-1)*n.nodes)
## The indices of the matrix entries that can be omitted (zeroed), the
## expansion index that relates matching edges of a pair, and the
## number of edges that can be omitted.
required <- edges$Group %in% required.groups
k.optional <- k.edges[!required]
optional <- factor(edges$Pair[!required])
# `expand` maps each optional edge to its pair's slot in `zs`, so both
# members of a pair are kept or dropped together.
expand <- as.vector(unclass(optional))
n.omit <- max(0,expand)
# 0/1 keep indicator per optional pair; shared state of the closures
# below (select() rewrites it via <<-).
zs <- rep(1,n.omit)
if(n.omit > 0) {
# community(): draw uniform weights for every edge, then zero the
# optional edges whose pair is currently deselected.
community <- function() {
W[k.edges] <- runif(n.edges,lower,upper)
W[k.optional] <- W[k.optional]*zs[expand]
W
}
# select(p): retain each optional pair independently with probability p.
select <- function(p) {
zs <<- rbinom(n.omit,1,p)
zs
}
} else {
# No optional edges: drawing ignores zs and select() is a no-op.
# NOTE(review): this community() takes an unused argument p while the
# branch above takes none - harmless, but the signatures differ.
community <- function(p) {
W[k.edges] <- runif(n.edges,lower,upper)
W
}
select <- function(p) {
zs
}
}
list(community=community,select=select,optional.pairs=levels(optional))
}
## Check the stability of a simulated community matrix W
stable.community <- function(W) {
  # A simulated community matrix is (Lyapunov) stable when every
  # eigenvalue of W has a strictly negative real part.
  eigenvalues <- eigen(W, symmetric = FALSE, only.values = TRUE)$values
  all(Re(eigenvalues) < 0)
}
## Return sign of s, with values of magnitude less than epsilon
## rounded down to zero
signum <- function(s,epsilon=1.0E-5) {
  # Sign of s with a dead zone: magnitudes at or below epsilon map to 0.
  # Subtracting the two logical vectors yields an integer result
  # (-1L, 0L, 1L), matching the original behavior.
  positive <- s > epsilon
  negative <- s < -epsilon
  positive - negative
}
## Mutual information for discrete x, y
mutual.info <- function(x,y) {
  # Mutual information (in bits) between two discrete variables,
  # estimated from their empirical joint distribution.
  joint <- prop.table(table(factor(x), factor(y)))
  independent <- rowSums(joint) %o% colSums(joint)
  # Empty cells contribute 0 (the 0 * log(0) convention).
  sum(ifelse(joint == 0, 0, joint * log2(joint / independent)))
}
## Generate a function to check a press condition. User must supply a
## vector of named elements that specify the relative magnitude of the
## press perturbation, and a vector of named elements that specify the
## signs of the change in the monitored nodes.
press.validate <- function(edges,perturb,monitor,epsilon=1.0E-5) {
# Map node names to their positions in levels(edges$From); warn about
# names that are not nodes of the model (match() leaves them NA).
index <- function(name) {
k <- match(name,levels(edges$From))
if(any(is.na(k)))
warning("Unknown nodes:",paste(name[is.na(k)],collapse=" "))
k
}
## Indices of perturb
k.perturb <- index(names(perturb))
k.monitor <- index(names(monitor))
# Right-hand side of the press equation: -perturb at the perturbed
# nodes, zero elsewhere; the response is s = solve(W, S.press).
S.press <- double(nlevels(edges$From))
S.press[k.perturb] <- -perturb
# Only the signs of the monitored responses are compared.
monitor <- sign(monitor)
## Return function to check condition
# The closure returns TRUE when W is invertible and the monitored
# responses match the expected signs (using signum()'s epsilon dead
# zone); a singular W yields FALSE via the tryCatch.
function(W) {
s <- tryCatch(solve(W,S.press),error=function(e) NULL)
!is.null(s) && all(signum(s[k.monitor],epsilon)==monitor)
}
}
## Generate a function to determine the impact of a press perturbation
press.impact <- function(edges,perturb,monitor=NULL) {
# Map node names to their positions in levels(edges$From); warn about
# names that are not nodes of the model.
index <- function(name) {
k <- match(name,levels(edges$From))
if(any(is.na(k)))
warning("Unknown nodes:",paste(name[is.na(k)],collapse=" "))
k
}
## Indices of perturb
k.perturb <- index(names(perturb))
# Right-hand side of the press equation; the response to the press is
# solve(W, S.press).
S.press <- double(nlevels(edges$From))
S.press[k.perturb] <- -perturb
# With no monitored nodes, return the full response vector; otherwise
# restrict to the monitored nodes.
if(length(monitor)==0) {
impact <- function(W) solve(W,S.press)
} else {
k.monitor <- index(names(monitor))
impact <- function(W) solve(W,S.press)[k.monitor]
}
## Return function to compute impact
impact
}
|
8bf621c3ea4adc29014a5c77ce973282ca39d058
|
242737293b846c619d2aef6762d88c42bf6c8553
|
/man/annotateCC.Rd
|
daea4f9d761d6c66dc5a3c36a75955541f7d4121
|
[] |
no_license
|
dtharvey/eChem
|
d95006456b06d8ce142b1e1fc683a9935b7f0a34
|
2811d48c1d47d591214c55ec1f1cb05aa81ac409
|
refs/heads/master
| 2020-03-21T09:30:26.450840
| 2019-07-06T12:23:44
| 2019-07-06T12:23:44
| 138,403,219
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,250
|
rd
|
annotateCC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annotateCC.R
\name{annotateCC}
\alias{annotateCC}
\title{Annotate Chronocoulogram}
\usage{
annotateCC(filename, time.delay, scale.factor = 1, main_title = NULL)
}
\arguments{
\item{filename}{Name of the file that contains the results of a simulated chronocoulometry experiment.}
\item{time.delay}{Time after the application of a pulse for which the current is reported.}
\item{scale.factor}{Setting to a value less than 1 adjusts the \emph{y}-axis limits so that the limits are not defined by the current spike.}
\item{main_title}{An optional main title.}
}
\value{
Returns a plot of the chronocoulogram with annotations.
}
\description{
Plots a chronocoulogram and annotates it with either the charge
for a single pulse experiment, or, for a double pulse
experiment, with the charge following the forward and the
reverse pulse, and the charge ratio. The charges are displayed
for a designated time after a pulse, which defaults to the
length of the pulse if a value is not provided.
}
\examples{
ex_ca = simulateCA(e.start = 0.25, e.pulse = -0.25, e.form = 0,
pulses = "double", t.2 = 20, x.units = 100, t.units = 1000)
ex_cc = simulateCC(ex_ca)
annotateCC(ex_cc)
}
|
852c89af81389edbc8f23d8e147be998cf5a929c
|
82bbc478a9d2a54aba543a9551a1183c6acf8554
|
/codes/unit09_onewayANOVA_repeated.R
|
4904d4cc6475667309fe76f8a52aa187de23d7a6
|
[] |
no_license
|
SCgeeker/BasicStatistics
|
c085910ca110553510d05c2e050c36a590868777
|
fa8a4c3b64241b488f74bebf178d3f4d1c2753fa
|
refs/heads/master
| 2021-05-08T14:36:33.882512
| 2020-01-20T06:48:21
| 2020-01-20T06:48:21
| 120,090,750
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,783
|
r
|
unit09_onewayANOVA_repeated.R
|
## Sampling distribution of the F ratio for i repeated measures
## Simulates one-way repeated-measures data under the null hypothesis of
## no group effect (range_a = 0) and compares the empirical distribution
## of MS_Group / MS_Residual with candidate theoretical F distributions.
mu <- sample(0:10, 1)  ## grand mean: one random integer in 0..10
i <- 3                 ## number of repeated measures (levels of x)
j <- 10                ## number of participants
range_a <- 0           ## group effect size (0 => null hypothesis holds)
mean_s <- 3            ## mean of the participant random effect
mean_res <- 0          ## mean of the residuals
n_sim <- 10000         ## number of simulated data sets
Q_sim <- numeric(n_sim)  ## preallocated (growing with c() is O(n^2))
## Accumulate simulated F ratios
for (k in seq_len(n_sim)) {
  ## Pseudo effects and residuals
  a <- rep(range_a, i)              ## group deviations from the grand mean
  s <- rnorm(j, mean_s, 1)          ## one random effect per participant
  res <- rnorm(i * j, mean_res, 1)  ## iid residuals
  ## Pseudo data: rows are participant-major (S = 1,1,1,2,2,2,...), so the
  ## participant effect must be repeated with each = i to line up with the
  ## S labels.  (The original used rep(s, i), which cycled the effects
  ## across rows and decoupled them from the S factor.)
  data <- data.frame(y = mu + rep(a, j) + rep(s, each = i) + res,
                     S = rep(seq_len(j), each = i),
                     x = rep(seq_len(i), j))
  group_means <- with(data, tapply(y, x, mean))
  participant_means <- with(data, tapply(y, S, mean))
  grand_mean <- mean(data$y)
  ## Test statistics: index the cell means by each row's own labels so the
  ## decomposition cannot drift out of alignment with the row order.
  SS_Group <- j * sum((group_means - grand_mean)^2)
  MS_Group <- SS_Group / (i - 1)
  SS_Res <- sum((data$y - group_means[data$x] - participant_means[data$S] + grand_mean)^2)
  MS_Res <- SS_Res / ((j - 1) * (i - 1))
  Q_sim[k] <- MS_Group / MS_Res
}
## Draw the sampling distribution of MSG/MSR
hist(Q_sim, freq = FALSE, xlim = c(0, 5), ylim = c(0, 5))
## Bin the simulated values so the empirical density can be compared
sampling <- hist(Q_sim, plot = FALSE)
sampling_density <- sampling$density  ## was referenced below but never defined
## Overlay the candidate theoretical F densities
## NOTE(review): lines(y) plots against the index 1..n, not against the
## break positions; lines(sampling$breaks, df(...)) may have been intended.
lines(df(sampling$breaks, df1 = (i - 1), df2 = ((i - 1) * (j - 1))), col = "red")
lines(df(sampling$breaks, df1 = i, df2 = i * j), col = "blue")
## Sum of squared deviations between empirical and theoretical densities
## NOTE(review): df2 = i * (j - 1) here differs from the (i - 1) * (j - 1)
## used for the red curve above - confirm which comparison was intended.
sum((sampling_density - df(sampling$breaks, df1 = (i - 1), df2 = (i * (j - 1)))[-1])^2)
sum((sampling_density - df(sampling$breaks, df1 = i, df2 = i * j)[-1])^2)
## Estimated count of Type I errors at alpha = .05
sum(Q_sim > qf(.95, df1 = (i - 1), df2 = (i - 1) * (j - 1)))
|
35a47c3bac1a0f45d92a1af69a4ca2bea6b3a9da
|
02e46494362cf053a7c13d8dbfa4b2b018c8298e
|
/man/RegDiffTSPred.Rd
|
71e17196a46b302c3ce246c3d48a2324978348eb
|
[] |
no_license
|
david-salgado/TSPred
|
5445ba75b5c96c94f6f38cc4a9409a1444efad75
|
66ba0d3d57bbe5519f5ef5f7cbb236f3df47cacd
|
refs/heads/master
| 2022-04-26T03:44:30.782136
| 2020-04-25T10:31:22
| 2020-04-25T10:31:22
| 258,748,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,839
|
rd
|
RegDiffTSPred.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RegDiffTSPred.R
\docType{methods}
\name{RegDiffTSPred}
\alias{RegDiffTSPred}
\alias{RegDiffTSPred,vector-method}
\alias{RegDiffTSPred,StQList-method}
\title{Method to predict according to the regular difference time series model.}
\usage{
RegDiffTSPred(x, VarNames, frequency = 12L, forward = 2L)
\S4method{RegDiffTSPred}{vector}(x, VarNames, frequency = 12L, forward = 2L)
\S4method{RegDiffTSPred}{StQList}(x, VarNames, frequency = 12L,
forward = 2L)
}
\arguments{
\item{x}{\code{Vector} or object of class \linkS4class{StQList} upon which the prediction will be
made.}
\item{VarNames}{character vector with the variable names for which the prediction will be made;
by default it is NULL.}
\item{frequency}{integer indicating the frequency of the time periods in the time series; by
default it is 12L.}
\item{forward}{integer indicating the number of periods ahead when the prediction will be made;
by default it is 2L.}
}
\value{
It returns a \code{data.table} with components Pred and STD, containing the point
prediction and the estimated standard deviations, respectively, for each variable.
}
\description{
This method computes the predicted values and their standard deviations according to
the regular difference time series model \eqn{(1-B)y_{t}=a_{t}}{(1-B)y<sub>t</sub>=a<sub>t</sub>}.
}
\examples{
# Predicting one and two months ahead in time
data(Example1.TS)
RegDiffTSPred(Example1.TS, forward = 1L)
RegDiffTSPred(Example1.TS, forward = 2L)
# Predicting upon a times series with many NA values
data(Example2.TS)
RegDiffTSPred(Example2.TS, forward = 1L)
\dontrun{
# With an object of class StQList
data(StQListExample)
VarNames <- c('ActivEcono_35._6._2.1.4._0', 'GeoLoc_35._6._2.1._1.2.5.')
RegDiffTSPred(StQListExample, VarNames)
}
}
|
fcb0ef2b4a92b989654ab029b4664e21f938f98a
|
a14132853e6eae96fc86408d31f3658f5581383e
|
/run_analysis.R
|
2b03e43dffc6be66e1b696f3f9070968a6417574
|
[] |
no_license
|
AnyaRum/Getting_and_cleaning_data_Course_project
|
ed2923ef5aada48f12c26994435bf1edca7de69e
|
69c2ccf3a6dc730d5b0fff5636853cb2711cb59c
|
refs/heads/master
| 2020-05-31T06:49:15.580916
| 2015-06-21T13:12:10
| 2015-06-21T13:12:10
| 37,808,375
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,195
|
r
|
run_analysis.R
|
##########################################################################################################
## Coursera Getting and Cleaning Data Course Project
## by Anna Rumyantseva June 2015
##
## Downloads the UCI HAR smartphone dataset, merges the training and test
## sets, keeps the mean/std measurements, labels activities and variables,
## and writes a tidy per-subject/per-activity summary to tidy.txt.
##########################################################################################################
# Clean workspace
# NOTE(review): rm(list=ls()) wipes the user's workspace and setwd()
# below changes the session directory - acceptable for a one-off course
# script, but avoid in reusable code.
rm(list=ls())
# Download data and unzipping data
url = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if(!file.exists('dataUPC.zip')){
download.file(url, destfile = './dataUPC.zip')
unzip("dataUPC.zip", files = NULL, list = FALSE, overwrite = TRUE,
junkpaths = FALSE, exdir = ".", unzip = "internal",
setTimes = FALSE)
}
setwd("./UCI HAR Dataset")
##########################################################################################################
# Step 1. Merges the training and the test sets to create one data set.
## Loading data
# Names of measured features
features_names = read.table("./features.txt")
colnames(features_names) = c('n','feature')
# Activity labels and names
activity_names = read.table("./activity_labels.txt")
colnames(activity_names) = c('activity_id','activity_name')
# Training dataset: subject ids, activity ids and the measurements are
# stored in three parallel files and combined column-wise.
subject_train = read.table('./train/subject_train.txt')
colnames(subject_train) = 'subject_id'
activity_train = read.table('./train/y_train.txt')
colnames(activity_train) = 'activity_id'
data_train = read.table('./train/X_train.txt')
colnames(data_train) = features_names$feature
df_train = cbind(subject_train, activity_train, data_train)
# Test dataset: same layout as the training files.
subject_test = read.table('./test/subject_test.txt')
colnames(subject_test) = 'subject_id'
activity_test = read.table('./test/y_test.txt')
colnames(activity_test) = 'activity_id'
data_test = read.table('./test/X_test.txt')
colnames(data_test) = features_names$feature
df_test = cbind(subject_test, activity_test, data_test)
# Merging two frames together
df = rbind(df_train, df_test)
##########################################################################################################
# Step 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# fixed = TRUE matches the literal substrings "std()" and "mean()" (so
# e.g. "meanFreq()" columns are excluded); the two leading TRUEs keep
# the subject_id and activity_id columns.
feature_subset_id = grepl("std()", features_names$feature, fixed = TRUE) | grepl("mean()", features_names$feature, fixed = TRUE)
df_mean_std = df[, c(TRUE, TRUE, feature_subset_id)]
##########################################################################################################
# Step 3. Uses descriptive activity names to name the activities in the data set
df_mean_std$activity_id = factor(df_mean_std$activity_id)
levels(df_mean_std$activity_id) <- activity_names$activity_name
##########################################################################################################
# Step 4. Appropriately labels the data set with descriptive variable names.
# NOTE(review): gsub() is vectorized, so this loop could process the
# whole colNames vector at once; kept as-is to preserve behavior.
colNames = colnames(df_mean_std)
for (i in 1:length(colNames))
{
colNames[i] = gsub("\\()","",colNames[i])
colNames[i] = gsub("-std","STD",colNames[i])
colNames[i] = gsub("-mean","MEAN",colNames[i])
colNames[i] = gsub("^(t)","time ",colNames[i])
colNames[i] = gsub("^(f)","freq ",colNames[i])
colNames[i] = gsub("([Gg]ravity)","Gravity_",colNames[i])
colNames[i] = gsub("([Bb]ody[Bb]ody|[Bb]ody)","Body_",colNames[i])
colNames[i] = gsub("[Gg]yro","Gyro_",colNames[i])
colNames[i] = gsub("Mag","Magnitude_",colNames[i])
colNames[i] = gsub("Acc","Acceleration_",colNames[i])
colNames[i] = gsub("Jerk","Jerk_",colNames[i])
colNames[i] = gsub("_STD"," STD",colNames[i])
colNames[i] = gsub("_MEAN"," MEAN",colNames[i])
colNames[i] = gsub("-X"," (X)",colNames[i])
colNames[i] = gsub("-Y"," (Y)",colNames[i])
colNames[i] = gsub("-Z"," (Z)",colNames[i])
}
##########################################################################################################
# Step 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy_table = aggregate(.~ subject_id + activity_id, data = df_mean_std, FUN= "mean" )
colnames(tidy_table) = colNames
## Saving the tidy dataset
write.table(tidy_table, file = "tidy.txt")
|
0d43d9ec130c646b8c25bbf4ff627b89a891bd58
|
e8f175cc7948afd3c5576ddbec1edd5e027e7ffc
|
/code/clean_perm_data.R
|
cb8cc80a1318245fa9787904f5c4943b67943144
|
[] |
no_license
|
acforrester/PERM_data
|
dff6fab7c05e74edb70dcde5f5c31ddef107eff7
|
9b4e582c0785a4e4992ecfce14b7ae66a5df7f37
|
refs/heads/main
| 2023-06-26T09:19:22.939695
| 2021-07-23T18:44:58
| 2021-07-23T18:44:58
| 388,882,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,512
|
r
|
clean_perm_data.R
|
## Clean PERM data
#
# Reads raw DOL PERM disclosure files for FY2000-FY2007, harmonises the
# column names and code values across the changing yearly file layouts,
# and writes one cleaned CSV per fiscal year to ./data/clean/.

# load packages
# require() here only bootstraps pacman; pacman::p_load() then
# installs/attaches the actual dependencies.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(tidyverse, glue)

# PERM: clean 2000-2004 ---------------------------------------------------
# FY2000-FY2004 share one raw layout; apply the same pipeline per year.
lapply(2000:2004, function(year){
  read_csv(glue("./data/raw/Perm_external_FY{year}.txt"),
           col_names = T,
           col_types = cols(.default = "c")) %>%
    # combine into df
    # NOTE(review): bind_rows() on a single data frame is a no-op --
    # presumably left over from a version that read several files at once.
    bind_rows(.) %>%
    # names to lowercase
    rename_all(tolower) %>%
    # drop cols
    select(
      -state_case_num,
      -reduction_in_recruit,
      -region_id,
      -contact_first,
      -contact_last,
      -att_firm_name,
      -att_last,
      -att_first,
      -att_city,
      -att_state,
      -unit_of_pay_prev
    ) %>%
    # col renames
    rename(
      case_status = last_sig_event,
      decision_date = last_event_date,
      employer_name = emp_name,
      employer_city = emp_city,
      employer_state = emp_state,
      pw_soc_code = occ_code,
      pw_soc_title = occ_title,
      # NOTE(review): `prevail_wage` is used as the source for BOTH
      # pw_level and pw_amount below; dplyr::rename() errors when the
      # same column is renamed twice, so one of these mappings is likely
      # wrong -- confirm the intended source column in the raw file.
      pw_level = prevail_wage,
      pw_amount = prevail_wage,
      wage_per = unit_of_pay,
      wage_offer_from = salary
    ) %>%
    # edited variables
    mutate(
      # expand single-letter pay-unit codes into descriptive labels
      wage_per = case_when(
        wage_per == "H" ~ "Hour",
        wage_per == "W" ~ "Week",
        wage_per == "M" ~ "Month",
        wage_per == "A" ~ "Year"
      )
    ) %>%
    # write to csv
    write_csv(., glue("./data/clean/PERM_FY{year}.csv"), na = "")
})

# PERM: Clean 2005 --------------------------------------------------------
# FY2005 uses a different layout (final_case_status etc.).
dat05 <- read_csv(glue("./data/raw/Perm_external_FY2005.txt"),
                  col_names = T,
                  col_types = cols(.default = "c")) %>%
  # names to lowercase
  rename_all(tolower) %>%
  # edited variables
  mutate(
    # application type
    application_type = "PERM"
  ) %>%
  # col renames
  rename(
    case_status = final_case_status,
    decision_date = certified_date,
    pw_soc_code = prevailing_wage_soc_code,
    pw_soc_title = prevailing_wage_soc_title,
    pw_level = prevailing_wage_level,
    pw_amount = prevailing_wage_amount
  ) %>%
  # drop cols
  select(
    -prevailing_wage_source,
    -prevailing_wage_other_source
  ) %>%
  # write to csv
  write_csv(., glue("./data/clean/PERM_FY2005.csv"), na = "")

# PERM: clean 2006-2007 ---------------------------------------------------
# FY2006 has separate certified/denied date columns that are coalesced
# into a single decision_date.
dat06 <- read_csv(glue("./data/raw/Perm_external_FY2006.txt"),
                  col_names = T,
                  col_types = cols(.default = "c")) %>%
  # names to lowercase
  rename_all(tolower) %>%
  # column renames
  rename(
    case_status = final_case_status,
    pw_soc_code = prevailing_wage_soc_code,
    pw_soc_title = prevailing_wage_soc_title,
    pw_level = prevailing_wage_level,
    pw_amount = prevailing_wage_amount
  ) %>%
  # edited variables
  mutate(
    # application type
    application_type = "PERM",
    # decision date: certified date when present, otherwise denied date
    decision_date = if_else(is.na(certified_date), denied_date, certified_date)
  ) %>%
  # subset cols
  select(
    -certified_date,
    -denied_date,
    -prevailing_wage_job_title,
    -prevailing_wage_source,
    -prevailing_wage_other_source
  ) %>%
  # write to csv
  write_csv(., glue("./data/clean/PERM_FY2006.csv"), na = "")

# FY2007 switches to the "_9089" form-field naming scheme; the processing
# center is inferred from the leading letter of the case number.
dat07 <- read_csv(glue("./data/raw/Perm_external_FY2007.txt"),
                  col_names = T,
                  col_types = cols(.default = "c")) %>%
  # names to lowercase
  rename_all(tolower) %>%
  # column renames
  rename(
    pw_soc_title = pw_job_title_9089,
    pw_level = pw_level_9089,
    pw_amount = pw_amount_9089,
    wage_offer_from = wage_offer_from_9089,
    wage_offer_to = wage_offer_to_9089,
    wage_per = wage_offer_unit_of_pay_9089,
    naics_code = `2007_naics_us_code`,
    naics_title = `2007_naics_us_title`
  ) %>%
  # subset columns (drop all remaining *_9089 form fields)
  select(
    -us_economic_sector,
    -ends_with("_9089")
  ) %>%
  # make some vars
  mutate(
    processing_center = case_when(
      substr(case_no, 1, 1) == "A" ~ "Atlanta Processing Center",
      substr(case_no, 1, 1) == "C" ~ "Chicago Processing Center"
    ),
    wage_per = case_when(
      wage_per == "hr" ~ "Hour",
      wage_per == "wk" ~ "Week",
      wage_per == "bi" ~ "Bi-Weekly",
      wage_per == "mth" ~ "Month",
      wage_per == "yr" ~ "Year"
    )
  ) %>%
  # write to csv
  write_csv(., glue("./data/clean/PERM_FY2007.csv"), na = "")
## EOF
|
7e087357dd06383b59a62e63ad17852c2b04c72f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/s2dverification/examples/Spread.Rd.R
|
7f37af6318da424ccc6201a7ce12a1786ea13236
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,783
|
r
|
Spread.Rd.R
|
# Extracted example code for s2dverification::Spread().  Demonstrates
# computing ensemble-spread measures (IQR, max-min, SD, MAD) on smoothed
# anomalies and plotting each against lead time.
library(s2dverification)
### Name: Spread
### Title: Computes InterQuartile Range, Maximum-Minimum, Standard
###   Deviation and Median Absolute Deviation of the Ensemble Members
### Aliases: Spread
### Keywords: datagen
### ** Examples
# Load sample data as in Load() example:
# (this creates sampleData in the calling environment as a side effect)
example(Load)
clim <- Clim(sampleData$mod, sampleData$obs)
ano_exp <- Ano(sampleData$mod, clim$clim_exp)
# 12-month running mean applied along the lead-time dimension (dim 4)
runmean_months <- 12
dim_to_smooth <- 4  # Smooth along lead-times
smooth_ano_exp <- Smoothing(ano_exp, runmean_months, dim_to_smooth)
# Centre each member by subtracting the ensemble mean (dim 2)
smooth_ano_exp_m_sub <- smooth_ano_exp - InsertDim(Mean1Dim(smooth_ano_exp, 2,
                        narm = TRUE), 2, dim(smooth_ano_exp)[2])
# Compute spread across members and start dates (dims 2 and 3)
spread <- Spread(smooth_ano_exp_m_sub, c(2, 3))
# One plot per spread measure, each written to its own EPS file
PlotVsLTime(spread$iqr,
            toptitle = "Inter-Quartile Range between ensemble members",
            ytitle = "K", monini = 11, limits = NULL,
            listexp = c('CMIP5 IC3'), listobs = c('ERSST'), biglab = FALSE,
            hlines = c(0), fileout = 'tos_iqr.eps')
PlotVsLTime(spread$maxmin, toptitle = "Maximum minus minimum of the members",
            ytitle = "K", monini = 11, limits = NULL,
            listexp = c('CMIP5 IC3'), listobs = c('ERSST'), biglab = FALSE,
            hlines = c(0), fileout = 'tos_maxmin.eps')
PlotVsLTime(spread$sd, toptitle = "Standard deviation of the members",
            ytitle = "K", monini = 11, limits = NULL,
            listexp = c('CMIP5 IC3'), listobs = c('ERSST'), biglab = FALSE,
            hlines = c(0), fileout = 'tos_sd.eps')
PlotVsLTime(spread$mad, toptitle = "Median Absolute Deviation of the members",
            ytitle = "K", monini = 11, limits = NULL,
            listexp = c('CMIP5 IC3'), listobs = c('ERSST'), biglab = FALSE,
            hlines = c(0), fileout = 'tos_mad.eps')
|
1d6fc27aa984debc7fca501fb7d077681d4dbb54
|
ab85982a5c86c4ccb866f98c6c253d4be28a6273
|
/inv-create-script.R
|
ab9f717e018bb2655f6d3769038b843e879e06d0
|
[] |
no_license
|
welluc/inventory-processing
|
a3321188786ce2ed76d0d0db562979a480b60e21
|
267483e25cbc4b6f55a59a814a36b221e79ae81b
|
refs/heads/master
| 2020-05-29T09:16:33.495172
| 2016-10-21T19:47:07
| 2016-10-21T19:47:07
| 69,494,392
| 0
| 0
| null | 2016-10-21T19:47:07
| 2016-09-28T19:00:39
|
R
|
UTF-8
|
R
| false
| false
| 1,692
|
r
|
inv-create-script.R
|
##################################################
# Inventory Create Script
##################################################
# setwd and source inv-global-datapath.R
source('inv-pkgs.R')
source('inv-global-vars.R')
source('inv-functions.R')
source('invCreateWrapper.R')

# There are 775 input files, processed in 4 evaluation groups of
# 776/4 = 194 files each.  The "+ 1" offsets are required by the loop
# logic in rbindloop (look at the creation of b and the looping over b
# there to understand); reading stops at (w_maxfileid - 1), so the last
# group's upper bound is 776 rather than 4*194 + 1 = 777.
#
# The four original invCreateWrapper() calls differed only in w_evalid,
# w_minfileid and w_maxfileid, so they are collapsed into one loop.
n_files  <- 776
n_groups <- 4
grp_size <- n_files / n_groups

for (grp in seq_len(n_groups)) {
  invCreateWrapper(
    w_evalid         = sprintf("%02d", grp),
    w_minfileid      = (grp - 1) * grp_size + 1,
    # last group: read stops at (w_maxfileid - 1) and there are 775 files
    w_maxfileid      = if (grp < n_groups) grp * grp_size + 1 else n_files,
    w_nidgrp         = 20,
    w_invcolid       = g_invcol,
    w_timingfilename = 'CreateInvTiming',
    w_newcolid       = g_newcolname,
    w_convflag       = g_convflag,
    w_convcol        = g_convcolname,
    w_convcolfun     = g_convcolfunname,
    w_savefile       = 'invdt',
    w_writeout       = TRUE
  )
}
|
b07b4465caf0a9a7c0a8e5f17dfc138b596c9263
|
c491aa065591acc508b31cb6dd6829603b8d11ee
|
/man/BudgetItaly.Rd
|
f88dd708a81384691c80f58d3f9d3b079eb28789
|
[] |
no_license
|
sbgraves237/Ecdat
|
8ce323c1d334bcbd5549a5b3c63437a4ec638070
|
5982c446a147b4ee585aebe0f8053bb0f2d8dd1f
|
refs/heads/master
| 2023-05-22T06:07:56.876130
| 2023-05-06T16:10:33
| 2023-05-06T16:10:33
| 193,949,304
| 2
| 1
| null | 2021-12-26T13:06:46
| 2019-06-26T17:26:16
|
R
|
UTF-8
|
R
| false
| false
| 1,163
|
rd
|
BudgetItaly.Rd
|
\name{BudgetItaly}
\docType{data}
\alias{BudgetItaly}
\title{Budget Shares for Italian Households }
\description{
a cross-section from 1973 to 1992
\emph{number of observations} : 1729
\emph{observation} : households
\emph{country} : Italy
}
\usage{data(BudgetItaly)}
\format{A dataframe containing :
\describe{
\item{wfood}{food share}
\item{whouse}{housing and fuels share}
\item{wmisc}{miscellaneous share}
\item{pfood}{food price}
\item{phouse}{housing and fuels price}
\item{pmisc}{miscellaneous price}
\item{totexp}{total expenditure}
\item{year}{year }
\item{income}{income}
\item{size}{household size}
\item{pct}{cellule weight}
}
}
\source{
Bollino, Carlo Andrea, Frederico Perali and
Nicola Rossi (2000) \dQuote{Linear household
technologies}, \emph{Journal of Applied
Econometrics}, \bold{15(3)}, 253--274.
}
\references{
Journal of Applied Econometrics data archive : \url{http://qed.econ.queensu.ca/jae/}.
}
\seealso{\code{\link{Index.Source}}, \code{\link{Index.Economics}}, \code{\link{Index.Econometrics}}, \code{\link{Index.Observations}}}
\keyword{datasets}
|
52e0ff43a43ad647163377627a6c480091d760ae
|
f5b8fb6e644a462279376fe14a53c625eae0e945
|
/man/getForwardRates.Rd
|
331b194c92ce893b74d5f8b55fc72e796c624307
|
[] |
no_license
|
cran/ESG
|
42ac2dbfca49a471c2a0eb45f43bf21f6a705d6a
|
8bd871cb5c421f6b31bcac64fa574ac0d985b673
|
refs/heads/master
| 2023-08-31T15:37:26.565993
| 2023-08-29T09:10:07
| 2023-08-29T11:30:33
| 17,678,957
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 621
|
rd
|
getForwardRates.Rd
|
\name{getForwardRates}
\alias{getForwardRates}
\alias{getForwardRates,Scenarios-method}
\title{getForwardRates method}
\description{
Get the forward rates for a Scenarios object.
}
\examples{
scenarios1 <- new("Scenarios")
scenarios1 <- setParamsBaseScenarios(scenarios1, horizon=5, nScenarios=10)
scenarios1 <- setRiskParamsScenarios(scenarios1, vol=.1, k=2,volStock=.2,
volRealEstate=.15, volDefault=.2, alpha=.1,beta=1, eta=.05,rho=.5, stock0=100,realEstate0=50,
liquiditySpread0=.01, defaultSpread0=.01)
data(ZC)
scenarios1 <- setForwardRates(scenarios1, ZC, horizon=5)
getForwardRates(scenarios1)
}
|
3127d2191e4b0c612f1b91c8047faf26874d95e2
|
f9189e1a57f733f0fc79f9453008377d4aaa5a3b
|
/R/selct.R
|
2e4985532bc0028fc807d121003c9cb347dfbaad
|
[] |
no_license
|
junruidi/actcool
|
763df94e188f1bd1989432dee8b5ac21fd8cbbfe
|
09d563e6f391290342a9cbba1ebbb899460399e0
|
refs/heads/master
| 2020-06-10T21:01:07.379576
| 2016-12-14T21:05:26
| 2016-12-14T21:05:26
| 75,874,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 499
|
r
|
selct.R
|
#' selct
#'
#' Extract a single daily activity record for a given subject and day.
#'
#' @param data dataframe containing subject-day level activity counts; the
#'   first two columns are \code{ID} and \code{Day}, the remaining columns
#'   are the per-epoch activity counts
#' @param id id of the subject
#' @param day day number of the subject
#' @return a vector containing the daily activity record
#' @keywords selct
#' @export
#' @examples
#' data(act)
#' selct(data = act, id = 21034, day = 1)
#'
#'
#
selct <- function(data, id, day) {
  # Locate the matching subject-day row(s); which() silently drops NA matches.
  row_idx <- which(data$ID == id & data$Day == day)
  # Drop the two ID columns, then flatten the remaining counts to a vector.
  counts <- data[row_idx, -c(1:2)]
  as.vector(t(counts))
}
|
099c96be3bbcba90a8fc3383205973b69125b210
|
8949716b21f47957f0b572875ce34d8e46a5f33c
|
/Week6-SumSquares.R
|
5ecaf0eab2a03ae805bbcfd4552ded6ad8aaf06c
|
[] |
no_license
|
Ali93NY/Linear-Regression-Class
|
1b1ada65ea046e934c2709da95e1df46d9634ece
|
9eec1c5315dc1880b4b919065e71eac6057cc2bd
|
refs/heads/master
| 2020-09-19T22:53:24.072917
| 2019-11-27T01:37:59
| 2019-11-27T01:37:59
| 224,317,163
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,403
|
r
|
Week6-SumSquares.R
|
# Week 6: decomposing sums of squares in multiple regression (Kutner et al.,
# Chapter 7, Table 7.1 data).  This is a teaching transcript: console output
# from a previous run is pasted back in as comments after each command.
# Get data
data <- read.table("http://users.stat.ufl.edu/~rrandles/sta4210/Rclassnotes/data/textdatasets/KutnerData/Chapter%20%207%20Data%20Sets/CH07TA01.txt", header = FALSE)
data
# Changing the variables to have more meaningful names
# NOTE(review): paste() on a single string is a no-op; plain "X1" etc.
# would be equivalent.
names(data)[1]<-paste("X1")
names(data)[2]<-paste("X2")
names(data)[3]<-paste("X3")
names(data)[4]<-paste("Y")
# NOTE(review): attach() puts the columns on the search path so X1..Y can be
# used directly below; generally discouraged outside of teaching scripts.
attach(data)
# Total sum of squares:
(SST = sum((Y - mean(Y))^2))
# [1] 495.3895
# Using only X1 as a predictor:
reg1 = lm(Y ~ X1)
summary(reg1)
# Call:
#   lm(formula = Y ~ X1)
#
# Residuals:
#   Min      1Q  Median      3Q     Max
# -6.1195 -2.1904  0.6735  1.9383  3.8523
#
# Coefficients:
#   Estimate Std. Error t value Pr(>|t|)
# (Intercept)  -1.4961     3.3192  -0.451    0.658
# X1            0.8572     0.1288   6.656 3.02e-06 ***
#   ---
#   Signif. codes:  0 *** 0.001 ** 0.01 * 0.05 . 0.1  1
#
# Residual standard error: 2.82 on 18 degrees of freedom
# Multiple R-squared:  0.7111,	Adjusted R-squared:  0.695
# F-statistic:  44.3 on 1 and 18 DF,  p-value: 3.024e-06
# Side note: to obtain the covariance matrix of the coefficients:
vcov(reg1)
anova(reg1)
# Analysis of Variance Table
#
# Response: Y
#           Df Sum Sq Mean Sq F value    Pr(>F)
# X1         1 352.27  352.27  44.305 3.024e-06 ***
# Residuals 18 143.12    7.95
# ---
#   Signif. codes:  0 *** 0.001 ** 0.01 * 0.05 . 0.1  1
# SSR(X1) = 352.27
# SSE(X1) = 143.12
# Alternative for SST: sum of all sums of squares in the ANOVA table
sum(anova(reg1)[,2])
# Using only X2 as a predictor:
reg2 = lm(Y ~ X2)
anova(reg2)
# Analysis of Variance Table
#
# Response: Y
#           Df Sum Sq Mean Sq F value  Pr(>F)
# X2         1 381.97  381.97  60.617 3.6e-07 ***
# Residuals 18 113.42    6.30
# ---
#   Signif. codes:  0 *** 0.001 ** 0.01 * 0.05 . 0.1  1
# Therefore, SSR(X2) = 381.97
# Using both X1 and X2 as predictors:
reg12 = lm(Y ~ X1 + X2)
anova(reg12)
# Analysis of Variance Table
#
# Response: Y
#           Df Sum Sq Mean Sq F value    Pr(>F)
# X1         1 352.27  352.27 54.4661 1.075e-06 ***
# X2         1  33.17   33.17  5.1284    0.0369 *    # X2 = 33.17 ? ; X2 given X1
# Residuals 17 109.95    6.47
# ---
#   Signif. codes:  0 *** 0.001 ** 0.01 * 0.05 . 0.1  1
# Sequential (Type I) sums of squares:
# SSR(X2|X1) = 33.17
# SSR(X1, X2) = 352.27 + 33.17 = 385.44
# Regression with all 3 predictors:
reg123 = lm(Y ~ X1+X2+X3)
anova(reg123)
# Analysis of Variance Table
#
# Response: Y
#           Df Sum Sq Mean Sq F value    Pr(>F)
# X1         1 352.27  352.27 57.2768 1.131e-06 ***
# X2         1  33.17   33.17  5.3931   0.03373 *
# X3         1  11.55   11.55  1.8773   0.18956
# Residuals 16  98.40    6.15
# ---
#   Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# SSR(X3|X1, X2) = 11.55
# SSR(X1, X2, X3) = 396.99
# Partial F-statistic for Ho: b3 = 0 is F* = 1.8773
# Corresponding p-value = 0.18956. Therefore X3 is not significant if X1 and X2 are already in the model
# Partial F-test with built-in command
anova(reg1,reg123)
# Analysis of Variance Table
#
# Model 1: Y ~ X1
# Model 2: Y ~ X1 + X2 + X3
#   Res.Df     RSS Df Sum of Sq      F  Pr(>F)
# 1     18 143.120
# 2     16  98.405  2    44.715 3.6352 0.04995 *
# ---
# SSR(X2, X3|X1) = 44.715 = 396.99 - 352.27
# Tests
anova(reg123)
# Analysis of Variance Table
#
# Response: Y
#           Df Sum Sq Mean Sq F value    Pr(>F)
# X1         1 352.27  352.27 57.2768 1.131e-06 ***
# X2         1  33.17   33.17  5.3931   0.03373 *
# X3         1  11.55   11.55  1.8773   0.18956
# Residuals 16  98.40    6.15
# F = 1.8773 is for testing
# Full:  Y = b0 + b1*X1 + b2*X2 + b3*X3
# Reduced  Y = b0 + b1*X1 + b2*X2
# For a single added predictor, the partial F equals the squared t statistic:
# 1.8773 = t^2 for b3 from
summary(reg123)
# Call:
#   lm(formula = Y ~ X1 + X2 + X3)
#
# Residuals:
#   Min      1Q  Median      3Q     Max
# -3.7263 -1.6111  0.3923  1.4656  4.1277
#
# Coefficients:
#   Estimate Std. Error t value Pr(>|t|)
# (Intercept)  117.085     99.782   1.173    0.258
# X1             4.334      3.016   1.437    0.170
# X2            -2.857      2.582  -1.106    0.285
# X3            -2.186      1.595  -1.370    0.190
#
# Residual standard error: 2.48 on 16 degrees of freedom
# Multiple R-squared:  0.8014,	Adjusted R-squared:  0.7641
# F-statistic: 21.52 on 3 and 16 DF,  p-value: 7.343e-06
(-1.37)^2
#[1] 1.8769
# Exercise: What is the square of t stat for X1 equal to?
1.437^2
# [1] 2.064969    # significance of X1 given X2 & X3
# Reordering predictors changes the sequential sums of squares:
reg231 = lm(Y~ X2+X3+X1)
anova(reg231)
# Analysis of Variance Table
#
# Response: Y
#           Df Sum Sq Mean Sq F value    Pr(>F)
# X2         1 381.97  381.97 62.1052 6.735e-07 ***
# X3         1   2.31    2.31  0.3762    0.5483
# X1         1  12.70   12.70  2.0657    0.1699
# Residuals 16  98.40    6.15
# 2.0657 is partial F stat for testing if X1 should be dropped from
# a model with X1, X2, and X3
# --------------------------------------------------------------------
# Task in class
# B1 = B3 = 0 --- F?
# SSR (X2|X1X3) ?
# My attempt
# NOTE(review): reg2 and reg13 are not nested, so this anova() comparison
# is not a valid partial F-test -- hence the "Right Way" below.
reg13 = lm( Y ~ X1 + X3)
anova(reg2, reg13)
# Right Way
reg132 = lm(Y ~ X1+X3+X2)
anova(reg132)
# SSR (X2|X1X3) = 7.53
reg213 = lm(Y ~ X2+X1+X3)
anova(reg2,reg213)
# B1 = B3 = ........ F = 1.221
# Critical value for the test at alpha = 0.05 with (2, 16) df:
qf(0.95,2,16)
# 3.633723
# -----------------------------------------------------------
anova(reg1,reg123)
# Analysis of Variance Table
#
# Model 1: Y ~ X1
# Model 2: Y ~ X1 + X2 + X3
#   Res.Df     RSS Df Sum of Sq      F  Pr(>F)
# 1     18 143.120
# 2     16  98.405  2    44.715 3.6352 0.04995 *
# F = 3.6352 is for testing
# Full:  Y = b0 + b1*X1 + b2*X2 + b3*X3
# Reduced  Y = b0 + b1*X1
# Compute R^2(Y,X2|X1) = SSR(X2|X1)/SSE(X1) = 33.17/143.12 = 0.2317636
# Using the rsq package:
# NOTE(review): install.packages() inside a script re-installs on every run;
# usually guarded with requireNamespace() instead.
install.packages("rsq")
library(rsq)
rsq.partial(reg12, reg1)
# Exercise 1: Compute both manually and with the rsq package:
# a) R^2(Y, X1 | X2)
# b) R^2(Y, X3 | X1, X2)
# c) Obtain the standardized regression model using only X1 as a predictor and verify the formula for the slope.
# a)
reg21 = lm(Y ~ X2 + X1)
rsq.partial(reg21 , reg2)
anova(reg21)
# SSR(X1|X2) = 3.47
anova(reg2)
# SSE(X2) = 113.42
# 3.47/113.42 = 0.30
# b)
rsq.partial(reg123, reg12)
# c)
summary(lm(scale(Y) ~ scale(X1)))
# b1* = 0.8433
# back-transform the standardized slope to the raw scale: b1 = b1* sd(Y)/sd(X1)
0.8433*sd(Y)/sd(X1)
summary(reg1)
|
017538104eab6ff0907c3803eb2684e42dad2c84
|
8b0bd121bc542c9431dec1d385c3376ab4238d0b
|
/runDESeq2_transcriptomeGenesContrastGroups.R
|
02731bde046134795234be63bc2c6df6b38bc92d
|
[] |
no_license
|
USDA-ARS-GBRU/RNA-seq_JNV1
|
5e13481d53394c15d499905faa0473daf82f5356
|
10f5a1913ffb2cf23d0c5912c029b74190547ad7
|
refs/heads/master
| 2021-09-17T06:32:06.803104
| 2018-06-28T16:21:20
| 2018-06-28T16:21:20
| 126,068,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,128
|
r
|
runDESeq2_transcriptomeGenesContrastGroups.R
|
# Differential-expression analysis with DESeq2 on kallisto gene-level counts.
# Reads the experiment design and count matrix, fits a single-factor
# (group) model, then for each control-vs-hypoxic contrast writes the full
# results table to CSV and prints a summary to summary.txt via sink().
# NOTE(review): `args` is captured but never used below -- confirm whether
# command-line arguments were meant to drive anything.
args <- commandArgs(TRUE)
colData <- read.csv("experimentDesign_4level.txt",sep="\t",row.names=1)
summary(colData)
head(colData)
countData <- as.matrix(read.csv("geneskallistoNoRiboRNACounts.txt",sep="\t",row.names="geneID"))
summary(countData)
#countData["FVEG_10402","tg1b4hrOpen_42738000"]
all(rownames(colData) %in% colnames(countData)) #make sure names match
countData <- countData[, rownames(colData)] #reorder count data
all(rownames(colData) == colnames(countData)) #check order
#s <- "-0.438185 -0.766791 0.695282\n0.759100 0.034400 0.524807"
#x <- lapply(strsplit(s, "\n")[[1]], function(x) {as.numeric(strsplit(x, '\\s+')[[1]])})
# Contrasts encoded as "reference treatment" pairs, one pair per line;
# parsed into a list of length-2 character vectors c(reference, treatment).
contrS <- "4h_control 4h_hypoxic\n24h_control 24h_hypoxic"
contr <- lapply(strsplit(contrS, "\n")[[1]], function(x) {strsplit(x, '\\s+')[[1]]})
contr
library("DESeq2")
dds <- DESeqDataSetFromMatrix(countData = countData,colData = colData,design = ~ group)
dds <- DESeq(dds)
# rawCounts <- counts(dds, normalized=FALSE)
# write.csv(as.data.frame(rawCounts),file="rawEstCounts.csv")
# normCounts <- counts(dds, normalized=TRUE)
# write.csv(as.data.frame(normCounts),file="normEstCounts.csv")
library("pheatmap")
# regularized-log transform (used only by the commented-out heatmap code)
rld <- rlog(dds, blind=FALSE)
# redirect all print/summary output below into summary.txt
sink("summary.txt")
for (i in contr) {
  # NOTE(review): `name` is built as i[2]__i[1] (treatment__reference) but
  # the results file below is written as i[1]__i[2] -- confirm the
  # intended naming order is different on purpose.
  name <- paste(i[2],"__",i[1], sep="")
  results1 <- results(dds, alpha=.01,contrast=c("group",as.character(i[2]),as.character(i[1])))
  #head(results1)
  print(name) #MUST USE PRINT
  summary(results1)
  # order genes by adjusted p-value before writing out
  resOrdered <- results1[order(results1$padj),]
  resDF <- as.data.frame(resOrdered)
  write.csv(resDF,file=paste(i[1],"__",i[2],"_results.csv", sep=""))
  # restrict to significant genes (padj < .01) for the fold-change summaries
  sub <- subset(resOrdered, padj < .01)
  subDF <- as.data.frame(sub)
  print("absolute log2change average of significant")
  print(paste(name,mean(abs(subDF$log2FoldChange)), sep = " "))
  print("log2change average of significant")
  print(paste(name,mean(subDF$log2FoldChange), sep = " "))
  # clist <- head(row.names(resDF), n=200)
  # sub1Rld <- assay(rld)[clist,]
  # sub1RldNorm = t(apply(sub1Rld, 1, function(x)(x-min(x))/(max(x)-min(x))))
  # df <- as.data.frame(colData(dds)[,c("group")])
  # print(head(df))
  # pdf(paste(i[1],"_",i[2],"_heatmapCluster_Top500_AbsoluteScale", sep=""),height=20)
  # pheatmap(sub1Rld, cluster_rows=TRUE, cluster_cols=FALSE, show_colnames=FALSE, fontsize_row=3, annotation_col=df)
  # dev.off()
  # pdf(paste(i[1],"_",i[2],"_heatmapCluster_Top500_ZeroToOneScale", sep=""),height=20)
  # pheatmap(sub1RldNorm, cluster_rows=TRUE, cluster_cols=FALSE, show_colnames=FALSE, fontsize_row=3, annotation_col=df)
  # dev.off()
}
# restore console output
sink()
#
# sampleDists <- dist(t(assay(rld)))
# library("RColorBrewer")
# sampleDistMatrix <- as.matrix(sampleDists)
# rownames(sampleDistMatrix) <- paste(rld$time, rld$condition, sep="_")
# colnames(sampleDistMatrix) <- NULL
# colors <- colorRampPalette(rev(brewer.pal(9,"Blues")))(255)
# pdf("heatMapSamples.pdf")
# pheatmap(sampleDistMatrix, clustering_distance_rows=sampleDists, clustering_distance_cols=sampleDists, col=colors)
# dev.off()
# pdf("pcaSamples.pdf")
# plotPCA(rld, intgroup=c("condition","time"))
# dev.off()
|
2386b70d9341dda0fe973a8f1134d4cb29da1f8e
|
3593fdf70b57effc2abff5004209220aac2c7f41
|
/R/Stats_NPRatio.R
|
dfb43c703cbf486fc78ef64f6a3b320729286b78
|
[] |
no_license
|
ShunHasegawa/WTC_IEM
|
dcc00054709c59acf226044c5aa3ddcc09b6da16
|
3ffb6c0f306ac366e61d6a2e5de02c26da30501d
|
refs/heads/master
| 2016-09-06T10:46:05.650853
| 2015-08-31T21:06:30
| 2015-08-31T21:06:30
| 20,788,741
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,792
|
r
|
Stats_NPRatio.R
|
## --------Stat_WTC_IEM_ChMean_NPRatio
# Mixed-model analysis of the N:P ratio from IEM data (IEM_DF, created
# upstream).  Fits a temperature-by-time lmer model, simplifies it, runs an
# ANCOVA with soil covariates, and ends with a summary section that reprints
# the key model outputs (intended to be pulled into a report via knitr chunk
# labels, e.g. "## ----" markers).
# NOTE(review): bxplts(), stepLmer() and TransVirsreg() are project helper
# functions defined elsewhere in this repository.
#############
# N:P ratio #
#############
## ---- Stat_FACE_IEM_Analyse_NP
############
# NP ratio #
############
# inspect candidate transformations of the response
bxplts(value= "gmNP", data= IEM_DF)
bxplts(value= "NP", data= IEM_DF)
# use log (geometric mean)
# NOTE(review): comment says log but the model below uses sqrt -- confirm
# which transformation was intended here.
Iml_NP <- lmer(sqrt(gmNP) ~ temp * Time + (1|Chamber), data = IEM_DF)
Anova(Iml_NP, test.statistic = "F")
# The final model is
Fml_NP <- stepLmer(Iml_NP)
Anova(Fml_NP)
AnvF_NP <- Anova(Fml_NP, test.statistic = "F")
AnvF_NP
# model diagnosis
plot(Fml_NP)
qqnorm(resid(Fml_NP))
qqline(resid(Fml_NP))
############################
# ANCOVA fit soil variable #
############################
#######################
# plot soil variables #
#######################
# each chamber
xyplot(gmNP ~ moist|temp, groups = Chamber, type = c("r", "p"), data = IEM_DF)
xyplot(gmNP ~ moist|Chamber, type = c("r", "p"), data = IEM_DF)
# each time
xyplot(gmNP ~ moist|temp, groups = Time, type = c("r", "p"), data = IEM_DF)
xyplot(gmNP ~ moist|Time, type = c("r", "p"), data = IEM_DF)
scatterplotMatrix(~gmNP + moist + Temp5_Mean|temp, data = IEM_DF, diag = "boxplot")
scatterplotMatrix(~log(gmNP) + log(moist) + Temp5_Mean|temp, data = IEM_DF, diag = "boxplot")
# ANCOVA: temperature treatment crossed with soil moisture and soil temperature
Iml_ancv_NP <- lmer(log(gmNP) ~ temp * (moist + Temp5_Mean) + (1|Chamber), data = IEM_DF)
Anova(Iml_ancv_NP)
Fml_ancv_NP <- stepLmer(Iml_ancv_NP, alpha.fixed = .1)
AnvF_ancv_NP <- Anova(Fml_ancv_NP, test.statistic = "F")
AnvF_ancv_NP
# model diagnosis
plot(Fml_ancv_NP)
qqnorm(resid(Fml_ancv_NP))
qqline(resid(Fml_ancv_NP))
# visualise the fitted covariate effects back-transformed from the log scale
par(mfrow = c(1, 2))
TransVirsreg(visreg(Fml_ancv_NP, xvar = "moist", by = "temp", plot = FALSE),
             overlay = TRUE,
             trans = exp,
             point = list(col = c(1, 2), cex = 1),
             line = list(col = c(1, 2)))
TransVirsreg(visreg(Fml_ancv_NP, xvar = "Temp5_Mean", by = "temp", plot = FALSE),
             overlay = TRUE,
             trans = exp,
             point = list(col = c(1, 2), cex = 1),
             line = list(col = c(1, 2)))
## ----Stat_WTC_IEM_NPRatio_Smmry
# Summary section: reprints model calls, ANOVA tables and the covariate
# plots for the report.
Iml_NP@call
Anova(Iml_NP)
Fml_NP@call
Anova(Fml_NP)
AnvF_NP
# ANCOVA
Iml_ancv_NP@call
Anova(Iml_ancv_NP)
Fml_ancv_NP
# Chi test
Anova(Iml_ancv_NP)
# F test
AnvF_ancv_NP
par(mfrow = c(1, 2))
TransVirsreg(visreg(Fml_ancv_NP, xvar = "moist", by = "temp", plot = FALSE),
             overlay = TRUE,
             trans = exp,
             point = list(col = c(1, 2), cex = 1),
             line = list(col = c(1, 2)))
TransVirsreg(visreg(Fml_ancv_NP, xvar = "Temp5_Mean", by = "temp", plot = FALSE),
             overlay = TRUE,
             trans = exp,
             point = list(col = c(1, 2), cex = 1),
             line = list(col = c(1, 2)))
|
06be10f3960cbf0668f7fbc2ba5000c57043175d
|
c3543b71b1914937d460ca7409da9653dbeff7d4
|
/R/Rpostgis.R
|
64ba74a7424af65dec1ec618f743b14406b0c2ca
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
jmlondon/Rpostgis
|
655dc460eb3aa0246ab3b6a6c5165ff315643f5e
|
e79078dc997a2cedba0e8319a5c4b58e8da1b00c
|
refs/heads/master
| 2021-01-01T17:22:03.107814
| 2015-05-19T02:20:37
| 2015-05-19T02:20:37
| 35,519,803
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 615
|
r
|
Rpostgis.R
|
#' Rpostgis: A package for import/export of spatial data from PostGIS
#'
#' Rpostgis provides simple access to PostgreSQL/PostGIS spatial databases
#' without the need to build the rgdal packages from source (pre-built binaries
#' on CRAN are not built against the PostgreSQL/PostGIS libraries). Initially,
#' the focus of this package is on reading and writing data, but further
#' development for various spatial functions could be considered.
#'
#' @section Rpostgis functions:
#' \itemize{
#' \item dbReadSpatial
#' \item dbWriteSpatial
#' }
#'
#' @docType package
#' @name Rpostgis
NULL
#> NULL
|
3a3157d9ab1b476d5df5574b092736d845aa9028
|
29d87698c80e23cad4d31dafad48fee6a4e899fb
|
/R/models.R
|
2fa2090542ce06c67848abe7b309672fb6aaf4f2
|
[] |
no_license
|
fostergeotech/Vs30_NZ
|
56459df71b8d0148bf89cfe548a78b5f707c69bf
|
2760af63199f48ed326e370ccfd9ec8a78891aa2
|
refs/heads/master
| 2020-04-10T15:42:52.496050
| 2020-01-16T22:39:57
| 2020-01-16T22:39:57
| 161,119,831
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,868
|
r
|
models.R
|
# models.R
#
# All models - general functions for calling specific models.
#
# The model lists below are used by other functions to decide which models
# are applicable in a given context.  For example, the geology map
# processing script assigns Vs30 on a per-polygon basis and therefore does
# not / cannot use slope, so items in hybMODELs are not used there.
#
# IMPORTANT: the assignment of model estimates could not be fully automated
# in all scripts, so some manual updates are needed every time a new model
# is added.  Do a "grep -i manually R/*" to find the notes about where
# this must be done.  (At the moment it's the following scripts/functions:
#   * processQmap.R
#   * classifyThings.R
#   * vspr.R
#   * models.R (this file)
#   * modelsUpdate.R
# )
setwd("~/VsMap")

geoMODELs <- c("AhdiAK", "YongCA")   # Geology-only
updMODELs <- c("AhdiAK_noQ3",        # Bayes updated models - added 20171129
               "YongCA_noQ3")        # Bayes updated models - added 20171219
hybMODELs <- c("AhdiAK_noQ3_hyb09c") # hybrid models
wtdMODELs <- c("AhdiYongWeighted1")  # weighted model(s)
allMODELs <- c(geoMODELs, updMODELs, hybMODELs, wtdMODELs)

# In the case of weighted models, for both individual vspr points
# (classifyThings.R, vspr.R) AND for rasters
# (makeRaster_AhdiYongWeighted1.R, makeRaster_AhdiYongWeighted1_sigma.R),
# the weighted Vs30 and sigma estimates are generated by log averaging and
# square root of sum of squares.  There is no need for a unique
# MODEL_AhdiYongWeighted1.R, so that file was removed and is not sourced
# below.
allMODELsButWtd <- c(geoMODELs, updMODELs, hybMODELs)

# Source the implementation file for every non-weighted model.
for (model_name in allMODELsButWtd) {
  source(paste0("R/MODEL_", model_name, ".R"))
}
|
666e851d4fd0e7afc9023a9b265fc2ca4ca9ef68
|
7c39da976f28af016e5b1f847e68473c659ea05d
|
/man/listFusionData.Rd
|
8f85212f3fe7416f2365fa8d3742be10106fe70a
|
[] |
no_license
|
cancer-genomics/trellis
|
b389d5e03959f8c6a4ee7f187f7749048e586e03
|
5d90b1c903c09386e239c01c10c0613bbd89bc5f
|
refs/heads/master
| 2023-02-24T05:59:44.877181
| 2023-01-09T20:38:36
| 2023-01-09T20:38:36
| 59,804,763
| 3
| 1
| null | 2023-01-11T05:22:52
| 2016-05-27T04:45:14
|
R
|
UTF-8
|
R
| false
| true
| 427
|
rd
|
listFusionData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uniprot.R
\name{listFusionData}
\alias{listFusionData}
\title{Collect fusion-related data into a single list}
\usage{
listFusionData(rlist, fusions)
}
\arguments{
\item{rlist}{a \code{RearrangementList}}
\item{fusions}{an object returned by \code{fusionList}}
}
\value{
a named list
}
\description{
Collect fusion-related data into a single list
}
|
d753280d33a247b582fbf352880a8f26fd5e4e3c
|
2ef9859dc1c051598bbce26fa88450d21567882e
|
/Code/2_Data_Analysis/table_figure_log_reg_model.R
|
5001700286d6f75c304b8c7fa650f1d2ae29739a
|
[] |
no_license
|
noispuc/Peres_etal_PublicHealth_Socio_demographic_COVID19_mortality
|
745a636354f8dd06634c50229d6760b189930d5e
|
62f0522fcad5fbaa37c265c1265f2d1f66ee6970
|
refs/heads/main
| 2023-03-01T20:53:33.095112
| 2021-02-04T21:42:08
| 2021-02-04T21:42:08
| 328,505,306
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,674
|
r
|
table_figure_log_reg_model.R
|
# Fits mixed-effects logistic regression models of in-hospital COVID-19
# mortality (EVOLUCAO) on socio-demographic covariates and comorbidities,
# exports the coefficient tables to Excel, and draws the odds-ratio forest
# plot (Figure 3).
# Libraries ---------------------------------------------------------------
library(tidyverse)
library(lme4)
library(broom.mixed)
library(gtsummary)
# Data input --------------------------------------------------------------
srag_outcome <- vroom::vroom("Input/srag_filtrado_08_08_modelagem.csv.gz")
# Analysis data set: complete-case style coding (NAs left as NA).
# NOTE(review): this pipeline and srag_descritiva_missing below are nearly
# identical except for the "Not Reported" handling -- a shared helper
# would avoid the duplication; kept as-is to preserve behavior.
srag_descritiva <-
  srag_outcome %>%
  select(CS_SEXO, FAIXA_IDADE, CS_RACA, CS_ESCOL_N, REGIAO,
         EVOLUCAO, CARDIOPATI, HEMATOLOGI, HEPATICA, DIABETES,
         NEUROLOGIC, PNEUMOPATI, IMUNODEPRE, RENAL, OBESIDADE) %>%
  mutate_at(
    c("CARDIOPATI", "HEMATOLOGI", "HEPATICA", "DIABETES",
      "NEUROLOGIC", "PNEUMOPATI", "IMUNODEPRE", "RENAL", "OBESIDADE",
      "EVOLUCAO"),
    as.factor
  ) %>%
  # set factor reference levels so odds ratios are relative to the first level
  mutate(CS_SEXO = factor(CS_SEXO, levels = c("Male", "Female"))) %>%
  mutate(CS_RACA = factor(CS_RACA, levels = c("White", "Black/Brown", "Asian", "Indigenous"))) %>%
  mutate(CS_ESCOL_N = factor(CS_ESCOL_N, levels = c("College/University", "High school", "Up to high school", "Illiterate"))) %>%
  mutate(REGIAO = factor(REGIAO, levels = c("South", "Southeast", "Central-West", "Northeast", "North"))) %>%
  # outcome: 1 = death, 0 = otherwise
  mutate(EVOLUCAO = ifelse(EVOLUCAO == "Death", 1, 0)) %>%
  mutate(FAIXA_IDADE = as.factor(FAIXA_IDADE))
# Sensitivity data set: missing covariate values kept as explicit
# "Not Reported" levels (region missing is dropped).
srag_descritiva_missing <-
  srag_outcome %>%
  filter(!is.na(REGIAO)) %>%
  select(CS_SEXO, FAIXA_IDADE, CS_RACA, CS_ESCOL_N, REGIAO,
         EVOLUCAO, CARDIOPATI, HEMATOLOGI, HEPATICA, DIABETES,
         NEUROLOGIC, PNEUMOPATI, IMUNODEPRE, RENAL, OBESIDADE) %>%
  mutate_at(
    c("CARDIOPATI", "HEMATOLOGI", "HEPATICA", "DIABETES",
      "NEUROLOGIC", "PNEUMOPATI", "IMUNODEPRE", "RENAL", "OBESIDADE",
      "EVOLUCAO"),
    as.factor
  ) %>%
  # mutate_all(function(x){ifelse(is.na(x), "Not reported", x)}) %>%
  mutate(CS_SEXO = factor(CS_SEXO, levels = c("Male", "Female"))) %>%
  mutate(CS_RACA = factor(CS_RACA, levels = c("White", "Black/Brown", "Asian", "Indigenous"))) %>%
  mutate(CS_ESCOL_N = factor(CS_ESCOL_N, levels = c("College/University", "High school", "Up to high school", "Illiterate"))) %>%
  mutate(REGIAO = factor(REGIAO, levels = c("South", "Southeast", "Central-West", "Northeast", "North"))) %>%
  mutate(EVOLUCAO = ifelse(EVOLUCAO == "Death", 1, 0)) %>%
  mutate(FAIXA_IDADE = as.factor(FAIXA_IDADE)) %>%
  # turn remaining NAs into an explicit "Not Reported" factor level
  mutate_at(
    c("CS_SEXO", "CS_RACA", "CS_ESCOL_N", "CARDIOPATI", "HEMATOLOGI",
      "HEPATICA", "DIABETES", "NEUROLOGIC", "PNEUMOPATI",
      "IMUNODEPRE", "RENAL", "OBESIDADE"),
    ~fct_explicit_na(., na_level = "Not Reported")
  ) %>%
  mutate(
    EVOLUCAO = factor(EVOLUCAO)
  )
# Proportion of ICU and invasive ventilation ------------------------------
# NOTE(review): the section header above does not match what follows
# (these are the mortality logistic regressions) -- likely copied from
# another script; confirm and rename.
## IHM by Race group
# Main model: logistic regression of death on all covariates; coefficients
# exported as odds ratios with 95% CIs.
model_complete <-
  glm(EVOLUCAO ~ ., family = "binomial",
      data = srag_descritiva
  ) %>%
  broom::tidy(., exponentiate = TRUE, conf.int = TRUE)
writexl::write_xlsx(model_complete, "Output/Supplementary/model_complete.xlsx")
# model_complete <-
#   rms::lrm(EVOLUCAO ~ .,
#     data = srag_descritiva
#     )
# Sensitivity model on the "Not Reported" coding of missing covariates.
model_not_rep <-
  glm(EVOLUCAO ~ ., family = "binomial",
      data = srag_descritiva_missing
  ) %>%
  broom::tidy(., exponentiate = TRUE, conf.int = TRUE)
writexl::write_xlsx(model_not_rep, "Output/Supplementary/model_not_rep.xlsx")
# Plot - Model OR estimantes (Figure 3) -----------------------------------
# Build one row per covariate level; levels absent from the model output
# are the reference categories and get OR = 1 with no CI.
df_plot_estimates <-
  srag_descritiva %>%
  select(
    CS_SEXO,
    FAIXA_IDADE,
    CS_RACA,
    CS_ESCOL_N,
    REGIAO
  ) %>%
  distinct() %>%
  pivot_longer(CS_SEXO:REGIAO, names_to = "variable", values_to = "type") %>%
  distinct() %>%
  arrange(variable, type) %>%
  mutate(
    type = ifelse(is.na(type), "Not Reported", as.character(type))
  ) %>%
  filter(type != "Not Reported") %>%
  # model terms are named variable+level (e.g. "CS_SEXOFemale")
  mutate(var_comp = paste0(variable, type)) %>%
  left_join(
    model_complete %>%
      select(
        term, comp_estimate = estimate,
        comp_conf.low = conf.low, comp_conf.high = conf.high
      )
    , by = c("var_comp" = "term")
  ) %>%
  mutate(
    comp_estimate = ifelse(is.na(comp_estimate), 1, comp_estimate)
  ) %>%
  # manual ordering index so each variable's levels plot as one group
  mutate(
    index = 1:n(),
    index_group = case_when(
      variable == "CS_SEXO" ~ 100,
      variable == "FAIXA_IDADE" ~ 200,
      variable == "CS_ESCOL_N" ~ 300,
      variable == "REGIAO" ~ 400,
      variable == "CS_RACA" ~ 500
    ),
    order = index + index_group
  )
# Forest plot of odds ratios on a log10 x-axis; dashed line marks OR = 1.
plot_model_estimates <-
  df_plot_estimates %>%
  mutate(
    variable = factor(variable,
                      levels = c("CS_SEXO", "FAIXA_IDADE", "CS_ESCOL_N", "CS_RACA", "REGIAO"),
                      labels = c("Sex", "Age", "Level of Education", "Self-reported race", "Region"))
  ) %>%
  arrange(variable) %>%
  ggplot() +
  geom_point(aes(y = fct_reorder(type, -order), x = comp_estimate, color = variable)) +
  geom_errorbarh(aes(y = fct_reorder(type, -order), xmin = comp_conf.low, xmax = comp_conf.high, color = variable)) +
  geom_vline(aes(xintercept = 1), linetype = "dashed", size = 0.2) +
  scale_x_continuous(trans = "log10", breaks = c(0.1, 0.25, 0.5, 1, 2, 5, 10)) +
  labs(x = "Odds Ratio (95% Confidence Interval)", y = "") +
  scale_color_discrete(name = "") +
  theme_bw() +
  theme(legend.position = "right")
ggsave("Output/Figures/figure3_model_estimates.pdf", plot_model_estimates,
       units = "in", width = 7, height = 5, dpi = 900)
|
63a2d0e82c5ffee80d178b6a12005be28c6aa13c
|
b42f987b112121ca39d1bdc12e598028009f2a62
|
/tutorial_follow.R
|
ca751bc1fd58eb195a65a84b3ccb447fbdfc8c4e
|
[] |
no_license
|
marinereilly/herring2019
|
c623b92485f48eed88f4f18b5ca19d770ee94d89
|
ddab46d3f29c568176ac7b3ae019c650253bc5e6
|
refs/heads/master
| 2020-07-13T06:13:59.773141
| 2019-09-04T18:57:08
| 2019-09-04T18:57:08
| 205,013,403
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,484
|
r
|
tutorial_follow.R
|
#Quick Analysis based off of https://rviews.rstudio.com/2017/09/25/survival-analysis-with-r/
library(survival)
library(dplyr)
library(ggplot2)
library(ggfortify)
library(ranger)
#Load and Prep data
herring<-read.csv("Herring_from_history.csv")
View(herring)
# Drop the first column, then (original) columns 15:16 of the remainder.
herring<-herring[,-1]
herring<-herring[,-c(15:16)]
# event_time = duration between Start and End; two covariates dropped.
herring<-herring %>%
mutate(event_time=End-Start) %>%
select(-Correlated_Flow,-Temperature)
#KM Model Fits
# Kaplan-Meier: overall survival, then stratified by Species.
# 'Pass' is used as the event indicator.
km<- with(herring, Surv(event_time, Pass))
km_fit <- survfit(Surv(event_time, Pass) ~ 1, data=herring)
summary(km_fit)
autoplot(km_fit)
km_species_fit <- survfit(Surv(event_time, Pass) ~ Species, data=herring)
autoplot(km_species_fit)
#COX PH Model Fits
#Cox model assumes that the covariates do not vary with time. In a vignette [12] that
#accompanies the survival package Therneau, Crowson and Atkinson demonstrate that the
#Karnofsky score (karno) is, in fact, time-dependent so the assumptions for the Cox model
#are not met. The vignette authors go on to present a strategy for dealing with time
#dependent covariates.
#https://cran.r-project.org/web/packages/survival/vignettes/timedep.pdf
cox1 <- coxph(Surv(event_time, Pass) ~ Species + Fultons + USGS_Flow, data = herring)
summary(cox1)
cox1_fit <- survfit(cox1)
autoplot(cox1_fit)
# Aalen additive regression as an alternative to Cox PH.
aa_fit <-aareg(Surv(event_time, Pass) ~ Species + Fultons + USGS_Flow, data = herring)
autoplot(aa_fit)
# Random survival forest via ranger.
r_fit <- ranger(Surv(event_time, Pass) ~ Species + Fultons + USGS_Flow, data = herring,
mtry = 4,
importance = "permutation",
splitrule = "extratrees",
verbose = TRUE)
# Average the survival models
event_times <- r_fit$unique.death.times
surv_prob <- data.frame(r_fit$survival)
avg_prob <- sapply(surv_prob,mean)
# Plot the survival models for each patient
plot(r_fit$unique.death.times,r_fit$survival[1,],
type = "l",
ylim = c(0,1),
col = "red",
xlab = "Days",
ylab = "survival",
main = "Herring Survival Curves")
# Overlay curves for 20 randomly-sampled individuals, then the average.
cols <- colors()
for (n in sample(c(2:dim(herring)[1]), 20)){
lines(r_fit$unique.death.times, r_fit$survival[n,], type = "l", col = cols[n])
}
lines(event_times, avg_prob, lwd = 2)
legend(40, 0.3, legend = c('Average = black'))
# Permutation variable importance from the random survival forest.
vi <- data.frame(sort(round(r_fit$variable.importance, 4), decreasing = TRUE))
names(vi) <- "importance"
head(vi)
#looking at the documentation we may want our code to look more like:
# NOTE(review): Surv() has no 'data' argument -- this line errors as written;
# likely intended: with(herring, Surv(Start, End, Pass, type="interval2")).
survival_model<-Surv(Start, End, Pass, data=herring, type="interval2")
|
a841ad2a76b83020555e0b941ba81fe57edd80c3
|
36f8dd36e5dae24e8dcabf6efcf6f94277c3e9d5
|
/cachematrix.R
|
7869fb1dd2afa611247dae05e446c7ff2c020f03
|
[] |
no_license
|
ZofiaProkop/Rprogramming
|
30d6b107b646cadc50e06c7f22324cd4c080570a
|
61bf6009f8a5878dabe50717d84b81dfc1ae489b
|
refs/heads/master
| 2021-01-23T11:56:18.468449
| 2015-07-22T13:22:49
| 2015-07-22T13:22:49
| 39,502,915
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,066
|
r
|
cachematrix.R
|
## this pair of functions will work together to calculate and store the inverse of a matrix passed as an argument to the first of them
## makeCacheMatrix takes a matrix as an argument and creates a list containing 4 functions: set, get, setinv and getinv (see comments below)
makeCacheMatrix <- function(x = matrix()) { #takes a matrix as an argument
  # Cache container for a matrix 'x' and its (lazily computed) inverse.
  # Returns a list of four accessor functions: set, get, setinv, getinv.
  inv <- NULL # will hold the cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y # replace the stored matrix
    inv <<- NULL # invalidate the cached inverse: it belonged to the old matrix
  }
  get <- function() x # return the stored matrix
  setinv <- function(inverse) inv <<- inverse # store a computed inverse
  getinv <- function() inv # return the cached inverse (or NULL)
  # BUG FIX: 'set' was defined but omitted from the returned list, making it
  # unreachable from the returned object even though the header comment
  # promises four functions. It is now included.
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the matrix held inside the cache object
## 'x' (as produced by makeCacheMatrix). The inverse is computed at most once:
## subsequent calls return the cached value and print a short message.
cacheSolve <- function(x, ...) { #takes as an argument a list created by makeCacheMatrix
  cached <- x$getinv()
  # Cache hit: reuse the previously computed inverse.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse, stash it in the cache, and return it.
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
7aad53476596490d71ac82971640c973bbc58221
|
d153fbff4387eefdd765af1bf177294a18baeeaa
|
/data/raw-data/top_journals_for_cran_cites_articles_per_year.R
|
509b82f26a9a836b66abf0f3ccafee663431550b
|
[] |
no_license
|
benmarwick/March-2019-Cambridge-Big-Data-Archaeology
|
36c764c5a8608012b6973fc91c5896731efd841e
|
46e6c49b04f868b9625cb848f2952b512240f60f
|
refs/heads/master
| 2020-04-29T23:02:05.489775
| 2019-03-28T08:16:25
| 2019-03-28T08:16:25
| 176,465,241
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,323
|
r
|
top_journals_for_cran_cites_articles_per_year.R
|
# total number of articles per year
# copy-paste from WoS (Web of Science) year filter
#
# FIX: the original called dplyr::data_frame() -- deprecated -- without ever
# attaching dplyr, so the script errored unless dplyr happened to be loaded.
# Base data.frame() needs no extra dependency and write.csv() produces the
# same file contents.
top_journals_for_cran_cites_articles_per_year <-
  data.frame(
    year = 2007:2017,
    total_number_articles_PLOSONE =
      c(1230, 2717, 4404, 6729, 13786, 23456, 31504, 31482, 29807, 23039, 21076),
    total_number_articles_SCIENTIFICREPORTS =
      c(0, 0, 0, 0, 205, 804, 2554, 4027, 10947, 21046, 24789),
    total_number_articles_ECOLOGYANDEVOLUTION =
      c(0, 0, 0, 0, 50, 262, 430, 391, 497, 730, 981),
    total_number_articles_PEERJ =
      c(0, 0, 0, 0, 0, 0, 232, 471, 799, 1294, 1370),
    total_number_articles_FORESTECOLOGYANDMANAGEMENT =
      c(442, 707, 612, 475, 482, 526, 656, 556, 455, 606, 611),
    total_number_articles_JOURNALOFSTATISTICALSOFTWARE =
      c(63, 43, 42, 60, 102, 86, 55, 90, 89, 83, 92),
    total_number_articles_MOLECULARECOLOGY =
      c(430, 440, 428, 473, 427, 476, 469, 468, 446, 433, 500),
    total_number_articles_PROCEEDINGSOFTHEROYALSOCIETYBBIOLOGICALSCIENCES =
      c(413, 360, 540, 463, 495, 640, 547, 667, 636, 575, 591),
    total_number_articles_ECOSPHERE =
      c(0, 0, 0, 21, 137, 126, 156, 164, 296, 410, 415),
    total_number_articles_SCIENCEOFTHETOTALENVIRONMENT =
      c(582, 692, 666, 723, 660, 1024, 954, 1796, 1483, 2533, 2743)
  )

# Persist alongside the other raw-data inputs (path anchored at project root).
write.csv(top_journals_for_cran_cites_articles_per_year,
          here::here("data",
                     "raw-data",
                     "top_journals_for_cran_cites_articles_per_year.csv"))
|
ba4c19d42512784b60a7ac18637eaa4513f890a3
|
d7b37c417aa39d293fdea631f7f44acf6099861f
|
/tests/testthat/test-fetch_catalog.R
|
17f7224454e6eec1ecbfeb5960b598ee97a9c666
|
[
"MIT"
] |
permissive
|
iecastro/healthdatacsv
|
b96c4af532b3bbfd57661aa77f59721423be0c75
|
03f7f854bf6e98cad4527ad3f84717fac2567ff9
|
refs/heads/master
| 2021-07-11T04:10:40.827369
| 2020-11-05T13:25:14
| 2020-11-05T13:25:14
| 217,180,336
| 11
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,088
|
r
|
test-fetch_catalog.R
|
# Tests for fetch_catalog(): return type, messaging on bad input, and regex
# support in the 'keyword' argument. All are skipped on CRAN because they hit
# the live data.gov catalog API.
test_that("output is tibble", {
skip_on_cran()
# No filters, agency-only, keyword-only, and both filters together should all
# return a tibble.
expect_is(fetch_catalog(), "tbl_df")
expect_is(fetch_catalog(agency = "Centers for Disease Control and Prevention"),
"tbl_df")
expect_is(fetch_catalog(keyword = "adopt"), "tbl_df")
expect_is(fetch_catalog(agency = "Centers for Disease Control and Prevention",
keyword = "influenza"),
"tbl_df")
})
test_that("message is displayed for wrong argument input", {
skip_on_cran()
# Misspelled agency and/or keyword values should produce an informative
# message rather than failing silently.
expect_message(fetch_catalog("CDC"))
expect_message(fetch_catalog(keyword = "adpt"))
expect_message(fetch_catalog(agency = "cdc", keyword = "adpt"))
expect_message(fetch_catalog(agency = "cdc", keyword = "adopt"))
expect_message(fetch_catalog(agency = "Centers for Disease Control and Prevention",
keyword = "adopt"))
})
test_that("keyword accepts regex pattern", {
skip_on_cran()
# An alternation pattern should match the union of the two single-keyword
# queries (row counts must add up exactly).
expect_equal(
nrow(fetch_catalog(keyword = "influenza|adopt")),
nrow(fetch_catalog(keyword = "influenza")) +
nrow(fetch_catalog(keyword = "adopt"))
)
})
|
1813649ae9a45dd5a700482ffc07756ffc518053
|
b07be4526114276ea7e874279336f7657289cee9
|
/featureSelectors/RFE.R
|
5843fd9634454b3e3237c5093feaf09918946791
|
[] |
no_license
|
mmwind/RPredictionTestingWorkbench
|
f8bd6c018b2e87489394e3e25bc2d9e782217cbf
|
870643a4dc374e34068c9805795e4f4b2ea61e5a
|
refs/heads/master
| 2021-01-11T21:37:16.861446
| 2017-01-13T05:58:34
| 2017-01-13T05:58:34
| 78,819,836
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 581
|
r
|
RFE.R
|
# Select the most informative features using Recursive Feature Elimination (RFE)
library(caret)
library(mlbench)
# RFE: run caret's recursive feature elimination on a training set whose class
# labels are stored in the "Target" column.
#
# training: data.frame of predictors plus a "Target" column.
# Returns:  character vector of selected predictor names.
RFE <- function(training){
  targetIndex <- "Target"
  if (!targetIndex %in% names(training)) {
    stop("RFE: 'training' must contain a 'Target' column")
  }
  # FIX: the original dropped the *last* column to form the predictor matrix
  # while reading the response *by name* -- silently wrong whenever "Target"
  # is not the last column. Select predictors by name instead.
  xtrain <- training[, setdiff(names(training), targetIndex), drop = FALSE]
  ytrain <- training[, targetIndex]
  # define the control using a random forest selection function
  # (2-fold cross-validation, verbose progress)
  control <- rfeControl(functions = rfFuncs, method = "cv", number = 2, verbose = TRUE)
  # run the RFE algorithm over subset sizes 1..ncol(xtrain)
  results <- rfe(xtrain, ytrain, sizes = seq_len(ncol(xtrain)), rfeControl = control)
  return(predictors(results))
}
|
30f57e1a0d928601ee3df780c811960ddbbe524c
|
58f7e798793e68a9b22d767782d1e5e0bdde7755
|
/src/01_pipeline/00_Quarterly_Census_of_Employment_and_Wages_TO_DISK.R
|
0323b9e2a89a6c6c46910314fbc501f309b1a4df
|
[] |
no_license
|
tjvananne/dataoftheunion
|
b661e1fb654738ddc5c6cdc8af3ad5928525abb7
|
6dd67de84532dcefdc8a5dd43c821164d2f6e3bb
|
refs/heads/master
| 2022-01-20T05:53:11.141499
| 2021-12-30T19:39:31
| 2021-12-30T19:39:31
| 173,947,402
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,960
|
r
|
00_Quarterly_Census_of_Employment_and_Wages_TO_DISK.R
|
# Quarterly Census of Employment and Wages
# download and unzip to DISK (low memory solution)
# I'm targeting a machine with ~8GB RAM so that
# most people can also run this code
# INFO ------------------
# This script has been built with the intention that it is
# entirely reproducible. No need to go download a file or
# set up your directory structure a special way.
# the only pre-work necessary is to make sure you have the
# packages that I'm using installed in your environment.
# Quarterly Census of Employment and Wages
# https://www.bls.gov/cew/datatoc.htm
# SCRIPT CONFIG ------
YEAR <- 2010
ZIP_FILE_PATH <- paste0("cache/00_QCEW_", YEAR, ".zip")
FULL_FILE <- paste0("cache/00_QCEW_", YEAR, "_FULL_FILE.rds")
INDUSTRY_CODE_FILE <- paste0("cache/industry_code_file.csv")
# high level == more summarized industry codes
# low level == more detailed industry codes
COUNTY_HIGH_LEVEL <- paste0("proc_data/00_QCEW_", YEAR, "_COUNTY_HIGHLVL.rds")
COUNTY_MID_LEVEL <- paste0("proc_data/00_QCEW_", YEAR, "_COUNTY_MIDLVL.rds")
COUNTY_LOW_LEVEL <- paste0("proc_data/00_QCEW_", YEAR, "_COUNTY_LOWLVL.rds")
COUNTY_DETAILED <- paste0("proc_data/00_QCEW_", YEAR, "_COUNTY_DETAILED.rds")
# LOAD LIBS ------------
library(RCurl) # web request
library(dplyr) # data manipulation
library(readr) # fast reading of binary data (read_table)
library(tidyr) # more data manipulation
library(data.table)
# HELPER FUNCS ---------
# build the URL to query for data zip file
# Construct the BLS QCEW "quarterly single file" zip URL for a given year.
# p_year: the year (numeric or character); it appears twice in the URL.
build_qcew_query <- function(p_year) {
  base_url <- "https://data.bls.gov/cew/data/files"
  sprintf("%s/%s/csv/%s_qtrly_singlefile.zip", base_url, p_year, p_year)
}
# not sure how useful this is yet...
# do we need a read version of this as well?
# Write 'p_obj' to disk, dispatching on the file extension of 'p_file_name':
# .rds -> saveRDS, .csv -> data.table::fwrite. Any other extension errors.
persist_to_disk <- function(p_obj, p_file_name) {
  ext <- tolower(tools::file_ext(p_file_name))
  # switch() falls through to the unnamed default, which raises the same
  # error as the original extension check.
  switch(ext,
    rds = saveRDS(p_obj, p_file_name),
    csv = fwrite(p_obj, p_file_name),
    stop("Must pass in either a '.rds' or '.csv' filepath name")
  )
}
# DOWNLOAD AND SAVE INDUSTRY CODES --------
# industry_code -> industry_title lookup table, fetched once and cached.
if (!file.exists(INDUSTRY_CODE_FILE)) {
download.file("https://data.bls.gov/cew/doc/titles/industry/industry_titles.csv",
destfile = INDUSTRY_CODE_FILE)
}
# CREATE CACHE DIR --------
if(!dir.exists("cache")) {dir.create("cache")}
# DOWNLOAD AND READ ZIP FILE ---------
# Skip the (large) download entirely when the trimmed .rds cache exists.
if(!file.exists(FULL_FILE)) {
# download and write to disk
print("Downloading file...")
download.file(url=build_qcew_query(YEAR), destfile=ZIP_FILE_PATH)
file_name <- unzip(ZIP_FILE_PATH, list=T)[[1]]
print("Caching to disk...")
csv_path <- unzip(ZIP_FILE_PATH, files=file_name, exdir="cache")
# column descriptions:
# https://data.bls.gov/cew/doc/layouts/csv_quarterly_layout.htm
print("Reading from disk...")
# Peek at the header only (nrows=5) to decide which columns to drop:
# disclosure flags, over-the-year (oty) deltas, and contribution columns.
df <- fread(csv_path, nrows=5, colClasses="character")
rem_cols <- names(df)[grepl("disclosure", names(df))]
rem_cols <- c(rem_cols, names(df)[grepl("oty", names(df))])
rem_cols <- c(rem_cols, names(df)[grepl("contribut", names(df))])
# Full read, skipping the unneeded columns -- this is the low-memory trick
# that keeps the script viable on ~8GB machines.
df <- fread(csv_path, colClasses="character", drop=rem_cols)
gc()
print("Caching to disk in more efficient format...")
saveRDS(df, FULL_FILE)
} else {
df <- readRDS(FULL_FILE)
}
# CREATE PROC_DATA CACHE -------
if(!dir.exists("proc_data")) {dir.create("proc_data")}
# COUNTY FILTERS -------
# https://data.bls.gov/cew/doc/titles/agglevel/agglevel_titles.htm
# It gets even more detailed than agglvl_code '76'
# read in the industry code data
industry_codes <- read.csv(INDUSTRY_CODE_FILE, stringsAsFactors = F)
# joining individually because it will save memory
df_highlvl <- df[df$agglvl_code == '74', ]
df_highlvl <- merge(x=df_highlvl, y=industry_codes,
by="industry_code", all.x=T, all.y=F)
saveRDS(df_highlvl, COUNTY_HIGH_LEVEL)
rm(df_highlvl); gc()
df_midlvl <- df[df$agglvl_code == '75', ]
df_midlvl <- merge(x=df_midlvl, y=industry_codes,
by="industry_code", all.x=T, all.y=F)
saveRDS(df_midlvl, COUNTY_MID_LEVEL)
rm(df_midlvl); gc()
df_lowlvl <- df[df$agglvl_code == '76', ]
df_lowlvl <- merge(x=df_lowlvl, y=industry_codes,
by="industry_code", all.x=T, all.y=F)
saveRDS(df_lowlvl, COUNTY_LOW_LEVEL)
rm(df_lowlvl); gc()
# # I want to explore industries where agglvl_code is 78 (most detailed)
# industry_codes$industry_title[industry_codes$industry_code %in%
# df$industry_code[df$agglvl_code == '78']]
df_detailed <- df[df$agglvl_code == '78', ]
df_detailed <- merge(x=df_detailed, y=industry_codes,
by="industry_code", all.x=T, all.y=F)
saveRDS(df_detailed, COUNTY_DETAILED)
rm(df_detailed); gc()
|
00879345333ca75e514638eed674f3a0d139bf55
|
0dfe50e7f553927442a27ed4b1cf366216b06727
|
/examples/raw-material-outcome/process-troubleshooting.R
|
dd6bab8e0efd4fe76ed8c141a96b23d689b377a9
|
[] |
no_license
|
kgdunn/figures
|
3543d2bcb96cc61cc9c2217da3a4210dd23b1103
|
662076362df316069ba9c903a0a71344da887142
|
refs/heads/main
| 2021-07-06T06:59:34.977099
| 2021-06-14T20:47:11
| 2021-06-14T20:47:11
| 244,129,830
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,753
|
r
|
process-troubleshooting.R
|
# Read the raw-materials dataset straight from the course server.
# (The name 'rm' shadows base::rm within this script -- pre-existing choice.)
rm <- read.csv('http://stats4.eng.mcmaster.ca/datasets/raw-materials-data-take-home.csv')
# NOTE(review): the data printed at the bottom of this file has an "Outcome"
# column but no "Class" column, so rm$Class is NULL and y ends up empty.
# 'y' is never used afterwards; confirm the intended column before relying on it.
y <- as.numeric(as.factor(rm$Class))
X <- rm[,3:8]        # the six measured properties (Size5 ... TMA)
library(car)
some(rm)             # car::some -- print a random sample of rows
# PCA on the standardized measurements.
rm.pca <- prcomp(X, scale=TRUE)
rm.P <- rm.pca$rotation   # loadings
rm.T <- rm.pca$x          # scores
rm.P
var(rm.T)
# FIX: mean()/sd() applied directly to a data.frame are defunct in modern R;
# compute the column-wise summaries explicitly instead.
sapply(rm[,3:8], mean)
sapply(rm[,3:8], sd)
# Row masks for the two outcome classes, used by the plots below.
c1 = rm$Outcome == "Adequate"
c2 = rm$Outcome == "Poor"
# Figure 1: side-by-side PCA score plot (colored by outcome) and loadings plot.
bitmap('process-troubleshooting.png', type="png256", width=15, height=6.5, res=300, pointsize=14)
layout(matrix(c(1,2), 1, 2))
par(mar=c(4.5, 4.5, 4.5, 0.5)) # (bottom, left, top, right); defaults are par(mar=c(5, 4, 4, 2) + 0.1)
par(cex.lab=1.5, cex.main=1.5, cex.sub=1.5, cex.axis=1.5)
# Scores: adequate lots as green triangles, poor lots as red circles
# (poor lots additionally labeled with their row names).
plot(rm.T[c1,1], rm.T[c1,2], col="darkgreen", pch=2, lwd=2, xlab=expression(t[1]), ylab=expression(t[2]), xlim=range(rm.T[,1]), ylim=range(rm.T[,2]), main="Score plot")
points(rm.T[c2,1], rm.T[c2,2], col="red", pch=1, lwd=2)
abline(h=0, v=0)
text(rm.T[c2,1], rm.T[c2,2], rownames(X)[c2], pos=1)
legend(x=1, y=-2.5, legend=c("Adequate yield", "Poor yield"), col=c("darkgreen", "red"), lty=c(0,0), pch=c(2, 1), lwd=c(2,2), cex=1.0)
# Loadings: symmetric axis limits padded by 10%; TMA labeled below its point
# to avoid overlap with its neighbors.
p1max = max(abs(rm.P[,1]))*1.1
p2max = max(abs(rm.P[,2]))*1.1
plot(rm.P[,1], rm.P[,2], col="black", xlim=c(-p1max, p1max), ylim=c(-p2max, p2max), xlab=expression(p[1]), ylab=expression(p[2]), main="Loadings plot")
text(rm.P[seq(1,5),1], rm.P[seq(1,5),2], colnames(X)[seq(1,5)], pos=3)
text(rm.P[6,1], rm.P[6,2], colnames(X)[6], pos=1)
abline(h=0, v=0)
dev.off()
# Figure 2: same score plot with a hand-drawn separating line (slope 1/5,
# intercept 0.75) splitting the score space into "Poor" and "Adequate" regions.
bitmap('unsupervised-classification-process.png', type="png256", width=15, height=15/2, res=300, pointsize=14)
layout(matrix(c(1,2), 1, 2))
par(mar=c(4.5, 4.5, 4.5, 0.5)) # (bottom, left, top, right); defaults are par(mar=c(5, 4, 4, 2) + 0.1)
par(cex.lab=1.5, cex.main=1.5, cex.sub=1.5, cex.axis=1.5)
plot(rm.T[c1,1], rm.T[c1,2], col="darkgreen", pch=2, lwd=2, xlab=expression(t[1]), ylab=expression(t[2]), xlim=range(rm.T[,1]), ylim=range(rm.T[,2]), main="Score plot")
points(rm.T[c2,1], rm.T[c2,2], col="darkred", pch=1, lwd=2)
abline(h=0, v=0)
legend(x=1, y=-2.5, legend=c("Adequate yield", "Poor yield"), col=c("darkgreen", "darkred"), lty=c(0,0), pch=c(2, 1), lwd=c(2,2), cex=1.0)
# Manually chosen dividing line.
slope = 1/5
inter = 0.75
abline(a=0.75, 1/5, col="black", lwd=2)
# Arrows perpendicular to the dividing line, pointing into each region.
# (Note: 'c' here shadows base::c within this script -- pre-existing.)
x0=-2.5
y0=x0*slope+inter
c = y0-(-1/slope)*x0
x1=-2.8
arrows(x0=x0, y0=y0, x1=x1, y1=(-1/slope)*x1+c, angle=15, code=2, col="darkred")
text(-2.9, 2, "Poor region", col="darkred")
x0=-2.6
y0=x0*slope+inter
perp.slope = -1/slope
c = y0-(perp.slope)*x0
x1=-2.3
arrows(x0=x0, y0=y0, x1=x1, y1=(perp.slope)*x1+c, angle=15, code=2, col="darkgreen")
text(x1, -1.5, "Adequate region", col="darkgreen")
# Companion loadings plot.
plot(rm.P[,1], rm.P[,2], col="black", xlim=c(-1,1), xlab=expression(p[1]), ylab=expression(p[2]), main="Loadings plot")
text(rm.P[1:5,1], rm.P[1:5,2], colnames(X[1:5]), pos=4)
text(rm.P[6,1], rm.P[6,2], colnames(X[6]), pos=1)
abline(h=0, v=0)
dev.off()
# > rm
# Lot.number Outcome Size5 Size10 Size15 TGA DSC TMA
# 1 B370 Adequate 13.8 9.2 41.2 787.3 18.0 65.0
# 2 B880 Adequate 11.2 5.8 27.6 772.2 17.7 68.8
# 3 B452 Adequate 9.9 5.8 28.3 602.3 18.3 50.7
# 4 B287 Adequate 10.4 4.0 24.7 677.9 17.7 56.5
# 5 B576 Adequate 12.3 9.3 22.0 593.5 19.5 52.0
# 6 B914 Poor 13.7 7.8 27.0 597.9 18.1 49.8
# 7 B404 Poor 15.5 10.7 34.3 668.5 19.6 55.7
# 8 B694 Poor 15.4 10.7 35.9 602.8 19.2 53.6
# 9 B875 Poor 14.9 11.3 41.0 614.6 18.5 50.0
# 10 B475 Adequate 13.7 8.5 28.0 700.4 18.0 57.0
# 11 B517 Poor 16.1 11.6 39.2 682.8 17.5 56.4
# 12 B296 Adequate 12.8 5.4 23.7 739.4 18.2 59.8
# 13 B403 Adequate 10.3 2.5 17.1 595.7 18.4 49.5
# 14 B430 Poor 12.9 9.7 36.3 642.4 19.1 55.0
# 15 B145 Adequate 13.0 7.3 27.0 682.8 19.1 55.3
# 16 B319 Adequate 11.7 5.2 20.2 655.8 19.2 56.3
# 17 B859 Adequate 10.7 6.8 27.7 661.2 18.3 55.5
# 18 B990 Adequate 13.0 5.4 25.3 701.9 19.1 61.0
# 19 B616 Adequate 11.9 7.2 29.8 661.0 18.5 55.4
# 20 B133 Adequate 11.3 7.9 30.0 699.9 18.1 58.1
# 21 B535 Adequate 11.1 4.5 24.8 576.3 19.5 51.6
# 22 B745 Poor 10.2 5.8 24.7 575.9 18.5
# 23 B380 Adequate 11.4 4.8 26.5 636.0 18.6 58.2
# 24 B986 Adequate 10.7 4.8 26.5 726.7 19.2 58.4
# -----------------------------------------------------------
# MEAN 12.4 7.2 28.7 660.6 18.6 55.6
# Standardize selected observations the same way prcomp(scale=TRUE) did, then
# inspect each variable's contribution along the loading directions.
# FIX: mean()/sd() on a data.frame are defunct in modern R; use per-column
# sapply (same named-vector result the old calls produced).
rm.mean <- sapply(rm[,3:8], mean)
rm.sd <- sapply(rm[,3:8], sd)
x.8 = (rm[8,3:8] - rm.mean)/rm.sd    # standardized row for lot B694
x.8 * t(rm.P[,1])                    # per-variable contribution to t1
x.8 * t(rm.P[,2])                    # per-variable contribution to t2
x.22 = (rm[22,3:8] - rm.mean)/rm.sd  # standardized row for lot B745
x.22 * t(rm.P[,2])
# >
# Size5 Size10 Size15 TGA DSC TMA
#
|
9d5e0f5832fbe94aa40f79965e7648b13d9aa7a4
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/11122_0/rinput.R
|
8ded2b598012b2b420569c62d09a9b178830ba8c
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Unroot a Newick tree and write it back out (presumably as input for the
# codeml pipeline, given the surrounding directory name -- confirm).
library(ape)
testtree <- read.tree("11122_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11122_0_unrooted.txt")
|
e22ca040fc1176f7708b78efbf4627ca9a4bbd44
|
b6348fac2cf708bfee8a395cedc04ec33df84e2f
|
/man/getFragments.Rd
|
1e632377562ceedae98202151d1d05989c3b5eaa
|
[] |
no_license
|
cpanse/uvpd
|
51d1c369fe883ed064855eb1ace65b560099581f
|
4dad6d8f2f1e4062c95625aeb78dbb405ae5fa5a
|
refs/heads/master
| 2023-03-17T11:21:36.320033
| 2021-03-11T13:30:07
| 2021-03-11T13:30:07
| 119,033,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 666
|
rd
|
getFragments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uvpd.R
\name{getFragments}
\alias{getFragments}
\title{get all fragments of a SMILES code}
\usage{
getFragments(smiles = "CC(C)(C)C(O)C(OC1=CC=C(Cl)C=C1)N1C=NC=N1", ...)
}
\arguments{
\item{smiles}{A character string giving the SMILES code of the molecule to fragment.}
}
\value{
a \code{data.frame} containing the SMILES and MH1P charged fragments.
}
\description{
get all fragments of a SMILES code
}
\examples{
df <- getFragments(treeDepth = 1)
plot(table(df$MH1P))
}
\references{
\itemize{
\item \url{https://cran.r-project.org/package=rcdk}
\item \url{https://github.com/ipb-halle/MetFragR}
}
}
\seealso{
\code{exec/make-data.R}
}
\author{
AB,CP 2019
}
|
a56d53832e812fe4ff7709acad768853b9ecd7a7
|
d65ef7502086af48cadc9a19e9c9b2c05bb2c37f
|
/tests/testthat.R
|
f9bdc5b4a00d92f3b9681fdf49d3dc1d456fd953
|
[
"MIT"
] |
permissive
|
kravitz-eli/metaRek
|
a72664370c7e5980096ad33577f93cc86aa77722
|
1176fbd864af862aabadcc7cf7a122155cf19708
|
refs/heads/master
| 2022-12-14T02:16:15.494468
| 2020-09-22T19:02:06
| 2020-09-22T19:02:06
| 297,743,256
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs all tests for the
# metaRek package under tests/testthat/.
library(testthat)
library(metaRek)
test_check("metaRek")
|
3d6c16399d7a96175de36be6366a9498ab4d8594
|
5b55d8d4a1e6275605e7e740cfb3cec5528b485b
|
/R/modeG.R
|
1179d1fa72fab4c04e63f129eb237e5b29aae675
|
[] |
no_license
|
cran/MasterBayes
|
2103a6dfddb562c02b37f32c79ca51bce477a6e6
|
a2bbdc296453f21114f7fd9e1a8d825ed6d86730
|
refs/heads/master
| 2022-07-23T18:11:50.598009
| 2022-06-22T12:00:10
| 2022-06-22T12:00:10
| 17,691,892
| 1
| 2
| null | 2017-09-27T20:22:15
| 2014-03-13T02:32:13
|
C++
|
UTF-8
|
R
| false
| false
| 404
|
r
|
modeG.R
|
# modeG: posterior-mode genotype call per individual at each locus.
#
# postG:     list with one matrix per locus; rows = individuals, columns =
#            candidate genotypes (column names are genotype strings parsable
#            by genetics::as.genotype), entries = posterior counts.
# threshold: minimum posterior support, as a proportion of the per-row total,
#            required to keep a call; below it the genotype is set to NA.
#
# Returns a list with G (one genotype vector per locus) and id (row names of
# the first locus matrix).
"modeG"<-function(postG, threshold=0){
id<-rownames(postG[[1]])
# One slot per locus, carrying over the locus names.
G<-as.list(1:length(postG))
names(G)<-names(postG)
# Total posterior mass per row, read from the first row of the first locus
# (assumed constant across rows and loci -- not checked).
n<-sum(postG[[1]][1,])
for(i in 1:length(G)){
# Allele names recovered from this locus's genotype column labels
# (allele.names/as.genotype come from the 'genetics' package).
an<-allele.names(as.genotype(colnames(postG[[i]])))
# Modal genotype per individual = column with the largest posterior count.
G[[i]]<-as.genotype(colnames(postG[[i]])[apply(postG[[i]],1,which.max)], alleles=an)
# Drop calls whose posterior support (max / n) falls below the threshold.
G[[i]][which(apply(postG[[i]],1,max)/n < threshold)]<-NA
}
list(G=G, id=id)
}
|
af79be2bcc12c20dbd59d44536efc26a908f316b
|
e272e1a291f4ae00dc3f783a46daa383275403cb
|
/R/timeStackProbaV.R
|
f19040555d6bce35e2adb201637f1b874cd5c9c7
|
[
"MIT"
] |
permissive
|
johanez/probaV
|
84a49a30ea8155e58a3c34bbdbabc1257faca8d8
|
eea7c7ad91555bab7b02448e71a156917c5e5581
|
refs/heads/master
| 2021-01-10T09:53:18.362963
| 2016-12-12T13:31:58
| 2016-12-12T13:31:58
| 44,959,172
| 4
| 2
| null | 2016-11-02T16:23:23
| 2015-10-26T09:42:43
|
R
|
UTF-8
|
R
| false
| false
| 1,341
|
r
|
timeStackProbaV.R
|
#' @title Build time stack from Proba-V images
#'
#' @description Stacks Proba-V layers into a single raster stack, optionally
#'   ordered chronologically, filtered by tile and capped at an end date.
#' @author J Eberenz
#' @param x Character. Directory of Proba-V geotiffs, or a list of filenames.
#' @param pattern Character. As in \code{\link{list.files}}.
#' @param order_chrono Logical. Whether to order the stack chronologically. Default \code{TRUE}.
#' @param tile Character. Which tile to process. Format: "X00Y00".
#' @param quick Logical. See \code{raster::stack}.
#' @param end_date Date. Last date to process.
#' @param ... Additional arguments to \code{raster::writeRaster}; if a
#'   \code{filename} is supplied the stack is written to disk.
#'
#' @return a RasterStack (or, when written, the object returned by
#'   \code{writeRaster}).
#'
#' @export
#'
#' @import raster
timeStackProbaV <- function(x, pattern, order_chrono=TRUE, tile=NULL, quick=FALSE, end_date=NULL, ...){
  df_info <- getProbaVinfo(x, pattern)
  if(order_chrono){
    df_info <- df_info[order(df_info$date),]
  }
  if (!is.null(tile)) df_info <- df_info[df_info$tile==tile, ]
  if (!is.null(end_date)) df_info <- df_info[as.numeric(df_info$date) <= end_date,]
  # BUG FIX: paths were built from an undefined global 'fdir' instead of the
  # 'x' argument, so the function only worked if a matching 'fdir' happened to
  # exist in the calling environment.
  s <- raster::stack(file.path(x, df_info$fpath), quick=quick)
  #cat("build brick ... ")
  #s <- brick(s)
  names(s) <- row.names(df_info)
  # Z dimension = acquisition date, formatted as year + day-of-year.
  s <- setZ(x=s, z=format(df_info$date, "%Y%j"))
  if(hasArg(filename)) {
    cat("writing...")
    out <- writeRaster(s, progress="bar", ... )
    return(out)
  }
  return(s)
}
|
82c6597bdf630187d10b598b7a5fd9ead83c0009
|
80805e40084dd2b38bc988dfdfd821391848e293
|
/man/getIsotopeAddList.Rd
|
012dbd8e784848feb591540b5c8979845384545a
|
[] |
no_license
|
AspirinCode/masstrixR
|
b7b685ced0446584aa7443972b7eb7bf449e754d
|
844348cf0773f6548a838b1af64810384068698f
|
refs/heads/master
| 2020-04-27T20:50:05.036069
| 2019-03-07T13:20:21
| 2019-03-07T13:20:21
| 174,672,525
| 0
| 1
| null | 2019-03-09T09:14:02
| 2019-03-09T09:14:01
| null |
UTF-8
|
R
| false
| true
| 327
|
rd
|
getIsotopeAddList.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utils_Formula.R
\name{getIsotopeAddList}
\alias{getIsotopeAddList}
\title{function to get the differences between monoisotope and isotope}
\usage{
getIsotopeAddList()
}
\description{
function to get the differences between monoisotope and isotope
}
|
9c64ada4fc4ae3e3d98055c4b3b551695f362de3
|
882384720aaad106f30d2893574483ec664c6ca9
|
/jason/cubs.R
|
a60e048b0f8b2bc2c6d6c867ca0abdc64f573359
|
[] |
no_license
|
imouzon/dmc2015
|
deb1bfb5a25955b289726e48723b7e736f71f8b6
|
8a1aa31a7a917501a9a1b5a96c313685591a31b7
|
refs/heads/master
| 2020-12-24T19:17:58.268371
| 2015-06-29T11:36:59
| 2015-06-29T11:36:59
| 33,014,468
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,912
|
r
|
cubs.R
|
## cubs stands for "coupon upper bounds". This function reads in the type of
## data we're using and outputs a data frame with columns:
##   couponID: self-explanatory
##   upper.bound: the smallest possible value of (product + coupon)
##   num.orders: how many orders the coupon was used in
##   unique.basket.vals: how many unique basket values there were for those orders
cubs <- function(trn) {
  library(dplyr)
  library(ggvis)   # NOTE(review): attached but not used below -- kept as-is
  library(stringr)
  library(tidyr)
  trn <- trn %>% tbl_df()
  ## Goal: For each coupon, minimize the basket values over all orders in
  ## which that coupon is used. I use the column name "position" under
  ## the assumption that the customers see coupon 1 first in the list,
  ## then coupon 2, then coupon 3 (perhaps, say, in an email).
  trn2 <- trn %>%
    mutate(couponID1 = as.character(couponID1),
           couponID2 = as.character(couponID2),
           couponID3 = as.character(couponID3)) %>%
    gather(key = position, value = couponID, couponID1, couponID2, couponID3) %>%
    mutate(position = as.numeric(str_extract(position, "\\d"))) %>%
    filter(coupon1Used | coupon2Used | coupon3Used)
  ## FIX: the original filled 'used' with a row-by-row for loop; this picks
  ## the usage flag matching each row's coupon position in one vectorized pass.
  trn2$used <- ifelse(trn2$position == 1, trn2$coupon1Used,
               ifelse(trn2$position == 2, trn2$coupon2Used,
                      trn2$coupon3Used))
  ## Get the results!
  coupon.tdf <- trn2 %>%
    filter(used == 1) %>%
    group_by(couponID) %>%
    summarize(upper.bound = min(basketValue),
              num.orders = n(),
              unique.basket.vals = n_distinct(basketValue)) %>%
    arrange(desc(unique.basket.vals))
  return(coupon.tdf)
}
|
ba667ce868fb53500d8c498c982874ee6eace3fc
|
1e2d95ba8862d5d8c596013539acb9f6d228f18e
|
/man/annotate_cna_seg.Rd
|
ad1a0e8d821bcfaf65078d00e1be3ce298bd906b
|
[
"MIT"
] |
permissive
|
jmonlong/scCNAutils
|
507501f12bad7fe2ce3f5af5b710169f814739e9
|
37028ecc7796b056976a9bf5c2b0e9a6882c1ba2
|
refs/heads/master
| 2022-05-01T15:27:58.414469
| 2022-04-26T03:08:34
| 2022-04-26T03:08:34
| 155,943,135
| 8
| 8
|
MIT
| 2022-04-21T06:30:28
| 2018-11-03T02:39:09
|
R
|
UTF-8
|
R
| false
| true
| 623
|
rd
|
annotate_cna_seg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annotate_cna_seg.R
\name{annotate_cna_seg}
\alias{annotate_cna_seg}
\title{Annotate CN segments}
\usage{
annotate_cna_seg(seg.df, hmm.df)
}
\arguments{
\item{seg.df}{a data.frame with segment information}
\item{hmm.df}{a data.frame with bin information}
}
\value{
an annotated version of seg.df with a column wt.pv with the p-value of the
Wilcoxon test.
}
\description{
Annotate the CN segment predicted by the HMM. The signal in the segment is compared
to the signal in "neutral" segments nearby using a Wilcoxon test.
}
\author{
Jean Monlong
}
|
5a26888479d81f02623c0495949c73daac6e7036
|
bad7f450ff3647f6c4011ca4a1ef4dc0fbe21d87
|
/Cluster_analysis/103_4_swing_histogram.R
|
2318ded635b3d1c0a5e7a83c80dad81e78474a62
|
[] |
no_license
|
DominicOH/Research_Project_Analysis
|
eac1d5acff695dd12ed65b0d4c1fc45eec4c17c7
|
337b36aaabf0ce09e2b0c5f72702b18c7cbf1190
|
refs/heads/master
| 2023-08-04T04:12:12.417352
| 2021-09-18T11:09:02
| 2021-09-18T11:09:02
| 375,396,165
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,315
|
r
|
103_4_swing_histogram.R
|
# Packages ----
library('tidyverse')
library('factoextra')
# Source ----
# Loads swing_fc_df (among others) from the saved workspace.
load('Data/swing_dend_data.RData')
# Concept ----
# Making a histogram out of the swing data. Point is to show how much of the
# samples are apical swinging.
# Dataframe ----
# turning the swing dataframe with fold changes into just a dataframe with swing
# The name of the sample is also needed to split the two up.
swing_hist_df <- swing_fc_df %>%
select(Ox_LDL_swing, LDL_swing) %>%
pivot_longer(cols = c(Ox_LDL_swing, LDL_swing),
names_to = 'Treated sample',
values_to = 'Swing')
# Histogram ----
# Recode the column names into human-readable facet labels.
swing_hist_df$`Treated sample`[swing_hist_df$`Treated sample` == "Ox_LDL_swing"] <-
"Ox-LDL"
swing_hist_df$`Treated sample`[swing_hist_df$`Treated sample` == "LDL_swing"] <-
"LDL"
# Faceted histogram of polarisation change, one panel per treatment, with a
# dashed reference line at zero (no change).
png(filename = 'Images/swing_histogram.png', units = "cm",
width = 16, height = 8, res = 200)
ggplot(data = swing_hist_df, aes(fill = `Treated sample`)) +
geom_histogram(mapping = aes(x = Swing)) +
scale_x_continuous(limits = c(-1, 1)) +
facet_grid(~ `Treated sample`) +
scale_fill_brewer(type = "div", palette = "Dark2") +
theme_bw() +
theme(legend.position = "none", ) +
labs(x = "Apical polarisation change", y = "Count") +
geom_vline(xintercept = 0, lty = "dashed")
dev.off()
|
477bebaf9c226a3cf0c6f9505ae6f2c48eaa5046
|
8c9598a06fb0b1b7a00eb74e63a1ed2cd8329eb5
|
/R/EventCountStat.R
|
e2e4e93738c64462ae85150abd2f44fc3942d304
|
[] |
no_license
|
gpaux/Mediana
|
1653df59542b80cb3951ce453f8450261b48a752
|
e3a7d7f49292f1f3e4b91e831d957353b798df36
|
refs/heads/master
| 2021-06-08T22:45:10.395261
| 2021-05-29T12:54:24
| 2021-05-29T12:54:24
| 39,732,450
| 22
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,307
|
r
|
EventCountStat.R
|
######################################################################################################################
# Compute the number of events based on non-missing values in the combined sample
#
# sample.list : list of per-sample matrices/data frames, each containing an
#               "outcome" column and a "patient.censor.indicator" column
#               (non-zero = censored, 0 = event observed).
# parameter   : list; when parameter[[1]] == "Description" the statistic's
#               label is returned instead of a value.
#
# Returns the count of observed (non-censored) events among rows whose
# outcome is non-missing, or list("Number of Events") in description mode.
EventCountStat = function(sample.list, parameter) {
  # Determine the function call, either to generate the statistic or to return description
  call = (parameter[[1]] == "Description")
  # `||` (scalar) instead of `|`: `call` is a single logical, and `if`
  # requires a length-one condition.
  if (call == FALSE || is.na(call)) {
    # Error checks
    if (length(sample.list) == 0)
      stop("Analysis model: One sample must be specified in the EventCountStat statistic.")
    # Merge the samples in the sample list
    sample1 = do.call(rbind, sample.list)
    # Outcome column; missing values flag dropouts/incomplete observations
    outcome1 = sample1[, "outcome"]
    # Observed events are the negation of the censoring indicators
    event1 = !sample1[, "patient.censor.indicator"]
    # Count events only among rows with a non-missing outcome
    result = sum(event1[stats::complete.cases(outcome1)])
  }
  else if (call == TRUE) {
    result = list("Number of Events")
  }
  return(result)
}
# End of EventCountStat
|
d9f4d37e255ce79bc956c5717e4ca15a8d2d27b1
|
362a6f84d689dec7eabdcdd3cf6be03b41141a82
|
/start.R
|
f268bdf3537baa8d30f3a5cbfc132f8b0e60ea57
|
[] |
no_license
|
yship1002/use_infercnv_locally
|
349dad229d6213f796c1ca22f8d4ab26c47dd467
|
28d63bc1c589f66ec881c4296defd9150a3f7d6f
|
refs/heads/main
| 2023-06-03T04:32:16.955211
| 2021-06-14T18:20:59
| 2021-06-14T18:20:59
| 376,917,510
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,476
|
r
|
start.R
|
#!/usr/bin/env Rscript
# Run inferCNV on a counts matrix.
#
# Usage: start.R [counts_matrix] [annotations_file] [gene_order_file]
#                [ref_group_names (comma-separated)] [output_folder]
#
# Any argument that is not supplied falls back to the bundled example data.
# (The original script assigned each args[i] and then immediately overwrote
# it with a hard-coded example path, so command-line input was ignored.)
library(infercnv)

args <- commandArgs(trailingOnly = TRUE)

# Return the i-th positional argument, or `default` when it was not given.
arg_or_default <- function(i, default) {
  if (length(args) >= i && nzchar(args[i])) args[i] else default
}

raw_counts_matrix <- arg_or_default(1, "/lustre/project/wdeng7/R_LIB_Share/Library/infercnv/extdata/oligodendroglioma_expression_downsampled.counts.matrix.gz")
annotations_file <- arg_or_default(2, "/lustre/project/wdeng7/R_LIB_Share/Library/infercnv/extdata/oligodendroglioma_annotations_downsampled.txt")
gene_order_file <- arg_or_default(3, "/lustre/project/wdeng7/R_LIB_Share/Library/infercnv/extdata/gencode_downsampled.EXAMPLE_ONLY_DONT_REUSE.txt")
# Reference groups may be passed as a single comma-separated string.
ref_group_names <- if (length(args) >= 4 && nzchar(args[4])) {
  strsplit(args[4], ",", fixed = TRUE)[[1]]
} else {
  c("Microglia/Macrophage", "Oligodendrocytes (non-malignant)")
}
output_folder <- arg_or_default(5, "/home/jyang10/test_infercnv")

infercnv_obj = CreateInfercnvObject(raw_counts_matrix=raw_counts_matrix,
                                    annotations_file=annotations_file,
                                    delim="\t",
                                    gene_order_file=gene_order_file,
                                    ref_group_names=ref_group_names)
infercnv_obj = infercnv::run(infercnv_obj,
                             cutoff=1, # cutoff=1 works well for Smart-seq2, and cutoff=0.1 works well for 10x Genomics
                             out_dir=output_folder,
                             cluster_by_groups=TRUE, output_format="pdf",
                             denoise=TRUE,no_prelim_plot=TRUE,
                             HMM=TRUE)
|
9a1cca4ac5fe9b7984ba02a028134d8b176fb0fa
|
2f33e51b4937b0c5b9ed6ef12e60ce1d38f254c6
|
/R/2020/2020_Week30_AusCatOutcomes.R
|
7ed2abef502b69e137bc41af69843184e7aaca4e
|
[] |
no_license
|
MaiaPelletier/tidytuesday
|
54c384d5a631957e76af83ac9f734f87638536e6
|
ed25cf98fbafaaedc76df729d6170f143d7ddc5e
|
refs/heads/master
| 2021-10-12T00:13:01.730556
| 2021-09-27T20:28:35
| 2021-09-27T20:28:35
| 219,841,220
| 15
| 7
| null | 2020-12-04T14:23:40
| 2019-11-05T20:10:06
|
R
|
UTF-8
|
R
| false
| false
| 2,907
|
r
|
2020_Week30_AusCatOutcomes.R
|
# Week 30: Australian Cat Outcomes --------------------------------------------

# Load libraries
library(dplyr)
library(ggplot2)
library(forcats)
library(wesanderson)
library(ggtext)
library(cowplot) # once again, {cowplot} saves the damn day
library(here)
library(mythemes) # my personal theme package

# Set my custom theme
theme_set(theme_maia())

# Load fun fonts to use for plot
extrafont::loadfonts(device = "win")

# Read data
animal_outcomes <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-07-21/animal_outcomes.csv')

# Keep cats only, collapse outcomes into three buckets, and compute each
# bucket's share of that year's total.
cat_outcomes <-
  animal_outcomes %>%
  filter(animal_type == "Cats") %>%
  select(year, animal_type, outcome, outcome_total = Total) %>%
  mutate(
    outcome = case_when(
      outcome %in% c("Rehomed", "Reclaimed") ~ "Rescued",
      outcome == "Euthanized" ~ "Euthanized",
      TRUE ~ "Other"
    ),
    outcome = factor(outcome),
    outcome = fct_relevel(outcome, "Euthanized", "Rescued", "Other")
  ) %>%
  group_by(year, outcome) %>%
  summarise(outcome_total = sum(outcome_total)) %>%
  mutate(outcome_percent = outcome_total/sum(outcome_total))

# Create plot
p <-
  cat_outcomes %>%
  ggplot(aes(year, outcome_percent)) +
  geom_line(aes(color = outcome), size = 1) +
  labs(
    title = "Adopt, don't shop!",
    subtitle = "The rate of cats being humanely euthanized in Australia has decreased substantially over the last decade.",
    x = NULL,
    y = NULL,
    caption = "@MaiaPelletier | #TidyTuesday | Data source: RSPCA"
  ) +
  scale_y_continuous(
    limits = c(0, 0.75),
    breaks = c(0, 0.25, 0.5, 0.75),
    labels = scales::percent_format()
  ) +
  scale_color_manual(
    values = wes_palette("Royal2")[c(5, 3, 4)],
    name = NULL
  ) +
  theme(
    rect = element_rect(fill = "#fff1eb"),
    text = element_text(family = "Lato"),
    plot.title = element_text(family = "Patrick Hand SC",
                              hjust = 0.5,
                              size = 26,
                              margin = margin(5, 0, 15, 0)),
    plot.subtitle = element_text(size = 10, hjust = 0.5),
    axis.text.x = element_text(size = 10),
    plot.caption = element_text(family = "Lato")
  )

# Draw on images & annotation
final_plot <- ggdraw(p) +
  draw_image(here("images", "cats", "cat_bgrd.png"), height = 0.2, width = 0.2, x = 0.75, y = 0.63) +
  draw_image(here("images", "cats", "happy_cat.png"), height = 0.075, width = 0.075, x = 0.255, y = 0.8855) +
  draw_image(here("images", "cats", "sad_cat.png"), height = 0.08, width = 0.08, x = 0.7, y = 0.885) +
  geom_text(data = data.frame(x = 0.855, y = 0.72, label = "66% of the\nRSPCA's cats\nwere adopted\nin 2018!"),
            aes(x, y, label = label),
            color = "grey25", family = "Lato", size = 3
  )

# Render the composed plot (the original relied on auto-printing the chain).
final_plot

# Save explicitly rather than chaining `+ ggsave(...)` onto the plot:
# inside a `+` chain, ggsave() runs as a side effect on last_plot() and its
# return value is not a ggplot layer, so the saved image may not be the
# composed plot at all.
ggsave(paste0("imgs_week30/animaloutcomes_", format(Sys.time(), "%Y%m%d_%H%M%S"), ".png"),
       plot = final_plot, type = 'cairo')
|
2ecad1b2f8bec50443793236b42e2d545b920829
|
8f9a7f8a781e030178239e0153387f75ae1582f9
|
/man/retrieve_gs_colnames.Rd
|
f8050f53c011a84cc915258f4050fe34ddf0bfe5
|
[] |
no_license
|
meerapatelmd/gUnit
|
fefe35524a76779a5fd24586d73f05f177b2d1ab
|
cbfe7309ceee4a7f30a4519f9fbffa910ec0ea65
|
refs/heads/master
| 2023-01-21T03:38:09.108929
| 2020-11-29T09:25:36
| 2020-11-29T09:25:36
| 211,777,544
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 377
|
rd
|
retrieve_gs_colnames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retrieve_gs_colnames.R
\name{retrieve_gs_colnames}
\alias{retrieve_gs_colnames}
\title{Get colnames every tab in Google Sheet}
\usage{
retrieve_gs_colnames(gsheet_name, system.sleep = 2)
}
\arguments{
\item{gsheet_name}{name of Google Sheet}

\item{system.sleep}{number of seconds to pause between requests
(default \code{2}); presumably used to stay under the Sheets API rate
limit — confirm against the function body.}
}
\description{
Get colnames every tab in Google Sheet
}
|
22ffcb09d18164c989d853e9d36a1dde365072d5
|
7bb3f64824627ef179d5f341266a664fd0b69011
|
/Probability_And_Statistics,_4th_Edition_by_Morris_H._Degroot,_Carnegie-Mellon_University_&_Mark_J._Schervish,_Carnegie-Mellon_University/CH3/EX3.3.9/Ex3_3_9.R
|
3003fa0c8a97a18c7bdde82e7cdde75f06f4e03c
|
[
"MIT"
] |
permissive
|
prashantsinalkar/R_TBC_Uploads
|
8bd0f71834814b1d03df07ce90b2eae3b7d357f8
|
b3f3a8ecd454359a2e992161844f2fb599f8238a
|
refs/heads/master
| 2020-08-05T23:06:09.749051
| 2019-10-04T06:54:07
| 2019-10-04T06:54:07
| 212,746,586
| 0
| 0
|
MIT
| 2019-10-04T06:03:49
| 2019-10-04T06:03:48
| null |
UTF-8
|
R
| false
| false
| 1,110
|
r
|
Ex3_3_9.R
|
# Chapter 3 - Random Variables and Distributions, Section - 3.3 (The Cumulative Distribution Function), Page No. - 115

# Binomial coefficient "n choose k".
# Uses base::choose() instead of the original hand-rolled accumulation loop;
# kept as a named function so the rest of the script reads like the textbook.
comb <- function(n, k) {
  choose(n, k)
}

# Binomial distribution with parameters 'n' (trials) and 'p' (success probability).
# NOTE: the original script named these functions `f`, `F` and `C`; `F` masks
# the logical constant FALSE and `C` masks stats::C, so they are renamed here.
n <- 5
p <- 0.3

# Probability function f(x) = C(n, x) * p^x * (1 - p)^(n - x).
pf_binom <- function(x) {
  comb(n, x) * (p^(x)) * ((1 - p)^(n - x))
}

# Cumulative distribution function. "pbinom" gives the c.d.f. of a binomial
# distribution; for description type ?pbinom in the console.
cdf_binom <- function(x) {
  pbinom(x, n, p)
}

# Displaying values of the p.f.
x <- c(0:5)
for (k in seq_along(x)) {
  print(pf_binom(x[k]))
}

# Displaying values of the c.d.f.
for (l in seq_along(x)) {
  print(cdf_binom(x[l]))
}
# The answer may slightly vary due to rounding off values.
|
b0519d8164eed50b0ef37e5544a38e14e8789544
|
f543f63fb9d310fb9d171f063ad4f89d0b119765
|
/cachematrix.R
|
36b8c3656179c0d16d3b343d47b1f326f1b80af7
|
[] |
no_license
|
manthaappu/ProgrammingAssignment2
|
d0e664e216c045b9066b03eb04f0e5367f92bf9a
|
91b361cf7ec720428028c7dba895a79c66ffa27c
|
refs/heads/master
| 2020-12-30T23:22:27.351440
| 2014-11-22T08:00:34
| 2014-11-22T08:00:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 701
|
r
|
cachematrix.R
|
## makeCacheMatrix() wraps a matrix in an object that can cache its inverse;
## cacheSolve() returns the inverse of such an object, computing it only
## when no cached value is available.
## Build a special "matrix": a list of accessors around a matrix `x` and a
## cached inverse (NULL until cacheSolve() stores one).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  get <- function() {
    x
  }
  set <- function(value) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- value
    inv <<- NULL
  }
  getInverse <- function() {
    inv
  }
  setInverse <- function(value) {
    inv <<- value
  }
  # Same element names and order as the original interface.
  list(get = get,
       set = set,
       getInverse = getInverse,
       setInverse = setInverse)
}
## Return the inverse of a special "matrix" created by makeCacheMatrix(),
## computing it with solve() on the first call and returning the cached
## value on subsequent calls.
##
## x   : object created by makeCacheMatrix() (or anything providing
##       get / getInverse / setInverse accessors)
## ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getInverse()
  if (!is.null(inverse)) {
    # Cache hit. The original body was `returm(m)` — a typo'd function name
    # and an undefined variable — so every cache hit crashed.
    return(inverse)
  }
  data <- x$get()
  inverse <- solve(data, ...)
  x$setInverse(inverse)
  inverse
}
|
0a2b087513347ceee45cd05f2fb64a6b3c527ffb
|
d1e1c9b25aebcea37927c08a8f344713562b3e42
|
/man/F4-mu.GE.Rd
|
b6839970e5edf5b4e2b68f3a0301529d45f39271
|
[] |
no_license
|
cran/muStat
|
43783938835cae3e7a5afb5f8285f9b36ec8b07d
|
a77f2af75558f6a558d1044945f6085281655361
|
refs/heads/master
| 2021-01-17T06:33:48.507309
| 2010-09-17T00:00:00
| 2010-09-17T00:00:00
| 17,697,709
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 989
|
rd
|
F4-mu.GE.Rd
|
\name{mu.GE}
\alias{mu.GE}
\title{ GE Matrix }
\description{
\code{mu.GE} returns a matrix whose elements indicate whether one value
is greater than or equal to another.
}
\usage{
mu.GE(x, y=x)
}
\arguments{
\item{x}{ data matrix, variables as columns }
\item{y}{ second data matrix, optional }
}
\details{
The (i,j) entry of GE matrix is 1 if
\eqn{x_i \geq y_j}{\code{x_i >= y_j}}, 0 otherwise.
The square matrix GE is stored by column in a vector.
}
\value{
a vector which contains the GE matrix.
}
\section{Algorithm}{
\preformatted{
mu.GE <- function(x, y=x) {
<\dots>
if (length(y)>1)
apply(rbind(x,y),2,mu.GE,nrow(x))
else
as.numeric(NAtoZer(outer(x[1:y],x[-(1:y)],">=")))
}
}
}
\author{
Knut M. Wittkowski \email{kmw@rockefeller.edu},
Tingting Song \email{ttsong@gmail.com}
}
\examples{
a <- c(4, 2, 5, 1, 4, NA, 6)
mu.GE(a)
}
\keyword{univar}
\keyword{nonparametric}
|
7e561f1f9ed39f04da82a734b5b5eef59cac910d
|
e27b406119a999d6359e51c445413c6a41132ff2
|
/cna_all_counties.R
|
4300a97c2ab545af25d676f741de10de6623936e
|
[] |
no_license
|
pcollin5/kingsport
|
ce1662d08f4cbdd5ec498bf051b37a523884d833
|
6e6c0bead01d41bf4a195e12ce564a6280ccfd0d
|
refs/heads/master
| 2020-12-23T17:10:50.875217
| 2020-07-14T19:36:54
| 2020-07-14T19:36:54
| 237,213,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 129,734
|
r
|
cna_all_counties.R
|
####load packages####
pd_packages <- c("tidyverse", "tidycensus", "leaflet", "mapview", "DT", "sf", "report",
                 "knitr", "rmarkdown", "kableExtra", "RColorBrewer", "tigris",
                 "directlabels", "officer", "flextable")
# Fixed: the original called lapply(packages, ...) but the vector is named
# pd_packages, so no package was ever attached.
lapply(pd_packages, library, character.only = TRUE)

####load the data#####
# Lookup table mapping 2018 ACS data-profile variable codes to labels.
dp_table_variables_18 <- load_variables(2018, "acs5/profile", cache = TRUE)
new_names_18 <- c("variable", "label", "concept")
names(dp_table_variables_18) <- new_names_18
# The inner_join() calls below reference `dp_table_variables`, which the
# original script never defined; alias it to the 2018 lookup so they resolve.
dp_table_variables <- dp_table_variables_18
####data profiles####
# Tract-level ACS 2018 5-year data-profile tables (DP02-DP05) for the eight
# upper east Tennessee counties. The original spelled out all 32 get_acs()
# calls and 32 inner_join() calls; the loops below create objects with
# exactly the same names (e.g. Carter_dp02_2018, Carter_dp02_18, dp02_18)
# so any later code that references them individually keeps working.
uetn_counties <- c("Carter", "Greene", "Hancock", "Hawkins", "Johnson",
                   "Sullivan", "Washington", "Unicoi")
dp_tables <- c("DP02", "DP03", "DP04", "DP05")

for (cty in uetn_counties) {
  for (tbl in dp_tables) {
    raw <- get_acs(geography = "tract", county = cty, state = "TN",
                   table = tbl, year = 2018, geometry = TRUE)
    assign(paste0(cty, "_", tolower(tbl), "_2018"), raw)
    # NOTE(review): the original joined on `dp_table_variables`, which is not
    # defined anywhere in this script; `dp_table_variables_18` is the
    # intended variable-code lookup.
    assign(paste0(cty, "_", tolower(tbl), "_18"),
           inner_join(raw, dp_table_variables_18, by = "variable"))
  }
}

# Stack the per-county tables (same county order as the original rbind
# calls), then stack the four data-profile tables into one long frame.
for (tbl in dp_tables) {
  assign(paste0(tolower(tbl), "_18"),
         do.call(rbind,
                 unname(mget(paste0(uetn_counties, "_", tolower(tbl), "_18")))))
}
dp_2018 <- rbind(dp02_18, dp03_18, dp04_18, dp05_18)
# County-level ACS 2018 5-year data-profile tables for the same eight
# counties, built exactly like the tract-level section. Object names
# (e.g. Carter_County_dp02_2018 / _18, County_dp02_18, County_dp_2018)
# match the originals so downstream references keep working.
# (Vectors are redefined here so this section is self-contained.)
uetn_counties <- c("Carter", "Greene", "Hancock", "Hawkins", "Johnson",
                   "Sullivan", "Washington", "Unicoi")
dp_tables <- c("DP02", "DP03", "DP04", "DP05")

for (cty in uetn_counties) {
  for (tbl in dp_tables) {
    raw <- get_acs(geography = "county", county = cty, state = "TN",
                   table = tbl, year = 2018, geometry = TRUE)
    assign(paste0(cty, "_County_", tolower(tbl), "_2018"), raw)
    # Join on the defined 2018 variable-code lookup (the original referenced
    # the undefined `dp_table_variables`).
    assign(paste0(cty, "_County_", tolower(tbl), "_18"),
           inner_join(raw, dp_table_variables_18, by = "variable"))
  }
}

# Stack counties per table in the original order, then stack the tables.
for (tbl in dp_tables) {
  assign(paste0("County_", tolower(tbl), "_18"),
         do.call(rbind,
                 unname(mget(paste0(uetn_counties, "_County_", tolower(tbl), "_18")))))
}
County_dp_2018 <- rbind(County_dp02_18, County_dp03_18, County_dp04_18, County_dp05_18)
####Demographics####
# Age
age_vars_18 <- c("DP05_0001", "DP05_0005", "DP05_0006", "DP05_0007", "DP05_0008", "DP05_0009",
"DP05_0010", "DP05_0011", "DP05_0012", "DP05_0013", "DP05_0014",
"DP05_0015", "DP05_0016", "DP05_0017", "DP05_0018", "DP05_0021", "DP05_0024")
age_vars_percent_18 <- c("DP05_0001P", "DP05_0005P", "DP05_0006P", "DP05_0007P", "DP05_0008P",
"DP05_0009P", "DP05_0010P", "DP05_0011P", "DP05_0012P", "DP05_0013P", "DP05_0014P",
"DP05_0015P", "DP05_0016P", "DP05_0017P", "DP05_0018P", "DP05_0021P",
"DP05_0024P")
age_col_dt_names_reg <- c("Total Population",
"Age Under 5 Years", "Age 5-9 Years", "Age 10-14 Years", "Age 15-19 Years",
"Age 20-24 Years", "Age 25-34 Years", "Age 35-44 Years",
"Age 45-54 Years", "Age 55-59 Years", "Age 60-64 Years",
"Age 65-74 Years", "Age 75-84 Years", "Age 85+ Years",
"Median Age", "Age 18+ Years", "Age 65+ Years")
dt_age_tract_counts <- dp_2018 %>%
filter(variable %in% age_vars_18)
dt_age_county_counts <- County_dp_2018 %>%
filter(variable %in% age_vars_18)
dt_age_tract_percent <- dp_2018 %>%
filter(variable %in% age_vars_percent_18)
dt_age_county_percent <- County_dp_2018 %>%
filter(variable %in% age_vars_percent_18)
#making data tables for the age variables
#census tracts data table
age_tract_df <- cbind(age_col_dt_names_reg, dt_age_tract_counts, dt_age_tract_percent)
age_tract_dt <- age_tract_df[,c(1,3,5,6,12,13)]
age_dt <- st_set_geometry(age_tract_dt, NULL)
age_dt_names <- c("Age Group", "Census Tract", "Count Estimate", "Count Margin of Error", "Percent of Total Population", "Percent Margin of Error")
names(age_dt) <- age_dt_names
age_data_table <- datatable(age_dt, caption = "Tract Level Age Group Profile for Upper East Tennessee 2018")
age_data_table
#county data table
dt_age_county_counts <- County_dp_2018 %>%
filter(variable %in% age_vars_18)
dt_age_county_percent <- County_dp_2018 %>%
filter(variable %in% age_vars_percent_18)
age_county_df <- cbind(age_col_dt_names_reg, dt_age_county_counts, dt_age_county_percent)
age_county_dt <- age_county_df[,c(1,3,5,6,12,13)]
age_county_dt <- st_set_geometry(age_county_dt, NULL)
age_dt_county_names <- c("Age Group", "County", "Count Estimate", "Count Margin of Error", "Percent of Total Population", "Percent Margin of Error")
names(age_county_dt) <- age_dt_county_names
age_county_data_table <- datatable(age_county_dt, caption = "County Level Age Group Profile for Upper East Tennessee 2018")
age_county_data_table
#age group plot
age_group_plot <- age_county_dt %>%
filter(`Age Group` != "Total Population") %>%
filter(`Age Group` != "Median Age") %>%
filter(`Age Group` != "Age 18+ Years") %>%
filter(`Age Group` != "Age 65+ Years") %>%
ggplot(aes(x = `Age Group`, y = `Count Estimate`, fill = County)) +
geom_col()+
scale_x_discrete()
age_group_plot
# --- Race profile (ACS DP05, 2018): tract/county tables, bar charts, and a map ---
# ACS variable codes for counts and their "P" (percent) counterparts.
race_vars_18 <- c("DP05_0063", "DP05_0064", "DP05_0065", "DP05_0066", "DP05_0067", "DP05_0068", "DP05_0069",
"DP05_0071")
race_percent_vars_18 <- c("DP05_0063P", "DP05_0064P", "DP05_0065P", "DP05_0066P", "DP05_0067P", "DP05_0068P", "DP05_0069P",
"DP05_0071P")
# human-readable labels, ordered to match the variable vectors above
race_vars_names_18 <- c("Total Population", "White", "African American", "American Indian or Alaskan Native",
"Asian", "Native Hawaiian or Pacific Island", "Some other Race", "Hispanic of Any Race")
race_percent_names_18 <- c("Percent Total Population", "Percent White", "Percent African American", "Percent American Indian or Alaskan Native",
"Percent Asian", "Percent Native Hawaiian or Pacific Island", "Percent Some other Race", "Percent Hispanic of Any Race")
# filter the long-format ACS pulls down to the race rows only
df_race_tract_counts_18 <- dp_2018 %>%
filter(variable %in% race_vars_18)
df_race_county_counts_18 <- County_dp_2018 %>%
filter(variable %in% race_vars_18)
df_race_tract_percent_18 <- dp_2018 %>%
filter(variable %in% race_percent_vars_18)
df_race_county_percent_18 <- County_dp_2018 %>%
filter(variable %in% race_percent_vars_18)
## combine counts and percents into one table
df_race_tract_18 <- cbind(race_vars_names_18, df_race_tract_counts_18, df_race_tract_percent_18)
df_race_county_18 <- cbind(race_vars_names_18, df_race_county_counts_18, df_race_county_percent_18)
# keep label / geography / estimate / MOE / percent / percent-MOE (these retain the geometry)
trimmed_df_race_tract_18 <- df_race_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_race_county_18 <- df_race_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## get rid of location data for the tract/county so DT renders a plain table
data_table_race_tract_18 <- st_set_geometry(trimmed_df_race_tract_18, NULL)
data_table_race_county_18 <- st_set_geometry(trimmed_df_race_county_18, NULL)
## rename the columns for display
race_table_tract_names <- c("Racial Group", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
race_table_county_names <- c("Racial Group", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_race_tract_18) <- race_table_tract_names
names(data_table_race_county_18) <- race_table_county_names
## make the data tables
race_tract_data_table_18 <- datatable(data_table_race_tract_18, caption = "Racial Breakdown of Census Tract Groups for Upper East Tennessee 2018")
race_county_data_table_18 <- datatable(data_table_race_county_18, caption = "Racial Breakdown for Upper East Tennessee 2018")
race_tract_data_table_18
race_county_data_table_18
## county level bar chart for Washington County
# NOTE(review): brewer.pal(n = 7, "Dark2") assumes exactly 7 racial groups remain
# after dropping "Total Population" -- confirm if the variable list changes
wc_race_bar_18 <- data_table_race_county_18 %>%
filter(`Racial Group` != "Total Population") %>%
filter(County == "Washington County, Tennessee") %>%
ggplot(aes(x = `Racial Group`, y = `Percent`)) +
ggtitle("Washington County, Tennessee")+
geom_col(fill = brewer.pal(n = 7, "Dark2"))
wc_race_bar_18
# dotplot variant, rendered directly (not assigned)
data_table_race_county_18 %>%
filter(`Racial Group` != "Total Population") %>%
filter(County == "Washington County, Tennessee") %>%
ggplot(aes(x = `Racial Group`)) +
ggtitle("Washington County, Tennessee")+
geom_dotplot(fill = brewer.pal(n = 7, "Dark2"))
# stacked bar chart of counts across all counties in the region
all_counties_race_plot <- data_table_race_county_18 %>%
filter(`Racial Group` != "Total Population") %>%
ggplot(aes(x = `Racial Group`, y = Count, fill = County)) +
geom_col()
all_counties_race_plot
### map of percent White: county layer (no legend) under tract layer (with legend)
race_tract_map_names <- c("Race", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
race_county_map_names <- c("Race", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_race_county_18) <- race_county_map_names
names(trimmed_df_race_tract_18) <- race_tract_map_names
percent_white_county <- trimmed_df_race_county_18 %>%
filter(Race == "White")
percent_white_tract <- trimmed_df_race_tract_18 %>%
filter(Race == "White")
white_people_map <- mapview(list(percent_white_county,percent_white_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent White County", "Percent White Tract"),
legend = list(FALSE, TRUE))
white_people_map
## median age map (ACS DP05_0018): county layer under tract layer
median_age_county <- County_dp_2018 %>%
filter(variable == "DP05_0018")
median_age_tract <- dp_2018 %>%
filter(variable == "DP05_0018")
map_names_county_median_age <- c("County", "Median Age", "Margin of Error", "geometry" )
map_names_tract_median_age <- c("Census Tract", "Median Age", "Margin of Error", "geometry" )
# keep geography name, estimate, and MOE (geometry is carried along by sf)
trimmed_median_age_tract <- median_age_tract[,c(2,4,5)]
trimmed_median_age_county <- median_age_county[,c(2,4,5)]
names(trimmed_median_age_county) <- map_names_county_median_age
names(trimmed_median_age_tract) <- map_names_tract_median_age
# only the tract layer gets a legend; the county layer shares the same variable
median_age_map <- mapview(list(trimmed_median_age_county, trimmed_median_age_tract),
zcol = list("Median Age", "Median Age"),
layer.name = list("Median Age County", "Median Age Tract"),
legend = list(FALSE, TRUE))
median_age_map
#### Social Characteristics ####
### Households (ACS DP02): tract/county tables ###
# ACS variable codes for household counts and their "P" (percent) counterparts
household_vars_18 <- c("DP02_0001", "DP02_0002", "DP02_0003", "DP02_0004", "DP02_0007", "DP02_0009", "DP02_0012", "DP02_0013",
"DP02_0015", "DP02_0016", "DP02_0017", "DP02_0020", "DP02_0044", "DP02_0045", "DP02_0046", "DP02_0047",
"DP02_0048", "DP02_0050")
household_percent_vars_18 <- c("DP02_0001P", "DP02_0002P", "DP02_0003P", "DP02_0004P", "DP02_0007P", "DP02_0009P", "DP02_0012P", "DP02_0013P",
"DP02_0015P", "DP02_0016P", "DP02_0017P", "DP02_0020P", "DP02_0044P", "DP02_0045P", "DP02_0046P", "DP02_0047P",
"DP02_0048P", "DP02_0050P")
# human-readable labels, ordered to match the variable vectors above
household_vars_names_18 <- c("Total Households", "Family Households", "Family Households with Children under 18", "Married Couple Family Households",
"Single Male Households with Children under 18", "Single Female Households with Children under 18", "Households of Age 65 or Greater Living Alone",
"Total Households with Children under 18", "Average Household Size", "Average Family Size", "Population in Households", "Children in Households",
"Number of Grandparents Living With and Responsible for Children under 18 Years Old", "Number of Grandparents Living With and Responsible for Children under 1 Year Old",
"Number of Grandparents Living With and Responsible for Children 1-2 Years Old", "Number of Grandparents Living With and Responsible for Children 3-4 Years Old",
"Number of Grandparents Living With and Responsible for Children 5+ Years Old", "Number of Female Grandparents Living With and Responsible for Children under 18 Years Old")
# filter the long-format ACS pulls down to the household rows only
df_household_tract_counts_18 <- dp_2018 %>%
filter(variable %in% household_vars_18)
df_household_county_counts_18 <- County_dp_2018 %>%
filter(variable %in% household_vars_18)
df_household_tract_percent_18 <- dp_2018 %>%
filter(variable %in% household_percent_vars_18)
df_household_county_percent_18 <- County_dp_2018 %>%
filter(variable %in% household_percent_vars_18)
## combine counts and percents into one table
df_household_tract_18 <- cbind(household_vars_names_18, df_household_tract_counts_18, df_household_tract_percent_18)
df_household_county_18 <- cbind(household_vars_names_18, df_household_county_counts_18, df_household_county_percent_18)
trimmed_df_household_tract_18 <- df_household_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_household_county_18 <- df_household_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## get rid of location data for the tract/county so DT renders a plain table
data_table_household_tract_18 <- st_set_geometry(trimmed_df_household_tract_18, NULL)
data_table_household_county_18 <- st_set_geometry(trimmed_df_household_county_18, NULL)
## rename the columns for display
household_table_tract_names <- c("Household Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
household_table_county_names <- c("Household Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_household_tract_18) <- household_table_tract_names
names(data_table_household_county_18) <- household_table_county_names
## make the data tables
household_tract_data_table_18 <- datatable(data_table_household_tract_18, caption = "Household Characteristics of Census Tract Groups for Upper East Tennessee 2018")
household_county_data_table_18 <- datatable(data_table_household_county_18, caption = "Household Characteristics County Level for Upper East Tennessee 2018")
household_tract_data_table_18
household_county_data_table_18
# single-female-household map: county layer (no legend) under tract layer (with legend)
residence_tract_map_names_placeholder <- NULL # (no-op removed) -- see note below
household_tract_map_names <- c("Household Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
household_county_map_names <- c("Household Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_household_county_18) <- household_county_map_names
names(trimmed_df_household_tract_18) <- household_tract_map_names
percent_single_mothers_county <- trimmed_df_household_county_18 %>%
filter(`Household Measure` == "Single Female Households with Children under 18")
percent_single_mothers_tract <- trimmed_df_household_tract_18 %>%
filter(`Household Measure` == "Single Female Households with Children under 18")
single_mothers_map <- mapview(list(percent_single_mothers_county,percent_single_mothers_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Single Mothers County", "Percent Single Mothers Tract"),
legend = list(FALSE, TRUE))
single_mothers_map
## Education (ACS DP02): tract/county tables
# ACS variable codes for counts and their "P" (percent) counterparts
education_vars_18 <- c("DP02_0052", "DP02_0053", "DP02_0054", "DP02_0055", "DP02_0056", "DP02_0057", "DP02_0058", "DP02_0059", "DP02_0060",
"DP02_0061", "DP02_0062", "DP02_0063", "DP02_0064", "DP02_0065", "DP02_0066", "DP02_0067")
education_percent_vars_18 <- c("DP02_0052P", "DP02_0053P", "DP02_0054P", "DP02_0055P", "DP02_0056P", "DP02_0057P", "DP02_0058P", "DP02_0059P", "DP02_0060P",
"DP02_0061P", "DP02_0062P", "DP02_0063P", "DP02_0064P", "DP02_0065P", "DP02_0066P", "DP02_0067P")
# human-readable labels, ordered to match the variable vectors above
education_vars_names_18 <- c("Number of Children Greater than 3 Years of Age Enrolled in School", "Number of Children Enrolled in Nursery School or Preschool",
"Number of Children Enrolled in Kindergarten", "Number of Children Enrolled in Grades 1-8", "Number of Children Enrolled in Grades 9-12",
"Number of People Enrolled in College or Graduate School", "Total Population 25 Years and Up", "25 Years and Up: Educational Attainment less than 9th Grade",
"25 Years and Up Educational Attainment: Grade 9-12, No Diploma", "25 Years and Up Educational Attainment: Highschool Diploma or Equivalent",
"25 Years and Up Educational Attainment: Some College, no Degree", "25 Years and Up Educational Attainment: Associates Degree", "25 Years and Up Educational Attainment: Bachelors Degree",
"25 Years and Up Educational Attainment: Graduate Degree", "25 Years and Up Educational Attainment: Highschool Degree or Higher", "25 Years and Up Educational Attainment: Bachelors Degree or Higher")
# filter the long-format ACS pulls down to the education rows only
df_education_tract_counts_18 <- dp_2018 %>%
filter(variable %in% education_vars_18)
df_education_county_counts_18 <- County_dp_2018 %>%
filter(variable %in% education_vars_18)
df_education_tract_percent_18 <- dp_2018 %>%
filter(variable %in% education_percent_vars_18)
df_education_county_percent_18 <- County_dp_2018 %>%
filter(variable %in% education_percent_vars_18)
## combine counts and percents into one table
df_education_tract_18 <- cbind(education_vars_names_18, df_education_tract_counts_18, df_education_tract_percent_18)
df_education_county_18 <- cbind(education_vars_names_18, df_education_county_counts_18, df_education_county_percent_18)
trimmed_df_education_tract_18 <- df_education_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_education_county_18 <- df_education_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## get rid of location data for the tract/county so DT renders a plain table
data_table_education_tract_18 <- st_set_geometry(trimmed_df_education_tract_18, NULL)
data_table_education_county_18 <- st_set_geometry(trimmed_df_education_county_18, NULL)
## rename the columns for display
# BUGFIX: education_table_county_names was previously defined AFTER it was used in
# names(data_table_education_county_18) <- ..., which raised "object not found".
# Both name vectors are now defined before either assignment.
education_table_tract_names <- c("Education Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
education_table_county_names <- c("Education Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_education_tract_18) <- education_table_tract_names
names(data_table_education_county_18) <- education_table_county_names
## make the data tables
education_tract_data_table_18 <- datatable(data_table_education_tract_18, caption = "Education Characteristics of Census Tract Groups for Upper East Tennessee 2018")
education_county_data_table_18 <- datatable(data_table_education_county_18, caption = "Education Characteristics for Counties in Upper East Tennessee 2018")
education_tract_data_table_18
education_county_data_table_18
### education maps: HS-or-higher attainment (percent) and preschool enrollment (count)
education_tract_map_names <- c("Education Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
education_county_map_names <- c("Education Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_education_county_18) <- education_county_map_names
names(trimmed_df_education_tract_18) <- education_tract_map_names
percent_hs_or_higher_county <- trimmed_df_education_county_18 %>%
filter(`Education Measure` == "25 Years and Up Educational Attainment: Highschool Degree or Higher")
percent_hs_or_higher_tract <- trimmed_df_education_tract_18 %>%
filter(`Education Measure` == "25 Years and Up Educational Attainment: Highschool Degree or Higher")
# county layer (no legend) under tract layer (with legend)
hs_or_higher_map <- mapview(list(percent_hs_or_higher_county, percent_hs_or_higher_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Highschool Degree or Equivalent or Higher County", "Percent Highschool Degree or Equivalent or Higher Tract"),
legend = list(FALSE, TRUE))
hs_or_higher_map
## preschool/nursery school enrollment map (raw counts, not percents)
preschool_enrollment_county <- trimmed_df_education_county_18 %>%
filter(`Education Measure` == "Number of Children Enrolled in Nursery School or Preschool")
preschool_enrollment_tract <- trimmed_df_education_tract_18 %>%
filter(`Education Measure` == "Number of Children Enrolled in Nursery School or Preschool")
preschool_enrollment_map <- mapview(list(preschool_enrollment_county, preschool_enrollment_tract),
zcol = list("Count", "Count"),
layer.name = list("Preschool Enrollment County", "Preschool Enrollment Tract"),
legend = list(FALSE, TRUE))
preschool_enrollment_map
## Disability status (ACS DP02): tract/county tables and a map ###
# ACS variable codes for counts and their "P" (percent) counterparts
disability_vars_18 <- c("DP02_0070", "DP02_0071", "DP02_0072", "DP02_0073", "DP02_0074", "DP02_0075", "DP02_0076", "DP02_0077")
disability_percent_vars_18 <- c("DP02_0070P", "DP02_0071P", "DP02_0072P", "DP02_0073P", "DP02_0074P", "DP02_0075P", "DP02_0076P", "DP02_0077P")
# human-readable labels, ordered to match the variable vectors above.
# BUGFIX: corrected the typo "Civilain" and the mislabeled "Ages 18-74"
# (DP02_0075 is "With a disability, 18 to 64 years" in the ACS data profile).
disability_vars_names_18 <- c("Total Civilian Population", "Civilian Population with a Disability","Total Civilian Population Under 18 Years of Age", "Civilian Population Under 18 Years of Age with a Disability",
"Total Civilian Population Ages 18-64", "Civilian Population Ages 18-64 with a Disability", "Total Civilian Population Over 65 Years of Age", "Civilian Population Over 65 Years of Age with a Disability")
# filter the long-format ACS pulls down to the disability rows only
df_disability_tract_counts_18 <- dp_2018 %>%
filter(variable %in% disability_vars_18)
df_disability_county_counts_18 <- County_dp_2018 %>%
filter(variable %in% disability_vars_18)
df_disability_tract_percent_18 <- dp_2018 %>%
filter(variable %in% disability_percent_vars_18)
df_disability_county_percent_18 <- County_dp_2018 %>%
filter(variable %in% disability_percent_vars_18)
## combine counts and percents into one table
df_disability_tract_18 <- cbind(disability_vars_names_18, df_disability_tract_counts_18, df_disability_tract_percent_18)
df_disability_county_18 <- cbind(disability_vars_names_18, df_disability_county_counts_18, df_disability_county_percent_18)
trimmed_df_disability_tract_18 <- df_disability_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_disability_county_18 <- df_disability_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## get rid of location data for the tract/county so DT renders a plain table
data_table_disability_tract_18 <- st_set_geometry(trimmed_df_disability_tract_18, NULL)
data_table_disability_county_18 <- st_set_geometry(trimmed_df_disability_county_18, NULL)
## rename the columns for display
disability_table_tract_names <- c("Disability Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
disability_table_county_names <- c("Disability Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_disability_tract_18) <- disability_table_tract_names
names(data_table_disability_county_18) <- disability_table_county_names
## make the data tables
disability_tract_data_table_18 <- datatable(data_table_disability_tract_18, caption = "Disability Characteristics of Census Tract Groups for Upper East Tennessee 2018")
disability_county_data_table_18 <- datatable(data_table_disability_county_18, caption = "Disability Characteristics for Counties in Upper East Tennessee 2018")
## percent-with-a-disability map: county layer (no legend) under tract layer (with legend)
disability_tract_map_names <- c("Disability Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
disability_county_map_names <- c("Disability Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_disability_county_18) <- disability_county_map_names
names(trimmed_df_disability_tract_18) <- disability_tract_map_names
percent_disabled_county <- trimmed_df_disability_county_18 %>%
filter(`Disability Measure` == "Civilian Population with a Disability")
percent_disabled_tract <- trimmed_df_disability_tract_18 %>%
filter(`Disability Measure` == "Civilian Population with a Disability")
disability_map <- mapview(list(percent_disabled_county, percent_disabled_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Population with a Disability County", "Percent Population with a Disability Tract"),
legend = list(FALSE, TRUE))
disability_map
## Residence 1 year ago (ACS DP02): tract/county tables
# ACS variable codes for counts and their "P" (percent) counterparts
residence_vars_18 <- c("DP02_0079", "DP02_0080", "DP02_0081", "DP02_0082", "DP02_0083", "DP02_0084", "DP02_0085")
residence_percent_vars_18 <- c("DP02_0079P", "DP02_0080P", "DP02_0081P", "DP02_0082P", "DP02_0083P", "DP02_0084P", "DP02_0085P")
# human-readable labels, ordered to match the variable vectors above
residence_vars_names_18 <- c("Residence 1 Year Ago in same House", "Residence 1 Year Ago in Different House in US", "Residence 1 Year Ago in Different House in Same County",
"Residence 1 Year Ago in Different House Different County", "Residence 1 Year Ago in Different House Same State", "Residence 1 Year Ago in Different House Different State",
"Residence 1 Year Ago in Different House Abroad")
# filter the long-format ACS pulls down to the residence rows only
df_residence_tract_counts_18 <- dp_2018 %>%
filter(variable %in% residence_vars_18)
df_residence_county_counts_18 <- County_dp_2018 %>%
filter(variable %in% residence_vars_18)
df_residence_tract_percent_18 <- dp_2018 %>%
filter(variable %in% residence_percent_vars_18)
df_residence_county_percent_18 <- County_dp_2018 %>%
filter(variable %in% residence_percent_vars_18)
## combine counts and percents into one table
df_residence_tract_18 <- cbind(residence_vars_names_18, df_residence_tract_counts_18, df_residence_tract_percent_18)
df_residence_county_18 <- cbind(residence_vars_names_18, df_residence_county_counts_18, df_residence_county_percent_18)
trimmed_df_residence_tract_18 <- df_residence_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_residence_county_18 <- df_residence_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## get rid of location data for the tract/county so DT renders a plain table
data_table_residence_tract_18 <- st_set_geometry(trimmed_df_residence_tract_18, NULL)
data_table_residence_county_18 <- st_set_geometry(trimmed_df_residence_county_18, NULL)
## rename the columns for display
residence_table_tract_names <- c("Residence Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
residence_table_county_names <- c("Residence Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_residence_tract_18) <- residence_table_tract_names
names(data_table_residence_county_18) <- residence_table_county_names
## make the data tables
residence_tract_data_table_18 <- datatable(data_table_residence_tract_18, caption = "Residence Characteristics of Census Tract Groups for Upper East Tennessee 2018")
residence_county_data_table_18 <- datatable(data_table_residence_county_18, caption = "Residence Characteristics for Counties in Upper East Tennessee 2018")
## residence-in-a-different-state-1-year-ago map:
## county layer (no legend) under tract layer (with legend)
residence_tract_map_names <- c("Residence Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
residence_county_map_names <- c("Residence Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_residence_county_18) <- residence_county_map_names
names(trimmed_df_residence_tract_18) <- residence_tract_map_names
percent_residence_diff_state_county <- trimmed_df_residence_county_18 %>%
filter(`Residence Measure` == "Residence 1 Year Ago in Different House Different State")
percent_residence_diff_state_tract <- trimmed_df_residence_tract_18 %>%
filter(`Residence Measure` == "Residence 1 Year Ago in Different House Different State")
residence_diff_state_map <- mapview(list(percent_residence_diff_state_county, percent_residence_diff_state_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Residence in a Different State 1 Year Ago County", "Percent Residence in a Different State 1 Year Ago Tract"),
legend = list(FALSE, TRUE))
# BUGFIX: this map was built but never displayed, unlike every other map in the
# script; render it for consistency.
residence_diff_state_map
# Place of birth (ACS DP02): tract/county tables
# ACS variable codes for counts and their "P" (percent) counterparts
birthplace_vars_18 <- c("DP02_0088", "DP02_0089", "DP02_0090", "DP02_0092", "DP02_0093", "DP02_0094", "DP02_0095",
"DP02_0101", "DP02_0102", "DP02_0104", "DP02_0105", "DP02_0106", "DP02_0108", "DP02_0109")
birthplace_percent_vars_18 <- c("DP02_0088P", "DP02_0089P", "DP02_0090P", "DP02_0092P", "DP02_0093P", "DP02_0094P", "DP02_0095P",
"DP02_0101P", "DP02_0102P", "DP02_0104P", "DP02_0105P", "DP02_0106P", "DP02_0108P", "DP02_0109P")
# human-readable labels, ordered to match the variable vectors above
birthplace_vars_names_18 <- c("Total Born in the USA", "Total Born in State of Residence", "Total Born in Different State of Residence", "Total Foreign Born",
"Foreign Born Total Population", "Foreign Born Naturalized Citizens", "Foreign Born Non-Citizens", "Foreign Born Entering US since 2010",
"Foreign Born Entering US before 2010", "European Born", "Asian Born", "African Born", "Latin American Born", "North American Born, Outside of USA")
# filter the long-format ACS pulls down to the birthplace rows only
df_birthplace_tract_counts_18 <- dp_2018 %>%
filter(variable %in% birthplace_vars_18)
df_birthplace_county_counts_18 <- County_dp_2018 %>%
filter(variable %in% birthplace_vars_18)
df_birthplace_tract_percent_18 <- dp_2018 %>%
filter(variable %in% birthplace_percent_vars_18)
df_birthplace_county_percent_18 <- County_dp_2018 %>%
filter(variable %in% birthplace_percent_vars_18)
## combine counts and percents into one table
df_birthplace_tract_18 <- cbind(birthplace_vars_names_18, df_birthplace_tract_counts_18, df_birthplace_tract_percent_18)
df_birthplace_county_18 <- cbind(birthplace_vars_names_18, df_birthplace_county_counts_18, df_birthplace_county_percent_18)
trimmed_df_birthplace_tract_18 <- df_birthplace_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_birthplace_county_18 <- df_birthplace_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## get rid of location data for the tract/county so DT renders a plain table
data_table_birthplace_tract_18 <- st_set_geometry(trimmed_df_birthplace_tract_18, NULL)
data_table_birthplace_county_18 <- st_set_geometry(trimmed_df_birthplace_county_18, NULL)
## rename the columns for display
birthplace_table_tract_names <- c("Birthplace Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
birthplace_table_county_names <- c("Birthplace Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
# BUGFIX: the name vectors above were defined but never applied, so the
# datatables showed raw ACS column names; apply them as every other section does.
names(data_table_birthplace_tract_18) <- birthplace_table_tract_names
names(data_table_birthplace_county_18) <- birthplace_table_county_names
## make the data tables
birthplace_tract_data_table_18 <- datatable(data_table_birthplace_tract_18, caption = "Birthplace Characteristics of Census Tract Groups for Upper East Tennessee 2018")
birthplace_county_data_table_18 <- datatable(data_table_birthplace_county_18, caption = "Birthplace Characteristics for Counties in Upper East Tennessee 2018")
## birthplace maps: born-in-state and foreign-born percents,
## county layer (no legend) under tract layer (with legend)
birthplace_tract_map_names <- c("Birthplace Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
birthplace_county_map_names <- c("Birthplace Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_birthplace_county_18) <- birthplace_county_map_names
names(trimmed_df_birthplace_tract_18) <- birthplace_tract_map_names
percent_birthplace_state_county <- trimmed_df_birthplace_county_18 %>%
filter(`Birthplace Measure` == "Total Born in State of Residence")
percent_birthplace_state_tract <- trimmed_df_birthplace_tract_18 %>%
filter(`Birthplace Measure` == "Total Born in State of Residence")
birthplace_state_map <- mapview(list(percent_birthplace_state_county, percent_birthplace_state_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Born in Tennessee County", "Percent Born in Tennessee Tract"),
legend = list(FALSE, TRUE))
birthplace_state_map
# foreign born map
percent_foreign_born_county <- trimmed_df_birthplace_county_18 %>%
filter(`Birthplace Measure` == "Total Foreign Born")
percent_foreign_born_tract <- trimmed_df_birthplace_tract_18 %>%
filter(`Birthplace Measure` == "Total Foreign Born")
foreign_born_map <- mapview(list(percent_foreign_born_county, percent_foreign_born_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Foreign Born County", "Percent Foreign Born Tract"),
legend = list(FALSE, TRUE))
foreign_born_map
# Language spoken at home (ACS DP02): tract/county tables and a map
# ACS variable codes for counts and their "P" (percent) counterparts
language_vars_18 <- c("DP02_0112", "DP02_0113", "DP02_0114", "DP02_0115")
language_percent_vars_18 <- c("DP02_0112P", "DP02_0113P", "DP02_0114P", "DP02_0115P")
# human-readable labels, ordered to match the variable vectors above
language_vars_names_18 <- c("Population over 5 Years Old that Primarily Speak a Language Other than English", "Population over 5 Years Old that Speak English less than Very Well",
"Population over 5 Years Old for which Spanish is the Primary Language", "Population over 5 Years Old for Which Spanish is the Primary Language and Who Speak English Less Than Very Well")
# filter the long-format ACS pulls down to the language rows only
df_language_tract_counts_18 <- dp_2018 %>%
filter(variable %in% language_vars_18)
df_language_county_counts_18 <- County_dp_2018 %>%
filter(variable %in% language_vars_18)
df_language_tract_percent_18 <- dp_2018 %>%
filter(variable %in% language_percent_vars_18)
df_language_county_percent_18 <- County_dp_2018 %>%
filter(variable %in% language_percent_vars_18)
## combine counts and percents into one table
df_language_tract_18 <- cbind(language_vars_names_18, df_language_tract_counts_18, df_language_tract_percent_18)
df_language_county_18 <- cbind(language_vars_names_18, df_language_county_counts_18, df_language_county_percent_18)
trimmed_df_language_tract_18 <- df_language_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_language_county_18 <- df_language_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## get rid of location data for the tract/county so DT renders a plain table
data_table_language_tract_18 <- st_set_geometry(trimmed_df_language_tract_18, NULL)
data_table_language_county_18 <- st_set_geometry(trimmed_df_language_county_18, NULL)
## rename the columns for display
language_table_tract_names <- c("Language Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
language_table_county_names <- c("Language Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_language_tract_18) <- language_table_tract_names
names(data_table_language_county_18) <- language_table_county_names
## make the data tables
language_tract_data_table_18 <- datatable(data_table_language_tract_18, caption = "Language Characteristics of Census Tract Groups for Upper East Tennessee 2018")
language_county_data_table_18 <- datatable(data_table_language_county_18, caption = "Language Characteristics for Counties in Upper East Tennessee 2018")
## language map: county layer (no legend) under tract layer (with legend)
language_tract_map_names <- c("Language Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
language_county_map_names <- c("Language Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_language_county_18) <- language_county_map_names
names(trimmed_df_language_tract_18) <- language_tract_map_names
percent_language_county <- trimmed_df_language_county_18 %>%
filter(`Language Measure` == "Population over 5 Years Old that Primarily Speak a Language Other than English")
percent_language_tract <- trimmed_df_language_tract_18 %>%
filter(`Language Measure` == "Population over 5 Years Old that Primarily Speak a Language Other than English")
language_map <- mapview(list(percent_language_county, percent_language_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Primary Language Other than English County", "Percent Primary Language Other than English Tract"),
legend = list(FALSE, TRUE))
language_map
# Computers and internet access (ACS DP02): tract/county tables
# ACS variable codes for counts and their "P" (percent) counterparts
computer_vars_18 <- c("DP02_0150", "DP02_0151", "DP02_0152")
computer_percent_vars_18 <- c("DP02_0150P", "DP02_0151P", "DP02_0152P")
# human-readable labels, ordered to match the variable vectors above
computer_vars_names_18 <- c("Total Households", "Households with a Computer", "Households with a Broadband Internet Subscription")
# filter the long-format ACS pulls down to the computer rows only
df_computer_tract_counts_18 <- dp_2018 %>%
filter(variable %in% computer_vars_18)
df_computer_county_counts_18 <- County_dp_2018 %>%
filter(variable %in% computer_vars_18)
df_computer_tract_percent_18 <- dp_2018 %>%
filter(variable %in% computer_percent_vars_18)
df_computer_county_percent_18 <- County_dp_2018 %>%
filter(variable %in% computer_percent_vars_18)
## combine counts and percents into one table
df_computer_tract_18 <- cbind(computer_vars_names_18, df_computer_tract_counts_18, df_computer_tract_percent_18)
df_computer_county_18 <- cbind(computer_vars_names_18, df_computer_county_counts_18, df_computer_county_percent_18)
trimmed_df_computer_tract_18 <- df_computer_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_computer_county_18 <- df_computer_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## get rid of location data for the tract/county so DT renders a plain table
data_table_computer_tract_18 <- st_set_geometry(trimmed_df_computer_tract_18, NULL)
data_table_computer_county_18 <- st_set_geometry(trimmed_df_computer_county_18, NULL)
## rename the columns for display
computer_table_tract_names <- c("Computer Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
computer_table_county_names <- c("Computer Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
# BUGFIX: the name vectors above were defined but never applied, so the
# datatables showed raw ACS column names; apply them as every other section does.
names(data_table_computer_tract_18) <- computer_table_tract_names
names(data_table_computer_county_18) <- computer_table_county_names
## make the data tables
computer_tract_data_table_18 <- datatable(data_table_computer_tract_18, caption = "Computer Characteristics of Census Tract Groups for Upper East Tennessee 2018")
computer_county_data_table_18 <- datatable(data_table_computer_county_18, caption = "Computer Characteristics for Counties in Upper East Tennessee 2018")
#broadband internet map
computer_tract_map_names <- c("Computer Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
computer_county_map_names <- c("Computer Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_computer_county_18) <- computer_county_map_names
names(trimmed_df_computer_tract_18) <- computer_tract_map_names
percent_computer_county <- trimmed_df_computer_county_18 %>%
filter(`Computer Measure` == "Households with a Broadband Internet Subscription")
percent_computer_tract <- trimmed_df_computer_tract_18 %>%
filter(`Computer Measure` == "Households with a Broadband Internet Subscription")
computer_map <- mapview(list(percent_computer_county, percent_computer_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Households with a Broadband Internet Subscription County", "Percent Households with a Broadband Internet Subscription Tract"),
legend = list(FALSE, TRUE))
computer_map
#### Economic Characteristics ####
# Workforce / employment status (ACS DP03, 2018) ----
# Variable IDs for counts, matching percent IDs, and display labels
# (labels are cbind-ed positionally, so the three vectors must stay aligned).
workforce_vars_18 <- c("DP03_0001", "DP03_0002", "DP03_0003", "DP03_0004", "DP03_0005", "DP03_0007", "DP03_0009",
                       "DP03_0012", "DP03_0013", "DP03_0014", "DP03_0015", "DP03_0016", "DP03_0017")
workforce_percent_vars_18 <- c("DP03_0001P", "DP03_0002P", "DP03_0003P", "DP03_0004P", "DP03_0005P", "DP03_0007P", "DP03_0009P",
                               "DP03_0012P", "DP03_0013P", "DP03_0014P", "DP03_0015P", "DP03_0016P", "DP03_0017P")
workforce_vars_names_18 <- c("Total Population Aged 16 Years and Above", "Total Population Aged 16 Years and Above: In Labor Force",
                             "Total Population Aged 16 Years and Above: In Civilian Labor Force", "Total Population Aged 16 Years and Above: Employed",
                             "Total Population Aged 16 Years and Above: Unemployed", "Total Population Aged 16 Years and Above: Not in Labor Force",
                             "Unemployment Rate", "Total Population Aged 16 Years and Above: Females in Civilian Labor Force", "Total Population Aged 16 Years and Above: Employed Females in Civilian Labor Force",
                             "Total Households with Children under 6 Years of Age", "Households with Children under 6 Years of Age, All Parents in Labor Force",
                             "Total Households with Children Aged 6-17 Years Old", "Households with Children aged 6-17 Years Old, All Parents in Labor Force")
# Pull count and percent estimates at tract and county level
df_workforce_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% workforce_vars_18)
df_workforce_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% workforce_vars_18)
df_workforce_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% workforce_percent_vars_18)
df_workforce_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% workforce_percent_vars_18)
##combine the counts and percents into one table
df_workforce_tract_18 <- cbind(workforce_vars_names_18, df_workforce_tract_counts_18, df_workforce_tract_percent_18)
df_workforce_county_18 <- cbind(workforce_vars_names_18, df_workforce_county_counts_18, df_workforce_county_percent_18)
trimmed_df_workforce_tract_18 <- df_workforce_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_workforce_county_18 <- df_workforce_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
##get rid of location data for the tract/county
data_table_workforce_tract_18 <- st_set_geometry(trimmed_df_workforce_tract_18, NULL)
data_table_workforce_county_18 <- st_set_geometry(trimmed_df_workforce_county_18, NULL)
##rename the columns
workforce_table_tract_names <- c("Workforce Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
workforce_table_county_names <- c("Workforce Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_workforce_tract_18) <- workforce_table_tract_names
names(data_table_workforce_county_18) <- workforce_table_county_names
##make the data tables
workforce_tract_data_table_18 <- datatable(data_table_workforce_tract_18, caption = "Workforce Characteristics of Census Tract Groups for Upper East Tennessee 2018")
workforce_county_data_table_18 <- datatable(data_table_workforce_county_18, caption = "Workforce Characteristics for Upper East Tennessee 2018")
workforce_tract_data_table_18
workforce_county_data_table_18
#unemployment map
##workforce unemployed
workforce_tract_map_names <- c("Workforce Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
workforce_county_map_names <- c("Workforce Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_workforce_county_18) <- workforce_county_map_names
names(trimmed_df_workforce_tract_18) <- workforce_tract_map_names
percent_workforce_county <- trimmed_df_workforce_county_18 %>%
  filter(`Workforce Measure` == "Total Population Aged 16 Years and Above: Unemployed")
percent_workforce_tract <- trimmed_df_workforce_tract_18 %>%
  filter(`Workforce Measure` == "Total Population Aged 16 Years and Above: Unemployed")
# FIX: tract layer label read "UnemployedTract" (missing space) in the legend
workforce_map <- mapview(list(percent_workforce_county, percent_workforce_tract),
                         zcol = list("Percent", "Percent"),
                         layer.name = list("Percent Total Population Aged 16 Years and Above: Unemployed County", "Percent Total Population Aged 16 Years and Above: Unemployed Tract"),
                         legend = list(FALSE, TRUE))
workforce_map
# Commuting / means of transportation to work (ACS DP03, 2018) ----
work_trans_vars_18 <- c("DP03_0018", "DP03_0019", "DP03_0020", "DP03_0021", "DP03_0022", "DP03_0023", "DP03_0024", "DP03_0025")
work_trans_percent_vars_18 <- c("DP03_0018P", "DP03_0019P", "DP03_0020P", "DP03_0021P", "DP03_0022P", "DP03_0023P", "DP03_0024P", "DP03_0025P")
work_trans_vars_names_18 <- c("Total Workers Aged 16 Years and Above", "Total Workers Driving to Work Alone", "Total Workers that Carpooled",
                              "Total Workers that used Public Transit", "Total Workers that Walked", "Total Workers that used Other Means of Transportation", "Total Workers that Worked at Home", "Mean Travel Time to Work")
# Pull count and percent estimates at tract and county level
df_work_trans_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% work_trans_vars_18)
df_work_trans_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% work_trans_vars_18)
df_work_trans_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% work_trans_percent_vars_18)
df_work_trans_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% work_trans_percent_vars_18)
##combine the counts and percents into one table
df_work_trans_tract_18 <- cbind(work_trans_vars_names_18, df_work_trans_tract_counts_18, df_work_trans_tract_percent_18)
df_work_trans_county_18 <- cbind(work_trans_vars_names_18, df_work_trans_county_counts_18, df_work_trans_county_percent_18)
trimmed_df_work_trans_tract_18 <- df_work_trans_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_work_trans_county_18 <- df_work_trans_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
##get rid of location data for the tract/county
data_table_work_trans_tract_18 <- st_set_geometry(trimmed_df_work_trans_tract_18, NULL)
data_table_work_trans_county_18 <- st_set_geometry(trimmed_df_work_trans_county_18, NULL)
##rename the columns
work_trans_table_tract_names <- c("Workers Transportation Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
work_trans_table_county_names <- c("Workers Transportation Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
# FIX: these name vectors were defined but never applied (every other section
# applies them before datatable()), so the tables displayed raw column names.
names(data_table_work_trans_tract_18) <- work_trans_table_tract_names
names(data_table_work_trans_county_18) <- work_trans_table_county_names
##make the data tables
# FIX: caption previously read "Upper East Tennessee Tennessee" (duplicated word)
work_trans_tract_data_table_18 <- datatable(data_table_work_trans_tract_18, caption = "Workers Transportation Characteristics of Census Tract Groups for Upper East Tennessee 2018")
work_trans_county_data_table_18 <- datatable(data_table_work_trans_county_18, caption = "Workers Transportation Characteristics for Counties in Upper East Tennessee 2018")
work_trans_tract_data_table_18
work_trans_county_data_table_18
##public transit users and mean commute time maps
work_trans_tract_map_names <- c("Workers Transportation Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
work_trans_county_map_names <- c("Workers Transportation Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_work_trans_county_18) <- work_trans_county_map_names
names(trimmed_df_work_trans_tract_18) <- work_trans_tract_map_names
#mean time map
percent_work_trans_county <- trimmed_df_work_trans_county_18 %>%
  filter(`Workers Transportation Measure` == "Mean Travel Time to Work")
percent_work_trans_tract <- trimmed_df_work_trans_tract_18 %>%
  filter(`Workers Transportation Measure` == "Mean Travel Time to Work")
# "Count" column holds the mean travel time estimate for this measure
work_trans_map <- mapview(list(percent_work_trans_county, percent_work_trans_tract),
                          zcol = list("Count", "Count"),
                          layer.name = list("Mean Travel Time to Work County", "Mean Travel Time to Work Tract"),
                          legend = list(FALSE, TRUE))
#public transit
percent_public_transit_county <- trimmed_df_work_trans_county_18 %>%
  filter(`Workers Transportation Measure` == "Total Workers that used Public Transit")
percent_public_transit_tract <- trimmed_df_work_trans_tract_18 %>%
  filter(`Workers Transportation Measure` == "Total Workers that used Public Transit")
public_transit_map <- mapview(list(percent_public_transit_county, percent_public_transit_tract),
                              zcol = list("Percent", "Percent"),
                              layer.name = list("Percent Workers that used Public Transit County", "Percent Workers that used Public Transit Tract"),
                              legend = list(FALSE, TRUE))
public_transit_map
# Occupation type (ACS DP03, 2018) ----
# Variable IDs for counts, matching percent IDs, and display labels
# (labels are cbind-ed positionally, so the three vectors must stay aligned).
occ_vars_18 <- c("DP03_0026", "DP03_0027", "DP03_0028", "DP03_0029", "DP03_0030", "DP03_0031")
occ_percent_vars_18 <- c("DP03_0026P", "DP03_0027P", "DP03_0028P", "DP03_0029P", "DP03_0030P", "DP03_0031P")
occ_vars_names_18 <- c("Total Civilian Employed Population", "Total Management, Business, Science, and Art Occupations",
                       "Total Service Occupations", "Total Sales and Office Occupations", "Total Natural Resources, Construction, and Maintenance Occupations",
                       "Total Production, Transportation, and Material Moving Occupations")
# Pull count and percent estimates at tract and county level
df_occ_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% occ_vars_18)
df_occ_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% occ_vars_18)
df_occ_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% occ_percent_vars_18)
df_occ_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% occ_percent_vars_18)
##combine the counts and percents into one table
df_occ_tract_18 <- cbind(occ_vars_names_18, df_occ_tract_counts_18, df_occ_tract_percent_18)
df_occ_county_18 <- cbind(occ_vars_names_18, df_occ_county_counts_18, df_occ_county_percent_18)
trimmed_df_occ_tract_18 <- df_occ_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_occ_county_18 <- df_occ_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
##get rid of location data for the tract/county (drop sf geometry for plain tables)
data_table_occ_tract_18 <- st_set_geometry(trimmed_df_occ_tract_18, NULL)
data_table_occ_county_18 <- st_set_geometry(trimmed_df_occ_county_18, NULL)
##rename the columns
occ_table_tract_names <- c("Occupation Type", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
occ_table_county_names <- c("Occupation Type", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_occ_tract_18) <- occ_table_tract_names
names(data_table_occ_county_18) <- occ_table_county_names
##make the data tables
occ_tract_data_table_18 <- datatable(data_table_occ_tract_18, caption = "Occupation Type of Census Tract Groups for Upper East Tennessee 2018")
occ_county_data_table_18 <- datatable(data_table_occ_county_18, caption = "Occupation Type for Counties in Upper East Tennessee 2018")
occ_tract_data_table_18
occ_county_data_table_18
##service occupations map (county layer hides its legend; tract layer shows it)
occ_tract_map_names <- c("Occupation Type", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
occ_county_map_names <- c("Occupation Type", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_occ_county_18) <- occ_county_map_names
names(trimmed_df_occ_tract_18) <- occ_tract_map_names
percent_occ_county <- trimmed_df_occ_county_18 %>%
  filter(`Occupation Type` == "Total Service Occupations")
percent_occ_tract <- trimmed_df_occ_tract_18 %>%
  filter(`Occupation Type` == "Total Service Occupations")
occ_map <- mapview(list(percent_occ_county, percent_occ_tract),
                   zcol = list("Percent", "Percent"),
                   layer.name = list("Percent Service Occupations County", "Percent Service Occupations Tract"),
                   legend = list(FALSE, TRUE))
occ_map
# Industry type (ACS DP03, 2018) ----
# Variable IDs for counts, matching percent IDs, and display labels
# (labels are cbind-ed positionally, so the three vectors must stay aligned).
industry_vars_18 <- c("DP03_0033", "DP03_0034", "DP03_0035", "DP03_0036", "DP03_0037", "DP03_0038", "DP03_0039",
                      "DP03_0040", "DP03_0041", "DP03_0042", "DP03_0043", "DP03_0044", "DP03_0045", "DP03_0048", "DP03_0049")
industry_percent_vars_18 <- c("DP03_0033P", "DP03_0034P", "DP03_0035P", "DP03_0036P", "DP03_0037P", "DP03_0038P", "DP03_0039P",
                              "DP03_0040P", "DP03_0041P", "DP03_0042P", "DP03_0043P", "DP03_0044P", "DP03_0045P", "DP03_0048P", "DP03_0049P")
industry_vars_names_18 <- c("Agriculture, Forestry, Fishing and Hunting, Mining", "Construction", "Manufacturing",
                            "Wholesale Trade", "Retail Trade", "Transportation and Warehousing, Utilities", "Information",
                            "Finance and Insurance, Real Estate, Rental and Leasing", "Professional, Sceintific, and Mangagement, Administrative and Waste Management Services",
                            "Educational Services, Healthcare and Social Assistance", "Arts, Entertainment, Recreation and Accomodation and Food Services",
                            "Other", "Public Administration", "Government Workers", "Self Employed in Own Business")
# NOTE(review): label typos ("Sceintific", "Mangagement", "Accomodation") left
# as-is — these strings may be filtered on elsewhere in the file; fix globally.
# Pull count and percent estimates at tract and county level
df_industry_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% industry_vars_18)
df_industry_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% industry_vars_18)
df_industry_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% industry_percent_vars_18)
df_industry_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% industry_percent_vars_18)
##combine the counts and percents into one table
df_industry_tract_18 <- cbind(industry_vars_names_18, df_industry_tract_counts_18, df_industry_tract_percent_18)
df_industry_county_18 <- cbind(industry_vars_names_18, df_industry_county_counts_18, df_industry_county_percent_18)
trimmed_df_industry_tract_18 <- df_industry_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_industry_county_18 <- df_industry_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
##get rid of location data for the tract/county (drop sf geometry for plain tables)
data_table_industry_tract_18 <- st_set_geometry(trimmed_df_industry_tract_18, NULL)
data_table_industry_county_18 <- st_set_geometry(trimmed_df_industry_county_18, NULL)
##rename the columns
industry_table_tract_names <- c("Industry Type", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
industry_table_county_names <- c("Industry Type", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_industry_tract_18) <- industry_table_tract_names
names(data_table_industry_county_18) <- industry_table_county_names
##make the data tables
industry_tract_data_table_18 <- datatable(data_table_industry_tract_18, caption = "Industry Type of Census Tract Groups for Upper East Tennessee 2018")
industry_county_data_table_18 <- datatable(data_table_industry_county_18, caption = "Industry Type for Counties in Upper East Tennessee 2018")
industry_tract_data_table_18
industry_county_data_table_18
##self-employment map (comment previously mislabeled this "service industryupations")
industry_tract_map_names <- c("Industry Type", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
industry_county_map_names <- c("Industry Type", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_industry_county_18) <- industry_county_map_names
names(trimmed_df_industry_tract_18) <- industry_tract_map_names
percent_self_employed_county <- trimmed_df_industry_county_18 %>%
  filter(`Industry Type` == "Self Employed in Own Business")
percent_self_employed_tract <- trimmed_df_industry_tract_18 %>%
  filter(`Industry Type` == "Self Employed in Own Business")
self_employed_map <- mapview(list(percent_self_employed_county, percent_self_employed_tract),
                             zcol = list("Percent", "Percent"),
                             layer.name = list("Percent Self Employed County", "Percent Self Employed Tract"),
                             legend = list(FALSE, TRUE))
self_employed_map
## Income and benefits (ACS DP03, 2018) ----
# Variable IDs for counts, matching percent IDs, and display labels
# (labels are cbind-ed positionally, so the three vectors must stay aligned).
income_vars_18 <- c("DP03_0051", "DP03_0052", "DP03_0053", "DP03_0054", "DP03_0055", "DP03_0056", "DP03_0057", "DP03_0058", "DP03_0059", "DP03_0060", "DP03_0061", "DP03_0062",
                    "DP03_0063", "DP03_0064", "DP03_0065", "DP03_0066", "DP03_0067", "DP03_0072", "DP03_0073", "DP03_0074", "DP03_0088", "DP03_0092", "DP03_0093", "DP03_0094")
income_percent_vars_18 <- c("DP03_0051P", "DP03_0052P", "DP03_0053P", "DP03_0054P", "DP03_0055P", "DP03_0056P", "DP03_0057P", "DP03_0058P", "DP03_0059P", "DP03_0060P", "DP03_0061P", "DP03_0062P",
                            "DP03_0063P", "DP03_0064P", "DP03_0065P", "DP03_0066P", "DP03_0067P", "DP03_0072P", "DP03_0073P", "DP03_0074P", "DP03_0088P", "DP03_0092P", "DP03_0093P", "DP03_0094P")
income_vars_names_18 <- c("Total Households", "Household Income: Less than 10,000", "Household Income: 10,000-14,999", "Household Income: 15,000-24,999", "Household Income: 25,000-34,999", "Household Income: 35,000-49,000",
                          "Household Income: 50,000-74,999", "Household Income: 75,000-99,999", "Household Income: 100,000-149,999", "Household Income: 150,000-199,999", "Household Income: 200,000 and Up", "Median Household Income",
                          "Mean Household Income", "Total Households with Earnings", "Mean Earnings of Total Households with Earnings", "Total Households with Social Security", "Mean Social Security Income of Households with Social Security Income",
                          "Total Households with Cash Public Assistance Income", "Mean Cash Public Assistance Income of Households with Public Assistance Income", "Total Households with Food Stamps or SNAP Benefits in past 12 Months",
                          "Per Capita Income", "Median Earnings for Workers", "Median Earnings for Male Full Time Workers", "Median Earnings for Female Full Time Workers")
# Pull count and percent estimates at tract and county level
df_income_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% income_vars_18)
df_income_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% income_vars_18)
df_income_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% income_percent_vars_18)
df_income_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% income_percent_vars_18)
##combine the counts and percents into one table
df_income_tract_18 <- cbind(income_vars_names_18, df_income_tract_counts_18, df_income_tract_percent_18)
df_income_county_18 <- cbind(income_vars_names_18, df_income_county_counts_18, df_income_county_percent_18)
trimmed_df_income_tract_18 <- df_income_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_income_county_18 <- df_income_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
##get rid of location data for the tract/county (drop sf geometry for plain tables)
data_table_income_tract_18 <- st_set_geometry(trimmed_df_income_tract_18, NULL)
data_table_income_county_18 <- st_set_geometry(trimmed_df_income_county_18, NULL)
##rename the columns
income_table_tract_names <- c("Income Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
income_table_county_names <- c("Income Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_income_tract_18) <- income_table_tract_names
names(data_table_income_county_18) <- income_table_county_names
##make the data tables
income_tract_data_table_18 <- datatable(data_table_income_tract_18, caption = "Income Measures of Census Tract Groups for Upper East Tennessee 2018")
income_county_data_table_18 <- datatable(data_table_income_county_18, caption = "Income Measures for Counties in Upper East Tennessee 2018")
income_tract_data_table_18
income_county_data_table_18
##median household income map ----
# Rename map-layer columns (sf objects keep their geometry column)
income_tract_map_names <- c("Income Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
income_county_map_names <- c("Income Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_income_county_18) <- income_county_map_names
names(trimmed_df_income_tract_18) <- income_tract_map_names
median_household_income_county <- trimmed_df_income_county_18 %>%
  filter(`Income Measure` == "Median Household Income")
median_household_income_tract <- trimmed_df_income_tract_18 %>%
  filter(`Income Measure` == "Median Household Income")
# "Count" column holds the dollar estimate for median/mean income measures
median_household_income_map <- mapview(list(median_household_income_county, median_household_income_tract),
                                       zcol = list("Count", "Count"),
                                       layer.name = list("Median Household Income County", "Median Household Income Tract"),
                                       legend = list(FALSE, TRUE))
median_household_income_map
#mean household income
mean_household_income_county <- trimmed_df_income_county_18 %>%
  filter(`Income Measure` == "Mean Household Income")
mean_household_income_tract <- trimmed_df_income_tract_18 %>%
  filter(`Income Measure` == "Mean Household Income")
# NOTE(review): trimmed_income_kpt_18_for_map is not defined in this section
# (presumably a Kingsport object created earlier in the file — verify), and
# mean_household_income_kpt is never used in the map below — possibly dead code.
mean_household_income_kpt <- trimmed_income_kpt_18_for_map %>%
  filter(`Income Measure` == "Mean Household Income")
mean_household_income_map <- mapview(list(mean_household_income_county, mean_household_income_tract),
                                     zcol = list("Count", "Count"),
                                     layer.name = list("Mean Household Income County", "Mean Household Income Tract"),
                                     legend = list(FALSE, TRUE))
mean_household_income_map
#combined map: overlay median and mean income layers in one mapview widget
income_spread_map <- median_household_income_map + mean_household_income_map
income_spread_map
#SNAP: households receiving Food Stamps / SNAP in the past 12 months ----
snap_household_income_county <- trimmed_df_income_county_18 %>%
  filter(`Income Measure` == "Total Households with Food Stamps or SNAP Benefits in past 12 Months")
snap_household_income_tract <- trimmed_df_income_tract_18 %>%
  filter(`Income Measure` == "Total Households with Food Stamps or SNAP Benefits in past 12 Months")
snap_household_income_map <- mapview(list(snap_household_income_county, snap_household_income_tract),
                                     zcol = list("Percent", "Percent"),
                                     layer.name = list("Percent SNAP Households County", "Percent SNAP Households Tract"),
                                     legend = list(FALSE, TRUE))
snap_household_income_map
#below 10,000: households with annual income under $10k
b10_household_income_county <- trimmed_df_income_county_18 %>%
  filter(`Income Measure` == "Household Income: Less than 10,000")
b10_household_income_tract <- trimmed_df_income_tract_18 %>%
  filter(`Income Measure` == "Household Income: Less than 10,000")
b10_household_income_map <- mapview(list(b10_household_income_county, b10_household_income_tract),
                                    zcol = list("Percent", "Percent"),
                                    layer.name = list("Percent of Households with Income Below 10,000 County", "Percent of Households with Income Below 10,000 Tract"),
                                    legend = list(FALSE, TRUE))
b10_household_income_map
#social security: households receiving Social Security income
ss_household_income_county <- trimmed_df_income_county_18 %>%
  filter(`Income Measure` == "Total Households with Social Security")
ss_household_income_tract <- trimmed_df_income_tract_18 %>%
  filter(`Income Measure` == "Total Households with Social Security")
ss_household_income_map <- mapview(list(ss_household_income_county, ss_household_income_tract),
                                   zcol = list("Percent", "Percent"),
                                   layer.name = list("Percent of Households with Social Security County", "Percent of Households with Social Security Tract"),
                                   legend = list(FALSE, TRUE))
ss_household_income_map
#cash assistance: households receiving cash public assistance income
cash_household_income_county <- trimmed_df_income_county_18 %>%
  filter(`Income Measure` == "Total Households with Cash Public Assistance Income")
cash_household_income_tract <- trimmed_df_income_tract_18 %>%
  filter(`Income Measure` == "Total Households with Cash Public Assistance Income")
cash_household_income_map <- mapview(list(cash_household_income_county, cash_household_income_tract),
                                     zcol = list("Percent", "Percent"),
                                     layer.name = list("Percent of Households with Cash Public Assistance County", "Percent of Households with Cash Public Assistance Tract"),
                                     legend = list(FALSE, TRUE))
cash_household_income_map
#target map: overlay all four assistance/low-income layers in one widget
target_map <- ss_household_income_map + cash_household_income_map + b10_household_income_map + snap_household_income_map
target_map
# Health insurance coverage (ACS DP03, 2018) ----
# Variable IDs for counts, matching percent IDs, and display labels
# (labels are cbind-ed positionally, so the three vectors must stay aligned).
insurance_vars_18 <- c("DP03_0095", "DP03_0096", "DP03_0097", "DP03_0098", "DP03_0099", "DP03_0100", "DP03_0101",
                       "DP03_0102", "DP03_0104", "DP03_0105", "DP03_0106", "DP03_0107", "DP03_0108", "DP03_0109",
                       "DP03_0110", "DP03_0111", "DP03_0112", "DP03_0113")
insurance_percent_vars_18 <- c("DP03_0095P", "DP03_0096P", "DP03_0097P", "DP03_0098P", "DP03_0099P", "DP03_0100P", "DP03_0101P",
                               "DP03_0102P", "DP03_0104P", "DP03_0105P", "DP03_0106P", "DP03_0107P", "DP03_0108P", "DP03_0109P",
                               "DP03_0110P", "DP03_0111P", "DP03_0112P", "DP03_0113P")
insurance_vars_names_18 <- c("Total Civilian Non-Institutionalized Population", "Total Population with Health Insurance", "Total Population with Private Health Insurance",
                             "Total Population with Public Health Insurance", "Total Population with No Health Insurance", "Total Population under 19 Years of Age",
                             "Total Population under 19 Years of Age Without Health Insurance", "Total Population Aged 19-64", "Total Population Aged 19-64 Employed",
                             "Total Population Aged 19-64 Employed with Health Insurance", "Total Population Aged 19-64 Employed with Private Health Insurance", "Total Population Aged 19-64 Employed with Public Health Insurance",
                             "Total Population Aged 19-64 Employed with No Health Insurance", "Total Population Aged 19-64 Unemployed", "Total Population Aged 19-64 Unemployed with Health Insurance",
                             "Total Population Aged 19-64 Unemployed with Private Health Insurance", "Total Population Aged 19-64 Unemployed with Public Health Insurance", "Total Population Aged 19-64 Unemployed with No Health Insurance")
# Pull count and percent estimates at tract and county level
df_insurance_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% insurance_vars_18)
df_insurance_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% insurance_vars_18)
df_insurance_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% insurance_percent_vars_18)
df_insurance_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% insurance_percent_vars_18)
##combine the counts and percents into one table
df_insurance_tract_18 <- cbind(insurance_vars_names_18, df_insurance_tract_counts_18, df_insurance_tract_percent_18)
df_insurance_county_18 <- cbind(insurance_vars_names_18, df_insurance_county_counts_18, df_insurance_county_percent_18)
trimmed_df_insurance_tract_18 <- df_insurance_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_insurance_county_18 <- df_insurance_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
##get rid of location data for the tract/county (drop sf geometry for plain tables)
data_table_insurance_tract_18 <- st_set_geometry(trimmed_df_insurance_tract_18, NULL)
data_table_insurance_county_18 <- st_set_geometry(trimmed_df_insurance_county_18, NULL)
##rename the columns
insurance_table_tract_names <- c("Insurance Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
insurance_table_county_names <- c("Insurance Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_insurance_tract_18) <- insurance_table_tract_names
names(data_table_insurance_county_18) <- insurance_table_county_names
##make the data tables
insurance_tract_data_table_18 <- datatable(data_table_insurance_tract_18, caption = "Insurance Measures of Census Tract Groups for Upper East Tennessee 2018")
insurance_county_data_table_18 <- datatable(data_table_insurance_county_18, caption = "Insurance Measures for Counties in Upper East Tennessee 2018")
insurance_tract_data_table_18
insurance_county_data_table_18
## insurance maps ----
# Rename map-layer columns (sf objects keep their geometry column)
insurance_tract_map_names <- c("Insurance Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
insurance_county_map_names <- c("Insurance Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_insurance_county_18) <- insurance_county_map_names
names(trimmed_df_insurance_tract_18) <- insurance_tract_map_names
#uninsured map
uninsured_county <- trimmed_df_insurance_county_18 %>%
  filter(`Insurance Measure` == "Total Population with No Health Insurance")
uninsured_tract <- trimmed_df_insurance_tract_18 %>%
  filter(`Insurance Measure` == "Total Population with No Health Insurance")
# FIX: zcol previously had three entries (list("Percent", "Percent", NULL))
# for a two-layer list — leftover from a removed third layer; trimmed to two.
uninsured_map <- mapview(list(uninsured_county, uninsured_tract),
                         zcol = list("Percent", "Percent"),
                         layer.name = list("Percent Uninsured County", "Percent Uninsured Tract"),
                         legend = list(FALSE, TRUE))
uninsured_map
#public health insurance
public_county <- trimmed_df_insurance_county_18 %>%
  filter(`Insurance Measure` == "Total Population with Public Health Insurance")
public_tract <- trimmed_df_insurance_tract_18 %>%
  filter(`Insurance Measure` == "Total Population with Public Health Insurance")
public_map <- mapview(list(public_county, public_tract),
                      zcol = list("Percent", "Percent"),
                      layer.name = list("Percent Population with Public Health Insurance County", "Percent Population with Public Health Insurance Tract"),
                      legend = list(FALSE, TRUE))
public_map
# Poverty (ACS DP03, 2018) ----
# Variable IDs, matching percent IDs, and display labels; these DP03 poverty
# measures are themselves expressed as percentages in the ACS data profile.
# (labels are cbind-ed positionally, so the three vectors must stay aligned).
poverty_vars_18 <- c("DP03_0119", "DP03_0120", "DP03_0121", "DP03_0122", "DP03_0123", "DP03_0124", "DP03_0125", "DP03_0126",
                     "DP03_0127", "DP03_0128", "DP03_0129", "DP03_0131", "DP03_0132", "DP03_0133", "DP03_0134", "DP03_0135")
poverty_percent_vars_18 <- c("DP03_0119P", "DP03_0120P", "DP03_0121P", "DP03_0122P", "DP03_0123P", "DP03_0124P", "DP03_0125P", "DP03_0126P",
                             "DP03_0127P", "DP03_0128P", "DP03_0129P", "DP03_0131P", "DP03_0132P", "DP03_0133P", "DP03_0134P", "DP03_0135P")
poverty_vars_names_18 <- c("Percentage of All Families Below Poverty Line past 12 Months", "Percentage of All Families Below Poverty Level past 12 Months, with Children under 18 Years Old",
                           "Percentage of All Families Below Poverty Level past 12 Months with Children under 5 Years Old", "Percentage of All Married Families Below Poverty Level past 12 Months ",
                           "Percentage of All Married Families Below Poverty Level past 12 Months, with Children below 18 Years Old", "Percentage of All Married Families Below Poverty Level past 12 Months, with Children under 5 Years Old",
                           "Percentage of All Families Below Poverty Level Past 12 Months Female, No Husband", "Percentage of All Families Below Poverty Level Past 12 Months Female, No Husband, with Children Below 18 Years Old",
                           "Percentage of All Families Below Poverty Level Past 12 Months Female, No Husband, with Children Below 5 Years Old", "Percentage of All People Below Poverty Level past 12 Months", "Percentage of All People Below Poverty Level past 12 Months Under 18 Years Old",
                           "Percentage of All People Below Poverty Level past 12 Months under 5 Years Old", "Percentage of All People Below Poverty Level past 12 Months Aged 5-17", "Percentage of All People Below Poverty Level past 12 Months Over 18 Years Old",
                           "Percentage of All People Below Poverty Level past 12 Months Aged 18-64", "Percentage of All People Below Poverty Level past 12 Months Over 65 Years Old")
# Pull count and percent estimates at tract and county level
df_poverty_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% poverty_vars_18)
df_poverty_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% poverty_vars_18)
df_poverty_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% poverty_percent_vars_18)
df_poverty_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% poverty_percent_vars_18)
##combine the counts and percents into one table
df_poverty_tract_18 <- cbind(poverty_vars_names_18, df_poverty_tract_counts_18, df_poverty_tract_percent_18)
df_poverty_county_18 <- cbind(poverty_vars_names_18, df_poverty_county_counts_18, df_poverty_county_percent_18)
trimmed_df_poverty_tract_18 <- df_poverty_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_poverty_county_18 <- df_poverty_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
##get rid of location data for the tract/county (drop sf geometry for plain tables)
data_table_poverty_tract_18 <- st_set_geometry(trimmed_df_poverty_tract_18, NULL)
data_table_poverty_county_18 <- st_set_geometry(trimmed_df_poverty_county_18, NULL)
##rename the columns
poverty_table_tract_names <- c("Poverty Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
poverty_table_county_names <- c("Poverty Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_poverty_tract_18) <- poverty_table_tract_names
names(data_table_poverty_county_18) <- poverty_table_county_names
##make the data tables
poverty_tract_data_table_18 <- datatable(data_table_poverty_tract_18, caption = "Poverty Measures of Census Tract Groups for Upper East Tennessee 2018")
poverty_county_data_table_18 <- datatable(data_table_poverty_county_18, caption = "Poverty Measures for Counties in Upper East Tennessee 2018")
poverty_tract_data_table_18
poverty_county_data_table_18
#poverty map
poverty_tract_map_names <- c("Poverty Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
poverty_county_map_names <- c("Poverty Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_poverty_county_18) <- poverty_county_map_names
names(trimmed_df_poverty_tract_18) <- poverty_tract_map_names
###map for poverty stuff###
u5_family_poverty_county <- trimmed_df_poverty_county_18 %>%
filter(`Poverty Measure` == "Percentage of All Families Below Poverty Level past 12 Months with Children under 5 Years Old")
u5_family_poverty_tract <- trimmed_df_poverty_tract_18 %>%
filter(`Poverty Measure` == "Percentage of All Families Below Poverty Level past 12 Months with Children under 5 Years Old")
u5_family_poverty_map <- mapview(list(u5_family_poverty_county, u5_family_poverty_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Of Families with Children Under 5 In Poverty County", "Percent Of Families with Children Under 5 In Poverty Tract"),
legend = list(FALSE, TRUE))
u5_family_poverty_map
#percent of all people below the poverty line
all_poverty_county <- trimmed_df_poverty_county_18 %>%
filter(`Poverty Measure` == "Percentage of All People Below Poverty Level past 12 Months")
all_poverty_tract <- trimmed_df_poverty_tract_18 %>%
filter(`Poverty Measure` == "Percentage of All People Below Poverty Level past 12 Months")
all_poverty_map <- mapview(list(all_poverty_county, all_poverty_tract),
zcol = list("Percent", "Percent"),
layer.name = list("Percent Of All People In Poverty County", "Percent Of All People In Poverty Tract"),
legend = list(FALSE, TRUE))
all_poverty_map
#### Housing Characteristics ####
# ACS DP04 variables: unit counts, vacancy rates, structure age, tenure,
# household size, and year householder moved in.
housing_units_vars_18 <- c("DP04_0001", "DP04_0002", "DP04_0003", "DP04_0004", "DP04_0005", "DP04_0014", "DP04_0017", "DP04_0018", "DP04_0019", "DP04_0020", "DP04_0021", "DP04_0022",
                           "DP04_0023", "DP04_0024", "DP04_0025", "DP04_0026", "DP04_0037", "DP04_0046", "DP04_0047", "DP04_0048", "DP04_0049", "DP04_0051", "DP04_0052", "DP04_0053",
                           "DP04_0054", "DP04_0055", "DP04_0056")
# The same variables with the "P" suffix are the percent estimates.
housing_units_percent_vars_18 <- c("DP04_0001P", "DP04_0002P", "DP04_0003P", "DP04_0004P", "DP04_0005P", "DP04_0014P", "DP04_0017P", "DP04_0018P", "DP04_0019P", "DP04_0020P", "DP04_0021P", "DP04_0022P",
                                   "DP04_0023P", "DP04_0024P", "DP04_0025P", "DP04_0026P", "DP04_0037P", "DP04_0046P", "DP04_0047P", "DP04_0048P", "DP04_0049P", "DP04_0051P", "DP04_0052P", "DP04_0053P",
                                   "DP04_0054P", "DP04_0055P", "DP04_0056P")
# Human-readable labels, parallel to housing_units_vars_18.
# NOTE(review): "Average Houshold" typo is preserved as-is (display label).
housing_units_vars_names_18 <- c("Total Housing Units", "Occupied Housing Units", "Vacant Housing Units", "Homeowner Vacancy Rate", "Rental Vacancy Rate", "Mobile Homes", "Homes Built 2014 or Later",
                                 "Homes Built 2010-2013", "Homes Built 2000-2009", "Homes Built 1990-1999", "Homes Built 1980-1989", "Homes Built 1970-1979", "Homes Built 1960-1969", "Homes Built 1950-1959",
                                 "Homes Built 1940-1949", "Homes Built 1939 or Earlier", "Median Number of Rooms per Unit", "Owner Occupied Housing Units", "Renter Occupied Housing Units", "Average Houshold Size of Owner Occupied Units",
                                 "Average Household Size of Renter Occupied Units", "Occupied Housing Units Moved Into: 2017 or Later", "Occupied Housing Units Moved Into: 2015-2016", "Occupied Housing Units Moved Into: 2010-2014",
                                 "Occupied Housing Units Moved Into: 2000-2009", "Occupied Housing Units Moved Into: 1990-1999", "Occupied Housing Units Moved Into: 1989 and Earlier")
# Subset counts and percents for tracts and counties.
df_housing_units_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% housing_units_vars_18)
df_housing_units_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% housing_units_vars_18)
df_housing_units_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% housing_units_percent_vars_18)
df_housing_units_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% housing_units_percent_vars_18)
##combine the percents into one table
df_housing_units_tract_18 <- cbind(housing_units_vars_names_18, df_housing_units_tract_counts_18, df_housing_units_tract_percent_18)
df_housing_units_county_18 <- cbind(housing_units_vars_names_18, df_housing_units_county_counts_18, df_housing_units_county_percent_18)
# Keep (by position): name, location, count est/MOE, percent est/MOE.
trimmed_df_housing_units_tract_18 <- df_housing_units_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_housing_units_county_18 <- df_housing_units_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## drop the geometry column for the DT tables
data_table_housing_units_tract_18 <- st_set_geometry(trimmed_df_housing_units_tract_18, NULL)
data_table_housing_units_county_18 <- st_set_geometry(trimmed_df_housing_units_county_18, NULL)
##rename the columns for display
housing_units_table_tract_names <- c("Housing Unit Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
housing_units_table_county_names <- c("Housing Unit Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_housing_units_tract_18) <- housing_units_table_tract_names
names(data_table_housing_units_county_18) <- housing_units_table_county_names
##make the DT tables and print them
housing_units_tract_data_table_18 <- datatable(data_table_housing_units_tract_18, caption = "Housing Unit Measures of Census Tract Groups for Upper East Tennessee 2018")
housing_units_county_data_table_18 <- datatable(data_table_housing_units_county_18, caption = "Housing Unit Measures for Counties in Upper East Tennessee 2018")
housing_units_tract_data_table_18
housing_units_county_data_table_18
## housing unit maps: rename sf versions with map-friendly columns
housing_units_tract_map_names <- c("Housing Unit Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
housing_units_county_map_names <- c("Housing Unit Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_housing_units_county_18) <- housing_units_county_map_names
names(trimmed_df_housing_units_tract_18) <- housing_units_tract_map_names
#renter map
renter_county <- trimmed_df_housing_units_county_18 %>%
  filter(`Housing Unit Measure` == "Renter Occupied Housing Units")
renter_tract <- trimmed_df_housing_units_tract_18 %>%
  filter(`Housing Unit Measure` == "Renter Occupied Housing Units")
renter_map <- mapview(list(renter_county, renter_tract),
                      zcol = list("Percent", "Percent"),
                      layer.name = list("Percent of Renter Occupied Units County", "Percent of Renter Occupied Units Tract"),
                      legend = list(FALSE, TRUE))
renter_map
#rental vacancy rate map
rental_county <- trimmed_df_housing_units_county_18 %>%
  filter(`Housing Unit Measure` == "Rental Vacancy Rate")
rental_tract <- trimmed_df_housing_units_tract_18 %>%
  filter(`Housing Unit Measure` == "Rental Vacancy Rate")
# The vacancy *rate* lives in the estimate ("Count") column, hence zcol="Count"
# here rather than "Percent" -- presumably intentional; TODO confirm.
rental_map <- mapview(list(rental_county, rental_tract),
                      zcol = list("Count", "Count"),
                      layer.name = list("Rental Vacancy Rate County", "Rental Vacancy Rate Tract"),
                      legend = list(FALSE, TRUE))
rental_map
#Housing Affordability
# ACS DP04 affordability variables: SMOCAPI (selected monthly owner costs as a
# percentage of income) and GRAPI (gross rent as a percentage of income)
# brackets, rent levels, and median rent.
afford_vars_18 <- c("DP04_0110", "DP04_0111", "DP04_0112", "DP04_0113", "DP04_0114", "DP04_0115", "DP04_0123", "DP04_0124", "DP04_0126", "DP04_0127", "DP04_0128",
                    "DP04_0129", "DP04_0130", "DP04_0134", "DP04_0137", "DP04_0138", "DP04_0139", "DP04_0140", "DP04_0141", "DP04_0142")
afford_percent_vars_18 <- c("DP04_0110P", "DP04_0111P", "DP04_0112P", "DP04_0113P", "DP04_0114P", "DP04_0115P", "DP04_0123P", "DP04_0124P", "DP04_0126P", "DP04_0127P", "DP04_0128P",
                            "DP04_0129P", "DP04_0130P", "DP04_0134P", "DP04_0137P", "DP04_0138P", "DP04_0139P", "DP04_0140P", "DP04_0141P", "DP04_0142P")
afford_vars_names_18 <- c("Housing Units with a Mortgage", "Housing Units with a Mortgage SMOCAPI Less than 20%", "Housing Units with a Mortgage SMOCAPI 20-24.9%", "Housing Units with a Mortgage SMOCAPI 25-29.9%",
                          "Housing Units with a Mortgage SMOCAPI 30-34.9%", "Housing Units with a Mortgage SMOCAPI 35% or More", "Housing Units Without a Mortgage SMOCAPI 30-34.9%", "Housing Units Without a Mortgage SMOCAPI 35% or More",
                          "Total Occupied Units Paying Rent", "Rent Less Than $500", "Rent $500-$999", "Rent $1000-$1499", "Rent $1500-$1999", "Median Rent", "GRAPI Less than 15%", "GRAPI 15-19.9%", "GRAPI 20-24.9%", "GRAPI 25-29.9%",
                          "GRAPI 30-34.9%", "GRAPI 35% or More")
# Subset counts and percents for tracts and counties.
df_afford_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% afford_vars_18)
df_afford_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% afford_vars_18)
df_afford_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% afford_percent_vars_18)
df_afford_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% afford_percent_vars_18)
##combine the percents into one table (column-bound; rows assumed parallel)
df_afford_tract_18 <- cbind(afford_vars_names_18, df_afford_tract_counts_18, df_afford_tract_percent_18)
df_afford_county_18 <- cbind(afford_vars_names_18, df_afford_county_counts_18, df_afford_county_percent_18)
# Keep (by position): name, location, count est/MOE, percent est/MOE.
trimmed_df_afford_tract_18 <- df_afford_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_afford_county_18 <- df_afford_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## drop the geometry column for the DT tables
data_table_afford_tract_18 <- st_set_geometry(trimmed_df_afford_tract_18, NULL)
data_table_afford_county_18 <- st_set_geometry(trimmed_df_afford_county_18, NULL)
##rename the columns for display
afford_table_tract_names <- c("Affordability Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
afford_table_county_names <- c("Affordability Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_afford_tract_18) <- afford_table_tract_names
names(data_table_afford_county_18) <- afford_table_county_names
# --- Derived "30% or more" rows (tract level) --------------------------------
# Combine the 30-34.9% and 35%+ brackets: counts/percents add; MOEs combine in
# quadrature (sqrt of sum of squares), per the Census Bureau's approximation.
tract_g1_18 <- data_table_afford_tract_18 %>%
  filter(`Affordability Measure` == "GRAPI 30-34.9%")
tract_g2_18 <- data_table_afford_tract_18 %>%
  filter(`Affordability Measure` == "GRAPI 35% or More")
tract_g3_18 <- left_join(tract_g1_18, tract_g2_18, by = "Census Tract")
tract_grapi30_18 <- tract_g3_18 %>%
  transmute("Affordability Measure" = "GRAPI 30% or More",
            "Census Tract" = `Census Tract`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Margin of Error.x`^2 + `Margin of Error.y`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent Margin of Error.x`^2 + `Percent Margin of Error.y`^2),2))
tract_s1_18 <- data_table_afford_tract_18 %>%
  filter(`Affordability Measure` == "Housing Units with a Mortgage SMOCAPI 30-34.9%")
tract_s2_18 <- data_table_afford_tract_18 %>%
  filter(`Affordability Measure` == "Housing Units with a Mortgage SMOCAPI 35% or More")
tract_s3_18 <- left_join(tract_s1_18, tract_s2_18, by = "Census Tract")
tract_smocapi30_18 <- tract_s3_18 %>%
  transmute("Affordability Measure" = "Housing Units with a Mortgage SMOCAPI 30% or More",
            "Census Tract" = `Census Tract`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Margin of Error.x`^2 + `Margin of Error.y`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent Margin of Error.x`^2 + `Percent Margin of Error.y`^2),2))
tract_x1_18 <- data_table_afford_tract_18 %>%
  filter(`Affordability Measure` == "Housing Units Without a Mortgage SMOCAPI 30-34.9%")
tract_x2_18 <- data_table_afford_tract_18 %>%
  filter(`Affordability Measure` == "Housing Units Without a Mortgage SMOCAPI 35% or More")
tract_x3_18 <- left_join(tract_x1_18, tract_x2_18, by = "Census Tract")
# BUG FIX: previously recomputed from tract_s3_18 (the WITH-mortgage join), so
# the "without a mortgage" rows silently duplicated the with-mortgage figures
# and tract_x3_18 was never used. Use the without-mortgage join built above.
tract_with_out_smocapi30_18 <- tract_x3_18 %>%
  transmute("Affordability Measure" = "Housing Units without a Mortgage SMOCAPI 30% or More",
            "Census Tract" = `Census Tract`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Margin of Error.x`^2 + `Margin of Error.y`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent Margin of Error.x`^2 + `Percent Margin of Error.y`^2),2))
data_table_afford_tract_18 <- rbind(data_table_afford_tract_18, tract_grapi30_18, tract_smocapi30_18, tract_with_out_smocapi30_18)
#county: same derived rows at the county level
county_g1_18 <- data_table_afford_county_18 %>%
  filter(`Affordability Measure` == "GRAPI 30-34.9%")
county_g2_18 <- data_table_afford_county_18 %>%
  filter(`Affordability Measure` == "GRAPI 35% or More")
county_g3_18 <- left_join(county_g1_18, county_g2_18, by = "County")
county_grapi30_18 <- county_g3_18 %>%
  transmute("Affordability Measure" = "GRAPI 30% or More",
            "County" = `County`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Margin of Error.x`^2 + `Margin of Error.y`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent Margin of Error.x`^2 + `Percent Margin of Error.y`^2),2))
county_s1_18 <- data_table_afford_county_18 %>%
  filter(`Affordability Measure` == "Housing Units with a Mortgage SMOCAPI 30-34.9%")
county_s2_18 <- data_table_afford_county_18 %>%
  filter(`Affordability Measure` == "Housing Units with a Mortgage SMOCAPI 35% or More")
county_s3_18 <- left_join(county_s1_18, county_s2_18, by = "County")
county_smocapi30_18 <- county_s3_18 %>%
  transmute("Affordability Measure" = "Housing Units with a Mortgage SMOCAPI 30% or More",
            "County" = `County`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Margin of Error.x`^2 + `Margin of Error.y`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent Margin of Error.x`^2 + `Percent Margin of Error.y`^2),2))
county_x1_18 <- data_table_afford_county_18 %>%
  filter(`Affordability Measure` == "Housing Units Without a Mortgage SMOCAPI 30-34.9%")
county_x2_18 <- data_table_afford_county_18 %>%
  filter(`Affordability Measure` == "Housing Units Without a Mortgage SMOCAPI 35% or More")
county_x3_18 <- left_join(county_x1_18, county_x2_18, by = "County")
# BUG FIX: same copy-paste error as the tract version -- was county_s3_18.
county_with_out_smocapi30_18 <- county_x3_18 %>%
  transmute("Affordability Measure" = "Housing Units without a Mortgage SMOCAPI 30% or More",
            "County" = `County`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Margin of Error.x`^2 + `Margin of Error.y`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent Margin of Error.x`^2 + `Percent Margin of Error.y`^2),2))
data_table_afford_county_18 <- rbind(data_table_afford_county_18, county_grapi30_18, county_smocapi30_18, county_with_out_smocapi30_18)
##make the DT tables and print them
afford_tract_data_table_18 <- datatable(data_table_afford_tract_18, caption = "Affordability Measures of Census Tract Groups for Upper East Tennessee 2018")
afford_county_data_table_18 <- datatable(data_table_afford_county_18, caption = "Affordability Measures for Counties in Upper East Tennessee 2018")
afford_tract_data_table_18
afford_county_data_table_18
#median rent map
# Map-friendly column names for the sf affordability data (geometry retained).
afford_tract_map_names <- c("Affordability Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
afford_county_map_names <- c("Affordability Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_afford_county_18) <- afford_county_map_names
names(trimmed_df_afford_tract_18) <- afford_tract_map_names
# Median rent lives in the estimate ("Count") column.
median_rent_county <- filter(trimmed_df_afford_county_18,
                             `Affordability Measure` == "Median Rent")
median_rent_tract <- filter(trimmed_df_afford_tract_18,
                            `Affordability Measure` == "Median Rent")
# County layer suppresses its legend; the tract layer carries it.
median_rent_map <- mapview(list(median_rent_county, median_rent_tract),
                           zcol = list("Count", "Count"),
                           layer.name = list("Median Rent County", "Median Rent Tract"),
                           legend = list(FALSE, TRUE))
median_rent_map
#grapi above 35% map
grapi35_county <- filter(trimmed_df_afford_county_18,
                         `Affordability Measure` == "GRAPI 35% or More")
grapi35_tract <- filter(trimmed_df_afford_tract_18,
                        `Affordability Measure` == "GRAPI 35% or More")
grapi35_map <- mapview(list(grapi35_county, grapi35_tract),
                       zcol = list("Percent", "Percent"),
                       layer.name = list("GRAPI Above 35% County", "GRAPI Above 35% Tract"),
                       legend = list(FALSE, TRUE))
grapi35_map
# Overlay both maps in a single view.
rent_afford_map <- median_rent_map + grapi35_map
rent_afford_map
####grapi 30+ map
# Build mappable (sf) versions of the derived "30% or more" rows.
# NOTE(review): each join pairs the sf data (columns "Count MOE"/"Percent MOE")
# with the plain data table (columns "Margin of Error"/"Percent Margin of
# Error"), so the two MOE columns keep distinct, un-suffixed names that the
# transmute() below can reference directly; geometry comes from the sf side.
afford_tract_map_names <- c("Affordability Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
afford_county_map_names <- c("Affordability Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_afford_county_18) <- afford_county_map_names
names(trimmed_df_afford_tract_18) <- afford_tract_map_names
# GRAPI >= 30% (tract): combine the 30-34.9% and 35%+ brackets; counts and
# percents add, MOEs combine in quadrature.
maptract_g1_18 <- trimmed_df_afford_tract_18 %>%
  filter(`Affordability Measure` == "GRAPI 30-34.9%")
maptract_g2_18 <- data_table_afford_tract_18 %>%
  filter(`Affordability Measure` == "GRAPI 35% or More")
maptract_g3_18 <- left_join(maptract_g1_18, maptract_g2_18, by = "Census Tract")
maptract_grapi30_18 <- maptract_g3_18 %>%
  transmute("Affordability Measure" = "GRAPI 30% or More",
            "Census Tract" = `Census Tract`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Count MOE`^2 + `Margin of Error`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent MOE`^2 + `Percent Margin of Error`^2),2))
# SMOCAPI >= 30%, units WITH a mortgage (tract).
maptract_s1_18 <- trimmed_df_afford_tract_18 %>%
  filter(`Affordability Measure` == "Housing Units with a Mortgage SMOCAPI 30-34.9%")
maptract_s2_18 <- data_table_afford_tract_18 %>%
  filter(`Affordability Measure` == "Housing Units with a Mortgage SMOCAPI 35% or More")
maptract_s3_18 <- left_join(maptract_s1_18, maptract_s2_18, by = "Census Tract")
maptract_smocapi30_18 <- maptract_s3_18 %>%
  transmute("Affordability Measure" = "Housing Units with a Mortgage SMOCAPI 30% or More",
            "Census Tract" = `Census Tract`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Count MOE`^2 + `Margin of Error`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent MOE`^2 + `Percent Margin of Error`^2),2))
# SMOCAPI >= 30%, units WITHOUT a mortgage (tract).
maptract_x1_18 <- trimmed_df_afford_tract_18 %>%
  filter(`Affordability Measure` == "Housing Units Without a Mortgage SMOCAPI 30-34.9%")
maptract_x2_18 <- data_table_afford_tract_18 %>%
  filter(`Affordability Measure` == "Housing Units Without a Mortgage SMOCAPI 35% or More")
maptract_x3_18 <- left_join(maptract_x1_18, maptract_x2_18, by = "Census Tract")
maptract_smocapi30_without_18 <- maptract_x3_18 %>%
  transmute("Affordability Measure" = "Housing Units without a Mortgage SMOCAPI 30% or More",
            "Census Tract" = `Census Tract`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Count MOE`^2 + `Margin of Error`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent MOE`^2 + `Percent Margin of Error`^2),2))
# Same three combinations at the county level.
mapcounty_g1_18 <- trimmed_df_afford_county_18 %>%
  filter(`Affordability Measure` == "GRAPI 30-34.9%")
mapcounty_g2_18 <- data_table_afford_county_18 %>%
  filter(`Affordability Measure` == "GRAPI 35% or More")
mapcounty_g3_18 <- left_join(mapcounty_g1_18, mapcounty_g2_18, by = "County")
mapcounty_grapi30_18 <- mapcounty_g3_18 %>%
  transmute("Affordability Measure" = "GRAPI 30% or More",
            "County" = `County`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Count MOE`^2 + `Margin of Error`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent MOE`^2 + `Percent Margin of Error`^2),2))
mapcounty_s1_18 <- trimmed_df_afford_county_18 %>%
  filter(`Affordability Measure` == "Housing Units with a Mortgage SMOCAPI 30-34.9%")
mapcounty_s2_18 <- data_table_afford_county_18 %>%
  filter(`Affordability Measure` == "Housing Units with a Mortgage SMOCAPI 35% or More")
mapcounty_s3_18 <- left_join(mapcounty_s1_18, mapcounty_s2_18, by = "County")
mapcounty_smocapi30_18 <- mapcounty_s3_18 %>%
  transmute("Affordability Measure" = "Housing Units with a Mortgage SMOCAPI 30% or More",
            "County" = `County`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Count MOE`^2 + `Margin of Error`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent MOE`^2 + `Percent Margin of Error`^2),2))
mapcounty_x1_18 <- trimmed_df_afford_county_18 %>%
  filter(`Affordability Measure` == "Housing Units Without a Mortgage SMOCAPI 30-34.9%")
mapcounty_x2_18 <- data_table_afford_county_18 %>%
  filter(`Affordability Measure` == "Housing Units Without a Mortgage SMOCAPI 35% or More")
mapcounty_x3_18 <- left_join(mapcounty_x1_18, mapcounty_x2_18, by = "County")
mapcounty_smocapi30_without_18 <- mapcounty_x3_18 %>%
  transmute("Affordability Measure" = "Housing Units Without a Mortgage SMOCAPI 30% or More",
            "County" = `County`,
            "Count" = Count.x + Count.y,
            "Margin of Error" = round(sqrt(`Count MOE`^2 + `Margin of Error`^2),2),
            "Percent" = Percent.x + Percent.y,
            "Percent Margin of Error" = round(sqrt(`Percent MOE`^2 + `Percent Margin of Error`^2),2))
# Render the combined layers; county layers suppress their legends.
grapi_above_30_map <- mapview(list(mapcounty_grapi30_18, maptract_grapi30_18),
                              zcol = list("Percent", "Percent"),
                              layer.name = list("GRAPI Above 30% County", "GRAPI Above 30% Tract"),
                              legend = list(FALSE, TRUE))
grapi_above_30_map
smocapi_above_30_map <- mapview(list(mapcounty_smocapi30_18, maptract_smocapi30_18),
                                zcol = list("Percent", "Percent"),
                                layer.name = list("SMOCAPI Above 30% County", "SMOCAPI Above 30% Tract"),
                                legend = list(FALSE, TRUE))
smocapi_above_30_map
# transportation per housing unit
# Vehicles available per occupied housing unit (ACS DP04_0057 - DP04_0061).
transp_housing_vars_18 <- c("DP04_0057", "DP04_0058", "DP04_0059", "DP04_0060", "DP04_0061")
transp_housing_percent_vars_18 <- c("DP04_0057P", "DP04_0058P", "DP04_0059P", "DP04_0060P", "DP04_0061P")
# FIX: corrected "Untis" -> "Units" typo in the two-vehicle display label.
transp_housing_vars_names_18 <- c("Total Occupied Housing Units", "Occupied Housing Units with Zero Vehicles Available",
                                  "Occupied Housing Units with One Vehicle Available", "Occupied Housing Units with Two Vehicles Available",
                                  "Occupied Housing Units with Three or More Vehicles Available")
# Subset counts and percents for tracts and counties.
df_transp_housing_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% transp_housing_vars_18)
df_transp_housing_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% transp_housing_vars_18)
df_transp_housing_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% transp_housing_percent_vars_18)
# BUG FIX: the filter() step was missing here, leaving the pipe dangling into
# the next assignment (an error) and never creating the county percent data.
df_transp_housing_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% transp_housing_percent_vars_18)
##combine the percents into one table
df_transp_housing_tract_18 <- cbind(transp_housing_vars_names_18, df_transp_housing_tract_counts_18, df_transp_housing_tract_percent_18)
df_transp_housing_county_18 <- cbind(transp_housing_vars_names_18, df_transp_housing_county_counts_18, df_transp_housing_county_percent_18)
# Keep (by position): name, location, count est/MOE, percent est/MOE.
trimmed_df_transp_housing_tract_18 <- df_transp_housing_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_transp_housing_county_18 <- df_transp_housing_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## drop the geometry column for the DT tables
data_table_transp_housing_tract_18 <- st_set_geometry(trimmed_df_transp_housing_tract_18, NULL)
data_table_transp_housing_county_18 <- st_set_geometry(trimmed_df_transp_housing_county_18, NULL)
##rename the columns for display
transp_housing_table_tract_names <- c("Housing Unit Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
transp_housing_table_county_names <- c("Housing Unit Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_transp_housing_tract_18) <- transp_housing_table_tract_names
names(data_table_transp_housing_county_18) <- transp_housing_table_county_names
##make the DT tables and print them
transp_housing_tract_data_table_18 <- datatable(data_table_transp_housing_tract_18, caption = "Transportation for Housing Unit Measures of Census Tract Groups for Upper East Tennessee 2018")
transp_housing_county_data_table_18 <- datatable(data_table_transp_housing_county_18, caption = "Transportation for Housing Unit Measures for Counties in Upper East Tennessee 2018")
transp_housing_tract_data_table_18
transp_housing_county_data_table_18
# Map-friendly column names for the sf versions (geometry retained).
transp_housing_tract_map_names <- c("Housing Unit Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
transp_housing_county_map_names <- c("Housing Unit Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_transp_housing_county_18) <- transp_housing_county_map_names
names(trimmed_df_transp_housing_tract_18) <- transp_housing_tract_map_names
#zero vehicles map
zero_vehicles_county <- trimmed_df_transp_housing_county_18 %>%
  filter(`Housing Unit Measure` == "Occupied Housing Units with Zero Vehicles Available")
zero_vehicles_tract <- trimmed_df_transp_housing_tract_18 %>%
  filter(`Housing Unit Measure` == "Occupied Housing Units with Zero Vehicles Available")
# BUG FIX: layer.name/legend previously listed three entries (a leftover
# "City" layer) for only two map layers; trimmed to match the two layers.
zero_vehicles_map <- mapview(list(zero_vehicles_county, zero_vehicles_tract),
                             zcol = list("Percent", "Percent"),
                             layer.name = list("Percent of Occupied Housing Units with Zero Vehicles Available County", "Percent of Occupied Housing Units with Zero Vehicles Available Tract"),
                             legend = list(FALSE, TRUE))
zero_vehicles_map
#heating
# Heating fuel plus miscellaneous occupancy measures (plumbing, kitchen,
# telephone, overcrowding) from ACS DP04.
heating_vars_18 <- c("DP04_0062", "DP04_0063", "DP04_0064", "DP04_0065", "DP04_0066", "DP04_0067", "DP04_0068", "DP04_0069",
                     "DP04_0070", "DP04_0071", "DP04_0073", "DP04_0074", "DP04_0075", "DP04_0079")
heating_percent_vars_18 <- c("DP04_0062P", "DP04_0063P", "DP04_0064P", "DP04_0065P", "DP04_0066P", "DP04_0067P", "DP04_0068P", "DP04_0069P",
                             "DP04_0070P", "DP04_0071P", "DP04_0073P", "DP04_0074P", "DP04_0075P", "DP04_0079P")
heating_vars_names_18 <- c("Total Occupied Housing Units", "Occupied Housing Units Heated by Utility Gas", "Occupied Housing Units Heated by Bottled, Tank, or LP Gas",
                           "Occupied Housing Units Heated by Electric Heating", "Occupied Housing Units Heated by Fuel Oil or Kerosene", "Occupied Housing Units Heated by Coal",
                           "Occupied Housing Units Heated by Wood", "Occupied Housing Units Heated by Solar", "Occupied Housing Units Heated by Other Fuel", "Occupied Housing Units No Heat Source",
                           "Occupied Housing Units Lacking Complete Plumbing", "Occupied Housing Units Lacking a Complete Kitchen", "Occupied Housing Units with No Telephone Service Available",
                           "Occupied Housing Units with Greater than 1.51 Occupants per Room")
# Subset counts and percents for tracts and counties.
df_heating_tract_counts_18 <- dp_2018 %>%
  filter(variable %in% heating_vars_18)
df_heating_county_counts_18 <- County_dp_2018 %>%
  filter(variable %in% heating_vars_18)
df_heating_tract_percent_18 <- dp_2018 %>%
  filter(variable %in% heating_percent_vars_18)
df_heating_county_percent_18 <- County_dp_2018 %>%
  filter(variable %in% heating_percent_vars_18)
##combine the percents into one table
df_heating_tract_18 <- cbind(heating_vars_names_18, df_heating_tract_counts_18, df_heating_tract_percent_18)
df_heating_county_18 <- cbind(heating_vars_names_18, df_heating_county_counts_18, df_heating_county_percent_18)
# Keep (by position): name, location, count est/MOE, percent est/MOE.
trimmed_df_heating_tract_18 <- df_heating_tract_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
trimmed_df_heating_county_18 <- df_heating_county_18[,c(1,3,5,6,12,13)] #these are the ones with the location data
## drop the geometry column for the DT tables
data_table_heating_tract_18 <- st_set_geometry(trimmed_df_heating_tract_18, NULL)
data_table_heating_county_18 <- st_set_geometry(trimmed_df_heating_county_18, NULL)
##rename the columns for display
heating_table_tract_names <- c("Housing Unit Measure", "Census Tract", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
heating_table_county_names <- c("Housing Unit Measure", "County", "Count", "Margin of Error", "Percent", "Percent Margin of Error")
names(data_table_heating_tract_18) <- heating_table_tract_names
names(data_table_heating_county_18) <- heating_table_county_names
##make the DT tables and print them
heating_tract_data_table_18 <- datatable(data_table_heating_tract_18, caption = "Heating and Misc Housing Unit Measures of Census Tract Groups for Upper East Tennessee 2018")
heating_county_data_table_18 <- datatable(data_table_heating_county_18, caption = "Heating and Misc Housing Unit Measures for Counties in Upper East Tennessee 2018")
heating_tract_data_table_18
heating_county_data_table_18
## heating / misc housing maps: map-friendly names for the sf versions
heating_tract_map_names <- c("Heating and Misc Housing Unit Measure", "Census Tract", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
heating_county_map_names <- c("Heating and Misc Housing Unit Measure", "County", "Count", "Count MOE", "Percent", "Percent MOE", "geometry")
names(trimmed_df_heating_county_18) <- heating_county_map_names
names(trimmed_df_heating_tract_18) <- heating_tract_map_names
#no heat source map
no_heat_source_county <- trimmed_df_heating_county_18 %>%
  filter(`Heating and Misc Housing Unit Measure` == "Occupied Housing Units No Heat Source")
no_heat_source_tract <- trimmed_df_heating_tract_18 %>%
  filter(`Heating and Misc Housing Unit Measure` == "Occupied Housing Units No Heat Source")
# County layer suppresses its legend; the tract layer carries it.
no_heat_source_map <- mapview(list(no_heat_source_county, no_heat_source_tract),
                              zcol = list("Percent", "Percent"),
                              layer.name = list("Percent of Occupied Housing Units with No Heat Source County", "Percent of Occupied Housing Units with No Heat Source Tract"),
                              legend = list(FALSE, TRUE))
no_heat_source_map
|
2c68414e9e23dd3ea2d1df2b305a6211d6bd4531
|
a85e536f8cbe2af99fab307509920955bd0fcf0a
|
/RBuildIgnore/removed-from-package/test-evalFormula.R
|
7c0a1271fa94d9dd51ccd72157532710409736e6
|
[] |
no_license
|
ProjectMOSAIC/mosaic
|
87ea45d46fb50ee1fc7088e42bd35263e3bda45f
|
a64f2422667bc5f0a65667693fcf86d921ac7696
|
refs/heads/master
| 2022-12-13T12:19:40.946670
| 2022-12-07T16:52:46
| 2022-12-07T16:52:46
| 3,154,501
| 71
| 27
| null | 2021-02-17T21:52:00
| 2012-01-11T14:58:31
|
HTML
|
UTF-8
|
R
| false
| false
| 229
|
r
|
test-evalFormula.R
|
# Supplying a subset expression directly to evalFormula() should be
# equivalent to pre-subsetting the data frame with subset().
context("evalFormula()")

test_that("subset works ", {
  direct <- evalFormula(age ~ sex, data = mosaicData::HELPrct, age > 50)
  manual <- evalFormula(age ~ sex, data = subset(mosaicData::HELPrct, age > 50))
  expect_equivalent(direct[1:3], manual[1:3])
})
|
b6217d1c98454dfee5e3a210aaceecd0037022a3
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/OTclust/man/otclust.Rd
|
a8b0577dc867ff7da05be9815be4e8e85945c4aa
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,598
|
rd
|
otclust.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/otclust.R
\name{otclust}
\alias{otclust}
\title{Mean partition by optimal transport alignment.}
\usage{
otclust(ensemble, idx = NULL)
}
\arguments{
\item{ensemble}{-- a matrix of ensemble partition. Use \code{ensemble()} to generate an ensemble of perturbed partitions.}
\item{idx}{-- an integer indicating the index of reference partition in \code{ensemble}. If not specified, median partition is used as the reference partition.}
}
\value{
a list of alignment results.
\item{idx}{the index of reference partition.}
\item{avedist}{average distances between each partition and all ensemble partitions.}
\item{meanpart}{a list of mean partition.}
\item{distance}{Wasserstein distances between mean partition and the others.}
\item{numcls}{the number of clusters for each partition.}
\item{statistics}{average tightness ratio, average coverage ratio, 1-average jaccard distance.}
\item{cap}{cluster alignment and points based (CAP) separability.}
\item{id}{switched labels.}
\item{cps}{covering point set.}
\item{match}{topological relationship statistics between the reference partition and the others.}
\item{Weight}{weight matrix.}
}
\description{
This function calculates the mean partition of an ensemble of partitions by optimal transport alignment and uncertainty/stability measures.
}
\examples{
data(sim1)
# the number of clusters.
C = 4
ens.data = ensemble(sim1$X[1:100,], nbs=10, clust_param=C, clustering="kmeans", perturb_method=1)
# find mean partition and uncertainty statistics.
ota = otclust(ens.data)
}
|
46f731589bc275b7a6a44a13e7f402f554c97593
|
9745a42ad7584ccc68137f7bacf951d3c94016bc
|
/R/methodObjPredict.R
|
335482829ddafb3616e1e50294d1eccad53abb57
|
[] |
no_license
|
cran/modelObj
|
b65383ed0619a835221280c102c5da9080a4bbf0
|
51d19b44de626ec114d6d9f0f019fb639ed60da7
|
refs/heads/master
| 2022-06-18T19:01:17.279906
| 2022-06-07T07:30:09
| 2022-06-07T07:30:09
| 37,216,508
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,634
|
r
|
methodObjPredict.R
|
#' @include methodObj.R
#
# Class \code{methodObjPredict}
#
# Extends class \code{methodObj} to indicate that the method is a prediction
# method (one that takes a fitted regression object and new data).
#
# @name methodObjPredict-class
#
# @slot newdataName A character giving the formal argument for input data.frame
# @slot objectName A character giving the formal argument for the input
#   regression object
# @slot propenMissing A character indicating if a treatment variable is missing
#   ("smallest" or "largest")
# @slot method ANY A character name or function (inherited from methodObj).
# @slot methodArgs A list of inputs to be passed to the method (inherited).
#
# @keywords internal
setClass("methodObjPredict",
         slots = c(newdataName = "character",
                   objectName = "character",
                   propenMissing = "character"),
         contains = c("methodObj"))
# Generic constructor for objects of class methodObjPredict.
#
# Dispatches on `args`, the user-supplied list of prediction-method arguments
# (see the NULL and list methods below); `method` is passed through `...`.
#
# @param args A list of input arguments (or NULL for defaults)
#
# @return An object of class methodObjPredict
#
# @name newMethodObjPredict
# @rdname newMethodObjPredict
#
# @keywords internal
setGeneric(".newMethodObjPredict",
           function(args, ...) standardGeneric(".newMethodObjPredict"))
# @rdname modelObj-internal-api
setMethod(f = ".newMethodObjPredict",
signature = c(args = 'ANY'),
definition = function(args, method) { stop("not allowed") })
# @rdname modelObj-internal-api
setMethod(f = ".newMethodObjPredict",
signature = c(args = 'NULL'),
definition = function(args, method) {
args <- list("object" = "object",
"newdata" = "newdata",
"propenMissing" = "smallest")
return( .newMethodObjPredict(method = method, args = args) )
})
# @rdname modelObj-internal-api
setMethod(f = ".newMethodObjPredict",
signature = c(args = 'list'),
definition = function(args, method) {
i <- sapply(X = args, FUN = function(x){all(x == "newdata")})
if (sum(i) == 0L) {
args <- c("newdata" = "newdata", args)
newdataName <- "newdata"
} else {
newdataName <- names(x = args)[i]
}
i <- sapply(X = args, FUN = function(x){all(x == "object")})
if (sum(i) == 0L) {
args <- c("object" = "object", args)
objectName <- "object"
} else {
objectName <- names(x = args)[i]
}
i <- which(x = names(x = args) == "propen.missing")
if (length(x = i) == 0L) {
propenMissing <- "smallest"
} else {
propenMissing <- tolower(x = args[[ i ]])
if (!{propenMissing %in% c("smallest","largest")}) {
stop("propen.missing is inappropriate value")
}
args[[ i ]] <- NULL
}
mo <- .newMethodObj(method = method, args = args)
obj <- new("methodObjPredict",
newdataName = newdataName,
objectName = objectName,
propenMissing = propenMissing,
mo)
return( obj )
})
# @rdname internal-predict
# @param newdata A data.frame of model covariates
# @param fitObj The value object returned by the regression
# @importFrom stats model.matrix
setMethod(f = ".predict",
signature = c(object = "methodObjPredict"),
definition = function(object, newdata, fitObj, model) {
if (!missing(x = newdata)) {
object@methodArgs[[ object@newdataName ]] <- as.symbol("newdata")
object@methodArgs[[ object@objectName ]] <- as.symbol("fitObj")
mm <- .predict(object = as(object = object, Class = "methodObj"),
newdata = newdata, fitObj = fitObj)
if (is(object = mm, class2 = "simpleError")) {
message("converting newdata to data.matrix and trying again")
mm <- .predict(object = as(object = object,
Class = "methodObj"),
newdata = data.matrix(frame = newdata),
fitObj = fitObj)
if (is(object = mm, class2 = "simpleError")) {
message("converting newdata to model.matrix and trying again")
mm <- .predict(object = as(object = object,
Class = "methodObj"),
newdata = stats::model.matrix(object = model,
data = newdata),
fitObj = fitObj)
}
}
} else {
object@methodArgs[[ object@newdataName ]] <- NULL
object@methodArgs[[ object@objectName ]] <- fitObj
mm <- .predict(object = as(object = object, Class = "methodObj"))
}
if (is(object = mm, class2 = "simpleError")) {
stop("prediction method could not be executed successfully",
call. = FALSE)
}
if (!is(object = mm, class2 = "matrix")) {
mm <- matrix(data = mm, ncol = 1L)
}
return( mm )
})
|
6ef0252785e9daa40e574394599807033ff4091b
|
022a0c5fe9106bb1c43db54e645abaad14193bf4
|
/man/imageGreenhouse.Rd
|
2be709e09b9927c9ea9eda256f3cc2ad485a1edb
|
[] |
no_license
|
sanchezi/phisStatR
|
98b58c71dd9ec9c28b9d4a07fa08417a6691f58e
|
3bc7cb2d5a6e504eed6ac48a2f3060eaca1b5bbf
|
refs/heads/master
| 2020-08-29T15:28:14.418532
| 2019-11-14T14:55:09
| 2019-11-14T14:55:09
| 218,074,545
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,447
|
rd
|
imageGreenhouse.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphicalFunctions.R
\name{imageGreenhouse}
\alias{imageGreenhouse}
\title{a function for representing a trait in the phenoarch greenhouse}
\usage{
imageGreenhouse(datain, trait, xcol, ycol, numrow, numcol, typeD,
typeT = 1, ylim = NULL, typeI = "ggplot2", typeV = NULL)
}
\arguments{
\item{datain}{a dataframe to explore}
\item{trait}{character, a parameter to draw}
\item{xcol}{character, name of the abscissa column (Line or x...)}
\item{ycol}{character, name of the ordinate column (Position or y...)}
\item{numrow}{numeric, number of rows in the greenhouse}
\item{numcol}{numeric, number of columns in the greenhouse}
\item{typeD}{numeric, type of dataframe (1==wide, 2==long). If typeD==2, the input dataset must contain
a 'Trait' column.}
\item{typeT}{numeric, type of the trait (1: quantitatif, 2: qualitatif), 1 is the default}
\item{ylim}{if trait is quantitative, numeric vectors of length 2, giving the trait coordinates ranges.
default = NULL}
\item{typeI}{character, type of image.
"video" for createDynam.R program that produces a video of an experiment,
"plotly" for interactive graphic for spatial visualisation,
"ggplot2" for classical graphic in report pdf , default.}
\item{typeV}{character, type de video, NULL by default, "absolute" for abs. video}
}
\value{
a ggplot2 object if plotly, the print of the ggplot2 object (a graph) otherwise
}
\description{
a function for representing a trait in the phenoarch greenhouse
}
\details{
the data.frame in input must have the positions of each pot (Line and
Position columns).
For plotly type graphic, the data frame in input must also contain id of plants
(Manip, Pot, Genotype, Repsce)
}
\examples{
\donttest{
# a video call
imageGreenhouse(datain=filter(plant2,Day==vecDay[Day]),trait="plantHeight",
xcol="Line",ycol="Position",numrow=28,numcol=60,
typeD=1,typeT=1,ylim=NULL,typeI="video")
# an interactive plotly call
test<-imageGreenhouse(datain=plant4, trait="Biomass24",xcol="Line",ycol="Position",
numrow=28,numcol=60,typeD=1,typeT=1, ylim=NULL,typeI="plotly")
# test is a ggplot2 object, you have to render it with: plotly::ggplotly(test)
# a classical ggplot2 call
imageGreenhouse(datain=plant4, trait="Biomass24",xcol="Line",ycol="Position",
numrow=28,numcol=60,typeD=1,typeT=1, ylim=NULL,typeI="ggplot2")
}
}
|
45444524ab88c4a22e722b0a87cac09825704369
|
5db4628e475e98b4980e932e1c707f5602b0572c
|
/week6_posc207_Sunny.R
|
2df0b0a6623550c1caa473e823ad02c690f0f387
|
[
"MIT"
] |
permissive
|
Gabe1991/POSC_207
|
69a051bc99832da73364329234e12a1d0eb8701c
|
1ea8e32ec3c76f63e72409e73b69e6abaa709329
|
refs/heads/master
| 2020-03-19T23:25:56.931186
| 2018-06-05T07:05:03
| 2018-06-05T07:05:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,968
|
r
|
week6_posc207_Sunny.R
|
#############################
# Sunny Shao #
# Basic Classes and methods #
# Week 6 #
#############################
library(tidyverse)
rm(list=ls())
setClass("cmps_2016", representation(data = "data.frame",
groups="character"))
####################################
# generate summarize table Data #
####################################
cmps <- readxl::read_xlsx("~/Dropbox/pd_research/new working directory/CMPS.xlsx")
cross<-table(cmps$pd, cmps$Generation)
prop.table(cross,2)*100
generation <- c(1:4)
pd <- c(47,53,56,60)
dat <- data.frame(generation, pd); dat
dat$pd <- as.integer(dat$pd)
######################################
# Create Function that outputs class #
######################################
dat_prep <- function(dat) {
dat <- dat[,1:2]
tab_out <- dat
groups <- c("Generational Status", "Percent Perceived Discrimination")
tab_out <- new("cmps_2016", data = tab_out, groups=groups)
return(tab_out)
}
############################################
# Creating Plot Method for Class: cmps_2016 #
############################################
plot.cmps_2016 <- function(x, ...) {
# Extract columns 1 and 2 from dat_prep function output
xvar <- x@data[,1]
yvar <- x@data[,2]
# Initite plot() within the function
# Note: ggplot2() will also work here
plot(xvar, yvar,
xlab=x@groups[1],
ylab=x@groups[2],
bty="n",
main = "Percent Perceived Discrimination by Generation",
...)
}
###########################
# Initiate First Function #
###########################
p_dat <- dat_prep(dat); p_dat
# Check Type of Clas (is S4 class/method?)
isS4(p_dat)
# Look at attributes
names(attributes(p_dat))
# How to Access Object Attributes
p_dat@data
p_dat@groups
p_dat@class
#####################
# Plot p_dat object #
#####################
plot(p_dat)
##################################
# Make Adjustments #
##################################
plot(p_dat, pch=12, col="blue")
# Try out plotting another data
cross<-table(cmps$edu, cmps$Generation)
prop.table(cross,2)*100
generation <- c(1:4)
edu <- c(23, 18, 16, 12)
my_dat2 <- data.frame(generation, edu); my_dat2
my_dat2$edu <- as.integer(my_dat2$edu)
my_dat2 <- dat_prep(my_dat2)
# Plot out the data #
plot(my_dat2, pch=4, col="brown")
text(2.5,22, "% BA or higher by generation") # you can add text to
legend("topright",
title ="BA or Higher",
pch=4,
col="brown",
legend="Generational Status",
bty="n",
cex=.7)
##################
# Summary Method #
##################
summary.cmps_2016 <- function(x, ...) {
xvar <- x@data[,1]
yvar <- x@data[,2]
# Print out a bunch of stuff #
cat("data.frame() dimensions\n")
print( dim(x@data) )
cat("XVar Length:\n")
print(length(xvar))
cat("YVar Length:\n")
print(length(yvar))
}
# Now just use summary function/method to *summarize* data #
summary(my_dat2)
|
4eefcc6d004418185b70ff284384ecb53e2e6e9a
|
427be10822d3cd82d1053c88b705366a29ecc346
|
/scripts/community_stats_clemente.R
|
42d71e092f6f6e754168dd6ec6af59c3c6ef6ed1
|
[] |
no_license
|
ajaybabu27/microbiome_pdb_pipeline
|
3f6284af2d123096475a062e1eb195563a3ac8a2
|
34d1a8f79cd306bce5adb599d037e7df0434d1a3
|
refs/heads/master
| 2021-06-09T04:17:57.516140
| 2020-12-24T23:16:58
| 2020-12-24T23:16:58
| 139,446,230
| 0
| 1
| null | 2020-12-24T22:42:44
| 2018-07-02T13:17:52
|
R
|
UTF-8
|
R
| false
| false
| 9,420
|
r
|
community_stats_clemente.R
|
#Author: Ajay
#Date: 10/10/2017
#Description: Generate abundance charts for Clemente Lab Microbial Community libraries
args = commandArgs(trailingOnly=TRUE)
#Sets the directory containing abundance tables of Clemente MC libraries as working directory.
setwd(args[1])
#Read abundance file
summary_df=read.table(file='summary.tsv',sep='\t',header=T)
summary_df$Sample<-as.character(summary_df$Sample)
y <- strsplit(summary_df$Sample,".",fixed=TRUE)
rownames(summary_df)<-unlist(lapply(y,FUN=function(x){paste(x[1],x[2],sep=".")}))
summary_df$Sample<-NULL
theoretical_df=read.table(file='/sc/arion/projects/InfectiousDisease/reference-db/microbial_community_standards/jose_mc_16s_abundance.csv',sep='\t',header=T,row.names = 1)
summary_df<-summary_df[,c(rownames(theoretical_df),'total')]
summary_df<-summary_df[grep('M',rownames(summary_df)),] #for run3
summary_df$total_mapped_reads=rowSums(summary_df[,c(1:15)])
summary_df$primary_mapped_per<-(summary_df$total_mapped_reads/summary_df$total)*100
summary_df_per<-(summary_df[,c(1:15)]/summary_df$total_mapped_reads)*100 # Convert to %
theoretical_df[,c(1:5)]=100*theoretical_df[,c(1:5)]
theoretical_df_t=t(theoretical_df[,c('M1','M6','M13','M14','M15')])
jose_summary_df<-rbind(summary_df_per,theoretical_df_t)
summary_df_per_t<-t(jose_summary_df)
class(summary_df_per_t)<-"numeric"
jose_summary_df$expt_cond<-row.names(jose_summary_df)
jose_summary_df$expt_cond<-as.factor(jose_summary_df$expt_cond)
#Calculate correlation
cor_tab<-cor(summary_df_per_t)
cor_tab<-as.data.frame(cor_tab)
#Calculate euclidean distance
dist_tab<-as.matrix(dist(jose_summary_df))
dist_tab<-as.data.frame(dist_tab)
cor_tab_plot_vals<-data.frame(expt_cond=c("M1","M1.1","M1.2","M1.L", "M6","M6.L", "M13","M13.L"),
cor_value=c('0_1',paste(round(dist_tab['M1','M1.1'],2),round(cor_tab['M1','M1.1'],2),sep='_'),
paste(round(dist_tab['M1','M1.2'],2),round(cor_tab['M1','M1.2'],2),sep='_'),
paste(round(dist_tab['M1','M1.L'],2),round(cor_tab['M1','M1.L'],2),sep='_'),
'0_1',paste(round(dist_tab['M6','M6.L'],2),round(cor_tab['M6','M6.L'],2),sep='_'),
'0_1',paste(round(dist_tab['M13','M13.L'],2),round(cor_tab['M13','M13.L'],2),sep='_')))
#Draw stacked abundance plot comparing samples with theoretical distribution.
library(reshape2)
library(ggplot2)
summary_df_per_melt<-melt(jose_summary_df)
summary_df_per_melt<-merge(summary_df_per_melt,cor_tab_plot_vals,by = 'expt_cond',all.x = T)
summary_df_per_melt<-merge(summary_df_per_melt,summary_df[,c('total','primary_mapped_per')],by.x = 1,by.y = 0,all.x=T)
summary_df_per_melt$mapped_per<-round(summary_df_per_melt$primary_mapped_per, 2)
summary_df_per_melt$total<-paste(round(summary_df_per_melt$total/1000,2),'K',sep='')
summary_df_per_melt<-merge(summary_df_per_melt,theoretical_df[,c('gram','GC.genome')],by.x='variable',by.y=0,all.x=T)
summary_df_per_melt$var_fill<-paste(summary_df_per_melt$variable,'(',summary_df_per_melt$gram,')',summary_df_per_melt$GC.genome)
summary_df_per_melt$GC.genome<-as.character(summary_df_per_melt$GC.genome)
summary_df_per_melt$GC.genome<-substr(summary_df_per_melt$GC.genome, 1, nchar(summary_df_per_melt$GC.genome)-1)
summary_df_per_melt$GC.genome<-as.numeric(summary_df_per_melt$GC.genome)
summary_df_per_melt$gram<-factor(as.character(summary_df_per_melt$gram),levels=c('+','-'))
summary_df_per_melt<-summary_df_per_melt[order(summary_df_per_melt$gram,summary_df_per_melt$GC.genome),]
summary_df_per_melt$var_fill<-factor(summary_df_per_melt$var_fill,levels=as.character(unique(summary_df_per_melt$var_fill)))
summary_df_per_melt<-summary_df_per_melt[!grepl('M14|M15',summary_df_per_melt$expt_cond),]
ggplot(data=summary_df_per_melt, aes(x = summary_df_per_melt$expt_cond, y = summary_df_per_melt$value,
fill=summary_df_per_melt$var_fill,label=signif(summary_df_per_melt$value, digits = 2))) +
geom_bar(stat='identity')+theme(axis.text.x = element_text(angle = 90, hjust = 1,vjust=-0.1,size=40))+
geom_text(size = 16, position = position_stack(vjust = 0.5))+
geom_text(data=summary_df_per_melt,aes(x=summary_df_per_melt$expt_cond,y=100,label=summary_df_per_melt$cor_value),hjust=-0.10,vjust=-0.1,angle=65,size=20)+
labs(x='Samples',y='% Abundance',fill='Organism (Gram +/-) GC%',size=40)+
theme(legend.text=element_text(size=40),plot.title = element_text(size=40),legend.key.size = unit(5,"line"),
legend.title=element_text(size=50),
axis.text=element_text(size=40),
axis.title=element_text(size=35,face="bold"))+
expand_limits(y = max(100 * 1.05))+
geom_point(aes(x = summary_df_per_melt$expt_cond,y=summary_df_per_melt$primary_mapped_per),color='white',size=5,show.legend=F)
ggsave("summary_mc_clemente_stacked.pdf", width = 126, height = 126,unit='cm',dpi=200)
ggsave("summary_mc_clemente_stacked.png", width = 126, height = 126,unit='cm',dpi=200)
#Draw pairwise scatter plot comparing samples with theoretical distribution.
library(GGally)
library(scales)
out_data<-as.data.frame(summary_df_per_t)
out_data$color<-rownames(out_data)
out_data<-merge(out_data,theoretical_df[,c(6,7)],by=0)
out_data$GC.genome<-as.character(out_data$GC.genome)
out_data$GC.genome<-substr(out_data$GC.genome, 1, nchar(out_data$GC.genome)-1)
out_data$GC.genome<-as.numeric(out_data$GC.genome)
out_data$gram<-factor(as.character(out_data$gram),levels=c('+','-'))
out_data$color<-factor(out_data$color,levels = out_data$color)
out_data<-out_data[order(out_data$gram,out_data$GC.genome),]
out_data$color<-as.character(out_data$color)
out_data$color<-factor(out_data$color,levels=unique(out_data$color))
combo_plot<-function(p,p1,p2){
g2 <- ggplotGrob(p2)
colors <- g2$grobs[[6]]$children[[3]]$gp$fill
# Change background color to tiles in the upper triangular matrix of plots
idx <- 1
for (k1 in 1:(p-1)) {
for (k2 in (k1+1):p) {
plt <- getPlot(p1,k1,k2) +
theme(panel.background = element_rect(fill = colors[idx], color="white"),
panel.grid.major = element_line(color=colors[idx])
)
p1 <- putPlot(p1,plt,k1,k2)
idx <- idx+1
}
}
return(p1)
}
out_data[2:11]=log2(out_data[2:11])
out_data<-do.call(data.frame,lapply(out_data, function(x) replace(x, is.infinite(x),NA)))
out_data<-do.call(data.frame,lapply(out_data, function(x) replace(x, is.na(x),0)))
for (sample_group in c('M1','M6','M13')){
if (sample_group=='M1'){
col_sel<-colnames(out_data)[grepl('M1\\.',colnames(out_data))]
col_sel<-c(col_sel,'M1')
}
else{
col_sel<-colnames(out_data)[grepl(sample_group,colnames(out_data))]
}
p1<-ggpairs(out_data,axisLabels='internal',lower=list(mapping = aes(colour = color,size=GC.genome,shape=gram)),columns = col_sel)
p2<-ggcorr(out_data[,c(col_sel)],label_round = 2,label = T,label_color = "black")
p<-length(col_sel)
pdf(paste("summary_mc_",sample_group,"_corr_plot_dna_conc.pdf",sep=''), width=15, height=15,onefile=F)
print(combo_plot(p,p1,p2))
dev.off()
}
#Draw edit distance charts
library(reshape2)
library(ggplot2)
library(scales)
test <- read.table("../PhiX/PhiXQC_edit_dist.txt",sep='\t',header = T)
test_m_temp<-melt(test,na.rm = T)
xpos <- c(Inf,Inf)
ypos <- c(Inf,Inf)
mean_char=paste('Mean=',as.character(round(mean(test_m_temp$value),digits=2)))
sd_char=paste('SD=',as.character(round(sd(test_m_temp$value),digits = 2)))
annotateText <- c(mean_char,sd_char)
hjustvar<-c(1,1)
vjustvar<-c(1,2.5)
pdf("clemente_edit_dist_perhist.pdf", onefile = TRUE,width=15, height=5)
p<-ggplot(test_m_temp, aes(x = factor(test_m_temp$value))) +
geom_bar(aes(y = (..count..)/sum(..count..))) +
scale_y_continuous(labels = percent_format()) + geom_vline(xintercept = quantile(test_m_temp$value,.90)+1,colour='red')+
labs(x='edit_distance',y='percentage_distribution',title="PhiX Edit Distance (Pre-QC)")+
geom_text(data=as.data.frame(annotateText),aes(x=xpos,y=ypos,hjust=hjustvar,vjust=vjustvar,label=annotateText))+
scale_x_discrete(limits=as.character(seq(0,10)))
print(p)
#Plot percentage histogram plots for edit distances
library(Rmisc)
library(ggplot2)
library(reshape2)
library(scales)
library(gridExtra)
test <- read.csv("clemente_postqc_edit_dist.txt",sep='\t',header = T)
test_m<-melt(test,na.rm = T)
list_plot=list()
for (x in levels(test_m$variable)){
test_m_temp<-test_m[test_m$variable==x,]
xpos <- c(Inf,Inf)
ypos <- c(Inf,Inf)
mean_char=paste('Mean=',as.character(round(mean(test_m_temp$value),digits=2)))
sd_char=paste('SD=',as.character(round(sd(test_m_temp$value),digits = 2)))
annotateText <- c(mean_char,sd_char)
hjustvar<-c(1,1)
vjustvar<-c(1,2.5)
p<-ggplot(test_m_temp, aes(x = factor(test_m_temp$value))) +
geom_bar(aes(y = (..count..)/sum(..count..))) +
scale_y_continuous(labels = percent_format()) + geom_vline(xintercept = quantile(test_m_temp$value,.90)+1,colour='red')+
labs(x='edit_distance',y='percentage_distribution',title=x)+
geom_text(data=as.data.frame(annotateText),aes(x=xpos,y=ypos,hjust=hjustvar,vjust=vjustvar,label=annotateText))+
scale_x_discrete(limits=as.character(seq(0,10)))
print(p)
list_plot[[x]]<-p
#do.call("grid.arrange", p)
}
dev.off()
|
c69f2d0fb6bebf4f7ac65a55c1d665be55ef7e41
|
6a8856f2818f3ee31060dde25fab1ef3003dc9ba
|
/plot5.R
|
32824c5198c83f79025701507c0ca1043275e438
|
[] |
no_license
|
roundse/ExploratoryDataAnalysis-Project2
|
6bbb979d001c2ece41605ccf068d2cd104e7e631
|
199b6de8c4eebcd4dc8d7fed30bc0bd579edba69
|
refs/heads/master
| 2020-04-08T14:29:31.922864
| 2018-11-28T03:39:24
| 2018-11-28T03:39:24
| 159,439,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
plot5.R
|
# Step 1: Load data and subset appropriately.
data <- readRDS("summarySCC_PM25.rds")
correct_years <- subset(data,year=="1999" | year == "2002" | year == "2005" | year == "2008")
balt <- subset(correct_years,fips=="24510")
# Step 2: Get all motor vehicles (i.e., those that
# are of type on-road.
mv <- subset(balt,balt$type == "ON-ROAD")
# Sum all observations of emissions by year, as before
em_by_yr <- tapply(mv$Emissions,mv$year,sum)
years <- names(em_by_yr)
# Step 3: Create a line plot showing the change in
# total coal combustion-related emissions over time.
plot(years,em_by_yr,
col="blue",
pch=16,
ylim=c(0,ceiling(max(em_by_yr))),
xlab="Year",
ylab="Total PM2.5 Emissions from \nMotor Vehicle Sources (Tons)",
main="Change in Total PM2.5 Motor Vehicle \nEmissions Over 1999-2008 in Baltimore City, MD")
lines(years,em_by_yr,col="blue")
# Add regression line.
abline(lm(em_by_yr~as.numeric(years)),col="blue",lty=2)
# Step 4: Save the line plot to a PNG device.
dev.copy(png,"plot5.png")
dev.off()
|
3a3c6d33f3cbe84433f6855d3ddb352454285bc9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/widals/examples/fun.load.Rd.R
|
b22c27ce6c66719b9256c2074bd93beff3520cea
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,739
|
r
|
fun.load.Rd.R
|
library(widals)
### Name: fun.load
### Title: Stochastic Search Helper Functions
### Aliases: fun.load fun.load.hals.a fun.load.hals.fill fun.load.widals.a
### fun.load.widals.fill
### Keywords: ~kwd1 ~kwd2
### ** Examples
### Here's an itty bitty example:
### we use stochastic search to find the minimum number in a vector
### GP isn't used here, and hence neither are p.ndx.ls nor f.d
### however, we still need to create them since MSS.snow requires their existence
fun.load.simpleExample <- function() {
if( run.parallel ) {
sfExport("xx")
}
p.ndx.ls <- list( c(1) )
assign( "p.ndx.ls", p.ndx.ls, pos=globalenv() )
f.d <- list( dlog.norm )
assign( "f.d", f.d, pos=globalenv() )
FUN.MH <- function(jj, GP.mx, X) {
our.cost <- sample(xx, 1)
}
assign( "FUN.MH", FUN.MH, pos=globalenv() )
FUN.GP <- NULL
assign( "FUN.GP", FUN.GP, pos=globalenv() )
FUN.I <- function(envmh, X) {
cat( "Hello, I have found an even smaller number in xx ---> ", envmh$current.best, "\n" )
}
assign( "FUN.I", FUN.I, pos=globalenv() )
FUN.EXIT <- function(envmh, X) {
cat( "Done", "\n" )
}
assign( "FUN.EXIT", FUN.EXIT, pos=globalenv() )
}
xx <- 1:600
GP <- c(1)
MH.source <- fun.load.simpleExample
run.parallel <- TRUE
sfInit(TRUE, 2)
MSS.snow(MH.source, Inf, p.ndx.ls, f.d, matrix(1, nrow=28), 28, 7)
sfStop()
### Here's another itty bitty example:
### we use stochastic search to find the mean of a vector
### i.e., the argmin? of sum ( x - ? )^2
fun.load.simpleExample2 <- function() {
if( run.parallel ) {
sfExport("xx")
}
p.ndx.ls <- list( c(1) )
assign( "p.ndx.ls", p.ndx.ls, pos=globalenv() )
f.d <- list( unif.mh )
assign( "f.d", f.d, pos=globalenv() )
FUN.MH <- function(jj, GP.mx, X) {
our.cost <- sum( ( xx - GP.mx[jj, 1] )^2 )
return(our.cost)
}
assign( "FUN.MH", FUN.MH, pos=globalenv() )
FUN.GP <- NULL
assign( "FUN.GP", FUN.GP, pos=globalenv() )
FUN.I <- function(envmh, X) {
cat( "Improvement ---> ", envmh$current.best, " ---- " , envmh$GP, "\n" )
}
assign( "FUN.I", FUN.I, pos=globalenv() )
FUN.EXIT <- function(envmh, X) {
our.cost <- envmh$current.best
GP <- envmh$GP
cat( "Done", "\n" )
cat( envmh$GP, our.cost, "\n" )
}
assign( "FUN.EXIT", FUN.EXIT, pos=globalenv() )
}
##set.seed(99999)
xx <- rnorm(300, 5, 10)
GP <- c(10)
MH.source <- fun.load.simpleExample2
run.parallel <- TRUE
sfInit(TRUE, 2)
MSS.snow(MH.source, Inf, p.ndx.ls, f.d, matrix(1/10, nrow=140, ncol=length(GP)), 140, 14)
sfStop()
##### in fact:
mean(xx)
|
c92330c641a291b169d8a4124242b7d9c18f15ad
|
87d40842a98dc8d752f0babe560c51f99ebbc47b
|
/R/Updated/dummy.R
|
e7a1afd1a9014015aca63e0dfc231905dd6e5247
|
[] |
no_license
|
Allisterh/SVARdoc
|
83a2f6d30edd571d692a58c5279e6ffcdd0f1a40
|
fc3eaa55bb4ebaf57f7f8c3ed8053df04a0e48bd
|
refs/heads/master
| 2023-04-04T13:41:54.833426
| 2021-04-22T15:07:53
| 2021-04-22T15:07:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,462
|
r
|
dummy.R
|
# Create dummay variables----------------------------------------
# Dummy Variables###########################################################
# D1
# Q386 to Q188 (55 to 61 in d) and 3 to 9 in da is an interest rate shock for
#spread 1. It does not happen with spread 2.
# is this needed ? d[120:121,]
da$D1=0
da$D1[c(3:9)]=1
# D2
#create a dummy for the shock of the 1994 interest rate increase.
#2Q94 to 2Q95. This is 86:90 in the original (d) and 34:38 in the (da)
#34 plus 52 is 86; 38 plus 52 is 90
da$D2=0
da$D2[c(34:38)]=1
# D3
#q32007 is 139 q42008 is 144. The dummy is designed to account for the sharp
#flow in funds (particularly bonds and money market) in this period.
#87 plus 52 is 139; 92 plus 52 is 144.
da$D3=0
da$D3[c(87:92)]=1
#dummies must be turned into matrix to use with VAR.
# dum<-cbind(da$D2, da$D3)
dum<-cbind(da$D1,da$D2, da$D3)
#Any other dummies? Maybe look at the residuals to see if anything is required.
#One possibility would be the dot.com burst. Check equity and FDI flow.
colnames(dum)<- c("D1", "D2", "D3")
# maybe update to include new dummy. Hau and Rey have breaks at
# 1994. Assuming first quarter, these would be row 34 onwards for 1994
# that would be 66 for 2002. This could be tried.
#
# Don't put them in the main file. Keep them exogeous.
#da$D4 = 0
#da$D4[c(34:length(da$D4))] = 1
#da$D5 = 0
#da$D5[c(66:length(da$D5))] = 1
#D4 <- da$D4
#D5 <- da$D5
# Remove the dummies
da <- da[, -c(8:10)]
|
e59bc501a59f3cc4ee702dde8a44b07b4c8124cb
|
7762a70d7bf436bf2349147134f97d61d5e9b492
|
/counts/headTail.R
|
e522945a5f38837c1f84abf165ce816dbea52dbb
|
[] |
no_license
|
ploverso/R-analysis-scripts
|
41c7cde1315b7e32f344f50b39515d46c7c0f9f8
|
f7466df7be1397c5bb01eff60fc64fd44d9da657
|
refs/heads/master
| 2021-01-02T09:34:10.282435
| 2015-06-26T03:14:11
| 2015-06-26T03:14:11
| 38,087,675
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
headTail.R
|
# Some simple functions for more succiently returning heads and tails of strings
strtail <- function(s, n) {
if(n < 0){
return(substring(s, 1 - n))
}
else{
return(substring(s, nchar(s) - n + 1))
}
}
strhead <- function(s, n) {
if(n < 0){
return(substr(s, 1, nchar(s) + n))
}
else{
return(substr(s, 1, n))
}
}
|
c9fbe838e6a927fbb9ba10afa6a1e1532efc1ea0
|
855f1ee3fb2a694355652ce86a59134c110f834f
|
/src/structures.r
|
47ed61408bbcd1131b7a4695afc13c6f97ab1452
|
[
"CC0-1.0"
] |
permissive
|
fvafrCU/programmieren_in_r
|
9d59131460bb01096c40ceddfdbe96f4403c83c5
|
12ce454e66304ac7bf702d8b4305444d8b9e5b44
|
refs/heads/master
| 2021-01-10T11:06:52.122948
| 2018-07-20T06:16:01
| 2018-07-20T06:16:01
| 48,226,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,920
|
r
|
structures.r
|
species_shares_1987 <-
structure(list(species_group_label = structure(c(8L, 17L, 4L,
10L, 11L, 16L, 3L, 5L, 13L,
7L, 1L, 9L, 14L, 2L, 6L,
12L, 15L),
.Label = c("BA", "BI", "BU",
"DGL", "EI", "ER",
"ES", "FI", "HB",
"KIE", "LAE",
"PA", "REI",
"sALH", "sALN",
"sNB", "TA"),
class = "factor"),
prediction = c(43.4629284667027, 7.86415232656841,
2.26216297553824, 8.06265606914114,
1.98345551354271, 0.258119453661548,
18.6633590554827, 6.44221004707153,
0.35059803997222, 3.10977158731384,
1.97088355582993, 0.869602592792654,
1.42365460297862, 0.718482907631762,
0.830511613550428, 0.835781310286287,
0.891669881935334),
error = c(0.536743142764836, 0.262752386955493,
0.129393158401086, 0.281028879558596,
0.0957647501950789, 0.0400083812731542,
0.389178612482042, 0.226936220859848,
0.0488116148358875, 0.137538118708037,
0.097771346923237, 0.0594793138895747,
0.0904682635067747, 0.055752264926238,
0.0736516611206084, 0.0868833933034175,
0.0604548633623663),
grouping = structure(c(1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L,
2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L),
.Label = c("conifer", "deciduous"),
class = "factor")
),
.Names = c("species_group_label", "prediction", "error",
"grouping"),
class = "data.frame", row.names = c(NA, -17L)
)
species_shares_2002 <-
structure(list(species_group_label = structure(c(8L, 17L, 4L, 10L, 11L, 16L,
3L, 5L, 13L, 7L, 1L, 9L,
14L, 2L, 6L, 12L, 15L),
.Label = c("BA", "BI", "BU",
"DGL", "EI", "ER",
"ES", "FI", "HB",
"KIE", "LAE",
"PA", "REI",
"sALH", "sALN",
"sNB", "TA"),
class = "factor"),
prediction = c(37.6541737569276, 7.7972024041839,
2.84416204823979, 6.50871828897042,
1.89646865974553, 0.445212707829209,
21.2065514011396, 6.91947701194286,
0.427773788150808, 4.21173366638045,
2.83784794317384, 1.18034501600187,
1.92995628756537, 0.837975958160955,
0.983188665990165, 0.705704472718757,
1.61350792287878),
error = c(0.516872726399891, 0.256407272426103,
0.148641313940045, 0.24019309828729,
0.0923020697386302, 0.0596218555415915,
0.394877125787159, 0.231660735729548,
0.0542174195188222, 0.161229780433931,
0.122025049342518, 0.0719240232167699,
0.105193197027116, 0.0620204258221561,
0.0798713826799925, 0.0719370996103648,
0.0886195995701987),
grouping = structure(c(1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L,
2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L),
.Label = c("conifer", "deciduous"),
class = "factor")
),
.Names = c("species_group_label", "prediction", "error",
"grouping"),
class = "data.frame", row.names = c(NA, -17L)
)
species_shares_2012 <-
structure(list(species_group_label = structure(c(8L, 17L, 4L, 10L, 11L, 16L,
3L, 5L, 13L, 7L, 1L, 9L,
14L, 2L, 6L, 12L, 15L),
.Label = c("BA", "BI", "BU",
"DGL", "EI", "ER",
"ES", "FI", "HB",
"KIE", "LAE",
"PA", "REI",
"sALH", "sALN",
"sNB", "TA"),
class = "factor"),
prediction = c(33.9691456012294, 7.99971033422974,
3.37185094122167, 5.58481905493661,
1.76833653476758, 0.469877064925086,
21.8136258163657, 7.05250096043027,
0.538397233960815, 4.91372646945832,
3.65022666154437, 1.43595082597003,
2.17737610449481, 1.24868228966577,
1.08279677295428, 0.724697656550135,
2.19827967729543),
error = c(0.497512506586033, 0.253156122081137,
0.157245423563603, 0.218553145335019,
0.0865678202466183, 0.0572587889177698,
0.392239679332439, 0.226761402556839,
0.059160429882746, 0.169849882166813,
0.137906630574863, 0.0806802789826653,
0.109612521393389, 0.0805661157918473,
0.0852946611195881, 0.0765991333570295,
0.104578534434981),
grouping = structure(c(1L, 1L, 1L, 1L,
1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
2L, 2L, 2L, 2L),
.Label = c("conifer", "deciduous"),
class = "factor")
),
.Names = c("species_group_label", "prediction", "error",
"grouping"),
class = "data.frame", row.names = c(NA, -17L)
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.