blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d9ed8af4b3282ddb97373fe6c6943f89208eced | 8289fd6c2315dd84a2abc443e56c33eb09fd6346 | /Applied Econometrics/maxwell/replication/6_divorce_effect_by_age.R | bfd3f81f2ec59a2a6d0812398df813a402c38e86 | [] | no_license | elmerli/econometrics-essential | 826f9f2f3d2f46a1d1f3b262cf25fb0161b16025 | 45d6f8d0e21c9393a97312bdaf65d19f1c3a6024 | refs/heads/master | 2023-08-05T00:10:56.285099 | 2020-05-14T03:16:55 | 2020-05-14T03:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,653 | r | 6_divorce_effect_by_age.R | # 6_divorce_effect_by_age.R
# Maxwell Austensen
# AEM Replication
# NYU Wagner
# 19-12-2016
library(tidyverse)
library(haven)
library(stringr)
library(feather)
library(knitr)
library(broom)
library(sandwich)
# Analysis sample: flag observations whose oldest child is under 12, then
# split into full / younger-child / older-child subsamples for the OLS runs.
# NOTE(review): `clean_` (data-directory prefix) is defined elsewhere in the
# project and must be in scope before this script is sourced.
sample3 <- read_feather(str_c(clean_, "sample3.feather"))
ols_full <- sample3 %>% mutate(oldest_lt12 = if_else(age_c < 12, 1, 0))
ols_lt12 <- ols_full %>% filter(oldest_lt12 == 1)
ols_ge12 <- ols_full %>% filter(oldest_lt12 == 0)
# Fit the first-stage regression `f` on `df` and return `df` augmented with
# `marriage_ended` replaced by its fitted (predicted) values, ready for the
# second-stage TSLS regressions.
get_first_stage <- function(df, f){
  first_stage <- lm(formula = f, data = df)
  fitted_col <- select(augment(first_stage), .fitted)
  combined <- bind_cols(fitted_col, df)
  mutate(combined, marriage_ended = .fitted) # overwrite variable with predicted version
}
# Right-hand-side controls shared by every regression: ages and education with
# quadratics, education interactions, urban indicator, and state fixed effects.
covariates <- " + age + age_birth + age_married + educ_yrs + I(age^2) + I(age_married^2) + I(age_birth^2) + I(educ_yrs^2) + age*educ_yrs + age_married*educ_yrs + age_birth*educ_yrs + urban + factor(state_birth) + factor(state_current)"
# First stage: instrument `marriage_ended` with `firstborn_girl`.
first_stage_formula <- str_interp("marriage_ended ~ firstborn_girl ${covariates}")
# TSLS samples: same three splits, but with marriage_ended replaced by its
# first-stage fitted values.
tsls_full <- ols_full %>% get_first_stage(first_stage_formula)
tsls_lt12 <- ols_full %>% filter(oldest_lt12 == 1) %>% get_first_stage(first_stage_formula)
tsls_ge12 <- ols_full %>% filter(oldest_lt12 == 0) %>% get_first_stage(first_stage_formula)
# Regress outcome `p` on marriage_ended plus the shared covariates, and return
# a two-row long tibble ("<p>_est", "<p>_se") holding the marriage_ended
# coefficient and its robust standard error.
get_estimates <- function(p, data){
f <- str_interp("${p} ~ marriage_ended ${covariates}")
mod <- lm(formula = f, data = data)
# Robust standard errors (replicating Stata's robust option, HC1)
# NOTE(review): `.[[2]]` assumes marriage_ended is the SECOND model term
# (after the intercept) — confirm if the formula ever changes.
robust_se <-
mod %>%
vcovHC(type = "HC1") %>%
diag() %>%
sqrt() %>%
.[[2]]
# Keep only the marriage_ended row, then reshape to long (variable, value).
mod %>%
tidy() %>%
filter(term == "marriage_ended") %>%
transmute(var = p,
est = estimate,
se = robust_se) %>%
gather("stat", "value", -var) %>%
unite(variable, var, stat)
}
# Build one results column: stack the (estimate, SE) pairs produced by
# get_estimates() for every outcome listed in the global `econ_vars`.
get_table_col <- function(df){
  econ_vars %>%
    map_df(~ get_estimates(.x, data = df))
}
# Economic outcomes reported in Table 5.
econ_vars <- c("hh_income_std", "poverty_status", "nonwoman_inc", "woman_inc", "woman_earn", "employed", "weeks_worked", "hours_worked")
# One results column per sample split (full / <12 / 12+), OLS version.
ols_cols <- list(ols_full, ols_lt12, ols_ge12) %>% map(get_table_col)
ols_table <-
ols_cols[[1]] %>%
left_join(ols_cols[[2]], by = "variable") %>%
left_join(ols_cols[[3]], by = "variable") %>%
rename(`Entire Sample` = value.x,
`Oldest Child <12` = value.y,
`Oldest Child 12+` = value)
# Same three columns, TSLS version (marriage_ended is the fitted first stage).
tsls_cols <- list(tsls_full, tsls_lt12, tsls_ge12) %>% map(get_table_col)
tsls_table <-
tsls_cols[[1]] %>%
left_join(tsls_cols[[2]], by = "variable") %>%
left_join(tsls_cols[[3]], by = "variable") %>%
rename(`Entire Sample` = value.x,
`Oldest Child <12` = value.y,
`Oldest Child 12+` = value)
# First-stage instrument strength: the ANOVA F-statistic for `firstborn_girl`
# in the first-stage regression fit on `df` (uses the global
# `first_stage_formula`).
get_f_stat <- function(df){
  fit <- lm(first_stage_formula, data = df)
  aov_tbl <- tidy(anova(fit))
  instrument_row <- filter(aov_tbl, term == "firstborn_girl")
  instrument_row$statistic
}
# Footer rows: first-stage F-statistics and sample sizes per split.
# NOTE(review): data_frame() is deprecated in newer tibble releases; tibble()
# is the drop-in replacement.
f_stat_row <- data_frame(variable = "F-statistic from first stage",
`Entire Sample` = get_f_stat(ols_full),
`Oldest Child <12` = get_f_stat(ols_lt12),
`Oldest Child 12+` = get_f_stat(ols_ge12))
obs_row <- data_frame(variable = "Sample Size",
`Entire Sample` = nrow(ols_full),
`Oldest Child <12` = nrow(ols_lt12),
`Oldest Child 12+` = nrow(ols_ge12))
# Section-header rows separating the OLS and TSLS panels.
ols_row <- data_frame(variable = "OLS")
tsls_row <- data_frame(variable = "TSLS")
# Stack panels and footer into the final Table 5.
table5 <- list(ols_row, ols_table, tsls_row, tsls_table, f_stat_row, obs_row) %>% bind_rows()
write_feather(table5, str_c(clean_, "/table5.feather")) |
df89afdf58dd45a7bcff1d7072bdec865eaed928 | 2033f89ac391bf7a6c85ec9212e925c4561a6eaf | /R/packages/package_install.R | a70b2cfca653a6672e17fed7faf7c7adea3c8cef | [] | no_license | phileas-condemine/drees_indicateur_viz | d8ca4dca94eef18f1d6ce9c0fb744d578ef856b5 | d9d3576b9eabbfdb05ad0b05604d9e81b1817d63 | refs/heads/master | 2021-05-05T14:38:57.662873 | 2018-01-30T12:50:00 | 2018-01-30T12:50:00 | 118,493,370 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 437 | r | package_install.R | pkg_file <- read.csv("./drees_indicateur_viz/R/packages.csv", sep=";")
# Packages flagged for manual installation in packages.csv. For each one,
# try to attach it; if it is missing, install it and then attach it
# (the original never loaded a freshly installed package).
packages_list <- pkg_file[pkg_file$installed_by %in% c("custom", "notebook"),]$pkgname
for (pkg in packages_list){
  # print(paste0("check: ",pkg))
  if (!require(pkg, character.only = TRUE)) {   # TRUE, not the reassignable T
    print(paste0("need to install: ", pkg))
    install.packages(pkg)
    library(pkg, character.only = TRUE)         # attach after installing
  }
}
devtools::install_github("dgrtwo/gganimate")
devtools::install_github("hadley/ggplot2", force = TRUE) |
a8db15282ca1de07bbbaffa4bb7262e42219004e | 6bca977d67101a6274457ca850517ee41cf06c45 | /plot_functions/plot.overlaps.R | b3823816c162e6c1e742ab178d79d591509c1542 | [] | no_license | AAlhendi1707/preinvasive | bedcf1f1eca93ab9ae4b44bf32e4d0f9947a1fad | e683fa79ad76d0784437eba4b267fb165b7c9ae4 | refs/heads/master | 2022-01-06T18:25:52.919615 | 2019-01-18T09:39:42 | 2019-01-18T09:39:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,373 | r | plot.overlaps.R | # Plot prog vs reg comparisons alongside TCGA cancer vs control comparisons to show similarities
# Plot differentially methylated regions (DMRs) and copy number changes
# Use plot functions from the ggbio package
library(GenomicRanges)
library(ggbio)
data(ideoCyto, package = "biovizBase")
# Plot genome-wide karyograms comparing this study's CIS data with TCGA:
#   filename1 - output path for the DMR (methylation) karyogram
#   filename2 - output path for the copy-number karyogram
# Relies on globals: dmrs, dmrs.tcga, cnas.segmented, tcga.cnas.segmented,
# tcga.cnas.segmented.mean, wgs.pheno, and ideoCyto (hg19 chromosome lengths).
# Side effects only: writes two figures via ggsave(); returns nothing useful.
plot.overlaps <- function(filename1, filename2){
# Create a genomic ranges object from our DMR data
# (col = progressive-minus-regressive mean beta, used as the colour scale)
dn.dmrs <- GRanges(
seqnames = dmrs$ProbeLassoDMR$seqnames,
ranges = IRanges(
start = dmrs$ProbeLassoDMR$start, end = dmrs$ProbeLassoDMR$end
),
col = dmrs$ProbeLassoDMR$betaAv_Progressive - dmrs$ProbeLassoDMR$betaAv_Regressive,
source = "CIS"
)
# Create a similar object of TCGA DMR data (cancer minus control beta)
dn.dmrs.tcga <- GRanges(
seqnames = dmrs.tcga$ProbeLassoDMR$seqnames,
ranges = IRanges(
start = dmrs.tcga$ProbeLassoDMR$start, end = dmrs.tcga$ProbeLassoDMR$end
),
col = dmrs.tcga$ProbeLassoDMR$betaAv_TCGA.SqCC - dmrs.tcga$ProbeLassoDMR$betaAv_TCGA.Control,
source = "TCGA"
)
# Combine the two; `levels` gives each source its own track on the y-axis
dn.dmrs <- c(dn.dmrs, dn.dmrs.tcga)
dn.dmrs$levels <- as.numeric(factor(dn.dmrs$source, levels=c("TCGA", "CIS")))
seqlengths(dn.dmrs) <- seqlengths(ideoCyto$hg19)[names(seqlengths(dn.dmrs))]
# Plot as a karyogram, splitting TCGA and CIS data sets on the y-axis
p.ylim <- autoplot(dn.dmrs, layout = "karyogram", aes(color=col, fill = col,
ymin = ifelse(source == "CIS", 5.5, 1),
ymax = ifelse(source == "CIS", 9, 4.5)
))
# Use the same colour scale as for methylation heatmaps
p.ylim + scale_colour_distiller(palette = 'RdYlBu')
ggsave(filename1, scale=2)
# pdf(filename1)
# dmr.plot
# dev.off()
##################################################################
# Create an analagous plot for copy number
##################################################################
# Mean ploidy-normalised copy number for progressive samples...
cnas.segmented.mean.p <- cnas.segmented[,1:3]
sel <- wgs.pheno$name[which(wgs.pheno$progression == 1)]
cnas.segmented.mean.p$cn <- apply(cnas.segmented[,sel] / wgs.pheno$ploidy[match(sel, wgs.pheno$name)], 1, mean)
# ...and for regressive samples
cnas.segmented.mean.r <- cnas.segmented[,1:3]
sel <- wgs.pheno$name[which(wgs.pheno$progression == 0)]
cnas.segmented.mean.r$cn <- apply(cnas.segmented[,sel] / wgs.pheno$ploidy[match(sel, wgs.pheno$name)], 1, mean)
# Three CN tracks: progressive CIS, regressive CIS, TCGA
dn.cnas.p <- GRanges(
seqnames = paste0("chr", cnas.segmented.mean.p$chr),
ranges = IRanges(
start = cnas.segmented.mean.p$start, end = cnas.segmented.mean.p$end
),
cn = cnas.segmented.mean.p$cn,
source = "Prog"
)
dn.cnas.r <- GRanges(
seqnames = paste0("chr", cnas.segmented.mean.r$chr),
ranges = IRanges(
start = cnas.segmented.mean.r$start, end = cnas.segmented.mean.r$end
),
cn = cnas.segmented.mean.r$cn,
source = "Reg"
)
sel <- which(tcga.cnas.segmented$chr %in% 1:22) # This step keeps factor names consistent with hg19
dn.cnas.tcga <- GRanges(
seqnames = paste0("chr", tcga.cnas.segmented.mean$chr[sel]),
ranges = IRanges(
start = tcga.cnas.segmented.mean$start[sel], end = tcga.cnas.segmented.mean$end[sel]
),
cn = tcga.cnas.segmented.mean$cn[sel],
source = "TCGA"
)
dn.cnas <- c(dn.cnas.p, dn.cnas.r, dn.cnas.tcga)
# Remove sex chromosomes.
sel <- which(as.character(seqnames(dn.cnas)) %in% paste0("chr", 1:22))
dn.cnas <- dn.cnas[sel]
dn.cnas$levels <- as.numeric(factor(dn.cnas$source, levels=c("TCGA", "Prog", "Reg")))
seqlengths(dn.cnas) <- seqlengths(ideoCyto$hg19)[names(seqlengths(dn.cnas))]
# Specify colours explicitly to match ext. data fig 4
# (binned by normalised CN: deep loss, loss, neutral, gain, amplification)
dn.cnas$col <- "#E9EDF8"
dn.cnas$col[which(dn.cnas$cn < 0.75)] <- "#90BEDA"
dn.cnas$col[which(dn.cnas$cn < 0.5)] <- "darkblue"
dn.cnas$col[which(dn.cnas$cn > 1.25)] <- "#EB7C64"
dn.cnas$col[which(dn.cnas$cn > 2)] <- "#B81321"
# Three stacked tracks per chromosome, one per source
p.ylim <- autoplot(dn.cnas, layout = "karyogram", aes(color=col, fill = col,
ymin = (levels - 1) * 10/3 + 0.5,
ymax = levels * 10 /3 - 0.5)
)
# Identity colour mapping: values already ARE the hex colours
cols <- unique(dn.cnas$col)
names(cols) <- cols
# Do the plot
p.ylim + scale_color_manual(values=cols) + scale_fill_manual(values=cols)
ggsave(filename2, scale=2)
}
a378f67fbaeab59f721ee41abf1c484fc860bd3c | e5752d64b42ecf663e15e1e477380b1729f0de11 | /scripts/landTakeoff.R | 61e314590ba4e7b36bf9131647805a1fc54d9831 | [] | no_license | adarsh66/Planefinder_DataAnalysis | 11a9837f6b95c3fff821906b8ec9edd37b94f40c | 40d7248f27c642892693275df17e0ccb028c3086 | refs/heads/master | 2021-01-18T21:23:39.399324 | 2016-05-14T15:43:29 | 2016-05-14T15:43:29 | 53,488,822 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,335 | r | landTakeoff.R | # landing and takeoff
# planeMap
library(dplyr)
library(ggplot2)
library(lubridate)
# Minimal ggplot theme: no background panel, no grid lines.
theme_stripped <- theme(panel.background = element_blank(),
panel.grid = element_blank())
flights <- readRDS("data/assembled_flights.rds")
# NOTE(review): nonstandard epoch origin ("1969-12-31 23:00:10" rather than
# "1970-01-01") — presumably compensating for a source-data offset; confirm.
flights$time <- as.POSIXct(flights$mtime, origin = "1969-12-31 23:00:10")
# Strip the first space from each column name (e.g. "Flight Number")
names(flights) <- sub(" ", "", names(flights))
# Bounding box roughly covering the UK
flightsUK <- filter(flights, Latitude < 58 & Latitude > 50 &
Longitude < 2 & Longitude > -10)
# need to normalise the times: ntime = seconds since each flight's first fix;
# keep flights that start below 100 ft (take-offs) within the first 4000 s
takeOff <- flightsUK %>%
group_by(FlightNumber) %>%
mutate(ntime = mtime - min(mtime)) %>%
filter(Altitude[1] < 100 & ntime < 4000)
# did it work?
Callsigns <- unique(takeOff$Callsign)
FlightNumbers <- unique(takeOff$FlightNumber)
Types <- unique(takeOff$Type)
# Spot-check one flight's time/altitude trace
takeOff %>%
filter(FlightNumber == FlightNumbers[20]) %>%
select(matches("time"), Altitude) %>%
head
# Common altitude range so every per-type plot shares the same y-axis
yl <- range(takeOff$Altitude)
# One PDF of climb-out trajectories per aircraft type.
# NOTE(review): 1:length(Types) would iterate 1,0 if Types were empty —
# seq_along(Types) is the safe idiom.
for(i in 1:length(Types)) {
p <- ggplot(filter(takeOff, Type == Types[i]),
aes(x = ntime, y = Altitude,
group = FlightNumber,
colour = Type)) +
geom_line(alpha = 0.5) +
ylim(yl) +
theme_stripped +
scale_color_brewer(palette = "Set1")
pdf(paste("plots/altTrajectories_UK", Types[i], ".pdf"), 6, 3)
print(p)
dev.off()
}
|
356318d55a23612535c1226430794008a4bf43ea | 5d4c8e0243a7b5f2cf42fd09b36ce54719ad9608 | /load_ngram.R | aec47755faff1560d9132a3efb9773e8bb1ef6f3 | [] | no_license | eakalak-suthampan/Coursera_Data_Science_Capstone | 8538a2d99523dde211b79d3315cae496b028d799 | 6f4b6c761638b3e276bac39595178b4dba157dd7 | refs/heads/master | 2021-01-20T06:43:02.127166 | 2017-12-13T18:26:01 | 2017-12-13T18:26:01 | 89,912,997 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,112 | r | load_ngram.R | library(data.table)
# load ngrams from the saved files
# Load the saved n-gram transition tables (current_state -> next_state with
# counts); skip the header row and drop the first (row-id) column.
fourgram <- fread("alldata_fourgram_state.csv",sep = ",",header = FALSE,skip = 1,drop = 1)
names(fourgram) <- c("current_state","next_state","count")
trigram <- fread("alldata_trigram_state.csv",sep = ",",header = FALSE,skip = 1,drop = 1)
names(trigram) <- c("current_state","next_state","count")
bigram <- fread("alldata_bigram_state.csv",sep = ",",header = FALSE,skip = 1,drop = 1)
names(bigram) <- c("current_state","next_state","count")
# For each search term (current_state), keep only its top-5 next states by
# count; `enum` is the within-group rank (rows assumed pre-sorted by count).
fourgram[, enum := 1:.N, by = current_state]
trigram[, enum := 1:.N, by = current_state]
bigram[, enum := 1:.N, by = current_state]
fourgram <- fourgram[enum <=5, 1:3]
trigram <- trigram[enum <=5, 1:3]
bigram <- bigram[enum <=5, 1:3]
# prune more data: drop rare fourgrams to shrink the lookup table
fourgram <- fourgram[fourgram$count > 2, ]
#trigram <- trigram[trigram$count > 3, ]
#bigram <- bigram[bigram$count > 3, ]
# set binary search on the current_state column (keyed data.table lookups)
setkey(fourgram,current_state)
setkey(trigram,current_state)
setkey(bigram,current_state)
|
543f64402f16a8fbb5f196511ab0c209be4ec894 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/GLDEX/examples/fun.data.fit.mm.Rd.R | c73767d1bafedca0521777e7cf24d4fa95ee5d41 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 273 | r | fun.data.fit.mm.Rd.R | library(GLDEX)
### Name: fun.data.fit.mm
### Title: Fit data using moment matching estimation for RS and FMKL GLD
### Aliases: fun.data.fit.mm
### ** Examples
## Fitting normal(3,2) distriution using the default setting
# junk<-rnorm(50,3,2)
# fun.data.fit.mm(junk)
|
e27eb8635c3a32eac7fca0a75b32bcdf5bb2a1e8 | b7bf81f0cb136a66d5cbdf1c4e95cf47f80d38e9 | /exercise-solutions/ch-8-solutions.R | bb404c1464bb02f84f88ea1d75af7fa203029916 | [] | no_license | ericlegoaec/advancing-into-analytics-book | efd2099d543973b93a51fea1066b84d49913f741 | 44af312267aa7f5e958209fc293eb7489a0b05a4 | refs/heads/main | 2023-02-24T11:37:10.483996 | 2021-01-24T18:13:25 | 2021-01-24T18:13:25 | 332,713,220 | 1 | 0 | null | 2021-01-25T10:42:34 | 2021-01-25T10:42:33 | null | UTF-8 | R | false | false | 1,687 | r | ch-8-solutions.R |
library(tidyverse)
library(readxl)
library(writexl)
# Chapter 8 exercise solutions: census data wrangling with the tidyverse.
census <- read_csv('../datasets/census/census.csv')
glimpse(census)
divisions <- read_csv('../datasets/census/census-divisions.csv')
glimpse(divisions)
# 0. Merge the datasets first
# (join columns are inferred from the shared names in both tables)
census <- left_join(census, divisions)
head(census)
# 1. Sort the data by region ascending, division ascending and population descending.
# (You will need to combine datasets to do this.)
# Write the results to an Excel worksheet.
census %>%
arrange(region, division, desc(population)) %>%
write_xlsx("../datasets/census/solutions-data/census-sorted.xlsx")
# 2. Drop the postal_code field from your merged dataset.
census <- select(census, -postal_code)
head(census)
# 3. Create a new column _density_ which is a calculation
# of population divided by land area.
census <- mutate(census, density = population/land_area)
head(census)
# 4. Visualize the relationship between land area and population
# for all observations in 2015.
census_2015 <- filter(census, year == 2015)
ggplot(data = census_2015, aes(x = land_area, y = population))+
geom_point()
# NOTE: It's possible to use `ggplot()` in the pipe...
census %>%
filter(year == 2015) %>%
ggplot(aes(x = land_area, y = population)) + geom_point()
# Check out our large land areas...
census_2015 %>% arrange(desc(land_area))
# 5. Find the total population for each region in 2015.
census_2015 %>%
group_by(region) %>%
summarise(ttl_population = sum(population))
# 6. Pivot by year, state and population
# First, add an ID row
pivot_wider(data = select(census, c('state','year','population')),
names_from = 'year', values_from = 'population')
|
192b0383f7628a0e09ec3c9c4bfd1bda62c9e54d | 5a92ef946366ce197b92d02f6225537e3acffdaa | /rankall.R | f59aeeb86348abe3ff01adc10d29083e367baa6c | [] | no_license | sauldnn/Hospital-Quality | b328d36671650cd9616be4d1c2d562c420568fb3 | faa58c01395f894fbe1c07278957c7b5e4a1b6c4 | refs/heads/master | 2023-03-06T01:07:00.537105 | 2021-02-26T02:50:35 | 2021-02-26T02:50:35 | 340,787,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,230 | r | rankall.R | rankall <-function(outname, num="best"){
#first capital letter and concatenate with a dot.
col_outname <- gsub("(^|[[:space:]])([[:alpha:]])", "\\1\\U\\2", perl=TRUE) %>%
gsub(" ", ".") %>%
paste("Hospital.30.Day.Death..Mortality..Rates.from", sep=".")
#check validation
X_full <-read.csv("outcome-of-care-measures.csv")
if ((state %in% X_full[[7]])==FALSE)
stop("invalid state")
else if((col_outname %in% colnames(X_full)) ==F)
stop("invalid outcome")
states <- unique(X_full[[State]])
Ret <- data.frame()
for (state in states){
X <- X_full[X_full$State == State, ]
X <- X[with(X, order(X[[col_outname]]))]
#take cases and calculate the ratio...
if (num=="best"){
num <- 1 #take the first element.
}
else if (num="worst"){ #Repeat for the "num" rank
num <- length(X[[col_outname]])
}
else (is.numeric(num)){
if(length(X$Hospital)<num)
stop(NA)}
ratio <-X[num, col_outname] #like random element of "num" rank position
X <- X[which(X[[col_outname]] == ratio), ]
X <- sort(X$Hospital.Name) #then sort (alpha) and ...
Ret <- rbind(Ret, data.frame(hospital = X[1],state = state))
}
Ret
} |
549698a26f4ed26ffd19472cf48fd37b9fbf6195 | 881884790c4e409d9baa745598befe99b7389c99 | /man/rga3h.Rd | 0918eaf7d5adb8a8e24f76a1fea47ecd94791622 | [] | no_license | cran/regrap | 4386429e4a2a2907dc1bcffdfefb6d99eb9f9a5e | 999fa790c83dff352dda88803818b0ab08d0eb82 | refs/heads/master | 2022-11-16T21:08:51.343468 | 2020-07-03T10:00:06 | 2020-07-03T10:00:06 | 278,311,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 892 | rd | rga3h.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rfun_rga3h.R
\name{rga3h}
\alias{rga3h}
\title{reverse graphical approach for three hypotheses}
\usage{
rga3h(w, G, p, alpha)
}
\arguments{
\item{w}{a vector of initial weights}
\item{G}{a matrix of initial transaction weights}
\item{p}{a vector of p-values}
\item{alpha}{a numeric value giving the significance level}
}
\value{
a logical vector indicating whether the hypothesis is rejected: TRUE = rejected, FALSE = accepted
}
\description{
reverse graphical approach for three hypotheses
}
\examples{
w <- c(0.3,0.5,0.2)
G <- matrix(c(0,1/3,2/3, 1/2,0,1/2, 1/5,4/5,0),nrow=3,byrow=TRUE)
p <- c(0.012, 0.051, 0.021)
p <- c(0.012, 0.051, 0.019)
alpha <- 0.05
rga3h(w=w,G=G,p=p, alpha=alpha)
}
\references{
Gou, J. (2020). Reverse graphical approaches for multiple test procedures. Technical Report.
}
\author{
Jiangtao Gou
}
|
2cbd7c62995d106794160efed7900e8c1a877ba6 | d5889a013b8bca3992d9e34dcad1f59ad711e824 | /scripts/allPrinters-00-cache.R | 358bd5123a413c46f77468d44e60652d50d152be | [
"MIT"
] | permissive | cynthiahqy/dataset-pcmag | 5b88566df6be1ee57c3b2c13aef1374cdaf1e19a | 967af44d7332ae7ac2365bc833017433a8a2753c | refs/heads/master | 2020-03-12T08:33:27.402091 | 2019-04-11T07:33:56 | 2019-04-11T07:33:56 | 130,530,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,571 | r | allPrinters-00-cache.R | # Cache Dropbox/handtype_writtenReview.xlsx sheets to allPrinters/
# INITIALISE SESSION ----
library(tidyverse)
library(readxl)
library(here)
library(openxlsx)
library(stringr)
path2cache <- here("spreadsheets/cache/allPrinters/")
path2correct <- here("spreadsheets/cache/corrections/")
# READ & CACHE handtype-writtenreviews -----
## load handtype workbook (third sheet holds the printer records)
wbook <- here("spreadsheets/handtype_writtenReview.xlsx")
## split x1 (core identifying/price columns), remainder x1r
x1 <- c("source_vol", "source_no", "product", "product_brand", "company", "price_list", "price_street", "engine_brand", "product_type")
## cache to csv variables in x1, normalising case of brand/type/engine fields
read_excel(wbook, sheet = 3) %>%
.[c("row_id", x1)] %>%
mutate(product_brand = str_to_upper(product_brand),
product_type = str_to_lower(product_type),
engine_brand = str_to_upper(engine_brand)) %>%
write_csv(., paste0(path2cache, "allPrinters-x1.csv"))
## cache remaining variables (everything not in x1), keyed by row_id
read_excel(wbook, sheet = 3) %>%
.[c("row_id", setdiff(names(.), x1))] %>%
write_csv(., paste0(path2cache, "allPrinters-x1r.csv"))
# IMPORT x1 tibble for CLEANING
# df_x1 <- read_csv(paste0(path2cache, "allPrinters-x1.csv"),
# col_types = cols(
# row_id = col_character(),
# source_vol = col_integer(),
# source_no = col_integer(),
# product = col_character(),
# product_brand = col_character(),
# company = col_character(),
# price_list = col_double(),
# price_street = col_double(),
# engine_brand = col_character(),
# product_type = col_character()
# )
# )
# DATA CLEANING ----
## correct01: ADD parent_co, CORRECT product_brand ----
### import latest version of allprinters_x1
all_printers <- read_csv(paste0(path2cache, "allPrinters-x1.csv"))
### create correction table for parent_co
# Build the company-name correction table: sorted unique company strings as a
# one-column tibble. (The original piped the chain INTO the `colnames<-`
# assignment via a stray trailing %>%, so `unique_company` never received the
# tibble; the parallel unique_brand block below shows the intended pattern.)
unique_company <-
unique(all_printers$company) %>%
sort() %>%
as.tibble()
colnames(unique_company) <- "company"
# Derive helper columns: company name without the legal suffix, and a
# best-guess parent company (first word, upper-cased).
unique_company <- mutate(unique_company,
trun_company = str_remove(company, " (Inc\\.|Corp\\.|Co\\.|,$)"),
parent_co = str_to_upper(str_remove(company, " .+")))
# rm_comptype <- function(x) {
#   str_remove(x, " (Inc\\.|Corp\\.|Co\\.)") %>%
#     str_remove(., ",$")
# }
# Write the table out for manual correction (re-imported as *-v01.csv below).
unique_company %>% write_csv(., paste0(path2correct, "company_names.csv"))
### create correction table for brands
unique_brand <-
unique(all_printers$product_brand) %>%
sort() %>%
as.tibble()
colnames(unique_brand) <- "x1_brand"
unique_brand %>%
write_csv(., paste0(path2correct, "brand_names.csv"))
### read in corrections for brands and companies, merge with all_printers and cache
correct_company <-
read_csv(paste0(path2correct, "company_names-v01.csv")) %>%
select(c("company", "parent_co")) %>%
left_join(all_printers, ., by = "company")
correct_co_brand <-
read_csv(paste0(path2correct, "brand_names-v01.csv")) %>%
select(c("x1_brand", "correct_brand")) %>%
left_join(correct_company, ., by = c("product_brand" = "x1_brand")) %>%
mutate(product_brand = correct_brand) %>%
select(-correct_brand)
# filter(product_brand == "ABATON")
write_csv(correct_co_brand, paste0(path2cache, "allPrinters-x1-correct01.csv"))
## correct02: CORRECT engine_brand ----
### import latest version of allprinters_x1
all_printers <- read_csv(paste0(path2cache, "allPrinters-x1-correct01.csv"))
### create correction table for engine_brand (manufacturer)
unique_engine <-
unique(all_printers$engine_brand) %>%
sort() %>%
as.tibble()
colnames(unique_engine) <- "old.engine_brand"
unique_engine <-
mutate(unique_engine, correct.engine_brand = old.engine_brand)
write_csv(unique_engine, paste0(path2correct, "engine_brands.csv"))
### read and merge corrections for engine_brand
correct_engine <-
read_csv(paste0(path2correct, "engine_brands-v01.csv")) %>%
select(c("old.engine_brand", "correct.engine_brand")) %>%
left_join(all_printers, ., by = c("engine_brand" = "old.engine_brand")) %>%
mutate(engine_brand = correct.engine_brand,
correct.engine_brand = NULL)
write_csv(correct_engine, paste0(path2cache, "allPrinters-x1-correct02.csv"))
## correct03: merge price_list and price_street, reorder variables ----
all_printers <- read_csv(paste0(path2cache, "allPrinters-x1-correct02.csv"))
all_printers[is.na(all_printers$price_street), ]["price_street"] <- 0
### create price_max = list price, or if no list price use street price
add_price <-
mutate(all_printers,
price_max = pmax(price_list, price_street)) %>%
select("row_id", starts_with("source"), "product", "product_brand", "parent_co", "engine_brand", "product_type", "price_max", starts_with("price"))
write_csv(add_price, paste0(path2cache, "allPrinters-x1-correct03.csv"))
## correct04: correct product_name ----
all_printers <- read_csv(paste0(path2cache, "allPrinters-x1-correct03.csv"))
unique_product <-
unique(all_printers$product) %>%
sort() %>%
as.tibble()
colnames(unique_product) <- "old.product"
unique_product <-
mutate(unique_product,
new.product_name = str_to_upper(old.product))
write_csv(unique_product, paste0(path2correct, "product_names.csv"))
### read in product_name corrections
correct_product <- read_csv(paste0(path2correct, "product_names-v02.csv")) %>%
select("old.product", "new.product_name") %>%
left_join(all_printers, ., by = c("product" = "old.product")) %>%
mutate(product = NULL) %>%
rename(product_name = new.product_name)
write_csv(correct_product, paste0(path2cache, "allPrinters-x1-correct04.csv"))
|
4175d02ecc317347e2d633ca964d9ecbce4cb62d | 5b36d862498265ee820b5ec3da3e67129bd8dc0a | /_3_DataFrames/_3_10_DealingWithMissingData_DataFrame.R | d84213ad7e52c58751a110dfd6013459bf97f036 | [
"MIT"
] | permissive | Himanshu-rathee/R_DataCamp | 331a292cdc6c3678b9100f70f325a7a79aa561e8 | 7d2163d035f28eecc861067530b15f4ed38b4b76 | refs/heads/master | 2022-08-15T17:40:01.668116 | 2020-05-25T19:08:52 | 2020-05-25T19:08:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 420 | r | _3_10_DealingWithMissingData_DataFrame.R | c1 <- 1:10
# Demo: building a small data frame and handling missing values.
c2 <- letters[1:10]
charDf <- data.frame(col.name.1 = c1, col.name.2 = c2)
# check missing data (element-wise logical matrix)
print(is.na(mtcars))
# checking missing data as a whole (any)
print(any(is.na(mtcars)))
# checking missing data in a column (any)
print(any(is.na(mtcars$mpg)))
# replace NA with 0 across the whole data frame
# NOTE(review): if col.name.2 were a factor (pre-R-4.0 default), assigning 0
# into it would produce NA with a warning — confirm the R version in use.
charDf[is.na(charDf)] <- 0
# replace the NA value in mpg with the mean value of mpg
# (built-in mtcars has no NAs; this line is illustrative only)
mtcars$mpg[is.na(mtcars$mpg)] <- mean(mtcars$mpg)
05d556d2b6dce44a1ceb45ccf4c075ba2e32b7fa | 8af0800ea15c6ff5ce1c30c1e8bc85102667b669 | /R/frm_mdmb_regression_density.R | 0d4cbe92d3446ed1e93b5de17b7f0640c4038843 | [] | no_license | alexanderrobitzsch/mdmb | 34f0be7820987888101162fd263097c4088ab674 | d31faef05736391bee1122c4a728a606d69d791c | refs/heads/master | 2023-03-06T09:58:19.344444 | 2023-03-01T10:23:41 | 2023-03-01T10:23:41 | 95,305,766 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,361 | r | frm_mdmb_regression_density.R | ## File Name: frm_mdmb_regression_density.R
## File Version: 0.419
# Evaluate the (possibly transformed) regression density of y under a fitted
# bct_regression or yjt_regression model, for use in the FRM machinery.
#   model         - fitted bct_regression / yjt_regression object
#   y             - outcome values at which to evaluate the density
#   design_matrix - optional data frame; if given, predictions are rebuilt
#                   from model$formula on these data
#   case          - case identifiers used to normalise the posterior
#   X, offset     - optional explicit design matrix / offset (used only when
#                   design_matrix is NULL)
# Returns list(like, post, sigma, R2): likelihood, case-normalised posterior,
# SD of predictions (explained SD), and an R^2-type ratio.
frm_mdmb_regression_density <- function(model, y, design_matrix=NULL, case=NULL,
X=NULL, offset=NULL )
{
# pars <- coef(model)
pars <- mdmb_extract_coef(mod=model)
class_model <- class(model)
np <- length(pars)
# Regression coefficients live at model-specified positions in pars.
beta <- pars[ model$index_beta ]
# --- linear predictor: from the model itself, from an explicit X, or from a
#     fresh design matrix built off model$formula
if ( is.null(design_matrix) ){
if (is.null(X) ){
y_pred <- predict(model)
} else {
y_pred <- X %*% beta
if (! is.null(offset) ){
y_pred <- y_pred + offset
}
}
} else {
# y_pred <- predict(model, newdata=design_matrix )
# form <- attr( model$model, "terms")
form <- model$formula
Xdes <- stats::model.matrix( object=form, data=design_matrix )
offset_values <- offset_values_extract(formula=form, data=design_matrix )
y_pred <- Xdes %*% beta + offset_values
}
# Observation weights default to 1.
w <- model$weights
if ( is.null(w) ){
w <- rep( 1, length(y) )
}
# Weighted SD of the predictions (explained spread).
y_sd <- mdmb_weighted_sd( x=y_pred, w=w )
#--- extract parameters: degrees of freedom (possibly estimated on log scale),
#    transformation parameter lambda (fixed or estimated), and scale sigma
if (model$est_df){
logdf <- pars[model$index_df]
df <- mdmb_compute_df(x=logdf, df=Inf, est_df=TRUE)
} else {
df <- model$df
}
if ( is.null(model$index_lambda) ){
lambda <- model$lambda_fixed
} else {
lambda <- pars[model$index_lambda]
}
sigma <- pars[model$index_sigma]
use_probit <- model$probit
#*** y values on the transformed metric (Box-Cox or Yeo-Johnson)
if (class_model=="bct_regression"){
yt <- bc_trafo( y=y, lambda=lambda )
}
if (class_model=="yjt_regression"){
yt <- yj_trafo( y=y, lambda=lambda, probit=use_probit )
}
# Total SD of transformed y; model-supplied sigma overrides the fitted one.
y_sd0 <- mdmb_weighted_sd( x=yt, w=w )
if ( ! is.null(model$sigma) ){
y_sd <- model$sigma
}
#--- R^2: explained variance over total variance on the transformed metric
R2 <- mean( y_sd^2 / y_sd0^2 )
#****** evaluated density (scaled t with the matching transformation)
if (class_model =="bct_regression"){
d1 <- dbct_scaled( x=y, location=y_pred, shape=sigma, lambda=lambda, df=df )
}
if (class_model =="yjt_regression"){
d1 <- dyjt_scaled( x=y, location=y_pred, shape=sigma, lambda=lambda,
df=df, probit=use_probit )
}
# Normalise the likelihood within cases to obtain posterior weights.
d2 <- frm_normalize_posterior( post=d1, case=case )
res <- list( "like"=d1, "post"=d2, "sigma"=y_sd, R2=R2)
return(res)
}
d4c0a988d8255f27d32897d24b171e36bbbf0c57 | 54d16a72800b0da5796653350a73e6eb95016a65 | /R/downloadAutomatizado.R | 20d866dd54b6497e9210e3c8a4bc12bfd54bdd5e | [] | no_license | jairomr/PDFtoCSVforIMEA | edaea191366604621eb5aad2e6a4ff0a7c4d4791 | 03b2d0722608a534dbd9c889b0ecfd78a32bc2e8 | refs/heads/master | 2021-08-10T16:09:11.647916 | 2017-11-12T19:38:47 | 2017-11-12T19:38:47 | 110,460,219 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,030 | r | downloadAutomatizado.R |
# Download one report PDF from the IMEA publications server, saving it in the
# working directory under the same file name. Prints progress to the console.
getPDF <- function(file){
  base_url <- 'http://www.imea.com.br/upload/publicacoes/arquivos/'
  full_url <- paste(base_url, file, sep = '')
  cat('Baixando ', file, '\n')
  download.file(full_url, file)
  cat('COMPLETO\n')
}
# Query page 1 of the IMEA publications API for `categoria` and derive the
# number of result pages from rows_total, assuming 16 entries per page.
# NOTE(review): 1 + floor(total/16) overcounts by one page when the total is
# an exact multiple of 16 — confirm against the API's paging behaviour.
numeroDePagina <- function(categoria){
  library("rjson")
  query_url <- paste('http://www.imea.com.br/imea-site/relatorios-mercado-detalhe/buscarPublicacoes?categoria=',
                     categoria, '&subcategoria=3&page=1', sep = '')
  response <- fromJSON(file = query_url)
  total_rows <- as.numeric(response$data$rows_total)
  1 + floor(total_rows / 16)
}
# Walk every result page of the IMEA publications API for `categoria` and
# download each listed PDF that is not already present on disk.
getAllPDFbyCategoria <- function(categoria) {
  library("rjson")
  n_pages <- numeroDePagina(categoria)
  for (i in seq_len(n_pages)){
    json_file <- paste('http://www.imea.com.br/imea-site/relatorios-mercado-detalhe/buscarPublicacoes?categoria=',
                       as.character(categoria), '&subcategoria=3&page=',
                       as.character(i), sep = '')
    json_data <- fromJSON(file = json_file)
    cat(json_file, '\n')
    # seq_along() handles pages with zero rows; the original 1:length(...)
    # would iterate over c(1, 0) and index a missing element.
    for (row in seq_along(json_data$data$rows)){
      file <- json_data$data$rows[[row]]$arquivo  # <- was `=` (assignment)
      if (!file.exists(file)) {
        getPDF(file)
      }
    }
  }
}
af19cab9ee3181b33fbab3bbb5215f2e4fb542b3 | 8e1002933c5e79a33bc1aff1d216696aa2f96e68 | /Connectivity_between_MPAs_Stephen.R | 06d3fe1ef7539ebcb297a432428a4bb4ec77cfd1 | [
"MIT"
] | permissive | Christopher-Blackford/ParticleTracking | 34432a0e58eb453c04c222d6d186200cef01f742 | 5e4917b88e6bdd87753bc8157244c2767c61a5e9 | refs/heads/master | 2020-03-13T02:25:58.543461 | 2020-01-13T20:36:30 | 2020-01-13T20:36:30 | 84,609,475 | 0 | 1 | null | 2017-03-15T18:50:50 | 2017-03-10T23:32:45 | R | UTF-8 | R | false | false | 18,763 | r | Connectivity_between_MPAs_Stephen.R | #############################################################################################################################
##############################################################################################################################
###Connectivity_between_MPA.R
#Code by: Christopher Blackford (christopher.blackford@mail.utoronto.ca)
###READ.ME:
#This file takes:
#1) Input (release) and output (settle) data from the LTRANS model of larval dispersal
#2) A shapefile of the BC inshore area
#3) A shapefile of current MPA locations in BC
#
#To build shapefiles showing connectivity in the BC inshore region between
#current distribution of MPAs
#The analysis can be run across multiple years and for multiple PLD values
#
##
###
####
#####
#Clear workspace
# NOTE(review): rm(list=ls()) wipes whatever is in the interactive session;
# fine for a standalone script, but never source() this file from another
# analysis session.
rm(list=ls())
full.run.time <- proc.time() # 33 minutes
###################TABLE OF CONTENTS
###[1] Loading up larval release points
###[2]Choosing year of release and pld you are tracking
###[3] Identifying settlement locations and linking to release locations
###[4] Setting up study extent you will be using to clip your larval release points to your BC study extent
###[5] Removing some of the MPAs that got sliced to thin clipping to Remi's extent
###[6] Creating dataframe describing release and settlement of each particle
###[7] Creating connectivity tables - (down) column donates to (across) row
###[8] Creating shapefiles with larval dispersal data represented
###################Custom functions:
#Remove NAs in sp Dataframe object
# x sp spatial DataFrame object
# margin Remove rows (1) or columns (2)
# Drop rows (margin = 1) or columns (margin = 2) of an sp object's attribute
# table that contain at least one NA value.
#
# x      sp SpatialPointsDataFrame or SpatialPolygonsDataFrame
# margin 1 = remove rows with NAs, 2 = remove columns with NAs
#
# Returns the input object with the offending rows/columns removed. If there
# are no NAs the object is returned unchanged -- the original version fell
# through to `x[-integer(0), ]`, which selects ZERO rows/columns and silently
# emptied the object.
sp.na.omit <- function(x, margin=1) {
  # `if` needs a length-one condition, so use the scalar && rather than the
  # elementwise & of the original
  if (!inherits(x, "SpatialPointsDataFrame") && !inherits(x, "SpatialPolygonsDataFrame"))
    stop("MUST BE sp SpatialPointsDataFrame OR SpatialPolygonsDataFrame CLASS OBJECT")
  # Unique row (or column) indices of every NA cell in the attribute table
  na.index <- unique(as.data.frame(which(is.na(x@data), arr.ind=TRUE))[, margin])
  # Nothing to drop: return as-is instead of subsetting by an empty negative
  # index, which would drop everything
  if (length(na.index) == 0) return(x)
  if (margin == 1) {
    cat("DELETING ROWS: ", na.index, "\n")
    return(x[-na.index, ])
  }
  if (margin == 2) {
    cat("DELETING COLUMNS: ", na.index, "\n")
    return(x[, -na.index])
  }
}
rm(sp.na.omit)
###################Loading required packages:
# NOTE(review): prefer library() over require() for mandatory dependencies --
# require() only warns (returns FALSE) when a package is missing, so a failed
# load surfaces later as "could not find function" rather than here.
require(plyr)
require(data.table)
require(tidyverse)
require(rgdal)
require(rgeos)
require(maptools)
require(spatialEco)
########################################################################
########################################################################
########################################################################
########################################################################
### [1] Loading up larval release points
#Acquiring files
filenames <- list.files(path = "./cuke_present/ReleaseLocations", pattern="rl_.", full.names=TRUE,recursive=T)
# load all files into a list, read_csv is much faster than read.csv
rllist <- lapply(filenames, read_csv,
col_names = c("long0","lat0","Z0","delay","site0"),
col_types = cols("d","d","i","i","i")
)
# set the names of the items in the list, so that you know which file it came from
rllist <- setNames(rllist,filenames)
# rbind the list
rl <- rbindlist(rllist, idcol="filename")
# Extract the release bin number encoded in each source filename (rl_<bin>.txt)
rl$bin <- as.numeric(gsub(".*rl_|.txt.*", "",rl$filename))
head(rl)
rm(rllist, filenames)
#Creating csv file with all starting locations
#write.csv(rl, file="./output/release_settlement/Remi_release_lat_long.csv", row.names = F)
########################################################################
########################################################################
########################################################################
########################################################################
###[2] Setting up study extent you will be using to clip your larval release points to your BC study extent
#Loading my MPA shapefile to get proper projection
MPA_mask <- readOGR("K:/Christopher_PhD/CPAWS/Cleaned_standardized/All_PAs", "MPAS_merged")
My_BC_projection <- MPA_mask@proj4string
row.names(MPA_mask@data) <- MPA_mask@data$CB_ID #Change row.names because FID starts at 0 and you want it to start at 1
head(MPA_mask@data)
#Loading Remi's grid where larvae were released
grid <- readOGR("./cuke_present/StudyExtent/Starting_grid", "grid")
NAD_projection <- proj4string(grid)
proj4string(grid)
#Dissolve into one polygon since so you can change grid dimensions
grid <- spTransform(grid, My_BC_projection) #For some reason not "identical" to My_BC_projection, check later
grid <- gUnaryUnion(grid)
#Intersecting - don't know why this works and ConPoly2 <- grid[Ecozone_mask,] doesn't
# NOTE(review): a zero-width buffer is a standard trick to repair
# self-intersecting rings before gIntersection -- verify geometries stay valid.
MPA_mask <- gBuffer(MPA_mask, byid=TRUE, width=0) #Need to do this to avoid ring intersection
row.names(MPA_mask@data) <- MPA_mask@data$CB_ID #Change row.names because FID starts at 0 and you want it to start at 1
MPA_mask_id <- as.character(MPA_mask@data$CB_ID)
ConPoly <- gIntersection(grid, MPA_mask, byid = TRUE, id = MPA_mask_id)
#Adding dataframe so you can create a shapefile of new study extent
#Clipped dataframe: rebuild an attribute table keyed on the intersection IDs
ConPoly_ID <- row.names(ConPoly)
ConPoly_ID <- as.numeric(ConPoly_ID)
ConPoly_ID <- as.data.frame(ConPoly_ID)
row.names(ConPoly_ID) <- ConPoly_ID$ConPoly_ID
#Original dataframe
ConPoly_data <- as.data.frame(MPA_mask[ConPoly_ID$ConPoly_ID, ])
MPAS <- SpatialPolygonsDataFrame(ConPoly, ConPoly_ID)
MPAS@data <- plyr::rename(MPAS@data, c("ConPoly_ID" = "CB_ID"))
rm(grid, ConPoly_ID, ConPoly, ConPoly_data, MPA_mask_id)
########################################################################
###Removing some of the MPAs that got sliced to thin clipping to Remi's extent
# A threshold of 999 (percent) effectively keeps every MPA; lower it to drop
# polygons that lost a large share of their area in the clip.
size_reduction_threshold <- 999 #You are only left with 9ish if you go down to 0
#Compare how much smaller clipped MPA layer is to MPA_mask file
MPA_mask@data$Merged_area <- gArea(MPA_mask, byid = TRUE)
MPA_clipped_size <- MPAS
MPA_clipped_size@data$Clip_Area <- gArea(MPA_clipped_size, byid = TRUE)
MPA_clipped_size <- sp::merge(MPA_clipped_size, MPA_mask@data, by = "CB_ID")
# Percent of the original (merged) area lost in the clip
MPA_clipped_size@data$size_reduction <- 100*(1 - MPA_clipped_size@data$Clip_Area/MPA_clipped_size@data$Merged_area)
row.names(MPA_clipped_size@data) <- MPA_clipped_size@data$CB_ID #Change row.names
Size_reduction_df <- MPA_clipped_size@data
#write.csv(Size_reduction_df, "./Connectivity_between_MPA_Stephen/output_keep/size_reduction.csv", row.names = F)
#Histogram of how many MPAs got clipped by how much
Size_histogram <- ggplot(Size_reduction_df, aes(size_reduction)) +
geom_histogram(binwidth = 5, fill = "#FFFFFF", colour = "black") +
labs(title = "Histogram of MPA loss", x = "Percent loss", y = "Count") +
theme(
plot.title = element_text(size = 16),
axis.text = element_text(size = 16),
axis.title = element_text(size = 16),
axis.line = element_line("black"),
panel.background = element_blank()
)
Size_histogram
#Removing MPAs that were too clipped by Remi's extent (based on percent loss)
MPAS <- MPA_clipped_size[MPA_clipped_size@data$size_reduction <= size_reduction_threshold,]
MPAS_loop <- MPAS
########################################################################
########################################################################
########################################################################
########################################################################
### [3] Choosing year of release and pld you are tracking
memory.limit(size=15000)
# List the particle tracking files for that particular year and pld
year <- c(1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007)
pld <- c(30, 60, 120)
#year_time <- 1
#pld_time <- 1
#i <- 1
###To Loop: Remove hastags below and add closing brackets, delete temporary year_time and pld_time, and concatate year and pld vectors above
###
# NOTE(review): 1:length(x) misbehaves when x is empty (yields c(1, 0));
# seq_along(year) / seq_along(pld) would be the safer idiom here.
for (year_time in 1:length(year)){
for (pld_time in 1:length(pld)){
########################################################################
########################################################################
########################################################################
###[4] Identifying settlement locations and linking to release locations
#Acquiring files
# File pattern encodes the settlement day as "para 1<pld+1 zero-padded to 3>"
filenames <- list.files(path=paste0("./cuke_present/ConData/G", year[year_time]), pattern=glob2rx(paste0("*para 1",formatC(pld[pld_time]+1, width = 3, format = "d", flag = "0"),"*")), full.names=TRUE,recursive=T)
# load all files into a list, read_csv is much faster than read.csv
datalist <- lapply(filenames, read_csv,
col_names = c("long","lat","Z","Out","site"),
col_types = cols("d","d","d","i","i")
)
# set the names of the items in the list, so that you know which file it came from
datalist <- setNames(datalist,filenames)
# rbind the list
dataset <- rbindlist(datalist, idcol="filename")
dataset$site <- NA
rm(datalist)
###This process takes a long time ~ 5 - 10 minutes
#Reshaping dataset to take filename info and turning it into columns
dataset <- dataset %>%
mutate(temp=substr(filename,24,nchar(filename))) %>%
# mutate(temp=substr(filename,25,nchar(filename))) %>% # you probably want this back to 24? #REMI COMMENT
separate(temp,c("temp_type_year","rday","bin","time"),"/",convert=TRUE) %>%
separate(temp_type_year,c("type","year"),sep=1,convert=TRUE) %>%
mutate(time=as.integer(substr(time,9,13))-1001)
#Linking release locations to settlement locations based on bin
# Copies release coordinates onto every settlement record sharing that bin;
# relies on rl and dataset having matching within-bin row order.
for(i in unique(dataset$bin)){
x <- rl$bin==i
y <- dataset$bin==i
dataset$long0[y] <- rl$long0[x]
dataset$lat0[y] <- rl$lat0[x]
dataset$Z0[y] <- rl$Z0[x]
dataset$delay[y] <- rl$delay[x]
dataset$site0[y] <- rl$site0[x]
print(paste(i,sum(x),sum(y),sum(is.na(dataset$long0)))) # this is just to show its working
}
rm(filenames,x,y,i)
#Add larvae IDs to dataset
Con_df <- dataset
Con_df <- subset(Con_df, select = c(long0, lat0, Z0, long, lat, Z, year, rday))
Con_df$larvae_ID <- row.names(Con_df)
#Now you can remove some large files but only if you want to!
rm(dataset)
########################################################################
########################################################################
########################################################################
########################################################################
###[5] Creating dataframe describing release and settlement of each particle
#Clipping to CB_ID to do points in poly
MPAS <- MPAS_loop[,"CB_ID"]
MPA.dataframe.time <- proc.time() #6 minutes
#####Showing where each larvae begings and ends
Release_df <- subset(Con_df, select = c(long0, lat0, Z0, larvae_ID))
Settle_df <- subset(Con_df, select = c(long, lat, Z, larvae_ID, year, rday))
rm(Con_df) #to free up space
#Associate released points with where they were released from
xy <- subset(Release_df, select = c(long0, lat0))
Released_larvae <- SpatialPointsDataFrame(coords = xy, data = Release_df, proj4string = CRS(NAD_projection))
Released_larvae <- spTransform(Released_larvae, MPAS@proj4string) #use your custom BC projection for this
Released_larvae <- Released_larvae[MPAS,]
#Finding which polygons released larvae are in
Released_larvae <- point.in.poly(Released_larvae, MPAS) #takes many minutes
#Associate settled points with where they settled
xy <- subset(Settle_df, select = c(long, lat))
Settled_larvae <- SpatialPointsDataFrame(coords = xy, data = Settle_df, proj4string = CRS(NAD_projection))
Settled_larvae <- spTransform(Settled_larvae, MPAS@proj4string) #use your custom BC projection for this
Settled_larvae <- Settled_larvae[MPAS,]
#Finding which polygons settled larvae are in
Settled_larvae <- point.in.poly(Settled_larvae, MPAS) #takes many minutes
#Join dataframes to make precursor to connectivity matrices
MPA_df <- merge(Released_larvae@data, Settled_larvae@data, by = "larvae_ID", all = T)
#Remove NAs for when settled and released don't line up
MPA_df <- MPA_df[complete.cases(MPA_df),]
#Need to convert Polygon ID to numeric to sort properly - try to do this earlier in process????
MPA_df$CB_ID.x <- as.numeric(MPA_df$CB_ID.x)
MPA_df$CB_ID.y <- as.numeric(MPA_df$CB_ID.y)
MPA_df <- MPA_df[with(MPA_df, order(CB_ID.x, CB_ID.y)), ]
proc.time() - MPA.dataframe.time
rm(xy, Release_df, Settle_df)
########################################################################
########################################################################
########################################################################
###[6] Creating connectivity tables - (down) column donates to (across) row
#As connectivity matrices
Con_table <- table(MPA_df$CB_ID.x, MPA_df$CB_ID.y)
write.csv(Con_table, paste0("./Connectivity_between_MPA_Stephen/output_keep/Con_table/Con_table", size_reduction_threshold,"year", year[year_time], "_pld", pld[pld_time], ".csv"))
#As dataframe
Con_df <- as.data.frame(Con_table)
# table() returns factor margins; go factor -> character -> numeric so the
# IDs sort numerically rather than lexically
Con_df$Var1 <- as.character(Con_df$Var1)
Con_df$Var1 <- as.numeric(Con_df$Var1)
Con_df$Var2 <- as.character(Con_df$Var2)
Con_df$Var2 <- as.numeric(Con_df$Var2)
Con_df <- Con_df[with(Con_df, order(Var1, Var2)), ]
#write out Con_df
write.csv(Con_df, paste0("./Connectivity_between_MPA_Stephen/output_keep/Con_df/Con_df", size_reduction_threshold,"year", year[year_time], "_pld", pld[pld_time], ".csv"), row.names = F)
} #closing pld loop
print(paste0("Finished year ", year[year_time]))
} #closing year loop
########################################################################
########################################################################
########################################################################
###[7a] Merging connectivity dataframes across years to get average connectivity over decade
#Loading all connectivity dataframes
#temporary for rockfish project
year <- c(1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007)
pld <- c(30, 60, 120)
size_reduction_threshold <- 999
for (i in 1:length(pld)){
filenames <- list.files(path="./Connectivity_between_MPA_Stephen/output_keep/Con_df", pattern=glob2rx(paste0("Con_df", size_reduction_threshold, "year*_pld", pld[i], ".csv")), full.names=TRUE,recursive=T)
# load all files into a list
datalist <- lapply(filenames, read.csv)
head(datalist)
# set the names of the items in the list, so that you know which file it came from
datalist <- setNames(datalist,filenames)
# Merging the dataframe (rbind the list)
dataset <- data.table::rbindlist(datalist, idcol="filename")
rm(datalist)
dataset
#Averaging: mean and sd of the yearly connectivity counts per release/settle pair
Con_df <- group_by(dataset, Var1, Var2)
Con_df <- dplyr::summarise(Con_df, mean(Freq), sd(Freq))
Con_df <- plyr::rename(Con_df, c("Var1" = "MPA_Release", "Var2" = "MPA_Settle", "mean(Freq)" = "mean_Freq", "sd(Freq)" = "sd_Freq"))
#Remove rows that don't represent connection between MPAs (zero mean flow)
Nonzero_Con_df <- Con_df[which(Con_df$mean_Freq > 0),]
########################################################################
###7[b] Optional - Compensating for poor rockfish habitat
# NOTE(review): raster() / extract() come from the raster package, which is
# not loaded above -- confirm library(raster) is attached elsewhere.
Rockfish_habitat_suit <- raster("./Connectivity_between_MPA_Stephen/Rockfish_project/output_rasters/habitat_quality/Rock_habsuit.tif")
Rockfish_habitat_suit[is.na(Rockfish_habitat_suit[])] <- 0
MPA_RockValue <- extract(x=Rockfish_habitat_suit, y=MPAS, fun = mean)
MPA_RockValue <- data.frame(MPA_RockValue)
MPA_RockValue$rows <- 1:nrow(MPA_RockValue)
MPAS@data$rows <- 1:nrow(MPAS@data)
Rock_df <- base::merge(MPAS@data, MPA_RockValue, by = "rows")
Rock_df <- Rock_df[c("CB_ID", "MPA_RockValue")]
Habitat_Con_df <- base::merge(Nonzero_Con_df, Rock_df, by.x = "MPA_Release", by.y = "CB_ID", all = T)
# Rescale habitat suitability so the best cell contributes a weight of 1
Value_to_larvae_constant <- 1/max(Rockfish_habitat_suit@data@values)
Habitat_Con_df$mean_Rockfish_adj <- Habitat_Con_df$mean_Freq*Habitat_Con_df$MPA_RockValue*Value_to_larvae_constant
dir.create(paste0("./Connectivity_between_MPA_Stephen/output_keep/Results/pld", pld[i]))
dir.create(paste0("./Connectivity_between_MPA_Stephen/output_keep/Results/pld", pld[i], "/size_reduction_threshold", size_reduction_threshold))
write.csv(Habitat_Con_df, paste0("./Connectivity_between_MPA_Stephen/output_keep/Results/pld", pld[i], "/size_reduction_threshold", size_reduction_threshold,
"/MPAs_pld", pld[i], ".csv"), row.names = FALSE)
}
proc.time() - full.run.time
##########
#########
########
#######
######
#####
####
###
##
#END
#EXTRA
########################################
##Converting to connectivity matrices
#Con_table_mean <- xtabs(Con_df$mean_Freq ~ Con_df$MPA_Release+Con_df$MPA_Settle)
#Con_table_mean <- as.data.frame.matrix(Con_table_mean) #this converts to dataframe but might actually be fine to keep as table
#Con_table_sd <- xtabs(Con_df$sd_Freq ~ Con_df$MPA_Release+Con_df$MPA_Settle)
#Con_table_sd <- as.data.frame.matrix(Con_table_sd) #this converts to dataframe but might actually be fine to keep as table
#creating directory for results output
#mean_results_directory <- paste0("./Connectivity_between_MPA_Stephen/output_keep/Results/pld", pld[i])
#dir.create(mean_results_directory)
#size_reduction_results_directory <- paste0(mean_results_directory, "/size_reduction_threshold", size_reduction_threshold)
#dir.create(paste0(size_reduction_results_directory))
#Writing out connectivity tables and dataframes for mean and standard deviation
#write.csv(Con_df, paste0(size_reduction_results_directory, "/Con_df", size_reduction_threshold, "_mean_pld", pld[i], ".csv"))
#write.csv(Con_table_mean, paste0(size_reduction_results_directory, "/Con", size_reduction_threshold, "_mean_pld", pld[i], ".csv"))
#write.csv(Con_table_sd, paste0(size_reduction_results_directory, "/Con", size_reduction_threshold, "_sd_pld", pld[i], ".csv"))
#write.csv(Habitat_handicapped, paste0(size_reduction_results_directory, "/RockfishHab_df", size_reduction_threshold, "_mean_pld", pld[i], ".csv"))
########################################
### [7c] Connectivity dataframes in terms of percentage
#still needs work to loop and clean
#Con_table_percent <- Con_df
#Released_each_MPA <- Released_larvae@data
#Released_each_MPA <- plyr::count(Released_each_MPA$CB_ID)
#Released_each_MPA <- dplyr::rename(Released_each_MPA, CB_ID = x, Number_larvae_release = freq)
#for (j in Released_each_MPA$CB_ID) {
# Old_row_value <- Con_table_percent[which(Con_table_percent$X == j),]
# New_row_value <- Old_row_value
##New row = 100 * old row value / total larvae released
# Con_table_percent[which(Con_table_percent$X == j),] <- (100*Con_table_percent[which(Con_table_percent$X == j),])/(Released_each_MPA$Number_larvae_release[Released_each_MPA$CB_ID == j])
# write.csv(Con_table_percent, paste0(size_reduction_results_directory, "/Con", size_reduction_threshold, "_percent_pld", pld[i], ".csv"))
########################################
|
c4b2d5c4b21dc89c08c741001d2727e0451a977d | a2aee752d7fd804ded63cafb587a25d6911f0db8 | /R/TechnicalImport.R | b6993ebc5a87204b890506627b2bada8da916479 | [] | no_license | jeroenbrons/knmiR | e8ca0d3cf5130d464f9239847059a93c7b46ce32 | d1d9c9f3cdad6053455916a8fd1312130a8d42d5 | refs/heads/master | 2021-07-13T06:30:18.545480 | 2017-07-04T13:36:18 | 2017-07-04T13:36:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 636 | r | TechnicalImport.R | ReadZippedFile <- function(url, colNames) {
  # Open the remote file and decompress the gzip stream on the fly
  con <- gzcon(url(url))
  # Pull all decompressed lines into memory and expose them as a text
  # connection so read.table() can parse them
  raw <- textConnection(readLines(con))
  close(con)
  data <- read.table(raw, col.names = colNames)
  close(raw)
  return(data)
}
# Normalize an earthquake-catalog table parsed from JSON: coerce the date and
# the numeric measurement columns to proper types and fix the column order.
UpdateJsonTable <- function(jsonTable) {
  # Declare NSE column names so R CMD check does not flag them as unbound
  depth <- lat <- lon <- mag <- NULL
  dt <- as.data.table(jsonTable) # nolint
  dt[, date := as.Date(date, tz = "CET")]
  # Convert all measurement columns from character to numeric in one pass
  num_cols <- c("depth", "lat", "lon", "mag")
  dt[, (num_cols) := lapply(.SD, as.numeric), .SDcols = num_cols]
  setcolorder(dt, c("date", "time", "place", "type",
                    "evaluationMode", "lat", "lon", "depth", "mag"))
  dt
}
|
ba396008c69fd20fd4db0d77a3a76d9e1c389d3b | 25d51bcc43ac49df9e9cca32ae46dd94350cb466 | /plot5.R | cc625d9757aa7cb9848f2388f34847d8985ed4c1 | [] | no_license | perplexedpigmy/ExData_Plotting2 | b2f0d4a5399ef86d4ca26a9b8ea3b32d1257bda6 | 155d47bd68d293e739ffdc164f79871c4e6cc284 | refs/heads/master | 2016-09-03T01:24:52.883666 | 2014-12-22T09:37:23 | 2014-12-22T09:37:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 839 | r | plot5.R | # Exploratory Data Analysis
# Project 2. Question 5
# How have emissions from motor vehicle sources
# changed from 1999–2008 in Baltimore City?
# Include utilities to retrieve external data sources
source('common.R')
# Download and unzip the NEI data once; getFile is defined in common.R
getFile("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
"data",
"national.emission.inventory.zip",
unzip = TRUE)
nei <- readRDS("data/summarySCC_PM25.rds")
# fips 24510 = Baltimore City; ON-ROAD = motor vehicle sources
baltimore <- subset(nei, fips == "24510" & type=="ON-ROAD")
# Total emissions per year
agg <- aggregate(baltimore["Emissions"], list(year = baltimore$year), sum)
library(ggplot2)
png('plot5.png', width=480, height=480)
print(
ggplot(agg, aes(x=year, y=Emissions)) +
geom_line(size=1) +
ggtitle(expression("PM" [2.5] ~ " Motor Vehicle Emissions (Baltimore City)")) +
theme(legend.position="none")
)
dev.off()
|
3170af15801cf9aa057a0f8b1ea013e988bdcec2 | 744b8bce376eac9284ce8673fb3b0b349c074e33 | /viz/viz_model_results.R | 34731fec05b05f3b457ac44ea22ae4a12225bea0 | [] | no_license | njhenry/thesis_nmr_joint_model | 27b9021a195d7269528ff2a348a09b0f59a8b04b | 60ce24b7fea1c4205adac097d96b7e35cee84bc6 | refs/heads/main | 2023-08-20T06:28:46.071780 | 2021-11-02T04:42:28 | 2021-11-02T04:42:28 | 423,705,915 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,572 | r | viz_model_results.R | ## #######################################################################################
##
## VISUALIZE MEXICO MODEL RESULTS
##
## AUTHOR: Nat Henry, github: njhenry
## CREATED: 19 August 2021
## PURPOSE: Vizualize raw data as well as model results
##
## #######################################################################################
required_libs <- c(
'data.table','dplyr','ggplot2','glue','grid','gridExtra','RColorBrewer','scales','sf'
)
invisible(lapply(required_libs, library, character.only=TRUE))
# Set file paths
repo_dir <- '{REDACTED}'
config <- yaml::read_yaml(file.path(repo_dir, 'mexico/config.yaml'))
data_version <- '20210818'
# Model run labels -> run-date directory names under config$dirs$runs
run_dates <- list(
narrow = '20210930_allpois_narrow_sds_2', # Real data, narrow VR bias
wide = '20210930_allpois_wide_sds_2', # Real data, wide VR bias
sim_u = '20210930_allpois_sim' # Simulated data, unbiased BH
)
run_names <- names(run_dates)
# viz_dir <- file.path(config$dirs$viz_dir, gsub('-','',Sys.Date()))
viz_dir <- file.path(config$dirs$viz_dir, '20210930_2')
dir.create(viz_dir, showWarnings=FALSE)
# Load spatial metadata
loc_meta <- fread(file.path(config$dirs$prepped_data, data_version, 'location_metadata.csv'))
# Load shapefiles: admin2 (municipality, stable units) and admin1 (state)
ad2_sf <- sf::st_read(file.path(
config$dirs$vr_data, 'mex_adm2/shapefile_stable/shapefile_stable_2000_2017.shp'
))[, c('uid','geometry')]
ad1_sf <- sf::st_read(file.path(
config$dirs$vr_data, 'mex_adm2/shapefile_single_year/shapefile_single_year_2017_admin1.shp'
))[, c('GAUL_CODE', 'geometry')]
# Load model summaries: one list per run with parameter, prediction, and
# fixed-effect summaries, plus simulation arguments when they exist
summs <- lapply(run_dates, function(rd){
mod_dir <- file.path(config$dirs$runs, rd)
summ_list <- list(
param = fread(file.path(mod_dir, 'param_summs.csv')),
pred = fread(file.path(mod_dir, 'pred_summs.csv')),
fe = fread(file.path(mod_dir, 'fe_summs.csv')),
sim_args = NULL
)
sim_args_fp <- file.path(mod_dir, 'sim_args.RDS')
if(file.exists(sim_args_fp)) summ_list$sim_args <- readRDS(sim_args_fp)
return(summ_list)
})
# Minor data prep - create merged admin2 metadata
ad1_meta <- loc_meta[level==1,][, .(location_id, adm_code, adm_ascii_name)]
colnames(ad1_meta) <- c('GAUL_CODE', 'parent_code','parent_name')
ad1_sf <- merge(x=ad1_sf, y=ad1_meta, by='GAUL_CODE', all.x=TRUE)
# One row per stable municipality uid, with names collapsed and state attached
loc_merge_meta <- (loc_meta
[level==2, ]
[, .(parent_code=parent_code[1], adm_name=paste(adm_ascii_name,collapse=', ')), by=uid]
[ad1_meta, on = 'parent_code']
)
ad2_sf <- merge(x=ad2_sf, y=loc_merge_meta, by='uid')
# Attach the municipality metadata to every run's prediction table
for(rd in run_names){
summs[[rd]]$pred <- summs[[rd]]$pred[loc_merge_meta, on='uid'][, i.parent_code := NULL]
}
## FIG 1: DESCRIPTIVE NATIONAL PLOT ----------------------------------------------------->
excl_colors <- c(
'Less marginalized' = '#b3cde3',
'Moderately marginalized' = '#8c6bb1',
'Severely marginalized' = '#6e016b'
)
excl_dt <- copy(summs$narrow$pred)
# Map the 0-based excl_group code onto its human-readable label
excl_dt$excl_label <- sapply(excl_dt$excl_group, function(ii) names(excl_colors)[ii+1])
excl_sf <- merge(x=ad2_sf, y=excl_dt[, .(uid, excl_label)])
# Municipality fills with state borders overlaid; projected to EPSG:6372
# (presumably Mexico ITRF2008 / LCC -- confirm)
mex_exclusion_fig <- ggplot() +
geom_sf(data=excl_sf, aes(fill=excl_label), color='#222222', lwd=0.05) +
geom_sf(data=ad1_sf, color='#222222', fill=NA, lwd=.25) +
scale_fill_manual(values = excl_colors) +
labs(fill = 'Municipality grouping') +
coord_sf(crs=sf::st_crs(6372)) +
theme_minimal() +
theme(
axis.text.x=element_blank(), axis.ticks.x=element_blank(),
axis.text.y=element_blank(), axis.ticks.y=element_blank(),
panel.grid.major = element_line(colour = 'transparent'),
legend.position = c(0.77, 0.75)
)
png(file.path(viz_dir, 'excl_groups.png'), height=5.5, width=8, units='in', res=300)
print(mex_exclusion_fig)
dev.off()
## FIG 2: CHARACTERISTICS BY GROUPING --------------------------------------------------->
# Covariate column names and their display titles (asterisks flag covariates
# as labeled in the figure legend conventions)
covar_labs <- data.table(
cov = c('indig','lit','electric','refrig','lowwage','hcrowd','piped_water'),
cov_title = c(
'Identify as indigenous*', 'Literate*','Households electrified*',
'Own refrigerator*','Low wage workers','Household crowding',
'Piped water in home*'
)
)
# Long format: one row per municipality x covariate
excl_melted <- melt(
data = excl_dt,
id.vars = c('uid', 'excl_label', 'excl_group'),
measure.vars = covar_labs$cov,
variable.name = 'cov'
)
# Median and interquartile range of each covariate within each grouping
excl_agg <- excl_melted[
, .(val_med=median(value), val_low=quantile(value,0.25), val_high=quantile(value,0.75)),
by=.(cov, excl_label, excl_group)
][covar_labs, on = 'cov']
exclusion_covars_fig <- ggplot(
data=excl_agg,
aes(y=val_med, ymin=val_low, ymax=val_high, x=cov_title, color=excl_label, fill=excl_label) ) +
geom_crossbar(position='dodge', color='#222222', width=.3, lwd=.15) +
scale_fill_manual(values = excl_colors, aesthetics = c('fill')) +
scale_y_continuous(labels = scales::percent) +
labs(x='', y='Proportion by municipality', fill='', color='') +
theme_bw() +
theme(
legend.position = 'bottom',
axis.text.x = element_text(angle = 45, hjust = 1)
)
png(file.path(viz_dir, 'excl_covs.png'), height=5, width=7.5, units='in', res=300)
print(exclusion_covars_fig)
dev.off()
## SIMULATION - MORTALITY WHEN SPECIFIED CORRECTLY -------------------------------------->
sim_u <- copy(summs$sim_u$pred)
sim_u$excl_label <- sapply(sim_u$excl_group, function(ii) names(excl_colors)[ii+1])
# Express mortality quantities per 1,000 live births
sim_u[,sim_mort:=sim_mort*1E3][,mean:=mean*1E3][,lower:=lower*1E3][,upper:=upper*1E3]
plot_max <- max(c(sim_u$upper, sim_u$sim_mort))
# Panel A: estimated vs simulated (true) mortality, with a 1:1 reference line
sim_fig_a <- ggplot(
data=sim_u, aes(color=excl_label,x=sim_mort,y=mean,ymin=lower,ymax=upper)
) +
lims(x=c(0, plot_max), y=c(0, plot_max)) +
labs(
title="Neonatal mortality per 1,000", x='True (simulated)', y='Model estimate',
color='Municipality\ngrouping'
) +
geom_point(size=.5) +
geom_linerange(lwd=.25, alpha=.7) +
geom_abline(intercept=0, slope=1, linetype=2, color='#888888', alpha=.6) +
scale_color_manual(values=excl_colors) +
theme_bw() + theme(legend.position='right')
# Convert the log-scale VR bias summaries into ratio space for plotting
sim_u$vrb_ratio_mean <- exp(sim_u$log_vr_bias_mean)
sim_u$vrb_ratio_lower <- exp(sim_u$log_vr_bias_lower)
sim_u$vrb_ratio_upper <- exp(sim_u$log_vr_bias_upper)
sim_u$vrb_ratio_true <- copy(sim_u$sim_vr_bias)
sim_u <- sim_u[order(-excl_group), ]
resid_breaks <- c(1/10, 1/5, 1/2, 1, 2, 5, 10)
resid_labels <- c('1:10','1:5','1:2','1','2:1','5:1','10:1')
resid_plot_range <- c(.1, 14)
# Panel B: estimated vs true CRVS bias ratios on log axes
sim_fig_b <- ggplot(data=sim_u, aes(
color = excl_label, x = vrb_ratio_true, y = vrb_ratio_mean, ymin = vrb_ratio_lower,
ymax = vrb_ratio_upper
)) +
geom_linerange(lwd=0.25, alpha=.6) +
geom_point(size=.5) +
geom_abline(intercept=0, slope=1, linetype=2, color='#888888', alpha=.6) +
geom_hline(yintercept = 1, linetype = 3, color='#888888', alpha=.6) +
geom_vline(xintercept = 1, linetype = 3, color='#888888', alpha=.6) +
labs(title='CRVS bias terms', x='True bias (simulated)', y='Model estimated bias', color='') +
scale_color_manual(values=excl_colors) +
scale_y_continuous(
trans = 'log10', breaks = resid_breaks, limits = resid_plot_range,
labels = resid_labels, oob=scales::squish
) +
scale_x_continuous(
trans = 'log10', breaks = resid_breaks, labels = resid_labels, limits = resid_plot_range
) +
theme_bw() + theme(legend.position='none')
png(file.path(viz_dir,'sim_results.png'), height=4.5, width=10, units='in', res=300)
grid.arrange(
ggplotGrob(sim_fig_a), ggplotGrob(sim_fig_b),
layout_matrix = matrix(c(1,1,1,1,2,2,2), nrow=1)
)
dev.off()
## PRESENT NMR AND VR BIAS RESULTS FROM FULL MODEL -------------------------------------->
full_est <- copy(summs$wide$pred)
# Set up color scales
nmr_colors <- RColorBrewer::brewer.pal(n=9, name='RdPu')
bias_colors <- RColorBrewer::brewer.pal(n=9, name='BrBG')
# Translate bias into a ratio
full_est[, vrb_mean := exp(log_vr_bias_mean) ]
full_est[, mean_per_1k := mean * 1000 ]
full_sf <- merge(x=ad2_sf, y=full_est[, .(uid, mean_per_1k, vrb_mean)])
## Map NMR (municipality fills, state borders; values above 15 squished to 15)
full_nmr_fig <- ggplot() +
geom_sf(data=full_sf, aes(fill=mean_per_1k), color='#222222', lwd=0.05) +
geom_sf(data=ad1_sf, color='#222222', fill=NA, lwd=.25) +
scale_fill_gradientn(
colors = nmr_colors,
limits = c(0, 15), breaks=seq(0, 15, by=3),
oob = scales::squish
) +
labs(fill = 'Neonatal\nMortality Rate') +
coord_sf(crs=sf::st_crs(6372)) +
theme_minimal() +
theme(
axis.text.x=element_blank(), axis.ticks.x=element_blank(),
axis.text.y=element_blank(), axis.ticks.y=element_blank(),
panel.grid.major = element_line(colour = 'transparent'),
legend.position = c(0.85, 0.75)
)
png(file.path(viz_dir, 'nmr_wide_model.png'), height=5.5, width=8, units='in', res=300)
print(full_nmr_fig)
dev.off()
## Map VR bias on a log color scale (ratios between 1:2 and 2:1, squished)
full_bias_fig <- ggplot() +
geom_sf(data=full_sf, aes(fill=vrb_mean), color='#222222', lwd=0.05) +
geom_sf(data=ad1_sf, color='#222222', fill=NA, lwd=.25) +
scale_fill_gradientn(
colors = bias_colors,
limits = c(0.5, 2), breaks=c(0.5, 0.66, 1, 1.5, 2),
labels=c('1:2','2:3','1:1','3:2','2:1'),
trans = 'log10',
oob = scales::squish
) +
labs(fill = 'VR Bias Ratio\n(Mean)') +
coord_sf(crs=sf::st_crs(6372)) +
theme_minimal() +
theme(
axis.text.x=element_blank(), axis.ticks.x=element_blank(),
axis.text.y=element_blank(), axis.ticks.y=element_blank(),
panel.grid.major = element_line(colour = 'transparent'),
legend.position = c(0.85, 0.75)
)
# Zoomed-in version of the bias map, used as an inset below
bias_fig_sub <- suppressMessages(
full_bias_fig +
coord_sf(crs=sf::st_crs(6372), xlim=c(2800000, 3700000), ylim=c(347500, 1078750)) +
theme(
legend.position = 'none',
panel.border = element_rect(colour = "black", fill=NA, size=1),
panel.background = element_rect(colour = NA, fill='white')
)
)
png(file.path(viz_dir, 'vr_bias_wide_model.png'), height=5.5, width=8, units='in', res=300)
print(full_bias_fig)
# Add inset: draw the zoomed map inside a grid viewport over the main figure
npc <- function(x) unit(x, 'npc')
vp <- viewport(x=npc(.19), y=npc(.22), width=npc(.4), height=npc(.48))
grid::pushViewport(vp)
grid.draw(ggplotGrob(bias_fig_sub))
dev.off()
## Map differences between mortality rate at the admin2 and admin1 levels with the narrow
## model
# Join wide- and narrow-prior predictions per municipality
joined_dt <- merge(
x = summs$wide$pred,
y = summs$narrow$pred[, .(uid, mean)],
by = 'uid',
suffixes = c('_wide','_narrow')
)
# Difference (wide minus narrow) per 1,000 live births
joined_dt[, nmr_diff := (mean_wide - mean_narrow) * 1e3 ]
# Aggregate to the state level, weighting municipalities by registered births
joined_agg <- joined_dt[, .(
vr_births = sum(vr_births),
mean_narrow = weighted.mean(mean_narrow, w=vr_births),
mean_wide = weighted.mean(mean_wide, w=vr_births)
), by=.(parent_code,parent_name)
]
joined_agg[, nmr_diff := (mean_wide - mean_narrow) * 1E3 ]
diff_colors <- rev(RColorBrewer::brewer.pal(name='PiYG',n=9))
diff_breaks <- seq(-2, 2, by=1)
diff_labs <- c('-2', '-1', '0', '+1', '+2')
diff_lims <- range(diff_breaks)
# Top plot: Mortality rate difference at the state level
model_diff_ad1_sf <- merge(x=ad1_sf, y=joined_agg[, .(parent_code, nmr_diff)])
model_diff_ad1_fig <- ggplot() +
geom_sf(data=model_diff_ad1_sf, aes(fill=nmr_diff), color='#222222', lwd=0.25) +
scale_fill_gradientn(
colors = diff_colors,
limits = diff_lims, breaks=diff_breaks, labels = diff_labs,
oob = scales::squish
) +
coord_sf(crs=sf::st_crs(6372)) +
theme_minimal() +
labs(title = 'A', fill = 'NMR difference\n(per 1,000)') +
theme(
axis.text.x=element_blank(), axis.ticks.x=element_blank(),
axis.text.y=element_blank(), axis.ticks.y=element_blank(),
panel.grid.major = element_line(colour = 'transparent'),
legend.position = c(0.88, 0.8),
panel.border = element_rect(colour = "black", fill=NA, size=1)
)
# Bottom plot: Mortality rate difference at the municipality level
model_diff_ad2_sf <- merge(x=ad2_sf, y=joined_dt[, .(uid, nmr_diff)])
model_diff_ad2_fig <- ggplot() +
geom_sf(data=model_diff_ad2_sf, aes(fill=nmr_diff), color='#222222', lwd=0.05) +
geom_sf(data=ad1_sf, color='#222222', fill=NA, lwd=.25) +
scale_fill_gradientn(
colors = diff_colors,
limits = diff_lims, breaks=diff_breaks, labels = diff_labs,
oob = scales::squish
) +
labs(title = 'B') +
coord_sf(crs=sf::st_crs(6372), xlim=c(2500000, 4000000), ylim=c(347500, 1322500)) +
theme_minimal() +
theme(
axis.text.x=element_blank(), axis.ticks.x=element_blank(),
axis.text.y=element_blank(), axis.ticks.y=element_blank(),
panel.grid.major = element_line(colour = 'transparent'),
legend.position = 'none',
panel.border = element_rect(colour = "black", fill=NA, size=1)
)
# Stack panels A (state) and B (municipality) vertically in one figure
png(file.path(viz_dir, 'crvs_bias_diff.png'), height=11, width=8, units='in', res=300)
grid.arrange(
ggplotGrob(model_diff_ad1_fig),
ggplotGrob(model_diff_ad2_fig),
layout_matrix = matrix(1:2, ncol=1)
)
dev.off()
|
dab366c9abc58219cd9b68e0b7e5cbfd61b7681e | 946d4938a7edb2279cc2edaaea6d1ab865113b43 | /src/add_overall_prediction_to_dataset.R | 119346a23ac61a196e4bf639ce6d9b156f28fb03 | [] | no_license | AlbertoParravicini/DM-Project | 3fa88ad10895bd8bb72a27158cbad53f22b9b8fc | 8479eb1b9be4479cb95c6eb88a744f918d7576e5 | refs/heads/master | 2020-07-24T08:18:15.556269 | 2016-07-07T11:44:01 | 2016-07-07T11:44:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,969 | r | add_overall_prediction_to_dataset.R | library(xts)
library(ggplot2)
library(dplyr)
library(tseries)
library(PerformanceAnalytics)
library(forecast)
library(astsa)
library(Metrics)
library(ggthemes) # visualization
prediction_length = 10
# DATA IMPORT AND CLEANING
# ------------------------------------------------------
# ------------------------------------------------------
dataset <- read.csv("Modified data/dataset_polimi_clusterized.csv", stringsAsFactors=FALSE, row.names=NULL)
# Remove the x column, if present
dataset <- dataset[ , !(names(dataset) %in% c("X"))]
# Convert dates to class "Data"
dataset$data <- as.Date(dataset$data)
# Convert "vendite" to numeric values if needed
if (class(dataset$vendite) == "factor") {
dataset$vendite <- as.numeric(levels(dataset$vendite))[dataset$vendite]
}
# Turn some features to factors
factorVars <- c('zona','area', "sottoarea",
'prod','giorno_mese', "giorno_settimana", "giorno_anno", "mese", "settimana_anno", "anno", "weekend","stagione", "key", "azienda_chiusa", "primo_del_mese", "cluster3", "cluster6", "cluster20")
dataset[factorVars] <- lapply(dataset[factorVars], function(x) as.factor(x))
summary(dataset)
# Use the exogen signal of the overall sales
vendite_giornaliere_prod <- read.csv("Modified data/vendite_giornaliere_prod.csv", row.names=NULL, stringsAsFactors=FALSE)
vendite_giornaliere_prod$prod <- as.factor(vendite_giornaliere_prod$prod)
# Turn dates to "Date" class
dataset$data <- as.Date(as.character(dataset$data),format="%Y-%m-%d")
vendite_giornaliere_prod$data <- as.Date(as.character(vendite_giornaliere_prod$data),format="%Y-%m-%d")
total_table <- merge(dataset, vendite_giornaliere_prod, by = c("prod", "data"), all.x = T)
# Rename "vendite_giorn_prod.y" to "vendite_giorn_prod"
names(total_table)[names(total_table) == 'vendite_giorn_prod.y'] <- 'vendite_giorn_prod'
View(total_table)
write.csv(total_table, file="Modified data/dataset_polimi_clusterized_tot_pred.csv", row.names = FALSE)
|
f06de6d6171b1d5ec3c705cdf433af6286501540 | 741efc2b2baa4949261fb2d36a87a96b0d7d726d | /build_data.R | 44a00df5f2e2c961fd746181243d4bc5c58859e3 | [] | no_license | mike-gusev/bordervis | 108b53714ff1e30a8b1d2c604360711dfaa18e23 | b7047c0f13dab3e79eb000949d17304d591cee98 | refs/heads/master | 2020-03-22T14:45:25.370704 | 2018-12-13T00:24:51 | 2018-12-13T00:24:51 | 140,203,055 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,286 | r | build_data.R | #building a complete usmx data set
# m.o.: import primary documents, select needed data, clean it, append to a growing dataset.
# This section builds the land-area base table (square km) for US counties and
# Mexican municipios, which seeds `border_data`.
library(tidyverse)
library(stringr)
library(stringi)
library(readxl)
# us_region_helper: lookup table joining US state abbreviations to state names
# (columns NAME_1 / abbr, used repeatedly below).
load("data/us_region_helper.RData")
# land area first
# MX side: INAFED/SNIM population file provides `pob_datos`.
load("data/mx_inafed_snim_population.RData")
mx_land_area_sqkm <- pob_datos %>%
  filter(municipios==0) %>% # filter out states/federal aggregate rows
  select("NAME_1"=estado, "NAME_2"=municipio, "area_sqkm"=superficie)
# This dataset is old and calls Coahuila by its old name ("Coahuila de Zaragoza"):
# rename those rows, then re-stack with the unaffected rows.
mx_land_area_sqkm <- mx_land_area_sqkm %>%
  filter(str_detect(NAME_1, "Coahuila de Zaragoza")) %>%
  mutate(NAME_1="Coahuila") %>%
  rbind(mx_land_area_sqkm[!str_detect(mx_land_area_sqkm$NAME_1, "Coahuila de Zaragoza"),]) %>%
  arrange(NAME_1, NAME_2)
# US side: census land-area spreadsheet. LND110210D is in square miles;
# * 0.3861 is the inverse sq-mi-per-sq-km factor used to convert to sq km.
us_land_area_sqkm <- read_excel("data/uscensus/LND01.xls") %>%
  select(Areaname, LND110210D) %>%
  separate(Areaname, into=c("NAME_2", "abbr"), sep=", ") %>%
  left_join(us_region_helper) %>%
  mutate(area_sqkm=LND110210D*0.3861) %>%
  select("NAME_1", "NAME_2", area_sqkm) %>%
  filter(!is.na(NAME_1)) # drops state-level rows that have no county/abbr split
# Stack both countries; this becomes the spine of border_data.
usmx_land_area_sqkm <- rbind(us_land_area_sqkm, mx_land_area_sqkm)
border_data <- usmx_land_area_sqkm %>%
  mutate(area_sqkm=as.numeric(area_sqkm))
# Clean up intermediates so later sections only see border_data + helpers.
rm(mx_land_area_sqkm, pob_datos, us_land_area_sqkm, usmx_land_area_sqkm)
# population, murders, murder rate
# All three come from the same document on the MX side (SESNSP crime file -> `sesnp_crime`).
load("data/mx_sesnp_crime.RData")
# MX: keep 2016 homicides, sum monthly counts per municipio, and compute
# a murder rate per 100k residents.
mx_murder_2016 <- sesnp_crime %>% filter(modalidad=="HOMICIDIOS") %>%
  separate(date, into=c("year", "month"), sep="-") %>%
  filter(year==2016) %>%
  group_by(state, municipio, year) %>%
  summarise(murder=sum(count), population=floor(mean(population))) %>%
  ungroup() %>%
  transmute(NAME_1=str_to_title(state),
            NAME_2=str_to_title(municipio),
            murder, population,
            murder_rate=murder*100000/population)
# US: FBI UCR 2016 county-level file provides `us_crime`.
load("data/us_fbi_ucr_crime_2016.RData")
us_murder_2016 <- us_crime %>%
  select(county_name, "murder"=MURDER, population) %>%
  separate(county_name, into=c("NAME_2", "abbr"), sep=", ") %>%
  mutate(NAME_2=str_remove(NAME_2, " County| city| Census Area| Parish| Borough")) %>%
  left_join(us_region_helper, by="abbr") %>%
  dplyr::select(-abbr) %>%
  arrange(NAME_1, NAME_2) %>%
  mutate(murder_rate=murder*100000/population)
# bind the two countries
usmx_murder_2016 <- rbind(us_murder_2016, mx_murder_2016)
#rm(mx_murder, sesnp_crime, us_crime, us_murder_2016, mx_murder_2016)
# This dataset is missing accents/diacritics on the MX side, so we generate a
# matching ASCII-folded, title-cased key in border_data to join by
# (stri_trans_general(..., "Latin-ASCII") strips diacritics).
border_data <- border_data %>%
  mutate(rawname1=str_to_title(stri_trans_general(NAME_1, "Latin-ASCII")),
         rawname2=str_to_title(stri_trans_general(NAME_2, "Latin-ASCII"))) %>%
  full_join(usmx_murder_2016, by=c("rawname1"="NAME_1", "rawname2"="NAME_2")) %>%
  select(-rawname1, -rawname2)
rm(usmx_murder_2016)
# population density (people per sq km) from the two columns already joined
border_data <- border_data %>%
  mutate(pop_dens_sqkm = population/area_sqkm)
# gini index
# US: ACS 2010 county-level Gini estimates.
us_gini_2010 <- read.csv("data/us_census_acs_gini_2010.csv") %>%
  dplyr::select("region"=GEO.display.label, "gini"=HD01_VD01) %>%
  separate(region, into=c("NAME_2", "NAME_1"), sep=", ") %>%
  mutate(NAME_2=str_remove(NAME_2, " County| Borough| Census Area| Parish"))
# MX: CONEVAL poverty file loads a data frame named `a`.
load("data/mx_coneval_poverty.RData")
mx_gini_2010 <- a %>% dplyr::select("NAME_1"=nom_ent, "NAME_2"=nom_mun, "gini"=gini_10)
# Same Coahuila rename trick as in the land-area section.
mx_gini_2010 <- rbind(
  mx_gini_2010 %>% filter(str_detect(NAME_1, "Coahuila")) %>% mutate(NAME_1="Coahuila"),
  mx_gini_2010 %>% filter(!str_detect(NAME_1, "Coahuila"))
) %>% arrange(NAME_1, NAME_2)
# bind countries and attach to the growing border_data table
usmx_gini_2010 <- rbind(us_gini_2010, mx_gini_2010)
border_data <- border_data %>%
  full_join(usmx_gini_2010, by=c("NAME_1", "NAME_2"))
rm(us_gini_2010, mx_gini_2010, usmx_gini_2010, a)
# poverty
# MX: reload the CONEVAL file (it was rm'd above); `pobreza` is a proportion,
# scaled to a percentage below to match the US series.
load("data/mx_coneval_poverty.RData")
mx_poverty_2015 <- dplyr::select(a, "NAME_1"=nom_ent, "NAME_2"=nom_mun, "poverty_rate"=pobreza)
mx_poverty_2015 <- rbind(
  mx_poverty_2015 %>% filter(str_detect(NAME_1, "Coahuila")) %>% mutate(NAME_1="Coahuila"),
  mx_poverty_2015 %>% filter(!str_detect(NAME_1, "Coahuila"))
) %>% arrange(NAME_1, NAME_2) %>%
  mutate(poverty_rate=100*poverty_rate)
# US: SAIPE 2015 file provides `pov2015`; columns 4 and 10 are the region label
# and the poverty rate. NOTE(review): positional indexing is fragile if the
# upstream file layout changes — confirm against the raw file.
load("data/us_census_saipe_poverty_2015.RData")
us_poverty_2015 <- pov2015[,c(4,10)]
names(us_poverty_2015) <- c("region", "poverty_rate")
us_poverty_2015 <- us_poverty_2015 %>%
  separate(region, into=c("NAME_2", "abbr"), sep=" \\(") %>%
  transmute(NAME_2=str_remove(NAME_2, " County| Borough| Census Area| Parish"),
            abbr=str_sub(abbr,1,-2), poverty_rate) %>% # str_sub drops the trailing ")"
  left_join(us_region_helper, by="abbr") %>%
  dplyr::select(NAME_1, NAME_2, poverty_rate) %>%
  filter(!is.na(NAME_1), NAME_2 != "")
# bind countries and attach to border_data
usmx_poverty_2015 <- rbind(us_poverty_2015, mx_poverty_2015)
border_data <- border_data %>%
  full_join(usmx_poverty_2015, by=c("NAME_1", "NAME_2"))
rm(a, mx_poverty_2015, pov2015, us_poverty_2015, usmx_poverty_2015)
# Final assembly: keep only the columns of interest, suffix them with the year
# each statistic refers to, and round for presentation. transmute() drops
# everything not listed.
border_data <- border_data %>% transmute(
  NAME_1, NAME_2,
  "area_sqkm_2010"=area_sqkm,
  "murder_2016"=murder,
  "population_2016"=floor(population),
  "murder_rate_2016"=round(murder_rate, digits=3),
  "pop_dens_sqkm_2016"=round(pop_dens_sqkm, digits=3),
  "gini_2010"=round(gini, digits=3),
  "poverty_rate_2015"=round(poverty_rate, digits=1)
)
# Persist the finished table for downstream analysis/visualization.
save(border_data, file="data/border_data.RData")
|
8474bdb50a4d3bd00b1207ab47657ec83cd92f4c | 8edf0521ebc0ca53ec618d6d220c47c851caaa71 | /man/SSbootstrap.Rd | 578d78cef4eefe083a9377aa660f318d1341dedc | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | amart/r4ss | 9b730038ee4c4b6d38aaabe81b6ad9fddf0eb4f3 | fbccbace9a70e846401d32577aeab9f25cb31ba5 | refs/heads/master | 2021-01-17T06:03:03.172272 | 2020-10-04T01:38:14 | 2020-10-04T01:38:14 | 24,735,775 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 705 | rd | SSbootstrap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSbootstrap.R
\name{SSbootstrap}
\alias{SSbootstrap}
\title{Fit models to parametric bootstraps}
\usage{
SSbootstrap()
}
\description{
Run a series of models fit to parametric bootstrap data taken from
data.ss_new. This is not yet a generalized function, just some example code
for how to do a parametric bootstrap such as was done for the Pacific hake
model in 2006.
}
\note{
Thanks to Nancie Cummings for inspiration.
}
\references{
\url{http://www.pcouncil.org/wp-content/uploads/2006_hake_assessment_FINAL_ENTIRE.pdf}
(A description is on page 41 and Figures 55-56 (pg 139-140) show some
results.)
}
\author{
Ian Taylor
}
|
2fe6d77f93f91a3357d44d046682eb9ac24af953 | 6f63e85ca2b63cfe402dc7bedbd8c432ade7cbd6 | /app/app.R | a83741c5b350121c13ce7bf5fa95f4cf00fa369c | [] | no_license | rat-nick/education-analysis | 29b775b420f6693f453265dee201183a0042367e | 2ea01d8f463ff82d6137ba0194fd0e51fe852af8 | refs/heads/main | 2023-08-27T22:13:43.301914 | 2021-10-01T07:17:34 | 2021-10-01T07:17:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,427 | r | app.R | ## app.R ##
library(shinydashboard)
library(shiny)
library(ggridges)
library(plotly)
library(ggcorrplot)
library(stringr)
library(ggthemes)
source("scripts/data_import.R")
source("scripts/wrangling.R")
# UI definition ----
# UI: a two-tab shinydashboard page.
#  - "uspeh"     (student grades): filter selectors, grade histogram,
#                correlation plot, and a "most affected subjects" bar chart.
#  - "izostanci" (absences): two data tables plus a mirrored histogram of an
#                attendance statistic chosen at runtime.
# Input IDs declared here are consumed in server(); output IDs are rendered there.
ui <- dashboardPage(
  skin = "black",
  dashboardHeader(title = "EduBoard"),
  # Sidebar definitions -------------------------
  dashboardSidebar(sidebarMenu(
    menuItem(
      text = "Uspeh ucenika",
      tabName = "uspeh",
      icon = icon("clipboard")
    ),
    menuItem(
      text = "Izostanci ucenika",
      tabName = "izostanci",
      icon = icon("clipboard")
    )
  )),
  # Dashboard body definition ----------------
  dashboardBody(tabItems(
    # Uspeh tab ----------------------
    tabItem(tabName = "uspeh",
            fluidRow(
              box(
                width = 12,
                collapsible = T,
                title = "Podaci po predmetima",
                fluidRow(
                  # Both selectors start with choices = NULL; server() fills
                  # them via updateSelectInput once data is loaded.
                  box(
                    collapsible = T,
                    selectInput(
                      inputId = "uspeh_godine",
                      choices = NULL,
                      multiple = T,
                      label = "Izaberite razrede"
                    )
                  ),
                  box(
                    collapsible = T,
                    selectInput(
                      inputId = "uspeh_predmeti",
                      choices = NULL,
                      multiple = T,
                      label = "Izaberite predmete"
                    )
                  )
                ),
                fluidRow(
                  box(
                    collapsible = T,
                    width = 6,
                    plotlyOutput("uspehCompHist")
                  ),
                  box(
                    collapsible = T,
                    width = 6,
                    plotlyOutput("corrPlot")
                  )
                ),
                box(
                  collapsible = T,
                  checkboxInput("poslednji", "Najmanja promena"),
                  plotlyOutput("najpogodjenijiPredmeti"),
                  sliderInput(
                    "numSubjects",
                    "Broj predmeta",
                    min = 1,
                    max = 10,
                    value = 3
                  )
                )
              )
            )),
    # Izostanci tab -------------------------------
    tabItem(tabName = 'izostanci',
            fluidRow(
              box(
                width = 12,
                dataTableOutput("studentAttendanceDataTable"),
                collapsible = T
              )
            ),
            fluidRow(
              box(
                width = 12,
                column(
                  width = 4,
                  # Filled by server() with column names of the attendance data.
                  selectInput("attendanceColSelect", "Izaberite statistiku", choices = NULL)
                ),
                column(
                  width = 4,
                  selectInput(
                    "period",
                    "Izaberite period",
                    choices = c("I polugodište", "II polugodište", "ukupno"),
                    selected = "ukupno"
                  )
                ),
                # NOTE(review): trailing comma after plotlyOutput(...) below —
                # tolerated by rlang-based htmltools tag functions, but confirm
                # it does not error on the installed shiny version.
                fluidRow(column(
                  width = 10,
                  plotlyOutput("saDensity", height = 700),
                ), column(width = 2, textOutput("avgDiff"))),
                sliderInput(
                  'saNumBins',
                  min = 5,
                  max = 30,
                  label = "Broj grupa",
                  value = 10
                )
              )
            ),
            fluidRow(
              box(
                width = 12,
                dataTableOutput("classAttendanceDataTable"),
                collapsible = T
              )
            ))
  ))
)
# server definition ------
# Server: loads the grade and attendance CSVs once per session, exposes them
# through reactiveValues, and wires up the plots/tables declared in `ui`.
server <- function(input, output, session) {
  # Session-level data. Paths are relative to the app directory.
  data <- read.csv("data/all_grades.csv")
  class_attendance_data <-
    read.csv("data/summary_attendance.csv")
  rv <- reactiveValues()
  rv$data <- data
  rv$ca_data <- class_attendance_data

  # Grades filtered by the two multi-selects; NULL selection means "no filter".
  filtered <- reactive({
    df <- rv$data
    print(input$uspeh_godine)   # debug trace of current selections
    print(input$uspeh_predmeti)
    if (!is.null(input$uspeh_godine)) {
      df <- df %>% filter(godina %in% input$uspeh_godine)
    }
    if (!is.null(input$uspeh_predmeti)) {
      df <- df %>% filter(predmet %in% input$uspeh_predmeti)
    }
    return(df)
  })

  # Populate the two selectors from the (isolated) filtered data.
  observe({
    print("data change")
    ch <- unique(isolate(filtered())['godina'])
    updateSelectInput(session, "uspeh_godine", choices = ch)
  })
  observe({
    print("data change")
    ch <- unique(isolate(filtered())['predmet'])
    updateSelectInput(session, "uspeh_predmeti", choices = ch)
  })

  # Mirrored histogram: pandemic-era grades above the axis, pre-pandemic below
  # (negative counts flip the second histogram).
  output$uspehCompHist <- renderPlotly({
    df <-
      filtered() %>% mutate(pandemija = if_else(pandemija, "tokom pandemije", "pre pandemije"))
    df %>% ggplot(aes(x = ocena, fill = pandemija)) + ggtitle("Raspodela ocena") +
      geom_histogram(
        data = ~ subset(., pandemija == "tokom pandemije"),
        aes(y = 1 * ..count..),
        alpha = 0.6,
        bins = 5
      ) +
      geom_histogram(
        data = ~ subset(., pandemija == "pre pandemije"),
        aes(y = -1 * ..count..),
        alpha = 0.8,
        bins = 5
      ) +
      theme_minimal() +
      theme(legend.position = "bottom") +
      xlab("ocena") + ylab("broj ocena")
  })

  # Top-N subjects by change in mean grade since the pandemic started.
  # The "poslednji" checkbox flips the sort to show the smallest changes.
  output$najpogodjenijiPredmeti <- renderPlotly({
    no_fac <- rv$data %>% filter(!grepl("факулта", predmet)) # drop faculty-level rows
    dT <-
      no_fac %>% filter(pandemija == T) %>% group_by(predmet) %>% summarise(prosek = mean(ocena))
    dF <-
      no_fac %>% filter(pandemija == F) %>% group_by(predmet) %>% summarise(prosek = mean(ocena))
    multi <- if_else(input$poslednji, 1, -1)
    df <- merge(x = dF, y = dT, by = "predmet") %>%
      mutate(promena = prosek.y - prosek.x)   # pandemic mean minus pre-pandemic mean
    df <-
      head(df[order(multi * df$promena),], input$numSubjects)
    df %>% ggplot(aes(x = reorder(predmet, promena), y = promena)) +
      ggtitle("Promena proseka od pocetka pandemije") +
      # FIX: removed the empty `stat = ` argument inside aes(); `stat` is not an
      # aesthetic and the dangling named argument was a typo.
      geom_col(aes(fill = predmet), alpha = .7) +
      coord_flip() + theme_minimal() +
      xlab("predmet") + theme(legend.position = "bottom")
  })

  # Attendance tables: column subsets chosen by position.
  # NOTE(review): positional column selection is fragile if the CSV layout changes.
  output$studentAttendanceDataTable <- renderDataTable({
    cols <- c(1, 2, 3, 4, 6, 8, 12, 13, 14)
    rv$ca_data[, cols]
  })
  output$classAttendanceDataTable <- renderDataTable({
    cols <- c(1, 5, 7, 9, 10, 11, 12, 13, 14)
    rv$ca_data[, cols]
  })

  # Pairwise correlation of per-student mean grades across the selected subjects;
  # needs at least two subjects selected to be meaningful.
  output$corrPlot <- renderPlotly({
    if (length(input$uspeh_predmeti) == 1) {
      ggplot() + ggtitle("Izaberite vise od jednog predemet da biste videli grafik korelacije")
    }
    else{
      df <- filtered() %>%
        group_by(ucenik, predmet) %>%
        summarise(prosek = mean(ocena)) %>%
        pivot_wider(names_from = "predmet", values_from = "prosek") %>%
        data.frame() %>% select(-c("ucenik"))
      corr <- cor(df, use = "pairwise.complete.obs")
      ggcorrplot(
        corr,
        type = "lower",
        outline.color = 'white',
        show.legend = T
      ) +
        ggtitle("Korelacija proseka ocena") +
        theme(axis.text.x = element_blank(),
              axis.ticks = element_blank())
    }
  })

  # Offer attendance statistics (columns 2..11) as selectable metrics.
  observe({
    updateSelectInput(
      session = session,
      inputId = "attendanceColSelect",
      choices = names(isolate(rv$ca_data))[2:11]
    )
  })

  # NOTE(review): this reactive is defined but never consumed by any output;
  # kept for compatibility/debugging.
  selected <- reactive({
    df <- rv$ca_data %>% select(c(input$attendanceColSelect, 1, 12, 14))
    # FIX: as.tibble() has been deprecated since tibble 2.0; use as_tibble().
    df <- as_tibble(df)
    print(head(df))
  })

  # Mirrored histogram of the chosen attendance statistic: pre-pandemic classes
  # above the axis, pandemic-era classes below; dashed vlines mark group means.
  output$saDensity <- renderPlotly({
    selectedAxis <- input$attendanceColSelect
    df <-
      rv$ca_data %>% mutate(
        polugodiste = if_else(
          polugodiste == 1,
          "I polugodište",
          if_else(polugodiste == 2, "II polugodište", "ukupno")
        ),
        pandemija = if_else(pandemija == T, "Tokom pandemije", "Pre pandemije")
      ) %>%
      filter(polugodiste %in% input$period)
    srednja_vrednost_tokom_pandemije <-
      mean(df[df$pandemija == "Tokom pandemije", selectedAxis])
    srednja_vrednost_pre_pandemije <-
      mean(df[df$pandemija == "Pre pandemije", selectedAxis])
    promena_srednje_vrednosti <-
      srednja_vrednost_tokom_pandemije - srednja_vrednost_pre_pandemije
    # aes_string maps the runtime-selected column name to x.
    base <-
      data.frame(df) %>% ggplot(aes_string(x = selectedAxis))
    base +
      geom_vline(
        aes(xintercept = srednja_vrednost_tokom_pandemije),
        color = "blue",
        linetype = "dashed",
        size = 1
      ) +
      geom_histogram(
        data = subset(df, pandemija == "Pre pandemije"),
        aes(y = ..count.., fill = pandemija),
        alpha = .7,
        bins = input$saNumBins,
        position = "identity"
      ) +
      #geom_density(data = subset(df, pandemija == "Pre pandemije"),alpha=.4, aes(y=-1*..count..)) +
      geom_vline(
        aes(xintercept = srednja_vrednost_pre_pandemije),
        color = "red",
        linetype = "dashed",
        size = 1
      ) +
      geom_histogram(
        data = subset(df, pandemija == "Tokom pandemije"),
        aes(y = -1 * ..count.., fill = pandemija) ,
        alpha = .7,
        bins = input$saNumBins,
        position = "identity"
      ) + ylab("Broj odeljenja") +
      ggtitle(label = "Statistika o izostancima po odeljenjima", subtitle = "Koliko odeljenja pripada određenoj grupi?")
  })
}
shinyApp(ui, server) |
fb8bf4744f10614b94c971aad1b5db9cc92f0348 | f8eb55c15aec611480ede47d4e15e5a6e472b4fa | /analysis/0139_individual_stocks.R | 9236237f359507d096da0d6bcebc275ecb89b5ed | [] | no_license | nmaggiulli/of-dollars-and-data | a4fa71d6a21ce5dc346f7558179080b8e459aaca | ae2501dfc0b72d292314c179c83d18d6d4a66ec3 | refs/heads/master | 2023-08-17T03:39:03.133003 | 2023-08-11T02:08:32 | 2023-08-11T02:08:32 | 77,659,168 | 397 | 32 | null | null | null | null | UTF-8 | R | false | false | 9,572 | r | 0139_individual_stocks.R | cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(scales)
library(readxl)
library(lubridate)
library(ggrepel)
library(ggjoy)
library(tidyverse)
folder_name <- "0139_individual_stocks"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)
########################## Start Program Here ######################### #
start_year <- 2000
spx_2000 <- read_excel(paste0(importdir, "0139_sp500_individual_stocks/spx_components_2000.xlsx")) %>%
mutate(symbol = trimws(ticker)) %>%
select(symbol)
spx <- read.csv(paste0(importdir, "0139_sp500_individual_stocks/ycharts_spx.csv"), skip = 6) %>%
rename(symbol = Symbol,
name = Name,
metric = Metric) %>%
gather(-symbol, -name, -metric, key=key, value=value) %>%
mutate(year = as.numeric(gsub("X(\\d+)\\.(\\d+)\\.(\\d+)", "\\1", key, perl = TRUE))) %>%
arrange(symbol, year) %>%
filter(!is.na(value)) %>%
mutate(spx_ret = value/lag(value) - 1) %>%
filter(!is.na(spx_ret)) %>%
select(year, spx_ret)
raw <- read.csv(paste0(importdir, "0139_sp500_individual_stocks/ycharts_tr.csv"), skip = 6) %>%
rename(symbol = Symbol,
name = Name,
metric = Metric) %>%
gather(-symbol, -name, -metric, key=key, value=value) %>%
mutate(year = as.numeric(gsub("X(\\d+)\\.(\\d+)\\.(\\d+)", "\\1", key, perl = TRUE))) %>%
arrange(symbol, year) %>%
filter(!is.na(value)) %>%
mutate(ret = value/lag(value) - 1) %>%
filter(!is.na(ret)) %>%
left_join(spx) %>%
mutate(above_market = ifelse(ret > spx_ret, 1, 0)) %>%
select(year, symbol, name, ret, spx_ret, above_market) %>%
filter(year < 2019, year >= start_year)
first_last <- raw %>%
group_by(symbol) %>%
summarise(min_year = min(year),
max_year = max(year),
n_years_data = n()) %>%
ungroup()
full_data <- filter(first_last, n_years_data == max(first_last$n_years_data)) %>%
inner_join(raw)
full_symbols <- full_data %>%
select(symbol) %>%
distinct()
not_spx_2000 <- full_symbols %>%
anti_join(spx_2000)
missing_symbols <- spx_2000 %>%
anti_join(full_data)
# Simulation parameters
n_simulations <- 1000
portfolio_sizes <- c(5, 10, 20, 30, 50, 100, 200)
set.seed(12345)
final_results <- data.frame(year = c(),
mean_ret = c(),
binned_ret = c(),
simulation = c(),
portfolio_size = c(),
above_market = c())
for(p in portfolio_sizes){
print(p)
for(i in 1:n_simulations){
s <- sample(full_symbols$symbol, p, replace = FALSE)
tmp <- full_data %>%
filter(symbol %in% s) %>%
group_by(year) %>%
summarise(mean_ret = mean(ret),
spx_ret = mean(spx_ret)) %>%
ungroup() %>%
mutate(binned_ret = case_when(
mean_ret > 0.5 ~ 0.5,
mean_ret < -0.5 ~ -0.5,
TRUE ~ mean_ret
),
simulation = i,
portfolio_size = p
)
fnl <- tmp %>%
summarise(p_ret = prod(1+mean_ret)^(1/nrow(tmp)) - 1,
spx_ret = prod(1+spx_ret)^(1/nrow(tmp)) - 1)
tmp <- tmp %>%
mutate(above_market = ifelse(fnl$p_ret > fnl$spx_ret, 1, 0),
annual_outperformance_full_period = fnl$p_ret - fnl$spx_ret)
if(p == portfolio_sizes[1] & i == 1){
final_results <- tmp
} else{
final_results <- bind_rows(final_results, tmp)
}
}
}
# Summarize above market stats
above_market_stats_year <- full_data %>%
group_by(year) %>%
summarise(above_market = mean(above_market)) %>%
ungroup()
# Loop by start year
all_years <- unique(full_data$year)
above_market_stats_stock <- data.frame(start_year = c(),
market_outperformance_2018 = c())
above_market_stats_portfolio_size <- data.frame(start_year = c(),
portfolio_size = c(),
above_market = c(),
annual_outperformance = c())
for(y in all_years){
print(y)
ind <- full_data %>%
filter(year >= y) %>%
group_by(symbol) %>%
summarise(p_ret = prod(1+ret)- 1,
spx_ret = prod(1+spx_ret) - 1) %>%
ungroup() %>%
mutate(above_market = ifelse(p_ret>spx_ret, 1, 0)) %>%
summarise(market_outperformance_2018 = mean(above_market)) %>%
mutate(start_year = y) %>%
select(start_year, market_outperformance_2018)
n_years <- length(all_years) - which(all_years == y) + 1
port <- final_results %>%
filter(year >= y) %>%
group_by(portfolio_size, simulation) %>%
summarise(p_ret = prod(1+mean_ret)^(1/n_years) - 1,
spx_ret = prod(1+spx_ret)^(1/n_years) - 1) %>%
ungroup() %>%
mutate(above_market = ifelse(p_ret>spx_ret, 1, 0),
market_outperformance_2018 = p_ret - spx_ret,
start_year = y) %>%
group_by(start_year, portfolio_size) %>%
summarise(above_market = mean(above_market),
market_outperformance_2018 = mean(market_outperformance_2018)) %>%
ungroup() %>%
select(start_year, portfolio_size, above_market, market_outperformance_2018)
if(y == all_years[1]){
above_market_stats_stock <- ind
above_market_stats_portfolio_size <- port
} else{
above_market_stats_stock <- bind_rows(above_market_stats_stock, ind)
above_market_stats_portfolio_size <- bind_rows(above_market_stats_portfolio_size, port)
}
}
overall_summary <- final_results %>%
group_by(year, portfolio_size) %>%
summarise(avg_ret = mean(mean_ret),
sd_ret = sd(mean_ret)) %>%
ungroup() %>%
left_join(spx)
# Plot by portfolio size
for(p in portfolio_sizes){
p_string <- str_pad(p, 3, pad = "0")
to_plot <- final_results %>%
filter(portfolio_size == p)
source_string <- paste0("Source: YCharts (OfDollarsAndData.com)")
note_string <- str_wrap(paste0("Note: Stocks are selected from the S&P 500 and only include those with data going back to ", start_year, ". Returns shown include dividends."),
width = 85)
file_path <- paste0(out_path, "/dist_returns_portfolio_", p_string, "_stocks.jpeg")
plot <- ggplot(data = to_plot, aes(x=binned_ret, y=as.factor(year))) +
geom_joy_gradient(rel_min_height = 0.01, scale = 3, fill = "blue") +
scale_x_continuous(label = percent, limit = c(-0.6, 0.6), breaks = seq(-0.6, 0.6, 0.2)) +
of_dollars_and_data_theme +
ggtitle(paste0("Return Distribution by Year\n", p, "-Stock Equal Weight Portfolio")) +
labs(x = "1-Year Return", y = "Year",
caption = paste0(source_string, "\n", note_string))
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# Do annual outperformance
file_path <- paste0(out_path, "/outperf_portfolio_", p_string, "_stocks.jpeg")
plot <- ggplot(data = to_plot, aes(x=annual_outperformance_full_period)) +
geom_density(fill = "blue") +
geom_vline(xintercept = 0, linetype = "dashed", color = "black") +
scale_x_continuous(label = percent, limit = c(-0.2, 0.2)) +
of_dollars_and_data_theme +
ggtitle(paste0("Annual Outperformance Compared to S&P 500\n", p, "-Stock Equal Weight Portfolio")) +
labs(x = paste0("Annualized Outperformance Since ", start_year), y = "Frequency",
caption = paste0(source_string, "\n", note_string))
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
}
create_gif(out_path,
paste0("dist_returns_portfolio_*.jpeg"),
105,
0,
paste0("_gif_dist_portfolio_size_returns.gif"))
create_gif(out_path,
paste0("outperf_portfolio_*.jpeg"),
105,
0,
paste0("_gif_outperf_portfolio_size_returns.gif"))
# Do stock beats by year
file_path <- paste0(out_path, "/above_market_year.jpeg")
source_string <- paste0("Source: YCharts (OfDollarsAndData.com)")
note_string <- str_wrap(paste0("Note: Stocks are selected from the S&P 500 and only include those with data going back to ", start_year, ". Returns shown include dividends."),
width = 85)
to_plot <- above_market_stats_year
plot <- ggplot(data = to_plot, aes(x=year, y = above_market)) +
geom_bar(stat="identity", fill = "blue") +
geom_hline(yintercept = 0.5, linetype = "dashed") +
scale_y_continuous(label = percent, limits = c(0, 1), breaks = seq(0, 1, 0.1)) +
of_dollars_and_data_theme +
ggtitle(paste0("Percentage of Stocks That\nBeat the S&P 500 by Year")) +
labs(x = paste0("Year"), y = "Percentage",
caption = paste0(source_string, "\n", note_string))
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# ############################ End ################################## # |
8b6f4ef06cde1806f8a986e55a093dce3e38502b | 99d380bab249726b79f026dbed96d37ca616542e | /ant_compete.R | adfae7901297243186f713210d60b9dd6ea522f3 | [] | no_license | cnell-usgs/ant-removal | 9464facc951aade85251f47017db18e7e1302b66 | 4098853428cca50fdc36bfaeedb7a77074831c9f | refs/heads/master | 2023-05-31T22:15:47.930251 | 2018-07-30T16:47:54 | 2018-07-30T16:47:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,472 | r | ant_compete.R | ################################
## ant cookie analysis ####
## C Nell
## March 2018
################################
setwd("/Users/colleennell/Dropbox/ant_cookie/R")
library(tidyverse)
library(reshape2)
library(broom)
library(lme4)
library(emmeans)
library(estimability)
library(ggplot2)
#################
## QUESTIONS ####
# Time, thermal differences among species
# Is discovery time and recruitment time shorter for the Argentine ant than for the natives?
# Competitive ability
# Does the Argentine ant find faster the bait when is not competing with other ants? And the native species when they’re not competing against de Argentine?
# Do native species find faster the bait when they’re not competing with the Argentine ant?
# Recruitment strategy
# Which species has the most numerous recruitment? And the fastest discovery time?
# Is always the Argentine ant the one who colonized the baits when there’s no removal or some species colonize it better?
# Does the Argentine ant displace other species when they’re competing? Or any of the native species displaces the Argentine ant?
#################
# Read raw bait-observation data: one row per ant species per bait trial.
ants<-read.csv('data/antcookie_sp_raw.csv')
View(ants)  # interactive inspection only
str(ants)
## which species had most numerous recruitment?
aggregate(ANT_MAX~ANT_SP, FUN=max, data=ants) # LH = 660
aggregate(ANT_MAX~ANT_SP+COMPETE, FUN=max, data=ants)# compete - LH 660 in not competing, 600 compete.
## highest average recruitment?
aggregate(ANT_MAX~ANT_SP, FUN=mean, data=ants)#mean
#aggregate(ANT_MAX~ANT_SP, FUN=se, data=ants)# se
# Mean +- se of ANT_MAX by species x competition status, printed to console.
# NOTE(review): `se` is not a base R function — presumably defined in a sourced
# helper; confirm it is in scope before running.
aggregate(ANT_MAX~ANT_SP+COMPETE, FUN=mean, data=ants)%>%dplyr::select(everything(),mean = ANT_MAX)%>%#compete
  left_join(aggregate(ANT_MAX~ANT_SP+COMPETE, FUN=se, data=ants), by=c('ANT_SP','COMPETE'))%>%dplyr::select(everything(), se=ANT_MAX)
# mean and se by ant species: max/mean/min/se of the four timing/count variables.
# NOTE(review): funs() is deprecated in current dplyr, but the generated column
# names (e.g. TIME_COMPETE_max) are relied on by the separate() call below —
# do not swap for list(~...) without checking the resulting names.
sp.means<-ants%>%group_by(ANT_SP)%>%summarize_at(vars(TIME_COMPETE, TIME_DISCOVER, TIME_RECRUIT, ANT_MAX), funs(max(.,na.rm=TRUE), mean(.,na.rm=TRUE), min(.,na.rm=TRUE), se))
View(sp.means)
# melt dataframe to plot all means: split "VAR_EVENT_stat" names back apart,
# then spread the stats (max/mean/min/se) into columns.
sp.means.melt<-sp.means%>%melt(id.vars=c('ANT_SP'))%>%separate(variable, into=c('variable','event','stat'))%>%dcast(ANT_SP+variable+event~stat)%>%mutate(subset='all',metric=paste(variable, event))
sp.means.melt
# Per-species means with se error bars, one facet per metric.
# NOTE(review): zamia_theme is defined outside this file (sourced header?).
ggplot(sp.means.melt, aes(ANT_SP, mean))+
  geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=0)+
  geom_point()+
  zamia_theme+
  facet_wrap(~metric, scales='free_y')+
  labs(x='Ant species', y='')
# not enough data
removal<-ants%>%filter(REMOVAL == 'NO')
compete<-removal%>%filter(ANT_COMPETE == 1)
str(removal) # 26 obs (but includes 1 line per ant species, so fewr)
str(compete) # 7 times when actually competed (14 obs for comp, leaving 12 removals where the other ant did not show)
removal
# which species has the highest time to recruitment?
##max mean and min for each species when no removal
rem.means<-removal%>%group_by(ANT_SP)%>%summarize_at(vars(TIME_COMPETE, TIME_DISCOVER, TIME_RECRUIT, ANT_MAX), funs(max(.,na.rm=TRUE), mean(.,na.rm=TRUE), min(.,na.rm=TRUE), se))
rem.means.melt<-rem.means%>%melt(id.vars=c('ANT_SP'))%>%separate(variable, into=c('variable','event','stat'))%>%dcast(ANT_SP+variable+event~stat)%>%mutate(subset='removal',metric=paste(variable, event))
#species means when no removal (competing)
ggplot(rem.means.melt, aes(ANT_SP, mean))+
geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=0)+
geom_point()+
zamia_theme+
facet_wrap(~metric, scales='free_y')+
labs(x='Ant species', y='')
##max mean and min for each species when competing
comp.means<-compete%>%group_by(ANT_SP)%>%summarize_at(vars(TIME_COMPETE, TIME_DISCOVER, TIME_RECRUIT, ANT_MAX), funs(max(.,na.rm=TRUE), mean(.,na.rm=TRUE), min(.,na.rm=TRUE), se))
comp.means.melt<-comp.means%>%melt(id.vars=c('ANT_SP'))%>%separate(variable, into=c('variable','event','stat'))%>%dcast(ANT_SP+variable+event~stat)%>%mutate(subset = 'compete',metric=paste(variable, event))
#species means when competing
ggplot(comp.means.melt, aes(ANT_SP, mean))+
geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=0)+
geom_point()+
zamia_theme+
facet_wrap(~metric, scales='free_y')+
labs(x='Ant species', y='')
## all plotted together
longy<-rbind(comp.means.melt, rem.means.melt, sp.means.melt)
ggplot(longy, aes(ANT_SP, mean))+
geom_errorbar(aes(ymin=mean-se, ymax=mean+se, color=subset), width=0, alpha=.7)+
geom_point(aes(color=subset), alpha=.75)+
zamia_theme+
facet_wrap(~metric, scales='free_y')+
labs(x='Ant species', y='')
## max and min
longy<-rbind(comp.means.melt, rem.means.melt, sp.means.melt)
ggplot(longy, aes(x=ANT_SP))+
geom_point(aes(x=ANT_SP, y=min,color=subset), alpha=.75)+
zamia_theme+
facet_wrap(~metric, scales='free_y')+
labs(x='Ant species', y='')+theme(legend.position='top')
##########
## temperature
# What is the minimum temperature at discovery for each species?
# Stack the three temperature columns long, drop NAs, and summarise
# min/max/mean/range per species.
temps<-ants%>%group_by(ANT_SP)%>%
  dplyr::select(ANT_SP, TEMP_DISCOVER, TEMP_RECRUIT, TEMP_COMPETE)%>%
  melt(id.vars=c('ANT_SP'))%>%filter(!is.na(value))%>% #all the temperatures the species were observed at
  group_by(ANT_SP)%>%
  summarize_if(is.numeric, funs(min(., na.rm=TRUE), max(., na.rm=TRUE), mean(., na.rm=TRUE), diff(range(., na.rm=TRUE))))
# plot of ant activity periods: one horizontal min-max temperature bar per species
ggplot(temps)+
  geom_linerange(aes(x = reorder(ANT_SP, max), ymin = min, ymax=max, color=ANT_SP), stat='identity', size=2, alpha=.95, color='darkgrey')+
  theme(panel.background = element_blank(), axis.line = element_line(color='black'))+
  labs(x='Ant species', y='Temperature (C)')+coord_flip()
## does temp affect time to discovery - yes
# Simple linear model first (species + species-specific temp slope + start temp).
disc_time_temp<-lm(TIME_DISCOVER~ANT_SP+TEMP_DISCOVER:ANT_SP+TEMP_START, data=ants)
#summary(disc_time_temp)
# Mixed models below: log(1+x) response, random intercepts for flag nested in plot.
# NOTE(review): Anova() and allEffects() come from the car and effects packages,
# which are not loaded in this file — presumably loaded elsewhere; confirm.
# temp start model
disc_time_temp<-lmer(log(1+TIME_DISCOVER)~ANT_SP*TEMP_START+(1|PLOT/FLAG), data=ants)
Anova(disc_time_temp, type='III')## effect of TEMP_START, TEMP_DISCOVER, marg ANT_SP - just LR or ORIGIN is sig
plot(allEffects(disc_time_temp))
# temp discover model (adds a random intercept for discovery temperature)
disc_time_temp<-lmer(log(1+TIME_DISCOVER)~ANT_SP*TEMP_START+(1|PLOT/FLAG)+(1|TEMP_DISCOVER), data=ants)
Anova(disc_time_temp, type='III')## effect of TEMP_START, TEMP_DISCOVER, marg ANT_SP - just LR or ORIGIN is sig
plot(allEffects(disc_time_temp))
# time to discovery was affected by temp start, temperature, and ant origin
disc_time_temp_re<-lmer(log(1+TIME_DISCOVER)~TEMP_DISCOVER*ANT_SP+TEMP_START+(1|PLOT/FLAG), data=ants)
Anova(disc_time_temp_re, type='III')
plot(allEffects(disc_time_temp_re))
### RECRUITMENT
# temp recruit model (interaction)
rtime_temp<-lmer(log(1+TIME_RECRUIT)~ANT_SP*TEMP_DISCOVER+(1|PLOT/FLAG), data=ants)
Anova(rtime_temp, type='III')## nada
plot(allEffects(rtime_temp))
# temp discover model (additive)
rtime_temp<-lmer(log(1+TIME_RECRUIT)~ANT_SP+TEMP_DISCOVER+(1|PLOT/FLAG), data=ants)
Anova(rtime_temp, type='III')## effect of TEMP_START, TEMP_DISCOVER, marg ANT_SP - just LR or ORIGIN is sig
plot(allEffects(rtime_temp))
str(ants)
remdf<-ants%>%mutate(gone=ifelse(REMOVAL != 'NO', 'NO','YES'))%>%
dplyr::select(PLOT:FLAG,ANT_SP,ORIGIN,TIME_DISCOVER,TIME_RECRUIT,ANT_MAX,gone)%>%
melt(id.vars=c('PLOT','FLAG','gone','ORIGIN','ANT_SP'))%>%
dcast(PLOT+FLAG~gone+variable+ORIGIN)%>%
mutate(LR_comp = log(YES_TIME_RECRUIT_EXOTIC/YES_TIME_RECRUIT_NATIVE),
LR_alone=log(NO_TIME_RECRUIT_EXOTIC/NO_TIME_RECRUIT_NATIVE))
View(remdf)
colnames(remdf)
trytry<-lmer(YES_) |
f6a2648f5048fee03e1371f45cd8022757c6f557 | af8b1cfa36e31284367560dac2800456de9bb284 | /man/circularize.Rd | 607f43687b767406e082a4449c0f4dbca05d4304 | [
"MIT"
] | permissive | LudvigOlsen/rearrr | f07cdf8fe92647335fb5a26ffc1416162543c59a | 40d150b440ae06507873fad20a28345c08d48cf3 | refs/heads/master | 2023-04-19T04:24:49.834419 | 2023-03-01T10:48:10 | 2023-03-01T10:48:10 | 259,158,437 | 24 | 3 | null | null | null | null | UTF-8 | R | false | true | 4,001 | rd | circularize.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/circularize.R
\name{circularize}
\alias{circularize}
\title{Create x-coordinates so the points form a circle}
\usage{
circularize(
data,
y_col = NULL,
.min = NULL,
.max = NULL,
offset_x = 0,
keep_original = TRUE,
x_col_name = ".circle_x",
degrees_col_name = ".degrees",
origin_col_name = ".origin",
overwrite = FALSE
)
}
\arguments{
\item{data}{\code{data.frame} or \code{vector}.}
\item{y_col}{Name of column in \code{`data`} with y-coordinates to create x-coordinates for.}
\item{.min}{Minimum y-coordinate. If \code{NULL}, it is inferred by the given y-coordinates.}
\item{.max}{Maximum y-coordinate. If \code{NULL}, it is inferred by the given y-coordinates.}
\item{offset_x}{Value to offset the x-coordinates by.}
\item{keep_original}{Whether to keep the original columns. (Logical)
Some columns may have been overwritten, in which case only the newest versions are returned.}
\item{x_col_name}{Name of new column with the x-coordinates.}
\item{degrees_col_name}{Name of new column with the angles in degrees. If \code{NULL}, no column is added.
Angling is counterclockwise around \code{(0, 0)} and starts at \code{(max(x), 0)}.}
\item{origin_col_name}{Name of new column with the origin coordinates (center of circle). If \code{NULL}, no column is added.}
\item{overwrite}{Whether to allow overwriting of existing columns. (Logical)}
}
\value{
\code{data.frame} (\code{tibble}) with the added x-coordinates and the angle in degrees.
}
\description{
\Sexpr[results=rd, stage=render]{lifecycle::badge("experimental")}
Create the x-coordinates for a \code{vector} of y-coordinates such that
they form a circle.
This will likely look most like a circle when the y-coordinates are somewhat equally distributed,
e.g. a uniform distribution.
}
\examples{
\donttest{
# Attach packages
library(rearrr)
library(dplyr)
library(purrr)
has_ggplot <- require(ggplot2) # Attach if installed
# Set seed
set.seed(1)
# Create a data frame
df <- data.frame(
"y" = runif(200),
"g" = factor(rep(1:5, each = 40))
)
# Circularize 'y'
df_circ <- circularize(df, y_col = "y")
df_circ
# Plot circle
if (has_ggplot){
df_circ \%>\%
ggplot(aes(x = .circle_x, y = y, color = .degrees)) +
geom_point() +
theme_minimal()
}
#
# Grouped circularization
#
# Circularize 'y' for each group
# First cluster the groups a bit to move the
# circles away from each other
df_circ <- df \%>\%
cluster_groups(
cols = "y",
group_cols = "g",
suffix = "",
overwrite = TRUE
) \%>\%
dplyr::group_by(g) \%>\%
circularize(
y_col = "y",
overwrite = TRUE
)
# Plot circles
if (has_ggplot){
df_circ \%>\%
ggplot(aes(x = .circle_x, y = y, color = g)) +
geom_point() +
theme_minimal()
}
#
# Specifying minimum value
#
# Specify minimum value manually
df_circ <- circularize(df, y_col = "y", .min = -2)
df_circ
# Plot circle
if (has_ggplot){
df_circ \%>\%
ggplot(aes(x = .circle_x, y = y, color = .degrees)) +
geom_point() +
theme_minimal()
}
#
# Multiple circles by contraction
#
# Start by circularizing 'y'
df_circ <- circularize(df, y_col = "y")
# Contract '.circle_x' and 'y' towards the centroid
# To contract with multiple multipliers at once,
# we wrap the call in purrr::map_dfr
df_expanded <- purrr::map_dfr(
.x = 1:10 / 10,
.f = function(mult) {
expand_distances(
data = df_circ,
cols = c(".circle_x", "y"),
multiplier = mult,
origin_fn = centroid,
overwrite = TRUE
)
}
)
df_expanded
if (has_ggplot){
df_expanded \%>\%
ggplot(aes(
x = .circle_x_expanded, y = y_expanded,
color = .degrees, alpha = .multiplier
)) +
geom_point() +
theme_minimal()
}
}
}
\seealso{
Other forming functions:
\code{\link{hexagonalize}()},
\code{\link{square}()},
\code{\link{triangularize}()}
}
\author{
Ludvig Renbo Olsen, \email{r-pkgs@ludvigolsen.dk}
}
\concept{forming functions}
|
5f2f2993a077468a52f58f7a329f9ac5725e1908 | 5b32f199f9a7aed5168cbe29c1599efd374d2ce2 | /R/plot_featurezones.R | c409d89672b9f20cefeb8e41f6a1246a7d48a8d2 | [
"MIT"
] | permissive | lizlaw/maximin-test | 0d058bc1b8e4648450506c4265c6db236cf10200 | 42c17359c3da552a34c653e2c0917e20743d74dc | refs/heads/master | 2020-06-18T02:57:33.965652 | 2019-10-10T07:35:24 | 2019-10-10T07:35:24 | 196,143,623 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 340 | r | plot_featurezones.R | # plot_featurezones
# plot_featurezones: draw every layer (feature/species) of every zone in
# a grid -- one row per zone, one column per feature.
#
# fz is expected to be a list of zones, each a raster object with the
# same number of layers -- TODO confirm; the layer count is taken from
# the first zone only.
#
# Fixes: par() is snapshotted with no.readonly = TRUE (the original
# captured read-only parameters, which is why restoring needed
# suppressWarnings); the restore is now protected by on.exit so the
# device state is recovered even if a plot errors part-way; empty input
# no longer crashes.
plot_featurezones <- function(fz){
  if (length(fz) == 0) {
    return(invisible(NULL))
  }
  par_old <- par(no.readonly = TRUE)
  on.exit(par(par_old), add = TRUE)
  par(mar = c(2, 2, 2, 2))
  nz <- length(fz)
  nf <- raster::nlayers(fz[[1]])
  par(mfrow = c(nz, nf))
  for (i in seq_len(nz)) {
    for (j in seq_len(nf)) {
      plot(fz[[i]][[j]],
           main = paste0("Zone ", i, " (species ", j, ")" ))
    }
  }
  invisible(NULL)
}
5672cb0213bf554071747de11a04da5f533a33bd | df77a46155382712d5b0c5d391f658fd6341e311 | /concepts/tutorialspoint/db/connect_to_mysql.R | 166a47186c684af2c6811c2f53060d67d0e256b7 | [] | no_license | Candy128x/r-concept | fb9fa16d54f2aef229dc8c900f9e145ed5cc7e0e | 5402d177f8079b6591716aa04933e1bc9ea68ec4 | refs/heads/master | 2022-12-31T14:31:09.144400 | 2020-10-16T10:32:35 | 2020-10-16T10:32:35 | 302,610,103 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,085 | r | connect_to_mysql.R | library(RMySQL)
# --- Disabled template: the same connection written with named settings.
# --- Kept for reference; the live code below hardcodes identical values.
# db_user <- 'root'
# db_password <- 'qwerty123'
# db_name <- 'employee_db'
# db_table <- 'employees'
# db_host <- '127.0.0.1'
# db_port <- 3306
# mydb <- dbConnect(MySQL(), user = db_user, password = db_password,
# dbname = db_name, host = db_host, port = db_port)
# s <- paste0("select * from ", db_table)
# rs <- dbSendQuery(mydb, s)
# df <- fetch(rs, n = -1)
# on.exit(dbDisconnect(mydb))
# Connect to MySQL
# NOTE(review): credentials are hardcoded in source; prefer environment
# variables or a config file kept out of version control.
db_conn = dbConnect(MySQL(), user = 'root', password = 'qwerty123', dbname = 'employee_db',
host = 'localhost')
# Show List of Tables Present in DB
dbListTables(db_conn)
# Fetch the Tables Record (only the first 5 rows of the result set).
result_a2 = dbSendQuery(db_conn, "SELECT * FROM employees;")
result_a3 = fetch(result_a2, n = 5)
cat("\n\n ---result_a3--- \n")
print(result_a3)
cat("\n\n ---result_a3---nrow--- \n")
print(nrow(result_a3))
# Release the pending result set before issuing the next query.
dbClearResult(dbListResults(db_conn)[[1]])
result_b2 = dbSendQuery(db_conn, "SELECT * FROM employees WHERE last_name LIKE 'D%';")
result_b3 = fetch(result_b2)
print(result_b3)
dbClearResult(dbListResults(db_conn)[[1]])
# NOTE(review): the connection is never closed; add dbDisconnect(db_conn).
|
4b318d01eba2b7e110045a3279f730a1480278e9 | ffa84a5066a4b6c780abe9dad26b08f9ac5416e1 | /package/binomial/man/bin_probability.Rd | 38c098916eaeb422018045d7d6aae185d679a6aa | [] | no_license | justinhan33/R-Package | 1cf161399957edbef135330c4487535cf29e2e57 | 806fe67da844a79ff01fbadaf68f18f145e47559 | refs/heads/master | 2020-12-14T05:39:10.787925 | 2020-01-18T01:04:38 | 2020-01-18T01:04:38 | 234,659,557 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 921 | rd | bin_probability.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{bin_probability}
\alias{bin_probability}
\title{Probability function}
\usage{
bin_probability(success, trials, prob)
}
\arguments{
\item{success}{number of successes (numeric)}
\item{trials}{number of trials (numeric)}
\item{prob}{probability of success (numeric)}
}
\value{
Probability of getting k successes in n trials
}
\description{
Calculates the probability of getting k successes in n trials
}
\examples{
#probability of getting 2 successes in 5 trials
#(assuming prob of success = 0.5)
bin_probability(success = 2, trials = 5, prob = 0.5)
#probabilities of getting 2 or less successes in 5 trials
#(assuming prob of success = 0.5)
bin_probability(success = 0:2, trials = 5, prob = 0.5)
#55 heads in 100 tosses of a loaded coin with 45\% chance of heads
bin_probability(success = 55, trials = 100, prob = 0.45)
}
|
8f28b9095373f5d5d32fd98e85e8b2b9fb237d72 | 1f426287afea547b9bc1da47367f87f006845128 | /R/randomplot.R | 0eb4e70060703a3f9eb10114b40bf7a643b56a07 | [] | no_license | marami52/appdemo | 1135a7fa5fdefd0c89e0dd4e2add5af298394401 | 87013c9b0252e2dfdba736586f3d9b6eec64ed16 | refs/heads/master | 2021-01-18T07:24:03.755540 | 2016-07-08T18:15:46 | 2016-07-08T18:15:46 | 60,495,241 | 0 | 0 | null | 2016-06-06T03:17:33 | 2016-06-06T03:17:32 | null | UTF-8 | R | false | false | 1,568 | r | randomplot.R | #' Make a random plot
#'
#' Draws a histogram of \code{n} random values from the chosen
#' distribution ("normal" or "uniform").
#'
#' @export
#' @param n number of random values (a single count, 1 <= n < 1e6)
#' @param dist one of "normal" or "uniform".
#' @return Invisibly returns \code{NULL}; called for its plotting side
#'   effect.
randomplot1 <- function(n, dist = c("normal", "uniform")) {
  # Input validation: resolve the distribution choice and require a
  # sensible sample size (hist() fails on an empty draw; the upper bound
  # guards against accidentally huge requests, as in the original).
  dist <- match.arg(dist)
  stopifnot(is.numeric(n), length(n) == 1, n >= 1, n < 1e6)
  if (dist == "normal") {
    hist(rnorm(n))
  } else {
    hist(runif(n))
  }
  # Return nothing so calls do not print a value at top level.
  invisible()
}
randomplot <- function(n, dist=c("normal", "uniform")){
  # Draw a demo plot depending on `dist`. Despite the name, `n` is only
  # validated, not used by either branch.
  #input validation
  dist <- match.arg(dist)
  stopifnot(n < 1e6)
  if(dist == "normal"){
  # NOTE(review): plotlyOutput() is a Shiny UI placeholder, not a
  # renderer; outside a Shiny app this likely should be the plot_ly
  # object itself (or print(plot_ly(...))) -- TODO confirm context.
  # NOTE(review): library() calls inside a function alter the search
  # path as a side effect each time the function runs.
  library(plotly)
  set.seed(100)
  d <- diamonds[sample(nrow(diamonds), 1000), ]
  plotlyOutput(plot_ly(d, x = carat, y = price, text = paste("Clarity: ", clarity),
  mode = "markers", color = carat, size = carat))
  #hist(rnorm(n))
  #plot(lm(mpg~disp, data=mtcars))
  }
  if(dist == "uniform"){
  library(ggplot2)
  dat <- data.frame(cond = rep(c("A", "B"), each=10),
  xvar = 1:20 + rnorm(20,sd=3),
  yvar = 1:20 + rnorm(20,sd=3))
  # NOTE(review): the ggplot object below is the function's return value
  # only because it is the last expression evaluated; it is not
  # print()ed, so nothing is drawn unless the caller prints the result.
  ggplot(dat, aes(x=xvar, y=yvar)) +
  geom_point(shape=1) + # Use hollow circles
  geom_smooth() # Add a loess smoothed fit curve with confidence region
  #plot(rnorm(n), col="green")
  #hist(runif(n))
  #plot(lm(mpg~disp, data=mtcars))
  }
  #return nothing
  #invisible();
}
|
68d6b788cd84a691a0778a78c212bedac334eef8 | b112b438897dabd6ddeb6ed0d0e45e51de474db0 | /global.R | df3219c03a301e4994a8f75cd8352a3079a68ae8 | [] | no_license | ZubeirSiddiqui/Capstone | f5752351006ce4d4655c3654005feb5c0712cc40 | 51919546ee5bc692e4e455768927d260dc2b5563 | refs/heads/master | 2021-01-02T23:19:13.771894 | 2017-08-12T21:34:03 | 2017-08-12T21:34:03 | 99,498,249 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,766 | r | global.R | # global.R ####
# Coursera Data Science Capstone Project (https://www.coursera.org/course/dsscapstone)
# Shiny script for loading data into global environment
# Developer: Zubeir Siddiqui
# Date: 6th August 2017
# Libraries and options ####
library(shiny)
library(dplyr)
library(wordcloud)
library(RColorBrewer)
library(stringr)
library(stringi)
# setwd("C:/Users/zubeir/Desktop/DataScience/capstone/Word_Prediction")
# Load the 1- to 5-gram frequency tables used for next-word prediction.
#
# load_ngram_freq: read a saved frequency table and, for n > 1, split
# each n-gram string in `word` into columns word1..wordn holding the
# individual tokens. The word columns are character; under R >= 4.0 this
# matches the stringsAsFactors default of the original data.frame()
# calls. Replaces five near-identical copy-pasted load/split stanzas.
load_ngram_freq <- function(path, n) {
  freq <- readRDS(path)
  if (n == 1) {
    return(freq)
  }
  parts <- str_split_fixed(freq$word, " ", n)
  out <- data.frame(freq = freq$freq, word = freq$word)
  for (i in seq_len(n)) {
    out[[paste0("word", i)]] <- parts[, i]
  }
  out
}

# One table per n-gram order; the rm() bookkeeping of the original is no
# longer needed because no intermediate freqN objects are created.
freq1_data <- load_ngram_freq("freq1.rds", 1)
freq2_data <- load_ngram_freq("freq2.rds", 2)
freq3_data <- load_ngram_freq("freq3.rds", 3)
freq4_data <- load_ngram_freq("freq4.rds", 4)
freq5_data <- load_ngram_freq("freq5.rds", 5)
|
cd6589d604a04a829cc608cd7bda73364ffed2bb | d3b5b74f6bace8d36cfe99bb619c34ba6538578b | /man/encode_onehot_fit.Rd | f8a35175c60d80fa4e545ed6348c7fc8da2b7e7d | [] | no_license | akunuriYoshitha/CatEncode | b4c1f958079608985c3a476e9683e49885f655bd | a1d16fc070b3f4b3a33049e3631969208ce29002 | refs/heads/master | 2023-06-17T08:09:51.542957 | 2021-07-10T18:45:22 | 2021-07-10T18:45:22 | 315,023,192 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 667 | rd | encode_onehot_fit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CatEncodeFit.R
\name{encode_onehot_fit}
\alias{encode_onehot_fit}
\title{A fit function to encode categorical data}
\usage{
encode_onehot_fit(df, colname, fit)
}
\arguments{
\item{df}{Any dataset with atleast one categorical field}
\item{colname}{A string representing the name of the categorical field in the given dataset}
\item{fit}{A list returned from "BestCatEncode" that is used to fit the test data.}
}
\value{
Returns the encoded data vector
}
\description{
Detects the categorical variables and encodes them using One-Hot Encoding, based on the fit file generated from the training data.
}
|
2953a449fffe40a353af00e1c4732f1175d8e228 | bd7475aeb82642093236e3b5f3b2a49d1060978a | /root-DataScience/practice/shinkage.r | c02a00c9d809560e20a86ca2ffe6b9fe2aa02b39 | [] | no_license | RoseSarlake/DataScience | a1575a7cc9810e601e78f60e48148fe9a4495fc3 | 23bbaa78d2f189b3f7da9f6031782bfe7297e333 | refs/heads/master | 2023-05-25T13:37:59.072053 | 2021-05-26T20:21:23 | 2021-05-26T20:21:23 | 344,139,739 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 738 | r | shinkage.r | library(glmnet)
# Ridge regression on mtcars: predict horsepower (hp) from the remaining
# variables using glmnet over a grid of penalties (lambda).
x<- model.matrix(hp~.-1,mtcars) # NO intercept
y<- mtcars[,"hp"]
# Penalty grid from very strong (1e10) down to weak (1e-2) shrinkage.
grid <- 10^seq(10,-2,length = 100)
ridge.fit<-glmnet(x,y,alpha = 0, lambda = grid) # alpha=0 =>ridge
# NOTE(review): options(digits = 2) changes global print formatting for
# the whole session, not just this script.
options(digits = 2)
coef(ridge.fit)[,1]
coef(ridge.fit)[,100]
#----------
# Hold-out split: half the rows train, the rest (negative index) test.
# NOTE(review): no set.seed() before sample(), so the split -- and every
# downstream number -- changes on each run.
train<-sample(1:nrow(mtcars),nrow(mtcars)/2)
test <- (-train)
cv.out <- cv.glmnet(x[train,],y[train],alpha=0,nfolds=5) # alpha 0 => Ridge regression, alpha 1 => Lasso regression
best.lambda<-cv.out$lambda.min
# Test-set mean squared error at the cross-validated lambda.
prediction<-predict(ridge.fit,s=best.lambda,newx = x[test,])
mean((prediction-y[test])^2)
#refit on the full dataset
ridge.fit.final<- glmnet(x,y,alpha = 0) # do not pass lambda here as parameter. alpha = 0/1
predict(ridge.fit.final,s=best.lambda,type="coefficients")
|
8b4b1e4f386052c5833ddf4d730b1843998d555b | c896f7afea79a502513a222343e622191748ca9e | /man/palette2colors.Rd | 30e6990e061226cab0a9e963498693f77a637285 | [] | no_license | cardiomoon/ztable | 397eac473985336cc67630689c1182102adb2427 | 5b4e34f573aa1e769f0127892c3c574e19845288 | refs/heads/master | 2023-07-24T09:59:42.126063 | 2023-07-14T07:34:53 | 2023-07-14T07:34:53 | 28,034,725 | 22 | 13 | null | 2022-06-11T02:58:00 | 2014-12-15T11:58:27 | HTML | UTF-8 | R | false | true | 638 | rd | palette2colors.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ztable2flextable.R
\name{palette2colors}
\alias{palette2colors}
\title{Extract hexadecimal colors from a color palette}
\usage{
palette2colors(name, reverse = FALSE)
}
\arguments{
\item{name}{The name of color palette from RColorBrewer package}
\item{reverse}{Whether or not reverse the order of colors}
}
\value{
hexadecimal colors
}
\description{
Extract hexadecimal colors from a color palette
}
\examples{
require(RColorBrewer)
require(magrittr)
palette2colors("Reds")
ztable(head(mtcars,10)) \%>\%
addColColor(cols=1:12,bg=palette2colors("Set3"))
}
|
fe41fdc5d1918b03d4822dfac94b1885cab8f373 | 6d67c5483469e5a1fda9204b1bc1f3b7cd24a2a1 | /R/provider1.R | 44d69ea320d2a31dbf864b4920235af22c90cd3d | [] | no_license | Thell/knitr-yaml-example | 84e715aa1245a62f5617419f1f79f705d98fc48e | 5658ce7fd9112fab0b4efee4b2bc697a193ca42b | refs/heads/master | 2016-09-06T06:29:20.284095 | 2014-03-11T19:03:17 | 2014-03-11T19:03:17 | 17,642,307 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 509 | r | provider1.R | #' Provide a chunk hook for knitr to be executed by run_hooks.
#'
#' Fires as a knitr chunk hook (see run_hooks). Before a chunk runs it
#' returns a marker string for knitr to emit; after the chunk it returns
#' nothing (invisible NULL).
#'
#' @param before TRUE when called before the chunk, FALSE after.
#' @param options Chunk options passed in by knitr (unused).
#' @param envir Evaluation environment passed in by knitr (unused).
#' @export
providedHook1 <- function(before, options, envir) {
  if (!before) {
    return(invisible(NULL))
  }
  sprintf("providedHook1 is triggering! \n")
}
#' Chunk hook for knitr, executed via run_hooks.
#'
#' Before a chunk runs this returns a marker string for knitr to emit;
#' after the chunk it returns nothing (invisible NULL).
#'
#' @param before TRUE when called before the chunk, FALSE after.
#' @param options Chunk options passed in by knitr (unused).
#' @param envir Evaluation environment passed in by knitr (unused).
#' @export
providedHook2 <- function(before, options, envir) {
  if (!before) {
    return(invisible(NULL))
  }
  sprintf("providedHook2 is triggering! \n")
}
|
a240e80b74dbdb46ce737d4aa534293c86f8e2fb | 8e5b400c7d8a471c6f1ddc68be71d149753acb0c | /R/math-sums.R | c68c25c2ff89a93c845390067a589f0a5eee642e | [
"MIT"
] | permissive | annieaitken123/aceR | 35d501c48bbce92dc9209289a0239464a49356a3 | 8ffa809219727de72983e7dbceee2402cf4af6ce | refs/heads/master | 2020-12-30T09:27:32.285557 | 2017-06-28T03:51:56 | 2017-06-28T03:51:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 179 | r | math-sums.R |
#' Running (cumulative) sums of a vector: element i of the result is
#' sum(vec[1:i]). Implemented with cumsum(), which is O(n), instead of
#' recomputing every prefix sum from scratch (O(n^2)); cumsum() also
#' returns an empty vector for empty input, where the original
#' seq(1:length(vec)) indexing misbehaved.
#' @keywords internal
consecutive_sums <- function(vec) {
  cumsum(vec)
}
5f06f3a7ac14286fd331ce18f55079bb70a1102e | 9f755220afb902a6179b1723a9716b74887a62ef | /R/RcppExports.R | c6d5a99d893713b25a8405889dc4e49a2fb3ecc5 | [] | no_license | CreRecombinase/MCMCArmadillo | 36c5e452b194c23bf025cae02826c14ee7be41d5 | e961cf8c57ccab0a3928b3c0538df15dbb08a85f | refs/heads/master | 2021-08-31T15:48:32.630163 | 2017-12-21T23:24:22 | 2017-12-21T23:24:22 | 115,055,938 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,052 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# NOTE(review): auto-generated wrappers around the package's compiled
# C++ routines (via .Call). Do not edit by hand -- regenerate with
# Rcpp::compileAttributes() and document behaviour in the C++ sources.

em_with_zero_mean_c <- function(y, maxit) {
    .Call(`_MCMCArmadillo_em_with_zero_mean_c`, y, maxit)
}

mvrnormArma <- function(n, mu, Sigma) {
    .Call(`_MCMCArmadillo_mvrnormArma`, n, mu, Sigma)
}

# Multivariate normal density; log scale when logd = TRUE.
dmvnrm_arma <- function(x, mean, sigma, logd = FALSE) {
    .Call(`_MCMCArmadillo_dmvnrm_arma`, x, mean, sigma, logd)
}

get_sigmabeta_from_h_c <- function(h, gam, Sigma, X, T) {
    .Call(`_MCMCArmadillo_get_sigmabeta_from_h_c`, h, gam, Sigma, X, T)
}

get_h_from_sigmabeta_c <- function(X, sigmabeta, Sigma, gam, n, T) {
    .Call(`_MCMCArmadillo_get_h_from_sigmabeta_c`, X, sigmabeta, Sigma, gam, n, T)
}

get_target_c <- function(X, Y, sigmabeta, Sigma, gam, beta) {
    .Call(`_MCMCArmadillo_get_target_c`, X, Y, sigmabeta, Sigma, gam, beta)
}

sample_index <- function(size, prob = as.numeric( c())) {
    .Call(`_MCMCArmadillo_sample_index`, size, prob)
}

# --- Per-parameter update steps used inside the MCMC loop ---

update_gamma_c <- function(X, Y, gam) {
    .Call(`_MCMCArmadillo_update_gamma_c`, X, Y, gam)
}

betagam_accept_c <- function(X, Y, sigmabeta1, inputSigma, Vbeta, gam1, beta1, gam2, beta2, changeind, change) {
    .Call(`_MCMCArmadillo_betagam_accept_c`, X, Y, sigmabeta1, inputSigma, Vbeta, gam1, beta1, gam2, beta2, changeind, change)
}

update_betagam_c <- function(X, Y, gam1, beta1, Sigma, sigmabeta, Vbeta, bgiter) {
    .Call(`_MCMCArmadillo_update_betagam_c`, X, Y, gam1, beta1, Sigma, sigmabeta, Vbeta, bgiter)
}

update_h_c <- function(initialh, hiter, gam, beta, Sig, X, T) {
    .Call(`_MCMCArmadillo_update_h_c`, initialh, hiter, gam, beta, Sig, X, T)
}

rinvwish_c <- function(n, v, S) {
    .Call(`_MCMCArmadillo_rinvwish_c`, n, v, S)
}

update_Sigma_c <- function(n, nu, X, beta, Phi, Y) {
    .Call(`_MCMCArmadillo_update_Sigma_c`, n, nu, X, beta, Phi, Y)
}

# "_sw_" variants: presumably small-world proposal versions of the
# updates above (cf. the smallworlditer argument) -- confirm in C++.

update_gamma_sw_c <- function(X, Y, gam, marcor) {
    .Call(`_MCMCArmadillo_update_gamma_sw_c`, X, Y, gam, marcor)
}

betagam_accept_sw_c <- function(X, Y, sigmabeta1, inputSigma, Vbeta, gam1, beta1, gam2, beta2, changeind, change) {
    .Call(`_MCMCArmadillo_betagam_accept_sw_c`, X, Y, sigmabeta1, inputSigma, Vbeta, gam1, beta1, gam2, beta2, changeind, change)
}

update_betagam_sw_c <- function(X, Y, gam1, beta1, Sigma, marcor, sigmabeta, Vbeta, bgiter, smallworlditer) {
    .Call(`_MCMCArmadillo_update_betagam_sw_c`, X, Y, gam1, beta1, Sigma, marcor, sigmabeta, Vbeta, bgiter, smallworlditer)
}

# --- Top-level MCMC drivers ---

doMCMC_c <- function(X, Y, n, T, Phi, nu, initialbeta, initialgamma, initialSigma, initialsigmabeta, marcor, Vbeta, niter, bgiter, hiter, switer) {
    .Call(`_MCMCArmadillo_doMCMC_c`, X, Y, n, T, Phi, nu, initialbeta, initialgamma, initialSigma, initialsigmabeta, marcor, Vbeta, niter, bgiter, hiter, switer)
}

run2chains_c <- function(X, Y, initial_chain1, initial_chain2, Phi, niter = 1000L, bgiter = 500L, hiter = 50L, switer = 50L, burnin = 5L) {
    .Call(`_MCMCArmadillo_run2chains_c`, X, Y, initial_chain1, initial_chain2, Phi, niter, bgiter, hiter, switer, burnin)
}
|
e7ff14ddab258c88af8c39bb97d1e3c870557a00 | cacf9d286229e3cd8b352f45f5c665469613c836 | /man/is_dhist.Rd | 1f550d6f0433e307c114d6fb6aada8c409313f1f | [
"MIT"
] | permissive | alan-turing-institute/network-comparison | e42a102c84874b54aff337bcd6a76a1089b9eab7 | ee67bd42320a587adae49bafea6a59bfb50aafc6 | refs/heads/master | 2022-07-03T04:30:06.450656 | 2022-06-06T20:14:30 | 2022-06-06T20:14:30 | 75,952,713 | 13 | 1 | MIT | 2022-06-10T12:58:41 | 2016-12-08T15:57:35 | R | UTF-8 | R | false | true | 796 | rd | is_dhist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dhist.R
\name{is_dhist}
\alias{is_dhist}
\title{Check if an object is a \code{dhist} discrete histogram}
\usage{
is_dhist(x, fast_check = TRUE)
}
\arguments{
\item{x}{An arbitrary object}
\item{fast_check}{Boolean flag indicating whether to perform only a
superficial fast check limited to checking the object's class attribute
is set to \code{dhist} (default = \code{TRUE})}
}
\description{
Checks if the input object is of class \code{dhist}. If \code{fast_check} is
\code{TRUE} (the default) then the only check is whether the object has a
class attribute of \code{dhist}. If \code{fast_check} is \code{FALSE}, then
checks are also made to ensure that the object has the structure required of
a \code{dhist} object.
}
|
0783febeeb75dc0769a84c30505d0440f1b4561f | 0105892335c05de3547050e9e7f49d7f4aaff26f | /20190122_Code.R | 70ff3c75c1b3585d3071eedc5395bb589edcb1fe | [] | no_license | shohct/TidyTuesday | 567528234a9e7ab7a2d00b3cfb694819bddbd50c | 01d824350dd7a597bca038a5904f63ecc42caae3 | refs/heads/master | 2020-04-18T05:57:19.487830 | 2019-01-24T04:15:58 | 2019-01-24T04:15:58 | 167,299,170 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 952 | r | 20190122_Code.R | library(tidyverse)
# TidyTuesday 2019-01-22: US prison population rates by race and region.
prisonsum <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-01-22/prison_summary.csv")
# Stacked-area chart of each race's share of the incarceration rate per
# year (from 1990 onward), faceted by urbanicity.
prisonsum %>%
  filter(!pop_category %in% c("Male", "Female", "Total", "Other")) %>%
  group_by(year) %>%
  # Share of the summed per-100k rates within each year.
  mutate(percent = rate_per_100000/sum(rate_per_100000)) %>%
  filter(year >= 1990) %>%
  ggplot(aes(year, percent, fill = pop_category)) +
  geom_area(stat = "identity", position = "fill") +
  facet_grid(rows = "urbanicity") +
  scale_fill_brewer(palette = "Set2") +
  scale_y_continuous(labels = scales::percent) +
  labs(fill = "Race",
       y = "Proportion",
       x = "Year",
       title = "Proportion of Prison Population per 100,000 people",
       subtitle = "Cross Section by Race and Region",
       caption = "Data from: https://github.com/rfordatascience/tidytuesday/blob/master/data/2019/2019-01-22/prison_summary.csv")
# Save the most recent ggplot to disk.
ggsave("20190122_plot.png")
|
29a94cf880ebb826f6b8009e62a0a47626abb4a2 | ece45f7b58cb8087a25cee988f59c68ba100c0d8 | /522407274.r | df8963bf9a10909893df107453e4843b561a414b | [] | no_license | erex/MT4113-2015 | 8bf6afe2b328a2e087d65f04c777ea9b356898dc | 4ba68fd5c966dfcd962643634b8e3f768bc2652b | refs/heads/master | 2021-01-10T12:39:16.085352 | 2015-10-04T15:44:20 | 2015-10-04T15:44:20 | 43,632,064 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,966 | r | 522407274.r | my.rnorm <- function (n, m=-0, sd=1){
# call function to generate unifrom random number
# input: n number
# output: two pairs of unifrom random numbers
u1 <- 1 # declare object u1
u2 <- 1 # declare object u2
x1 <- 1 # declare object x1
x2 <- 1 # declare object x2
j <- 1
w= u1^2 + u2^2 #declare object w
my.list <- list #create a list to store valuse
k <-0
while (w<1){ #condition for rejection pair
k <- k+1
u1 <- runif(1) # generate random uniform numbers
u1 =2*u1-1 #transform to unit square
u2 <- runif(1) #generate random uniform numbers
u2 =2*u2-1 #transform to unit square
if (n%%2==1){ # check number is odd
}
v = sqrt(-2*log(w)/w) # define varable v
x1 = u1*v # define x1
x2 = u2*v #define x2
my.list[k] <-x1
my.list[k] <-x2
my.list[k] # store valuse in a list
j <- j+1
}
# return random values
return(k)
}
# my.rchisq: generate n pseudo-random chi-squared deviates with df
# degrees of freedom, built as row sums of df squared standard normals.
#
# Bug fixed: the original overwrote n with 10 and fixed the degrees of
# freedom at 2, ignoring both arguments; both are now honoured.
#
# input:  n  - number of deviates to return (a single positive count)
#         df - degrees of freedom (single positive integer, default 1)
# output: numeric vector of length n
my.rchisq <- function(n, df=1){
  stopifnot(is.numeric(n), length(n) == 1, n >= 1,
            is.numeric(df), length(df) == 1, df >= 1)
  # n x df matrix of squared N(0, 1) draws; each row sum is chi-squared
  # with df degrees of freedom.
  X <- matrix(rnorm(n * df), n, df)^2
  y <- rowSums(X)
  return(y)
}
# some code was used from the following book:
# Rizzo, M. (2008). Statistical computing with R. Boca Raton: Chapman & Hall/CRC.
# NOTE(review): x and y are not defined at this point in the script, so
# this call fails unless matching objects already exist in the workspace
# (presumably two samples to compare, e.g. my.rchisq output) -- TODO fix.
t.test(x,y, var.equal = FALSE, conf.level = 0.95)
# t.test (Welch, unequal variances) is used here to build a 95%
# confidence interval for the difference of means for my.rchisq output.
# information for the test was taken from:
# Verzani, J. (2014). Using R for Introductory Statistics. 2nd ed.
# Boca Raton: Taylor & Francis Group, LLC.
# my.rt: generate n pseudo-random t-distributed deviates with df degrees
# of freedom.
#
# Bug fixed: the original returned two fixed quantiles from qt() -- not
# random draws -- and ignored both arguments. A t variate is built here
# as Z / sqrt(C / df), with Z standard normal and C an independent
# chi-squared variate with df degrees of freedom.
#
# input:  n  - number of deviates to return (a single positive count)
#         df - degrees of freedom (single positive integer, default 1)
# output: numeric vector of length n
my.rt <- function(n, df=1){
  stopifnot(is.numeric(n), length(n) == 1, n >= 1,
            is.numeric(df), length(df) == 1, df >= 1)
  z <- rnorm(n)
  chisq <- rowSums(matrix(rnorm(n * df), n, df)^2)
  return(z / sqrt(chisq / df))
}
|
664a5bd077a33bb32cbabc4ca96f80b88de10f80 | c79677094dcaeaecedb5fdb955c03f7be13087ec | /AbsEEMSfilecomp_function.R | 759bca00f3a25608fc37c6bedc04b922cc98f22c | [] | no_license | ashjolly/SpecScripts | 263ff4b76056d040743da25cdb9ac3bcd25fd79d | b07b502b2f0a04b428ee0ac971f62b84a1294a8c | refs/heads/master | 2021-03-27T09:11:45.303269 | 2017-01-18T23:12:09 | 2017-01-18T23:12:09 | 37,336,022 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,988 | r | AbsEEMSfilecomp_function.R | #
# function for creating a master file of corrected absorbance and
# fluorescence (EEMS) file names, keyed by sample ID, for later
# calculation of absorbance and fluorescence indices
# 24 July 2015
# AJ PhD project
#
# Arguments:
#   directoryRaleigh   - directory holding Raleigh-corrected EEMS files
#   projectname        - project tag embedded in corrected file names
#   directorynoncorabs - directory holding non-corrected absorbance files
#   filelist_EEMScor   - character vector of corrected EEMS file names
# Returns a data frame matching corrected EEMS, corrected absorbance and
# non-corrected absorbance file names on the shared sample ID.
#
# NOTE(review): this function reads `directoryAbsEEMs` from the calling
# environment (it is not a parameter) and changes the working directory
# three times without restoring it -- consider passing the directory in
# and using full paths or on.exit(setwd(old)).
##################
abseemfilecomp <- function(directoryRaleigh, projectname, directorynoncorabs, filelist_EEMScor){
  setwd(directoryRaleigh)
  #create column with sample ID - extracted from corrected EEMS filename
  # strapplyc() (from gsubfn) captures the part of the name before
  # "_<projectname>_Raleighcorr.csv".
  y = length(filelist_EEMScor)
  sample.ID <- 0 #create sample ID variable
  for (i in 1:y){ # NOTE(review): 1:y mis-iterates when y == 0
    sample.ID.temp <- strapplyc(filelist_EEMScor[i], paste("(.*)", "_", projectname, "_Raleighcorr.csv", sep = ""), simplify = TRUE)
    sample.ID[i] <- sample.ID.temp
  }
  filelist <- cbind(filelist_EEMScor , sample.ID)
  ###########
  #Abs - non-corrected: list the raw absorbance exports and pull their
  #sample IDs from between "001" and "ABS" in each file name.
  setwd(directorynoncorabs)
  filelist_Abs_noncorr <- list.files(pattern = "ABS.dat$")
  #create column with sample ID - extracted from ABS filename
  y = length(filelist_Abs_noncorr)
  for (i in 1:y){
    sample.ID.temp <- strapplyc(filelist_Abs_noncorr[i], "001(.*)ABS", simplify = TRUE)
    sample.ID[i] <- sample.ID.temp
  }
  # NOTE(review): sample.ID is reused across the three loops; if a later
  # file list is shorter than an earlier one, stale IDs remain at the
  # tail of the vector and cbind() will recycle or error.
  filelist_Abs_noncorr <- cbind(filelist_Abs_noncorr, sample.ID)
  ############ create column with sample ID - extracted from corrected Abs filename
  #Abs (corrected)
  setwd(directoryAbsEEMs) # NOTE(review): global, not a parameter
  filelist_Abscor <- list.files(pattern = "_AbsCorrected.csv$")
  #create column with sample ID - extracted from corrected Abs filename
  y = length(filelist_Abscor)
  for (i in 1:y){
    sample.ID.temp <- strapplyc(filelist_Abscor[i], paste("(.*)","_", projectname,"_AbsCorrected",".csv", sep = ""), simplify = TRUE)
    sample.ID[i] <- sample.ID.temp
  }
  filelist_Abs <- cbind(filelist_Abscor, sample.ID)
  #######
  # Merge EEM and Abs filenames by sample ID to create file with all of the filenames
  # (full outer joins keep samples present in only some of the lists).
  data.1 <- merge(filelist, filelist_Abs, by = "sample.ID", all = TRUE)
  data.1 <- merge(data.1, filelist_Abs_noncorr, by = "sample.ID", all = TRUE)
  return(data.1)
}
|
ef8a8ddb4dae5b34ab67bef4428bb70e2043c011 | 2357953c5b5d8a18aaa2f3010b28b95489bc5212 | /exec/load-results.R | 85374001c645e9b83aa4417c7575e086da552d04 | [] | no_license | bips-hb/bscomparison | 47d00588fc94219f3689b5c8c8bd1de8d383d883 | 5044eedb94e17ac7e050e67ae00db68d43a6f782 | refs/heads/main | 2023-06-20T07:36:43.605156 | 2021-07-16T16:08:50 | 2021-07-16T16:08:50 | 386,695,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,448 | r | load-results.R | library(readr)
library(dplyr)
### load the data ----
# Per-simulation best F1 scores for each algorithm/alpha combination.
results <- readr::read_tsv("results/best-f1-scores.tsv")
### turn alphas into numeric
results$alpha <- as.numeric(results$alpha)
### add the lasso explicitly
# The lasso is the elastic net at alpha == 1; duplicate those rows under
# their own algorithm label so it can be reported separately.
results_enet1 <- results %>% filter(algorithm == "enet", alpha == 1)
results_enet1$alpha <- as.numeric(results_enet1$alpha)
results_enet1$algorithm <- "lasso"
results <- rbind(results, results_enet1)
### add simulation id
# One id per unique simulation setting (same n, p, sparsity, correlation
# structure, beta type and signal-to-noise ratio).
results$simulation_id <- results %>% group_indices(n, p, s, dimensionality, corr_type, rho, beta_type, snr)
### get the best result over all alphas for the e-net
# temp_id numbers the rows within each (simulation, alpha) group so
# that, regrouped by (simulation, temp_id), the best-F1 alpha can be
# picked per row position; slice(1) breaks F1 ties deterministically.
results_enet <- results %>%
  filter(algorithm == "enet") %>%
  group_by(simulation_id, alpha) %>%
  mutate(temp_id = 1:n()) %>%
  ungroup() %>%
  group_by(simulation_id, temp_id) %>%
  filter(F1 == max(F1)) %>%
  slice(1) %>%
  ungroup()
results_enet <- results_enet %>%
  select(-temp_id)
# combine the new enet results with the other results
results <- rbind(
  results %>% filter(algorithm != "enet"),
  results_enet
)
### add nice labels for the algorithms
# Map an algorithm id to its human-readable plot label. Unknown ids now
# fall through to the id itself: switch() previously returned NULL for
# them, which would have made the sapply() over results$algorithm fail.
get_name <- function(algorithm) {
  switch(algorithm,
         "bs" = "best subset",
         "fs" = "forward stepwise",
         "lasso" = "lasso",
         "enet" = "e-net",
         "enet_bs_hybrid" = "hybrid",
         algorithm)
}
# Attach a human-readable label for plotting, then persist the cleaned
# table for downstream analysis.
results$algorithm_label <- sapply(results$algorithm, function(algorithm) get_name(algorithm))
readr::write_rds(results, "results-final.rds", compress = "gz")
|
bc50bde1a32f4eeecee241ab6a4ab1fd5c178c6a | 0746eb650e573d9b99832715fc0c1c9ed9d7d3b3 | /visualizing_parking_tickets.R | 954f15e3814793dcf9e690b211cbd3f94aa5ef2a | [] | no_license | renardhero/Illegal-parking-with-R | 9bb7a538940ff8aa9a42accdf0a154d715effa88 | 1fed62978b17b22ec3b23011e1bce0cc43233dd8 | refs/heads/master | 2021-04-03T06:50:01.092559 | 2018-03-15T16:28:57 | 2018-03-15T16:28:57 | 124,655,675 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 10,818 | r | visualizing_parking_tickets.R | library(dplyr)
library(tidyr)
# Read the raw parking-ticket data; the two coordinate columns are numeric,
# the identifier/categorical columns are read as factors.
pysakointivirheet <- read.csv("Pysakointivirheet.csv", header = TRUE, sep = ",",
                              colClasses = c("factor","character","factor","factor",
                                             "factor","factor","numeric","numeric",
                                             "factor","factor","factor","factor",
                                             "character"))
# Pysakointivirheet analysis: quick structural exploration
str(pysakointivirheet)
summary(pysakointivirheet)
summary(pysakointivirheet$Postinumero)
glimpse(pysakointivirheet)
# Save the first rows for reference.
# (Renamed from `head` so base::head() is not shadowed by a data frame.)
head_rows <- head(pysakointivirheet)
write.csv(head_rows, "head_pysakointivirheet.csv")
min(pysakointivirheet$Virheen.tekovuosi)
max(pysakointivirheet$Virheen.tekovuosi)
# Grouping the data by postal codes and making a count variable summarising
# the amount of parking tickets
library(dplyr)
p <- pysakointivirheet %>%
  group_by(Postinumero) %>%
  summarise(count = n()) %>%
  arrange(count)
# Drop the missing-postal-code row explicitly; the original removed it by
# its hard-coded position (`p <- p[-89,]`), which breaks if the data change.
p <- p %>% filter(!is.na(Postinumero))
# Share of all tickets falling in each postal code
percentages <- p$count / sum(p$count)
p2 <- data.frame(Postinumero = p$Postinumero, count = p$count,
                 percentages = percentages)
#Let's download .shp map and plot the tickets on it!
# NOTE(review): maptools::readShapeSpatial is deprecated/retired;
# sf::st_read is the modern replacement -- confirm before upgrading packages.
library(rgeos)
library(maptools)
np_dist <- readShapeSpatial("PKS_postinumeroalueet_2017_shp.shp")
#The map currently has also Espoo and Vantaa so let's filter it so that only Helsinki remains
# All postal codes belonging to the city of Helsinki
helsinki_postal_codes <- c("00100","00120","00130","00140","00150","00160","00170",
"00180","00200","00210","00220","00230","00240","00250",
"00260","00270","00280","00290","00300","00310","00320",
"00330","00340","00350","00360","00370","00380","00390",
"00400","00410","00420","00430","00440","00500","00510",
"00520","00530","00540","00550","00560","00570","00580",
"00590","00600","00610","00620","00630","00640","00670",
"00680","00690","00700","00710","00720","00730","00740",
"00750","00760","00770","00780","00790","00800","00810",
"00820","00830","00840","00850","00860","00870","00880",
"00890","00900","00910","00920","00930","00940","00950",
"00960","00970","00980","00990","00450","00460","00470",
"00480","00490")
# Keep only the polygons whose postal code (Posno) is in the Helsinki list
np_dist <- np_dist[np_dist$Posno %in% helsinki_postal_codes,]
length(np_dist$Posno)
#Let's fortify the map, so that it can be drawn, postal numbers as id's!
np_dist <- fortify(np_dist, region = "Posno")
library(ggplot2)
#This dataframe is used to group the map data by postal code and getting
#means of both long and lat for all postal codes for labeling the areas
distcenters <- np_dist %>%
group_by(id) %>%
summarise(clat = mean(lat), clong = mean(long))
#Let's merge the dataframes
# Full outer join: keep postal codes present in either the ticket counts
# (p) or the map centroids (distcenters).
merged <- merge(p,distcenters,by.x="Postinumero", by.y="id", all.x = TRUE, all.y = TRUE)
# Drop anomalous postal codes.
# NOTE(review): "002230" has six digits -- possibly a typo for a 5-digit
# code; verify against the raw data.
merged <- merged[merged$Postinumero!="002230" & merged$Postinumero!="00501" & merged$Postinumero!="00631" & merged$Postinumero!="00632", ]
merged_sorted <- merged[order(merged$Postinumero),]
mean(merged_sorted$count, na.rm = TRUE)
median(merged_sorted$count, na.rm = TRUE)
# merged_sorted is ordered by postal code, so this single "top" row is the
# lowest code; it is used below only to place one text label on the map.
merged_top <- head(merged_sorted,1)
merged_top
#Now we can actually plot the Chloropleth map!!
ggplot() + geom_map(data = merged, aes(map_id = Postinumero, fill = count),
map = np_dist) + expand_limits(x = np_dist$long, y = np_dist$lat) + scale_fill_gradient2(low = "white",
midpoint = 6000, high = "red", limits = c(0, 89561)) +
ggtitle("Counts of parking tickets in Helsinki region postal codes 2014-2017") +
geom_text(data = merged_top, aes(x = clong, y = clat, label = Postinumero, size = 3), size = 3, col = "darkgrey")
#below adds labels to the map, however, it is a mess...
#+ geom_text(data = merged_top, aes(x = clong, y = clat, label = Postinumero, size = 3), size = 2)
#Let's now take a deep dive into the area, where most parking tickets are granted
# Reload the full map and keep only the 19 city-centre postal codes
np_dist <- readShapeSpatial("PKS_postinumeroalueet_2017_shp.shp")
helsinki_postal_codes <- c("00100","00120","00130","00140","00150","00160","00170",
"00180","00200","00210","00220","00230","00240","00250",
"00260","00270","00280","00290","00300")
np_dist <- np_dist[np_dist$Posno %in% helsinki_postal_codes,]
np_dist <- fortify(np_dist, region = "Posno")
distcenters <- np_dist %>%
group_by(id) %>%
summarise(clat = mean(lat), clong = mean(long))
# all.x = FALSE: keep only postal codes present on the filtered map
merged <- merge(p,distcenters,by.x="Postinumero", by.y="id", all.x = FALSE, all.y = TRUE)
merged_sorted <- merged[order(merged$Postinumero),]
mean(merged_sorted$count, na.rm = TRUE)
median(merged_sorted$count, na.rm = TRUE)
# All 19 rows, used to label every centre-area postal code on the map
merged_top <- head(merged_sorted,19)
merged_top
ggplot() + geom_map(data = merged, aes(map_id = Postinumero, fill = count),
map = np_dist) +
#geom_polygon(data=np_dist, aes(x=lat, y=long), col='black') +
expand_limits(x = np_dist$long, y = np_dist$lat) +
scale_fill_gradient2(low = "white",
midpoint = 10000, high = "red", limits = c(0, 89561)) +
ggtitle("Counts of parking tickets in Helsinki region postal codes 2014-2017") +
geom_text(data = merged_top, aes(x = clong, y = clat, label = Postinumero, size = 3), size = 3, col = "darkgrey")
#After we know where to (not) park, we should also know when to (not) park
library(dplyr)
# Tickets per year
t1 <- pysakointivirheet %>%
  group_by(Virheen.tekovuosi) %>%
  summarise(count = n())
# Tickets per month within each year
t2 <- pysakointivirheet %>%
  group_by(Virheen.tekokuukausi,Virheen.tekovuosi) %>%
  summarise(count = n())
t2 <- t2[order(t2$count),]
sd(t2$count)
plot(t1)
# Bar chart of the yearly totals.
# BUG FIX: the original called barplot() on an undefined object `counts`.
barplot(t1$count, names.arg = t1$Virheen.tekovuosi)
plot(t2)
summary(t1)
summary(t2)
barplot(t2$count, legend=t2$Virheen.tekokuukausi)
# Monthly totals as a ggplot bar chart. Columns are referenced via aes()
# so ggplot evaluates them inside the data, and the plot gets its own name
# instead of clobbering the postal-code count table `p` created earlier.
p_month <- ggplot(data=t2, aes(x=Virheen.tekokuukausi, y=count)) +
  geom_bar(stat="identity")+
  scale_fill_brewer(palette="Blues")
p_month
#Let's look at the ticket types
library(dplyr)
library(tidyr)
# Tickets per issuer (Virheen.kirjaaja)
t4 <- pysakointivirheet %>%
group_by(Virheen.kirjaaja) %>%
summarise(count = n())
pie(t4$count, labels = t4$Virheen.kirjaaja)
# Split the main-class/main-reason column into a ticket-type code and a
# description, then count tickets per type (t4 is deliberately reused).
t4 <- pysakointivirheet %>%
separate(Virheen.pääluokka...pääsyy,c("Ticket type","description"))
t4 <- t4 %>%
group_by(`Ticket type`) %>%
summarise(count = n())
#Making a map of Helsinki with scatter dots of parking tickets
np_dist <- readShapeSpatial("PKS_postinumeroalueet_2017_shp.shp")
# The 19 city-centre postal codes used for this map
helsinki_postal_codes <- c("00100","00120","00130","00140","00150","00160","00170",
"00180","00200","00210","00220","00230","00240","00250",
"00260","00270","00280","00290","00300")
#Let's filter the data and the map to only include Helsinki "center" area
np_dist <- np_dist[np_dist$Posno %in% helsinki_postal_codes,]
np_dist <- fortify(np_dist, region = "Posno")
sakot_helsinki <- pysakointivirheet[pysakointivirheet$Postinumero %in% helsinki_postal_codes,]
summary(sakot_helsinki$y)
summary(sakot_helsinki$x)
#For some reason both the y and x coordinates include zeros let's get rid of them
# (zero coordinates presumably mean a missing location -- TODO confirm)
sakot_helsinki <- sakot_helsinki[sakot_helsinki$y != 0, ]
sakot_helsinki <- sakot_helsinki[sakot_helsinki$x != 0, ]
summary(sakot_helsinki$y)
summary(sakot_helsinki$x)
# First 20000 tickets (computed but unused below; the scatter plots the
# full sakot_helsinki)
sakot_helsinki_top <- head(sakot_helsinki,20000)
#plot(np_dist)
library(ggplot2)
library(ggmap)
#Plotting
ggplot() + geom_polygon(data = np_dist, aes(x=long, y = lat, fill = "posno", group = group),color="white",fill="gray") +
geom_point(data = sakot_helsinki, aes(x = x, y = y), color = "red", size = 0.1) +
guides(fill=TRUE) +
ggtitle("Parking tickets in Helsinki 2014-2017")
#Zooming in this image you can actually start to see the streets of Helsinki :)
#Moving to create visualizations solely about the postal code area 00100
np_dist <- readShapeSpatial("PKS_postinumeroalueet_2017_shp.shp")
postal_code <- "00100"
np_dist <- np_dist[np_dist$Posno %in% postal_code,]
#np_dist <- fortify(np_dist, region = "Posno")
sakot_postinumero <- pysakointivirheet[pysakointivirheet$Postinumero %in% postal_code,]
#We have to feature engineer the coordinates
# First drop rows whose coordinates are zero (treated as missing above)
sakot_postinumero <- sakot_postinumero[sakot_postinumero$y != 0, ]
sakot_postinumero <- sakot_postinumero[sakot_postinumero$x != 0, ]
# Scaled copies of the coordinates, used later only to build a lon/lat
# bounding box for ggmap (assumes x ~ 1e6*lon and y ~ 1e5*lat -- TODO
# confirm against the raw coordinate system).
sakot_postinumero$x_new <- sakot_postinumero$x / 1000000
sakot_postinumero$y_new <- sakot_postinumero$y / 100000
sakot_postinumero_top <- head(sakot_postinumero,80000)
#Plotting the scatter plot
ggplot() + geom_polygon(data = np_dist, aes(x=long, y = lat, group = group),color="white",fill="gray") +
geom_point(data = sakot_postinumero_top, aes(x = x, y = y), color = "red", size = 0.1) +
#guides(fill=TRUE) +
ggtitle("Parking tickets in 00100 2014-2017")
#Let's make a heatmap of 00100 parking tickets
# 2D kernel-density contours, filled from green (low) to red (high)
ggplot() + geom_polygon(data = np_dist, aes(x=long, y = lat, group = group),color="white",fill="gray") +
geom_density2d(data = sakot_postinumero_top, aes(x = x, y = y), size = 0.3) +
stat_density2d(data = sakot_postinumero_top,
aes(x = x, y = y, fill = ..level.., alpha = ..level..), size = 0.01,
bins = 20, geom = "polygon") +
scale_fill_gradient(low = "green", high = "red") +
scale_alpha(range = c(0, 0.7), guide = FALSE) +
ggtitle("Parking tickets in 00100 2014-2017")
#Which streets are the most common
# BUG FIX: the original pipeline ended with a dangling %>% that piped the
# summarise() result straight into the next assignment statement, which is
# not valid. arrange(desc(count)) replaces the separate base-R ordering.
kadut <- sakot_postinumero %>%
  group_by(Osoite) %>%
  summarise(count = n()) %>%
  arrange(desc(count))
head(kadut,10)
write.csv(kadut,"parkkisakot_kadut.csv")
#Now let's try to make a satellite image picture
library(ggmap)
# Bounding box from the scaled coordinates (f = padding fraction)
sbbox <- make_bbox(lon = sakot_postinumero$x_new, lat = sakot_postinumero$y_new, f=.1)
sbbox
coords <- c(mean(sakot_postinumero$x_new),mean(sakot_postinumero$y_new))
coords
# Hard-coded map centre (central Helsinki, lon/lat)
coords2 <- c(24.93837910000002,60.16985569999999)
sq_map <- get_map(location = coords2, maptype = "roadmap", source = "google",zoom=14)
ggmap(sq_map)
# NOTE(review): setwd() to a user-specific absolute path makes the script
# non-portable; prefer paths relative to a project root.
setwd("~/Information visualization/Pyoramaarat/Helsinki_liikennevaylat_avoin_data/Shape")
katukartta <- readShapeSpatial("~/Information visualization/Pyoramaarat/Helsinki_liikennevaylat_avoin_data/Shape/Hki_liikennevaylat.shp")
|
36469c1e06f8dddb2e64e2fbecd7bc5f797996a3 | c5fe61e230b75199e2036bd0a476525ae4bfeab6 | /run_analysis.R | 55b86d47e7dba9b2ad17be345ce3c74f5354ce6c | [] | no_license | DiogenesCFG/Getting_and_Cleaning_Data_Coursera_Project | 74b9f82a63c3e44e9f30bd2e2de58b8229e5f7fc | 9e5d50f5ea8e145ec116a87a01c7af0daadec9e8 | refs/heads/master | 2020-12-11T17:59:37.171094 | 2020-01-15T18:09:58 | 2020-01-15T18:09:58 | 233,918,158 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,976 | r | run_analysis.R | # Getting and Cleaning Data
# Course Project
library(dplyr)

# Download and unzip the data (each step is skipped when already present)
if(!file.exists(".\\Data")){dir.create(".\\Data")}
Url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
dest <- ".\\Data\\Dataset.zip"
if(!file.exists(dest)){download.file(Url, destfile = dest)}
if(!file.exists(".\\UCI HAR Dataset")){unzip(dest, exdir = ".\\Data")}

# Read feature names and activity labels
features <- read.table(".\\Data\\UCI HAR Dataset\\features.txt", stringsAsFactors = FALSE, col.names = c("n", "feat"))
actLabels <- read.table(".\\Data\\UCI HAR Dataset\\activity_labels.txt", stringsAsFactors = FALSE, col.names = c("class", "activity"))

# Read Test data
x_test <- read.table(".\\Data\\UCI HAR Dataset\\test\\X_test.txt", col.names = features$feat)
y_test <- read.table(".\\Data\\UCI HAR Dataset\\test\\y_test.txt", col.names = "classLabel")
s_test <- read.table(".\\Data\\UCI HAR Dataset\\test\\subject_test.txt", col.names = "subject")

# Read Train data
x_train <- read.table(".\\Data\\UCI HAR Dataset\\train\\X_train.txt", col.names = features$feat)
y_train <- read.table(".\\Data\\UCI HAR Dataset\\train\\y_train.txt", col.names = "classLabel")
s_train <- read.table(".\\Data\\UCI HAR Dataset\\train\\subject_train.txt", col.names = "subject")

# 1. Merge data.
# BUG FIX: x and y stack the TRAIN rows first, so subject must stack
# s_train first as well. The original used rbind(s_test, s_train), pairing
# every measurement row with the wrong subject id.
x <- rbind(x_train, x_test)
y <- rbind(y_train, y_test)
subject <- rbind(s_train, s_test)
mergedData <- cbind(subject, x, y)
# 2. Keep only the mean and standard-deviation measurements
# (plus the subject id and the activity class label).
mean_sd <- mergedData %>%
  select(subject, classLabel, contains("mean"), contains("std"))

# 3. Replace the numeric activity class with its descriptive name.
mean_sd$classLabel <- actLabels[mean_sd$classLabel, 2]

# 4. Label the data set with descriptive variable names.
names(mean_sd)
# According to the features_info.txt file:
# - t stands for Time
# - Acc stands for Accelerometer
# - Gyro stands for Gyroscope
# - f stands for frequency
# Each word starts with a capital letter so the parts stay readable.
names(mean_sd)[1:2] <- c("Subject", "Activity")
# Pattern -> replacement pairs, applied in this exact order (order matters:
# later patterns operate on text produced by earlier replacements).
relabel <- c(
  "^t"      = "Time",
  "^f"      = "Frequency",
  "Acc"     = "Accelerometer",
  "Gyro"    = "Gyroscope",
  "mean"    = "Mean",
  "std"     = "Std",
  "angle"   = "Angle",
  "gravity" = "Gravity",
  "Mag"     = "Magnitude",
  "\\."     = "",
  "X$"      = "\\-X",
  "Y$"      = "\\-Y",
  "Z$"      = "\\-Z"
)
for (pattern in names(relabel)) {
  names(mean_sd) <- gsub(pattern, relabel[[pattern]], names(mean_sd))
}

# 5. Tidy data set: the average of every variable for each
# activity/subject combination, written out as a text file.
Tidy <- group_by(mean_sd, Activity, Subject)
Tidy_mean <- summarize_all(Tidy, mean)
write.table(Tidy_mean, "Tidy_mean.txt", row.names = FALSE)
|
2b0321400dbd0a34246747efb53772b3a77ff2c8 | 0a90d775d6f2824bbd56b9b79a2366176c17fdac | /Bayesian Data Analysis/Assignment 2/Q2/Q2b.R | 15415e4872689c9cb5b8e0ff2eafe8c56fef19b5 | [] | no_license | S1889112/EdinburghMSc | f595a3897f0b3264fe869a74d7ede41ef2309501 | c546cf45f20d5508a0cae3788f00361f9dcc373e | refs/heads/master | 2022-05-04T11:41:01.420573 | 2022-04-29T08:32:35 | 2022-04-29T08:32:35 | 178,222,159 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,283 | r | Q2b.R | ## ===========================================================================##
#################################### 2b: Mod ###################################
## ===========================================================================##
#### Extract as vectors ####
# Response: per-cow parasite indicator (modelled with dbern below)
para <- cows$parasite
### Fixed effects (mean-centred via demean()) ###
# Cow-specific #
age <- demean(cows$age)
# Environment #
temp <- demean(cows$temp)
rain <- demean(cows$rain)
perm <- demean(cows$permeab)
height <- demean(cows$hight)
slop <- demean(cows$slope)
# Random-effect grouping factor: farm index
farm <- cows$farmID
#### Data ####
## Dimensions ##
n <- nrow(cows)   # number of observations
J <- max(farm)    # number of farms
## Prior ##
### Same vague Normal prior for every beta
beta.mu <- 0
beta.tau <- 0.01
## Hyperpriors ##
# Upper bound of the Uniform prior on the random-intercept sd (alpha)
sig.alpha.ub <- 20
## DATA LIST for JAGS ##
# (The original's redundant self-assignments, e.g. `para <- para`, were
# removed: they had no effect.)
data <- list(n = n, J = J,                                     # Loop idx
             para = para, age = age, temp = temp, rain = rain, # Covariates
             perm = perm, height = height, slop = slop,
             farm = farm,
             beta.mu = beta.mu, beta.tau = beta.tau,           # Priors
             sig.alpha.ub = sig.alpha.ub)                      # Hyperpriors
#### MODEL ####
# JAGS model: Bernoulli likelihood with a logit link, vague Normal priors
# on the fixed-effect coefficients, a Normal(0, tau.alpha) farm-level
# random intercept, and a Uniform(0, sig.alpha.ub) hyperprior on its sd.
# (The comments inside the string are JAGS comments and part of the model.)
modstr.2b <- "model{
# Likelihood
for (i in 1:n) {
para[i] ~ dbern(p[i])
# alpha is the random farm-specific intercept.
logit(p[i]) = b0 + alpha[farm[i]] + b1*age[i] + b2*temp[i] + b3*rain[i] +
b4*perm[i] + b5*height[i] + b6*slop[i]
}
# Priors
b0 ~ dnorm(beta.mu, beta.tau)
b1 ~ dnorm(beta.mu, beta.tau)
b2 ~ dnorm(beta.mu, beta.tau)
b3 ~ dnorm(beta.mu, beta.tau)
b4 ~ dnorm(beta.mu, beta.tau)
b5 ~ dnorm(beta.mu, beta.tau)
b6 ~ dnorm(beta.mu, beta.tau)
for (j in 1:J){
alpha[j] ~ dnorm(0, tau.alpha)
}
# Hyperpriors #
sig.alpha ~ dunif(0, sig.alpha.ub)
tau.alpha = pow(sig.alpha, -2)
}"
# Compile the model with three parallel chains.
m.2b <- jags.model(textConnection(modstr.2b), data = data, n.chains = 3)
# Monitor the fixed effects, the farm random intercepts and their sd.
# FIX: 'mu.alpha' was dropped from the monitor list -- the model defines no
# such node, and asking coda.samples() to monitor an undefined node makes
# rjags fail.
var.names <- c('b0','b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'alpha', 'sig.alpha')
update(m.2b, 50000)   # burn-in
# 12m: 150000 iterations
# 8.7m: 125000?
start_time <- Sys.time()
res.2b <- coda.samples(m.2b, variable.names = var.names, n.iter = 125000, thin = 100)
end_time <- Sys.time()
end_time - start_time
## Combine the three chains into a single mcmc object ##
# From runjags
combres.2b <- combine.mcmc(res.2b)
#### CHECK HOW MUCH TO THIN ####
# only run if this is 1  (manual switch for the thinning diagnostic below)
runthincheck <- 0
if (runthincheck == 1){
thincheck(res.2b, 'b2', 500, 50, dim(res.2b[[1]])[1])
abline(v = c(100, 200, 300, 400, 500), col = c('red', 'blue', 'green', 'purple', 'pink'))
}
#### CONVERGENCE ####
gelman.diag(res.2b) # All 1, upper CI 1.02
effectiveSize(combres.2b) # Minimum of 1800
# Trace plots for all monitored parameters in a 5 x 6 grid
png('Q2/Q2bTrace.png', width = 1200, height = 800)
par(mfrow = c(5, 6))
traceplot(res.2b)
par(mfrow=c(1,1))
dev.off()
#### RESULTS ####
# Posterior summary table; results.table() is defined elsewhere (not in
# this file).
restab.2b <- results.table(combres.2b)
png('Q2/Q2bResults.png', width = 800, height = 600)
grid.table(restab.2b)
dev.off()
#### Further ####
# Posterior probability that b1 (the age coefficient in the model) is > 0
mean(combres.2b[, 'b1'] > 0)
|
a7c87e3bb08d1fbdf0394b632d21cd61fee18ca9 | 19749d7a0180996920b35ffb4aae218ea6f36975 | /scriptR/make_chipseq_boxplot_profile.R | 9c80107004e0f739e0cd782699685125a5425d75 | [
"Apache-2.0"
] | permissive | Guoshuai1314/LoopExtrusion | 40db7549f73fe4a59e3c8a240098324f596fd752 | 36a7fca573715850a890bf38d0ab3ef880f977cb | refs/heads/main | 2023-01-31T18:51:40.603627 | 2020-12-16T14:05:20 | 2020-12-16T14:05:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,560 | r | make_chipseq_boxplot_profile.R | require(dplyr)
require(rtracklayer)
require(ggplot2)
require(plyranges)
library("BSgenome.Hsapiens.UCSC.hg19")
# hg19 chromosome lengths (not used further below; kept for reference)
seqlens = seqlengths( Hsapiens );
# Project helpers -- presumably defines ParaleliseViewboxplot() and
# ParaleliseViewprofile() used below; confirm in src/functions.R.
source("src/functions.R")
#Load AsiSI locations
asi = import.bed("/home/rochevin/Documents/PROJET_THESE/CLASSIF_HR_NHEJ/data/BED/ASIsites_hg19.bed")
bless80 = import.bed("/home/rochevin/Documents/PROJET_THESE/CLASSIF_HR_NHEJ/data/BED/BLESS_80best_JunFragPE_Rmdups_pm500bp.bed")
HR = import.bed("/home/rochevin/Documents/PROJET_THESE/CLASSIF_HR_NHEJ/data/BED/BLESS_HR_JunFragPE_Rmdups_pm500bp.bed")
NHEJ = import.bed("/home/rochevin/Documents/PROJET_THESE/CLASSIF_HR_NHEJ/data/BED/BLESS_NHEJ_JunFragPE_Rmdups_pm500bp.bed")
Random80 = import.bed("/home/rochevin/Documents/PROJET_THESE/CLASSIF_HR_NHEJ/data/BED/80random.bed")
Random30 = import.bed("/home/rochevin/Documents/PROJET_THESE/CLASSIF_HR_NHEJ/data/BED/30random.bed")
# AsiSI sites absent from the BLESS 80-best set are treated as uncut
uncut <- asi[!asi$name %in% bless80$name]
list.sites <- list("cut"=bless80,"uncut"=uncut,"HR"=HR,"NHEJ"=NHEJ,"Random80"=Random80,"Random30"=Random30)
#Get information from snakemake
window <- 2000
span <- 5
# Rad21 ChIP-seq coverage tracks, two conditions (DIvA and OHT per the
# file names), loaded as RleList for fast interval lookups
wigs <- lapply(c("/mnt/NAS1/DATA/HIGH_THROUGHPUT_GENOMICS_DIvA/ChIP-Seq/Clouaire_HLNKYBGXC_SCC1//PROCESSED/mapping/EXPERIMENT/BIGWIG/HLNKYBGXC_Pool_ChIP-seq_legube_19s004478-1-1_Clouaire_lane1Rad21DIvA_sequence.exp_spikeinfactor.bw",
"/mnt/NAS1/DATA/HIGH_THROUGHPUT_GENOMICS_DIvA/ChIP-Seq/Clouaire_HLNKYBGXC_SCC1//PROCESSED/mapping/EXPERIMENT/BIGWIG/HLNKYBGXC_Pool_ChIP-seq_legube_19s004478-1-1_Clouaire_lane1Rad21OHT_sequence.exp_spikeinfactor.bw"),import.bw,as="RleList")
names(wigs) <- c("Rad21_DIvA","Rad21_OHT")
# Per-track, per-site-class values for boxplots and average profiles;
# one parallel worker per coverage track
dat.boxplot.all <- mclapply(wigs,function(wig){
lapply(names(list.sites), ParaleliseViewboxplot,one.w=wig,list.sites=list.sites) %>%bind_rows()
},mc.cores = length(wigs)) %>% bind_rows(.id = "Condition")
prof.dat <- mclapply(wigs,function(wig){
lapply(names(list.sites), ParaleliseViewprofile,one.w=wig,list.sites=list.sites) %>%bind_rows()
},mc.cores = length(wigs)) %>% bind_rows(.id = "Condition")
# Shared palettes and title parts for the four figures below
cutcolors <- c("#FDBECD","black","#BEBEBE")
HRNHEJ <- c("#F5AB35","#049372","#BEBEBE")
filename <- "RAD21"
windowname <- "4kb"
#cutvsuncutvsrandom
# BUG FIX: labs() takes *named* arguments; the original labs(list(...))
# wrapped them in one unnamed list, so no label was actually set (and this
# errors in current ggplot2 versions).
p1 <- prof.dat %>%
  filter(Type %in% c("cut","uncut","Random80")) %>%
  ggplot(aes(Windows,Value,colour = Type)) +
  labs(title = "", x = "", y = "") +
  geom_line() +
  facet_wrap(~ Condition,ncol = 1,scales = "free_x") +
  scale_colour_manual(values=cutcolors) +
  theme_classic() +
  ggtitle(paste(filename,windowname,sep="_"))
b1 <- dat.boxplot.all %>%
  filter(Type %in% c("cut","uncut","Random80")) %>%
  ggplot(aes(Type,score,fill = Type)) +
  labs(title = "", x = "", y = "") +
  geom_boxplot() +
  facet_wrap(~ Condition,ncol = 2) +
  scale_fill_manual(values=cutcolors) +
  theme_classic() +
  ggtitle(paste(filename,windowname,sep="_"))
##HRvsNHEJ
p2 <- prof.dat %>%
  filter(Type %in% c("HR","NHEJ","Random30")) %>%
  ggplot(aes(Windows,Value,colour = Type)) +
  labs(title = "", x = "", y = "") +
  geom_line() +
  facet_wrap(~ Condition,ncol = 1,scales = "free_x") +
  scale_colour_manual(values=HRNHEJ) +
  theme_classic() +
  ggtitle(paste(filename,windowname,sep="_"))
b2 <- dat.boxplot.all %>%
  filter(Type %in% c("HR","NHEJ","Random30")) %>%
  ggplot(aes(Type,score,fill = Type)) +
  labs(title = "", x = "", y = "") +
  geom_boxplot() +
  facet_wrap(~ Condition,ncol = 2) +
  scale_fill_manual(values=HRNHEJ) +
  theme_classic() +
  ggtitle(paste(filename,windowname,sep="_"))
print(p1)
print(b1)
print(p2)
print(b2)
|
cfb765a03524218878728d1c0270c01fc2102670 | 37e0b227d4b478ccf4edd520a1fd1c07ca539000 | /man/style_flatten_to_inline.Rd | b4e51b3f261117068e684f42c1be81b1d7f58314 | [
"MIT"
] | permissive | coolbutuseless/cssparser | 4634e5b072f9fba9f4be134d5f567b2b6c6d2eb8 | a5a3765fd8b058ae82790045b7b2443f5cbf4d8b | refs/heads/main | 2023-08-25T05:18:46.576490 | 2021-11-02T09:25:55 | 2021-11-02T09:25:55 | 411,632,479 | 4 | 0 | null | null | null | null | UTF-8 | R | false | true | 555 | rd | style_flatten_to_inline.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apply-inline-style.R
\name{style_flatten_to_inline}
\alias{style_flatten_to_inline}
\title{Flatten a style to an inline string}
\usage{
style_flatten_to_inline(style)
}
\arguments{
\item{style}{a named list of property/value pairs}
}
\value{
single string suitable for a \code{style} attribute on an element
}
\description{
Flatten a style to an inline string
}
\examples{
\dontrun{
style_flatten_to_inline(list(color='black', border='1px')) # -> "color:black; border: 1px;"
}
}
|
c4f13fde3b7c1d10a9c12d3dd9fb466734feae22 | 3c258c7fe3244f4a41dea7d264098ac614eef19a | /man/parseExcludedControlConditions.Rd | 04fc80dce6b367de5d2871c85150a2f96fdb3159 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | USGS-R/repgen | 379be8577f3effbe7067e2f3dc5b5481ca69999e | 219615189fb054e3b421b6ffba4fdd9777494cfc | refs/heads/main | 2023-04-19T05:51:15.008674 | 2021-04-06T20:29:38 | 2021-04-06T20:29:38 | 31,678,130 | 10 | 25 | CC0-1.0 | 2023-04-07T23:10:19 | 2015-03-04T20:24:02 | R | UTF-8 | R | false | true | 470 | rd | parseExcludedControlConditions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-parse.r
\name{parseExcludedControlConditions}
\alias{parseExcludedControlConditions}
\title{Parse Excluded Control Conditions (VDI)}
\usage{
parseExcludedControlConditions(reportObject)
}
\arguments{
\item{reportObject}{The full report JSON object}
}
\description{
Default wrapper for the readExcludedControlConditions function
that handles errors thrown and returns the proper data.
}
|
87eaf6e1d6a856e658abe3e21f50bb8097fe499f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/TrialSize/examples/Cochran.Armitage.Trend.Rd.R | 9e4cdf64d2ec83e7571313639773fc4bbdc2a6f2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 375 | r | Cochran.Armitage.Trend.Rd.R | library(TrialSize)
### Name: Cochran.Armitage.Trend
### Title: Cochran-Armitage's Test for Trend
### Aliases: Cochran.Armitage.Trend
### Keywords: ~kwd1 ~kwd2
### ** Examples
# Response probabilities, dose scores and per-group sample sizes for the
# four dose groups (note: `pi` shadows base R's constant pi in this script).
pi <- c(0.1, 0.3, 0.5, 0.7)
di <- c(1, 2, 3, 4)
ni <- c(10, 10, 10, 10)
Example.11.5 <- Cochran.Armitage.Trend(alpha = 0.05, beta = 0.2,
                                       pi = pi, di = di, ni = ni, delta = 1)
Example.11.5
# 7.5 for one group. Total 28-32.
|
cfa8e9f9b462c48b7f56d66071c10529023fe99f | 6db7e92e04e293a0183e1c9a50cb0862fae41e8f | /plot4.R | 096a189e893fac05c274c917e6b59b8982d89a39 | [] | no_license | smodroni/ExData_Plotting1 | 2da44e7e3482f3184661af34e6a805a2073cc596 | e9ee4f4d1d0732fd460448948688fcdd5219547a | refs/heads/master | 2021-01-15T14:12:07.952221 | 2014-11-06T20:51:47 | 2014-11-06T20:51:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,111 | r | plot4.R | ## Read Dataset
# Read the full household power-consumption data ('?' marks missing values)
df_all <- read.table("./household_power_consumption.txt", sep=';', header=TRUE, stringsAsFactors=FALSE, na.strings="?", comment.char="", quote='\"')
## Convert date column
df_all$Date <- as.Date(df_all$Date, format="%d/%m/%Y")
## Subset to the two days of interest, then free the full table
data <- subset(df_all, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(df_all)
## Build a POSIXct date-time column from Date + Time
date_time <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(date_time)
## Plot the 2x2 panel.
## BUG FIX: the original referenced undefined objects (datetime,
## globalActivePower, voltage, subMetering1-3, globalReactivePower);
## the actual columns of `data` are used instead. legend() also had an
## empty `lty=` argument, now lty = 1 so the legend lines are drawn.
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
with(data, {
  plot(Datetime, Global_active_power, type="l", xlab="", ylab="Global Active Power", cex=0.2)
  plot(Datetime, Voltage, type="l", xlab="datetime", ylab="Voltage")
  plot(Datetime, Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
  lines(Datetime, Sub_metering_2, type="l", col="red")
  lines(Datetime, Sub_metering_3, type="l", col="blue")
  legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty=1, lwd=2.5, col=c("black", "red", "blue"), bty="o")
  plot(Datetime, Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
})
dev.off() |
c6e25b8daa7c923e28e4ca99106bfb817b4af66e | 2498a6fa69bcbd6dcb20d90b5817602219a818ba | /Physio_parameters_analyses/Chlorophyll/Scripts/Spis.CBASSvsCLASSIC.colony.genotype.corr.ChlA_20200329.R | 71d114d117fa83bd0e342762429354ff59fa6924 | [] | no_license | reefgenomics/CBASSvsCLASSIC | 1a930faba7bb17323f43a414cb0b9188b76ff223 | 832b72e9a2d6e4bad5ddf00fa2d9b13ef7fdecf9 | refs/heads/master | 2022-12-24T21:02:29.959046 | 2020-07-06T05:39:36 | 2020-07-06T05:39:36 | 196,943,004 | 4 | 3 | null | 2022-12-08T05:24:32 | 2019-07-15T07:02:03 | R | UTF-8 | R | false | false | 3,512 | r | Spis.CBASSvsCLASSIC.colony.genotype.corr.ChlA_20200329.R | # Genotype ranking based on Clh A content results
# set working directory
# NOTE(review): a hard-coded setwd() makes the script machine-specific;
# consider running from the project root instead.
setwd("~/Documents/Barshis-project/03.SUMMER-CRUISE/04.Short-Long.term.heat.stress.experiment/09.Manuscript/Stats_GCB_CBASSvsCLASSIC/Chlorophyll/")
#libraries to load
library(tidyr)
library(tidyverse)
library(ggplot2)
library(ggpubr)
library(ggpmisc)
# read working table (tab-separated chlorophyll-A measurements)
chlA.data <- read.delim("./Raw.data/Spis.CBASSvsCLASSIC.ChlA.data_17032019.txt", header = TRUE, sep = "\t")
colnames(chlA.data)
# keep only certain columns
chlA_data_long <- chlA.data %>% select(experiment, reef.site, temp.intensity, genotype, chlA_ug_cm2)
# reshape from long to wide: one column per temperature treatment
# (the original comment stated the direction backwards)
chlA_data_wide <- spread(chlA_data_long, temp.intensity, chlA_ug_cm2)
# calculate the delta high: Chl A change under heat vs control
chlA_data_wide$deltahigh <- (chlA_data_wide$High - chlA_data_wide$Control)
# genotype correlation cbass vs classic - High ChlA content delta
# One row per genotype: CBASS delta on x, CLASSIC delta on y.
# NOTE(review): this assumes the CBASS and CLASSIC subsets are ordered
# identically by genotype -- confirm before trusting the pairing.
high <- data.frame(genotype=chlA_data_wide$genotype[chlA_data_wide$experiment=="CBASS"],
delta_high_x=chlA_data_wide$deltahigh[chlA_data_wide$experiment=="CBASS"],
delta_high_y=chlA_data_wide$deltahigh[chlA_data_wide$experiment=="CLASSIC"],
reef.site=chlA_data_wide$reef.site[chlA_data_wide$experiment=="CBASS"])
# correlation model - High ChlA content delta
cbass.vs.classic_hightemp_corr <- cor.test(x = high$delta_high_x, y = high$delta_high_y, method = "pearson")
cbass.vs.classic_hightemp_corr$p.value #0.8094154
# FIX: regress CLASSIC (y) on CBASS (x) so the model matches the y ~ x
# smoother drawn in the plot below; the original fitted lm(x ~ y).
# R-squared and the p-value are identical either way for a simple
# regression, so the reported values below still hold.
# (Also fixed the "classixc" typo in the local object names.)
cbass.vs.classic_hightemp_lm <- lm(delta_high_y ~ delta_high_x, data = high)
summary(cbass.vs.classic_hightemp_lm)
# Multiple R-squared:  0.005518
# p-value: 0.8094
# plot
# Scatter of per-genotype Chl A deltas (CBASS on x, CLASSIC on y) with an
# OLS fit, equation / R^2 / p-value annotations, per-site colours, and
# genotype labels; saved as a PDF at the end.
Chlhigh <- ggplot(high,aes(x=delta_high_x,y=delta_high_y)) +
theme_classic() +
stat_smooth(method = "lm",formula = y ~ x, se = TRUE, level = 0.95, na.rm = TRUE, colour="grey40")+
stat_poly_eq(aes(label= paste(..eq.label..)), npcx = "right", npcy = 0.2, formula = y ~ x, parse=TRUE, size = 4)+
stat_poly_eq(aes(label= paste(..rr.label..)), npcx = "right", npcy = 0.15, formula = y ~ x, parse=TRUE, size = 4)+
stat_fit_glance(method = 'lm', method.args = list(formula=y ~ x), aes(label = paste("P-value = ", signif(..p.value.., digits = 3), sep = "")),npcx = "right", npcy = 0.1, size = 4)+
geom_point(aes(color=reef.site, shape=reef.site), size = 8, alpha=0.8)+
theme(legend.position = 'bottom')+
scale_x_continuous(limits = c(-2, 1.5))+
scale_y_continuous(limits = c(-3, 0.1))+
ggtitle("Chl. A content (HighTemp - ControlTemp)") + xlab("delta Chl. A (ug/cm2) CBASS") + ylab("delta Chl. A (ug/cm2) CLASSIC")+
geom_text(aes(label=genotype), vjust=0, size=3)+
theme(line= element_line(size = 1),
axis.line = element_line(colour = "grey20"),
axis.text.x = element_text(color = "grey20", size = 12, angle = 0, hjust = .5, vjust = .5, face = "plain"),
axis.text.y = element_text(color = "grey20", size = 12, angle = 0, hjust = .5, vjust = .5, face = "plain"),
axis.title.x = element_text(color = "grey20", size = 12, angle = 0, hjust = .5, vjust = 0, face = "plain"),
axis.title.y = element_text(color = "grey20", size = 12, angle = 90, hjust = .5, vjust = .5, face = "plain"),
legend.title = element_text(colour="grey20", size=12, face="bold"),
legend.text = element_text(colour="grey20", size=12, face="plain")) + scale_color_manual(values=c("#56B4E9", "#E69F00")) + scale_shape_manual(values=c(16, 16))
Chlhigh
ggsave("./genotype.ChlA/Plots/genotyperank_deltahighChlA_20200329.pdf", width = 6, height = 6)
|
72e04da34269498876f326f83414d6ed4cd0a2be | 3c7b6c98f274ca4ef939481a04aaf4afaa7126a2 | /R-utils/xy.density.r | 0c481378e6ab61cb7f3ea43b27aeb847b7133a43 | [] | no_license | EDmodel/ED2 | 6c2ad50849739982cbf93798f0aeadb95f9ca540 | 8d4c3aff354ffb8fec57e35b62d1bd3c9e0a671c | refs/heads/master | 2023-07-26T04:36:05.535556 | 2023-06-12T17:20:07 | 2023-06-12T17:20:07 | 19,721,494 | 71 | 110 | null | 2023-06-12T17:20:09 | 2014-05-13T01:21:54 | Fortran | UTF-8 | R | false | false | 23,633 | r | xy.density.r | #==========================================================================================#
#==========================================================================================#
# This function plots a density point cloud that represents the point density at any #
# given area of the graph. #
#------------------------------------------------------------------------------------------#
xy.density <<- function( x
, y
, xlim = if (xlog){
range(pretty.log(x))
}else{
range(pretty(x))
}#end if (xlog)
, ylim = if (ylog){
range(pretty.log(y))
}else{
range(pretty(y))
}#end if (xlog)
, xlevels = NULL
, ylevels = NULL
, zlim = NULL
, xlog = FALSE
, ylog = FALSE
, nbins = 80
, colour.palette = cm.colors
, nlevels = 20
, plot.key = TRUE
, key.log = FALSE
, key.vertical = TRUE
, x.axis.options = NULL
, y.axis.options = NULL
, key.axis.options = NULL
, key.options = NULL
, sub.options = NULL
, main.title = NULL
, key.title = NULL
, plot.after = NULL
, legend.options = NULL
, edge.axes = FALSE
, oma = NULL
, omd = NULL
, f.key = 1/6
, f.leg = 1/6
, off.xlab = NULL
, off.right = NULL
, xaxs = "i"
, yaxs = "i"
, method = c("table","density")
, zignore = 0.0001
, mar.main = c(4.1,4.1,4.1,1.1)
, mar.key = NULL
, useRaster = ! (xlog || ylog)
, reparse = TRUE
, add = FALSE
, ...
){
#---------------------------------------------------------------------------------------#
# Find out whether x and y are both provided. #
#---------------------------------------------------------------------------------------#
if (missing(x) || missing(y)){
cat(" - x is missing: ",missing(x),"\n")
cat(" - y is missing: ",missing(y),"\n")
stop(" Both x and y must be provided.")
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Standardise method. #
#---------------------------------------------------------------------------------------#
method = match.arg(method)
off = (method %in% "density")
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# In case this plot is using and existing plot.window, make sure we use the window #
# settings. #
#---------------------------------------------------------------------------------------#
if (add){
#------ Retrieve direct settings. ---------------------------------------------------#
xlog = par("xlog")
ylog = par("ylog")
xa = par("usr")[1]
xz = par("usr")[2]
ya = par("usr")[3]
yz = par("usr")[4]
xaxs = par("xaxs")
yaxs = par("yaxs")
xfac = ifelse(test=xaxs %in% "r",yes=0.04,no=0.00)
yfac = ifelse(test=yaxs %in% "r",yes=0.04,no=0.00)
#------------------------------------------------------------------------------------#
#----- Find bounds in the X direction. ----------------------------------------------#
xlim = c( (1.+xfac)*xa + xfac*xz, xfac*xa + (1+xfac)*xz ) / (1.+2.*xfac)
ylim = c( (1.+yfac)*ya + yfac*yz, yfac*ya + (1+yfac)*yz ) / (1.+2.*yfac)
if (xlog) xlim=10^xlim
if (ylog) ylim=10^ylim
#------------------------------------------------------------------------------------#
}#end if
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Split the domain into bins, and count points. #
#---------------------------------------------------------------------------------------#
if (is.null(xlevels) && xlog){
xlwr = log(xlim[1])-eps()*diff(log(xlim))
xupr = log(xlim[2])+eps()*diff(log(xlim))
xlevels = exp(seq(from=xlwr,to=xupr,length.out=nbins+off))
xdens = exp(mid.points(log(xlevels)))
}else if (is.null(xlevels)){
xlwr = xlim[1]-eps()*diff(xlim)
xupr = xlim[2]+eps()*diff(xlim)
xlevels = seq(from=xlwr,to=xupr,length.out=nbins+off)
xdens = mid.points(xlevels)
}else if (xlog){
xlwr = min(log(xlevels),na.rm=TRUE)
xupr = max(log(xlevels),na.rm=TRUE)
xdens = exp(mid.points(log(xlevels)))
}else{
xlwr = min(xlevels,na.rm=TRUE)
xupr = max(xlevels,na.rm=TRUE)
xdens = mid.points(xlevels)
}#end if (is.null(xlevels))
if (is.null(ylevels) && ylog){
ylwr = log(ylim[1])-eps()*diff(log(ylim))
yupr = log(ylim[2])+eps()*diff(log(ylim))
ylevels = exp(seq(from=ylwr,to=yupr,length.out=nbins+off))
ydens = exp(mid.points(log(ylevels)))
}else if (is.null(ylevels)){
ylwr = ylim[1]-eps()*diff(ylim)
yupr = ylim[2]+eps()*diff(ylim)
ylevels = seq(from=ylwr,to=yupr,length.out=nbins+off)
ydens = mid.points(ylevels)
}else if (ylog){
ylwr = min(log(ylevels),na.rm=TRUE)
yupr = max(log(ylevels),na.rm=TRUE)
ydens = exp(mid.points(log(ylevels)))
}else{
ylwr = min(ylevels,na.rm=TRUE)
yupr = max(ylevels,na.rm=TRUE)
ydens = mid.points(ylevels)
}#end if (is.null(ylevels))
#---------------------------------------------------------------------------------------#
#------ Cut x and y points into the bins, then use table to count occurrences. ---------#
if (method %in% "table"){
xcut = as.integer(cut(x,breaks=xlevels,labels=xdens))
ycut = as.integer(cut(y,breaks=ylevels,labels=ydens))
ztable = table(xcut,ycut)
idx = cbind( row = as.integer(rownames(ztable)[row(ztable)])
, col = as.integer(colnames(ztable)[col(ztable)])
)#end cbind
zdens = matrix(data=0,nrow=length(xdens),ncol=length(ydens))
zdens[idx] = c(as.matrix(ztable))
zdens = 100. * zdens / sum(zdens)
}else{
xkde = if(xlog){log(x)}else{x}
ykde = if(ylog){log(y)}else{y}
zdens = kde2d(x=xkde,y=ykde,n=nbins,lims=c(xlwr,xupr,ylwr,yupr))
zdens = 100. * zdens[[3]] / sum(zdens[[3]])
}#end if (method %in% "table")
zlwr = zignore * max(zdens,na.rm=TRUE)
zdens = 0. * zdens + ifelse(test=zdens %>=% zlwr,yes=zdens,no=0.)
#---------------------------------------------------------------------------------------#
#------ Find colour levels. ------------------------------------------------------------#
if (key.log){
if (is.null(zlim)){
zlim = range(pretty.log(zdens))
}#end if (is.null(zlim))
clevels = sort(unique(pretty.log(x=zlim,n=nlevels,forcelog=TRUE)))
}else{
if (is.null(zlim)){
zlim = range(pretty(zdens))
}#end if (is.null(zlim))
clevels = sort(unique(pretty(x=zlim,n=nlevels)))
}#end if
ccolours = colour.palette(length(clevels)-1)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Skip this part in case we are adding another plot. #
#---------------------------------------------------------------------------------------#
if (! add){
#------------------------------------------------------------------------------------#
# If legend is to be plotted, key.vertical has to be TRUE. In case the user #
# said otherwise, return a warning. Also, define offsets for X and Y according to #
# the legends and keys. #
#------------------------------------------------------------------------------------#
plot.legend = ! is.null(legend.options)
if ( plot.legend && (! key.vertical)){
warning(" key.vertical=FALSE ignored due to the legend.")
key.vertical = TRUE
}#end if
#------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------#
# Find key margins. #
#------------------------------------------------------------------------------------#
if (key.vertical && is.null(mar.key)){
mar.key = c(4.1,0.1,4.1,4.1)
}else if (is.null(mar.key)){
mar.key = c(4.1,4.1,0.6,1.1)
}#end if
#------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------#
# Coerce x, y, and key axis options, and key and main title options into lists. #
#------------------------------------------------------------------------------------#
if (is.null(x.axis.options)){
x.axis.options = list(side=1,las=1)
}else{
x.axis.options = as.list(x.axis.options)
if (! "side" %in% names(x.axis.options)){
x.axis.options = modifyList(x=x.axis.options,val=list(side=1))
}#end if (! "side" %in% names(x.axis.options))
}#end if
if (is.null(y.axis.options)){
y.axis.options = list(side=2,las=1)
}else{
y.axis.options = as.list(y.axis.options)
if (! "side" %in% names(y.axis.options)){
y.axis.options = modifyList(x=y.axis.options,val=list(side=2))
}#end if (! "side" %in% names(y.axis.options))
}#end if
if (is.null(key.axis.options)){
if (key.log){
zat = pretty.log(zlim)
zlabels = sprintf("%g",zat)
}else{
zat = pretty(zlim)
zlabels = sprintf("%g",zat)
}#end if
key.axis.options = list(side=ifelse(key.vertical,4,1),las=1,at=zat,labels=zlabels)
}else{
key.axis.options = as.list(key.axis.options)
if (! "side" %in% names(y.axis.options)){
key.axis.options = modifyList( x = key.axis.options
, val = list(side=ifelse(key.vertical,4,1))
)#end modifyList
}#end if (! "side" %in% names(y.axis.options))
}#end if
if (! is.null(key.title )) key.title = as.list(key.title )
if (! is.null(main.title)) main.title = as.list(main.title)
#------------------------------------------------------------------------------------#
#----- Save the margins to avoid losing the data. -----------------------------------#
par.orig = par(no.readonly=TRUE )
mar.orig = par.orig$mar
on.exit(par(par.orig))
par(par.user)
#------------------------------------------------------------------------------------#
#----- Check for outer margins. -----------------------------------------------------#
if ( (! is.null(oma)) && (! is.null(omd))){
stop ("You cannot provide both oma and omd!")
}else if (is.null(oma) && is.null(omd)){
par(oma=c(0,0,0,0))
}else if (is.null(omd)){
par(oma=oma)
}else{
par(omd=omd)
}#end if
#------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------#
# Find offset for x axis label and right, based on legends and keys and outer #
# margin . #
#------------------------------------------------------------------------------------#
par.tout = par(no.readonly=FALSE)
#----- Bottom margin. ---------------------------------------------------------------#
if (is.null(off.xlab)){
if (plot.legend && key.vertical){
off.xlab = with(par.tout,( omi[1] + f.leg * (din[2]-omi[1]-omi[3]) ) / din[2])
}else if (key.vertical){
off.xlab = with(par.tout,omi[1] / din[2])
}else{
off.xlab = with(par.tout,( omi[1] + f.key * (din[2]-omi[1]-omi[3]) ) / din[2])
}#end if
}#end if
#----- Right margin. ----------------------------------------------------------------#
if (is.null(off.right)){
if (key.vertical){
off.right = with(par.tout,( omi[4] + f.key * (din[1]-omi[2]-omi[4]) ) / din[1])
}else if (plot.legend){
off.right = with(par.tout,( omi[4] + f.leg * (din[1]-omi[2]-omi[4]) ) / din[1])
}else{
off.right = with(par.tout,omi[4] / din[1])
}#end if
}#end if
#------------------------------------------------------------------------------------#
#----- Split the screen into multiple pieces (legend, key, plots...) ----------------#
fh.panel = 1. - f.key
fv.panel = 1. - f.leg
if (plot.legend && plot.key){
layout( mat = rbind(c(3, 2),c(1,0))
, heights = c(fv.panel,f.leg)
, widths = c(fh.panel,f.key)
)#end layout
}else if (plot.key){
if (key.vertical){
layout(mat=cbind(2, 1), widths = c(fh.panel,f.key))
}else{
layout(mat=rbind(2, 1), heights = c(fh.panel,f.key))
}#end (if key.vertical)
}else if (plot.legend){
layout(mat=rbind(2,1), heights=c(fv.panel,f.leg))
}#end if (plot.legend)
#------------------------------------------------------------------------------------#
#====================================================================================#
#====================================================================================#
# First plot: the legend. #
#------------------------------------------------------------------------------------#
if (plot.legend){
par(mar = c(0.1,0.1,0.1,0.1))
plot.new()
plot.window(xlim=c(0,1),ylim=c(0,1))
do.call(what="legend",args=legend.options)
}#end if
#====================================================================================#
#====================================================================================#
#====================================================================================#
#====================================================================================#
# Second plot: the key scale. #
#------------------------------------------------------------------------------------#
if (plot.key){
par(mar = mar.key)
plot.new()
#---------------------------------------------------------------------------------#
# Plot in the horizontal or vertical depending on where the scale is going to #
# be plotted. #
#---------------------------------------------------------------------------------#
if (key.vertical){
#----- Decide whether the scale is logarithmic or not. ------------------------#
if (key.log){
plot.window(xlim=c(0,1),ylim=range(clevels),xaxs="i",yaxs="i",log="y")
}else{
plot.window(xlim=c(0,1),ylim=range(clevels),xaxs="i",yaxs="i")
}#end if
#------------------------------------------------------------------------------#
#----- Draw the colour bar. ---------------------------------------------------#
rect( xleft = 0
, ybottom = clevels[-length(clevels)]
, xright = 1
, ytop = clevels[-1]
, col = ccolours
, border = ccolours
)#end rect
#------------------------------------------------------------------------------#
}else{
#----- Decide whether the scale is logarithmic or not. ------------------------#
if (key.log){
plot.window(xlim=range(clevels),ylim=c(0,1),xaxs="i",yaxs="i",las=1,log="x")
}else{
plot.window(xlim=range(clevels),ylim=c(0,1),xaxs="i",yaxs="i",las=1)
}#end if
#------------------------------------------------------------------------------#
#----- Draw the colour bar. ---------------------------------------------------#
rect( xleft = clevels[-length(clevels)]
, ybottom = 0
, xright = clevels[-1]
, ytop = 1
, col = ccolours
, border = ccolours
)#end rect
#------------------------------------------------------------------------------#
}#end if
#---------------------------------------------------------------------------------#
#----- Plot the key axis. --------------------------------------------------------#
do.call (what="axis",args=key.axis.options)
#---------------------------------------------------------------------------------#
#----- Draw box. -----------------------------------------------------------------#
box()
#---------------------------------------------------------------------------------#
#----- Plot the title. -----------------------------------------------------------#
if (! is.null(key.title)) do.call(what="title",args=key.title)
#---------------------------------------------------------------------------------#
}#end if (plot.key)
#====================================================================================#
#====================================================================================#
#====================================================================================#
#====================================================================================#
# Plot the main panel. #
#------------------------------------------------------------------------------------#
plog = paste0(ifelse(xlog,"x",""),ifelse(ylog,"y",""))
par(mar = mar.main)
plot.new()
plot.window(xlim=xlim,ylim=ylim,log=plog,xaxs=xaxs,yaxs=yaxs,...)
zupr = zlim[1] + (1.-sqrt(.Machine$double.eps))*diff(zlim)
zdens = pmin(zupr,zdens) + ifelse(zdens %>% 0,0,NA) + 0. * zdens
xyz = list(x=xdens,y=ydens,z=zdens)
image(x=xyz,zlim=zlim,col=ccolours,breaks=clevels,add=TRUE,useRaster=useRaster)
#====================================================================================#
#====================================================================================#
}else{
#====================================================================================#
#====================================================================================#
# Plot the main panel on existing box. #
#------------------------------------------------------------------------------------#
zupr = zlim[1] + (1.-sqrt(.Machine$double.eps))*diff(zlim)
zdens = pmin(zupr,zdens) + ifelse(zdens %>% 0,0,NA) + 0. * zdens
xyz = list(x=xdens,y=ydens,z=zdens)
image(x=xyz,zlim=zlim,col=ccolours,breaks=clevels,add=TRUE,useRaster=useRaster)
#====================================================================================#
#====================================================================================#
}#end if (! add)
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot other options. Check use a shared list, or one list for each sub-plot. #
#---------------------------------------------------------------------------------------#
n.after = length(plot.after)
for (a in sequence(n.after)){
a.fun = names(plot.after)[a]
a.args = plot.after[[a]]
if (a.fun %in% "text" && reparse) a.args$labels = parse(text=a.args$labels)
do.call(what=a.fun,args=a.args)
}#end for
#---------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------#
# Plot axes and title annotation. #
#---------------------------------------------------------------------------------------#
if (! add){
do.call(what="axis" ,args=x.axis.options)
do.call(what="axis" ,args=y.axis.options)
do.call(what="title",args=main.title )
#------------------------------------------------------------------------------------#
#----- Lastly, add the box (so it stays on top). ------------------------------------#
box()
#------------------------------------------------------------------------------------#
}#end if (! add)
#---------------------------------------------------------------------------------------#
invisible()
}#end function xy.density
#==========================================================================================#
#==========================================================================================#
|
d39f79998e4298671d203448e4a0252e6073e65e | bfaee7d8f67771331470c8272db4bf7f83222cc2 | /man/add_rpgui_deps.Rd | 0021c7ae3061ec975055eb8333ad09ab9237d880 | [
"MIT"
] | permissive | bright-spark/shinyRPG | b6f7a7120fbbc387dbc4fb1322bb0967b7921b38 | a1fe30761ffd6469f28a0f92107d9613e9eccbe7 | refs/heads/main | 2023-06-15T22:41:38.793229 | 2021-07-13T20:00:01 | 2021-07-13T20:00:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 345 | rd | add_rpgui_deps.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rpgui-dependencies.R
\name{add_rpgui_deps}
\alias{add_rpgui_deps}
\title{rpgui dependencies utils}
\usage{
add_rpgui_deps(tag)
}
\arguments{
\item{tag}{Element to attach the dependencies.}
}
\description{
This function attaches rpgui dependencies to the given tag
}
|
e8b8df02389a020c51a774e7aef123a77f68335d | fd1b8a12ad1b6c50ccddf45a4b4093337bad8000 | /plot3.R | 2ca39131b35d14ecbd710a554b4b2eb0e6f384fa | [] | no_license | jimhark/ExData_Plotting1 | be0073806d3c2d8d3c854e0fcde7bde8f0ddab59 | d34d217ca54fc6aa2da41322c40b59337a93541f | refs/heads/master | 2021-01-09T09:06:34.026813 | 2015-01-11T21:50:46 | 2015-01-11T21:50:46 | 26,156,231 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 723 | r | plot3.R | power <- read.csv("household_power_consumption.txt", sep=";", na.strings="?")
# Keep only the two target days (dates are d/m/Y strings in the raw file)
selPower <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))
# Combine date and time into one "d/m/Y H:M:S" string per row.
# Fix: the original used a row-wise apply() with paste(collapse = " "), which
# coerces the data frame to a character matrix and loops per row; paste() is
# vectorised over the two columns and yields the same strings directly.
dateStrings <- paste(selPower$Date, selPower$Time)
selPower$datetime <- strptime(dateStrings, "%d/%m/%Y %H:%M:%S")
# Draw the three sub-metering series over time and save to a 480x480 PNG
png("plot3.png", width=480, height=480)
plot(selPower$datetime, selPower$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering")
lines(selPower$datetime, selPower$Sub_metering_2, type="l", col="red")
lines(selPower$datetime, selPower$Sub_metering_3, type="l", col="blue")
legend("topright", lwd=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"))
dev.off()
|
46ccfb1c76dc7cf5349106fcba13420d23281b3d | 48cf403a8eec541960a12c9af81e896f08fcdcc2 | /explAnalyse_pima/global.R | 37ef7724e26171ec9196753074c85d739ba129c4 | [] | no_license | evalehner/shinyStats | 41075f7fce73db031124e9d0e9b3d9aa6dddaa9c | 7c2a243fccaecaacf08926f0c3baeba46eb29c90 | refs/heads/master | 2020-05-31T15:52:42.108781 | 2019-06-27T14:58:07 | 2019-06-27T14:58:07 | 190,367,696 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,184 | r | global.R | library(shiny)
# Code that only needs to run once, outside the server function, to improve performance
# Note: the app only runs if `pima` is read in manually
# Combine the MASS Pima Indians test and training sets into one data frame
pimaTe <- MASS::Pima.te
pimaTr <- MASS::Pima.tr
pima <- rbind(pimaTe, pimaTr)
# Drop the factor column 8 (`type`, levels No/Yes) and re-append it recoded
# as numeric 0/1 (as.numeric gives 1/2, so subtract 1)
pima <- cbind(pima[,-8], type = as.numeric(pima$type)-1)
rownames_pima <- rownames(pima)
#Function for plotting Histogram and QQPlot
nice <- function(data_values, var_name) {
  # Side-by-side histogram (with kernel density overlays) and normal Q-Q plot
  # for one variable; `var_name` is used only for labelling.
  previous_par <- par(no.readonly = TRUE)
  layout(matrix(c(1, 2, 3), 1, 3, byrow = FALSE), respect = TRUE)
  # Histogram on the density scale with default and smoothed (adjust = 2) fits
  hist(data_values, main = paste("Plot of ", var_name),
       xlab = paste("[unit]", var_name), freq = FALSE)
  lines(density(data_values))
  lines(density(data_values, adjust = 2), col = 2)
  # Normal Q-Q plot plus reference line to judge normality
  qqnorm(data_values)
  qqline(data_values, col = 2)
  # Restore the caller's graphics settings
  par(previous_par)
}
#Funktion for Boxplot (optional)
boxplot_variable <- function(data_values, var_name) {
  # Single horizontal boxplot of one variable.
  # NOTE(review): `var_name` is currently unused — was a plot title intended?
  previous_par <- par(no.readonly = TRUE)
  layout(matrix(1, 1, 1, byrow = FALSE), respect = TRUE)
  boxplot(data_values, horizontal = TRUE)
  # Restore the caller's graphics settings
  par(previous_par)
}
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) {
  # Panel function for pairs(): writes the Pearson correlation of x and y in
  # the panel, scaled so stronger correlations are drawn larger.
  saved_usr <- par("usr")
  on.exit(par(saved_usr))
  par(usr = c(0, 1, 0, 1))
  r <- cor(x, y)
  # Format alongside a reference value so the digit count stays stable
  label <- paste0(prefix, format(c(r, 0.123456789), digits = digits)[1])
  if (missing(cex.cor)) {
    cex.cor <- 0.8 / strwidth(label)
  }
  text(0.5, 0.5, label, cex = cex.cor * abs(r))
}
# Funktion für correlation threshold
cor_threshold_vars <- function(data, cor_threshold) {
  # Return all variable pairs in `data` whose absolute pairwise (Pearson)
  # correlation exceeds `cor_threshold`.
  #
  # Args:
  #   data: a data frame of numeric columns.
  #   cor_threshold: absolute-correlation cutoff (e.g. 0.8).
  # Returns:
  #   A data frame with a single column `Correlations`, one row per distinct
  #   pair, formatted exactly as before: "<var i>  -  <var j>".
  #
  # Improvements over the previous version: the correlation matrix is computed
  # once instead of one cor() call per ordered pair; duplicate pairs are
  # avoided by visiting only the upper triangle instead of a bookkeeping data
  # frame grown with rbind(); and seq_len() avoids the 1:0 loop on degenerate
  # (zero/one column) input.
  cor_mat <- cor(data)
  var_names <- colnames(cor_mat)
  n_vars <- ncol(cor_mat)
  cor_var_vector <- character(0)
  if (n_vars >= 2) {
    for (i in seq_len(n_vars - 1)) {
      for (j in seq(i + 1, n_vars)) {
        if (abs(cor_mat[i, j]) > cor_threshold) {
          # paste() with the default sep = " " reproduces the original
          # "name  -  name" spacing exactly.
          cor_var_vector <- append(cor_var_vector,
                                   paste(var_names[i], " - ", var_names[j]))
        }
      }
    }
  }
  return(data.frame(Correlations = cor_var_vector))
}
#Function for logarithmic plots
logarithm_variable <- function(data_values, var_name) {
  # Histogram + Q-Q plot of the log-transformed variable.
  # "npreg" is shifted by 1 before logging — presumably because it can be 0
  # (log(0) = -Inf); TODO confirm.
  if (var_name == "npreg") {
    log_values <- log(data_values + 1)
  } else {
    log_values <- log(data_values)
  }
  previous_par <- par(no.readonly = TRUE)
  layout(matrix(c(1, 2, 3), 1, 3, byrow = FALSE), respect = TRUE)
  hist(log_values, main = paste("Plot of ", var_name),
       xlab = paste("log units", var_name), freq = FALSE)
  lines(density(log_values))
  lines(density(log_values, adjust = 2), col = 2)
  qqnorm(log_values)
  qqline(log_values, col = 2)
  # Restore the caller's graphics settings
  par(previous_par)
}
#Function for normalisation
normalized_variable <- function(data_values, var_name) {
  # Histogram + Q-Q plot of the z-score standardised variable.
  z_values <- (data_values - mean(data_values)) / sd(data_values)
  previous_par <- par(no.readonly = TRUE)
  layout(matrix(c(1, 2, 3), 1, 3, byrow = FALSE), respect = TRUE)
  hist(z_values, main = paste("Plot of ", var_name),
       xlab = paste("normalized units", var_name), freq = FALSE)
  lines(density(z_values))
  lines(density(z_values, adjust = 2), col = 2)
  qqnorm(z_values)
  qqline(z_values, col = 2)
  # Restore the caller's graphics settings
  par(previous_par)
}
#Function for polynomial
polynomial_variable <- function(data_values, var_name) {
  # Histogram + Q-Q plot of the squared variable.
  squared_values <- data_values^2
  previous_par <- par(no.readonly = TRUE)
  layout(matrix(c(1, 2, 3), 1, 3, byrow = FALSE), respect = TRUE)
  hist(squared_values, main = paste("Plot of ", var_name),
       xlab = paste("squared unit", var_name), freq = FALSE)
  lines(density(squared_values))
  lines(density(squared_values, adjust = 2), col = 2)
  qqnorm(squared_values)
  qqline(squared_values, col = 2)
  # Restore the caller's graphics settings
  par(previous_par)
}
# Funktion für transformieren der Daten für Modell
add_transformed_columns <- function(var_names, transform, df_to_append, df_to_extract) {
  # Append (optionally transformed) columns of `df_to_extract` to `df_to_append`.
  #
  # Args:
  #   var_names: character vector of column names in `df_to_extract`.
  #   transform: parallel character vector; each element one of "Not included",
  #     "Untransformed", "log", "normalized", "polynomial".
  #   df_to_append: data frame the selected columns are cbind()-ed onto.
  #   df_to_extract: data frame the raw columns are read from.
  # Returns:
  #   `df_to_append` with one extra column per variable not marked
  #   "Not included", each carrying the variable's original name.
  #
  # Fix: seq_along() replaces 1:length(var_names), which iterated c(1, 0)
  # (and failed) when `var_names` was empty.
  for (i in seq_along(var_names)) {
    kind <- transform[i]
    if (kind == "Not included") {
      next
    }
    raw_values <- df_to_extract[[var_names[i]]]
    new_col <- switch(kind,
      "Untransformed" = raw_values,
      "log"           = log(raw_values),
      "normalized"    = (raw_values - mean(raw_values)) / sd(raw_values),
      "polynomial"    = raw_values^2,
      NULL  # unknown transform values are skipped, as before
    )
    if (!is.null(new_col)) {
      new_df <- data.frame(new_col)
      names(new_df) <- var_names[i]
      df_to_append <- cbind(df_to_append, new_df)
    }
  }
  return(df_to_append)
}
|
ea264f82d9acc97c0c1e84c07fa5196638a6197b | 716a6f6ca0aa0d939fe3efe8b1018ae096737536 | /R/geohash.R | 501b2235f416574b932a8ab4e76265f5f6ea2db6 | [
"MIT"
] | permissive | harryprince/geohash | 2a476ea620b8a3297ffde7f8d9a54fb0a8e1553f | 4526221ee5e268642c279cdb2c0844bb78ca61ae | refs/heads/master | 2020-03-23T01:22:36.767405 | 2018-11-28T09:13:55 | 2018-11-28T09:13:55 | 140,913,645 | 0 | 0 | null | 2018-07-14T03:44:33 | 2018-07-14T03:44:32 | null | UTF-8 | R | false | false | 453 | r | geohash.R | #' @title Tools for handling URLs
#' @name geohash
#' @description The geohash package provides tools to encode lat/long pairs into geohashes, decode those geohashes,
#' and identify their neighbours.
#' @seealso the \href{https://cran.r-project.org/package=geohash/vignettes/geohash.html}{package vignette}.
#' @useDynLib geohash, .registration=TRUE
#' @importFrom Rcpp sourceCpp
#' @docType package
#' @aliases geohash geohash-package
NULL
|
4d39b44e5f17a755b1bbd54173d1468330234780 | 9719ea69f693adfddc62b27eaf948fc7b16f6ad0 | /tests/testthat/test-check_wastd_api.R | 3a95b33ca6bb5911bea69dfd97e3511264697ebb | [] | no_license | dbca-wa/wastdr | 49fe2fb1b8b1e518f6d38549ff12309de492a2ad | 5afb22d221d6d62f6482798d9108cca4c7736040 | refs/heads/master | 2022-11-18T01:00:41.039300 | 2022-11-16T08:32:12 | 2022-11-16T08:32:12 | 86,165,655 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 324 | r | test-check_wastd_api.R | test_that("wastd_works returns FALSE if unauthenticated", {
  # An HTTP 401 endpoint simulates missing/invalid credentials; warnings from
  # the failed request are suppressed so only the FALSE result is checked.
  suppressWarnings(
    expect_false(
      wastd_works(api_url = "http://httpstat.us/401")
    )
  )
})
test_that("odkc_works returns FALSE if unauthenticated", {
  # An unauthenticated (HTTP 401) ODK Central endpoint must report not working.
  unauthenticated_result <- odkc_works(url = "http://httpstat.us/401")
  expect_false(unauthenticated_result)
})
# usethis::use_r("check_wastd_api")
|
fec6ec5987a8cbf7383f366501856706f518dc04 | 79a6beed8e70869b1053cdf85fc13c50c58ffe7e | /MLSplayers-dirty/MLSplayers-majorclusters-63-withtweeners.R | 702117073ec8d8295a1981bc9c661669e0904fa9 | [] | no_license | mimburgi/SoccerStuff | 07cfe200f056d9257d28a2735d68f8ccd6573808 | 5c50a239f4b7f58be7cd0837a378d8e852d2cbee | refs/heads/master | 2022-11-27T21:29:17.312796 | 2020-08-05T01:57:04 | 2020-08-05T01:57:04 | 281,275,496 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,014 | r | MLSplayers-majorclusters-63-withtweeners.R | library(dplyr)
library(ggplot2)
library(factoextra)
library(tidyr)
library(ppclust)
source('soccer_util_fxns.R') #for plotclusters fxn
## concat data across both years ####
# Load per-player season summaries for 2019 ("curr") and 2018 ("prev"),
# sorted by player so rows line up across derived objects.
curr<-read.table('2019summary.txt', header = T) %>% arrange(Player)
# z-scale all numeric columns (column-wise mean 0 / sd 1)
currscaled<-curr %>% select_if(is.numeric) %>% scale() %>% as.data.frame()
# currscaled<-select(currscaled, -c(xGteamperc, shotteamperc, A3Passes, M3Passes, D3Passes)) %>%
#   select(-c(teamA3pass, teamM3pass, teamD3pass, teampass, teamxG, teamshots, xPlace, ShotDist, KPDist,
#             xGper, xAper, passteamperc))
# Clamp extreme low outliers at -3 standard deviations
currscaled[currscaled < -3]<- -3
prev<-read.table('2018summary.txt', header = T) %>% arrange(Player)
prevscaled<-prev %>% select_if(is.numeric) %>% scale() %>% as.data.frame()
# NOTE(review): prevscaled is NOT clamped at -3 like currscaled above —
# confirm whether that asymmetry is intentional.
# prevscaled<-select(prevscaled, -c(xGteamperc, shotteamperc, A3Passes, M3Passes, D3Passes)) %>%
#   select(-c(teamA3pass, teamM3pass, teamD3pass, teampass, teamxG, teamshots, xPlace, ShotDist, KPDist,
#             xGper, xAper, passteamperc))
#to separate out later
# Row indices of the 2019 players within the stacked data frame below
currrows<-c(1:nrow(currscaled))
bothscaledall<-rbind(currscaled, prevscaled)
## trim and explore ####
# Feature set used for clustering: shooting, assisting, build-up involvement,
# passing volume/style, and share of actions by pitch third.
usedvars<-c("shots", "KP", "xG", "xA", "percChain", "xGChain", "xB", 'ShotChainPerc',
            "KPChainPerc", "xBperc", "Vertical", "PassPct",
            "PassDistance", "xPassPerc", "Passes", "PassScore",
            "A3perc", "M3perc", "D3perc", "A3teamperc", "M3teamperc","D3teamperc")
# Column-index groups (positions within usedvars) used for the profile plots
shootvars<-c(1,3,8)
asvars<-c(2,4,9)
areavars<-c(17:22)
passnumvars<-c(5, 15)
indirectvars<-c(6,7,10)
passstylevars<-c(11, 12, 13, 14, 16)
# NOTE(review): passing a bare character vector to select() is deprecated in
# newer dplyr — all_of(usedvars) is the current form.
bothscaled<-select(bothscaledall, usedvars)
#make a graph of hopkins stats
# Clustering tendency (Hopkins statistic) for 2..18 candidate cluster counts;
# the first row is a placeholder that is dropped after the loop.
hopkinsdf<-data.frame(clusters=as.numeric(1), stat=as.numeric(NA))
for (i in 2:18){
  hopstat<-get_clust_tendency(bothscaled, i, graph = F)$hopkins_stat
  hopkinsdf[nrow(hopkinsdf) + 1,]<-c(i, hopstat)
}
hopkinsdf<-hopkinsdf[-1,]
ggplot(hopkinsdf, aes(x=clusters, y=stat)) + geom_point() + geom_line()
test<-get_clust_tendency(bothscaled, 4, graph = F)
test$hopkins_stat
## cluster ####
# #4
# clara4<-clara(bothscaled, 4)
# km4<-kmeans(bothscaled, 4)
# pam4<-kmeans(bothscaled, 4)
# hk4<-hkmeans(bothscaled, 4)
#
#
# #5
# clara5<-clara(bothscaled, 5)
# km5<-kmeans(bothscaled, 5)
# pam5<-kmeans(bothscaled, 5)
# hk5<-hkmeans(bothscaled, 5)
#
# #6
# clara6<-clara(bothscaled, 6)
# km6<-kmeans(bothscaled, 6)
# pam6<-kmeans(bothscaled, 6)
# Final model: hierarchical k-means (k = 6) to get stable centres, then
# fuzzy c-means seeded from those centres for soft memberships.
set.seed(0)
hk6<-hkmeans(bothscaled, 6)
set.seed(10)
fuzzy<-fcm(bothscaled, centers = hk6$centers)
# fuzzy2<-fcm(bothscaled, centers = hk6$centers,
# fixcent = T, nstart = 5)
# hk62<-hkmeans(bothscaled, 6, hc.method = 'complete')
#
#
# #7
# clara7<-clara(bothscaled, 7)
# km7<-kmeans(bothscaled, 7)
# pam7<-kmeans(bothscaled, 7)
# hk7<-hkmeans(bothscaled, 7)
# hk72<-hkmeans(bothscaled, 7, hc.method = 'complete')
#
# fviz_screeplot(PCA(bothscaled, scale.unit = F))
# decomp<-preProcess(bothscaled, method='pca', pcaComp=3) %>% predict(bothscaled)
#
# fviz_nbclust(decomp, kmeans, method = 'gap_stat')
#
# km6<-kmeans(decomp, 6, nstart = 10)
#
# plotclusters(bothscaled[,c(1:8)], km6$cluster)
## define clusters to be used for year-to-year comps ####
# compclusts<-as.factor(km4$cluster)
# compclusts<-as.factor(clara4$cluster)
# compclusts<-as.factor(pam4$cluster)
# compclusts<-as.factor(hk4$cluster)
#
# compclusts<-as.factor(km5$cluster)
# compclusts<-as.factor(clara5$cluster)
# compclusts<-as.factor(pam5$cluster)
# compclusts<-as.factor(hk5$cluster)
#
# compclusts<-as.factor(km6$cluster)
# compclusts<-as.factor(clara6$cluster)
# compclusts<-as.factor(pam6$cluster)
# compclusts<-as.factor(hk6$cluster)
# compclusts<-as.factor(hk62$cluster)
#
# compclusts<-as.factor(km7$cluster)
# compclusts<-as.factor(clara7$cluster)
# compclusts<-as.factor(pam7$cluster)
# compclusts<-as.factor(hk7$cluster)
# compclusts<-as.factor(hk72$cluster)
# Hard cluster assignment per player-season (2019 rows first, then 2018)
compclusts<-as.factor(fuzzy$cluster)
##name cluster levels ####
levels(compclusts)<-c('MF Recycler', 'MF creator','Hybrid Attacker',
                      'Defender', 'B2B support', 'Attacker')
## compare cluster assignments across years ####
curr$cluster<-compclusts[currrows]
prev$cluster<-compclusts[-currrows]
# Restrict to players appearing in both seasons, aligned by name
commonplayers<-intersect(prev$Player, curr$Player)
prevtest<-subset(prev, Player %in% commonplayers) %>% arrange(Player)
currtest<-subset(curr, Player %in% commonplayers) %>% arrange(Player)
# NOTE(review): droplevels() assumes Player is a factor (pre-R-4.0
# read.table default); it errors on character columns — confirm.
prevtest$Player<-droplevels(prevtest$Player)
currtest$Player<-droplevels(currtest$Player)
# Share of players assigned to the same cluster in both seasons
sum(prevtest$cluster == currtest$cluster)/length(prevtest$cluster)
#sum(prevtest$Player == currtest$Player)/length(prevtest$Player)
## confusion matrix ####
caret::confusionMatrix(prevtest$cluster, currtest$cluster)
## plot vars for each group across clusters ####
#shootvars, asvars, areavars,
#passnumvars, indirectvars, passtylevars
# Compare cluster profiles on each feature group (plotclusters() is sourced
# from soccer_util_fxns.R)
plotclusters(bothscaled[,shootvars], compclusts)
plotclusters(bothscaled[,asvars], compclusts)
plotclusters(bothscaled[,indirectvars], compclusts)
plotclusters(bothscaled[,passnumvars], compclusts)
plotclusters(bothscaled[,passstylevars], compclusts)
plotclusters(bothscaled[,areavars], compclusts)
## Position breakdown for each cluster ####
both<-rbind(curr, prev)
both$Pos<-as.factor(both$Pos)
# Within-cluster frequency of each listed position (freq sums to 1 per cluster)
ftable<- count(both, cluster, Pos, .drop = F) %>%
  group_by(cluster) %>%
  mutate(freq = n / sum(n))
# Heat map of the position mix per cluster
ggplot(ftable, aes(x=cluster, y=Pos, fill=freq)) + geom_tile() +
  scale_fill_gradient(low="darkgrey", high="darkred")
## set a tweener threshold from fuzzy cluster probs ####
# Players whose cluster assignment changed between the two seasons
misclassedplayers<-prevtest$Player[prevtest$cluster!=currtest$cluster]
threshold<-.1
# Fuzzy membership probabilities; the 2018 rows follow the 2019 block
fuzzyprobs<-as.data.frame(fuzzy$u)
fuzzyprobsprev<-fuzzyprobs[-currrows,]
# NOTE(review): these indices come from prevtest (common players only) but
# are applied to fuzzyprobsprev, which has one row per ALL 2018 players —
# the alignment only holds if every 2018 player also appears in 2019; confirm.
fuzzyprobstest<-fuzzyprobsprev[which(prevtest$Player %in% misclassedplayers),]
colnames(fuzzyprobstest)<-levels(compclusts)
# secondmax() comes from soccer_util_fxns.R — presumably the second-largest
# row value; confirm.
fuzzyprobstest$secondprobs<-apply((fuzzyprobstest),1,secondmax)
# NOTE(review): this max is taken after secondprobs was appended; harmless
# because secondprobs <= row max, but fragile if columns are reordered.
fuzzyprobstest$firstprobs<-apply(fuzzyprobstest,1,max)
# Flag "tweeners": players whose top-two memberships differ by < threshold
fuzzyprobstest$tweener<-0
fuzzyprobstest$tweener[(fuzzyprobstest$firstprobs-fuzzyprobstest$secondprobs) < threshold]<-1
sum(fuzzyprobstest$tweener)
|
9eb6ad85d66cc6ce21620160cccde6307b8c08cf | bbfa4b42ae55e599f39892dfd080f1ef790b8e1e | /final_analyses/script/functions/tibble_all_posteriors.R | d031175e8026b44250c8a4fe9d2bd1ffbdaa53f2 | [] | no_license | brophyj/tocilizumab_reanalysis | 67ebe548ecee76c98eff889a75202f2b9092b380 | 09aab5e5212c37fd98a13e196aef3779249a14f4 | refs/heads/master | 2023-04-16T18:49:46.954627 | 2021-05-14T01:09:14 | 2021-05-14T01:09:14 | 354,985,882 | 0 | 0 | null | 2021-04-13T13:25:09 | 2021-04-05T22:10:26 | null | UTF-8 | R | false | false | 1,211 | r | tibble_all_posteriors.R | tibble_all_posteriors = function(
noninformative_object, # Output from noninformative_posterior()
multiple_priors_object # Output from normal_approximation_multiple_priors()
) {
set.seed = 123 # set seed for reproducibility (rnorm())
n = 10e4 # sampling size
tibble(
# This is an output from rbeta() in noninformative_posterior()
"Non-informative" = noninformative_object %>%
summarise(delta = (toci - control)) %>% pull(),
"Evidence-based" = multiple_priors_object %>%
filter(type == "evidence-based") %>%
summarise(a = rnorm(n,
mean = post.mean,
sd = post.sd
)) %>% pull(),
"Skeptical" = multiple_priors_object %>%
filter(type == "skeptical") %>%
summarise(a = rnorm(n,
mean = post.mean,
sd = post.sd
)) %>% pull(),
"Optimistic" = multiple_priors_object %>%
filter(type == "optimistic") %>%
summarise(a = rnorm(n,
mean = post.mean,
sd = post.sd
)) %>% pull(),
"Pessimistic" = multiple_priors_object %>%
filter(type == "pessimistic") %>%
summarise(a = rnorm(n,
mean = post.mean,
sd = post.sd
)) %>% pull(),
)
} |
33c9d4b6bbf1d6b2437ef46831319cb788d2a0a9 | 4e011e5e2299be4c73542b0aaa035ae05c0b90d7 | /allplayerMLB.R | 7e9ef1deb1f46a4015f5b5449cd4621f96876592 | [] | no_license | renu9826/stattleship | 445b8c73e01de031588cdd6c8194dbb07227efe9 | 0d9d3f014dd5a1ec64c707276e420f9605f39869 | refs/heads/master | 2021-05-11T21:09:49.596281 | 2018-01-15T02:59:26 | 2018-01-15T02:59:26 | 117,462,548 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,219 | r | allplayerMLB.R | # Stattleship data for all the players
# Fetch all players from the Stattleship API.
# NOTE(review): hard-coded API token committed to source — consider reading it
# from an environment variable instead.
set_token('c96b55e55080b317913d10e8ef0565cd')
ep <- "Players"
q_body <- list()
# `sport` and `league` are assumed to be defined earlier in the session
Ply17 <- ss_get_result(sport = sport, league = league, ep = ep, query
                       = q_body, version = 1, walk = TRUE)
# Flatten the paginated responses into one data frame of players
Play17<- do.call("rbind", lapply(Ply17, function(x) x$players))
# NOTE(review): the result of this de-duplication is not assigned, so it has
# no effect — was `Play17 <- Play17[!duplicated(Play17),]` intended?
Play17[!duplicated(Play17),]
# On Stattleship 2016 and 2017 player data is together.
# work on Play 17
playall<- Play17[Play17$active== "TRUE",]
colnames(playall)
table(is.na(playall$last_name))
# Full name, used as the de-duplication key below
playall$player<- paste(playall$first_name,playall$last_name)
playall1<- playall[which(!duplicated(playall$player)==TRUE),]
colnames(playall1)
unique(playall1$player)
colnames(playall1)
# Keep only the columns of interest and standardise the id column name
playall2 <- playall1[,c(1,5,6,10,15:18,24:25,28,35:36,39:40)]
colnames(playall2)[1] <- "player_id"
playall2$pro_debut <- ymd(playall2$pro_debut)
# Drop players whose pro debut was in 2017.
# Fix: this previously read `playall3 <- playall3[...]`, referencing
# `playall3` before it existed; the filter is applied to `playall2`.
playall3 <- playall2[which(year(playall2$pro_debut)!="2017"),]
write.csv(playall3,file="playall3.csv")
#adding country of birth for all the players
playall4 <- join(playall3,cob2016)
playall4[which(is.na(playall4$cob)==TRUE),"cob"] <- "USA"
# Formatting coulmns
playall4$birth_date <- ymd(playall4$birth_date)
playall4$bats <- as.factor(playall4$bats)
playall4$cob <- as.factor(playall4$cob)
playall4$handedness <- as.factor(playall4$handedness)
playall4$height <- as.numeric(playall4$height)
playall4$position_name <- as.factor(playall4$position_name)
playall4$humanized_salary<- as.numeric(gsub(",","",playall4$humanized_salary))
playall4$weight <- as.numeric(playall4$weight)
playall4$years_of_experience <- as.numeric(playall4$years_of_experience)
write.csv(playall4,file="playall4.csv")
#combining all players with injuried players with all players
PLITplayer_1<- join(PLIT_2,playall4)
write.csv(PLITplayer_1,file="PLITplayer_1.csv")
PLITplayer_2 <- join(playall4,PLIT_2)
colnames(PLITplayer_2)
write.csv(PLITplayer_1,file="PLITplayer_2.csv")
# Getting Playerstats for 2016
colnames(PlayerStat16)
PS16_1<- PlayerStat16[,-c(2:5)]
PS16_2<- PS16_1[which(duplicated(PS16_1$player_id)==FALSE),]
write.csv(PS16_2,file="PS16_2.csv")
PS16_3 <- PS16_2[,c(4,6,8,9,10,20,36,56,62,63,65,70,74,75,80,82,86,105,118,124,125,130,135,138,141,142)]
write.csv(PS16_3,file="PS16_3.csv")
#Joining Stat with Players and Injury- 2016 all data
PLITpstat<- join(PLITplayer_2,PS16_3)
PLITpstat$age <- 2016-year(PLITpstat$birth_date)
PLITpstat$BMI <- (PLITpstat$weight*0.45)/((PLITpstat$height*0.025)^2)
PLITpstat$high_school<- ifelse(is.na(PLITpstat$high_school),"No","Yes")
PLITpstat$school <- ifelse(is.na(PLITpstat$school),"No","Yes")
write.csv(PLITpstat,file="PLITpstat.csv")
PLITpstat_2<- join(PLITplayer_2,PS16_3)
PLITpstat_2$age <- 2016-year(PLITpstat_2$birth_date)
PLITpstat_2$BMI <- (PLITpstat_2$weight*0.45)/((PLITpstat_2$height*0.025)^2)
# 0- no school ; 1- school
PLITpstat_2$high_school<- ifelse(is.na(PLITpstat_2$high_school),"No","Yes")
# 0- no school ; 1- undergrad school
PLITpstat_2$school <- ifelse(is.na(PLITpstat_2$school),"No","Yes")
write.csv(PLITpstat_2,file="PLITpstat_2.csv")
PLITpstat_2$Ilocation <-""
PLITpstat_2$Position <- ""
PLITpstat_2$Injury_stat <- ifelse(is.na(PLITpstat_2$injury_id),0,1)
table(PLITpstat_1$Ilocation)
table(PLITpstat_2$Injury_stat)
|
2780ee82b9ceb4eaf8844b9a28991bf70c06fdc1 | 4d322b8691d7beb2b3f88a3ba5544be0cf14930e | /cachematrix.R | 7cd0a0eff3b4b6631703db5f0a66f8af55b84b12 | [] | no_license | ricej2/ProgrammingAssignment2 | 44b06d925cb5f5189f98cd92bddc926aea7bb916 | 6aa3d4119aaae8bfa78cf9a4a2f831c07341a656 | refs/heads/master | 2021-01-22T01:18:01.903161 | 2015-09-26T23:21:08 | 2015-09-26T23:21:08 | 43,224,589 | 0 | 0 | null | 2015-09-26T21:57:24 | 2015-09-26T21:57:24 | null | UTF-8 | R | false | false | 1,980 | r | cachematrix.R | # Below are two functions that are used to create a special
# matrix that stores a matrix and cache's its inverse.
##This function creates a special "matrix" object that can cache its inverse
#The function will create and return functions that can:
#set the matrix, get the matrix, set and get the inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
#function to set matrix
set <- function(y){
x <<- y
i <<- NULL #store matrix in cache
}
get <- function() x #get matrix
setInverse <- function(solve) i<<- solve #set inverse matrix
getInverse <- function() i #get inverse matrix
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse) ## create list of functions
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
# cacheSolve take a custom matrix type created by the makeCacheMatrix function
# and calculates the inverse matrix of it
# but first it checks to see if the calculation has been done before
# if it has been done before it recalls the data from the cache. If it has not been done
# before it calculates the inverse matrix then store it in the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getInverse() #query the x matrix's cache
if(!is.null(i)){ #if there is a cache the inverse has been previously calculated
message("getting cached data") # sent message indicating this is just cache
return(i) # return the cache
}
data <- x$get() # get the matrix used by makeCacheMatrix function
i <- solve(data, ...) # calculate the inverse of the matrix
x$setInverse(i) # store the inverse matrix in cache using the makeCacheMatrix set function
i # return the inverse
}
|
5609dbf25320ef1a23e9e806afa47e7d8dcb072f | 38931341c376db16edddc151c9e01d4b7c33ac01 | /cachematrix.R | 1ac29272f9854f59d5a75cb8b3e07aff432bcb46 | [] | no_license | farawayfiend/ProgrammingAssignment2 | 4e182efed467c4440e4a17a00815575f8cdf156b | bd043ac020f7a22d04e9232b27554a6994fb8076 | refs/heads/master | 2021-01-17T04:41:54.435980 | 2014-08-24T18:00:21 | 2014-08-24T18:00:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,706 | r | cachematrix.R | ## makeCacheMatrix takes a matrix (must have an inverse) as an input
## and stores that matrix for use by the function cacheSolve. It also creates a
## list of subfunctions which are used by cacheSolve.
## cacheSolve takes the matrix stored in makeCacheMatrix and checks to see if an
## inverse has already been calculated. If the inverse has not been calculated,
## it calculates the inverse and returns it. If the inverse has already been
## calculated, it retrieves the inverse that was stored rather than recalculating.
## Stores a matrix and creates a list composed of functions used by cacheSolve
makeCacheMatrix <- function(x = matrix()) {
cachedinverse <- NULL
setmatrix <- function(y){ #not used in cacheSolvebut
x <<- y #retained so I can use cacheSolve
cachedinverse <<- NULL #on a new matrix without
} #rerunning the entire function
getmatrix <- function() {x}
setinverse <- function(matrixinverse) {cachedinverse <<- matrixinverse}
getinverse <- function() {cachedinverse}
list (setmatrix = setmatrix, getmatrix = getmatrix,
setinverse = setinverse, getinverse = getinverse)
}
## Returns the inverse of a matrix inputted in makeCacheMatrix
cacheSolve <- function(x, ...) {
cachedinverse <- x$getinverse()
if(!is.null(cachedinverse)) {
message("getting cached data")
return(cachedinverse)
}
tempmatrix <- x$getmatrix()
cachedinverse <- solve(tempmatrix, ...)
x$setinverse(cachedinverse)
cachedinverse
}
|
37d611eea2b24c2c8ebfc76cbb9fae8d1f885d13 | 8df3086384cee95612be523a7abd36f801ceae51 | /subst_func/make.pps.als.R | a50bdb8649d2f900c805ce8469fe79754ff64630 | [] | no_license | duchene/adequacy_comparisons | e74e2563c2ac78c49a5e61552ae9871996848af8 | 9350ffbbb7250eb294aaca77059ea397096b3346 | refs/heads/master | 2020-12-25T10:41:52.215801 | 2016-07-14T07:36:29 | 2016-07-14T07:36:29 | 59,931,512 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,502 | r | make.pps.als.R | # This function takes posterior log and trees files. It simulates alignments using the model parameters from the corresponding line in the log file. This function also requires the length of the alignment.
make.pps.als <- function(trees.file, log.file, N = 100, l = 1000, savetrees = F){
trees <- try(read.nexus(trees.file))
if(class(trees) == "try-error") trees <- try(read.tree(trees.file), silent = T)
if(class(trees) == "try-error") stop("Cannot read trees")
print(class(trees))
if(length(grep("[.]csv", log.file, value = T)) == 0){
logdat <- read.table(log.file, header = T, comment = "[")
} else {
logdat <- read.table(log.file, head = T, row.names = 1, sep = ",")
}
print(class(logdat))
if(length(trees) == N){
endburnin <- 1
} else {
endburnin <- round((length(trees) * 0.1), 0)
}
samp <- sample(endburnin:length(trees), N)
trees <- trees[samp]
logdat <- logdat[samp,]
#if("alpha" %in% colnames(logdat)){
# if(length(which(logdat$alpha < 0.01)) > 0 || length(which(logdat$alpha > 1.2)) > 0){
# #print("Extreme values of alpha have been modified to allow simulation.")
# logdat$alpha[which(logdat$alpha > 1.2)] <- 1.2
# for(i in 5:ncol(logdat)) logdat[,i][which(logdat[,i] < 0.01)] <- 0.5
# }
#}
if(savetrees){
write.tree(trees, file = paste0(trees.file, N, ".tre"))
write.csv(logdat, file = paste0(trees.file, N, ".csv"))
}
sim <- list()
for(i in 1:nrow(logdat)){
sim[[i]] <- list(phylogram = trees[[i]])
if(all(c("r.A...C.", "r.A...G.", "r.A...T.", "r.C...G.", "r.C...T.", "r.G...T.") %in% colnames(logdat))){
# GENERAL TIME REVERSIBLE (GTR)
#print("The substitution model is GTR")
basef <- c(logdat$pi.A.[i], logdat$pi.C.[i], logdat$pi.G.[i], logdat$pi.T.[i])
qmat <- c(logdat$r.A...C[i], logdat$r.A...G.[i], logdat$r.A...T.[i], logdat$r.C...G.[i], logdat$r.C...T.[i], logdat$r.G...T.[i])
#print(basef)
#print(qmat)
if("alpha" %in% colnames(logdat)){
rates = phangorn:::discrete.gamma(logdat$alpha[i], k = 4)
rates <- rates + 0.001
sim_dat_all<- lapply(rates, function(r) simSeq(sim[[i]][[1]], l = round(l/4, 0), Q = qmat, bf = basef, rate = r))
sim[[i]][[3]] <- c(sim_dat_all[[1]], sim_dat_all[[2]], sim_dat_all[[3]], sim_dat_all[[4]])
} else {
sim[[i]][[3]] <- simSeq(sim[[i]][[1]], Q = qmat, bf = basef, l = l)
}
#print("DATA SIMULATION PROCESSED")
} else if("kappa" %in% colnames(logdat)){
# HASEGAWA-KISHINO-YANO (HKY)
#print("The substitution model is HKY")
basef <- c(logdat$pi.A.[i], logdat$pi.C.[i], logdat$pi.G.[i], logdat$pi.T.[i])
qmat <- c(1, 2*logdat$kappa[i], 1, 1, 2*logdat$kappa[i], 1)
if("alpha" %in% colnames(logdat)){
rates = phangorn:::discrete.gamma(logdat$alpha[i], k = 4)
rates <- rates + 0.001
sim_dat_all<- lapply(rates, function(r) simSeq(sim[[i]][[1]], l = round(l/4, 0), Q = qmat, bf = basef, rate = r))
sim[[i]][[3]] <- c(sim_dat_all[[1]], sim_dat_all[[2]], sim_dat_all[[3]], sim_dat_all[[4]])
} else {
sim[[i]][[3]] <- simSeq(sim[[i]][[1]], Q = qmat, bf = basef, l = l)
}
} else {
# JUKES-CANTOR (JC)
#print("The substitution model is assumed to be JC")
sim[[i]][[3]] <- simSeq(sim[[i]][[1]], l = l)
}
}
return(sim)
} |
fec1d42ba674046c7a253ae6660938504d6edd50 | 77157987168fc6a0827df2ecdd55104813be77b1 | /orthoDr/R/predict.r | 98bcea86028b4aa0d6e4aeafef42cd381a84c7d8 | [] | no_license | akhikolla/updatedatatype-list2 | e8758b374f9a18fd3ef07664f1150e14a2e4c3d8 | a3a519440e02d89640c75207c73c1456cf86487d | refs/heads/master | 2023-03-21T13:17:13.762823 | 2021-03-20T15:46:49 | 2021-03-20T15:46:49 | 349,766,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,304 | r | predict.r | #' @title predict.orthoDr
#' @name predict.orthoDr
#' @description The prediction function for orthoDr fitted models
#' @param object A fitted orthoDr object
#' @param testx Testing data
#' @param ... ...
#' @return The predicted object
#' @examples
#' # generate some survival data
#' N = 100; P = 4; dataX = matrix(rnorm(N*P), N, P)
#' Y = exp(-1 + dataX[,1] + rnorm(N))
#' Censor = rbinom(N, 1, 0.8)
#'
#' # fit the model with keep.data = TRUE
#' orthoDr.fit = orthoDr_surv(dataX, Y, Censor, ndr = 1,
#' method = "dm", keep.data = TRUE)
#'
#' #predict 10 new observations
#' predict(orthoDr.fit, matrix(rnorm(10*P), 10, P))
#'
#' # generate some personalized dose scenario
#'
#' exampleset <- function(size,ncov){
#'
#' X = matrix(runif(size*ncov,-1,1),ncol=ncov)
#' A = runif(size,0,2)
#'
#' Edr = as.matrix(c(0.5,-0.5))
#'
#' D_opt = X %*% Edr + 1
#'
#' mu = 2 + 0.5*(X %*% Edr) - 7*abs(D_opt-A)
#'
#' R = rnorm(length(mu),mu,1)
#'
#' R = R - min(R)
#'
#' datainfo = list(X=X,A=A,R=R,D_opt=D_opt,mu=mu)
#' return(datainfo)
#' }
#'
#' # generate data
#'
#' set.seed(123)
#' n = 150
#' p = 2
#' ndr =1
#' train = exampleset(n,p)
#' test = exampleset(500,p)
#'
#' # the direct learning method
#' orthofit = orthoDr_pdose(train$X, train$A, train$R, ndr = ndr, lambda = 0.1,
#' method = "direct", K = as.integer(sqrt(n)), keep.data = TRUE,
#' maxitr = 150, verbose = FALSE, ncore = 2)
#'
#' predict(orthofit,test$X)
#'
#' # the pseudo direct learning method
#' orthofit = orthoDr_pdose(train$X, train$A, train$R, ndr = ndr, lambda = seq(0.1,0.2,0.01),
#' method = "pseudo_direct", K = as.integer(sqrt(n)), keep.data = TRUE,
#' maxitr = 150, verbose = FALSE, ncore = 2)
#'
#' predict(orthofit,test$X)
predict.orthoDr <- function(object, testx, ...)
{
# check test data
if (missing(testx)) stop("testx is missing")
if (!is.matrix(testx) || !is.numeric(testx)) stop("testx must be a numerical matrix")
if (class(object)[2] !="fit")
stop("This is not an orthoDr fitted object")
if (!object$keep.data)
stop("Need the original data for prediction. Please specify keep.data = TRUE in model fitting.")
# predict survival functions on the testing data
if (class(object)[3] == "surv")
pred = predict_orthoDr_surv(object, testx, ...)
# predict regression outcome on the testing data
if (class(object)[3] == "reg")
pred = predict_orthoDr_reg(object, testx, ...)
# predict rewards and optimal dose on the testing data
if (class(object)[3] == "pdose")
pred = predict_orthoDr_pdose(object, testx, ...)
class(pred) <- c("orthoDr", "predict", class(object)[3])
return(pred)
}
#' @title predict_orthoDr_surv
#' @name predict_orthoDr_surv
#' @description Internal prediction function for survival models
#' @param object fitted object
#' @param testx Testing data
#' @param ... ...
#' @return The predicted object
#' @keywords internal
predict_orthoDr_surv <- function(object, testx, ...)
{
# transform the covariates into the same scale
x = object$x
xscale = apply(x, 2, sd)
xmean = apply(x, 2, mean)
X = scale(x)
testX = sweep(testx, 2, xmean, FUN = "-")
testX = sweep(testX, 2, xscale, FUN = "/")
XB = X %*% object$B
testXB = testX %*% object$B
XBscale = apply(XB, 2, sd)
testXB = sweep(testXB, 2, XBscale, FUN = "/")
XB = scale(XB)/object$bw/sqrt(2)
testXB = testXB/object$bw/sqrt(2)
# kernel matrix between x and testx
testKernel = KernelDist_cross(testXB, XB)
# this method does not deal with ties. I need to fix this later on
testKernel = testKernel[, order(object$y), drop = FALSE]
Censor = (object$censor[order(object$y)] == 1)
inrisk = apply(testKernel, 1, cumsum)
totalweights = inrisk[nrow(X), , drop = FALSE]
inrisk = sweep(-inrisk, 2, totalweights, FUN = "+")
testKernel = sweep(testKernel, 2, Censor, FUN = "*" )
testKernel = t(testKernel)
lambda = 1 - testKernel / inrisk
lambda[is.na(lambda)] = 1
lambda[lambda > 1 | lambda < 0] = 1
S = apply(lambda, 2, cumprod)
surv = S[Censor, , drop = FALSE]
timepoints = sort(object$y[object$censor])
return(list("surv" = surv, "timepoints" = timepoints))
}
#' @title predict_orthoDr_reg
#' @name predict_orthoDr_reg
#' @description Internal prediction function for regression models
#' @param object fitted object
#' @param testx Testing data
#' @param ... ...
#' @return The predicted object
#' @keywords internal
predict_orthoDr_reg <- function(object, testx, ...)
{
# transform the covariates into the same scale
x = object$x
xscale = apply(x, 2, sd)
xmean = apply(x, 2, mean)
X = scale(x)
testX = sweep(testx, 2, xmean, FUN = "-")
testX = sweep(testX, 2, xscale, FUN = "/")
XB = X %*% object$B
testXB = testX %*% object$B
XBscale = apply(XB, 2, sd)
testXB = sweep(testXB, 2, XBscale, FUN = "/")
XB = scale(XB)/object$bw/sqrt(2)
testXB = testXB/object$bw/sqrt(2)
# kernel matrix between x and testx
testKernel = KernelDist_cross(testXB, XB)
# this method does not deal with ties. I need to fix this later on
pred = apply(testKernel, 1, function(w, x) weighted.mean(x, w), object$y)
return(list("pred" = pred))
}
#' @title predict_orthoDr_pdose
#' @name predict_orthoDr_pdose
#' @description Internal prediction function for personalized dose models
#' @param object fitted object
#' @param testx Testing data
#' @param ... ...
#' @return The predicted object
#' @keywords internal
predict_orthoDr_pdose <- function(object, testx, ...)
{
# check test data
if (missing(testx)) stop("testx is missing")
if (!is.matrix(testx) || !is.numeric(testx)) stop("testx must be a numercial matrix")
if (class(object)[2] !="fit")
stop("This is not an orthoDr fitted object")
if (!object$keep.data)
stop("Need the original data for prediction. Please specify keep.data = TRUE in model fitting.")
if (class(object)[3] == "pdose")
pred = dosepred(object$B, object$x, testx, object$bw, object$W)
return(list("pred" = pred))
}
|
d6fdf0daee96687428bcce8a1d2ef1f02ad4b03d | 850b3edb028a235685ee07c743cadaeefcc87021 | /man/expectreg.ls.Rd | 750c7ef88c073642f54b4e89b1da6ffd63b2742f | [] | no_license | amadoudiogobarry/expectreg | dbcc6e484b46252b659e76010411bdd2070fbbab | 6e5da23a3b5b4813aad45b6b2344427953f9094f | refs/heads/master | 2022-06-10T07:09:42.532527 | 2014-03-05T00:00:00 | 2014-03-05T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,423 | rd | expectreg.ls.Rd | \name{expectreg.ls}
\Rdversion{1.1}
\alias{expectreg.ls}
\alias{expectreg.qp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Expectile regression of additive models
}
\description{
Additive models are fitted with least asymmetrically weighted squares
or quadratic programming to obtain expectiles
for parametric, continuous, spatial and random effects.
}
\usage{
expectreg.ls(formula, data = NULL, estimate=c("laws","restricted","bundle","sheets"),
smooth = c("schall", "gcv", "cvgrid", "aic", "bic", "lcurve", "fixed"),
lambda = 1, expectiles = NA, ci = FALSE)
expectreg.qp(formula, data = NULL, id = NA, smooth = c("schall", "acv", "fixed"),
lambda = 1, expectiles = NA)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{formula}{
An R formula object consisting of the response variable, '~'
and the sum of all effects that should be taken into consideration.
Each effect has to be given through the function \code{\link{rb}}.
}
\item{data}{
Optional data frame containing the variables used in the model, if the data is not explicitely given in the
formula.
}
\item{id}{
Potential additional variable identifying individuals in a longitudinal data set. Allows for a random intercept estimation.
}
\item{estimate}{
Character string defining the estimation method that is used to fit the expectiles. Further detail on all available methods is given below.
}
\item{smooth}{
There are different smoothing algorithms that should prevent overfitting.
The 'schall' algorithm iterates the smoothing penalty \code{lambda} until it converges (REML),
the generalised cross-validation 'gcv' minimizes a score-function using \code{\link[stats]{nlm}} or with a grid search by 'cvgrid' or the function uses a fixed penalty. The numerical minimisatioin is also possible with AIC or BIC as score. The L-curve is a new experimental grid search.
}
\item{lambda}{
The fixed penalty can be adjusted. Also serves as starting value for
the smoothing algorithms.
}
\item{expectiles}{
In default setting, the expectiles (0.01,0.02,0.05,0.1,0.2,0.5,0.8,0.9,0.95,0.98,0.99) are calculated.
You may specify your own set of expectiles in a vector. The option may be set to 'density' for the calculation
of a dense set of expectiles that enhances the use of \code{\link{cdf.qp}} and \code{\link{cdf.bundle}} afterwards.
}
\item{ci}{
Whether a covariance matrix for confidence intervals and a \code{\link[=summary.expectreg]{summary}} is calculated.
}
}
\details{
In least asymmetrically weighted squares (LAWS) each expectile is fitted independently from the others.
LAWS minimizes:
\eqn{ S = \sum_{i=1}^{n}{ w_i(p)(y_i - \mu_i(p))^2} }
with
\eqn{ w_i(p) = p 1_{(y_i > \mu_i(p))} + (1-p) 1_{(y_i < \mu_i(p))} }.
The restricted version fits the 0.5 expectile at first and then the residuals.
Afterwards the other expectiles are fitted as deviation by a factor of the residuals from the mean expectile.
This algorithm is based on He(1997). The advantage is that expectile crossing cannot occur,
the disadvantage is a suboptimal fit in certain heteroscedastic settings.
Also, since the number of fits is significantly decreased, the restricted version is much faster.
The expectile bundle has a resemblence to the restricted regression. At first, a trend curve is fitted
and then an iteration is performed between fitting the residuals and calculating the deviation factors
for all the expectiles until the results are stable. Therefore this function shares the (dis)advantages
of the restricted.
The expectile sheets construct a p-spline basis for the expectiles and perform a continuous fit over all expectiles
by fitting the tensor product of the expectile spline basis and the basis of the covariates.
In consequence there will be most likely no crossing of expectiles but also a good fit in heteroscedastic scenarios.
"schall" smoothing does not yet work for sheets.
The function \code{expectreg.qp} also fits a sheet over all expectiles, but it uses quadratic programming with constraints,
so crossing of expectiles will definitely not happen. So far the function is implemented for one nonlinear or spatial covariate
and further parametric covariates. It works with all smoothing methods.
}
\value{
An object of class 'expectreg', which is basically a list consisting of:
\item{lambda }{The final smoothing parameters for all expectiles and for all effects in a list.
For the restricted and the bundle regression there are only the mean and the residual lambda.}
\item{intercepts }{The intercept for each expectile.}
\item{coefficients}{ A matrix of all the coefficients, for each base element
a row and for each expectile a column. }
\item{values}{ The fitted values for each observation and all expectiles,
separately in a list for each effect in the model,
sorted in order of ascending covariate values. }
\item{response}{ Vector of the response variable. }
\item{covariates}{ List with the values of the covariates. }
\item{formula}{ The formula object that was given to the function. }
\item{asymmetries}{ Vector of fitted expectile asymmetries as given by argument \code{expectiles}. }
\item{effects}{ List of characters giving the types of covariates. }
\item{helper}{ List of additional parameters like neighbourhood structure for spatial effects or 'phi' for kriging. }
\item{design}{ Complete design matrix. }
\item{fitted}{ Fitted values \eqn{ \hat{y} }. }
\code{\link[=plot.expectreg]{plot}}, \code{\link[=predict.expectreg]{predict}}, \code{\link[=resid.expectreg]{resid}},
\code{\link[=fitted.expectreg]{fitted}}, \code{\link[=effects.expectreg]{effects}}
and further convenient methods are available for class 'expectreg'.
}
\references{
Schnabel S and Eilers P (2009)
\emph{ Optimal expectile smoothing }
Computational Statistics and Data Analysis, 53:4168-4177
Sobotka F and Kneib T (2010)
\emph{ Geoadditive Expectile Regression }
Computational Statistics and Data Analysis,
doi: 10.1016/j.csda.2010.11.015.
Schnabel S and Eilers P (2011)
\emph{ Expectile sheets for joint estimation of expectile curves }
(under review at Statistical Modelling)
Frasso G and Eilers P (2013)
\emph{ Smoothing parameter selection using the L-curve}
(under review)
}
\author{
Fabian Sobotka, Thomas Kneib \cr
Georg August University Goettingen \cr
\url{http://www.uni-goettingen.de} \cr
Sabine Schnabel \cr
Wageningen University and Research Centre \cr
\url{http://www.wur.nl}
Paul Eilers \cr
Erasmus Medical Center Rotterdam \cr
\url{http://www.erasmusmc.nl}
Linda Schulze Waltrup, Goeran Kauermann \cr
Ludwig Maximilians University Muenchen \cr
\url{http://www.uni-muenchen.de} \cr
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{rb}}, \code{\link{expectreg.boost}}
}
\examples{
ex = expectreg.ls(dist ~ rb(speed),data=cars,smooth="b",lambda=5,expectiles=c(0.01,0.2,0.8,0.99))
ex = expectreg.ls(dist ~ rb(speed),data=cars,smooth="f",lambda=5,estimate="restricted")
plot(ex)
data("lidar", package = "SemiPar")
explaws <- expectreg.ls(logratio~rb(range,"pspline"),data=lidar,smooth="gcv",
expectiles=c(0.05,0.5,0.95))
print(explaws)
plot(explaws)
###expectile regression using a fixed penalty
plot(expectreg.ls(logratio~rb(range,"pspline"),data=lidar,smooth="fixed",
lambda=1,expectiles=c(0.05,0.25,0.75,0.95)))
plot(expectreg.ls(logratio~rb(range,"pspline"),data=lidar,smooth="fixed",
lambda=0.0000001,expectiles=c(0.05,0.25,0.75,0.95)))
#As can be seen in the plot, a too small penalty causes overfitting of the data.
plot(expectreg.ls(logratio~rb(range,"pspline"),data=lidar,smooth="fixed",
lambda=50,expectiles=c(0.05,0.25,0.75,0.95)))
#If the penalty parameter is chosen too large,
#the expectile curves are smooth but don't represent the data anymore.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ nonparametric }
\keyword{ smooth }% __ONLY ONE__ keyword per line
\keyword{ multivariate }
\keyword{ regression }
\keyword{ nonlinear }
\keyword{ models }
|
faaf1607dedf3b477a2ad2cc47b481934d2ef956 | bd9f6c883faec0af6fb2823ba7747710a56a0426 | /comtrade_checks/get_hs_codes.R | 1bb13f1b138391d49929f53339d3cab4539b4b95 | [] | no_license | lauradelduca/s3inventory | 1c12295d6e7e6b293f66a97a942c3994cf0b5158 | 1757bde4d889d9bfae8d96f7f8e90cf36ebc225c | refs/heads/master | 2021-04-06T08:28:38.404735 | 2018-10-21T22:18:22 | 2018-10-21T22:18:22 | 124,647,987 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,457 | r | get_hs_codes.R | ## Load HS codes from commodity_dictionary from AWS S3 for comtrade_check.R
## Laura Del Duca
## needs to have library aws.s3 and AWS S3 credentials loaded into R
obj <- get_object(object = 'data/1-TRADE/commodity_equivalents_final.csv', bucket = 'trase-storage')
hs <- read.csv(text = rawToChar(obj), sep = ';', quote = '',
colClasses = c("character", "character", "character",
"character", "character", "numeric", "character",
"character"))
hs6 <- as.vector(as.numeric(hs$code_value))
beef <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'BEEF']))))
chicken <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'CHICKEN']))))
corn <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'CORN']))))
cotton <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'COTTON']))))
leather <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'LEATHER']))))
pork <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'PORK']))))
timber <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'TIMBER']))))
woodpulp <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'WOOD PULP']))))
shrimps <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'SHRIMPS']))))
soy <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'SOYBEANS']))))
sugarcane <- as.vector(as.numeric(sort(unique(hs$code_value[hs$com_name == 'SUGAR CANE'])))) |
c32b198c5fc95c4d1531e674db367c0c93ca4ca0 | 6969293bedcc54b489cc1d22ccf38608e6da13a3 | /TitanicSurvivalPredictor.R | 1ad304753c52814b4d15e2f433d3646ec2f7de1c | [] | no_license | qasimir/TitanicSurvivalPredictor | 56848d36d984a0d203eb6dfc9e386a4e7cf61cb3 | 5e09905115f3e99eb6c7e00f7e36c3a646736e67 | refs/heads/master | 2021-07-07T02:06:34.773866 | 2017-10-02T00:42:27 | 2017-10-02T00:42:27 | 104,948,224 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,786 | r | TitanicSurvivalPredictor.R |
# Load the Kaggle Titanic data sets from the working directory.
trainingdata <- read.csv("TitanicSurvivalTrainingData.csv")
testingdata <- read.csv("TitanicSurvivalTestData.csv")

# Quick look at the size of the training set.
nrow(trainingdata) # number of rows
ncol(trainingdata) # number of columns
dim(trainingdata)  # both at once, the general case

# Counts and proportions of passengers who survived (1) vs. died (0).
table(trainingdata$Survived)
prop.table(table(trainingdata$Survived))

# 1st approximation: most passengers died, so predict death for everyone.
# BUG FIX: the prediction column needs one value per passenger (row), not
# per column -- the original used ncol(testingdata), which only filled the
# column correctly by accidental recycling.
testingdata$Survived <- rep(0, nrow(testingdata))

# Export the first approximation as a submission file.
firstApprox <- data.frame(PassengerID = testingdata$PassengerId, Survived = testingdata$Survived)
write.csv(firstApprox, file = "firstApproximation.csv", row.names = FALSE)
# 2nd approximation: break survival down by sex.
# Two-way table of counts: rows = sex, columns = survival outcome.
table(trainingdata$Sex, trainingdata$Survived)
# Row-wise proportions (the margin argument "1" means "by row"):
# survival rate within each sex.
prop.table(table(trainingdata$Sex, trainingdata$Survived), 1)

# Females were far more likely to survive, so predict survival for every
# female.  The remaining entries are already 0 from the first
# approximation, so nothing else needs to change.
testingdata$Survived[testingdata$Sex == 'female'] <- 1

# Export the second approximation.
scndapprox <- data.frame(passengerID = testingdata$PassengerId, Survived = testingdata$Survived)
write.csv(scndapprox, file = "secondApproximation.csv", row.names = FALSE)
# 3rd approximation: bring age (child status) and fare class into play.
summary(trainingdata$Age)

# Indicator for whether the passenger was a child (< 18).  Age contains
# NAs; with a length-1 replacement value R skips the NA positions, so
# those passengers simply keep Child == 0.
trainingdata$Child <- 0
trainingdata$Child[trainingdata$Age < 18] <- 1

# aggregate() subsets the frame for every Child/Sex combination
# (target variable on the left of the tilde, categories on the right).
# Number of survivors in each subset:
aggregate(Survived ~ Child + Sex, data = trainingdata, FUN = sum)
# Size of each subset, regardless of survival:
aggregate(Survived ~ Child + Sex, data = trainingdata, FUN = length)
# Proportion of survivors in each subset:
aggregate(Survived ~ Child + Sex, data = trainingdata, FUN = function(x) {sum(x)/length(x)})
# Female children did somewhat better, but that does not change the
# sex-based rule, so the child split adds nothing to the prediction.

# Bin the fares into coarse price bands.
trainingdata$FareType <- ">30"
trainingdata$FareType[trainingdata$Fare < 30] <- "20-30"
trainingdata$FareType[trainingdata$Fare < 20] <- "10-20"
trainingdata$FareType[trainingdata$Fare < 10] <- "<10"

# Survival rate for every fare-band / class / sex combination:
aggregate(Survived ~ FareType + Pclass + Sex, data = trainingdata, FUN = function(x) {sum(x)/length(x)})

# Third-class females who paid >= 20 dollars fared noticeably worse, so
# flip that subgroup back to "did not survive" for the third prediction.
testingdata$Survived[testingdata$Sex == 'female' & testingdata$Fare >= 20 & testingdata$Pclass == 3] <- 0
thirdapprox <- data.frame(passengerID = testingdata$PassengerId, Survived = testingdata$Survived)
# BUG FIX: the original wrote scndapprox to this file, so the third
# approximation was built but never actually exported.
write.csv(thirdapprox, file = "ThirdApproximation.csv", row.names = FALSE)
# Decision trees automate the work of splitting the data into segments.
# rpart = Recursive Partitioning and Regression Trees.
library(rpart)

# Optional packages for nicer tree visualisations:
# install.packages("rattle")
# install.packages("rpart.plot")
# install.packages("RColorBrewer")
# library(rattle)
# library(rpart.plot)
# library(RColorBrewer)

# Grow a classification tree on the raw covariates.  This behaves much
# like the aggregate() exploration above, but finds the splits itself.
fit <- rpart(
  Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
  data = trainingdata,
  method = "class"
)

# Draw the tree and label its nodes.
plot(fit)
text(fit)

# Fourth prediction: read survival straight off the fitted tree.
Prediction <- predict(fit, testingdata, type = "class")
submit <- data.frame(PassengerId = testingdata$PassengerId, Survived = Prediction)
write.csv(submit, file = "fourthOrderPrediction.csv", row.names = FALSE)
# Feature engineering: mine the Name column and the family structure.
# Drop the scratch columns from the earlier approximations and blank the
# test-set outcome so both frames have the same columns for rbind
# (rbind on data frames matches columns by name).
trainingdata$Child <- NULL
trainingdata$FareType <- NULL
testingdata$Survived <- NA
combined <- rbind(trainingdata, testingdata)

# Names were read in as factors; work on them as plain strings.
combined$Name <- as.character(combined$Name)

# Names look like "Surname, Title. Firstname", so splitting on comma and
# period leaves the title in the second piece.
strsplit(combined$Name[1], split="[,.]")[[1]][2] # example for the first name

# Extract the title for every passenger.  vapply pins the return type to
# one character string per name, unlike sapply, which can silently change
# its return shape on unusual input.
combined$Title <- vapply(combined$Name,
                         FUN = function(x) {strsplit(x, split="[,.]")[[1]][2]},
                         FUN.VALUE = character(1))
# Strip the leading space left over from the split.
combined$Title <- sub(" ", "", combined$Title)

# Collapse redundant and rare titles into broader groups:
# French Madame and Mademoiselle:
combined$Title[combined$Title %in% c("Mme","Mlle")] <- "Mlle"
# male honorifics:
combined$Title[combined$Title %in% c("Capt","Don","Jonkheer","Major","Col")] <- "Sir"
# female honorifics:
combined$Title[combined$Title %in% c("Dona","Lady","the Countess")] <- "Lady"

# Family size = siblings/spouses + parents/children + the passenger.
combined$FamilySize <- combined$SibSp + combined$Parch + 1

# Group passengers into families: having to search for relatives may
# have decided whether someone made it onto a lifeboat.
combined$Surname <- vapply(combined$Name,
                           FUN = function(x) {strsplit(x, split="[,.]")[[1]][1]},
                           FUN.VALUE = character(1))

# Family ID = family size pasted to surname, e.g. "3Johnson".
combined$FamilyID <- paste0(combined$FamilySize, combined$Surname)
# Singles and couples are not interesting as "families"; give them a
# shared bucket of their own.
combined$FamilyID[combined$FamilySize <= 2] <- "Single/Pairs"
table(combined$FamilyID)

# Some family sizes are misreported: any ID that occurs at most twice is
# folded into the Single/Pairs bucket as well.
famIDs <- data.frame(table(combined$FamilyID))
famIDs <- famIDs[famIDs$Freq <= 2, ]
combined$FamilyID[combined$FamilyID %in% famIDs$Var1] <- "Single/Pairs"
combined$FamilyID <- as.factor(combined$FamilyID)
#goodie, now that we have got a list of people who are part of a family, we can split it apart, and do some predictions on the new variables
trainingdata2 = combined[1:nrow(trainingdata),]
testingdata2 = combined[(nrow(trainingdata)+1):nrow(combined),]
# now grow a new descision tree with the updated info
fit = rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize,
data = trainingdata2,
method = "class")
# and make a fifth order prediction, based on the new engineered variables:
Prediction = predict(fit, testingdata2, type = "class")
submit = data.frame(PassengerId = testingdata2$PassengerId, Survived = Prediction)
write.csv(submit, file = "FifthOrderPrediction.csv", row.names = FALSE)
#part 6: random forests
#download and install random forest
install.packages('randomForest')
library(randomForest)
# first problem, is empty spaces in the data. Especially for age. rpart cannot handle this
# We can fill this in with an Agefit, with the method being anova, as the age data is continuous:
Agefit = rpart(Age ~ Pclass + Sex + SibSp + Parch + Fare + Embarked + Title,
data = combined[!is.na(combined$Age),],
method = "anova")
combined$Age[is.na(combined$Age)] = predict(Agefit, combined[is.na(combined$Age),])
#Embarked and Fare also have some blanks
#Southampton is the most common embarkment point, so we will put these into Southampton
combined$Embarked[combined$Embarked == ""] = "S"
combined$Fare[is.na(combined$Fare)] = median(combined$Fare, na.rm = TRUE)
#our dataframe is now cleaned of blanks. we still have too many factors in Family ID to run this, however. Reducing the number of families:
combined$FamilyID2 = as.character(combined$FamilyID)
combined$FamilyID2[combined$FamilySize <= 3] = 'SmallFamily'
combined$FamilyID2 = factor(combined$FamilyID2)
# Try converting titles to factors
combined$Title = as.factor(combined$Title)
#Now, we split the data again:
trainingdata2 = combined[1:nrow(trainingdata),]
testingdata2 = combined[(nrow(trainingdata)+1):nrow(combined),]
#set a seed for the random forest
set.seed(415)
fit = randomForest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID2,
data=trainingdata2,
importance = TRUE,
ntree=2000)
# look at some of the metrics of the fit
varImpPlot(fit)
# there are also "conditional inference trees" shown below.
# They use statistical metrics to determine the nodes, and can handle more factors than random trees
install.packages("party")
library(party)
fit = cforest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked + Title + FamilySize + FamilyID,
data=trainingdata2,
controls=cforest_unbiased(ntree=2000,mtry=3))
Prediction = predict(fit, testingdata2, OOB=TRUE, type = "response")
submit = data.frame(PassengerId = testingdata$PassengerId, Survived = Prediction)
write.csv(submit, file = "SixthOrderPrediction.csv", row.names = FALSE)
|
82da2374ddaa8694ff4b4b7e517bfbdec4815012 | 626b0065fd063ebd9e2f672da57cf5a1e06ff895 | /Codes_R_Programming - Copy.R | bf4b2eab3b806bd5caf9b6de00a882efa128a697 | [] | no_license | Chandankrbharti/DataAnalyticUsingR | 5e615120fbbee61f419ac8cd07141086d25df369 | 8475c6203ff855c68b2a17a3dd896c53e9aa3892 | refs/heads/master | 2020-03-20T20:52:01.431288 | 2018-06-25T09:36:04 | 2018-06-25T09:36:04 | 137,711,196 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,013 | r | Codes_R_Programming - Copy.R | #Print 1 to 10 numbers
1:10
#Find avg
mean(1:10)
#Variable assignment
a = 5
a <- 5x=
5+2 -> a
a
b <- 4
a+b
# Vector is a 1 dimensional object & it should have same element
x <- 1:10
x
class(x) # Integer
x1 <- c(6.7,4,6,7,8,9)
x1
class(x1) # Numeric
a <- 5L
class(a)
x3 <- c("Guru", "Madhu", "Deepankar")
class(x3) # Character
x1[4] # to access a element of Vector
x1[1:4]
x1[c(2,4,6)] # to access 2nd, 4th and 6th element
length(x1) # to find out the length of vector
#Logical Vector
x4 <- c(TRUE, FALSE, TRUE, FALSE)
class(x4)
x4 <- c(TRUE, FALSE, TRUE, FALSE, T, F)
#Complex Vector
x5 <- c(2+3i, 3+1i, 1+2i)
class(x5)
x6 <- c(1,2, "Guru", "Madhu")
class(x6)
class(x6[1])
x1 <- c(1,2, TRUE, FALSE)
class(x1)
# Explicit Coercion
# Objects can be explicitly coerced from one class to another using the as.* functions, if available.
x <- 0:6
class(x)
as.numeric(x)
as.logical(x)
as.character(x)
x <- c("a", "b", "c")
as.numeric(x)
as.logical(x)
as.complex(x)
# Vectorised Operation
1:4
6:10
1:4 + 6:10
c(1,2,3,4,5) + c(6,7,8,9,10)
c(2,4,6,8) + c(1,2,3,4)
C(1,2,3,4,5) + c(6,7,8,9)
1:10 + 6:10
C(1,2,3,4,5,6,7,8,9,10) + c(6,7,8,9,10)
6:10
1:5
1:10+6:10
1:6+6:9
c(1,2,3,4)+c(6,7,8,9,10)
1:10
6:10
1:10 + 6:10
c(1,2,3,4,5,6,7,8,9,10) + c(6,7,8,9,10)
c(1,2)+c(2+3i, 3+1i)
1:5 - 6:10
# Comparison of Vectors
c(3,4-1,2+1,5-1,10-7) == 3
c(3,4-1,2+1,5-1,10-7) != 3
c(3,4-1,2+1,5-1,10-7) > 3
c(3,4-1,2+1,5-1,10-7) >= 3
c(3,4-1,2+1,5-1,10-7) < 3
c(3,4-1,2+1,5-1,10-7) <= 3
a1 <- 1:10
a1+3
b1 <- 11:20
b1 + 4
(a1*3) + (b1*4)
(a1*3) - (b1*4)
# Sequence of numbers
1:10
var <- seq(from=1, to=10, by=3)
var
seq(1,10,2)
# repeatation of numbers
rep(1:4, 4)
rep(1:4, each=4)
rep1 <- rep(1:4, 4)
class(rep1)
# Matrix
m1 <- 1:8
m1
class(m1)
dim(m1) <-c(2,4)
m1
m2 <- matrix(1:5,3,4)
m2
m2 <- matrix(1:8,2,4, byrow=TRUE) # Forming Matrix by row
m3 <- matrix(1:8,4,2)
m3
m4 <- matrix(1:7, 2,4)
m4 <- 1:7
m4
m3 <- matrix(1:4, 2,4)
m3
m3 <- matrix(1:20, 4,4)
m3[,2]
m3[1,2] # accessing an element from a Matrix
#Matrices can be created by column-binding or row-binding with the cbind() and rbind() functions.
x <- 1:3
y <- 10:12
cbind(x, y)
rbind(x, y)
# Array
my_array <- array(1:100, dim=c(2,4,5))
my_array
my_array[ , ,3] # All elements of 3rd matrix
my_array[ ,3, ] # 3rd column of all matrix
class(my_array)
my_array[2, ,2]
class(my_array[2, ,2])
# Data Frames
data()
mtcars
View(mtcars)
salary_url <- "http://www.justinmrao.com/salary_data.csv"
salary_data <- read.csv(salary_url)
View(salary_data)
class(salary_data)
#List
List_of_Vecs <- list (x1,x3,x4,x5)
List_of_Vecs
class(List_of_Vecs)
List_of_Vecs [4] # Accessing an element of a List
List_of_Vecs [[4]][1]
List_of_Vecs_Mat <- list (x1,x3,x4,x5,m3) # List of Vectors and Matrix
List_of_Vecs_Mat[[5]][2,]
List_of_Mat_DF <- list(m3, salary_data) # List of Matrix & DF
List_of_Mat_DF
length(List_of_Mat_DF)
List_of_Mat_DF[3] # Accessing an element of a List
New_DF <- as.data.frame(List_of_Mat_DF[2]) # Creating a new data frame
List_of_Mat_DF[2]
List_of_list <- list(List_of_Vecs,m3) # Viewing a list of list
List_of_list
New_DF
List_of_Vecs_Mat[[5]][1,2]
List_of_Vecs_Mat[[5]][4,2]
# Factor - Data structure that belongs to character
Gender_cols <- c("male", "female", "female", "female", "male", "unknown")
class(Gender_cols)
Gender_cols
Gender_cols_fac <- as.factor(Gender_cols) # Converting to factor
Gender_cols_fac
as.integer(Gender_cols_fac)
#Missing Values
is.na(mtcars$mpg)
## Create a vector with NAs in it
x <- c(1, 2, NA, 10, 3)
## Return a logical vector indicating which elements are NA
is.na(x)
## Return a logical vector indicating which elements are NaN
is.nan(x)
## Now create a vector with both NA and NaN values
x <- c(1, 2, 0, NaN, NA, 4)
is.na(x)
is.nan(x)
x <- data.matrix(mtcars)
class(x)
x
dim(x)
View(x)
x <- data.frame(foo = 1:4, bar = c(T, T, F, F))
x
nrow(x)
ncol(x)
names(x)
#sapply(x,class)
#Names
#R objects can have names, which is very useful for writing readable code and self-describing objects.
#Here is an example of assigning names to an integer vector.
x <- 1:3
names(x)
names(x) <- c("New York", "Seattle", "Los Angeles")
class(x)
#Lists can also have names, which is often very useful.
x <- list("Los Angeles" = 1, Boston = 2, London = 3)
x
names(x)
#names(x) <- c("a","b","c")
#Matrices can have both column and row names.
m <- matrix(1:4, nrow = 2, ncol = 2)
m
dim(m)
dimnames(m) <- list(c("a", "b"), c("c", "d"))
class(dimnames(m))
attributes(m)
#Column names and row names can be set separately using the colnames() and rownames()functions.
colnames(m) <- c("h", "f")
rownames(m) <- c("x", "z")
m
#Note that for data frames, there is a separate function for setting the row names, the row.names()function.
#Also, data frames do not have column names, they just have names (like lists).
#Subsetting R Objects
#There are three operators that can be used to extract subsets of R objects.
#The [ operator always returns an object of the same class as the original. It can be used to
#select multiple elements of an object
#The [[ operator is used to extract elements of a list or a data frame. It can only be used to
#extract a single element and the class of the returned object will not necessarily be a list or
#data frame.
#The $ operator is used to extract elements of a list or data frame by literal name. Its semantics
#are similar to that of [[.
x <- matrix(1:6, 2, 3)
x[1,]
x[1, 2, drop = FALSE]
x[1,2]
x[1, , drop = FALSE]
x <- list(foo = 1:4, bar = 0.6)
x$foo
x$bar
#Nested List
x <- list(a = list(10, 12, 14), b = c(3.14, 2.81))
x[[1]][3]
#or
x[[c(1, 3)]]
x <- list(foo = 1:4, bar = 0.6, baz = "hello")
x[c(1, 3)]
#or
x[[c(1, 3)]]
#Removing NA Values
x <- c(1, 2, NA, 4, NA, 5)
bad <- is.na(x)
print(bad)
y <- x[!bad]
#What if there are multiple R objects and you want to take the subset with no missing values in any
#of those objects?
x <- c(1, 2, NA, 4, NA, 5)
y <- c("a", NA, NA, "d", NA, "f")
good <- complete.cases(x, y)
good
x[good]
y[good]
airquality
is.na(airquality$Solar.R)
colSums(is.na(airquality))
which(is.na(airquality$Solar.R))
View(airquality)
good <- complete.cases(airquality)
class(good)
head(airquality[good, ])
salary_url <- "http://www.justinmrao.com/salary_data.csv"
salary_data <- read.csv(salary_url)
class(salary_data)
salary_data
View(salary_data)
dim(salary_data) # to find dimension of a table
str(salary_data) # structure of salary data
# rows = Observations, columns = variables, Features (ML)
salary_data_1 <- read.csv(salary_url, stringsAsFactors = FALSE) # To recognise string as a char not factor
salary_data_1
salary_data
str(salary_data_1)
str(salary_data)
salary_data$team # To access a column from DF
class(salary_data_1$team)
salary_data_1$team <- as.factor(salary_data_1$team) # converting char to factor
class(salary_data_1$team)
head(salary_data) # By default first 6 observations
head(salary_data, 10) # First 10 observations
tail(salary_data) # Bottom 6 observations
ncol(salary_data) # Number of columns
nrow(salary_data) # number of rows
length(salary_data)
# Subsetting of Data
salary_data_2 <- salary_data[25:45, ] # To access 25 to 45 rows of all the columns
View(salary_data_2)
# Record number 23, 45, 100,200,3000
salary_data_3 <- salary_data[c(23,45,100,200,3000), ]
salary_data_3
salary_data_4 <- salary_data[ , c(5,7,10,12)] # Accessing columns
salary_data_4
salary_data_5 <- salary_data[ , -12] # To eliminate a perticular column
View(salary_data_5)
dim(salary_data_5)
names(salary_data)
library(Amelia)
# Intelligent Subsetting
unique(salary_data_1$player) # To identify unique subsets
unique(salary_data_1$team)
salary_data_6 <- subset(salary_data, salary_data$team == "Detroit Pistons") # To select the records of a perticular Column
View(salary_data)
salary_data_7 <- subset(salary_data, salary_data$team == "Detroit Pistons" & salary_data$salary_year>600000)
salary_data_7 <- subset(salary_data, salary_data$team == "Detroit Pistons" & salary_data$salary_year>600000 & salary_data$contract_years_remaining <=2)
salary_data_7 <- subset(salary_data, salary_data$team == "Detroit Pistons" & salary_data$salary_year>600000 & salary_data$contract_years_remaining >7)
salary_data_7
# Paste
n1 <- paste("Guru", "Madhu", sep = "_")
n1
View(salary_data)
salary_data$newcols <- paste(salary_data$team, salary_data$year, sep = "*") # To join two columns together seperated by a value
View(salary_data$newcols)
salary_data$newcols <- NULL # To drop the new column created
#Assignment 3
#Keep the new col in the third order instead of last
# join rows of two data frames
salary_data_9 <- rbind(salary_data_2, salary_data_3) # Column names & number of cols present in both DF should be same
salary_data_10 <- cbind(salary_data_4, salary_data_5) # Number of rows should be equal
View(salary_data_10)
# Assignment
# I want to combine 2 data frames which has unequal rows and columns
# smartbind
# Functions in R
sessionInfo() # To see the R version details
# Sorting
sort_vec <- c(1,5,2,6,8,10,12,20)
sort_vec1 <- sort(sort_vec)
sort_vec1
sort_vec2 <- sort(sort_vec, decreasing = TRUE) # Decsending order
sort_vec2
# Order
sort_vec3 <- order(sort_vec)
sort_vec
sort_vec3
sort_vec4 <- order(-sort_vec3)
sort_vec4
salary_data_11 <- salary_data
salary_data_11$salary_year <- sort(salary_data$salary_year)
salary_data_11
salary_data_11 <- salary_data[1:10, ]
salary_data_11
View(salary_data_11)
salary_data_11$salary_year <- sort(salary_data_11$salary_year)
order(salary_data$salary_year)
salary_data_new <- salary_data[order(salary_data$salary_year), ]
View(salary_data_new)
salary_data_new <- salary_data[order(-salary_data$salary_year), ] # Descending order
salary_data_new1 <- salary_data[order(salary_data$contract_years_remaining, salary_data$salary_year), ]
View(salary_data_new1) # Sorting two columns
salary_data_new1 <- salary_data[order(salary_data$contract_years_remaining, -salary_data$salary_year), ]
# Functions
char_vec <- c("Guru", "Deepankar", "Madhu")
toupper(char_vec) # Uppercase
tolower(char_vec) # Lowercase
# Assignment
#Add 2 coluns for Team where one column having all the name in uppercase and vice versa
# Substring Functions
my_string <- "gurudeepankarmadhupavan"
length(my_string)
my_string_sub <- substring(my_string, 5,15)
my_string_sub
nchar(my_string)
salary_data$year1 <- substring(salary_data$year, 1,4)
salary_data$year1
# Assignment
# Get last 4 char of the column " Team"
# Hint : nchar
my_string_sub_1 <- substring(my_string, (nchar(my_string)-7), nchar(my_string))
my_string_sub_1
# Substitute functions
x1 <- c("R tutorial", "data science tutorial", "c tutorial")
x1
x2 <- sub("tutorial","training", x1)
x2
x3 <- c("R tutorial c tutorial ds TUTORIAL", "data science tutorial r tutorial", "c tutorial r tutorial")
x3
x4 <- sub("tutorial","training", x3)
x4
x5 <- gsub("tutorial","training", x3) # Group Substitution
x5
x5 <- gsub("tutorial","training", x3, ignore.case = TRUE) # To ignore case
# Pattern Matching
country_name <- c("America", "United States of America", "Americas", "china", "Japan")
pattern <- "America"
grep(pattern, country_name)
country_name [grep(pattern, country_name)] <- "My America"
country_name
grepl(pattern, country_name) # To see logical outpout for pattern matching
# String Split
my_string <- "I love working on R ; and packages it offers"
x <- strsplit(my_string, ";") # Splitting words based on semi colon, coma
#Data Import & Export
#read.table, read.csv, for reading tabular data
#readLines, for reading lines of a text file
#source, for reading in R code files (inverse of dump)
#dget, for reading in R code files (inverse of dput)
#load, for reading in saved workspaces
#unserialize, for reading single R objects in binary form
#write.table, for writing tabular data to text files (i.e. CSV) or connections
#writeLines, for writing character data line-by-line to a file or connection
#dump, for dumping a textual representation of multiple R objects
#dput, for outputting a textual representation of an R object
#save, for saving an arbitrary number of R objects in binary format (possibly compressed) to a file.
# serialize, for converting an R object into a binary format for outputting to a connection (or file).
#Read the help("read.table") document
?mean
??apriori
data(package = .packages(TRUE))
data()
help("mtcars")
library(Amelia)
data("diamonds")
diamonds
# Loading Barley package which is a part of another package "Lattice"
data("barley", package = "lattice")
dim(barley)
View(barley)
head(barley)
ncol(barley)
length(barley)
# Loading a text file
# 2nd Option
d = read.table("E:/New folder/R_EB/auto1.txt", sep = "\t", header = TRUE)
View(d)
# Option 1
file_path <- "C:\\Users\\User\\Desktop\\R_EB\\auto1.txt"
d = read.table(file_path, sep="\t")
# Create a batch file through which you can write a file into R
#install.packages("lerningr")
library(learningr)
getwd()
setwd()
deer_file <- "C:/Users/Pavan/Documents/R/win-library/3.4/learningr/extdata/RedDeerEndocranialVolume.dlm"
deer_data <- read.table(deer_file, header = TRUE, fill = TRUE)
deer_data
ncol(deer_data)
str(deer_data)
# reading a CSV file
crab_data <- read.csv("C:/Users/Pavan/Documents/R/win-library/3.4/learningr/extdata/crabtag.csv", header = FALSE, skip = 4, nrows = 8)
crab_data
write.csv(salary_data,"sal8.csv")
# XLSX File
library(openxlsx)
My_xl <- read.xlsx("E:/New folder/R_EB/sample.xlsx")
# Text file
the_tempest <- readLines("C:/Users/Pavan/Documents/R/win-library/3.4/learningr/extdata/Shakespeare.s.The.Tempest..from.Project.Gutenberg.pg2235.txt")
the_tempest[1:100]
# XML File
library(XML)
r_options <- xmlParse("C:/Users/User/Documents/R/win-library/3.3/learningr/extdata/options.xml")
View(r_options)
library(jsonlite)
dat.1 <- fromJSON("C:/Users/Pavan/Documents/R/win-library/3.4/learningr/extdata/Jamaican.Cities.json")
dat.1
#The analogous functions in readr are read_table() and read_csv().
#Using dput() and dump()
## Create a data frame
y <- data.frame(a = 1, b = "a")
y
row.names(y) <- "f"
a <- url("https://www.indiatimes.com/")
b <- readLines(a,10)
b
x <- 1:4
y <- 6:9
z <- x + y
#Vectorized Matrix Operations
x <- matrix(1:4, 2, 2)
y <- matrix(rep(10, 4), 2, 2)
x*y
x / y
# Dates and Times
# Dates are represented by the Date class and times are represented by the POSIXct or the POSIXlt class.
#Dates are represented by the Date class and can be coerced from a character string using the as.Date() function.
## Coerce a 'Date' object from character
x <- as.Date("1970-01-01")
x
class(x)
#You can see the internal representation of a Date object by using the unclass() function.
unclass(x)
unclass(as.Date("2018-04-3"))
x <- Sys.time()
class(x)
unclass(x)
p <- as.POSIXlt(x)
names(unclass(p))
p$hour
p$mday
#You can also use the POSIXct format.
x <- Sys.time()
x
## Internal representation
names(unclass(x))
## Can't do this with 'POSIXct'!
x$sec
#strptime() function in case your dates are written in a different format.
#strptime() takes a character vector that has dates and times and converts them into to a POSIXlt object.
datestring <- c("January 10, 2012 10:40", "March 29, 2018 9:10")
x <- strptime(datestring, "%B %d, %Y %H:%M")
class(x)
unclass(x)
x$wday
#Operations on Dates and Times
x <- as.Date("2012-01-01")
y <- strptime("9 Jan 2011 11:34:21", "%d %b %Y %H:%M:%S")
class(y)
x-y # Error
x <- as.POSIXlt(x)
x-y
# Time classes keeps track of Leap years, leap seconds etc.
x <- as.Date("2012-03-01")
y <- as.Date("2012-02-28")
x-y
## My local time zone
x <- as.POSIXct("2012-10-25 01:00:00")
y <- as.POSIXct("2012-10-25 06:00:00", tz = "GMT")
y-x
View(mtcars)
colSums(is.na(mtcars))
dim(mtcars)
str(mtcars)
summary(mtcars)
mean(mtcars$mpg)
?mtcars
fivenum(mtcars$mpg)
summary(mtcars$mpg)
summary(mtcars)
boxplot(mtcars$mpg)
IQR(mtcars$mpg) # Inter Quadrile Range
mtcars1 <- edit(mtcars)
View(mtcars1)
summary(mtcars1$mpg)
IQR(mtcars1$mpg)
boxplot(mtcars1$mpg)
b2 <- boxplot(mtcars1$mpg)
b2$out
max(mtcars$mpg)
# Bi variate analysis on MPG and Cyl column
boxplot(mtcars$mpg~mtcars$cyl)
fivenum(mtcars$mpg,mtcars$cyl)
b1 <- boxplot(mtcars$mpg~mtcars$cyl)
b1$out
data("airquality")
head(airquality)
dim(airquality)
tail(airquality)
is.na(airquality$Ozone) #To find missing value
which(is.na(airquality$Ozone)) # Position of missing values
length(which(is.na(airquality$Ozone))) # Number of missing values
length(which(is.na(airquality$Ozone)))/nrow(airquality) # Percentage of missing values
round(length(which(is.na(airquality$Ozone)))/nrow(airquality),2) # To round it by 2 digits
mean(airquality$Ozone)
mean(airquality$Ozone,na.rm = TRUE) # To count mean value ignoring the missing values
airquality$Ozone[which(is.na(airquality$Ozone))] <- mean(airquality$Ozone, na.rm = TRUE)
View(airquality) # Replacing the missing values with mean value
colSums(is.na(airquality)) # To find out missing values in all the column
colSums(is.na(airquality))/nrow(airquality) # Percentage
boxplot(mtcars$mpg~mtcars$cyl, main = "BoxPlot b/w mpg and Cyl", xlab = "Cyl", ylab = "mpg")
boxplot(mtcars$mpg~mtcars$cyl, main = "BoxPlot b/w mpg and Cyl", xlab = "Cyl", ylab = "mpg", col="red")
boxplot(mtcars$mpg~mtcars$cyl, main = "BoxPlot b/w mpg and Cyl", xlab = "Cyl", ylab = "mpg",
col=c("blue", "yellow", "rosybrown1"))
colors()
par(bg="skyblue") # To apply background color
colors()
png(file ="boxplotGElatest1.png")
boxplot(mtcars$mpg~mtcars$cyl, main = "BoxPlot b/w mpg and Cyl", xlab = "Cyl", ylab = "mpg",
col=c("turquoise4", "tomato2", "pink3"))
getwd()
dev.off()
png(file = paste("boxplot1", Sys.Date(),".png"))
png(file = paste("boxplot1", Sys.time(),".png")) # Assignment
library(lattice)
bwplot(mtcars$mpg~mtcars$cyl, main = "BoxPlot b/w mpg and Cyl", xlab = "Cyl", ylab = "mpg",
col=c("blue", "yellow", "rosybrown1")) # Box Whisker Plot
bwplot(iris$Petal.Length ~ iris$Species)
View(iris)
?iris
head(iris)
library(ggplot2)
qplot(iris$Species, iris$Petal.Length, geom = "boxplot")
hist(mtcars$mpg) # Histogram
hist(mtcars$mpg, labels = TRUE)
hist(mtcars$mpg, breaks = 10, labels = TRUE)
plot(mtcars$mpg) # Scatter Plot is preferred for bi variate analysis
plot(mtcars$mpg, mtcars$wt)
plot(mtcars$mpg, mtcars$disp, type = "l") # Line chart, not preferred for Bi variate
plot(mtcars$mpg, type = "l")
plot(mtcars$mpg, type = "h")
plot(mtcars$mpg, type = "b")
plot(mtcars$mpg, type = "o")
par(mfrow = c(2,2))
plot(mtcars$mpg, type = "b")
plot(mtcars$mpg, type = "h")
plot(mtcars$mpg, type = "h")
plot(mtcars$mpg, type = "o")
plot(mtcars$mpg, type = "l")
plot(mtcars$mpg, type = "h")
plot(mtcars$mpg, type = "b")
plot(mtcars$mpg, type = "o")
plot(mtcars$mpg, type = "o")
plot(mtcars$mpg)
plot(mtcars$mpg, pch = 2)
plot(mtcars$mpg, pch = 3)
plot(mtcars$mpg, pch = 6)
plot(mtcars$mpg, pch = 18, col = "red")
plot(mtcars$mpg, pch = 14, col = "blue")
plot(mtcars$mpg, pch = c(as.factor(mtcars$mpg)))
par(mfrow = c(1,1))
plot(mtcars$mpg, type = "l")
|
baf0fe1d11162fc9d5b72e110bed08b267149e06 | c3e2451daec7c223e6bca5e8ec5d29ea3efa5c6a | /R/util_proposals.R | 4cc34c843ac3bd1f80857af54b3c23409eeca669 | [] | no_license | pierrejacob/bayeshscore | d0336efc16dd0456ffa2c3f6fbe51aabbcf3f3f8 | 8f148f4074e09de4911d5645a9781c8aa844d38d | refs/heads/master | 2021-09-22T10:56:27.652463 | 2018-09-08T18:21:28 | 2018-09-08T18:21:28 | 63,806,619 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,950 | r | util_proposals.R | #------------------------------------------------------------------------------#
#-------------- Some proposals for the rejuvenation steps of SMC and SMC2 ----#
#------------------------------------------------------------------------------#
#'@rdname get_proposal_independent_normal
#'@title get_proposal_independent_normal
#'@description Independent Normal proposal using fitted mean and covariance matrix
#'@export
get_proposal_independent_normal <- function(){
f = function(thetas,normw,...){
covariance = cov.wt(t(thetas), wt = normw, method = "ML")
mean_t = covariance$center
cov_t = covariance$cov + diag(rep(10^(-4)/nrow(thetas)), nrow(thetas)) # increased a bit the diagonal to prevent degeneracy effects)
# define the sampler
rproposal = function(Ntheta) {
return (fast_rmvnorm_transpose(Ntheta, mean_t, cov_t))
}
# define the corresponding density function
dproposal = function(thetas,log = TRUE) {
if (log) {return (fast_dmvnorm_transpose(thetas, mean_t, cov_t))}
else {return (exp(fast_dmvnorm_transpose(thetas, mean_t, cov_t)))}
}
return (list(r = rproposal, d = dproposal))
}
return(f)
}
#'@rdname get_proposal_mixture
#'@title get_proposal_mixture
#'@description Independent proposal from a fitted mixture of Normals with \code{nclust} components (default is 5).
#' If the fit is unsuccessful, return independent Normal proposal (see \code{get_independent_normal_proposal}).
#'@export
get_proposal_mixture <- function(nclust = 5, maxattempts = 5, verbose = FALSE){
f <- function(thetas,normw,...){
options(warn = -1)
# resample
ancestors <- systematic_resampling_n(normw, length(normw), runif(1))
thetas_check <- thetas[,ancestors,drop=FALSE]
# fit mixture
fit <- mixmodCluster(data = data.frame(t(thetas_check)), nbCluster = nclust, dataType = "quantitative")
# test that it worked
is.error <- (length(fit@bestResult@parameters@proportions) == 0)
attempt = 0
while(attempt < maxattempts && is.error){
attempt <- attempt + 1
if(verbose){cat("fitting mixture... attempt", attempt, "\n")}
fit <- mixmodCluster(data = data.frame(t(thetas_check)), nbCluster = nclust, dataType = "quantitative")
# test that it worked
is.error <- (length(fit@bestResult@parameters@proportions) == 0)
}
options(warn = 0)
if (is.error){
return(get_proposal_independent_normal()(thetas,normw))
}
# if it worked, ...
rproposal = function(Ntheta) {
proportions <- fit@bestResult@parameters@proportions
means <- fit@bestResult@parameters@mean
variances <- fit@bestResult@parameters@variance
K <- nrow(means)
X <- matrix(0, ncol = Ntheta, nrow = ncol(means))
# sample allocations
allocations <- systematic_resampling_n(proportions, Ntheta, runif(1))
for (k in 1:K){
which.k <- which(allocations == k)
nk <- length(which.k)
if (nk > 0){
xk <- fast_rmvnorm_transpose(nk, means[k,], variances[[k]])
X[,which.k] <- xk
}
}
# random shuffling
X <- X[,sample(x = 1:ncol(X), size = ncol(X), replace = FALSE),drop=FALSE]
return(X)
}
dproposal = function(thetas,log = TRUE) {
proportions <- fit@bestResult@parameters@proportions
means <- fit@bestResult@parameters@mean
variances <- fit@bestResult@parameters@variance
d <- nrow(thetas)
n <- ncol(thetas)
K <- nrow(means)
evals <- matrix(0, nrow = n, ncol = K)
for (k in 1:K){
evals[,k] <- fast_dmvnorm_transpose(thetas, means[k,], variances[[k]]) + log(proportions[k])
}
g <- function(row){
m <- max(row)
return(m + log(sum(exp(row - m))))
}
results <- apply(X = evals, MARGIN = 1, FUN = g)
if (log) return(results)
else return(exp(results))
}
return (list(r = rproposal, d = dproposal))
}
return(f)
}
|
e04126c8f6376f86f708112efb41af8666404277 | a1bb95e9d753981fb44249477d20636ae21369b7 | /World/World/TestsPerMillion.r | 436a18da0dc004e0a4310e6d6cd8eaee26448ffd | [] | no_license | SravanKumar35/Data-Analysis-on-COVID19-Data | 9ac8213a8b13067627865ac543065b378234b1fb | e519df1c84a0ce89ebe596261b491dfba6375dc2 | refs/heads/master | 2022-11-14T07:52:02.792747 | 2020-07-08T12:02:19 | 2020-07-08T12:02:19 | 275,380,393 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,117 | r | TestsPerMillion.r | df <- read.csv('G:\\Required\\College\\8th sem\\Major Project\\COVID\\World-20200428T111157Z-001\\World\\full-list-cumulative-total-tests-per-million.csv')
library(RColorBrewer)
library(flextable)
library(officer)
# Define the number of colors you want
nb.cols <- 42
mycolors <- colorRampPalette(brewer.pal(8, "Set2"))(nb.cols)
head(df)
df$Date <- as.Date(df$Date, format = "%b %d, %Y")
#df <- subset(df, select = c(Entity, Date, Cumulative.total.tests.per.thousand))
# Interactive line chart: cumulative tests per thousand over time,
# one trace (colour) per country.
library(plotly)
fig1 <- plot_ly(df, x = ~Date, y = ~Cumulative.total.tests.per.thousand,
                type = 'scatter', mode = 'lines+markers', color = ~Entity)

library(dplyr)
# BUG FIX: the original layout() call passed `margin = margin` (an undefined
# object, which errors at evaluation) and `annotations = df$Entity` (plotly
# expects a list of annotation lists, not a character/factor vector).
# Both invalid arguments are dropped.
fig1 <- fig1 %>%
  layout(title = "Number of tests per thousand people",
         autosize = TRUE,
         showlegend = TRUE)
fig1
# Rank all rows by cumulative tests per thousand (highest first) and keep
# the top 54 records, then plot their trajectories by country.
row_rank <- order(df$Cumulative.total.tests.per.thousand, decreasing = TRUE)
df_sorted <- df[row_rank[1:54], ]

fig2 <- plot_ly(df_sorted,
                x = ~Date,
                y = ~Cumulative.total.tests.per.thousand,
                color = ~Entity,
                type = 'scatter',
                mode = 'lines+markers') %>%
  layout(hovermode = 'compare',
         title = "Top 5 countries with maximum tests per thousand people")
fig2
# Restrict the testing data to five countries of interest and plot
# their tests-per-thousand trajectories.
focus_codes <- c('USA', 'FRA', 'DEU', 'ITA', 'IND')
df_ind <- filter(df, Code %in% focus_codes)
head(df_ind)

fig3 <- plot_ly(df_ind,
                x = ~Date,
                y = ~Cumulative.total.tests.per.thousand,
                color = ~Entity,
                type = 'scatter',
                mode = 'lines+markers') %>%
  layout(hovermode = 'compare',
         title = 'Top 5 countries with maximum cases & their number of tests per thousand people')
fig3
# Load total + daily confirmed case counts per country.
# NOTE(review): absolute Windows path; consider a relative path / file.path().
confirmed_cases <- read.csv('G:\\Required\\College\\8th sem\\Major Project\\COVID\\World-20200428T111157Z-001\\World\\total-and-daily-cases-covid-19.csv')
# Parse "Apr 26, 2020"-style strings into Date class for plotting/merging.
confirmed_cases$Date <- as.Date(confirmed_cases$Date, format = "%b %d, %Y")
head(confirmed_cases)
# Subset the case counts to the seven largest outbreaks (plus India);
# this data frame is reused by the daily-cases chart below.
TopConfirmedCases <- filter(confirmed_cases,
                            Code %in% c('USA', 'FRA', 'DEU', 'ITA', 'IND', 'ESP', 'CHN'))

# Total confirmed cases over time for each of those countries.
fig5 <- plot_ly(TopConfirmedCases,
                x = ~Date,
                y = ~TopConfirmedCases$Total.confirmed.cases..cases.,
                color = ~Code,
                type = 'scatter',
                mode = 'lines+markers') %>%
  layout(hovermode = 'compare',
         title = "Top 6 countries with India: total number of cases")
fig5
## Daily new confirmed cases for the same set of countries.
fig6 <- plot_ly(TopConfirmedCases,
                x = ~Date,
                y = ~TopConfirmedCases$Daily.new.confirmed.cases..cases.,
                color = ~Code,
                type = 'scatter',
                mode = 'lines+markers') %>%
  layout(hovermode = 'compare',
         title = 'Daily confirmed cases in top 6 countries')
fig6
# India only: overlay daily new cases and the cumulative total on one chart.
# (IndianDailyCases is reused by the gganimate section below.)
IndianDailyCases <- filter(confirmed_cases, Code == 'IND')

fig_ind <- plot_ly(IndianDailyCases,
                   x = ~Date,
                   y = ~IndianDailyCases$Daily.new.confirmed.cases..cases.,
                   type = 'scatter',
                   mode = 'lines+markers') %>%
  layout(hovermode = 'compare',
         title = 'Daily and total confirmed cases in India') %>%
  add_trace(y = ~IndianDailyCases$Total.confirmed.cases..cases.)
fig_ind
# Animated growth curve of India's cumulative cases: transition_reveal()
# draws the line progressively along the Date axis.
library(gganimate)

# FIX: reference the column by name inside aes() instead of
# `IndianDailyCases$Total.confirmed.cases..cases.` -- using `$` inside aes()
# is an anti-pattern (it bypasses the data argument, breaks faceting/grouping,
# and produces an unreadable axis label).
p <- ggplot(IndianDailyCases,
            aes(Date, Total.confirmed.cases..cases., color = Code)) +
  geom_line() +
  geom_point() +
  transition_reveal(Date)
p
# Quick sanity checks on both sources before joining them.
head(df)
head(confirmed_cases)
length(df$Date)
length(confirmed_cases$Date)

# Inner join testing data with case counts on country + date.
# FIX: the original supplied only `by.x`, silently relying on `by.y`
# defaulting to the intersection of the column names; name the join keys
# explicitly with `by` (same columns, so the result is unchanged).
CasesTest <- merge(df, confirmed_cases, by = c('Entity', 'Code', 'Date'))
head(CasesTest)
# Comparison between cumulative tests and daily confirmed cases per country,
# overlaid on a shared log-scaled y axis.
fig7 <- plot_ly(CasesTest,
                x = ~Date,
                y = ~Cumulative.total.tests.per.thousand,
                color = ~Code,
                type = 'scatter',
                mode = 'lines+markers') %>%
  add_trace(y = ~CasesTest$Daily.new.confirmed.cases..cases.,
            type = 'scatter',
            mode = 'lines+markers',
            color = ~Entity) %>%
  layout(yaxis = list(type = "log"))
fig7
print("Correlation between Total Number of cases and The Daily Tests: " +
cor(CasesTest$Cumulative.total.tests.per.thousand, CasesTestGrp$TotalConfirmedCases))
## Append day-over-day percent-change columns for one country's case series.
##
## cases: data frame with the OWID columns
##   Total.confirmed.cases..cases. and Daily.new.confirmed.cases..cases.
## Returns the same frame with TotalPercentIncrease / DailyPercentIncrease
## appended. The formula c(NA, -diff(v) / v[-1] * 100) reproduces the
## original per-country blocks exactly (note: it is the *negated* change
## relative to the current value).
##
## FIX: the German block previously referenced non-existent columns
## (TotalConfirmedCases / DailyNewConfirmedCases) and therefore failed;
## routing every country through this one helper removes that bug and the
## sevenfold copy-paste duplication.
add_percent_increase <- function(cases) {
  total <- cases$Total.confirmed.cases..cases.
  daily <- cases$Daily.new.confirmed.cases..cases.
  cases$TotalPercentIncrease <- c(NA, -diff(total) / total[-1] * 100)
  cases$DailyPercentIncrease <- c(NA, -diff(daily) / daily[-1] * 100)
  cases
}
####### IND
IndiaConfirmedCases <- add_percent_increase(filter(confirmed_cases, Code == 'IND'))
ft <- flextable(tail(IndiaConfirmedCases, 15))
ft <- autofit(ft)
print(ft, preview = "pptx")
######### USA
USConfirmedCases <- add_percent_increase(filter(confirmed_cases, Code == 'USA'))
ft2 <- flextable(tail(USConfirmedCases, 15))
ft2 <- autofit(ft2)
print(ft2, preview = "pptx")
####### CHN
CHNConfirmedCases <- add_percent_increase(filter(confirmed_cases, Code == 'CHN'))
ft3 <- flextable(tail(CHNConfirmedCases, 15))
ft3 <- autofit(ft3)
print(ft3, preview = "pptx")
##### Germany (FIX: previously crashed on wrong column names)
DEUConfirmedCases <- add_percent_increase(filter(confirmed_cases, Code == 'DEU'))
ft4 <- flextable(tail(DEUConfirmedCases, 15))
ft4 <- autofit(ft4)
print(ft4, preview = "pptx")
##### Spain
ESPConfirmedCases <- add_percent_increase(filter(confirmed_cases, Code == 'ESP'))
ft5 <- flextable(tail(ESPConfirmedCases, 15))
ft5 <- autofit(ft5)
print(ft5, preview = "pptx")
##### France
FRAConfirmedCases <- add_percent_increase(filter(confirmed_cases, Code == 'FRA'))
ft6 <- flextable(tail(FRAConfirmedCases, 15))
ft6 <- autofit(ft6)
print(ft6, preview = "pptx")
##### Italy
ITAConfirmedCases <- add_percent_increase(filter(confirmed_cases, Code == 'ITA'))
ft7 <- flextable(tail(ITAConfirmedCases, 15))
ft7 <- autofit(ft7)
print(ft7, preview = "pptx")
|
2e9a687faaf2830dbf0a82cb673327dcc4f3a253 | 87be5670b9b8a28ea0703b92568031c9a407ab9c | /mixedeffects.R | ea348bf90dff2cdd1cf4247ed9ebbd57c015c3c3 | [] | no_license | dtsh2/JWDReviewSeries | 4777221e31717f484391ed70692cfb8c98b87926 | 3eeb798b6944bec2394fbc3fe69e0ef2e488d0bf | refs/heads/master | 2021-01-18T09:56:13.530218 | 2018-01-31T18:50:41 | 2018-01-31T18:50:41 | 21,152,401 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,885 | r | mixedeffects.R |
######################################################
# captive bat sera anti-LBV analysis ALL POSITIVE ####
######################################################
## Clear the environment and load the mixed-model package.
rm(list = ls())
library(lme4)
### 88 bats from 4 cohorts sampled over 3 years (10 sampling days).
### Age, sex, and titre recorded. Individuals sampled an average of 5.3 times.
## Read the serology data and check the first few rows.
captive <- read.csv("Captive_sero_Jul2011.csv", header = TRUE)
head(captive)
## Recode the Age column levels, then relabel the groups.
levels(captive$Age) <- list(B = "Neonate", A = "SM", JUV = "Juv", SI = "SI")
captive$Age <- factor(captive$Age, labels = c("Neonate", "SM", "Juvenile", "SIM"))
#is.factor(captive$Age)
summary(captive)
## Drop rows with any missing value.
captive1 <- na.omit(captive)
## From now on, work within the captive1 data frame.
## NOTE(review): attach() is discouraged; kept because detach(captive1)
## later in the script expects it.
attach(captive1)
names(captive1)
## Log-linear model of titre against days,
## with a separate fit for each Age group.
per_group_model <- lmList(LogTitre ~ Days | Age,
                          data = captive1)
## Log-linear model (means parameterization) of titre against days, age,
## and the Days x Age interaction.
overall_model <- lm(LogTitre ~ -1 + Days * Age, data = captive1)
## Mixed-effects log-linear model of titre for neonates only:
## fixed effect = Days; random intercept and slope = (Days | ID).
per_group_model_ranef <- lmer(LogTitre ~ Days + (Days | ID),
                              data = captive1, subset = Age == "Neonate")
## Per-individual coefficients (fixed + random effects).
pg_coef.Neonate <- coef(per_group_model_ranef)$ID
## Average line for the whole neonate group (median would also work).
per_group_model.Neonate <- c(mean(pg_coef.Neonate$"(Intercept)"), mean(pg_coef.Neonate$Days))
## Plot log titre against days: one fitted line per individual (color coded)
## and the group average (black, dashed).
par(mfrow = c(1, 1))
plot(jitter(LogTitre) ~ Days, data = subset(captive1, Age == "Neonate"),
     col = ID, main = "Neonate")
abline(per_group_model.Neonate, col = "black", lwd = 3, lty = 2)
## FIX: iterate over the actual number of individuals instead of the
## hard-coded 1:13, which silently breaks if the data change.
for (i in seq_len(nrow(pg_coef.Neonate)))
{
  abline(pg_coef.Neonate[i, 1], pg_coef.Neonate[i, 2], col = rownames(pg_coef.Neonate)[i])
}
## Repeat the per-individual mixed model and plot for the SM group.
# individual mixed-effects fits
per_group_model_ranef <- lmer(LogTitre ~ Days + (Days | ID),
                              data = captive1, subset = Age == "SM")
pg_coef.SM <- coef(per_group_model_ranef)$ID
# group-average line
per_group_model.SM <- c(mean(pg_coef.SM$"(Intercept)"), mean(pg_coef.SM$Days))
# plot: one line per individual plus the group average (black, dashed)
par(mfrow = c(1, 1))
plot(jitter(LogTitre) ~ Days, data = subset(captive1, Age == "SM"),
     col = ID, main = "SM")
abline(per_group_model.SM, col = "black", lwd = 3, lty = 2)
## FIX: seq_len(nrow(...)) instead of the hard-coded 1:57.
for (i in seq_len(nrow(pg_coef.SM)))
{
  abline(pg_coef.SM[i, 1], pg_coef.SM[i, 2], col = rownames(pg_coef.SM)[i])
}
## Same analysis for juveniles.
# individual mixed-effects fits
per_group_model_ranef <- lmer(LogTitre ~ Days + (Days | ID),
                              data = captive1, subset = Age == "Juvenile")
pg_coef.Juvenile <- coef(per_group_model_ranef)$ID
# group-average line
per_group_model.Juvenile <- c(mean(pg_coef.Juvenile$"(Intercept)"), mean(pg_coef.Juvenile$Days))
# plot
par(mfrow = c(1, 1))
plot(jitter(LogTitre) ~ Days, data = subset(captive1, Age == "Juvenile"),
     col = ID, main = "Juvenile")
abline(per_group_model.Juvenile, col = "black", lwd = 3, lty = 2)
## FIX: seq_len(nrow(...)) instead of the hard-coded 1:8.
for (i in seq_len(nrow(pg_coef.Juvenile)))
{
  abline(pg_coef.Juvenile[i, 1], pg_coef.Juvenile[i, 2], col = rownames(pg_coef.Juvenile)[i])
}
## Same analysis for the SIM group.
# individual mixed-effects fits
per_group_model_ranef <- lmer(LogTitre ~ Days + (Days | ID),
                              data = captive1, subset = Age == "SIM")
pg_coef.SIM <- coef(per_group_model_ranef)$ID
# group-average line
per_group_model.SIM <- c(mean(pg_coef.SIM$"(Intercept)"), mean(pg_coef.SIM$Days))
# plot
par(mfrow = c(1, 1))
plot(jitter(LogTitre) ~ Days, data = subset(captive1, Age == "SIM"),
     col = ID, main = "SIM")
abline(per_group_model.SIM, col = "black", lwd = 3, lty = 2)
## FIX: seq_len(nrow(...)) instead of the hard-coded 1:11.
for (i in seq_len(nrow(pg_coef.SIM)))
{
  abline(pg_coef.SIM[i, 1], pg_coef.SIM[i, 2], col = rownames(pg_coef.SIM)[i])
}
detach(captive1)
################
### Simulated clustered population
## Simulation parameters: within-individual noise (sd_ind), between-individual
## spread (sd_grp), the overall mean, and the sampling design.
sd_ind <- 0.1
sd_grp <- 0.5
ovr_mean <- 5
num_ind <- 10
meas_per_ind <- 10
## Draw each individual's true mean around the overall mean.
ind_mean <- rnorm(num_ind, mean=ovr_mean, sd=sd_grp)
## One row per individual, one column per repeated measurement.
ind_val <- matrix(0,num_ind,meas_per_ind)
for (i in 1:num_ind)
{
ind_val[i,] <- rnorm(meas_per_ind, mean=ind_mean[i], sd=sd_ind)
}
## Reshape to long format (id, group, value) and sort by id.
## Individuals 1-5 get group "A", 6-10 get group "B".
x <- data.frame(id=rep(1:num_ind, meas_per_ind),
grp=rep(rep(c("A", "B"), each=num_ind/2), meas_per_ind),
val=as.vector(ind_val))
x <- x[order(x$id),]
## Show the difference between a plain linear model and a mixed-effects
## model: repeated measures are clustered within individuals, so the plain
## lm() overstates the effective sample size.
# fixed effects = grp, random effects = id (categorical)
summary(lmer(val ~ grp + (1|id), data=x))
# no random effects
summary(lm(val ~ grp, data=x))
|
7bf40fe408de0d52809764d65219cf0133b8f382 | 5981f1f23d2dfe30894ab46123f1eef916dc1c6a | /class05/class_05 .R | 2aadbfb04788b59567d41965bfa1361244715a31 | [] | no_license | meganmt/bimm143_ | 68bfb6863bac02f4b4239d47d88ff58f1deedfea | 145f33ca29972c2500ea4a81e1fea602670bcae3 | refs/heads/master | 2021-01-03T00:42:52.701414 | 2020-03-10T16:55:41 | 2020-03-10T16:55:41 | 239,841,417 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,370 | r | class_05 .R | #' ---
#' title: "CLass 5: Data Visualization and graphs in R"
#' author: "Megan Truong"
#' date: "January 23, 2020"
#' ---
#Class 5
#Data Visualization and graphs in R
plot(1:10, col="blue", typ="o")
#Need to import/read input data first
baby <- read.table("bimm143_05_rstats/weight_chart.txt",
header = TRUE)
#Basic plot of age vs weight
plot(baby)
plot(baby$Age, baby$Weight)
#For a line plot with filled square
plot(baby$Age, baby$Weight, typ= "o", pch=15)
#for plot point size to 1.5 normal size and line width thickness 2x
plot(baby$Age, baby$Weight, typ="o", pch=15,
cex=1.5, lwd=2)
#y axis limits to 2-10kg
plot(baby$Age, baby$Weight, typ="o", pch=15,
cex=1.5, lwd=2, ylim=c(2,10))
#x label to be Age and Y to be Weight
plot(baby$Age, baby$Weight, typ="o", pch=15,
cex=1.5, lwd=2, ylim = c(2,10),
xlab = "Age", ylab = "Weight")
#title at top
plot(baby$Age, baby$Weight, typ="o", pch=15,
cex=1.5, lwd=2, ylim = c(2,10),
xlab = "Age (months)", ylab = "Weight (kg)",
main = "Baby Weights")
#silly example of 'pch' plot character and 'cex' size
plot(1:5, cex=1:5, pch=1:5)
#BAR PLOT of feature_counts file separated by 'tab'
mouse <- read.table("bimm143_05_rstats/feature_counts.txt",
sep = "\t", header = TRUE)
#View(mouse)
#BarPlot of Mouse Count
barplot(mouse$Count)
#Make it horizontal and blue :)
barplot(mouse$Count,
horiz = TRUE, col = "lightblue")
#Add Names
barplot(mouse$Count,
horiz = TRUE, col = "lightblue",
names.arg = mouse$Feature, las = 1)
#par
par(mar=c(5,10,0,1))
barplot(mouse$Count,
horiz = TRUE, col= "lightblue",
names.arg = mouse$Feature, las = 1,)
par(mar=c(5,4,2,2))
plot(1:10)
#Rainbow colors
mf <- read.delim("bimm143_05_rstats/male_female_counts.txt")
barplot(mf$Count, names.arg=mf$Sample,
col=rainbow(nrow(mf)), las =2, ylab= "Counts")
#Different female and male colors
barplot(mf$Count, names.arg=mf$Sample,
col=c("blue2", "red2"), las =2, ylab= "Counts")
#Coloring by Value
genes <- read.delim("bimm143_05_rstats/up_down_expression.txt")
#Shows down regulated, unchanging and upregualted (STATE COL)
table(genes$State)
plot(genes$Condition1, genes$Condition2, col=genes$State)
#palette colors: black, red, green3, blue, cyan, magenta, yellow, gray
palette()
|
69bf62641eb2345adc671c34c7082814f08b32c0 | 0dff99bef342692fe65bea93c39d5836aea5d862 | /R/PAS-Final/UI/4.3.ui_explsavedmodel.R | fe44d4dc3684346961c7db968f0a46ddeb2d5b15 | [] | no_license | michaeljohannesmeier/coding-examples | e45ff844a20808cb7478ea3751caadf7134547ce | a1a2d54535284169cda51f2705ab77dbf4ec202d | refs/heads/master | 2022-11-25T09:12:14.919019 | 2018-06-20T07:29:29 | 2018-06-20T07:29:29 | 138,684,834 | 0 | 1 | null | 2022-11-22T17:19:21 | 2018-06-26T04:30:07 | JavaScript | UTF-8 | R | false | false | 1,137 | r | 4.3.ui_explsavedmodel.R | tab3save<-
# Tab "explsavedmodel": displays the regression model the user stored earlier.
# The content is wrapped in conditionalPanel()s so it only appears once a
# model has been saved (input.storeReg / input.storeRegAuto).
tabItem(tabName = "explsavedmodel",
conditionalPanel(condition = "input.storeReg || input.storeRegAuto", box(
title = "Stored regression model",
collapsible = TRUE,
status = "warning",
solidHeader = TRUE,
width = 15,
# Heading and interactive time-series plot of the stored model.
conditionalPanel(condition = "input.storeReg || input.storeRegAuto", strong(h2("Saved model", align = "center"))),
conditionalPanel(condition = "input.storeReg || input.storeRegAuto", dygraphOutput("storedModel")),
br(),
br(),
# Three side-by-side summary tables for the stored model.
fluidRow(
box(
width = 4,
title = "Coefficient evaluation",
# NOTE(review): output id "storeCoefficent" looks misspelled, but it must
# match the server-side name, so it is left unchanged here.
tableOutput("storeCoefficent")
),
box(
width = 4,
title = "Quality of regression",
tableOutput("storeQuality")
),
box(
width = 4,
title = "Drivers and time lags",
tableOutput("storeLag")
)
)
))
)
f1bef8eabfe96ce70b216d1c9a99f2391290e9d8 | 395daa1ec2a5f403dda8d65cfa88d5280afe2665 | /man/varkernelslice.Rd | a2ecb9fb90543e27a300341d9d2ee35d6454444e | [] | no_license | CWWhitney/uncertainty | 4f0cdc86b453e5fbe48767d34d8db435c6979b63 | 0e6a25ba719a59f3104fd8c716d2aff3923f79be | refs/heads/main | 2023-04-07T22:04:54.398851 | 2022-06-13T20:52:49 | 2022-06-13T20:52:49 | 374,667,187 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,937 | rd | varkernelslice.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/varkernelslice.R
\name{varkernelslice}
\alias{varkernelslice}
\title{Estimated outcome variable values given the influencing variable,
based on a slice of 'z' from the kernel density plot of the variable and out_var data.}
\usage{
varkernelslice(
in_var,
out_var,
expectedin_var,
n = 100,
ylab = "Relative probability",
xlab = "Output values for the given influence variable values"
)
}
\arguments{
\item{in_var}{is a vector of observations of a given influencing variable corresponding to another list with observed values of an outcome variable {out_var}.}
\item{out_var}{is a vector of observed values of an outcome variable corresponding to another list with observations of a given influencing variable {in_var}.}
\item{expectedin_var}{is the expected value of the input variable for which the outcome variable {out_var} should be estimated.}
\item{n}{is the number of grid points in each direction. Can be scalar or a length-2 integer vector (passed to the kde2d kernel density function of the MASS package).}
\item{ylab}{is a label for the relative probability along the cut through the density kernel on the y axis, the default label is "Relative probability".}
\item{xlab}{is a label for the influencing variable {in_var} on the x axis, the default label is "Influencing variable".}
}
\description{
Plot representing probabilities (shown along the y-axis) for the expected value of the outcome variable (shown along the x-axis).
This is a cut through the density kernel from uncertainty::varkernel() function, which integrates to 1, the probability values are relative, not absolute measures.
}
\examples{
in_var <- sample(x = 1:50, size = 20, replace = TRUE)
out_var <- sample(x = 1000:5000, size = 20, replace = TRUE)
varkernelslice(in_var, out_var, expectedin_var = 10)
}
\keyword{density}
\keyword{influence}
\keyword{kernel}
|
316758156c35b86be8b2ac5d1d4d26cf981fd878 | 8c2f1d0430257cc9386ef2a47e22c46fd5e987da | /chao.R | 4997af2740a5aa72766f097ea67bea95e94129eb | [] | no_license | paseycatmore/ucdbirds | 4355a969bf217a84c75761787f11521435c22030 | 206ca8d93b08f14e28be64f6fff5eaada627e57b | refs/heads/master | 2020-05-06T20:31:17.752762 | 2019-04-17T09:13:59 | 2019-04-17T09:13:59 | 180,242,043 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,197 | r | chao.R | # script: estimating chao species richness for each year
# plotting / linear regression
#
# input: "final_long_list.csv"
# long format of all data combined (see master_df.R)
#
# output: "chao_*.csv"
# chao species estimator values
# *entries for 1973 - 2018 if exists
# "all_chao.csv"
# all chao estimate outputs for all years
#
# author: Casey Patmore
# casey.patmore@ucdconnect.ie
#
# date: last modified 10/11/2018
setwd("C:/Users/casey/Desktop/Birds")
library(SpadeR)
library(plyr)
library(dplyr)
library(tidyr)
library(data.table)
library(gdata)
library(ggplot2)
library(vegan)
library(car)
library(coin)
rm(list=ls())
start <- read.csv("final_long_list.csv")
start[start == 0] <- NA
start <- na.omit(start)
start$Record <- as.numeric(start$Record)
start <- split(start, start$Year)
for(item in start){
sample <- length(unique(item$Week))
this_year <- item$Year[1]
#print(this_year)
item$Year <- NULL
item <- rbind(data.frame(Name = "Sample", Week = "All", Record = sample), item)
item$Week <- NULL
item <- item %>%
group_by(Name) %>%
summarise(Record = sum(Record))
value <- nrow(item)
words <- paste(value, "in", this_year)
print(words)
}
## Observed species counts per survey year (taken from the printed output
## of the loop above).
years <- c(1973, 1974, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991,
1992, 1993, 1998, 2018)
species <- c(53, 45, 39, 48, 46, 50, 59, 56, 58, 58, 61, 59, 56, 54, 91, 57)
check <- data.frame(years, species)
## Simple linear trend of richness over time.
library(lme4)
linear <- glm(species ~ years, data = check)
summary(linear)
par(mfrow=c(1,1))
plot(species ~ years, data = check)
abline(linear)
plot(linear)
# Check residual autocorrelation.
#autocorrelation?
durbinWatsonTest(linear)
plot(residuals(linear),type="b")
abline(h=0,lty=3)
acf(residuals(linear))
## Refit with an AR(1) correlation structure to allow for autocorrelated
## errors in the yearly series.
library(nlme)
mdl.ac <- gls(species ~ years, data = check,
correlation = corAR1(form=~years),
na.action=na.omit)
summary(mdl.ac)
plot(fitted(mdl.ac),residuals(mdl.ac))
abline(h=0,lty=3)
qqnorm(mdl.ac)
acf(residuals(mdl.ac,type="p"))
## Compare the two fits by information criteria.
library(MuMIn)
model.sel(linear,mdl.ac)
###
## Second pass over the yearly data: build the incidence-frequency vector
## (sampling-unit count first, then per-species incidence counts), run the
## Chao species-richness estimator, and write row 3 of the estimator table
## (with the year attached) to "chao_<year>.csv".
for(item in start){
sample <- length(unique(item$Week))
this_year <- item$Year[1]
print(this_year)
item$Year <- NULL
item <- rbind(data.frame(Name = "Sample", Week = "All", Record = sample), item)
item$Week <- NULL
item <- item %>%
group_by(Name) %>%
summarise(Record = sum(Record))
chao <- ChaoSpecies(item$Record,"incidence_freq", k = 10, conf = 0.95)
chao <- chao$Species_table[3, ]
chao$Year <- this_year
file = paste("chao_", this_year, ".csv", sep="")
write.csv(chao, file = file)
}
## Worked vegan examples (from the specpool/poolaccum help pages), used to
## sanity-check the richness estimators on the bundled dune/BCI datasets.
library(vegan)
data(dune)
data(dune.env)
attach(dune.env)
# Extrapolated species-pool estimates per management class.
pool <- specpool(dune, Management)
pool
op <- par(mfrow=c(1,2))
boxplot(specnumber(dune) ~ Management, col="hotpink", border="cyan3",
notch=TRUE)
# Observed richness as a fraction of the estimated pool.
boxplot(specnumber(dune)/specpool2vect(pool) ~ Management, col="hotpink",
border="cyan3", notch=TRUE)
par(op)
data(BCI)
## Accumulation model
pool <- poolaccum(BCI)
summary(pool, display = "chao")
plot(pool)
## Quantitative model
estimateR(BCI[1:5,])
## Collect the per-year Chao estimator tables into one data frame.
rm(list=ls())
#using 1973 as a jumping off point template
all_chao <- read.csv("chao_1973.csv", strip.white = TRUE)
#append all other years next (only years whose output file exists)
chao_years <- c(1974,1982:1993,1998:2018)
for (year in chao_years){
filename = paste("chao_", year, ".csv", sep="")
if (file.exists(filename)){
chao <- read.csv(filename, strip.white = TRUE)
all_chao <- rbind.fill(all_chao, chao)
#print(filename)
}
}
write.csv(all_chao, file = "all_chao.csv" ,row.names=FALSE)
## The first column holds the estimator name (e.g. "Chao2-bc").
colnames(all_chao)[1]<-"Test"
all_chao$Test <- trim(all_chao$Test)
#all_chao$Test=="iChao2 (Chiu et al. 2014)"
## Keep the bias-corrected Chao2 rows.
chao_test <- subset(all_chao, Test == "Chao2-bc")
#or any other test
## Linear trend of estimated richness over time.
library(lme4)
linear <- glm(Estimate ~ Year, data = chao_test)
summary(linear)
par(mfrow=c(1,1))
plot(Estimate ~ Year, data = chao_test)
abline(linear)
plot(linear)
#autocorrelation?
durbinWatsonTest(linear)
#hooray, they're independent
#try to do clustering estimates?
#rm(list=ls())
#start <- read.csv("final_long_clusters.csv")
#start[start == 0] <- NA
#start <- na.omit(start)
#start <- split(start, start$Year)
#sample_year <- data.frame()
#for(item in start){
# sample <- length(unique(item$Week))
# this_year <- item$Year[1]
# sample_year <- rbind(sample_year, c(this_year, sample))
#}
#colnames(sample_year)[1] <- "Year"
#colnames(sample_year)[2] <- "Record"
#c_start <- read.csv("final_long_clusters.csv")
#c_start[c_start == 0] <- NA
#c_start <- na.omit(c_start)
#c_start <- split(c_start, c_start$clusters)
#for(item in c_start){
# this_cluster <- item$clusters[1]
# item$clusters <- NULL
# item$Week <- NULL
# item <- split(item, item$Year)
# for(item in item){
# tryCatch({
# this_year <- item$Year[1]
# this_sample <- filter(sample_year, Year %in% this_year)
# item$Week <- NULL
# item <- rbind(data.frame(Name = "Sample", Year = this_year, Record = this_sample$Record), item)
# item <- item %>%
# group_by(Name) %>%
# summarise(Record = sum(Record))
# chao <- ChaoSpecies(item$Record,"incidence_freq", k = 10, conf = 0.95)
# chao <- chao$Species_table
# chao$Year <- this_year
# chao$cluster <- this_cluster
# # }, error=function(e){})
# file = paste("chao_cluster", this_cluster, "_", this_year, ".csv", sep="")
# write.csv(chao, file = file)
# }, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
# }
#}
#rm(list=ls())
#doesn't work unfortunately
#guess we're doing proportions
## Proportion of each year's species pool that is found in each spatial
## cluster, plus a table of per-cluster species counts by year.
rm(list = ls())
start <- read.csv("final_long_clusters.csv")
start[start == 0] <- NA
start <- na.omit(start)
start <- split(start, start$Year)
clusters <- c(1, 2, 3, 4)
clusters_per_year <- data.frame()
for (item in start) {
  # Species richness for the whole year...
  species <- length(unique(item$Name))
  this_year <- item$Year[1]
  for (cluster in clusters) {
    # ...and for the subset of records falling in this cluster
    # (`clusters` inside filter() resolves to the data column).
    year_cluster <- filter(item, clusters %in% cluster)
    cluster_species <- length(unique(year_cluster$Name))
    clusters_per_year <- rbind(clusters_per_year, c(this_year, species, cluster_species))
  }
}
colnames(clusters_per_year)[1] <- "Year"
colnames(clusters_per_year)[2] <- "total_Species"
colnames(clusters_per_year)[3] <- "cluster_Species"
# The inner loop always emits clusters 1..4 in order, so the label column
# can be recycled down the frame.
clusters_per_year$cluster <- rep(cbind(1, 2, 3, 4))
clusters_per_year$proportions <- (clusters_per_year$cluster_Species / clusters_per_year$total_Species)
# Per-cluster trends over time, bounded to [0, 1].
ggplot(data = clusters_per_year, aes(x = Year, y = proportions, col = as.factor(cluster))) +
  geom_point() +
  geom_smooth(method = 'lm', se = FALSE) + theme_minimal() +
  coord_cartesian(ylim = c(0, 1))
# Stacked proportions per year.
ggplot(clusters_per_year, aes(x = Year, y = proportions, fill = cluster)) +
  geom_bar(stat = "identity") + theme_minimal()
# Does the proportion differ between clusters?
multiple <- lm(proportions ~ as.factor(cluster), data = clusters_per_year)
summary(multiple)
plot(multiple)
###########
## Wide table: rows = years, columns = clusters, values = species counts.
years <- unique(clusters_per_year$Year)
## FIX: `paste(years[1:length(years)])` was an obfuscated (and empty-unsafe)
## way of converting to character.
years <- as.character(years)
frame <- data.frame(matrix(nrow = length(years), ncol = 4))
rownames(frame) <- years
colnames(frame) <- clusters
## FIX: seq_len() instead of 1:nrow(), which misbehaves on an empty table.
for (row in seq_len(nrow(clusters_per_year))) {
  print(row)
  year <- clusters_per_year$Year[row]
  cluster <- clusters_per_year$cluster[row]
  value <- clusters_per_year$cluster_Species[row]
  frame[paste(year), cluster] <- value
  print(frame[paste(year), cluster])
}
## Shannon diversity of the cluster-by-year species-count table.
shannon <- diversity(frame)
shannon <- as.data.frame(shannon)
shannon$year <- years
## One box per year (year is still a character vector here).
ggplot(shannon, aes(x=year, y=shannon)) +
  geom_boxplot()
## FIX: `shan` (and, below, a numeric `year`) were originally created
## *after* the histogram and the glm() that use them, so those calls
## failed. Derive `shan` before its first use.
shannon$shan <- as.numeric(shannon$shannon)
ggplot(shannon, aes(x=shannon)) + geom_histogram() +
  stat_function(fun = dnorm, args = list(mean = mean(shannon$shan), sd = sd(shannon$shan)))
## FIX: year must be numeric for the linear trend over time.
shannon$year <- as.numeric(shannon$year)
## Linear trend in diversity across years.
linear <- glm(shan ~ year, data = shannon)
summary(linear)
par(mfrow=c(1,1))
plot(shan ~ year, data = shannon)
abline(linear)
##################
## Residual diagnostics for the diversity trend model.
par(mfrow=c(2,2))
plot(linear)
par(mfrow=c(1,1))
plot(residuals(linear))
plot(residuals(linear),type="b")
abline(h=0,lty=3)
acf(residuals(linear))
## GLS with an AR(1) error structure.
## NOTE(review): this refits on chao_test (Estimate ~ Year), while `linear`
## above models shan ~ year — model.sel() below therefore compares models
## fitted to different responses/data; confirm that is intended.
linear.ac <- gls(Estimate ~ Year, data = chao_test,
correlation = corAR1(form=~Year),
na.action=na.omit)
summary(linear.ac)
coef(linear)
coef(linear.ac)
plot(fitted(linear.ac),residuals(linear.ac))
abline(h=0,lty=3)
qqnorm(linear.ac)
acf(residuals(linear.ac,type="p"))
## AICc-based model comparison.
library(MuMIn)
model.sel(linear,linear.ac)
|
d26f0ad6bfa81905722fe4eeb847b87a8965755c | 7cb043b3f0b35232b5c16b92c8c7ef9354d20066 | /Salary_hike.R | 1637c7af8ef359916588de4c74a6d2aa33be6197 | [] | no_license | surajbaraik/Simple-Linear-Regression--Salary-Hike-Data--R-code | 3796e08460a09f0c78eb3c7247058f3eba57c463 | bab11c23b4be202c09512b9775d98ba49706fbfe | refs/heads/master | 2022-11-22T02:33:13.303254 | 2020-07-09T07:00:56 | 2020-07-09T07:00:56 | 278,270,076 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,723 | r | Salary_hike.R | ######
######
######
##### Assignment Question no 4, churn-out rate: predict churn based on salary hike.
###### Y (output) is Salary and X (input) is YearsExperience.
salaryhike <- read.csv(file.choose())
attach(salaryhike)
View(salaryhike)
summary(salaryhike)
## Scatter plot: positive direction, strong linear relationship.
plot(YearsExperience, Salary)
cor(YearsExperience, Salary)
## Model 1: simple linear regression, Salary ~ YearsExperience.
Smodel1 <- lm(Salary ~ YearsExperience)
Smodel1
summary(Smodel1)
predict(Smodel1)
Smodel1$residuals
confint(Smodel1, level = 0.95)
predict(Smodel1, interval = "confidence")
Srmse <- sqrt(mean(Smodel1$residuals^2))
Srmse
####### Model 2: log-transformed predictor, Salary ~ log(YearsExperience).
## Scatter plot: positive direction, moderate strength.
plot(log(YearsExperience), Salary)
cor(log(YearsExperience), Salary)
Smodel2 <- lm(Salary ~ log(YearsExperience))
summary(Smodel2)
Srmse2 <- sqrt(mean(Smodel2$residuals^2))
Srmse2
##### Model 3: exponential model, log(Salary) ~ YearsExperience.
plot(YearsExperience, log(Salary))
cor(YearsExperience, log(Salary))
Smodel3 <- lm(log(Salary) ~ YearsExperience)
summary(Smodel3)
log_S <- predict(Smodel3, interval = "confidence")
log_S
## Back-transform the log-scale predictions to the salary scale.
Exp_sal <- exp(log_S)
## FIX: residuals are observed Salary minus the fitted salary — the
## original subtracted the whole prediction matrix from YearsExperience,
## which is meaningless.
S_err <- Salary - Exp_sal[, "fit"]
S_err
Srmse3 <- sqrt(mean(S_err^2))
Srmse3
######## Model 4: quadratic (polynomial) model on the log scale.
## FIX: the original refit the plain linear model here and then exp()'d
## salary-scale predictions; a squared term on log(Salary) is what makes
## this a polynomial fit that the back-transform below makes sense for.
Smodel4 <- lm(log(Salary) ~ YearsExperience + I(YearsExperience^2))
summary(Smodel4)
confint(Smodel4, level = 0.95)
S_logres <- predict(Smodel4, interval = "confidence")
Spoly <- exp(S_logres)
Spoly
err_Spoly <- Salary - Spoly[, "fit"]
err_Spoly
Srmse4 <- sqrt(mean(err_Spoly^2))
Srmse4
a953ec5e4f5455d2a8f4a2539d8fe1ee2de3e361 | 1eb6887eb32b68af1df571bfa06915609491debf | /man/save_summary.Rd | fe8a5e6afa99d81c8682ea6d268834ad454fc330 | [
"MIT"
] | permissive | PiermattiaSchoch/data.validator | 84c5d0f1421300d1701acc7fea5056bb72e448eb | 9db21c2af724fbcd35b7d41fbed59373dd3d2774 | refs/heads/master | 2022-12-26T00:25:32.179671 | 2020-10-05T09:45:41 | 2020-10-05T09:45:41 | 297,728,214 | 0 | 0 | MIT | 2020-09-22T17:51:42 | 2020-09-22T17:51:41 | null | UTF-8 | R | false | true | 688 | rd | save_summary.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validator.R
\name{save_summary}
\alias{save_summary}
\title{Save simple validation summary in text file}
\usage{
save_summary(
validator,
file_name = "validation_log.txt",
success = TRUE,
warning = TRUE,
error = TRUE
)
}
\arguments{
\item{validator}{Validator object that stores validation results.}
\item{file_name}{Name of the resulting file (including extension).}
\item{success}{Should success results be presented?}
\item{warning}{Should warning results be presented?}
\item{error}{Should error results be presented?}
}
\description{
Saves \code{print(validator)} output inside text file.
}
|
58d2f14e0bbfddc195fede5e7a650f6231fcf323 | ec2d6f790c243428084c6c8f708955e31129a431 | /R/polyNew.R | 8d3422e7fa246ae17a2f105b34b45fcf900d47f1 | [] | no_license | jaropis/shiny-tools | a221a279c600ca46d3f73620dab80018329579fa | b3d4fdda883585e562d030adf8ac307907d5e8d7 | refs/heads/master | 2023-03-15T03:53:49.461990 | 2021-03-20T12:08:37 | 2021-03-20T12:08:37 | 220,004,701 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,411 | r | polyNew.R | ### This file is part of PCSS's Run Test suite.
### Run Test is free software: you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation, either version 3 of the License, or
### (at your option) any later version.
### Run Test is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with Run Test. If not, see <http://www.gnu.org/licenses/>.
### Run Test is free software: you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation, either version 3 of the License, or
### (at your option) any later version.
### Run Test is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with Run Test. If not, see <http://www.gnu.org/licenses/>.
#' this is just the poly function without the safeguards on the degree of polyomial, which some consider a bug - in this way I am able to fit the polynomial of order n-1 (n - number of datapoints)
#'
#' @param x-vector and polynomial degree
#' @return whatever poly returns
#' @export
polyNew <- function (x, ..., degree = 1, coefs = NULL, raw = FALSE)
{
dots <- list(...)
if (nd <- length(dots)) {
if (nd == 1 && length(dots[[1L]]) == 1L)
degree <- dots[[1L]]
else return(polym(x, ..., degree = degree, raw = raw))
}
if (is.matrix(x)) {
m <- unclass(as.data.frame(cbind(x, ...)))
return(do.call("polym", c(m, degree = degree, raw = raw)))
}
if (degree < 1)
stop("'degree' must be at least 1")
if (anyNA(x))
stop("missing values are not allowed in 'poly'")
n <- degree + 1
if (raw) {
Z <- outer(x, 1L:degree, "^")
colnames(Z) <- 1L:degree
attr(Z, "degree") <- 1L:degree
class(Z) <- c("poly", "matrix")
return(Z)
}
if (is.null(coefs)) {
xbar <- mean(x)
x <- x - xbar
X <- outer(x, seq_len(n) - 1, "^")
QR <- qr(X)
z <- QR$qr
z <- z * (row(z) == col(z))
raw <- qr.qy(QR, z)
norm2 <- colSums(raw^2)
alpha <- (colSums(x * raw^2)/norm2 + xbar)[1L:degree]
Z <- raw/rep(sqrt(norm2), each = length(x))
colnames(Z) <- 1L:n - 1L
Z <- Z[, -1, drop = FALSE]
attr(Z, "degree") <- 1L:degree
attr(Z, "coefs") <- list(alpha = alpha, norm2 = c(1,
norm2))
class(Z) <- c("poly", "matrix")
}
else {
alpha <- coefs$alpha
norm2 <- coefs$norm2
Z <- matrix(, length(x), n)
Z[, 1] <- 1
Z[, 2] <- x - alpha[1L]
if (degree > 1)
for (i in 2:degree) Z[, i + 1] <- (x - alpha[i]) *
Z[, i] - (norm2[i + 1]/norm2[i]) * Z[, i - 1]
Z <- Z/rep(sqrt(norm2[-1L]), each = length(x))
colnames(Z) <- 0:degree
Z <- Z[, -1, drop = FALSE]
attr(Z, "degree") <- 1L:degree
attr(Z, "coefs") <- list(alpha = alpha, norm2 = norm2)
class(Z) <- c("poly", "matrix")
}
Z
}
|
1f2d2331b95b04bfa012569e927d0ee7ba369630 | 96e20fc83c84ee1429f8c7383f1bcc9428a312b3 | /man/PredictDiag.Rd | 7367e34538544d15cae0dc48e0b14f77597c9d2e | [
"MIT"
] | permissive | ManuelPerisDiaz/VariantsID | 61762fa88a81b937c0a3ce74165bfce0eaffe731 | d9b307248162821db2b96116ed255e2326c5d8a0 | refs/heads/master | 2023-08-26T21:26:32.119405 | 2021-11-02T00:35:36 | 2021-11-02T00:35:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 513 | rd | PredictDiag.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PredictDiag.R
\name{PredictDiag}
\alias{PredictDiag}
\title{PredictDiag}
\usage{
PredictDiag(WT, WT_ref, diag_ref, Hbvarinats)
}
\arguments{
\item{WT}{sequence of wild-type(HbA beta) protein .fasta file}
\item{WT_ref}{reference list of fragments for wild-type protein (HbA beta)}
\item{diag_ref}{possible diagnostic ions for each AA of Hba beta}
\item{Hbvarinats}{sequences of Hb variants .fasta file}
}
\description{
PredictDiag
}
|
bb122d2fe075a376000478ba88f65b0a73d4c5c6 | 490e2f2fbbf6900917b7bc97a1e0ee516b36626e | /src/supply_power.R | 9497b2cce23b49d4d4c5b0c78bc0b31d44697b49 | [] | no_license | ajenningsfrankston/renewable_spot_market | f409f67adcf7bf23581e9aaf7967dbba450fcc5e | f93fefb95ac0734ee04ff8bd59d95cd75eca96b3 | refs/heads/master | 2021-01-20T16:55:15.457229 | 2017-07-03T01:54:07 | 2017-07-03T01:54:07 | 95,732,806 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 570 | r | supply_power.R | supply_power <- function(surplus,storage,hydro,capacity) {
# surplus : supply - demand at time
# storage : energy store
# hydro : last resort energy source (hydro dams )
if (surplus >= 0) {
storage <- storage + surplus
if (storage > capacity ) { storage <- capacity }
}
else {
if (storage > (-surplus)) {
storage <- storage + surplus
surplus <- 0
}
else {
surplus <- surplus + storage
storage <- 0
hydro <- -surplus
surplus <- 0
}
}
t = list(Storage=storage,Hydro=hydro)
t
} |
14cb5005f9b8d4f4f4871b1f081137a08f98997b | d765b7e59f28eaed696c959592804b3557a32374 | /cachematrix.R | 94ab9b1dbe9a0b2e7fda5fa1bd024b76cba2f3aa | [] | no_license | DataSciFi/ProgrammingAssignment2 | 8c80fe230e29e9dfc978eeb73c3d8875a100db1c | ca1ce06033814c24b19cc95b49ee3bdbc7001980 | refs/heads/master | 2021-01-19T19:04:57.108208 | 2014-04-15T15:46:26 | 2014-04-15T15:46:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,081 | r | cachematrix.R | ## makeCacheMatrix and cacheSolve allow to reuse calculated inverse matrix
## by means of caching it in a closure
##
## Closure to save matrix along with its inverse
makeCacheMatrix <- function(x = matrix()) {
## Inverse matrix holder
i <- NULL
## Setter for matrix
set <- function(y) {
x <<- y
i <<- NULL
}
## Getter for matrix
get <- function() x
## Setter for inverse matrix
setInverse <- function (inverse) i <<- inverse
## Getter for inverse matrix
getInverse <- function () i
list(set = set, get = get, getInverse = getInverse, setInverse = setInverse)
}
## Calculates inverse matrix
## Before calculation tries to reuse cached inverse matrix
cacheSolve <- function(x, ...) {
## Try to reuse cached inverse matrix
m <- x$getInverse()
## If there cached inverse matrix present, return it
if(!is.null(m)) {
message("getting cached data")
return(m)
}
## If there is no cached inverse matrix, do inverse and save result to cache
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
##########################################################################
## Test
##########################################################################
##
## PREPARE ##########################
##provide a 3x3 matrix
m <- matrix(c(1,2,3,6,0,4,7,8,9),3,3)
#
#create a "cache"
cache <- makeCacheMatrix()
#
#set the matrix value of the cache
cache$set(m)
##
## CROSSCHECK #######################
##
#crosscheck 1: same matrix in cache than m?
m2<-cache$get()
if(!identical(m, m2)) {
stop("Matrix is not the same")
}
#
#crosscheck 2: at this point the cached inverse must be null
iCache<-cache$getInverse()
if(!is.null(iCache)) {
stop("Inverse must be null")
}
##
## SOLVE ###########################
##
#now solve first time
s1 <- cacheSolve(cache)
#
#solve second time
s2 <- cacheSolve(cache)
#
#check: s1 and s2 should be identical
if(!identical(s1,s2)) {
stop("Both inverse computations must be the same")
}
|
0323433f96fa1ee44c636b599c12190452745264 | d1d2ff8b8541f500998082424c4b22e9ee586ef0 | /R/UserCommands.R | 6c0dfc7434eba64306f1cf3f3484507f354d527e | [
"Apache-2.0"
] | permissive | rfherrerac/Capr | f0e6741582eac17be1db75effc7dc749fde31660 | 270ec6c47ae921813445f50b639f50be1808f587 | refs/heads/main | 2023-03-05T11:17:20.724152 | 2021-02-03T16:39:25 | 2021-02-03T16:39:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,580 | r | UserCommands.R | # Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of Capr
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
######
#UI Functions
#' Function to save component
#'
#' This function saves the component as a json file. The component is converted from s4 to s3 to
#' fit the jsonlite function
#' @param x the component you wish to save
#' @param saveName a name for the function you want to save
#' @param savePath a path to a file to save. Default is the active working directory
#' @return no return in r. json file written to a save point
#' @include LowLevelSaveFn.R
#' @export
saveComponent <- function(x, saveName, savePath = getwd()){
  # Serialise the component's save-state to pretty-printed JSON and write it
  # to <savePath>/<saveName>.json. Invisibly returns the save-state object.
  saved_state <- saveState(x)
  json_text <- jsonlite::toJSON(saved_state, pretty = TRUE, auto_unbox = TRUE)
  target_file <- file.path(savePath, paste0(saveName, ".json"))
  write(json_text, file = target_file)
  invisible(saved_state)
}
#' Function to load component
#'
#' This function loads the component from a json file to its s4 componentclass
#' @param path a path to the file we wish to load
#' @return returns a component
#' @include LowLevelLoadFn.R
#' @importFrom jsonlite read_json
#' @export
loadComponent <- function(path){
  # Read a component JSON file (written by saveComponent) and rebuild the
  # corresponding s4 component object.
  raw_json <- jsonlite::read_json(path)
  as.ComponentLoad(raw_json)
}
##################-------------read json Cohort-----------------##################
#' Function to read in a circe json
#'
#' This function reads a circe json an builds the cohort definition in an execution space
#' @template Connection
#' @template VocabularyDatabaseSchema
#' @template OracleTempSchema
#' @param jsonPath a path to the file we wish to import
#' @param returnHash if true returns a has table with all components necessary to build the
#' cohort definition including the cohort definition
#' @return returns the cohort definition
#' @include LowLevelBuildLangFn.R
#' @importFrom jsonlite read_json
#' @importFrom purrr map
#' @importFrom magrittr %>%
#' @export
readInCirce <- function(jsonPath,
                        connectionDetails,
                        connection = NULL,
                        vocabularyDatabaseSchema = NULL,
                        oracleTempSchema = NULL,
                        returnHash = FALSE){
  # NOTE(review): the `connection` argument is never used to connect below (it
  # is only rm()'d from the hash at the end); confirm whether an existing
  # connection was meant to be reused instead of always creating a new one.
  cohort <- jsonlite::read_json(jsonPath) #read in json from file path
  dbConnection <- createDatabaseConnectionLang(connectionDetails = connectionDetails,
                                               vocabularyDatabaseSchema = vocabularyDatabaseSchema,
                                               oracleTempSchema = oracleTempSchema)
  # NOTE(review): getCohortDefinitionCall(cohort) is invoked twice on the same
  # cohort; storing its result once would avoid duplicated work.
  cohortBuild <- getCohortDefinitionCall(cohort)$createCDCall #get the functions needed to build the cohort
  cohortCaller <- getCohortDefinitionCall(cohort)$CohortCall # get the caller function to make final cohort
  cohortBuild <- Filter(Negate(is.null),cohortBuild) #remove null spaces
  exeEnv <- new.env() #create an execution environemnt
  for (i in seq_along(dbConnection)){
    eval(dbConnection[[i]], envir = exeEnv) #run connection setup in execution environment
  }
  for(i in seq_along(cohortBuild)){ #for each item in list
    purrr::map(cohortBuild[[i]], ~eval(.x, envir = exeEnv)) #evaluate expression in execution environment
  }
  DatabaseConnector::disconnect(exeEnv$connection) #disconnect
  eval(cohortCaller, envir = exeEnv) #evaluate the cohort Caller in the execution environemnt
  if (returnHash) {
    # Strip the connection bookkeeping so the returned environment only holds
    # the components needed to rebuild the cohort definition.
    rm(connection, connectionDetails, vocabularyDatabaseSchema, oracleTempSchema,
       envir = exeEnv)
    ret <- exeEnv
  } else {
    ret <- exeEnv$CohortDefinition #if return Hash is false returns the cohort definition
  }
  return(ret) #return the cohort definition as CAPR object
}
##############--------write R function calls -----------###################
#' Function to write capr calls from a circe json
#'
#' This function writes the CAPR calls used to build the cohort definition
#' defined in the circe json . The ouput is a txt file with executable R language
#' @param jsonPath a path to the file we wish to import
#' @param txtPath a path to the txt file we wish to save
#' @return no return but saves the CAPR calls to build a cohort in a txt file
#' @include LowLevelBuildLangFn.R
#' @importFrom jsonlite read_json
#' @importFrom purrr map
#' @export
#' (see roxygen block above) Writes the CAPR calls that rebuild the cohort
#' defined in a circe json to an executable-R txt file.
writeCaprCall <- function(jsonPath, txtPath){
  cohort <- jsonlite::read_json(jsonPath) #read in json from file path
  dbConnection <- unlist(createDatabaseConnectionLang(), use.names = FALSE) #use dummy credentials
  cohortBuilder <- getCohortDefinitionCall(cohort) #build cohort definition call
  tt <- unlist(cohortBuilder, use.names = FALSE) #unlist list
  sink(txtPath) #redirect console output to the txt file
  # FIX: guarantee the sink is closed even if a print() below errors; the
  # original only called sink() at the end, which would leave all subsequent
  # console output silently redirected to the file on failure.
  on.exit(sink(), add = TRUE)
  for (i in seq_along(dbConnection)){
    print(dbConnection[[i]]) #print through dummy credentials
  }
  for (i in seq_along(tt)){
    print(tt[[i]]) #print through loop of R language calls
  }
  invisible(NULL)
}
########------compile cohort definition------------#################
#' Convert cohort definition object to CIRCE and run through circe compiler
#'
#' This function converts a Cohort Definition class object to a CIRCE expression, creates the json and compiles the
#' circe json to create ohdisql to run queries against a dbms containing OMOP cdm data
#'
#' @param CohortDefinition input cohort Definition class object
#' @param generateOptions the options for building the ohdisql using CirceR::createGenerateOptions
#' @include LowLevelCoercionFn.R
#' @importFrom CirceR cohortExpressionFromJson cohortPrintFriendly buildCohortQuery
#' @importFrom RJSONIO toJSON
#' @return A three tiered list containing the the circe json, a text read and ohisql.
#' If an error occurs the ohdisql slot will be NA and the user should review the circe cohort definition for potential errors.
#' @export
compileCohortDefinition <- function(CohortDefinition, generateOptions){
  circeS3 <- convertCohortDefinitionToCIRCE(CohortDefinition) #convert cohort definition to circe s3 object
  circeJson <- RJSONIO::toJSON(circeS3)
  circeJson2 <- CirceR::cohortExpressionFromJson(circeJson)
  cohortRead <- CirceR::cohortPrintFriendly(circeJson2) #human-readable description
  ohdisql <- CirceR::buildCohortQuery(circeJson2, generateOptions) #sql for the dbms
  # NOTE(review): the block below is the retired CirceCompileR path, kept for
  # reference; consider deleting it once the CirceR path is settled.
  #old
  #circeJson <-jsonlite::toJSON(circe, pretty=T, auto_unbox = TRUE) #convert circe object to json
  # ohdisql <- tryCatch(CirceCompileR::compile(circeJson), #run circe compiler
  #                     error =function(x) { #if an error occurs in compiler send error message
  #                       #error message
  #                       message("Circe Object can not Compile. Please review circe cohort definition to find error")
  #                       NA_character_#return NA since compilation failed
  #                     })#end try catch error
  #create list with cohort definition converted to circe, circe json and ohdiSQL
  cohortList <- list('circeJson' = circeJson,
                     'cohortRead' = cohortRead,
                     'ohdiSQL' = ohdisql)
  #return cohort list
  return(cohortList)
  #end of function
}
|
d256e56331127fbc8b7a236c13944e0f1b587593 | 2732726961436c9d909473a2b1e86844d64c5684 | /day17_pop_culture/Pop_culture_day17.R | 025d76929ecd226f8a0d4f6bad7c414147639a19 | [] | no_license | Fgazzelloni/rstats-chart-challenge-2021 | 5af0828b12b063888478f9bc22d898766fb9e32c | bdcb2ffe77f1cd3fd426cbe4023bb4e62e5cb586 | refs/heads/main | 2023-09-05T14:01:51.009033 | 2021-11-03T09:31:56 | 2021-11-03T09:31:56 | 354,427,714 | 1 | 1 | null | 2021-04-04T01:04:01 | 2021-04-04T01:04:01 | null | UTF-8 | R | false | false | 4,499 | r | Pop_culture_day17.R |
# Pop Culture day 17 - Data for IHME latest Covid19 projections ----------------
# load libraries ---------------------------
library(viridis)
library(hexbin)
library(tidyverse)
# load data <- <- <- <- <- <- <- <- <- <- <- <- <- <- <- <- <- <-
# latest IHME covid19 projections -------------------------------
library(downloader)
url="https://ihmecovid19storage.blob.core.windows.net/latest/ihme-covid19.zip"
download(url, dest="ihme-covid19_latest.zip", mode="wb")
unzip("ihme-covid19_latest.zip")
# select data sets of interest ------------------------------
df <- read.csv("2021-04-16/best_masks_hospitalization_all_locs.csv")
df3 <- read.csv("2021-04-16/worse_hospitalization_all_locs.csv")
# manipulations -----------------------------------------
my_df_mask <- df%>% select(date,location_name,
confirmed_infections,
#mobility_composite,
total_pop)
my_df_mandate_easing <- df3%>% select(date,location_name,
confirmed_infections)
my_df <- my_df_mask%>% left_join(my_df_mandate_easing,by=c("date","location_name"))
names(my_df)<-c("date","location","infections_UM","population","infections_ME")
my_df_global <-my_df %>% filter(location =="Global")
UM_norm<-rnorm(my_df_global$infections_UM)
ME_norm<-rnorm(my_df_global$infections_ME)
# plotting ----------------------------------------------------
# inspired by:
# http://sape.inf.usi.ch/quick-reference/ggplot2/coord
library(extrafont)
base_family="Arial Rounded MT Bold"
base_size=12
half_line <- base_size/2
main_plot <- ggplot(data.frame(x = UM_norm, y = ME_norm),
aes(x = x, y = y)) +
geom_hex() +
coord_fixed() +
scale_fill_identity() +
labs(title = "Global Covid19 Infections \nUniversal Mask vs Mandate Easing",
caption = "Viz Federica Gazzelloni - DataSource: IHME Covid19 latest projections - Pop Culture day 17",
x = "Universal Mask - Infections projection",
y = "Mandate Easing - Infections projection") +
theme_void() +
theme(line = element_line(colour = "grey85", size = 0.4, linetype = 1, lineend = "round"),
rect = element_rect(fill = "gray88", colour = "grey85", size = 2, linetype = 1),
text = element_text(family = base_family, face = "plain", colour = "white", size = base_size,
lineheight = 0.9, hjust = 0.5, vjust = 0.5, angle = 0, margin = margin(),
debug = FALSE),
axis.line = element_line(colour = "black", size = 0.4, linetype = 1, lineend = "butt"),
axis.text = element_text(size = base_size * 1.1, colour = "black"),
axis.text.x = element_text(margin = margin(t = 0.8 * half_line/2), vjust = 1),
axis.text.y = element_text(margin = margin(r = 0.8 * half_line/2), hjust = 1),
axis.ticks = element_line(colour = "gray94", size = 1.3),
axis.ticks.length = unit(half_line, "pt"),
axis.title = element_text(colour = "red"),
axis.title.x = element_text(margin = unit(c(3.5, 0, 0, 0), "mm"),
vjust = 1, size = base_size * 1.3, face = "bold"),
axis.title.y = element_text(angle = 90, margin = unit(c(0, 3.5, 0, 0), "mm"),
vjust = 1, size = base_size * 1.3, face = "bold"),
panel.background = element_rect(fill = "red", colour = NA),
panel.border = element_rect(colour = "grey71", fill = NA, size =4),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_rect(colour = "gray70"),
plot.title = element_text(color="black",size = base_size * 1.5, hjust = 0, vjust = 0,
face = "bold",
margin = margin(b = half_line * 1),family=base_family),
plot.subtitle = element_text(color="black",size = 8, hjust = 0, vjust = 0,
margin = margin(b = half_line * 0.9)),
plot.caption = element_text(size = 8, hjust = 1, vjust = 1,
margin = margin(t = half_line * 0.9), color = "purple"),
plot.margin = margin(15,15,15,15))
# saving ######################################
ragg::agg_png(here::here("day17_pop_culture", "Pop_culture_day17.png"),
res = 320, width = 14, height = 8, units = "in")
main_plot
dev.off()
|
df256b94b66b245c4d3a36dc48ff772554867949 | 582dd488211c6ecb2b5fdb458d916f50017edf34 | /Project_3_Working.R | 20927b468c4845833ef6f1f4d9ec8ee1a595b19c | [
"Apache-2.0"
] | permissive | tash088/Project-3 | 2aae2f1eed783f7a208291bfe59cf72c9b508fc4 | 0c2560ee8f2e188170a2bc998e105bb0b85d0839 | refs/heads/main | 2023-01-09T19:37:47.136543 | 2020-11-17T13:19:52 | 2020-11-17T13:19:52 | 309,998,094 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 598 | r | Project_3_Working.R | library(tidyverse)
library(readxl)
library(caret)
# FIX: removed a stray interactive `help(predict)` call (opened the help pager
# as a side effect of sourcing) and the no-op `creditData$PAY_0 <- creditData$PAY_0`.
# Load the credit-card default data and keep only the columns used downstream.
creditData <- read_excel("creditCardData.xlsx", col_names = TRUE)
# Give the outcome a short, syntactic name.
creditData <- rename(creditData, default = `default payment next month`)
creditData <- select(creditData, LIMIT_BAL, SEX, EDUCATION, MARRIAGE, AGE, PAY_0, default)
# Categorical variables must be factors for modelling; PAY_0 stays numeric.
creditData$default <- as.factor(creditData$default)
creditData$SEX <- as.factor(creditData$SEX)
creditData$EDUCATION <- as.factor(creditData$EDUCATION)
creditData$MARRIAGE <- as.factor(creditData$MARRIAGE)
creditData
summary(select(creditData, LIMIT_BAL, EDUCATION, SEX, MARRIAGE, AGE))
|
7463d6b00a91130700d9fec6c1ea1d257033b8b8 | 850860fa27a5acc4aa3907c30b95450039157015 | /cachematrix.R | 0a7f5e98c33462cdf8c79380a7d56b15ad87a467 | [] | no_license | pablorodrig16/ProgrammingAssignment2 | f5c5688eae7b4e3d4e175f1ec3c1592ae2156e0b | 2cbd3b87ac51983fd18177a7613d5df2e9b7fc3e | refs/heads/master | 2021-01-21T18:46:34.679654 | 2015-05-14T05:17:05 | 2015-05-14T05:17:05 | 35,403,493 | 0 | 0 | null | 2015-05-11T05:12:59 | 2015-05-11T05:12:59 | null | UTF-8 | R | false | false | 1,922 | r | cachematrix.R | ## These functions calculate the inverse matrix resulting from
## applying solve() function to an square matrix and store both of them in a list.
## Then if that inverse matrix is needed it can be
## get from de cache without needing to perform calculations again.
## 'makeCacheMatrix' function creates a list containing 4 functions:
## 1. $set(newmatrix): sets a matrix to be used. This function allows to
## easily assign a new matrix to a previously created list for calculation.
## 2. $get() gets the original matrix
## 3. $setSolve(inverse) sets the inverse matrix in the cache
## 4. $getSolve () gets the inverse matrix from the cache
##
## 'x' must be a square and invertible matrix
## Cache container pairing a square matrix with its inverse.
## Accessors: $set/$get for the matrix, $setSolve/$getSolve for the cached
## inverse ($getSolve returns NULL until an inverse has been stored).
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  list(
    set = function(newmatrix) {
      x <<- newmatrix
      inverse_cache <<- NULL  # replacing the matrix clears the stale inverse
    },
    get = function() x,
    setSolve = function(inverse) inverse_cache <<- inverse,
    getSolve = function() inverse_cache
  )
}
## 'cacheSolve' returns the inverse matrix from the original one. When it is called
## for the first time it return the result from applying solve() function to the
## original matrix and set this result on the cache. Therefore when cacheSolve()
## is called with the same data it gets the inverse matrix from the cache.
## 'x' argument must be a list created with 'makeCacheMatrix' function containing
## the original matrix.
## Return the inverse of the matrix stored in `x` (a makeCacheMatrix list).
## Reuses the cached inverse when available (announcing it via message());
## otherwise computes it with solve(), stores it, and returns it.
cacheSolve <- function(x, ...) {
  inverse <- x$getSolve()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setSolve(inverse)
  } else {
    message("getting cached matrix")
  }
  inverse
}
|
88b128624b8b131022d7107fd0a817997919ffce | 585fa751f775731024cd1cff508cbf2f8fe4cdf4 | /eigen_projection_sample.R | 191ad666915953b46efe260fe6985f5d50956269 | [] | no_license | hataloo/fashion_projection | 479ef453a0749d16e2bdcb8c0adcbfd025104cb9 | 5307461dc5e48fe83b2d1dc5efece164ca76b79a | refs/heads/main | 2023-04-15T14:34:24.320602 | 2021-04-23T08:39:21 | 2021-04-23T08:39:21 | 334,989,563 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,808 | r | eigen_projection_sample.R |
#Run data_preparation.R and eigen_projection_preparation.R prior to running this file.
sample_class <- match("Bag", class_names)
sample_index <- 15
projection_class <- match("Bag", class_names)
eigen_length <- c(3,10,25)
sample_eigen_visualization <- function(sample_class, sample_index, projection_class, eigen_length, info){
train_images_by_class <- info[["train_images"]]
class_names <- info[["class_names"]]
train_vectors_by_class <- info[["train_vectors"]]
eigen_class_sp <- info[["eigen_class_sp"]]
class_projection <- info[["class_projection"]]
plotpath <- sprintf("./plots/class_projections/%sOnto%s_%d.pdf",class_names[sample_class],class_names[projection_class],sample_index)
layoutmat <- matrix(c(1,1,2,3), nrow = 2,ncol = 2, byrow = TRUE)
for(i in 1:(length(eigen_length)+3)){layoutmat <- rbind(layoutmat, c(i+3,i+3))}
layout(layoutmat, heights = c(1, rep(1,each = 1+length(eigen_length))))
par(mar = c(0,0,0,0))
plot.new()
text(0.5,0.5,sprintf("%s projected onto %s", class_names[sample_class], class_names[projection_class]), cex = 2)
par(mar = c(1,1,1,1))
image(1:28, 1:28, train_images_by_class[[sample_class]][sample_index,,], col = gray((0:255/(255))), xaxt = "n", yaxt = "n", main = class_names[sample_class])
image(1:28, 1:28, train_images_by_class[[projection_class]][sample_index,,], col = gray((0:255/(255))), xaxt = "n", yaxt = "n", main = class_names[projection_class])
plot(seq(image_size^2), train_vectors_by_class[[sample_class]][sample_index,], type = "l", ylab = "", xlab = "", main = sprintf("Original data for %s",class_names[sample_class] ))
plot(seq(image_size^2), train_vectors_by_class[[sample_class]][sample_index,]-class_image_means[[sample_class]], type = "l", ylab = "", xlab = "", main = sprintf("Centered data for %s",class_names[sample_class] ))
test <- lincomb(class_projection[[sample_class]]$basis, t(class_projection[[sample_class]]$coeff[1,]))
plot(test, type = "l", ylab = "", xlab = "", main = sprintf("Centered Spline-representation for %s",class_names[sample_class] ))
par(mar = c(3,1,3,1))
for (j in 1:length(eigen_length)){
eigenfunctions <- get_eigenfunctions(eigen_class_sp[[sample_class]], eigen_length[j])
eigen_projection_info <- eigen_project_sample(class_projection[[sample_class]]$coeff[sample_index,], eigenfunctions, spect[[j]], eigen_length[j])
eigen_projection_info <- spline_and_eigen_project_sample(train_vectors_by_class[[class_index]][sample_index,] - class_vector_means[[projection_class]], knots, order, eigenfunctions,spect[[class_index]], eigen_length[j])
plot(eigen_projection_info[["proj"]], type = "l",
main = sprintf("Projected onto %s with %d eigenvectors",
class_names[projection_class], eigen_length[j]))
}
}
sample_eigen_visualization(sample_class, sample_index, projection_class, eigen_length,
info = list("train_images" = train_images_by_class,
"class_names" = class_names,
"eigen_class_sp" = eigen_class_sp,
"train_vectors" = train_vectors_by_class,
"class_projection" = class_projection))
max_eigen_coeff <- vector(mode = "list", length(class_names))
for (class_index in 1:length(class_names)){
max_eigen_coeff[[class_index]] <- get_max_eigen_coeff(class_projection[[sample_class]]$coeff[sample_index,], eigen_class_sp[[class_index]], spect[[class_index]],
eigen_length = 30)
}
max_class_index <- which.max(max_eigen_coeff)
max_class <- class_names[max_class_index]
cat(sprintf("Predicted class: %s\nActual class: %s",max_class, class_names[sample_class]))
|
248a42753329706b5f57f300979287c845f605a5 | 0c2c555accd7311bc988ef87c3093055db4eaf25 | /R/exporting_model.R | 6e335c03da5f0127731da4f5470c174565e117e6 | [] | no_license | Geidelberg/cotonou | 1dc491e8f659ba431d2fbbec43ad19934457e7ba | b0cc0c0afae63bad0e2aeb073c60a60928c51c50 | refs/heads/master | 2021-06-05T09:43:07.530294 | 2020-07-27T11:37:32 | 2020-07-27T11:37:32 | 87,826,062 | 0 | 3 | null | 2020-01-15T10:02:21 | 2017-04-10T15:18:39 | R | UTF-8 | R | false | false | 106 | r | exporting_model.R | #' @title The model in odin syntax
#' @export main_model
#' @rdname main_model
#' @name doesthiswork
NULL
|
b0365ba071d27a09be2b6a7219b6d439240255b9 | 7a0e33100081ba37fb5846ace638d46fd1ea7c7f | /R/plot_mw.r | 02117a339c25b2f02dacb8516e6d4604f5cf198f | [] | no_license | cran/GmAMisc | 4056f56b41747e5758f24b627d641e740f7f412b | 12eb385a69521f9493d658c61ddeb4894483e8ba | refs/heads/master | 2022-03-07T02:52:44.745241 | 2022-02-23T14:20:02 | 2022-02-23T14:20:02 | 145,911,179 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 5,711 | r | plot_mw.r | #' R function for visually displaying Mann-Whitney test's results
#'
#' The function allows to perform Mann-Whitney test, and to display the test's results in a plot
#' along with two boxplots. For information about the test, and on what it is actually testing, see
#' for instance the interesting article by R M Conroy, "What hypotheses do "nonparametric" two-group
#' tests actually test?", in The Stata Journal 12 (2012): 1-9.\cr
#'
#' The returned boxplots display the
#' distribution of the values of the two samples, and jittered points represent the individual
#' observations.
#'
#' At the bottom of the chart, a subtitle arranged on three lines reports relevant
#' statistics:\cr -test statistic (namely, U) and the associated z and p value;\cr -Probability of
#' Superiority value (which can be interpreted as an effect-size measure, as discussed in:
#' https://nickredfern.wordpress.com/2011/05/12/the-mann-whitney-u-test/);\cr -another measure of
#' effect size, namely r (see
#' https://stats.stackexchange.com/questions/124501/mann-whitney-u-test-confidence-interval-for-effect-size),
#' whose thresholds are indicated in the last line of the plot's subtitle.\cr
#'
#' The function may also
#' return a density plot (coupled with a rug plot at the bottom of the same chart) that displays the
#' distribution of the pairwise differences between the values of the two samples being compared.
#' The median of this distribution (which is represented by a blue reference line in the same chart)
#' corresponds to the Hodges-Lehmann estimator.
#'
#' @param x Object storing the values of the first group being compared.
#' @param y Object storing either the values of the second group being compared or a grouping
#' variable with 2 levels.
#' @param xlabl If y is not a grouping variable, user may want to specify here the name of the x
#' group that will show up in the returned boxplots (default is "x").
#' @param ylabl If y is not a grouping variable, user may want to specify here the name of the y
#' group that will show up in the returned boxplots (default is "y").
#' @param strip Logical value which takes FALSE (by default) or TRUE if the user wants jittered
#' points to represent individual values.
#' @param notch Logical value which takes FALSE (by default) or TRUE if user does not or do want to
#' have notched boxplots in the final display, respectively; it is worth noting that overlapping
#' of notches indicates a not significant difference at about 95 percent confidence.
#' @param omm It stands for overall mean and median; takes FALSE (by default) or TRUE if user
#' wants the mean and median of the overall sample plotted in the chart (as a dashed RED line and
#' dotted BLUE line respectively).
#' @param outl Logical value which takes FALSE or TRUE (by default) if users want the boxplots to
#' display outlying values.
#' @param HL Logical value that takes TRUE or FALSE (default) if the user wants to display the
#' distribution of the pairwise differences between the values of the two samples being compared;
#' the median of that distribution is the Hodges-Lehmann estimator.
#'
#' @keywords mwPlot
#'
#' @export
#'
#' @importFrom plyr count
#' @importFrom coin wilcox_test
#'
#' @examples
#' #create a toy dataset
#' mydata <- data.frame(values=c(rnorm(30, 100,10),rnorm(30, 80,10)),
#' group = as.factor(gl(2, 30, labels = c("A", "B"))))
#'
#' # performs the test, displays the test's result, including jittered points, notches,
#' #overall median and mean, and the Hodges-Lehmann estimator
#' mwPlot(x=mydata$values, y=mydata$group, strip=TRUE, omm=TRUE, notch=TRUE, HL=TRUE)
#'
mwPlot <- function (x,y,xlabl="x",ylabl="y", strip=FALSE,notch=FALSE,omm=FALSE, outl=TRUE, HL=FALSE){
  # Suppress scientific notation for the reported statistics.
  # FIX: the original `options(scipen=999)` permanently changed the user's
  # global options; restore them when the function exits.
  op <- options(scipen = 999)
  on.exit(options(op), add = TRUE)
  # Build a two-column frame: if `y` is not numeric it is treated as a grouping
  # variable for `x`; otherwise x and y are two samples stacked with labels.
  if (!is.numeric(y)) {
    data <- data.frame(value = x, group = y)
  } else {
    data <- data.frame(value = c(x, y),
                       group = c(rep(xlabl, length(x)), rep(ylabl, length(y))))
  }
  res <- wilcox.test(data[, 1] ~ data[, 2], conf.int = TRUE)
  # FIX: reuse the already-fitted test instead of running wilcox.test() twice.
  U <- res$statistic
  # Report exact p only when it is not below the conventional thresholds.
  p <- ifelse(res$p.value < 0.001, "< 0.001",
              ifelse(res$p.value < 0.01, "< 0.01",
                     ifelse(res$p.value < 0.05, "< 0.05", round(res$p.value, 3))))
  print(paste("p-value=", res$p.value))
  samples.size <- plyr::count(data[, 2]) #requires the plyr package
  # Probability of Superiority: U divided by the product of the group sizes.
  PS <- round(U / (samples.size[1, 2] * samples.size[2, 2]), 3)
  z <- round(wilcox_test(data[, 1] ~ data[, 2])@statistic@teststatistic, 3) #requires the coin package
  # Effect size r = |z| / sqrt(total n).
  r <- round(abs(z / sqrt(samples.size[1, 2] + samples.size[2, 2])), 3)
  graphics::boxplot(data[, 1] ~ data[, 2], data = data, notch = notch, outline = outl)
  chart.title <- "Box Plots"
  if (strip == TRUE) {
    stripchart(data[, 1] ~ data[, 2], vertical = TRUE, data = data, method = "jitter",
               add = TRUE, pch = 16, col = "#00000088", cex = 0.5)
    chart.title <- "Jittered Box Plots"
  }
  title(main = chart.title,
        sub = paste("Mann-Whitney U=", U, ", z=", z, ", p=", p,
                    "; Probability of Superiority=", PS, "; r=", r,
                    "\nP{value(group to the left) > value(group to the right)}=", PS,
                    "\nEffect size thresholds [r]: small (0.10), medium (0.30), large (0.50)"),
        cex.sub = 0.8)
  if (omm == TRUE) {
    # Overall mean (dashed red) and median (dotted blue) reference lines.
    abline(h = mean(data[, 1]), lty = 2, col = "red")
    abline(h = median(data[, 1]), lty = 3, col = "blue")
  }
  if (HL == TRUE) {
    # Density of all pairwise differences; its median is the Hodges-Lehmann
    # estimator of the location shift.
    unstacked.data <- utils::unstack(data)
    diff <- outer(unstacked.data[[1]], unstacked.data[[2]], "-")
    m <- round(median(diff), 3)
    graphics::plot(stats::density(diff), main = "Pairwise differences distribution", xlab = "",
                   sub = paste("difference in location", "\nmedian (Hodges-Lehmann estimator):", m))
    polygon(stats::density(diff), col = "grey")
    rug(diff, col = "red")
    abline(v = m, lty = 2, col = "blue")
  }
}
|
359368b6d1519120a5a0cf26d2c827b25d445c86 | 90a12049ecc3f402b278294d217969223076f18b | /R/aaa_env_vars.R | 047c4003ab3c1d7baf5a1a2c29d4e83c3c83d7c3 | [] | no_license | muschellij2/templateflow | 5f5f8e2de1499a4c941e59565098b6e530a8a48d | 6dd2ee53ba9f2a1d5bd05c4a4b24b12415cb0124 | refs/heads/master | 2023-03-09T16:18:19.744605 | 2021-02-22T21:30:16 | 2021-02-22T21:30:16 | 341,295,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 210 | r | aaa_env_vars.R | TF_GITHUB_SOURCE = "https://github.com/templateflow/templateflow.git"
TF_S3_ROOT = "https://templateflow.s3.amazonaws.com"
TF_USE_DATALAD = Sys.getenv("TEMPLATEFLOW_USE_DATALAD", unset = FALSE)
TF_CACHED = TRUE |
912fe99c1201f91a0e0154a86e9769c8a0553d06 | 2d9c1336e0159d4790b866b12bc67d96596c88b4 | /R/lightleak_upload.R | 32f8f5b0af18de4ebbd05cfc0b198444710b9ad8 | [] | no_license | JARS3N/lightleak | 0aea53171771bb4f3f1321f0a1a82e7ae9ecdf32 | 4ad91968b59728f34f8995a671c8c79a47f22d7b | refs/heads/master | 2022-08-22T05:50:46.238269 | 2022-08-11T18:15:37 | 2022-08-11T18:15:37 | 105,680,952 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 335 | r | lightleak_upload.R |
#### upload LightLeak Data
upload <- function(LLDat) {
require(RMySQL)
upload_per_file <- function(obj, db) {
not_in_db <- file_query_meta(obj, db) == 1
if (not_in_db) {
process_upload(obj, db)
}
}
my_db <- adminKraken::con_mysql()
res <- lapply(LLDat, upload_per_file, db = my_db)
dbDisconnect(my_db)
}
|
7a70ada5d007d5c6beba48eb4d1dc5f36e4ac8e7 | c7da3c116f3a35c344f7e7a73cd7a09077e3d283 | /man/plot_retweet_count.Rd | 45cb2ea9c625c3a30a49cb31ca5c544f2cea5cbe | [] | no_license | clement-lee/hybridProcess | 882237c0a135b505dce315f2f09ee2c951ef5b2d | ec848f60f5a2dc73b1643781d21f48f805db4768 | refs/heads/master | 2020-04-18T03:12:52.804330 | 2019-01-29T15:47:05 | 2019-01-29T15:47:05 | 167,190,760 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 465 | rd | plot_retweet_count.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.R
\name{plot_retweet_count}
\alias{plot_retweet_count}
\title{Plot retweet count}
\usage{
plot_retweet_count(df, t_0, t_inf)
}
\arguments{
\item{df}{a data frame with at least the following variables: id_str and t_ij.}
\item{t_0}{start of data collection; POSIXct.}
\item{t_inf}{end of data collection relative to t_0; a positive scalar.}
}
\description{
Plot retweet count
}
|
4ff0f7acd58030b1d530b9025d550cd45778dac4 | fbd822d04d0d8204065c270f98ba688c6639f779 | /data-raw/tidy_save_shapefiles.R | 1379f077663065c44e73f1c37be261f391dd73f6 | [] | no_license | seawaR/colmaps | 86b3079ddc6e45cc47c5015c8146c2eb15243697 | b0b4cc5d083cb5e8e124e2a92a3affb17bbf5a47 | refs/heads/master | 2021-01-17T23:34:45.437918 | 2015-05-24T00:33:36 | 2015-05-24T00:33:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,084 | r | tidy_save_shapefiles.R | library("dplyr")
library("rgdal")
# help("tolower")
# Capitalise each space-separated word of the string(s) in `s`.
# With strict = TRUE the rest of each word is forced to lower case; with
# strict = FALSE the trailing characters are left untouched.
# Vectorised over `s`; names are preserved only when `s` already has names.
capwords <- function(s, strict = TRUE) {
  capitalise_words <- function(words) {
    heads <- toupper(substring(words, 1, 1))
    tails <- substring(words, 2)
    if (strict) {
      tails <- tolower(tails)
    }
    paste(heads, tails, sep = "", collapse = " ")
  }
  sapply(strsplit(s, split = " "), capitalise_words, USE.NAMES = !is.null(names(s)))
}
# For each administrative level, read the raw shapefile, re-key its polygons by
# the DANE code, trim/rename the attribute table, then re-export the shapefile
# and save a binary .rda copy.
map <- list(municipios = NULL, departamentos = NULL)
var_codigo <- c(municipios = "CODIGO_DPT", departamentos = "CODIGO_DEP")
for (level in names(map)){
  path <- list(input = "inst/extdata/",
               rawoutput = "inst/extdata/",
               binoutput = "data/")
  path <- sapply(path, paste0, level)
  # The .cpg sidecar file records the character encoding of the shapefile.
  encoding <- readLines(con = paste0(path["input"], "/", level, ".cpg"),
                        warn = FALSE)
  map[[level]] <- readOGR(dsn = path["input"], layer = level, verbose = FALSE,
                          stringsAsFactors = FALSE, encoding = encoding)
  # Set polygons ids
  for (i in seq_len(nrow(map[[level]]))){
    current_id <- slot(slot(map[[level]], "polygons")[[i]], "ID")
    id_dane <- slot(map[[level]], "data")[current_id, var_codigo[level]]
    slot(slot(map[[level]], "polygons")[[i]], "ID") <- id_dane
  }
  # Check:
  # NOTE(review): the result of this identical() call is discarded, so the
  # check never fails; wrap it in stopifnot() to make it effective.
  identical(sapply(slot(map[[level]], "polygons"), slot, "ID"),
            slot(map[[level]], "data")[[var_codigo[level]]])
  # Format data
  dots <- list(id = var_codigo[[level]],
               id_depto = ~ CODIGO_DEP,
               municipio = ~ capwords(enc2utf8(NOMBRE_MUN)),
               depto = ~ capwords(enc2utf8(NOMBRE_DEP)))
  # Departamentos have no id_depto/municipio columns, so drop those mappings.
  if (level == "departamentos") dots <- dots[-c(2, 3)]
  slot(map[[level]], "data") <- slot(map[[level]], "data") %>%
    transmute_(.dots = dots)
  # Replace any previous raw export before writing the cleaned shapefile.
  if(dir.exists(path["rawoutput"]))
    file.remove(dir(path["rawoutput"], full.names = TRUE))
  writeOGR(obj = map[[level]], dsn = path["rawoutput"], layer = level,
           driver = "ESRI Shapefile", layer_options = 'ENCODING="ISO-8859-1"')
  save(list = level, file = paste0(path["binoutput"], ".rda"),
       compress = "bzip2", envir = as.environment(map))
}
|
bf6f2c132f0f6546f06c6ee76806678d0e7e2faf | 2c303bd5f3d712af852dc30176b3358c7de3cc67 | /src code/r/02/post-hazard.R | 3970408d35ad5db164e6273ba803df56bd8efe82 | [
"CC0-1.0"
] | permissive | junchenfeng/thesis | 89e2d02eccd200e8d4da4cb38bbfc48307f63d37 | 456e841a6e6d9a2276b09f4804529ab77bda4fa8 | refs/heads/master | 2021-01-20T15:36:32.185297 | 2017-02-09T21:46:09 | 2017-02-09T21:46:09 | 44,146,208 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,528 | r | post-hazard.R | library(dplyr)
library(tidyr)
complete_likelihood <- function(x, y, h, pi, c, hr) {
  # Joint likelihood P(X, Y, H) = P(Y | X) * P(H | X) * P(X) for binary
  # indicators x, y, h, where `c` plays the role of P(Y = 1 | X),
  # `pi` of P(X = 1) and `hr` of P(H = 1 | X). Vectorised over all arguments.
  bern <- function(indicator, prob) indicator * prob + (1 - indicator) * (1 - prob)
  bern(y, c) * bern(x, pi) * bern(h, hr)
}
conditional_x_density_xh <- function(pi, l, h0, h1) {
  # P(X_t = 1 | H_{t-1} = 0), computed from the joint probabilities
  # P(X_t = 1, H_{t-1} = 0) and P(X_t = 0, H_{t-1} = 0).
  # `l` appears to act as the transition probability P(X_t = 1 | X_{t-1} = 0);
  # h0/h1 are the hazards given X = 0 / X = 1. NOTE(review): confirm against
  # the model write-up.
  joint_x1 <- l * (1 - pi) * (1 - h0) + pi * (1 - h1)
  joint_x0 <- (1 - l) * (1 - pi) * (1 - h0)
  joint_x1 / (joint_x1 + joint_x0)
}
# Hazard of exit conditional on the observed response:
#   P(H = 1 | Y = y) = P(Y = y, H = 1) / (P(Y = y, H = 1) + P(Y = y, H = 0)),
# with the latent state X marginalized out of the complete-data likelihood.
#
# Args:
#   pi:  P(X = 1), current mastery probability.
#   c0:  P(Y = 1 | X = 0); c1: P(Y = 1 | X = 1).
#   h0:  exit hazard given X = 0; h1: exit hazard given X = 1.
#   idx: 1 to condition on a correct response (Y = 1), otherwise Y = 0.
#
# Returns the response-conditional hazard rate.
trans_x2y_hazard <- function(pi, c0, c1, h0, h1, idx) {
  y <- if (idx == 1) 1 else 0
  # Marginalize X out of P(X, Y, H) for each value of H.
  joint_exit <- complete_likelihood(1, y, 1, pi, c1, h1) +
    complete_likelihood(0, y, 1, pi, c0, h0)
  joint_stay <- complete_likelihood(1, y, 0, pi, c1, h1) +
    complete_likelihood(0, y, 0, pi, c0, h0)
  joint_exit / (joint_exit + joint_stay)
}
# Convert a summarized hazard-rate table (columns t, yhmean/whmean,
# yhmax/whmax, yhmin/whmin) from wide to long form: one row per (t, res)
# with res in {"correct", "incorrect"} and value columns h, hmax, hmin.
gather_hr <- function(hr_data) {
  point <- hr_data %>%
    select(t, yhmean, whmean) %>%
    rename(correct = yhmean, incorrect = whmean) %>%
    gather(res, h, -t)
  upper <- hr_data %>%
    select(t, yhmax, whmax) %>%
    rename(correct = yhmax, incorrect = whmax) %>%
    gather(res, hmax, -t)
  lower <- hr_data %>%
    select(t, yhmin, whmin) %>%
    rename(correct = yhmin, incorrect = whmin) %>%
    gather(res, hmin, -t)
  # Join the three long tables on (t, res): merge(merge(point, upper), lower).
  Reduce(function(acc, nxt) merge(acc, nxt, by = c('t', 'res')),
         list(point, upper, lower))
}
# Empirical hazard rates from raw spell data.
#
# Args:
#   test_data: data frame with columns t (practice period), atag (1 = previous
#              response correct, 0 = incorrect) and idx (1 = spell exits here).
#   Tmax:      number of periods to tabulate.
#
# Returns a long data frame with one row per (t, res), res in
# {"correct", "incorrect"}, the empirical hazard h and its binomial
# standard error sd_h.
imputate_hazard_rate <- function(test_data, Tmax) {
  alldata = data.frame(t = seq_len(Tmax), hr = as.numeric(0), pc = as.numeric(0),
                       pw = as.numeric(0), Nc = as.numeric(0), Nw = as.numeric(0))
  for (period in seq_len(Tmax)) {
    at_t   <- test_data$t == period
    exited <- test_data$idx == 1
    yes    <- test_data$atag == 1
    no     <- test_data$atag == 0
    n_yes  <- sum(at_t & yes)
    n_no   <- sum(at_t & no)
    # hr = overall hazard; pc / pw = hazard among correct / incorrect responders.
    alldata[period, ] = c(period,
                          sum(at_t & exited) / sum(at_t),
                          sum(at_t & yes & exited) / n_yes,
                          sum(at_t & no & exited) / n_no,
                          n_yes, n_no)
  }
  # Binomial standard errors, then reshape to long (one row per t x response).
  alldata = alldata %>% mutate(sdc = sqrt(pc * (1 - pc) / Nc), sdw = sqrt(pw * (1 - pw) / Nw))
  point = alldata %>% select(t, pc, pw) %>% rename(correct = pc, incorrect = pw) %>% gather(res, h, -t)
  spread_tbl = alldata %>% select(t, sdc, sdw) %>% rename(correct = sdc, incorrect = sdw) %>% gather(res, sd_h, -t)
  merge(point, spread_tbl, by = c('t', 'res'))
}
# Shared configuration: knowledge-point ids and display names, and the number
# of practice opportunities (periods) tracked per spell.
proj_dir = getwd()
kpids = c('87','138')
kpnames = c('Two Digit Multiplication', 'Long Division')
maxT= 4
## Prop Model
# Parametric hazard specification: for each knowledge point, read posterior
# parameter draws for the observed-response model (yh) and the latent-state
# model (xh), turn every draw into per-period hazards
# h(t) = lambda * exp(beta * (t - 1)), and compare against empirical hazards.
for (i in seq(2)){
# read in data
file_path = paste0(proj_dir,'/_data/02/res/',kpids[i],'/yh.txt')
y_param_data = read.table(file_path, col.names=c('l','pi','c0','c1','lambda0','beta0','lambda1','beta1'), header=F,sep=',')
file_path = paste0(proj_dir,'/_data/02/res/',kpids[i],'/xh.txt')
x_param_data = read.table(file_path, col.names=c('l','pi','c0','c1','lambda0','beta0','lambda1','beta1'), header=F,sep=',')
file_path = paste0(proj_dir,'/_data/02/spell_data_',kpids[i],'.csv')
kp_spell_data = read.csv(file_path, col.names=c('spell_id','t','atag','idx'),header=F)
# get the hazard rates
# One row per (draw, period): yh = hazard after a correct response
# (lambda1/beta1), wh = hazard after an incorrect one (lambda0/beta0).
# NOTE(review): the horizon is hard-coded as seq(4) rather than using maxT.
for (j in seq(nrow(y_param_data))){
tmp = data.frame(t=seq(4), yh=y_param_data$lambda1[j]*exp(y_param_data$beta1[j]*(seq(4)-1)), wh=y_param_data$lambda0[j]*exp(y_param_data$beta0[j]*(seq(4)-1)),idx=j)
if (j==1){
y_hr_dist=tmp
}else{
y_hr_dist = rbind(y_hr_dist,tmp)
}
}
# Latent-state model: translate state-conditional hazards into
# response-conditional ones, updating the mastery probability pi each period.
for (j in seq(nrow(x_param_data))){
lambda0 = x_param_data$lambda0[j]
lambda1 = x_param_data$lambda1[j]
beta0 = x_param_data$beta0[j]
beta1 = x_param_data$beta1[j]
l = x_param_data$l[j]
# The file stores P(X = 0); flip to P(X = 1).
pi = 1-x_param_data$pi[j]
c0 = x_param_data$c0[j]
c1 = x_param_data$c1[j]
x_hrs = data.frame(t=seq(4), yh=lambda1*exp(beta1*(seq(4)-1)), wh=lambda0*exp(beta0*(seq(4)-1)))
tmp = data.frame(t=seq(4),yh=as.numeric(0),wh=as.numeric(0))
for (t in seq(4)){
if (t!=1){
# Update the mastery probability conditional on surviving period t-1.
pi = conditional_x_density_xh(pi, l, x_hrs$wh[t-1], x_hrs$yh[t-1])
}
tmp$yh[t]=trans_x2y_hazard(pi,c0,c1,x_hrs$wh[t],x_hrs$yh[t],1)
tmp$wh[t]=trans_x2y_hazard(pi,c0,c1,x_hrs$wh[t],x_hrs$yh[t],0)
}
if (j==1){
xy_hr_dist = tmp
}else{
xy_hr_dist = rbind(xy_hr_dist, tmp)
}
}
# calculate the mean and the 95 credible interval
# (5th/95th percentiles, i.e. a 90% band around the posterior mean)
y_hr = y_hr_dist %>% group_by(t) %>% summarize(yhmean=mean(yh),whmean=mean(wh),
yhmax=quantile(yh,prob=0.95), whmax=quantile(wh,prob=0.95),
yhmin=quantile(yh,prob=0.05), whmin=quantile(wh,prob=0.05))
xy_hr = xy_hr_dist %>% group_by(t) %>% summarize(yhmean=mean(yh),whmean=mean(wh),
yhmax=quantile(yh,prob=0.95), whmax=quantile(wh,prob=0.95),
yhmin=quantile(yh,prob=0.05), whmin=quantile(wh,prob=0.05))
y_h_data = gather_hr(y_hr)
y_h_data$type = 'BKT'
xy_h_data = gather_hr(xy_hr)
xy_h_data$type = 'LTP'
# compute the real data
emp_h_data = imputate_hazard_rate(kp_spell_data, maxT)
emp_h_data$res = factor(emp_h_data$res)
# NOTE(review): hmax/hmin are computed and immediately dropped by select();
# dead computation kept as-is for reference.
emp_h_data = emp_h_data %>% mutate(hmax=h+1.97*sd_h,hmin=h-1.97*sd_h) %>% select(t,res,h) %>% rename(hd=h)
tmp_data = rbind(y_h_data, xy_h_data)
tmp_data = merge(tmp_data, emp_h_data)
tmp_data$kp = kpnames[i]
if(i==1){
all_data_1 = tmp_data
}else{
all_data_1 = rbind(all_data_1,tmp_data)
}
}
## Nonparametric Model
## Same pipeline as the parametric block above, except the hazards are free
## per-period parameters (h01..h04 for the X = 0 state, h11..h14 for X = 1)
## rather than lambda * exp(beta * (t - 1)).
for (i in seq(2)) {
  # Posterior draws (one row per draw) plus the raw spell data.
  file_path = paste0(proj_dir,'/_data/02/res/',kpids[i],'/yh_np.txt')
  y_param_data = read.table(file_path, col.names=c('l','pi','c0','c1','h01','h02','h03','h04','h11','h12','h13','h14'), header=F,sep=',')
  file_path = paste0(proj_dir,'/_data/02/res/',kpids[i],'/xh_np.txt')
  x_param_data = read.table(file_path, col.names=c('l','pi','c0','c1','h01','h02','h03','h04','h11','h12','h13','h14'), header=F,sep=',')
  file_path = paste0(proj_dir,'/_data/02/spell_data_',kpids[i],'.csv')
  kp_spell_data = read.csv(file_path, col.names=c('spell_id','t','atag','idx'),header=F)
  # Observed-response model: hazards are read straight off each posterior draw.
  for (j in seq(nrow(y_param_data))) {
    h0s = c(y_param_data$h01[j],y_param_data$h02[j],y_param_data$h03[j],y_param_data$h04[j])
    h1s = c(y_param_data$h11[j],y_param_data$h12[j],y_param_data$h13[j],y_param_data$h14[j])
    tmp = data.frame(t=seq(4), yh=h1s, wh=h0s, idx=j)
    if (j == 1) {
      y_hr_dist = tmp
    } else {
      y_hr_dist = rbind(y_hr_dist, tmp)
    }
  }
  # Latent-state model: translate state-conditional hazards into
  # response-conditional ones, updating the mastery probability each period.
  # (The dead lambda0/lambda1/beta0/beta1 assignments have been removed:
  # those columns do not exist in the nonparametric parameter files, so the
  # assignments indexed NULL and the values were never used.)
  for (j in seq(nrow(x_param_data))) {
    l = x_param_data$l[j]
    pi = 1-x_param_data$pi[j]  # the file stores P(X = 0); flip to P(X = 1)
    c0 = x_param_data$c0[j]
    c1 = x_param_data$c1[j]
    h0s = c(x_param_data$h01[j],x_param_data$h02[j],x_param_data$h03[j],x_param_data$h04[j])
    h1s = c(x_param_data$h11[j],x_param_data$h12[j],x_param_data$h13[j],x_param_data$h14[j])
    # BUG FIX: the placeholder frame used to declare an unused `xh` column while
    # `wh` was only created implicitly on first assignment; declare `wh` directly.
    tmp = data.frame(t=seq(4), yh=as.numeric(0), wh=as.numeric(0))
    for (t in seq(4)) {
      if (t != 1) {
        # Update the mastery probability conditional on surviving period t-1.
        pi = conditional_x_density_xh(pi, l, h0s[t-1], h1s[t-1])
      }
      tmp$yh[t] = trans_x2y_hazard(pi,c0,c1,h0s[t],h1s[t],1)
      tmp$wh[t] = trans_x2y_hazard(pi,c0,c1,h0s[t],h1s[t],0)
    }
    if (j == 1) {
      xy_hr_dist = tmp
    } else {
      xy_hr_dist = rbind(xy_hr_dist, tmp)
    }
  }
  # calculate the mean and the 90% credible interval (5th/95th percentiles)
  y_hr = y_hr_dist %>% group_by(t) %>% summarize(yhmean=mean(yh),whmean=mean(wh),
    yhmax=quantile(yh,prob=0.95), whmax=quantile(wh,prob=0.95),
    yhmin=quantile(yh,prob=0.05), whmin=quantile(wh,prob=0.05))
  xy_hr = xy_hr_dist %>% group_by(t) %>% summarize(yhmean=mean(yh),whmean=mean(wh),
    yhmax=quantile(yh,prob=0.95), whmax=quantile(wh,prob=0.95),
    yhmin=quantile(yh,prob=0.05), whmin=quantile(wh,prob=0.05))
  y_h_data = gather_hr(y_hr)
  y_h_data$type = 'BKT'
  xy_h_data = gather_hr(xy_hr)
  xy_h_data$type = 'LTP'
  # Empirical hazards from the raw spell data, for comparison in the plots.
  emp_h_data = imputate_hazard_rate(kp_spell_data, maxT)
  emp_h_data$res = factor(emp_h_data$res)
  emp_h_data = emp_h_data %>% mutate(hmax=h+1.97*sd_h,hmin=h-1.97*sd_h) %>% select(t,res,h) %>% rename(hd=h)
  tmp_data = rbind(y_h_data, xy_h_data)
  tmp_data = merge(tmp_data, emp_h_data)
  tmp_data$kp = kpnames[i]
  if (i == 1) {
    all_data_2 = tmp_data
  } else {
    all_data_2 = rbind(all_data_2, tmp_data)
  }
}
## Merge
# Combine the parametric and nonparametric runs and plot the LTP hazards
# (model band with error bars vs. dotted empirical hazard), faceted by
# specification and knowledge point.
# NOTE(review): ggplot() is used below but only dplyr and tidyr are attached
# at the top of this script -- confirm ggplot2 is loaded before this runs.
all_data_1$spec='Parametric'
all_data_2$spec='Nonparametric'
all_data = rbind(all_data_1,all_data_2)
all_data$res = factor(all_data$res)
ggplot(data=all_data %>% filter(type=='LTP'), aes(x=t,y=h, col=res,linetype=res)) + geom_line() +
scale_linetype_manual(values = c("correct"='twodash',"incorrect"='solid')) +
facet_grid(spec~kp)+
# NOTE(review): facet_grid(spec~kp) appears twice; the second call is redundant.
geom_errorbar(aes(x=t, ymin=hmin, ymax=hmax,color=res),width=0.1) + facet_grid(spec~kp)+
geom_line(aes(x=t,y=hd,col=res),linetype='dotted')+
theme(legend.position="top") + ylab('Hazard Rate') + xlab('Number of Practice')
|
099fc662501e1aa3780f3f03a138d893e321341c | 71efb0e950c7d52e526c5641ebcaa4b60cc2152a | /Starter/mlclass-ex1/gradientDescentMulti.R | 1b8dadc01762a63524718273fd5b64f8fc0c275d | [] | no_license | yc-ng/machine-learning-course | 0c801b0482dd27e358ab36a619d5d5970c004f78 | 2c9643849e07040394e08f0f2b50150384f073dd | refs/heads/master | 2020-03-22T08:01:16.651561 | 2018-07-12T14:38:34 | 2018-07-12T14:38:34 | 139,739,589 | 0 | 0 | null | 2018-07-04T15:31:37 | 2018-07-04T15:31:37 | null | UTF-8 | R | false | false | 1,631 | r | gradientDescentMulti.R | gradientDescentMulti <- function(X, y, theta, alpha, num_iters) {
#GRADIENTDESCENTMULTI Performs batch gradient descent to learn theta
#   theta <- GRADIENTDESCENTMULTI(X, y, theta, alpha, num_iters) updates theta
#   by taking num_iters gradient steps with learning rate alpha.
#
#   X         - m x n design matrix (first column assumed to be the intercept)
#   y         - length-m response vector
#   theta     - initial parameter vector (length n)
#   alpha     - learning rate
#   num_iters - number of gradient steps
#
#   Returns list(theta = learned parameters,
#                J_history = cost after each iteration).
# Initialize some useful values
m <- length(y) # number of training examples
J_history <- numeric(num_iters)
for (iter in seq_len(num_iters)) {
  # Vectorized simultaneous update of all parameters:
  #   theta <- theta - (alpha / m) * t(X) %*% (X %*% theta - y)
  # Equivalent to the previous per-feature loops (all components of theta are
  # updated from the same residual), with the commented-out duplicate removed.
  residual <- X %*% theta - y
  gradient <- as.vector(crossprod(X, residual)) / m
  theta <- theta - alpha * gradient
  # Save the cost J in every iteration so convergence can be inspected.
  J_history[iter] <- computeCostMulti(X, y, theta)
}
list(theta = theta, J_history = J_history)
}
|
7ce526a917b87bb4e094ee8d5a605368568b0cca | 64a91e6762d9e7b8fb4552b7cc07014d73d6b817 | /man/VarReg.control.Rd | 4455a684b4b19369dd1c6564551ef5ebcdbeec60 | [] | no_license | kristyrobledo/VarReg | 95416998b5c012bad00c76bef1fb71c23e94c984 | 1a73ae2fab5e1841f4941c942d20328096b7e6a9 | refs/heads/master | 2023-08-22T02:36:04.857619 | 2023-08-10T12:21:25 | 2023-08-10T12:21:25 | 227,324,209 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,256 | rd | VarReg.control.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarReg_control.R
\name{VarReg.control}
\alias{VarReg.control}
\title{Auxiliary for controlling VarReg fitting}
\usage{
VarReg.control(bound.tol = 1e-05, epsilon = 1e-06, maxit = 1000)
}
\arguments{
\item{bound.tol}{Positive tolerance for specifying the interior of the parameter space. This allows the algorithm to terminate early if an interior maximum is found. If set to \code{bound.tol=Inf}, no early termination is attempted.}
\item{epsilon}{Positive convergence tolerance. If \eqn{\theta} is a vector of estimates, convergence is declared when \eqn{\sqrt{\sum (\theta_{old} - \theta_{new})^2} / \sqrt{\sum \theta_{old}^2} < \epsilon}. This should be smaller than \code{bound.tol}.}
\item{maxit}{integer giving the maximum number of EM algorithm iterations for a given parameterisation.}
}
\value{
A list of the three components: \code{bound.tol}, \code{epsilon} and \code{maxit} .
}
\description{
Use \code{VarReg.control} to determine parameters for the fitting of \code{\link{semiVarReg}}. Typically only used internally within functions.
}
\details{
This is used similarly to \code{\link[stats]{glm.control}}. If required, it may be internally passed to another function.
}
|
58a7ba43d3fb3e65cd03bdd4011d132ef0c85f37 | 49f0605bea382482a028e543cac6e72e3c0335c7 | /man/subbytype.Rd | 63f89a2bf4ef4bda90f5d064a025f31a473c5dff | [] | no_license | cran/cbanalysis | 2993af56c9b1b6809842132e43398207bad61940 | ce00fca11d555c4c251bb44908e0c32f2d33984d | refs/heads/master | 2021-01-19T13:44:33.718634 | 2017-09-04T17:43:49 | 2017-09-04T17:43:49 | 88,104,628 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,108 | rd | subbytype.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subbytype.R
\name{subbytype}
\alias{subbytype}
\title{Subsets Data frame Based On Variable Types.}
\usage{
subbytype(df)
}
\arguments{
\item{df}{- Input Data frame We Wish To Subset.}
}
\value{
Returns List of Data frames.
}
\description{
Returns a list of six data frames, each a column subset of the input data frame.
The list's first element contains all factor variables of the input data frame.
The second element contains all numeric and integer variables of the input data frame.
The third element contains all logical variables of the input data frame.
The fourth element contains all complex variables of the input data frame.
The fifth element contains all character variables of the input data frame.
The sixth element contains all raw variables of the input data frame.
}
\examples{
numv<-c(1,2,3)
chrv<-c("a","b","c")
df<-data.frame(numv,chrv)
subbytype(df)
}
\author{
"Sandip Kumar Gupta", "sandip_nitj@yahoo.co.in"
}
|
5f947aa37e866506b8d07b65ef6d2db3688f4488 | 35dab897fb9e82c7bf81833b018ae4e483d5e8ef | /fig4/fig4c/plot_all_AP_features.R | 149fe906fd8494d36d5236dd08d5ef8f72170b58 | [] | no_license | grace-hansen/adipose_sex_dimorphism | 48fa0ece2a6e5cf4df6afbc796a0081c2d575ec6 | 6402f5dc152fec241fa07f8f3cd57a8d011a0eb6 | refs/heads/main | 2023-07-24T06:15:18.515680 | 2023-07-18T20:09:50 | 2023-07-18T20:09:50 | 555,140,395 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,991 | r | plot_all_AP_features.R | #!/usr/bin/Rscript
library(data.table)
library(tidyverse)
library(gridExtra)
setwd("~/medusa/papers/TWAS/lipocyte_profiler/scatter")
pony_colors<-fread("~/medusa/papers/TWAS/pony_palette")
######### Author: Grace Hansen #########
#This script plots adipocyte profiler data along a time course and from different cell types
############################ For color manipulation ############################
darken <- function(color, factor = 1.2) {
  # Darken a color by dividing each RGB channel by `factor` (> 1 darkens,
  # < 1 would lighten). Accepts any color spec col2rgb() understands and
  # returns a hex string such as "#800000".
  channels <- col2rgb(color) / factor
  rgb(t(channels), maxColorValue = 255)
}
################################################################################
# Cell types and differentiation timepoints to load -- presumably
# sc = subcutaneous and vc = visceral; confirm against the input files.
celltypes<-c("sc","vc")
timepoints<-c("day0","day3","day8","day14")
################# Both sexes ####################3
# Stack the per-timepoint / per-celltype association tables into one frame,
# tagging each row with its cell type and timepoint (columns 7 and 8).
AP<-matrix(nrow=0,ncol=8)
for (ct in celltypes) {
for (tp in timepoints) {
dat<-as.matrix(fread(paste("rs1534696_allfeatures_",tp,"_",ct,"_bothsexes.tsv",sep='')))
# NOTE(review): head(dat) is a no-op here -- its value is discarded.
head(dat)
dat<-cbind(dat,rep(ct,nrow(dat)))
dat<-cbind(dat,rep(tp,nrow(dat)))
AP<-rbind(AP,dat)
}
}
AP<-as.data.frame(AP,stringsAsFactors=FALSE)
colnames(AP)[7:8]<-c("celltype","tp")
# Going through a character matrix made every column character; coerce back.
AP$`SNP.pvalue`<-as.numeric(as.character(AP$`SNP.pvalue`))
AP$`t-test`<-as.numeric(as.character(AP$`t-test`))
AP$q<-as.numeric(as.character(AP$q))
AP$tp<-factor(AP$tp,levels=c("day0","day3","day8","day14"))
#Insert category labels
# Classify each feature by the single stain its name references; features
# touching several stains (or none) stay "other/combined".
AP$category<-rep("other/combined",nrow(AP))
AP$category[grepl("AGP",AP$features) & !(grepl("BODIPY",AP$features)) & !(grepl("Mito",AP$features)) & !(grepl("DNA",AP$features))]<-"Actin"
AP$category[!(grepl("AGP",AP$features)) & !(grepl("BODIPY",AP$features)) & !(grepl("Mito",AP$features)) & grepl("DNA",AP$features)]<-"DNA"
AP$category[!(grepl("AGP",AP$features)) & grepl("BODIPY",AP$features) & !(grepl("Mito",AP$features)) & !(grepl("DNA",AP$features))]<-"Intracellular lipids"
AP$category[!(grepl("AGP",AP$features)) & !(grepl("BODIPY",AP$features)) & grepl("Mito",AP$features) & !(grepl("DNA",AP$features))]<-"Mitochondria"
AP<-AP[!(is.na(AP$SNP.pvalue)),]
#Volcano plots
# Significant features (q < 0.1 and p < 0.05) drawn large and opaque, the rest
# faint; the dashed hline at 1.855 is presumably the -log10 p cut-off -- confirm.
S<-ggplot()+
geom_hline(yintercept=1.855,linetype="dashed",color="gray80")+
geom_point(data=AP[AP$celltype=="sc" & (AP$q > 0.1 | AP$`SNP.pvalue`>0.05),],aes(x=`t-test`,y=-log10(`SNP.pvalue`),color=category),size=0.5,alpha=0.25)+
geom_point(data=AP[AP$celltype=="sc" & AP$q < 0.1 & AP$`SNP.pvalue`<0.05,],aes(x=`t-test`,y=-log10(`SNP.pvalue`),color=category),size=1.5)+
facet_wrap(vars(tp),nrow=1)+
ggtitle("Subcutaneous Adipocytes")+
theme_minimal()+
scale_x_continuous(name="t-statistic",limits=c(-4,4))+
scale_y_continuous(name="-log10 p-value",limits=c(0,5))+
scale_color_manual(values=c(rgb(pony_colors[2,1:3]),rgb(pony_colors[7,1:3]),darken(rgb(pony_colors[16,1:3]),1.1),rgb(pony_colors[11,1:3]),"gray80"))+
theme(axis.line.x = element_blank(),
axis.text.x=element_text(size=8))+
labs(color="Feature Class")
V<-ggplot()+
geom_hline(yintercept=1.855,linetype="dashed",color="gray80")+
geom_point(data=AP[AP$celltype=="vc" & (AP$q > 0.1 | AP$`SNP.pvalue`>0.05),],aes(x=`t-test`,y=-log10(`SNP.pvalue`),color=category),size=0.5,alpha=0.25)+
geom_point(data=AP[AP$celltype=="vc" & AP$q < 0.1 & AP$`SNP.pvalue`<0.05,],aes(x=`t-test`,y=-log10(`SNP.pvalue`),color=category),size=1.5)+
facet_wrap(vars(tp),nrow=1)+
ggtitle("Visceral Adipocytes")+
theme_minimal()+
scale_x_continuous(name="t-statistic",limits=c(-4,4))+
scale_y_continuous(name="-log10 p-value",limits=c(0,5))+
scale_color_manual(values=c(rgb(pony_colors[2,1:3]),rgb(pony_colors[7,1:3]),darken(rgb(pony_colors[16,1:3]),1.1),rgb(pony_colors[11,1:3]),"gray80"))+
theme(axis.line.x = element_blank(),
axis.text.x=element_text(size=8))+
labs(color="Feature Class")
# Side-by-side panel: visceral on the left, subcutaneous on the right.
pdf("AP_features_timecourse.pdf",width=8,height=3)
grid.arrange(V,S,nrow=1)
dev.off()
################# Female ####################3
# Female-only replicate of the analysis above. These input files use the
# columns `p-value (t-test)` and `qvalue` (instead of `SNP.pvalue` / `q`)
# and one extra data column, hence ncol = 9 and celltype/tp at positions 8:9.
AP <- matrix(nrow = 0, ncol = 9)
for (ct in celltypes) {
  for (tp in timepoints) {
    dat <- as.matrix(fread(paste("rs1534696_allfeatures_",tp,"_",ct,"_female.tsv",sep='')))
    # Tag each row with its cell type and timepoint before stacking.
    dat <- cbind(dat, rep(ct, nrow(dat)))
    dat <- cbind(dat, rep(tp, nrow(dat)))
    AP <- rbind(AP, dat)
  }
}
AP <- as.data.frame(AP, stringsAsFactors = FALSE)
colnames(AP)[8:9] <- c("celltype", "tp")
# Everything became character via the matrix; coerce the numeric columns back.
AP$`p-value (t-test)` <- as.numeric(as.character(AP$`p-value (t-test)`))
AP$`t-test` <- as.numeric(as.character(AP$`t-test`))
AP$qvalue <- as.numeric(as.character(AP$qvalue))
AP$tp <- factor(AP$tp, levels = c("day0", "day3", "day8", "day14"))
#Insert category labels
AP$category <- rep("other/combined", nrow(AP))
AP$category[grepl("AGP",AP$features) & !(grepl("BODIPY",AP$features)) & !(grepl("Mito",AP$features)) & !(grepl("DNA",AP$features))] <- "Actin"
AP$category[!(grepl("AGP",AP$features)) & !(grepl("BODIPY",AP$features)) & !(grepl("Mito",AP$features)) & grepl("DNA",AP$features)] <- "DNA"
AP$category[!(grepl("AGP",AP$features)) & grepl("BODIPY",AP$features) & !(grepl("Mito",AP$features)) & !(grepl("DNA",AP$features))] <- "Intracellular lipids"
AP$category[!(grepl("AGP",AP$features)) & !(grepl("BODIPY",AP$features)) & grepl("Mito",AP$features) & !(grepl("DNA",AP$features))] <- "Mitochondria"
# BUG FIX: this filter previously used AP$SNP.pvalue (copied from the
# both-sexes block). That column does not exist in the female files, so
# AP$SNP.pvalue was NULL and AP[!is.na(NULL), ] silently dropped every row.
# Filter on the p-value column this section actually uses.
AP <- AP[!(is.na(AP$`p-value (t-test)`)), ]
#Volcano plots
S<-ggplot()+
geom_hline(yintercept=1.303,linetype="dashed",color="black")+
geom_point(data=AP[AP$celltype=="sc" & (AP$qvalue > 0.05 | AP$`p-value (t-test)`>0.05),],aes(x=`t-test`,y=-log10(`p-value (t-test)`),color=category),size=0.5,alpha=0.25)+
geom_point(data=AP[AP$celltype=="sc" & AP$qvalue < 0.05 & AP$`p-value (t-test)`<0.05,],aes(x=`t-test`,y=-log10(`p-value (t-test)`),color=category),size=1.5)+
facet_wrap(vars(tp),nrow=1)+
ggtitle("Subcutaneous Adipocytes")+
theme_minimal()+
scale_x_continuous(name="t-statistic",limits=c(-4,4))+
scale_y_continuous(name="-log10 p-value",limits=c(0,5))+
scale_color_manual(values=c(rgb(pony_colors[2,1:3]),rgb(pony_colors[7,1:3]),darken(rgb(pony_colors[16,1:3]),1.1),rgb(pony_colors[11,1:3]),"gray80"))+
theme(axis.line.x = element_blank(),
axis.text.x=element_text(size=10))+
labs(color="Feature Class")
V<-ggplot()+
geom_hline(yintercept=1.303,linetype="dashed",color="black")+
geom_point(data=AP[AP$celltype=="vc" & (AP$qvalue > 0.05 | AP$`p-value (t-test)`>0.05),],aes(x=`t-test`,y=-log10(`p-value (t-test)`),color=category),size=0.5,alpha=0.25)+
geom_point(data=AP[AP$celltype=="vc" & AP$qvalue < 0.05 & AP$`p-value (t-test)`<0.05,],aes(x=`t-test`,y=-log10(`p-value (t-test)`),color=category),size=1.5)+
facet_wrap(vars(tp),nrow=1)+
ggtitle("Visceral Adipocytes")+
theme_minimal()+
scale_x_continuous(name="t-statistic",limits=c(-4,4))+
scale_y_continuous(name="-log10 p-value",limits=c(0,5))+
scale_color_manual(values=c(rgb(pony_colors[2,1:3]),rgb(pony_colors[7,1:3]),darken(rgb(pony_colors[16,1:3]),1.1),rgb(pony_colors[11,1:3]),"gray80"))+
theme(axis.line.x = element_blank(),
axis.text.x=element_text(size=10))+
labs(color="Feature Class")
pdf("AP_features_timecourse_females.pdf",width=4,height=5)
grid.arrange(S,V,nrow=1)
dev.off()
|
bd988841c41bde92793141343a648655d3e72e4e | 8f2eca45ca1f659f9574aadbd9c45a25ad5aff83 | /man/prsp_exp_models.Rd | f6e53f893d39844046a51edb27915f31bf21aba8 | [
"CC-BY-3.0",
"MIT"
] | permissive | paride92/peRspective | 652ea09506bc4905fea7edbe0fe82a99101a8c8f | 4373272c66ca5e21c1f209d3a72f7c1c0ac287e2 | refs/heads/master | 2023-06-21T13:12:21.001203 | 2021-07-13T13:41:57 | 2021-07-13T13:41:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 289 | rd | prsp_exp_models.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prsp_stream.R
\docType{data}
\name{prsp_exp_models}
\alias{prsp_exp_models}
\title{All valid experimental Perspective API models}
\description{
All valid experimental Perspective API models
}
\keyword{datasets}
|
c74b5aeb0d6ffe0656cd441a6dce44e303557f47 | 42d4e666f3596a68266249fc0ce38a3f04781a05 | /Rday11.R | 1719de9ca10bbb20290c6a1245351fc6a029974f | [] | no_license | aspaachu/datavisualization | c0474b17c5f38674a2a8b0eadc2e0d40e4ec9aec | 0c84cf81ebdba581ba07eec91bc4f9df300383cb | refs/heads/main | 2023-06-26T20:14:42.451808 | 2021-07-27T14:05:16 | 2021-07-27T14:05:16 | 390,004,124 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 659 | r | Rday11.R | #1Consider the student data in the marks.csv file. Read it into an R variable, Attach
#additional columns in it to keep student wise and subjectwise totals .
# Read marks.csv (interactive file chooser) into a data frame.
x<-read.csv(file.choose())
x
# Student-wise totals (one per row).
# NOTE(review): the extra argument -c(2) is passed to sum() as a value, so it
# subtracts 2 from every row total rather than excluding column 2; if the
# intent was to drop column 2, this should be apply(x[,-2],1,sum) -- confirm.
y<-apply(x,1,sum,-c(2))
y
cbind(x,Studenttotal=y)
# Subject-wise totals (one per column); the same -c(2) caveat applies here.
z<-apply(x,2,sum,-c(2))
z[1]<-NA
rbind(x,Subjecttotal=z)
#2Let list1 <- list(observationA = c(1:5, 7:3),observationB=matrix(1:6,nrow=2))
# Apply summary functions element-wise over the list with lapply.
x<-list(observationA = c(1:5, 7:3),observationB=matrix(1:6,nrow=2))
lapply(x,length)
lapply(x,sum)
lapply(x,class)
f1 <- function(t) { log10(t) + 1 }
lapply(x,f1)
lapply(x,unique)
lapply(x,range)
#3
# lapply returns a list; sapply simplifies the result to a vector when it can.
t<-list(A=matrix(1:9,3),B=1.4,C=matrix(1:10,2),D=21)
lapply(t, mean)
sapply(t,mean)
|
be578c45eebcf1b80a6c1c765bf5d34e605f3bde | ede0a874bad1f7dbb6a3e03ec87ab2c6e215decd | /man/GetRefBatchName.Rd | 68df44e001ad0a6118f7bfbc4b59da9ce06c3622 | [
"MIT"
] | permissive | ddiez/CanekDebug | 26044ade7d0f603c9aeb775bf9d022916aeb9028 | 4625138c482d32db3ab7c05f6b385a2cad03c3db | refs/heads/main | 2023-04-07T21:13:49.973671 | 2021-04-08T10:47:55 | 2021-04-08T10:47:55 | 345,546,436 | 0 | 1 | NOASSERTION | 2021-04-08T09:16:24 | 2021-03-08T05:56:22 | R | UTF-8 | R | false | true | 261 | rd | GetRefBatchName.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Batch.R
\name{GetRefBatchName}
\alias{GetRefBatchName}
\title{GetRefBatchName}
\usage{
GetRefBatchName(x)
}
\arguments{
\item{x}{CanekDebug object.}
}
\description{
GetRefBatchName
}
|
0716b3b5f2ecf59db62530a9975a6e8f475a26ac | 8723f30f8032b801c020267b3d8de10695266ec1 | /shinyProject1/server.R | 7a53ce0bda621fd346f1e658253fbcae0af5a38e | [] | no_license | yiguo-git/shinyProject | e8aa764864c072d4e6facde63394d5a7732fe134 | 68aadd63d20b59439bd38e41f4af336e55d8bda6 | refs/heads/master | 2022-04-26T04:36:45.448724 | 2020-04-26T13:00:18 | 2020-04-26T13:00:18 | 259,013,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 943 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(leaflet)
library(shiny)
# Define server logic required to draw a histogram
# Shiny server: renders a Leaflet map with a fixed marker and a circle whose
# radius scales with the "Radius" input, plus a text readout of that radius.
shinyServer(function(input, output) {
# Map with one marker and a circle of radius input$Radius * 30 meters.
# The 30x scale factor must stay in sync with the output$meter readout below.
output$mapPlot <- renderLeaflet({
R = input$Radius
# draw the histogram with the specified number of bins
# hist(x, breaks = bins, col = 'darkgray', border = 'white')
# (the two commented lines above are leftover app-template code)
myrooms = data.frame(lat = c(1.373015),
lng = c(103.874895),
labels = c('Rosyth School'))
myrooms %>% leaflet %>% addTiles %>%
addMarkers(popup=~labels) %>%
addCircles(weight = 1, radius = R*30)
})
# Text readout of the circle radius, e.g. "30 m".
output$meter <- renderPrint({
paste(as.character(input$Radius*30), "m")
})
})
|
3b3aad14475cb0f393e3e43ba5f42f8c25fbb88c | 738234c4084abb8f88a53ebc9ee0fcc574538c05 | /set_1/bt_tuning.R | 89045d035659acff086996811227d86bf7b56b31 | [] | no_license | apoorvashivaram/classification_competition | 408ceff1d96836cb2f5d44ecce270510a655586a | 3152ca3ec13c4a8d947591539197db254db2f664 | refs/heads/main | 2023-05-05T08:36:30.257005 | 2021-05-28T04:39:24 | 2021-05-28T04:39:24 | 362,628,690 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,245 | r | bt_tuning.R | # boosted tree tuning ----
# load package(s) ----
library(tidyverse)
library(tidymodels)
library(tictoc)
# set seed ----
set.seed(3729)
# load required objects ----
# Expects loan_setup.rda to provide (at least) `loan_recipe` and `loan_fold`,
# both referenced below -- TODO confirm against the setup script.
load("model_info/loan_setup.rda")
# define model ----
# Boosted-tree classifier with mtry, min_n and learn_rate left for tuning.
bt_model <- boost_tree(
mode = "classification",
mtry = tune(),
min_n = tune(),
learn_rate = tune(),
) %>%
# variable importance plot
set_engine("xgboost", importance = "impurity")
# # check tuning parameters
# parameters(bt_model)
# set-up tuning grid ----
bt_params <- parameters(bt_model) %>%
# don't want to use all the parameters (# of predictors)
# NOTE: per dials, learn_rate is tuned on the log10 scale, so c(-5, -0.2)
# corresponds to roughly 1e-5 .. 0.63.
update(mtry = mtry(range = c(2, 10)),
learn_rate = learn_rate(range = c(-5, -0.2))
)
# define grid ----
# 5 levels per parameter => full-factorial grid of 5^3 = 125 candidates.
bt_grid <- grid_regular(bt_params, levels = 5)
# boosted tree workflow ----
bt_workflow <- workflow() %>%
add_model(bt_model) %>%
add_recipe(loan_recipe)
# tuning/fitting ----
tic("Boosted Tree")
# tuning code
# Fit and evaluate every grid candidate on each resample in loan_fold.
bt_tune <- bt_workflow %>%
tune_grid(
resamples = loan_fold,
grid = bt_grid
)
# calculate runtime info
toc(log = TRUE)
# save runtime info
bt_runtime <- tic.log(format = TRUE)
# write out results and workflow ---
save(bt_tune, bt_workflow, bt_runtime, file = "model_info/bt_tune.rda")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.