blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6c36c472f9813ca4edbf6d56e3cacd38b6383f32 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.security.identity/man/securityhub_enable_security_hub.Rd | 419a76c23dc1af9310605a87546ab8f02b4230db | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 2,146 | rd | securityhub_enable_security_hub.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/securityhub_operations.R
\name{securityhub_enable_security_hub}
\alias{securityhub_enable_security_hub}
\title{Enables Security Hub for your account in the current Region or the
Region you specify in the request}
\usage{
securityhub_enable_security_hub(Tags, EnableDefaultStandards)
}
\arguments{
\item{Tags}{The tags to add to the hub resource when you enable Security Hub.}
\item{EnableDefaultStandards}{Whether to enable the security standards that Security Hub has
designated as automatically enabled. If you do not provide a value for
\code{EnableDefaultStandards}, it is set to \code{true}. To not enable the
automatically enabled standards, set \code{EnableDefaultStandards} to
\code{false}.}
}
\value{
An empty list.
}
\description{
Enables Security Hub for your account in the current Region or the
Region you specify in the request.
When you enable Security Hub, you grant to Security Hub the permissions
necessary to gather findings from other services that are integrated
with Security Hub.
When you use the
\code{\link[=securityhub_enable_security_hub]{enable_security_hub}} operation to
enable Security Hub, you also automatically enable the following
standards.
\itemize{
\item CIS AWS Foundations
\item AWS Foundational Security Best Practices
}
You do not enable the Payment Card Industry Data Security Standard (PCI
DSS) standard.
To not enable the automatically enabled standards, set
\code{EnableDefaultStandards} to \code{false}.
After you enable Security Hub, to enable a standard, use the
\code{\link[=securityhub_batch_enable_standards]{batch_enable_standards}}
operation. To disable a standard, use the
\code{\link[=securityhub_batch_disable_standards]{batch_disable_standards}}
operation.
To learn more, see \href{https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-settingup.html}{Setting Up AWS Security Hub}
in the \emph{AWS Security Hub User Guide}.
}
\section{Request syntax}{
\preformatted{svc$enable_security_hub(
Tags = list(
"string"
),
EnableDefaultStandards = TRUE|FALSE
)
}
}
\keyword{internal}
|
93eb0b61352cd22c1be6c867d8a6a23f600a23a8 | 5b9e8b0f0eb4b1f01c3210dad94285a08a05a830 | /test.R | b95b05f55dc576cd08c8a6cb4132a862e5db3105 | [] | no_license | sartinsh/R_funkcijas_darbam | c9affd036c69eae1e7ab919ef34b03e0c5a7dec9 | 46dde0e13d80b67d796aa98102e19f0a047757f9 | refs/heads/master | 2020-05-24T05:34:34.802574 | 2017-12-12T14:09:31 | 2017-12-12T14:09:31 | 84,826,846 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 42 | r | test.R | hw <- function(x) {print("Hello world!")}
|
c96edc37bb9693061253a90e9d13cf2be4522cef | de3ced01d64aa5e4dbbc92d3d29ccc51fa97999b | /R/GeocodeBreweries.R | f70cf775ddc0d8e193f60f2e9778999ef915743c | [] | no_license | duffy88/WorldBreweryMap | db54c4d7ade026b35046038407a79dd6d79119ca | 9e2b158fdba7625a312279d186163a9dda8bd801 | refs/heads/master | 2021-01-11T14:53:39.303529 | 2017-01-27T20:48:34 | 2017-01-27T20:48:34 | 80,244,727 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 777 | r | GeocodeBreweries.R |
# Load Packages
library(ggmap)
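# Note: ggmap::geocode() queries an external geocoding service (Google by
# default in older ggmap versions) that enforces daily rate limits, which is
# why the loop below is run in batches of a few thousand rows at a time.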
# Load cleaned brewery info
beerGraphs <- readRDS("data/BeerGraphsBreweries(wStreets).rds")
# 3734 to 5000,5001 to 7158
for(i in 6234:7158){ #nrow(beerGraphs)
if(!is.na(beerGraphs$FinalLocationFull[i])){
    lonlat <- geocode(beerGraphs$FinalLocationFull[i])
  } else {
    lonlat <- data.frame(lon = NA, lat = NA) # don't reuse the previous row's result
  }
# Compile output of locations
if(i ==6234){
temp2 <- cbind(beerGraphs$Brewery[i],beerGraphs$FinalLocationFull[i], lonlat)
print(i)
}else {
temp2 <- rbind(temp2, cbind(beerGraphs$Brewery[i],beerGraphs$FinalLocationFull[i], lonlat))
print(i)
}
if(i ==7158){
temp2[ is.na(temp2[,2]),"lon"] <- NA
temp2[ is.na(temp2[,2]),"lat"] <- NA
saveRDS(temp2, "data/Locations2017Jan/BreweryLocations(Batch-4)FullLoc.rds")
}
}
|
3bd8f83b660aeda1e5db8aa67933bc75b45beef7 | 67de61805dd839979d8226e17d1316c821f9b1b4 | /demo/BivariateCorrelation.R | e8ed6f2f71b579ea4a002f4939f6a61d5981daea | [
"Apache-2.0"
] | permissive | falkcarl/OpenMx | f22ac3e387f6e024eae77b73341e222d532d0794 | ee2940012403fd94258de3ec8bfc8718d3312c20 | refs/heads/master | 2021-01-14T13:39:31.630260 | 2016-01-17T03:08:46 | 2016-01-17T03:08:46 | 49,652,924 | 1 | 0 | null | 2016-01-14T14:41:06 | 2016-01-14T14:41:05 | null | UTF-8 | R | false | false | 3,915 | r | BivariateCorrelation.R | #
# Copyright 2007-2016 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# Program: BivariateCorrelation.R
# Author: Hermine Maes
# Date: 2009.08.01
#
# ModelType: Saturated
# DataType: Continuous
# Field: None
#
# Purpose:
# Optimization Example in OpenMx: Testing significance of correlation
#
# RevisionHistory:
# Hermine Maes -- 2009.10.08 updated & reformatted
# Ross Gore -- 2011.06.15 added Model, Data & Field metadata
# Mike Hunter -- 2013.09.16 nudged starting values of second model varainces away from zero
# Hermine Maes -- 2014.11.02 piecewise specification
# -----------------------------------------------------------------------------
require(OpenMx)
require(MASS)
# Load Library
# -----------------------------------------------------------------------------
set.seed(200)
rs=.5
xy <- mvtnorm::rmvnorm (1000, c(0,0), matrix(c(1,rs,rs,1),2,2))
testData <- xy
testData <- testData[, order(apply(testData, 2, var))[2:1]] #put the data columns in order from largest to smallest variance
# Note: Users do NOT have to re-order their data columns. This is only to make data generation the same on different operating systems: to fix a cross-platform inconsistency in random multivariate normal generation (hence mvtnorm::rmvnorm rather than MASS::mvrnorm).
selVars <- c('X','Y')
dimnames(testData) <- list(NULL, selVars)
summary(testData)
cov(testData)
# Simulate Data: two standardized variables X & Y with correlation of .5
# -----------------------------------------------------------------------------
bivCorModel <- mxModel("bivCor",
mxMatrix( type="Full", nrow=1, ncol=2, free=TRUE, values=c(0,0), name="expMean" ),
mxMatrix( type="Lower", nrow=2, ncol=2, free=TRUE, values=.5, name="Chol" ),
mxAlgebra( expression=Chol %*% t(Chol), name="expCov"),
mxData( observed=testData, type="raw" ),
mxExpectationNormal( covariance="expCov", means="expMean", dimnames=selVars),
mxFitFunctionML()
)
# Fit Saturated Model with Raw Data and Matrix-style Input
# -----------------------------------------------------------------------------
bivCorFit <- mxRun(bivCorModel)
EM <- mxEval(expMean, bivCorFit)
EC <- mxEval(expCov, bivCorFit)
LL <- mxEval(fitfunction, bivCorFit)
# Run Model and Generate Output
# -----------------------------------------------------------------------------
bivCorModelSub <-mxModel(bivCorModel,
mxMatrix( type="Diag", nrow=2, ncol=2, free=TRUE,
values=.2, # Note: to test optimizer for robustness to bad starting values, change to 0.
name="Chol" )
)
# Specify SubModel testing Covariance=Zero
# -----------------------------------------------------------------------------
bivCorFitSub <- mxRun(bivCorModelSub)
EMs <- mxEval(expMean, bivCorFitSub)
ECs <- mxEval(expCov, bivCorFitSub)
LLs <- mxEval(fitfunction, bivCorFitSub)
Chi= LLs-LL;
LRT= rbind(LL,LLs,Chi); LRT
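# Chi = LLs - LL is the likelihood-ratio statistic (OpenMx fit values are
# -2 log-likelihoods); it tests the single covariance parameter that the
# diagonal submodel drops, so it is compared against a chi-square with 1 df.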
# Run Model and Generate Output
# -----------------------------------------------------------------------------
omxCheckCloseEnough(LL, 5407.036, .001)
omxCheckCloseEnough(c(EC), c(1.0656, 0.4752, 0.4752, 0.9292), .001)
omxCheckCloseEnough(c(EM), c(0.058, 0.006), .001)
# Compare OpenMx Results to Mx Results
# LL: likelihood; EC: expected covariance, EM: expected means
# -----------------------------------------------------------------------------
|
c7c055418fe4f7a5fb8828802f2e69d32270b67d | 5cbebb79a58838ab81c4042ed0b4c69b43904efb | /index/scripts/incubations.r | 072cb9b9827a1a00b40661a2a2b7b3c01aa5c976 | [] | no_license | harmvankup/Master_Thesis | 9edbf2bd4a77709839122ec062eede16c1a38c0a | 0923e3f3a87d95aaf68c464a1892def7a6666ce7 | refs/heads/main | 2023-07-15T17:03:11.445450 | 2021-08-29T14:04:14 | 2021-08-29T14:04:14 | 372,544,994 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,109 | r | incubations.r | library(readxl)
library(here)
library(tidyverse)
library(ggplot2)
library(reshape2)
library(xtable)
# import raw data
IC_raw_data <- read_excel(path.expand(here("index","data", "IC_raw_data.xlsx")))
incubationgraph <- read_excel(here("index","data","incubationgraph.xlsx"))
####== Transform the data ==####
# middle duplicates for IC data, and calculate umol/L
IC_inc_data <-
IC_raw_data %>%
filter(str_detect(sample,'inc')) %>%
group_by(sample) %>%
mutate(P = (mean(Phosphate)*1000)/94.9714,
"SO" = mean(Sulphate)*1000/96.06,
NO = mean(Nitrate)*1000/62.004,
"Br" = mean(Bromide)*1000/79.9,
"Fl" = mean(Fluoride)*1000/19,
"Cl" = mean(Chloride)*1000/35.45) %>%
distinct(sample, .keep_all = TRUE)
# Join IC and photometric data, and add treated and oxic status for all cores.
inc_data <-
full_join(
incubationgraph,
IC_inc_data,
by = c("samplenr" = "sample"),
copy = FALSE,
suffix = c(".photometric", ".IC"),
keep = FALSE
) %>%
mutate(
HS = 1.1*(((H2SuM*1.13759603802245)/0.026915542)/1000),
PFe = Preal/FeTot,
FeIIFeIII = FeII/FeIII,
FeIIFetot = FeII/FeTot,
oxic_state = case_when( str_detect(samplenr,"2\\.") ~ "oxic",
str_detect(samplenr,"3\\.") ~ "anoxic"),
treated = case_when( str_detect(samplenr,"A|C") ~ "treated",
str_detect(samplenr,"B|D") ~ "nontreated"),
Location = case_when( str_detect(samplenr,"A") ~ "A",
str_detect(samplenr,"-B") ~ "B",
str_detect(samplenr,"-\\C") ~ "C",
str_detect(samplenr,"-\\D") ~ "D"))
# calculate benthic fluxes
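# Each flux is concentration (umol/L) x 0.001 (mL to L) x sample volume (mL),
# divided by the core cross-sectional area; pi*0.0009 m^2 corresponds to an
# assumed core radius of 0.03 m.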
flux_data <- inc_data %>%
mutate(
dP = (0.001*Preal*volume_ml)/(pi*0.0009),
dFe = (0.001*FeTot*volume_ml)/(pi*0.0009),
dNH = (0.001*NH4*volume_ml)/(pi*0.0009),
dSH = (0.001*H2SuM*volume_ml)/(pi*0.0009),
dNO = (0.001*NO*volume_ml)/(pi*0.0009),
dSO = (0.001*SO*volume_ml)/(pi*0.0009)
)
# reshape the data by melting all variables, and grouping them by parameter.
molten_inc <- inc_data %>% select("Core.photometric",
"samplenr",
"time",
"HS",
"FeTot",
"Preal",
"NH4",
"SO",
"NO",
"oxic_state",
"treated",
"Location") %>%
melt(id.vars = c("Core.photometric","samplenr","time", "oxic_state", "treated","Location"),
measured_.vars = c( "HS",
"FeTot",
"Preal",
"NH4",
"SO",
"NO"), na.rm = TRUE)
molten_flux <- flux_data %>% select("Core.photometric",
"time",
"dSH",
"dFe",
"dP",
"dNH",
"dSO",
"dNO",
"oxic_state",
"treated",
"Location") %>%
melt( id.vars = c("Core.photometric","time","oxic_state","treated","Location"))
####== create plots for all parameters ==####
# Lable function
Labels = as_labeller( c( "HS" = "HS^'-'",
"FeII" = "Fe^'2+'",
"FeTot" = "Fe[tot]",
"FeIII" = "Fe^'3+'",
"Preal" = "PO[4]^'3-'",
"NH4" = "NH[4]^'+'",
"P" = "PO[4]^'3-'",
"SO" = "SO[4]^'2-'",
"NO" = "NO[3]^'-'",
"Br" = "Br^'-'",
"Fl" = "F^'-'",
"Cl" = "Cl^'-'",
"oxic" = "'Oxic incubations'",
"anoxic" = "'Anoxic incubations'"), default = label_parsed )
d <- list(molten_inc, molten_flux)
l <- list(c("FeTot","Preal","HS","NH4","SO","NO"), c("dFe", "dP", "dSH", "dNH", "dSO", "dNO"))
for (i in 1:2) {
graph <-
ggplot(transform(d[[i]],
variable = factor(variable, levels = l[[i]])),
mapping = aes(x = time ,
y = value,
color = Location,
shape = Location,
by = Core.photometric)
) +
scale_color_manual( values=c("A" = "coral3", "B" = "deepskyblue4", "C" = "coral3", "D" = "deepskyblue4")) +
scale_shape_manual( values=c("A" = 16, "B" = 1, "C" = 17, "D" = 2)) +
geom_point() +
geom_line() +
theme_gray() +
theme(axis.title=element_text(size=20),
plot.title = element_text(size=25, face="bold", hjust = 0.5), #hjust is position (left/right/ middle =0.5)
axis.text=element_text(size=14,angle = 0, hjust = 0.5),
strip.text.y = element_text(size=14, angle = 0),
strip.text.x = element_text(size=14, angle = 0),
# panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.title = element_text(size = 25),
legend.text = element_text(size = 21)) +
facet_grid(variable ~ oxic_state, scales = "free_y", switch = "y", labeller = Labels) + labs(title = "Benthic flux experiment", x= "time (days)", y = expression(paste("concentration in ",mu,"mol/L")))
ggsave(paste("IC_incubations_",i,".eps",sep=""), plot =graph, path = path.expand(here("index","figures")),
width =30, height = 27,units = "cm",dpi = 600)
}
####== Benthic flux calculations ==####
# slope of NH4 line
inc_flux <- inc_data %>% mutate(NH4mol = (0.000001*NH4*`volume_ml`)/(pi*0.0009),
Pmol = (0.000001*Preal*`volume_ml`)/(pi*0.0009),
Femol = (0.000001*FeTot*`volume_ml`)/(pi*0.0009),
HSmol = (0.000001*HS*`volume_ml`)/(pi*0.0009),
NOmol = (0.000001*NO*`volume_ml`)/(pi*0.0009),
SOmol = (0.000001*SO*`volume_ml`)/(pi*0.0009))
O2slope = c(-0.172133,
-0.404987,
-0.360635,
-0.0793467,
-0.370374,
-0.174982,
-0.227892,
-0.167105
)
O2_umol_day = as_tibble_col((O2slope*24)/(31.9988*pi*0.0009), column_name = "O2")
fluxmin <- c(0,0,7)
fluxmax <- c(10, 25,60)
fluxan <- list(c("NH4", "Fe"), c("P"),c("HS","NH4","SO","Fe") )
fluxox <- list(c("NH4"), c("P"), c("SO","NO") )
fluxslopes <- tibble(row.names = c("A","A","B","B","C","C","D","D"))
for (i in 1:3) {
d <- inc_flux[inc_flux$time < fluxmax[[i]] & inc_flux$time > fluxmin[[i]] ,] %>%
group_by(Core.photometric) %>%
summarize(NH4 = summary(lm(NH4mol ~ time))$coefficients[2, 1], # row 2, column 1 is the slope estimate (column 2 is its SE)
              P = summary(lm(Pmol ~ time))$coefficients[2, 1],
              Fe = summary(lm(Femol ~ time))$coefficients[2, 1],
              HS = summary(lm(HSmol ~ time))$coefficients[2, 1],
              NO = summary(lm(NOmol ~ time))$coefficients[2, 1],
              SO = summary(lm(SOmol ~ time))$coefficients[2, 1]
)
an <- filter(d, str_detect(Core.photometric,"3\\.")) %>% select(fluxan[[i]])
colnames(an) <- paste0(fluxan[[i]], c(rep(fluxmax[[i]], length(fluxan[[i]]))), c(rep("An",length(fluxan[[i]]))))
ox <- filter(d, str_detect(Core.photometric,"2\\.")) %>% select(fluxox[[i]])
colnames(ox) <- paste0(fluxox[[i]], c(rep(fluxmax[[i]], length(fluxox[[i]]))), c(rep("Ox",length(fluxox[[i]]))))
fluxslopes <- bind_cols(fluxslopes, an, ox)
}
fluxslopes <- bind_cols(fluxslopes, O2_umol_day)
fluxslopes <- relocate(fluxslopes, row.names, O2, Fe10An, Fe60An, P25An, NH410An, NH460An, HS60An, SO60An, P25Ox, NH410Ox, SO60Ox, NO60Ox)
view(fluxslopes)
print(xtable(fluxslopes, type = "latex"), file = "slopes.tex")
C_mg_day = -30*(O2slope*24)/31.9988
####== P to Fe ratio calculations ==####
inc_data %>%
group_by(Core.photometric) %>%
summarise(PtoFe = mean(PFe, na.rm = TRUE),
FeIItoFeIII = mean(FeIIFeIII, na.rm = TRUE),
FeIItoFetot = mean(FeIIFetot, na.rm = TRUE))
PFe_bp <- ggplot(inc_data, mapping = aes(x = Core.photometric, y = PFe)) + geom_boxplot()
FeIIFeIII_bp <- ggplot(inc_data, mapping = aes(x = Core.photometric, y = FeIIFetot)) + geom_boxplot()
PFe_inc_plot <- ggplot(inc_data,
mapping = aes(x = time ,
y = PFe,
color = treated,
shape = Location,
by = Core.photometric)
) +
scale_color_manual( values=c("treated" = "red", "nontreated" = "blue")) +
scale_shape_manual( values=c("A" = 16, "B" = 1, "C" = 17, "D" = 2)) +
geom_point() +
geom_line() + facet_grid(Location ~ oxic_state, scales = "free_y",switch = "y")
FeIIFetot_inc_plot <- ggplot(inc_data,
mapping = aes(x = time ,
y = FeIIFetot,
color = treated,
shape = Location,
by = Core.photometric)
) +
scale_color_manual( values=c("treated" = "red", "nontreated" = "blue")) +
scale_shape_manual( values=c("A" = 16, "B" = 1, "C" = 17, "D" = 2)) +
geom_point() +
geom_line() + facet_grid(Location ~ oxic_state, scales = "free_y")
show(PFe_inc_plot)
|
351bdfd050740447f7ecac65319ea3897fdadd2e | 1aadf121677606b8fc6e1c4f7862a2f6bb78a923 | /compute_elo_colony.R | 6444622c331671bd7ad22531fece4167446fd768 | [] | no_license | pv272/Dominance_DMR_Analyses | 189cebb89f5523df723cb0c8d85cc6bb975362d1 | 244fb5fb6f32a86629687ae4cee8ace0a594ba43 | refs/heads/master | 2020-04-05T21:45:32.922855 | 2018-12-20T12:21:17 | 2018-12-20T12:21:17 | 157,229,265 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,487 | r | compute_elo_colony.R | source("Elo_functions.R") ## warnings can be ignored
## get the elo-score, based on Mathias script
library(readr)
library(dplyr)
library(purrr)
library(tidyr)
library(ggplot2)
## read data
d <- read_csv("SubCall.csv")
d <- d %>% filter(d$ObsType == "Scan") ## keep only scan obs
# create a nested df by colony
d2 <- d %>% nest(-Colony) %>%
mutate(n_obs = map_dbl(data, ~ .x %>% group_by(ObsType) %>%
                           summarize(ObsCount = n_distinct(ObsRef)) %>%
                           pull(ObsCount))) %>% ## get the number of interactions per colony (scan only)
mutate(all_ids= map(data, ~ unique(c(.x$Winner, .x$Loser)))) %>% ## get list of all ids (needed for fitting the elo)
filter(n_obs > 10) %>% ## keep only colonies with more than 10 obs
mutate(K = map2_dbl(data, all_ids, ~ optim(par=4, burn_in=0, elo.model1, all_ids = .y, IA_data = .x,
return_likelihood=T, init_elo = 100, method= "Brent", upper = 15, lower = 0)$par)) ## optim the K value for each colony
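## K is the Elo update step: it controls how much a single win/loss shifts
## the two interactants' ratings. It is fitted per colony by optimising the
## likelihood of the interaction sequence with optim (Brent's method,
## bounded between 0 and 15).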
out1 <- d2 %>% mutate(new_df = pmap(list(X = d2$data, Y = d2$all_ids, Z = as.list(d2$K)), function(X, Y, Z) {
out <- elo.model1(par = Z, burn_in = 0, init_elo = 1000, IA_data = X, all_ids = Y, return_likelihood =F)
}
)) ## fit the elo with correct K values
out11 <- out1 %>% mutate(plot = map(new_df, ~ plot_elo(.x))) ## add a column with plot for each colony
master_df <- out11
save(master_df, file = "master_df.rda", compress = "xz") ## save new master_df with all info
|
4e0d918b65064dff7f99aeaddea498fa9a8367b5 | 75ce086d36cb31b356f3af3f297a5508aeaef826 | /plot4.R | f7c5d581c30e935e6a4cf28ab3eb87ddfdb23fc5 | [] | no_license | jiuno/ExData_Plotting1 | 3f8c219fbdf1ea332fd7fd37e6570a38b12fe8fb | dc1882cb5672d323891645b6c814f9964a2c72cd | refs/heads/master | 2022-11-20T17:48:57.393702 | 2020-07-18T19:43:09 | 2020-07-18T19:43:09 | 280,527,670 | 0 | 0 | null | 2020-07-17T21:22:21 | 2020-07-17T21:22:20 | null | UTF-8 | R | false | false | 1,833 | r | plot4.R | #Required packages
library(lubridate)
library(dplyr)
power <- read.csv(file = "household_power_consumption.txt",header = T,sep = ";"
,na.strings = "?")
#Set the time locale in R so the Date values are written in English.
Sys.setlocale("LC_TIME","english")
#Convert "Date" column into Date variable type.
power[,1] <- dmy(power$Date)
#Filter rows between 2007/02/01 and 2007/02/02 (included)
minipower <- power %>% filter(Date == "2007-02-01" | Date == "2007-02-02" )
#Convert "Time" variables into "period" type variable from lubridate
minipower[,2] <- hms(minipower$Time)
#Create "Date/Time" Column. Adds Days and time from Date and Time variables.
minipower <- mutate(minipower,"Date/Time" = Date + Time)
#Open png device
png("plot4.png",width = 480,height = 480)
#Set global parameter "mfcol" so 4 plots can be plotted on the same graph.
par(mfcol = c(2,2))
#Global active power
plot(y = minipower$Global_active_power,x = minipower$`Date/Time`,
type = "l",
ylab = "Global Active Power",
xlab = "")
#Energy sub metering
plot(x = minipower$`Date/Time`,
y = minipower$Sub_metering_1,
type = "l",
col = "black",
ylab = "Energy sub metering",
xlab = "")
lines(x = minipower[,10], y = minipower[,8],
type = "l",
col = "red")
lines(x = minipower[,10], y = minipower[,9],
type = "l",
col = "blue")
legend("topright",
legend = c(names(minipower[,7:9])),
lty = 1,col = c("black","red","blue"))
#Voltage
plot(x = minipower$`Date/Time`,
y = minipower$Voltage,
type = "l",
col = "black",
ylab = "Voltage",
xlab = "datetime")
#Global reactive power
plot(x = minipower$`Date/Time`,
y = minipower$Global_reactive_power,
type = "l",
col = "black",
ylab = "Global_reactive_power",
xlab = "datetime")
#Close png device and save png file
dev.off() |
f8cc098ab31c09daa68ef2eea579fb1c44c37eb8 | 3886da207e03c6e2266d09b1bf30d557d35bf09a | /load_usernames.R | 18e21ae394eca22af883616733dd485ef882641a | [] | no_license | andreasose/twitter-scripts | 39ee0618b5f8ce14baadcbc2d42e03fdb449c1e6 | a778f6d68dd2a216d447eeedf66cd1195425c4de | refs/heads/master | 2021-01-17T04:36:21.327635 | 2017-02-26T19:59:36 | 2017-02-26T19:59:36 | 57,212,056 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 551 | r | load_usernames.R | library(twitteR)
library(rjson)
library(httr)
twitter_list <- "journalister"
twitter_name <- "andreasose"
api.url <- paste0("https://api.twitter.com/1.1/lists/members.json?slug=",
twitter_list, "&owner_screen_name=", twitter_name, "&count=5000")
response <- POST(api.url, config(token=twitteR:::get_oauth_sig()))
response.list <- fromJSON(content(response, as = "text", encoding = "UTF-8"))
users.names <- sapply(response.list$users, function(i) i$name)
users.screennames <- sapply(response.list$users, function(i) i$screen_name)
|
3b71ac666bed4ff08627b7d3098a671c1dcb7f80 | 701126efc2e5c4fd913fb880afc65ffc5591b4d2 | /R/methRatios.R | 79c114103f0db3e798171df15c29ebbfe310fc75 | [] | no_license | timpeters82/aaRon | 340f9eb00c70879bb5f173b6433bf7ca5c74c3e3 | ea2c009121b8221740a9a04f64389306d13cb5bf | refs/heads/master | 2020-12-25T17:56:27.536462 | 2017-04-26T08:18:40 | 2017-04-26T08:18:40 | 58,909,291 | 0 | 0 | null | 2016-05-16T06:49:11 | 2016-05-16T06:34:21 | R | UTF-8 | R | false | false | 704 | r | methRatios.R | #' methRatios
#'
#' Calculates methylation ratios from count data
#'
#' @param x \code{GRanges} of methylation count data
#' @param samples \code{data.frame} describing the samples to calculate ratios for
#' @param minCov minimum amount of coverage to calculate a ratio
#' @return \code{GRanges} of methylation ratios
#'
#' @export
#'
#' @importFrom GenomicRanges values values<-
#'
#' @author Aaron Statham <a.statham@@garvan.org.au>
methRatios <- function(x, samples, minCov=5) {
stopifnot(all(c("Sample", "C", "cov") %in% colnames(samples)))
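  # Sites with coverage below minCov are masked to NA so they yield NA ratios
  # rather than unstable low-coverage estimates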
tmp <- as.matrix(values(x)[samples$cov])
tmp[tmp<minCov] <- NA
values(x) <- as.matrix(values(x)[samples$C])/tmp
names(values(x)) <- samples$Sample
x
} |
b9220bdfdbf9c3731a690fe5100e44df1a622190 | 81cffdc441d1ec58f81463eb8779df3ac2a86eac | /lib/002_streetdata_json_cleaning.R | da52ea0fe83202385ee6688629458120035d9a61 | [] | no_license | PengfeiWangWZ/ParkmanGo | bbd61cdd8a342606504ce31a757d2058c5eee1d4 | c94f78a3c091919e6c37aae4b55a19a6b4a9a295 | refs/heads/master | 2021-06-15T01:25:01.829832 | 2017-03-16T23:13:11 | 2017-03-16T23:13:11 | 70,748,086 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,353 | r | 002_streetdata_json_cleaning.R | #install.packages("jsonlite")
library(jsonlite)
library(dplyr)
library(tidyr)
library(plyr)
setwd("/Users/yanjin1993/Google Drive/Columbia University /2016 Fall /Applied Data Science /Project_002/")
nyc.st.json <- fromJSON("original_data/nyc-streets.geojson")
dat.nyc.st <- nyc.st.json$features
datclean.nycst <- cbind(dat.nyc.st$properties %>%
select(LINEARID, FULLNAME), dat.nyc.st$geometry)
# 1. Data Processing ###################################################################################################################################
# 1.1 Nested Data Flatten
# Make a dataframe for flatten geo-code for each street name (LineString only)
rows <- data.frame()
for (i in 1:nrow(datclean.nycst)) {
if (datclean.nycst$type[i] == "LineString"){
# Extract the list of geocode for each street
cols <- as.data.frame(datclean.nycst$coordinates[[i]])
cols <- cols %>% mutate(V1 = paste0(V1, ", ", V2)) %>%
select(-V2)
rows <- rbind.fill(rows, as.data.frame(t(cols)))
} else {
# If is not a LineString type, then the entire row remains NA
rows <- rbind.fill(rows, data.frame(NA))
}
}
dat.rows <- rows %>% select(-NA.)
# Merge two dataframes
dat.nyc.street <- cbind(datclean.nycst, dat.rows)
# Save to local
saveRDS(dat.nyc.street, "exported_data/nyc_street_coordinate.rds")
|
aeb2827f49d9c931a88c83faee76d04c3edf75c7 | ee89632a65b33b9f8a4fe9955ece2f476b99b467 | /R/plot_log_actual_fitted.R | adadd13ffca1626ea83e22d5c508800380416ff2 | [
"MIT"
] | permissive | dnepple/tprstats | a938a6671b2f6fa46d075ac9c61acdb5f73fc53c | 4c5d93a66d40af3537aeb902ca2ce48db1b8e736 | refs/heads/master | 2023-03-18T14:15:14.449458 | 2023-03-13T18:33:45 | 2023-03-13T18:33:45 | 165,563,871 | 8 | 7 | NOASSERTION | 2023-03-13T18:33:46 | 2019-01-13T22:58:05 | R | UTF-8 | R | false | false | 769 | r | plot_log_actual_fitted.R | #' Plot Log Actual Fitted
#'
#' @param linmod Linear model.
#' @param MyData The data.
#' @param col_name Name of column variable as a string (use quotes).
#'
#' @export
plotLogActualFitted <- function(linmod, MyData, col_name) {
Y <- unlist(MyData[, col_name])
Observation <- seq(1, NROW(MyData))
Pred_and_PI <- exp(stats::predict(linmod, MyData, interval = "predict"))
lyhat <- Pred_and_PI[, 1]
Lower <- Pred_and_PI[, 2]
Upper <- Pred_and_PI[, 3]
graphics::plot(Y ~ Observation, pch = 20, main = "Actual (black), Predicted (red), and 95% PI (blue)")
graphics::lines(lyhat ~ Observation, lwd = 1, col = "red")
  graphics::lines(Upper ~ Observation, col = "blue", lwd = 1)
  graphics::lines(Lower ~ Observation, col = "blue", lwd = 1)
graphics::grid()
}
|
13143e19840b5493dc6f1a811df83d30a68c0c98 | 97eac27c24e12a8f6dc68940ab4c0f02f32cf53d | /package/potts/tests/debug-cond.R | 556bba80e76e7a216fed87e47661913a6bf4562b | [] | no_license | cjgeyer/potts | a2a44b19df8bf1dff8f70e1997116afc8d7d165b | 5b9c3ad90ecee8f7b28a07f80bfde39ef768964d | refs/heads/master | 2022-08-27T01:30:58.210677 | 2022-08-12T16:48:45 | 2022-08-12T16:48:45 | 11,518,189 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,598 | r | debug-cond.R |
library(potts)
library(pooh)
set.seed(42)
ncolor <- as.integer(4)
alpha <- rnorm(ncolor) * 0.01
beta <- log(1 + sqrt(ncolor))
theta <- c(alpha, beta)
nrow <- 25
ncol <- 20
x <- matrix(1, nrow = nrow, ncol = ncol)
foo <- packPotts(x, ncolor)
out <- potts(foo, theta, nbatch = 5, blen = 3, nspac = 2, debug = TRUE,
boundary = "condition")
names(out)
identical(out$initial, foo)
before <- out$pstate
dim(before)
niter <- dim(before)[1]
niter == out$nbatch * out$blen * out$nspac
after <- before
after[- niter, , ] <- before[- 1, ,]
after[niter, , ] <- unpackPotts(out$final)
sort(unique(as.vector(before)))
sort(unique(as.vector(after)))
all.equal(x, before[1, , ])
##### check conditioning #####
before.foo <- before
before.foo[ , seq(2, nrow - 1), seq(2, ncol - 1)] <- 0
after.foo <- after
after.foo[ , seq(2, nrow - 1), seq(2, ncol - 1)] <- 0
identical(apply(before.foo, c(2, 3), max), before.foo[1, , ])
identical(apply(before.foo, c(2, 3), min), before.foo[1, , ])
identical(apply(after.foo, c(2, 3), max), before.foo[1, , ])
identical(apply(after.foo, c(2, 3), min), before.foo[1, , ])
##### calculate canonical statistics #####
ttt <- matrix(NA, niter, ncolor + 1)
for (icolor in 1:ncolor)
ttt[ , icolor] <- apply(after - after.foo == icolor, 1, sum)
colin <- seq(2, ncol - 1)
rowin <- seq(2, nrow - 1)
tstar <- rep(0, niter)
for (i in 2:nrow)
tstar <- tstar +
apply(after[ , i, colin] == after[ , i - 1, colin], 1, sum)
for (i in 2:ncol)
tstar <- tstar +
apply(after[ , rowin, i] == after[ , rowin, i - 1], 1, sum)
ttt[ , ncolor + 1] <- tstar
##### check batch means #####
foo <- ttt[seq(1, niter) %% out$nspac == 0, ]
foo <- array(as.vector(foo), c(out$blen, out$nbatch, ncolor + 1))
foo <- apply(foo, c(2, 3), mean)
identical(foo, out$batch)
##### check bonds #####
bprob <- (- expm1(- beta))
all.equal(bprob, 1 - exp(- beta))
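## Swendsen-Wang-style bond probability: equal-colored neighbors bond with
## probability 1 - exp(-beta); -expm1(-beta) computes the same quantity with
## better numerical accuracy when beta is small.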
my.hstate.possible <- array(FALSE, c(niter, nrow, ncol))
for (i in seq(1, nrow - 1))
my.hstate.possible[ , i, colin] <-
before[ , i, colin] == before[ , i + 1, colin]
storage.mode(my.hstate.possible) <- "integer"
identical(my.hstate.possible == 1, out$hunif != -1)
my.hstate <- out$hunif < bprob
storage.mode(my.hstate) <- "integer"
my.hstate <- my.hstate * my.hstate.possible
identical(my.hstate, out$hstate)
my.vstate.possible <- array(FALSE, c(niter, nrow, ncol))
for (i in seq(1, ncol - 1))
my.vstate.possible[ , rowin, i] <-
before[ , rowin, i] == before[ , rowin, i + 1]
storage.mode(my.vstate.possible) <- "integer"
identical(my.vstate.possible == 1, out$vunif != -1)
my.vstate <- out$vunif < bprob
storage.mode(my.vstate) <- "integer"
my.vstate <- my.vstate * my.vstate.possible
identical(my.vstate, out$vstate)
##### check patches #####
my.row <- row(my.hstate[1, , ])
my.col <- col(my.hstate[1, , ])
my.other.row <- my.row + 1
my.other.row[my.other.row > nrow] <- 1
my.other.col <- my.col + 1
my.other.col[my.other.col > ncol] <- 1
vertices <- paste(my.row, my.col, sep = ":")
patch.equals <- NULL
for (iiter in 1:niter) {
isbond <- my.hstate[iiter, , ] == 1
my.row.bond <- as.vector(my.row[isbond])
my.col.bond <- as.vector(my.col[isbond])
my.other.row.bond <- as.vector(my.other.row[isbond])
my.from.h <- paste(my.row.bond, my.col.bond, sep = ":")
my.to.h <- paste(my.other.row.bond, my.col.bond, sep = ":")
isbond <- my.vstate[iiter, , ] == 1
my.row.bond <- as.vector(my.row[isbond])
my.col.bond <- as.vector(my.col[isbond])
my.other.col.bond <- as.vector(my.other.col[isbond])
my.from.v <- paste(my.row.bond, my.col.bond, sep = ":")
my.to.v <- paste(my.row.bond, my.other.col.bond, sep = ":")
wout <- weak(from = c(my.from.h, my.from.v), to = c(my.to.h, my.to.v),
domain = vertices, markers = TRUE)
blab <- as.vector(out$patch[iiter, , ])
widx <- sort(unique(wout))
bidx <- blab[match(widx, wout)]
patch.equals <- c(patch.equals, identical(bidx[wout], blab))
}
all(patch.equals)
##### check colors #####
unif.equals <- NULL
my.after <- after
for (iiter in 1:niter) {
fred <- out$patch[iiter, , ]
blab <- as.vector(fred)
blab.count <- tabulate(blab, nbins = nrow * ncol)
blab.fixed <- c(fred[c(1, nrow), ], fred[ , c(1, ncol)])
blab.fixed <- sort(unique(blab.fixed))
punif <- out$punif[iiter, ]
unif.equals <- c(unif.equals, identical(punif != -1, blab.count != 0))
alpha.prod <- outer(blab.count, alpha)
p.prod <- exp(alpha.prod)
p.sum <- apply(p.prod, 1, sum)
p <- sweep(p.prod, 1, p.sum, "/")
p.cum <- apply(p, 1, cumsum)
p.cum <- t(p.cum)
p.foo <- sweep(p.cum, 1, out$punif[iiter, ])
newcolor <- apply(p.foo < 0, 1, sum) + 1
newcolor[blab.count == 0] <- NA
is.fixed <- blab %in% blab.fixed
color.old <- as.vector(before[iiter, , ])
color.new <- newcolor[blab]
color.new[is.fixed] <- color.old[is.fixed]
my.after[iiter, , ] <- matrix(color.new, nrow, ncol)
}
all(unif.equals)
all.equal(after, my.after)
##### check uniform random numbers #####
u <- c(as.vector(out$hunif), as.vector(out$vunif), as.vector(out$punif))
u <- u[u != -1]
ks.test(x = u, y = "punif")
##### check alpha = 0 #####
alpha <- rep(0, ncolor)
theta <- c(alpha, beta)
out.too <- potts(out, param = theta, debug = FALSE)
alpha <- rep(0.1, ncolor)
theta <- c(alpha, beta)
out.too.too <- potts(out, param = theta, debug = FALSE)
all.equal(out.too$batch, out.too.too$batch)
|
7aee94707aa6b525d945ed7e23e6fc9357bc09d0 | 12021ef6948d02d1267b4db9f87e5bbd62068c1a | /1 - Codeup.R | e6d2a966925f14a16831ce6784aa7705baece0f6 | [] | no_license | msheffer2/Hierarchical-Bayes-Choice-Study | 508b555893c5bc712e265e6b466bb2078e1194e0 | 947e5ff3e1c4f403b23a200afae8c1c7b44945b6 | refs/heads/master | 2021-01-25T05:44:30.802095 | 2017-03-09T13:54:55 | 2017-03-09T13:54:55 | 80,673,755 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,897 | r | 1 - Codeup.R | # Set working directory
################################################################################
setwd("C:\\repos\\Hierarchical-Bayes-Choice-Study")
# Load libraries
################################################################################
library(tidyverse)
#Explicitly called: dummies
# Load and prepare choice design data
################################################################################
choice_design <- read_delim("./data/choice_design.csv", delim="\t") %>%
select(screen, RAM, processor, price, brand)
#Function to convert from design levels to Effects Coding
effcode <- function(attmat){
effcode_sub <- function(xvec){
att.mat <- dummies::dummy(xvec)
ref.ndx <- att.mat[,1]
att.mat <- att.mat[,-1]
att.mat[ref.ndx==1,] <- -1
return(att.mat)
}
natts <- ncol(attmat)
efmat <- matrix(data=NA, ncol=1, nrow=nrow(attmat))
for (j in 1:natts){
dummat <- effcode_sub(as.numeric(attmat[,j]))
efmat <- cbind(efmat,dummat)
}
efmat <- efmat[,-1]
dimnames(efmat) <- list(NULL,NULL)
return(efmat)
}
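# Illustration (assuming a single 3-level attribute coded 1..3): level 1 is
# the reference and maps to (-1, -1), level 2 to (1, 0), level 3 to (0, 1),
# so effcode(cbind(c(1, 2, 3))) returns rbind(c(-1, -1), c(1, 0), c(0, 1)).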
xmatrix <- effcode(as.matrix(choice_design))
#Creating a price*brand interaction in the xmatrix
price <- choice_design$price - mean(choice_design$price)
brands <- xmatrix[,9:11]
pxb <- price * brands
xmatrix <- cbind(xmatrix, pxb)
#Since there isn't a hold-out card, I'm designating Card #20 as a hold-out
#and removing it from the model datasets
holdout_x <- xmatrix[58:60,]
xmatrix <- xmatrix[-(58:60),]
#Save file
save(xmatrix, file="./data/xmatrix.Rdata")
save(holdout_x, file="./data/holdout_x.Rdata")
rm(price, brands, pxb, choice_design, xmatrix, holdout_x)
# Load and prepare respondent data
################################################################################
load("./data/raw_data.Rdata")
ydata <- select(raw_data, starts_with("DCM"), -dcm1_timer)
#Since there isn't a hold-out card, I'm designating Card #20 as a hold-out
#and removing it from the model datasets
holdout_y <- select(ydata, DCM1_20)
ydata <- select(ydata, -DCM1_20) %>%
as.matrix()
#Save file
save(ydata, file="./data/ydata.Rdata")
save(holdout_y, file="./data/holdout_y.Rdata")
rm(raw_data, ydata, holdout_y)
# Creating all possible combinations file to use during Post Model Analytics
################################################################################
allpos <- data.frame(expand.grid(screen=seq(1:3), RAM=seq(1:3), processor=seq(1:3),
price=seq(1:3), brand=seq(1:4)))
apmatrix <- effcode(as.matrix(allpos))
#Creating a price*brand interaction in the xmatrix
price <- allpos$price - mean(allpos$price)
brands <- apmatrix[,9:11]
pxb <- price * brands
apmatrix <- data.frame(cbind(apmatrix, pxb))
#Listing allpos and apmatrix for saving
apmatrix <- list(apmatrix, allpos)
save(apmatrix, file="./data/apmatrix.Rdata")
rm(price, brands, pxb, allpos, apmatrix, effcode)
|
253b68230b512a9c5c37b30b4b4b1e3b396faee7 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-235/tlc02-nonuniform-depth-235.R | 905307a4a3bb252c25dd77c688c5900ad8468c33 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 80 | r | tlc02-nonuniform-depth-235.R | 6df9b50e117e6424fc6c12f6682324ec tlc02-nonuniform-depth-235.qdimacs 54045 142536 |
fb766a6760c8f2dc250a4f7b355e8fd22e973145 | 0fe9ec847d5e553ee847bab32e45c36f34049ff2 | /man/plot_P_scales.Rd | 23e7552399becfa994cd8db12f868a74a34fe8d3 | [] | no_license | sawers-rellan-labs/soilP | 5eff4599e7ce58ca6626a287439e78f5b91230c9 | aad9b9b1b3516b7edfeb7053aebb0d073c1792c7 | refs/heads/master | 2022-02-25T00:34:41.256465 | 2022-02-17T19:57:52 | 2022-02-17T19:57:52 | 129,322,524 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 923 | rd | plot_P_scales.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_P_scales.R
\name{plot_P_scales}
\alias{plot_P_scales}
\title{Scatterplot with marginal histograms for ISRIC P Retention Potential Scales}
\usage{
plot_P_scales(df, palette = NULL, scale_x = "LD2", scale_y = "combined")
}
\arguments{
\item{df}{dataframe containing Phosphorus Retention Potential Scales
as columns.}
\item{palette}{optional color palette used for the plot.}
\item{scale_x}{name of the scale plotted on the x axis.}
\item{scale_y}{name of the scale plotted on the y axis.}
}
\value{
a scatterplot of the two selected scales with marginal histograms
}
\description{
Continuous scales derived from multivariate analysis of the
P retention potential Space
}
\examples{
}
|
410182126eb89731dab7ef8594c2efc7a895bdad | c3fced9fa3881b8d07000adfb5bebe4213eaa4a4 | /ANALYSIS/DATA/Wirkungsdaten 2016 und 2017 Kopie.R | 8bfbcfd5126c33fd46b20315dda1f60c6c8c42ba | [] | no_license | rafael-schuetz/Pareto | ea9c06cb588113bbdf6a3b5da27a2d2a22f37dc8 | 74c414268d429373b83ccfb27bf222ae25b97c32 | refs/heads/master | 2022-04-13T11:36:56.587595 | 2020-04-08T18:31:48 | 2020-04-08T18:31:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,282 | r | Wirkungsdaten 2016 und 2017 Kopie.R | # PaRE3To-Projekt
# 1. Herunterladen der Daten ----------------------------------------------
install.packages("readxl")
library(readxl)
### 2017 data
Wirkungsdaten_2017 <- read_excel("/Users/yannick/Documents/PaRE3To-Projekt/Pareto-Projekt/ANALYSIS/DATA/CHILDREN Wirkungsdaten_VERTRAULICH_final.xlsx",
sheet = "2017")
View(Wirkungsdaten_2017)
# Drop columns without content
Wirkungsdaten_2017$...17 <- NULL
Wirkungsdaten_2017$Entdeckerfonds <- NULL
### 2016 data
Wirkungsdaten_2016 <- read_excel("/Users/yannick/Documents/PaRE3To-Projekt/Pareto-Projekt/ANALYSIS/DATA/CHILDREN Wirkungsdaten_VERTRAULICH_final.xlsx",
sheet = "2016")
View(Wirkungsdaten_2016)
# Drop columns without content
Wirkungsdaten_2016$Entdeckerfonds <- NULL
# 2. Analyzing the data ----------------------------------------------------
library(dplyr)
library(ggplot2)
# Histogram
hist(Wirkungsdaten_2017$`Anzahl KiJu insgesamt in der Einrichtung`)
hist(Wirkungsdaten_2016$`Anzahl Kinder pro Mahlzeit 2016`)
# Scatterplot
plot(x = Wirkungsdaten_2017$`Gesamtbudget der Einrichtung`,
y = Wirkungsdaten_2017$`Anzahl Ki pro Mahlzeit 2017`)
# Summary statistics
summary(Wirkungsdaten_2017$`Gesamtbudget der Einrichtung`)
|
3574a249854b7b686c3c9cd2b7527330c8bf7629 | e2b794a9d61f05cd79706bb5e22b74217ca1768f | /R/error_email.R | cb32da49bb45c50514a51170aacf99ddbad88735 | [
"MIT"
] | permissive | SERTwitter/ser | 1ceb409da3a15cc0df9a949bfeb96a5a47c8c537 | ee71538c1c78f71e82a27b2ac5d29c31137bf440 | refs/heads/master | 2022-06-27T22:44:38.252820 | 2022-06-20T19:42:14 | 2022-06-20T19:42:14 | 178,266,668 | 4 | 3 | NOASSERTION | 2022-06-20T19:42:15 | 2019-03-28T19:08:10 | R | UTF-8 | R | false | false | 1,651 | r | error_email.R | .email_to <- new.env(parent = emptyenv())
#' @export
#' @rdname errors
on_error_email_to <- function(recipient) {
.email_to$email <- recipient
invisible(recipient)
}
#' @export
#' @rdname errors
email_to <- function() {
.email_to$email
}
#' @export
#' @rdname errors
gmail_id <- function() {
Sys.getenv("GMAIL_ID")
}
#' @export
#' @rdname errors
gmail_secret <- function() {
Sys.getenv("GMAIL_SECRET")
}
#' @export
#' @rdname errors
gmail_email <- function() {
gmail("ser.twitteracct")
}
build_error_html <- function(.error) {
paste("<h2>Error in SER code:", Sys.time(), "</h2> \n", .error)
}
#' Email errors in code
#'
#' `email_on_error()` will email on error. Use it with `action_safely()` to wrap
#' a function using the emailer. Set the email recipient globally with
#' `on_error_email_to()` and retrieve it with `email_to()`.
#'
#' @param .e the error message
#' @param recipient an email address.
#' @param .msg an optional note appended to the error email's subject line.
#' @param .f a function to wrap in the email error catching function
#'
#' @return a character vector containing the error email
#' @export
#'
#' @rdname errors
email_on_error <- function(.e, recipient = email_to(), .msg = NULL) {
authorize_gmailr()
email_msg <- build_error_html(.e)
gmailr::mime() %>%
gmailr::to(recipient) %>%
gmailr::from("ser.twitteracct@gmail.com") %>%
gmailr::subject(paste("Error in SER code:", Sys.time(), .msg)) %>%
gmailr::html_body(email_msg) %>%
gmailr::send_message()
.e
}
#' @export
#' @rdname errors
action_safely <- function(.f, .msg = NULL) {
function(...) {
tryCatch(
.f(...),
error = purrr::partial(email_on_error, .msg = .msg)
)
}
}
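# Usage sketch (hypothetical function name): wrap a scheduled job so failures
# are emailed instead of thrown, e.g.
#   post_daily_tweet_safely <- action_safely(post_daily_tweet, "daily tweet job")
#   post_daily_tweet_safely()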
|
a17789a7e45436617fb59d2033f19bca188ab011 | 843549b8706d9a4de92357d5686b125bec78cd85 | /Hdac1 ChIP/Genome_annotation_of_Hdac1_peaks.R | 1773215653ac54c5eae8094890102658c2e0bc51 | [] | no_license | jiajinglz/bioRxiv_05052022_Hdac_dual_roles | c08847aae8c78c18f778ffaff50f9ed679e7088b | 84f49202e21f35da6f6b79c5c8cc6a9f603126d5 | refs/heads/main | 2023-04-06T07:14:19.666646 | 2023-03-28T02:54:11 | 2023-03-28T02:54:11 | 553,051,311 | 0 | 0 | null | 2023-04-02T17:40:23 | 2022-10-17T16:30:00 | R | UTF-8 | R | false | false | 3,589 | r | Genome_annotation_of_Hdac1_peaks.R | ##############st8################
st8_annotate_output <- read.delim("/Volumes/GoogleDrive/My Drive/foxh1 and hdac1/hdac1 chip seq/distribution/st8_hdac1_wt_IDR0.05_in_merged_peaks.annotate", header=FALSE)
st8 <- data.frame(st8_annotate_output$V8)
st8 <- data.frame(st8[-1,])
colnames(st8) <- c("region")
st8$region <- as.character(st8$region)
st8 <- data.frame(sapply(strsplit(st8$region, "[()]"), "[", 1))
colnames(st8) <- c("region")
st8$region <- as.character(st8$region)
type <- unique(st8)
library(stringr)
exon <- sum(str_count(st8$region, "exon"))
intergenic <- sum(str_count(st8$region, "Intergenic"))
intron <- sum(str_count(st8$region, "intron"))
TSS <- sum(str_count(st8$region, "TSS"))
TTS <- sum(str_count(st8$region, "TTS"))
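# The counts below were entered manually from the tabulation above; their
# order must match the order of labels in type[1:5, ].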
data <- data.frame(
type= type[1:5,],
value= c(571,480,145,114,27)
)
library(ggplot2)
# Basic piechart
pie_st8 <- ggplot(data, aes(x="", y=value, fill=type)) +
geom_bar(stat="identity", width=1, color="white") +
coord_polar("y", start=0) +
theme_void() # remove background, grid, numeric labels
pie_st8
ggsave('/Volumes/GoogleDrive/My Drive/foxh1 and hdac1/hdac1 chip seq/distribution/st8_pie_on_IDR_peaks.tiff', pie_st8, device = "tiff", dpi = 100)
##############st9################
st9_annotate_output <- read.delim("/Volumes/GoogleDrive/My Drive/foxh1 and hdac1/hdac1 chip seq/distribution/st9_hdac1_wt_IDR0.05_in_merged_peaks.annotate", header=FALSE)
st9 <- data.frame(st9_annotate_output$V8)
st9 <- data.frame(st9[-1,])
colnames(st9) <- c("region")
st9$region <- as.character(st9$region)
st9 <- data.frame(sapply(strsplit(st9$region, "[()]"), "[", 1))
colnames(st9) <- c("region")
st9$region <- as.character(st9$region)
type <- unique(st9)
library(stringr)
exon <- sum(str_count(st9$region, "exon"))
intergenic <- sum(str_count(st9$region, "Intergenic"))
intron <- sum(str_count(st9$region, "intron"))
TSS <- sum(str_count(st9$region, "TSS"))
TTS <- sum(str_count(st9$region, "TTS"))
data <- data.frame(
type= type[1:5,],
value= c(1919,2093,338,137,648)
)
library(ggplot2)
# Basic piechart
pie_st9 <- ggplot(data, aes(x="", y=value, fill=type)) +
geom_bar(stat="identity", width=1, color="white") +
coord_polar("y", start=0) +
theme_void() # remove background, grid, numeric labels
pie_st9
ggsave('/Volumes/GoogleDrive/My Drive/foxh1 and hdac1/hdac1 chip seq/distribution/st9_pie_on_IDR_peaks.tiff', pie_st9, device = "tiff", dpi = 100)
#############st10.5#################
st105_annotate_output <- read.delim("/Volumes/GoogleDrive/My Drive/foxh1 and hdac1/hdac1 chip seq/distribution/st105_hdac1_wt_IDR0.05_in_merged_peaks.annotate", header=FALSE)
st105 <- data.frame(st105_annotate_output$V8)
st105 <- data.frame(st105[-1,])
colnames(st105) <- c("region")
st105$region <- as.character(st105$region)
st105 <- data.frame(sapply(strsplit(st105$region, "[()]"), "[", 1))
colnames(st105) <- c("region")
st105$region <- as.character(st105$region)
type <- unique(st105)
library(stringr)
exon <- sum(str_count(st105$region, "exon"))
intergenic <- sum(str_count(st105$region, "Intergenic"))
intron <- sum(str_count(st105$region, "intron"))
TSS <- sum(str_count(st105$region, "TSS"))
TTS <- sum(str_count(st105$region, "TTS"))
data <- data.frame(
type= type[1:5,],
value= c(6343,8056,5525,2400,354)
)
library(ggplot2)
# Basic piechart
pie_st105 <- ggplot(data, aes(x="", y=value, fill=type)) +
geom_bar(stat="identity", width=1, color="white") +
coord_polar("y", start=0) +
theme_void() # remove background, grid, numeric labels
pie_st105
ggsave('/Volumes/GoogleDrive/My Drive/foxh1 and hdac1/hdac1 chip seq/distribution/st105_pie_on_IDR_peaks.tiff', pie_st105, device = "tiff", dpi = 100)
|
01d27807153f300f10ce521e5f22bb86c88fde67 | 0e3b6ca75ed91dfca35ce413a2790c58fbdc1d1e | /R/model_linear_combo.R | 5d6e64acdee87564a7639d932ae80e0bcd471e1b | [] | no_license | cran/pcsstools | 6850ba60227388d7be24e9bdc5091003c63c2cb6 | 6a8272733efdaabef27d99bb6791c5fd5c94afd7 | refs/heads/master | 2023-04-02T16:51:46.670976 | 2021-03-23T18:30:02 | 2021-03-23T18:30:02 | 350,936,770 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,598 | r | model_linear_combo.R | #' Model the principal component score of a set of phenotypes using PCSS
#'
#' \code{model_prcomp} calculates the linear model for the mth principal
#' component score of a set of phenotypes as a function of a set of
#' predictors.
#'
#' @param formula an object of class \code{formula} whose dependent variable is
#' a series of variables joined by \code{+} operators. \code{model_prcomp}
#' will treat a principal component score of those variables as the actual
#' dependent variable. All model terms must be accounted for in \code{means}
#' and \code{covs}.
#'
#' @param comp integer indicating which principal component score to analyze.
#' Must be less than or equal to the total number of phenotypes.
#' @param n sample size.
#' @param means named vector of predictor and response means.
#' @param covs named matrix of the covariance of all model predictors and the
#' responses.
#' @param center logical. Should the dependent variables be centered before
#' principal components are calculated?
#' @param standardize logical. Should the dependent variables be standardized
#' before principal components are calculated?
#' @param ... additional arguments
#'
#' @inherit pcsslm return
#'
#' @references{
#'
#' \insertRef{wolf_computationally_2020}{pcsstools}
#'
#' }
#'
#' @examples
#' ex_data <- pcsstools_example[c("g1", "x1", "x2", "y1", "y2", "y3")]
#' head(ex_data)
#' means <- colMeans(ex_data)
#' covs <- cov(ex_data)
#' n <- nrow(ex_data)
#'
#' model_prcomp(
#' y1 + y2 + y3 ~ g1 + x1 + x2,
#' comp = 1, n = n, means = means, covs = covs
#' )
#' @export
model_prcomp <- function(formula, comp = 1, n, means, covs,
center = FALSE, standardize = FALSE, ...) {
cl <- match.call()
terms <- terms(formula)
xterms <- extract_predictors(formula)
yterms <- parse_sum(extract_response(formula))
check_terms_combo(xterms$predictors, yterms, means, covs)
# Re-arrange means, covs, and predictors to match given formula
means0 <- means[c(xterms$predictors, yterms)]
covs0 <- covs[c(xterms$predictors, yterms), c(xterms$predictors, yterms)]
add_intercept <- xterms$add_intercept
# Adjust pcss if response is centered, standardized
if (center) {
means0[yterms] <- 0
}
if (standardize) {
var0 <- c(rep(1, length(xterms$predictors)), diag(covs0)[yterms])
covs0 <- (1 / sqrt(var0)) * t((1 / sqrt(var0)) * covs0)
}
# Calculate weights for PCA
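  # (the loadings are the comp-th eigenvector of the response covariance
  # matrix, so the modeled PC score is phi' y)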
ysigma <- covs0[yterms, yterms]
phi <- eigen(ysigma)$vectors[, comp]
re <- calculate_lm_combo(
means = means0, covs = covs0, n = n, phi = phi,
add_intercept = add_intercept, terms = terms, ...
)
re$call <- cl
class(re) <- "pcsslm"
return(re)
}
#' Model a linear combination of a set of phenotypes using PCSS
#'
#' \code{model_combo} calculates the linear model for a linear combination of
#' phenotypes as a function of a set of predictors.
#'
#' @param formula an object of class \code{formula} whose dependent variable is
#' a series of variables joined by \code{+} operators. \code{model_combo}
#' will treat a principal component score of those variables as the actual
#' dependent variable. All model terms must be accounted for in \code{means}
#' and \code{covs}.
#'
#' @param phi named vector of linear weights for each variable in the
#' dependent variable in \code{formula}.
#' @param n sample size.
#' @param means named vector of predictor and response means.
#' @param covs named matrix of the covariance of all model predictors and the
#' responses.
#' @param ... additional arguments
#'
#' @references{
#'
#' \insertRef{wolf_computationally_2020}{pcsstools}
#'
#' \insertRef{gasdaska_leveraging_2019}{pcsstools}
#'
#' }
#'
#' @inherit pcsslm return
#' @examples
#' ex_data <- pcsstools_example[c("g1", "x1", "x2", "x3", "y1", "y2", "y3")]
#' head(ex_data)
#' means <- colMeans(ex_data)
#' covs <- cov(ex_data)
#' n <- nrow(ex_data)
#' phi <- c("y1" = 1, "y2" = -1, "y3" = 0.5)
#'
#' model_combo(
#' y1 + y2 + y3 ~ g1 + x1 + x2 + x3,
#' phi = phi, n = n, means = means, covs = covs
#' )
#'
#' summary(lm(y1 - y2 + 0.5 * y3 ~ g1 + x1 + x2 + x3, data = ex_data))
#' @export
model_combo <- function(formula, phi, n, means, covs, ...) {
cl <- match.call()
terms <- terms(formula)
xterms <- extract_predictors(formula)
yterms <- parse_sum(extract_response(formula))
check_terms_combo(xterms$predictors, yterms, means, covs)
# Re-arrange means, covs, and predictors to match given formula
means0 <- means[c(xterms$predictors, yterms)]
covs0 <- covs[c(xterms$predictors, yterms), c(xterms$predictors, yterms)]
add_intercept <- xterms$add_intercept
phi0 <- phi[yterms]
re <- calculate_lm_combo(
    means = means0, covs = covs0, n = n, phi = phi0, # weights re-ordered to match yterms
add_intercept = add_intercept, terms = terms, ...
)
re$call <- cl
class(re) <- "pcsslm"
return(re)
}
#' Model an individual phenotype using PCSS
#'
#' \code{model_singular} calculates the linear model for a singular
#' phenotype as a function of a set of predictors.
#'
#' @param formula an object of class \code{formula} whose dependent variable is
#' only variable. All model terms must be accounted for in \code{means}
#' and \code{covs}.
#' @param n sample size.
#' @param means named vector of predictor and response means.
#' @param covs named matrix of the covariance of all model predictors and the
#' responses.
#' @param ... additional arguments
#'
#' @references{
#'
#' \insertRef{wolf_computationally_2020}{pcsstools}
#'
#' }
#'
#' @inherit pcsslm return
#'
#' @export
#' @examples
#' ex_data <- pcsstools_example[c("g1", "x1", "y1")]
#' means <- colMeans(ex_data)
#' covs <- cov(ex_data)
#' n <- nrow(ex_data)
#'
#' model_singular(
#' y1 ~ g1 + x1,
#' n = n, means = means, covs = covs
#' )
#' summary(lm(y1 ~ g1 + x1, data = ex_data))
model_singular <- function(formula, n, means, covs, ...) {
cl <- match.call()
terms <- terms(formula)
xterms <- extract_predictors(formula)
yterms <- parse_sum(extract_response(formula))
check_terms_combo(xterms$predictors, yterms, means, covs)
# Re-arrange means, covs, and predictors to match given formula
means0 <- means[c(xterms$predictors, yterms)]
covs0 <- covs[c(xterms$predictors, yterms), c(xterms$predictors, yterms)]
add_intercept <- xterms$add_intercept
re <- calculate_lm(
means = means0, covs = covs0, n = n, add_intercept = add_intercept,
terms = terms, ...
)
re$call <- cl
class(re) <- "pcsslm"
return(re)
}
#' Calculate a linear model for a linear combination of responses
#'
#' \code{calculate_lm_combo} describes the linear model for a linear combination
#' of responses as a function of a set of predictors.
#'
#' @param means a vector of means of all model predictors and the response with
#' the last \code{m} elements the response means (with order corresponding to
#' the order of weights in \code{phi}).
#' @param covs a matrix of the covariance of all model predictors and the
#' responses with the order of rows/columns corresponding to the order of
#' \code{means}.
#' @param n sample size.
#' @param m number of responses to combine. Defaults to \code{length(phi)}.
#' @param phi vector of linear combination weights with one entry per response
#' variable.
#' @param add_intercept logical. If \code{TRUE} adds an intercept to the model.
#' @param ... additional arguments
#'
#' @references{
#'
#' \insertRef{wolf_computationally_2020}{pcsstools}
#'
#' \insertRef{gasdaska_leveraging_2019}{pcsstools}
#'
#' }
#' @inherit pcsslm return
calculate_lm_combo <- function(means, covs, n, phi, m = length(phi),
add_intercept, ...) {
p <- length(means) - m
# Covariances with linear combo and variance/mean of the linear combo
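  # Cov(X, phi'Y) = Sigma_XY %*% phi, Var(phi'Y) = phi' Sigma_YY phi,
  # and E[phi'Y] = phi' mu_Y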
new_covs <- covs[1:p, (p + 1):(p + m)] %*% phi
new_var <- drop(t(phi) %*% covs[(p + 1):(p + m), (p + 1):(p + m)] %*% phi)
new_mean <- sum(phi * means[(p + 1):(p + m)])
means0 <- c(means[1:p], new_mean)
covs0 <- rbind(cbind(covs[1:p, 1:p], new_covs), c(t(new_covs), new_var))
colnames(covs0) <- c(names(means)[1:p], NA)
rownames(covs0) <- c(names(means)[1:p], NA)
calculate_lm(means = means0, covs = covs0, n = n,
add_intercept = add_intercept, ...)
}
|
bbadc879b3b72a043880e70145b966f71c354516 | d14bcd4679f0ffa43df5267a82544f098095f1d1 | /R/ls.mode.R | cb533a35ac362303cce656d70859004295c5f091 | [] | no_license | anhnguyendepocen/SMRD | 9e52aa72a5abe5274f9a8546475639d11f058c0d | c54fa017afca7f20255291c6363194673bc2435a | refs/heads/master | 2022-12-15T12:29:11.165234 | 2020-09-10T13:23:59 | 2020-09-10T13:23:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 194 | r | ls.mode.R | ls.mode <-
function (funname = NULL, where = 1, mode = "function", all.names = FALSE)
{
dimnames(wqm.objects.summary(mode = mode, where = where,
all.names = all.names))[[1]]
}
|
5130c0fbce42cbfe7f637654ba7bd58a4730af85 | fb96b0e38bdeee226bd2da40faa6e7ec62f58732 | /analysis/biopsy_classes.R | 316704661fb163dac15b634b3ca9795efa1fb207 | [] | no_license | webbedfeet/CARRA | 29baa07639ce0afe80de19b47ff19c06521eb33a | 4e314ebe7e536b00836a972d022fb393381275e9 | refs/heads/master | 2021-01-03T13:37:23.369732 | 2020-10-07T06:15:18 | 2020-10-07T06:15:18 | 240,086,783 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,911 | r | biopsy_classes.R | ## LN ascertainment
##
## There is a variable, SLICC00, that is putatively the right variable
## We're going to ascertain based on WHO and ISNRPS values
##
pacman::p_load(char=c('tidyverse','janitor','broom', 'naniar',
'here', 'vroom'))
all_subjects <- vroom(here('data/raw/all_rows_data_2020-01-31_1545.csv'),
col_select = c(subjectId, visit = folderName)) %>%
distinct() %>%
mutate(visit = fct_relevel(visit, 'Baseline','6 month','12 month','18 month','24 month')) %>% # order visits chronologically
clean_names(case='snake')
slicc_info <- vroom(here('data/raw/slicc_data_2020-01-31_1545.csv'),
col_select = c(subjectId, visit = folderName,
SLICC00)) %>%
clean_names(case='snake')
ln_visit <- vroom(here('data/raw/vis_data_2020-01-31_1545.csv'),
col_select = c(subjectId, folderName, LUPUSNEP)) %>%
clean_names(case = 'snake') %>%
rename(visit = folder_name)
raw_biopsy <- rio::import(here('data/raw/biopsy_data_2020-01-31_1545.csv'), setclass = 'tbl') %>%
clean_names(case='snake') %>%
select(subject_id, visit = folder_name, event_index, biopdtc_yyyy, biopsdtc_yyyy,
matches('[who|isnrps][2-6]$'))
### Check that missing data for biopsy is all or nothing
assertthat::assert_that(assertthat::are_equal(
  sort(unique(rowSums(is.na(select(raw_biopsy, isnrps2:who6))))),
  c(0,10)))
# raw_biopsy <- raw_biopsy %>%
# filter_at(vars(isnrps2:who6), complete.cases) %>%
# mutate(LN = rowSums(.[,-(1:3)])) %>%
# mutate(LN2 = rowSums(.[,c('isnrps2','who2')]),
# LN3 = rowSums(.[,c('isnrps3','who3')]),
# LN4 = rowSums(.[,c('isnrps4','who4')]),
# LN5 = rowSums(.[,c('isnrps5','who5')])) %>%
# mutate_at(vars(starts_with('LN')), ~ifelse(. > 0, 1, 0)) %>%
# # Create 3 exclusive classes: LN 3/4 only, LN 3/4+5, and LN5 only
# mutate(LN34 = ifelse((LN3==1 | LN4==1) & LN5==0, 1, 0),
# LN345 = ifelse((LN3==1 & LN5==1) | (LN4==1 & LN5==1), 1, 0),
# LN50 = ifelse(LN5==1 & LN3==0 & LN4==0, 1, 0))
#
# saveRDS(raw_biopsy, file = here('data/rda/biopsy_classes.rds'), compress=T)
#
# Updated definition, more explicit ---------------------------------------
raw_biopsy1 <- raw_biopsy %>%
filter_at(vars(isnrps2:who6), complete.cases) %>%
rowwise() %>%
mutate(LN = sum(c_across(isnrps2:who5)),
LN2 = sum(c(isnrps2, who2)),
LN3 = sum(c(isnrps3, who3)),
LN4 = sum(c(isnrps4, who4)),
LN5 = sum(c(isnrps5, who5))) %>%
mutate(across(starts_with('LN'), ~ifelse(. > 0, 1, 0))) %>%
ungroup() %>%
# Create 3 exclusive classes: LN 3/4 only, LN 3/4+5, and LN5 only
mutate(LN34 = ifelse((LN3==1 | LN4==1) & LN5==0, 1, 0),
LN345 = ifelse((LN3==1 & LN5==1) | (LN4==1 & LN5==1), 1, 0),
LN50 = ifelse(LN5==1 & LN3==0 & LN4==0, 1, 0))
saveRDS(raw_biopsy1, file = here('data/rda/biopsy_classes1.rds'))
|
08011a2d010ee6d1bba5da1ef94a2aef65505736 | 249542669c3973467a63f41b647ad3d0fe5070a6 | /R/auc_roc.R | 2d6e33ccf96eacd485a6dc160c34d8045a67b01d | [] | no_license | rmatam/mltools | f82af6239cfe7060c59b04169ac75a25b68cf3ac | 05b80d1447ca75c7983858647b05d997cff0ca5d | refs/heads/master | 2021-01-13T08:20:28.823925 | 2016-10-12T15:12:10 | 2016-10-12T15:12:10 | 72,211,313 | 1 | 0 | null | 2016-10-28T13:47:51 | 2016-10-28T13:47:51 | null | UTF-8 | R | false | false | 2,149 | r | auc_roc.R | #' @title
#' Area Under the ROC Curve
#'
#' @description
#' Calculates Area Under the ROC Curve
#'
#' @details
#' If \code{returnDT=FALSE}, returns the Area Under the ROC Curve. If \code{returnDT=TRUE}, returns a data.table object with
#' False Positive Rate and True Positive Rate for plotting the ROC curve.
#'
#' @param preds A vector of prediction values
#' @param actuals A vector of actuals values (numeric or ordered factor)
#' @param returnDT If TRUE, a data.table of (FalsePositiveRate, TruePositiveRate) pairs is returned, otherwise AUC ROC score is returned
#'
#' @references
#' \url{https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve}
#'
#' @export
#' @import data.table
#'
#' @examples
#' preds <- c(.1, .3, .3, .9)
#' actuals <- c(0, 0, 1, 1)
#' auc_roc(preds, actuals)
#' auc_roc(preds, actuals, returnDT=TRUE)
auc_roc <- function(preds, actuals, returnDT=FALSE){
# Calculate area under the ROC curve
# If returnDT = TRUE, a data.table is returned
# Check if every prediction is identical and if so, return 0.5
if(length(unique(preds)) == 1) return(0.5)
# Convert actuals to numeric if it's an ordered factor
if(is(actuals, "factor")){
if(is.ordered(actuals) & length(levels(actuals)) == 2) actuals <- as.numeric(actuals) - 1 else stop("actuals is type factor, but is unordered. Make it an ordered factor.")
}
dt <- data.table(Pred=preds, Actual=actuals*1L)
setorder(dt, -Pred)
bg <- dt[, list(CountFalse=sum(Actual==0), CountTrue=sum(Actual)), by=list(Pred)]
# Calculate the CumulativeFalsePositiveRate and CumulativeTruePositiveRate
bg[, CumulativeFPR := cumsum(CountFalse)/sum(CountFalse)]
bg[, CumulativeTPR := cumsum(CountTrue)/sum(CountTrue)]
# Calculate AUC ROC
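  # (Trapezoidal rule: the first term is the triangle from (0,0) to the first
  # (FPR, TPR) point; each later term is the trapezoid between consecutive points.)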
bg[, AdditionalArea := c(head(CumulativeFPR, 1) * head(CumulativeTPR, 1)/2,
(tail(CumulativeFPR, -1) - head(CumulativeFPR, -1)) * (head(CumulativeTPR, -1) + (tail(CumulativeTPR, -1) - head(CumulativeTPR, -1))/2))]
bg[, CumulativeArea := cumsum(AdditionalArea)]
# Return the desired result
if(returnDT) return(bg[]) else return(tail(bg$CumulativeArea, 1))
}
|
3aa318edd5d584116f848192885c3db0a528c3ce | 0484ddd6f392fecfa542747f550248bba6a9bf2a | /tests/testthat.r | 347d7d94442327965d6b54112c99f622cceb6af5 | [] | no_license | lengning/gClinBiomarker | d0115d4a699ca12866b9776c6a3835d9c0ece6c9 | 726d3bb9edbd8ecc450fc650ea7ab9922737629b | refs/heads/master | 2021-10-24T06:16:06.064819 | 2019-03-22T18:25:07 | 2019-03-22T18:25:07 | 125,939,464 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 72 | r | testthat.r | library(testthat)
library(gClinBiomarker)
test_check("gClinBiomarker")
|
c10665d158e3177ee7cfa298bf6e3b242ee811cd | b207cd6e0e11a74cec570fb91ff974f3d5d5d69d | /tests/testthat/test-vov.R | 1f808f1b9f1e9fea271fb259f57d21ffb4aba35c | [] | no_license | cran/vov | b5ec40ce05df7452f599e12098714abeb85023fc | dfe268b7289d5d81b7aa69fc183b9cd5f71d3650 | refs/heads/master | 2020-12-22T23:07:21.029791 | 2020-08-27T20:50:03 | 2020-08-27T20:50:03 | 236,958,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,229 | r | test-vov.R | library(vov)
library(shiny)
test_that("vov works", {
# blur in
expect_equal(
object = as.character(blur_in(h1("hi"))),
expected = '<h1 class="vov blur-in">hi</h1>'
)
# blur out
expect_equal(
object = as.character(blur_out(h1("hi"))),
expected = '<h1 class="vov blur-out">hi</h1>'
)
# fade in
expect_equal(
object = as.character(fade_in(h1("hi"))),
expected = '<h1 class="vov fade-in">hi</h1>'
)
# fade_in_bottom_left
expect_equal(
object = as.character(fade_in_bottom_left(h1("hi"))),
expected = '<h1 class="vov fade-in-bottom-left">hi</h1>'
)
# fade_in_bottom_right
expect_equal(
object = as.character(fade_in_bottom_right(h1("hi"))),
expected = '<h1 class="vov fade-in-bottom-right">hi</h1>'
)
# fade_in_down
expect_equal(
object = as.character(fade_in_down(h1("hi"))),
expected = '<h1 class="vov fade-in-down">hi</h1>'
)
# fade_in_left
expect_equal(
object = as.character(fade_in_left(h1("hi"))),
expected = '<h1 class="vov fade-in-left">hi</h1>'
)
# fade_in_right
expect_equal(
object = as.character(fade_in_right(h1("hi"))),
expected = '<h1 class="vov fade-in-right">hi</h1>'
)
# fade_in_top_left
expect_equal(
object = as.character(fade_in_top_left(h1("hi"))),
expected = '<h1 class="vov fade-in-top-left">hi</h1>'
)
# fade_in_top_right
expect_equal(
object = as.character(fade_in_top_right(h1("hi"))),
expected = '<h1 class="vov fade-in-top-right">hi</h1>'
)
# fade_in_up
expect_equal(
object = as.character(fade_in_up(h1("hi"))),
expected = '<h1 class="vov fade-in-up">hi</h1>'
)
# fade out
expect_equal(
object = as.character(fade_out(h1("hi"))),
expected = '<h1 class="vov fade-out">hi</h1>'
)
# fade_out_bottom_left
expect_equal(
object = as.character(fade_out_bottom_left(h1("hi"))),
expected = '<h1 class="vov fade-out-bottom-left">hi</h1>'
)
# fade_out_bottom_right
expect_equal(
object = as.character(fade_out_bottom_right(h1("hi"))),
expected = '<h1 class="vov fade-out-bottom-right">hi</h1>'
)
# fade_out_down
expect_equal(
object = as.character(fade_out_down(h1("hi"))),
expected = '<h1 class="vov fade-out-down">hi</h1>'
)
# fade_out_left
expect_equal(
object = as.character(fade_out_left(h1("hi"))),
expected = '<h1 class="vov fade-out-left">hi</h1>'
)
# fade_out_right
expect_equal(
object = as.character(fade_out_right(h1("hi"))),
expected = '<h1 class="vov fade-out-right">hi</h1>'
)
# fade_out_top_left
expect_equal(
object = as.character(fade_out_top_left(h1("hi"))),
expected = '<h1 class="vov fade-out-top-left">hi</h1>'
)
# fade_out_top_right
expect_equal(
object = as.character(fade_out_top_right(h1("hi"))),
expected = '<h1 class="vov fade-out-top-right">hi</h1>'
)
# fade_out_up
expect_equal(
object = as.character(fade_out_up(h1("hi"))),
expected = '<h1 class="vov fade-out-up">hi</h1>'
)
})
|
7eda728fc4b323f3f6b8951243cb40b96b26b647 | 6c6613504dd33574afc3bb9d0c98d90fcf9654c4 | /run_analysis.R | 0fba7b2ddc049329fe35f2d8305ab592960fe80d | [] | no_license | dklausme/GetCleanData | 19598b4054aafd7afe233d5036f34a3b17a26ed6 | 8cb9e179a2468e7261822eb5599d52ba446629d8 | refs/heads/master | 2016-08-11T10:45:39.443529 | 2016-03-14T05:17:29 | 2016-03-14T05:17:29 | 53,827,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,415 | r | run_analysis.R | # Makes sure I'm in the right directory
setwd('/Users/David/Documents/R/GetCleanData/UCI HAR Dataset');
library(plyr);   # ddply() in step 5 comes from plyr
# Imports info I'll need for labels
features = read.table('./features.txt',header=FALSE);
activity = read.table('./activity_labels.txt',header=FALSE);
# 1. Merges test and train
# Imports X info, merges
xtest = read.table('./test/X_test.txt',header=FALSE);
xtrain = read.table('./train/X_train.txt',header=FALSE);
xstuff = rbind(xtrain, xtest);
# Imports Y info, merges
ytest = read.table('./test/y_test.txt',header=FALSE);
ytrain = read.table('./train/y_train.txt',header=FALSE);
ystuff = rbind(ytrain, ytest);
# Imports subject info, merges
subjecttest = read.table('./test/subject_test.txt',header=FALSE);
subjecttrain = read.table('./train/subject_train.txt',header=FALSE);
subjectstuff = rbind(subjecttrain, subjecttest);
# 2. Pulls out columns with mean or standard deviation
meanstd = grep("-(mean|std)\\(\\)", features[, 2]);
xstuff = xstuff[, meanstd];
# 3. Uses activity names to name activities
ystuff[, 1] = activity[ystuff[, 1], 2];
# 4. Labels data with descriptive names
names(ystuff) <- "activity";
names(subjectstuff) = "subject";
# 5. Create second data set with averages for each variable for each activity/subject
final = cbind(xstuff,ystuff,subjectstuff);
average = ddply(final, .(subject, activity), function(x) colMeans(x[,1:66]));
|
f3bff401c5656f9223fa6270951516528644b3f4 | 4867cbde0414af89991ddfce854bcd50d0f4877e | /Tentor/exam_with_solutions/Tenta 2018-01-11.R | f8c5c0089f744d4d2fb2e3314856c25de70b3634 | [] | no_license | oskarhiden/TDDE01-Machine-learning | 27b9bda6ac496637301293018bbd6c8f74ef7323 | b4e66daf35c52008c6031b544e19cb41f4b91d73 | refs/heads/master | 2020-12-10T12:16:28.862563 | 2020-01-17T14:52:54 | 2020-01-17T14:52:54 | 233,590,873 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 9,512 | r | Tenta 2018-01-11.R | RNGversion('3.5.1')
library(readr)
#Assignment 1
#Step 1
video = read.csv("C:/Users/oskar/OneDrive/Universitet/Luleå Tekniska högskola/Databaser 1/Dokument/Git Repro/TDDE01-Machine-learning/Tentor/exam_with_solutions/video.csv")
#video = read.csv("Desktop/video.csv")
set.seed(12345)
n=dim(video)[1]
set.seed(12345)
id=sample(1:n, floor(n*0.5))
train=video[id,]
test=video[-id,]
train1 = train
train1$codec = c()
train1$utime = c()
res = prcomp(train1)
lambda = res$sdev^2
lambda
#cumsum(lambda)
sprintf("%2.3f",lambda/sum(lambda)*100)
#sprintf("%2.3f",cumsum(lambda)/sum(lambda)*100)
# only one variable necessary
#scaled
train1_scaled = scale(train1)
res_scaled = prcomp(train1_scaled)
lambda_scaled = res_scaled$sdev^2
sprintf("%2.3f",lambda_scaled/sum(lambda_scaled)*100)
sprintf("%2.3f",cumsum(lambda_scaled)/sum(lambda_scaled)*100)
# 9 variables necessary
# Different variables have different scales.
# Without scaling, a lot of the variation comes from variables with big absolute values,
# whereas in % terms the change can be small.
#Step 2
library(pls)   # pcr() comes from the pls package
train$codec = c()
test$codec = c()
# = data.frame(scale(train))
#test = data.frame(scale(test))
MSE_train = numeric(17)
MSE_test = numeric(17)
for(i in 1:17){
pcr_model = pcr(utime ~ ., ncomp=17, data=train, scale=TRUE)
pred_train = predict(pcr_model, ncomp = i)
pred_test = predict(pcr_model, ncomp = i, newdata = test)
MSE_train[i] = mean((pred_train - train$utime)^2)
MSE_test[i] = mean((pred_test - test$utime)^2)
}
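# (Alternative sketch, assuming the pls package as loaded above: pcr() can
# cross-validate internally via pcr(utime ~ ., ncomp = 17, data = train,
# scale = TRUE, validation = "CV"), with MSEP() extracting the estimated errors.)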
MSE_train
MSE_test
which.min(MSE_test)
plot(MSE_train, ylim = c(100,300), col = "red", type = "b", ylab = "MSE", xlab = "# of components")
points(MSE_test, col="blue", type = "b")
# When the number of components increases, the model becomes more complex and the bias goes
# down while variance goes up. The optimal model should have the lowest test error, in this
# case M=8. "Simpler models with similar test errors are often chosen, for ex. M=8."
#Step 3
pcr_model = pcr(utime ~ ., ncomp=8, data=train, scale=TRUE)
pcr_model$terms
# Equation:
# Loadings:
# Comp 1 Comp 2 Comp 3 Comp 4 Comp 5 Comp 6 Comp 7 Comp 8
# utime 1.736 -1.598 -6.774 0.953 -3.466 3.369 -3.255 -5.517
#Step 4
#class = numeric(nrow(video))
class = ifelse(video$codec == "mpeg4", "mpeg", "other")
id = which(class == "mpeg")
mpeg = video[id,]
other = video[-id,]
plot(y= mpeg$duration, x=mpeg$frames, col="blue", main="duration vs frames")
points(y= other$duration, x= other$frames, col="red")
# a decision boundary would be good.
# Step 5
library(MASS)
data = video
data$codec = c()
data = data.frame(scale(data))
video2 = cbind(data, class = as.factor(class))  # name the column so lda() finds it in the data
lda_model = lda(class ~ duration + frames, data = video2)
pred_lda = predict(lda_model)
pred_lda$class
id = which(pred_lda$class == "mpeg")
mpeg = video[id,]
other = video[-id,]
plot(y= other$duration, x=other$frames, col="red", main="duration vs frames")
points(y=mpeg$duration , x= mpeg$frames, col="blue")
#plot(x=video2$duration, y=video2$frames, col=pred_lda$class)
conf_matrix = table(class, pred_lda$class)
missclass = 1 - sum(diag(conf_matrix))/sum(conf_matrix)
missclass
# Why is it bad at classifying?
#
dur = lda_model$scaling[1]
fra = lda_model$scaling[2]
test = cbind(video2$duration*dur, video2$frames*fra)
# with color differences:
id = which(class == "mpeg")
mpeg = test[id,]
other = test[-id,]
plot(y= other[,1], x=other[,2], col="red", main="duration vs frames", ylim = c(0,2), xlim = c(-2.5,1.5))
points(y=mpeg[,1] , x= mpeg[,2], col="blue")
lda_model$scaling
lda_model$prior
# The result of classification is rather bad. It is clear that covariance matrices per
# class are very different. In addition, class-conditional distributions do not look like
# multivariate normal.
#Step 6
library(tree)
# TEST
data = data.frame(cbind(video$duration, video$frames, as.factor(class)))
names(data) = c("duration", "frames", "class")
tree_model = tree(class~duration + frames, data = data)
plot(tree_model)
text(tree_model, pretty = 0)
dev = numeric(11)
for(i in 2:11){
pruned_tree = prune.tree(tree_model, best=i)
#predict(pruned_tree)
dev[i] = deviance(pruned_tree)
}
dev
plot(dev)
#plot(cv.tree(tree_model)) ?????????????????
# END TEST
data3 = video
data3$class = ifelse(data3$codec == "mpeg4", "mpeg", "other")
data3$codec = c()
data3$class = as.factor(data3$class)
tree_model = tree(class ~ duration + frames, data = data3)
cv_tree = cv.tree(tree_model)
best_size = cv_tree$size[which.min(cv_tree$dev)]
best_size #11 leaves in final tree with current settings in tree()
plot(cv_tree$size, cv_tree$dev, type="b")
# final tree
print(tree_model)
plot(tree_model)
text(tree_model)
# Since the decision boundary between the two classes is linear, but not perpendicular to
# any of the coordinate axes, the tree has to approximate it by producing a "stair-like"
# decision boundary between the two classes. More leaves => more like a linear boundary.
# Assignment 2 - Support vector machines
spam = read.csv2("C:/Users/oskar/OneDrive/Universitet/Luleå Tekniska högskola/Databaser 1/Dokument/Git Repro/TDDE01-Machine-learning/Tentor/exam_with_solutions/spambase.csv")
#spam = read.csv2("Desktop/spambase.csv")
library(kernlab)
#pick model based on validation data (holdout method):
data = spam
n=dim(data)[1]
set.seed(12345)
id=sample(1:n, floor(n*0.6))
train=data[id,]
id1=setdiff(1:n, id)
set.seed(12345)
id2=sample(id1, floor(n*0.2))
valid=data[id2,]
id3=setdiff(id1,id2)
test=data[id3,]
#train models
svm_05 = ksvm(as.factor(Spam) ~ ., data=train, kernel="rbfdot", kpar= list(sigma=0.05), C = 0.5)
svm_1 = ksvm(as.factor(Spam) ~ ., data=train, kernel="rbfdot", kpar= list(sigma=0.05), C = 1)
svm_5 = ksvm(as.factor(Spam) ~ ., data=train, kernel="rbfdot", kpar= list(sigma=0.05), C = 5)
pred_05 = predict(svm_05, newdata = valid)
pred_1 = predict(svm_1, newdata = valid)
pred_5 = predict(svm_5, newdata = valid)
#table(valid$Spam, pred_05)
misclass = function(true, predict){
table = table(true, predict)
return(1-(sum(diag(table)/sum(table))))
}
misclasserror = numeric(3)
misclasserror[1] = misclass(valid$Spam, pred_05)
misclasserror[2] = misclass(valid$Spam, pred_1)
misclasserror[3] = misclass(valid$Spam, pred_5)
misclasserror
plot(misclasserror, type="b")
# smallest error from C = 5, that model is chosen
# Step 2 - error estimation using retrained model on train+valid and then test on test data.
train_valid = rbind(train, valid)
final_model = ksvm(as.factor(Spam) ~ ., data=train_valid, kernel="rbfdot", kpar= list(sigma=0.05), C = 5)
final_pred = predict(final_model, newdata=test)
final_error = misclass(test$Spam, final_pred)
final_error
# Final error is 0.04561912
# TEST
alpha(final_model)
nSV(final_model)
coef(final_model)
# End TEST
# User_model by training on all data, C=5
user_model = ksvm(as.factor(Spam) ~ ., data=spam, kernel="rbfdot", kpar= list(sigma=0.05), C = 5)
# C is the cost of constraint violations - the 'C' constant of the regularization
# term in the Lagrange formulation. A higher C penalizes margin violations more
# heavily, so the model fits the training data more closely (lower bias, higher variance).
# Assignment 2 - Neural Networks
library(neuralnet)
set.seed(12345)
value = runif(50,0,10)
sin = sin(value)
data = data.frame(value, sin)
plot(data)
#divide data
train=data[1:25,]
valid=data[26:50,]
plot(train, col="blue")
points(valid, col="red")
#set.seed(12345)
winit = runif(31, -1, 1)
SE_tr = vector("numeric", length = 10)
SE_va = vector("numeric", length = 10)
for(i in 1:10){
nn <- neuralnet(sin~value, data = train, hidden = c(10), startweights = winit, threshold = i/1000)
p_tr = predict(nn, newdata = train)
SE_tr[i] = sum((train$sin - p_tr)^2)
p_va = predict(nn, newdata = valid)
SE_va[i] = sum((valid$sin - p_va)^2)
}
best_1 = which.min(SE_va)
plot(SE_tr, col = "red", ylab = "Sum of Squared Error", main = "one layer")
par(new=TRUE)
plot(SE_va, col = "blue", ylab = "Sum of Squared Error")
#set.seed(12345)
#weight = runif(22, -1, 1)
SE_tr_2 = vector("numeric", length = 10)
SE_va_2 = vector("numeric", length = 10)
for (i in 1:10){
  nn_2 = neuralnet(sin~value, data = train, hidden = c(3, 3), startweights = winit, threshold = i/1000)
p_tr_2 = predict(nn_2, newdata = train)
SE_tr_2[i] = sum((train$sin - p_tr_2)^2)
p_va_2 = predict(nn_2, newdata = valid)
SE_va_2[i] = sum((valid$sin - p_va_2)^2)
}
best_2 = which.min(SE_va_2)
plot(SE_tr_2, col = "red", ylab = "Sum of Squared Error", main= "two layer")
par(new=TRUE)
plot(SE_va_2, col = "blue", ylab = "Sum of Squared Error")
SE_va[best_1]
SE_va_2[best_2]
#best model = 1 layer of 10 hidden units with threshold = 1/1000
nn <- neuralnet(sin~value, data = train, hidden = c(10), startweights = winit, threshold = best_1/1000)
p_tr = predict(nn, newdata = train)
p_va = predict(nn, newdata = valid)
plot(x = valid$value, y=p_va, col="blue")
points(valid, col="red")
# This model was chosen because it generated the lowest squared error for validation data.
# Step 2 - Generalisation error for NN above:
# Sampling more data:
set.seed(12345)
value = runif(25, 0, 10)
sin = sin(value)
test = data.frame(value, sin)
p_test = predict(nn, newdata = test)
MSE_test = sum((p_test-test$sin)^2)/nrow(test)
MSE_test
plot(x = test$value, y=p_test, col="blue")
points(test, col="red")
# 1 layer was the best model according to our error; testing the model further, we could
# see that with the new data the error was still low. Therefore, a deeper nn is not always
# better - it could lead to overfitting the data, generating high variance by capturing a lot of noise.
|
5b567c4f84008682305d36717ec6ed175d82c7b9 | d692ef9a915e8d901d6c17567a632f555c70d5c9 | /YATAWebCore/R/YATAWebTools.R | 7925f8ea696e7188f6f0b9c585699896e794bbb6 | [
"CC0-1.0"
] | permissive | cryptobuks1/YATA2 | 386f0cc47e6bfeda8c626d6d44e033708f06c404 | 04d7c4fefea3babc0d0f30ee15c9a52602623055 | refs/heads/main | 2023-04-26T16:05:54.430701 | 2021-05-27T15:00:58 | 2021-05-27T15:00:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 95 | r | YATAWebTools.R | yataActiveNS = function(id) {
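    # return the last "-"-separated token of a namespaced Shiny id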
data = strsplit(id, "-")
data[[1]][length(data[[1]])]
}
|
3d9c90424a1d3844dbee1ef5a098b12325916963 | 23a269209e0b631daea4f5f2f99f2ee8127ca8ba | /app/global.R | 6c9b5eca4ac662ad78b1a68a9baf4a1e41267b58 | [] | no_license | TZstatsADS/Fall2021-Project2-group5 | e261ba3279ad09556c3cf626f8331c3fbea28bb7 | 47b28e06cc0e41e949ab91eec273257f75e228e9 | refs/heads/master | 2023-08-27T20:30:55.928087 | 2021-10-20T20:17:37 | 2021-10-20T20:17:37 | 413,476,036 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,797 | r | global.R | if(!require(devtools)) install.packages("devtools", repos = "http://cran.us.r-project.org")
if(!require(RSocrata)) devtools::install_github("Chicago/RSocrata")
if(!require(forcats)) install.packages("forcats", repos = "http://cran.us.r-project.org")
if(!require(stringr)) install.packages("stringr", repos = "http://cran.us.r-project.org")
if(!require(dplyr)) install.packages("dplyr", repos = "http://cran.us.r-project.org")
if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org")
if(!require(RColorBrewer)) install.packages("RColorBrewer", repos = "http://cran.us.r-project.org")
if(!require(leaflet)) install.packages("leaflet", repos = "http://cran.us.r-project.org")
if(!require(lubridate)) install.packages("lubridate", repos = "http://cran.us.r-project.org")
if(!require(plotly)) install.packages("plotly", repos = "http://cran.us.r-project.org")
if(!require(geojsonio)) install.packages("geojsonio", repos = "http://cran.us.r-project.org")
if(!require(shiny)) install.packages("shiny", repos = "http://cran.us.r-project.org")
if(!require(shinyWidgets)) install.packages("shinyWidgets", repos = "http://cran.us.r-project.org")
if(!require(shinydashboard)) install.packages("shinydashboard", repos = "http://cran.us.r-project.org")
if(!require(shinythemes)) install.packages("shinythemes", repos = "http://cran.us.r-project.org")
if(!require(here)) install.packages("here", repos = "http://cran.us.r-project.org")
if(!require(readr)) install.packages("readr", repos = "http://cran.us.r-project.org")
if(!require(tidyr)) install.packages("tidyr", repos = "http://cran.us.r-project.org")
if(!require(ggmap)) install.packages("ggmap", repos = "http://cran.us.r-project.org")
if(!require(googleway)) install.packages("googleway", repos = "http://cran.us.r-project.org")
if(!require(viridis)) install.packages("viridis", repos = "http://cran.us.r-project.org")
if(!require(htmltools)) install.packages("htmltools", repos = "http://cran.us.r-project.org")
setwd(".")
# ==================================================== Load data ==========================================================
#nyc covid data
covid <- read.csv("https://data.cityofnewyork.us/resource/rc75-m7u3.csv", stringsAsFactors = FALSE)
covid$date_of_interest<-substr(covid$date_of_interest,1,10)
covid<-covid[,!grepl("all_",colnames(covid))]
covid<-covid[,!grepl("probable_death_count",colnames(covid))]
covid <- covid %>%
  rename_all(funs(str_replace_all(., "_7day_avg", " (7day_avg)"))) %>%
  dplyr::select(-death_count_probable, -incomplete)
nyc_latest <- covid %>% tail(1)
nyc_yesterday <- covid[nrow(covid)-1,]
### need to change to Github path
freemeal <- readr::read_csv('./output/COVID-19_Free_Meals_Locations.csv')
shelters=read.csv(file="./output/sorted_shelter_by_date_borough.csv")
|
d4d593a50b5510c28f582d7b2abea923ddf05ce7 | fcce4c98d688442eb03615e26bb377559a67f3af | /codes/SCRNAQC.R | 1fc3979f930a3b1fd71632585607cb8d5218d9f2 | [
"MIT"
] | permissive | zhewa/SCRNAQC | e04adf323635eece0522be739506773480d5675c | 3896485fd2d32298bcaa5c7a5c651fb08c8c7d8e | refs/heads/master | 2021-06-23T07:11:34.260228 | 2017-09-05T02:01:09 | 2017-09-05T02:01:09 | 95,233,585 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,680 | r | SCRNAQC.R | #!/usr/bin/env Rscript
## SCRNAQC main script
## Zhe Wang
## zhe@bu.edu
## 2017/06/28
## scRNA-seq QC pipeline
## Calculate metrics of in vitro transcription RNA molecules and
## PCR amplification products
## Input: directory of demultiplexed (plate specific) SAM files
## Output: QC stats table and plots
## required packages:
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(gtools))
suppressPackageStartupMessages(library(stringdist))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(ggthemes))
suppressPackageStartupMessages(library(scales))
suppressPackageStartupMessages(library(gridExtra))
suppressPackageStartupMessages(library(MASS))
suppressPackageStartupMessages(library(ggseqlogo))
source("SCRNAQC_functions.R")
#source("SCRNAQC_functions_test.R")
## global parameters
# SAM file directory
sam.dir <- "../data/sam/"
# acceptable umi sequence mismatches
umi.edit <- 1
# maximum acceptable gap between fragments
umi.max.gap <- 40
# maximum acceptable gap for alignment position correction
pos.max.gap <- 5
# output directory
output.dir <- "../res/"
# plate name
platename <- "CS_1017"
#stats.file <- "../res/Alignments_RPI1_UMI_stats.tab"
stats.file <- "../res/sam_UMI_stats.tab"
# palette for ggplot
cpalette <- c("#386cb0","#fdb462","#7fc97f","#ef3b2c","#662506","#0A3708",
"#a6cee3","#fb9a99","#984ea3","#ffff33","#000000", "#756682")
# run batch QC for one plate
batch.QC.sam(sam.dir, umi.edit, umi.max.gap, pos.max.gap, output.dir)
#pdf(paste0(output.dir, "QC_stats_visualization.pdf"))
#print(visualize.QC.stats(stats.file, platename))
#dev.off()
|
dd51bb3d36673f170a9ce643b844a701c50215b8 | c91c477a8e8aa21f50a697e9365e13eefcd954b3 | /Shiny/ui.R | e0aeeefb5016a72d34e28fb77ddfea7bd3c0162c | [] | no_license | omrisap/Miles-per-gallon | fc8356ae46b1f7645ea57c72da33ee439d0abb0c | f145c5957506de3002591d88573c6a5a03bf0afb | refs/heads/master | 2020-12-02T06:39:02.774072 | 2017-07-11T08:49:20 | 2017-07-11T08:49:20 | 96,870,870 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 983 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
data("mtcars")
# Define UI for an application that plots MPG against a chosen variable
shinyUI(fluidPage(
# Application title
titlePanel("Miles per gallons"),
  # Sidebar with controls to choose the x-axis variable and filter the data
sidebarLayout(
sidebarPanel(
radioButtons("xa",
"MPG vs ?",
c("disp","wt","qsec","hp")),
headerPanel("filter"),
sliderInput("cyl", "cyl:",
min = 4, max = 8, value = 6, step= 2),
checkboxInput("am", "am", FALSE),
textOutput("am")
),
    # Show the resulting plot
mainPanel(
plotOutput("distPlot")
)
)
))
|
0fe35ac05a2209b20eaccff9e6676c1cf9962fe7 | 8f8eac85cfbf8d3bc768318848ec964cb297b1cb | /casen/household_income/6_household_income_statistics_region/11_household_income_statistics_region_2011.R | c73ab8c27c574c4caf0b055ca9fd32736a7a74dd | [] | no_license | jnaudon/datachile-etl | 5231a3762dd32f3f3def4d568fc63934d603cf8b | 8fa577378d38f8d63f6dfdb00ed515bbb439f154 | refs/heads/master | 2023-03-23T00:36:35.698292 | 2019-03-23T03:30:16 | 2019-03-23T03:30:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,253 | r | 11_household_income_statistics_region_2011.R | ########
# 2011 #
########
# Median by region
weighted_median_2011_region <- ddply(household_income_2011, .(region), summarise, FUN = matrixStats::weightedMedian(per_capita_income, w = exp_region, na.rm = TRUE))
setnames(weighted_median_2011_region, colnames(weighted_median_2011_region), c("region","2011"))
#weighted_median_2011_region <- insert_row(weighted_median_2011_region, c("Pa\u00eds", median(household_income_2011$per_capita_income, na.rm = TRUE)), nrow(weighted_median_2011_region)+1)
weighted_median_2011_region$`2011` <- as.numeric(weighted_median_2011_region$`2011`)
weighted_median_2011_region$`2011` <- round(weighted_median_2011_region$`2011`, 0)
# Mean by region
weighted_mean_2011_region <- ddply(household_income_2011, .(region), summarise, FUN = matrixStats::weightedMean(per_capita_income, w = exp_region, na.rm = TRUE))
setnames(weighted_mean_2011_region, colnames(weighted_mean_2011_region), c("region","2011"))
weighted_mean_2011_region$`2011` <- as.numeric(weighted_mean_2011_region$`2011`)
weighted_mean_2011_region$`2011` <- round(weighted_mean_2011_region$`2011`, 0)
# Gini by region
weighted_gini_2011_region <- ddply(household_income_2011, .(region), summarise, FUN = SciencesPo::Gini(per_capita_income, weights = exp_region, na.rm = TRUE))
setnames(weighted_gini_2011_region, colnames(weighted_gini_2011_region), c("region","2011"))
weighted_gini_2011_region$`2011` <- as.numeric(weighted_gini_2011_region$`2011`)
weighted_gini_2011_region$`2011` <- round(weighted_gini_2011_region$`2011`, 3)
# Lower Bound for Median by region
lb_weighted_median_2011_region <- ddply(household_income_2011, .(region), summarise, FUN = lb_weighted_median_plyr(per_capita_income, exp_region))
setnames(lb_weighted_median_2011_region, colnames(lb_weighted_median_2011_region), c("region","2011"))
lb_weighted_median_2011_region$`2011` <- as.numeric(lb_weighted_median_2011_region$`2011`)
lb_weighted_median_2011_region$`2011` <- round(lb_weighted_median_2011_region$`2011`, 0)
# Upper Bound for Median by region
ub_weighted_median_2011_region <- ddply(household_income_2011, .(region), summarise, FUN = ub_weighted_median_plyr(per_capita_income, exp_region))
setnames(ub_weighted_median_2011_region, colnames(ub_weighted_median_2011_region), c("region","2011"))
ub_weighted_median_2011_region$`2011` <- as.numeric(ub_weighted_median_2011_region$`2011`)
ub_weighted_median_2011_region$`2011` <- round(ub_weighted_median_2011_region$`2011`, 0)
# Lower Bound for Mean by region
lb_weighted_mean_2011_region <- ddply(household_income_2011, .(region), summarise, FUN = lb_weighted_mean_plyr(per_capita_income, exp_region))
setnames(lb_weighted_mean_2011_region, colnames(lb_weighted_mean_2011_region), c("region","2011"))
lb_weighted_mean_2011_region$`2011` <- as.numeric(lb_weighted_mean_2011_region$`2011`)
lb_weighted_mean_2011_region$`2011` <- round(lb_weighted_mean_2011_region$`2011`, 0)
# Upper Bound for Mean by region
ub_weighted_mean_2011_region <- ddply(household_income_2011, .(region), summarise, FUN = ub_weighted_mean_plyr(per_capita_income, exp_region))
setnames(ub_weighted_mean_2011_region, colnames(ub_weighted_mean_2011_region), c("region","2011"))
ub_weighted_mean_2011_region$`2011` <- as.numeric(ub_weighted_mean_2011_region$`2011`)
ub_weighted_mean_2011_region$`2011` <- round(ub_weighted_mean_2011_region$`2011`, 0)
# Lower Bound for Gini by region
lb_weighted_gini_2011_region <- ddply(household_income_2011, .(region), summarise, FUN = lb_weighted_gini_plyr(per_capita_income, exp_region))
setnames(lb_weighted_gini_2011_region, colnames(lb_weighted_gini_2011_region), c("region","2011"))
lb_weighted_gini_2011_region$`2011` <- as.numeric(lb_weighted_gini_2011_region$`2011`)
lb_weighted_gini_2011_region$`2011` <- round(lb_weighted_gini_2011_region$`2011`, 3)
# Upper Bound for Gini by region
ub_weighted_gini_2011_region <- ddply(household_income_2011, .(region), summarise, FUN = ub_weighted_gini_plyr(per_capita_income, exp_region))
setnames(ub_weighted_gini_2011_region, colnames(ub_weighted_gini_2011_region), c("region","2011"))
ub_weighted_gini_2011_region$`2011` <- as.numeric(ub_weighted_gini_2011_region$`2011`)
ub_weighted_gini_2011_region$`2011` <- round(ub_weighted_gini_2011_region$`2011`, 3)
|
024cee0d3d16d19fc7c207090646134ade143cb7 | 5394122fc755b642849dcefc25d6056e49272120 | /r/runsystymer.R | a6035550b17c6ae3bbe3698712eb80876887fdd8 | [
"Unlicense"
] | permissive | timkphd/examples | 3e2a0c514322a8537671c80c909d835eeca5aec9 | e84f249a4f72e45767d29ca889f5ea286ba0de1a | refs/heads/master | 2023-09-01T02:51:00.798436 | 2023-08-19T22:39:44 | 2023-08-19T22:39:44 | 188,473,532 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 362 | r | runsystymer.R | # show how these work
# "source" this in a notebook
#
source("mysys.R")
source("tymer.R")
sys("cat runsystymer.R")
ls()
html("readme.html")
imbase("images/Rstuff")
nextim()
sys("ls -lt")
z <- sys_df("srun -n 8 ./bcast.R")
z
tymer("begin")
srun("-n 4 ./flower.R")
tymer("end")
print(sprintf("%4.4d",12))
nextim(51)
for (i in 1:3) {
nextim()
Sys.sleep(3)
}
|
6ad26066aaafb533ae55279e6cfac75eb9a29214 | 0dcdb2de8a998be4510363f762a42fb8b15e556e | /tests/testthat/test-mapping.R | 4aa9ed1848ddab6f67d6d20dc5276a09c6b21f14 | [
"MIT"
] | permissive | carlganz/shinytest | 16c97318d444ce72ff3b8cf011379907df44eb8c | e6c1ddaa072e76b12b5870b68d4ca2f4997f8fd8 | refs/heads/master | 2020-12-30T23:22:02.126630 | 2016-09-16T13:11:46 | 2016-09-16T13:11:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,369 | r | test-mapping.R |
context("mapping")
test_that("input widgets", {
app <- shinyapp$new("apps/081-widgets-gallery")
expect_equal(app$find_widget("action")$get_type(), "actionButton")
expect_equal(app$find_widget("checkbox")$get_type(), "checkboxInput")
expect_equal(app$find_widget("checkGroup")$get_type(), "checkboxGroupInput")
expect_equal(app$find_widget("date")$get_type(), "dateInput")
expect_equal(app$find_widget("dates")$get_type(), "dateRangeInput")
expect_equal(app$find_widget("file")$get_type(), "fileInput")
expect_equal(app$find_widget("num")$get_type(), "numericInput")
expect_equal(app$find_widget("radio")$get_type(), "radioButtons")
expect_equal(app$find_widget("select")$get_type(), "selectInput")
expect_equal(app$find_widget("slider1")$get_type(), "sliderInput")
expect_equal(app$find_widget("slider2")$get_type(), "sliderInput")
expect_equal(app$find_widget("text")$get_type(), "textInput")
})
test_that("output widgets with the same name", {
app <- shinyapp$new("apps/081-widgets-gallery")
names <- c(
"action", "checkbox", "checkGroup", "date", "dates", "file", "num",
"radio", "select", "slider1", "slider2", "text"
)
for (n in names) {
expect_equal(
app$find_widget(n, "output")$get_type(),
"verbatimTextOutput",
info = n
)
}
})
|
0df1f4c170cc5aa0f5e087d6e2fdce4e13363bbb | a6f2bd158a96455638822f53c203978ae3e71fcb | /Introductory.R | 3200007affdab6f0598a7e534fde715acd4b18cf | [] | no_license | danStich/data4_r4nalms | 74343a8c5f34fab1bbc2266250880a19cc74735a | e70b96bd95567802f2ccb784a061b9fd0683eed8 | refs/heads/master | 2022-11-23T13:05:31.024434 | 2022-11-08T11:15:09 | 2022-11-08T11:15:09 | 152,175,858 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,582 | r | Introductory.R |
## --------------------------------------------------------------------------
# This is a comment.
# We know because it is preceded
# by a hashtag, or 'octothorpe'.
# R ignores comments so you have
# a way to write down what you have
# done or what you are doing.
# This is useful for sharing
# code or just figuring out
# what you did.
## --------------------------------------------------------------------------
# Add 1 and 1 together
1 + 1
## --------------------------------------------------------------------------
# Example (run the following lines):
a <- 1
A <- 2
# Are these two things equal?
a == A
## ---------------------------------------------------------------
## # This won't work
## 1a <- 1
##
## # But this one works
## # Try it by typing
## # a1,
## # print(a) or
## # show(a)
## # in the console:
## a1 <- 1
##
## --------------------------------------------------------------------------
a <- 1
a <- 2
a
## --------------------------------------------------------------------------
T == TRUE
## --------------------------------------------------------------------------
a <- 1
a
## --------------------------------------------------------------------------
a <- c(1, 2, 3, 4, 5) # Make a vector of integers 1-5
print(a) # One way to look at our vector
show(a) # Another way to look at it
a # A third way to look at it
str(a) # Look at the structure; c() stored these as numeric (double), not integer
## --------------------------------------------------------------------------
# Define the same vector using a sequence
a <- seq(from = 1, to = 5, by = 1)
str(a)
## --------------------------------------------------------------------------
b <- c("a", "b", "c", "d", "e") # Make a character vector
b # Print it to the console
str(b) # Now it's a character vector
b <- as.factor(b) # But we can change if we want
b
str(b) # Look at the data structure
## ---- eval=FALSE-----------------------------------------------------------
## as.numeric(b)
##
## # What did that do?
## ?as.numeric
## ---- message=FALSE, warning=FALSE-----------------------------------------
# The '==' compares the numeric vector to the factor one
c <- a == b
c
str(c)
## --------------------------------------------------------------------------
is.na(a) # We can check for missing values
is.finite(a) # We can make sure that all values are finite
!is.na(a) # The exclamation point means 'not'
a == 3 # We can see if specific elements meet a criterion
unique(b) # We can just look at unique values
## --------------------------------------------------------------------------
# This one just prints it
a[3]
# This one stores it in a new object
f <- a[3]
## --------------------------------------------------------------------------
b[b == "c"]
which(b == "c")
## --------------------------------------------------------------------------
a * .5 # Multiplication
a + 100 # Addition
a - 3 # Subtraction
a / 2 # Division
a^2 # Exponentiation
exp(a) # This is the same as 'e to the...'
log(a) # Natural logarithm
log10(a) # Log base 10
## --------------------------------------------------------------------------
b <- as.character(b)
paste(b, "AAAA", sep = "") # We can append text
paste("AAAA", b, sep = "") # We can do it the other way
paste("AAAA", b, sep = "--") # Add symbols to separate
gsub(pattern = "c", replacement = "AAAA", b) # We can replace text
e <- paste("AAAA", b, sep = "") # Make a new object
e # Print to console
substr(e, start = 5, stop = 5) # We can strip text (or dates, or times, etc.)
## --------------------------------------------------------------------------
length(a) # A has a length of 5, try it and check it
a # Yup, looks about right
## --------------------------------------------------------------------------
cbind(a, e)
## --------------------------------------------------------------------------
matrix(0, nrow = 3, ncol = 4)
## --------------------------------------------------------------------------
mat <- matrix(seq(1, 12), ncol = 3, nrow = 4)
## --------------------------------------------------------------------------
ncol(mat) # Number of columns
nrow(mat) # Number of rows
length(mat) # Total number of entries
mat[2, 3] # Value of row 2, column 3
str(mat)
## --------------------------------------------------------------------------
colnames(mat) <- c("first", "second", "third")
rownames(mat) <- c("This", "is", "a", "matrix")
mat
str(mat) # Take a look to understand
## -----------------------------------------------------------
otsego <- read.csv("data/physical.csv")
## ----eval = FALSE----------------------------------------------------------
## ls()
## ---- eval = FALSE---------------------------------------------------------
## ls() # We can use ls() to see what is in our environment
## head(otsego) # Look at the first six rows of data in the object
## nrow(otsego) # How many rows does it have?
## ncol(otsego) # How many columns?
## names(otsego) # What are the column names?
## str(otsego) # Have a look at the data structure
## summary(otsego) # Summarize the variables in the dataframe
## --------------------------------------------------------------------------
otsego[12, 4]
## --------------------------------------------------------------------------
otsego[12, "depth"]
## --------------------------------------------------------------------------
mean(otsego$temp[otsego$depth == 10.0], na.rm = TRUE)
## ---- warning=FALSE, message=FALSE-----------------------------------------
library(tidyverse)
## --------------------------------------------------------------------------
otsego <- group_by(otsego, month, depth)
## --------------------------------------------------------------------------
otsego_summary <- summarize(otsego, mean_temp = mean(temp, na.rm = TRUE), sd_temp = sd(temp, na.rm = TRUE))
## --------------------------------------------------------------------------
print(otsego_summary)
## ---- message = FALSE, warning = FALSE-------------------------------------
ggplot(otsego_summary, aes(x = mean_temp, y = depth)) +
geom_point() +
facet_wrap(~month)
## --------------------------------------------------------------------------
# Define a function to convert temperature
# in Celsius to temperature in Fahrenheit:
cToF <- function(cels) {
faren <- cels * (9 / 5) + 32
return(faren)
}
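# Sanity check on known reference points: cToF(0) gives 32 and cToF(100) gives 212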
## --------------------------------------------------------------------------
# Test the function out.
# Here, we create a new variable in the
# otsego dataframe to hold the result
otsego$tempF <- cToF(otsego$temp)
## -------------------------------------------------------------------
## write.csv(x = otsego, file = "physicalF.csv")
## -------------------------------------------------------------------
## # Write the data set to a csv file.
## # We specify the object, the filename, and
## # we tell R we don't want rownames or quotes
## # around character strings. Finally, we tell
## # R to separate columns using a comma.
##
## # We could do exactly the same thing, but use a ".txt"
## # file extension, and we would get a comma-separated
## # text file if we needed one.
## write.table(
## x = otsego, file = "physicalF.csv", row.names = FALSE,
## quote = FALSE, sep = ","
## )
## ---- eval=FALSE-----------------------------------------------------------
## save(otsego, file = "otsego.rda")
|
ec09433d10fb26c5f10982055bb1d051e09d4e9d | 277dbb992966a549176e2b7f526715574b421440 | /R_training/실습제출/박우찬/191101/hotel.R | 81f7f67cd70a8c58f8881eecd3dfe3437a7f5ef1 | [] | no_license | BaeYS-marketing/R | 58bc7f448d7486510218035a3e09d1dd562bca4b | 03b500cb428eded36d7c65bd8b2ee3437a7f5ef1 | refs/heads/master | 2020-12-11T04:30:28.034460 | 2020-01-17T08:47:38 | 2020-01-17T08:47:38 | 227,819,378 | 0 | 0 | null | 2019-12-13T12:06:33 | 2019-12-13T10:56:18 | C++ | UTF-8 | R | false | false | 1,266 | r | hotel.R | # Agoda review scraper
library(RSelenium)   # provides remoteDriver()
remDr <- remoteDriver(remoteServerAddr = "localhost", port = 4445, browserName = "chrome")
remDr$open()
url<-'https://www.agoda.com/ko-kr/shilla-stay-seocho/hotel/seoul-kr.html?cid=-204'
remDr$navigate(url)
# Click "Later" to dismiss the popup menu
laterAction = remDr$findElement(using='css',
'body > div.SearchboxBackdrop')
laterAction$clickElement()
# Scroll down to the reviews section
remDr$executeScript("scrollBy(0, 8400)")
# Click the reviews tab
reviewsTab <- remDr$findElement(using='css',
                        '#customer-reviews-panel > button > div > span')
reviewsTab$clickElement()
result = NULL
while (TRUE) {
  # Collect the review texts on the current page
  totalReview = remDr$findElements(using = 'css', 'p.Review-comment-bodyText')
  reviews = sapply(totalReview, function(x){x$getElementText()})
  result = c(result, reviews)
  # Move to the next page of reviews; findElements() returns an empty list
  # when the "next" arrow is absent, so stop before trying to click it
  NextAction = remDr$findElements(using='css',
                                  '#reviewSection > div:nth-child(4) > div > span:nth-child(3) > i')
  if(length(NextAction) == 0)
    break
  NextAction[[1]]$clickElement()
  Sys.sleep(2)
}
cat(length(unlist(result)), ' reviews extracted', sep = '')
write(unlist(result), 'hotel.txt')
|
f6704c4bdbd0928a9744a5d87e3f5b036db571d8 | 6861114e6fde7a8db5a9d2a164362d28de2f2aa0 | /man/testFoo.Rd | eacf2c16f7a91ebec22a91d8c5badf9980b808ea | [] | no_license | jlombard314159/cacheRExample | adfc495d2880b3f8a0d6efce8b5bbcd6d9fb3cf4 | dc0df2b5e49981b8f38e97eb638e62ac91593c8d | refs/heads/main | 2023-07-12T22:51:39.128928 | 2021-05-12T16:35:29 | 2021-05-12T16:35:29 | 366,583,342 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 215 | rd | testFoo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testFoo.R
\name{testFoo}
\alias{testFoo}
\title{testFoo}
\usage{
testFoo(cif)
}
\arguments{
\item{cif}{dead things}
}
\description{
ok
}
|
49b048dbe2f0622a5551c37a46ba5a3232488e4e | e64682d3b561e8e1fa7962709d900a3047edb11d | /plot1.R | 0bdec7290f666177996f44caab75b5a60da791fa | [] | no_license | EdmondKonya/ExData_Plotting1 | 3e4ab07b6811bf9ef2084baf6db9d524d0e4d8cd | 6ed53c5023d62bb0aa1bdbea056500d3230d8874 | refs/heads/master | 2021-01-21T16:39:15.530662 | 2015-07-10T20:07:25 | 2015-07-10T20:07:25 | 38,881,724 | 0 | 0 | null | 2015-07-10T13:25:40 | 2015-07-10T13:25:39 | null | UTF-8 | R | false | false | 1,877 | r | plot1.R | #Check if the working directory already contains the data_set; "household_power_consumption.txt"
if(!file.exists("household_power_consumption.txt")){
fileURl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURl, destfile = "./energy_data.zip")
unzip(zipfile = "./energy_data.zip", exdir = ".")
unlink("energy_data.zip")
  #If the energy data set has already been downloaded, unzipped and saved in the
  #current working directory, the user is informed via a console message
} else {
message("The corresponding data-set is available in the current working directory.")
}
#Load relevant data (from 01/02/2007 till 02/02/2007) and set date- and time-formats
energy_data <- read.table(file = "household_power_consumption.txt",
sep = ";",
header = F,
col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
                          # anchor so "11/2/2007"/"21/2/2007" don't match; take the first
                          # hit and subtract 1 so the first 1/2/2007 row itself is read
                          skip = grep("^1/2/2007", readLines("household_power_consumption.txt"))[1] - 1,
colClasses = c("factor", "factor", "numeric", "numeric", "numeric", "numeric","numeric", "numeric", "numeric"),
nrows = 2880,
na.strings = "?")
energy_data$Date <- as.Date(energy_data$Date, format = "%d/%m/%Y")
energy_data$Time <- strptime(energy_data$Time, format = "%H:%M:%S")
#Plotting the corresponding graph and saving it as a .png file
png(filename = "plot1.png",
width = 480,
height = 480,
units = "px",
bg = "white")
hist(x = energy_data$Global_active_power,
breaks = 12,
col = "red",
main = "Global Active Power",
xlab = "Global Active Power (in kilowatts)")
dev.off() |
fa3ea113bbd9149c177a64f741110fbbf7192cf0 | 19fb74ecb354cfc273bdb828d057494a23d9c492 | /eda_r/diamonds.R | 7cf3fd3ae86fb7ba47f55b51fd0e5db1f79cc3b4 | [] | no_license | Sando1/udacity-data-analyst | 909fd07c2a41ce3bbad3037efecc6514430ff910 | 2c701951ea4af99a458eda91238a762db58d8e7f | refs/heads/master | 2021-06-19T08:23:01.379864 | 2019-10-01T21:06:12 | 2019-10-01T21:06:12 | 150,722,188 | 5 | 2 | null | null | null | null | UTF-8 | R | false | false | 6,407 | r | diamonds.R | # install and load packages
# install.packages('ggplot2')
library(ggplot2)
# install.packages('ggthemes', dependencies = TRUE)
library(ggthemes)
# install.packages('gridExtra')
library(gridExtra)
library(scales)
# load dataset
data("diamonds")
# find all info available on the dataset
help("diamonds")
# no of rows in data set
nrow(diamonds)
# number of ordered factors
summary(diamonds)
# highest level
help("diamonds")
# make a histogram of the price of
# all the diamonds in the diamond data set
ggplot(diamonds, aes(x = price)) +
geom_histogram(color = "black", fill = "LightBlue", binwidth = 1000) +
scale_x_continuous(breaks = seq(0, 20000, 1000)) +
theme(axis.text.x = element_text(angle = 90)) +
xlab("Price in $") + ylab("Count")
summary(diamonds$price)
# COMMENTS ON THE PLOT
# The histogram is positively (right-)skewed, with its tallest bar over the
# $1000-2000 bin and a long right tail. The mean is $3933 and the range is
# 18497 (18823 - 326). The median ($2401) is well below the mean, as
# expected for a right-tailed distribution.
# How many diamonds cost less than $500 = 1729
summary(diamonds$price < 500)
# How many diamonds cost less than $250 = 0
summary(diamonds$price < 250)
# How many diamonds cost $15000 or more = 1656
summary(diamonds$price >= 15000)
# exploring the largest bin in the histogram plotted before
# i.e the 1000 bin
ggplot(diamonds, aes(x = price)) +
geom_histogram(color = "black", fill = "LightGreen", binwidth = 50) +
scale_x_continuous(breaks = seq(500, 1500, 50)) +
theme(axis.text.x = element_text(angle = 90)) +
coord_cartesian(c(500,1500)) +
xlab("Price in $") + ylab("Count")
# There are no diamonds that cost $1500.
# For diamonds that cost less than $2,000,
# the most common price of a diamond is around $700
# Break out the histogram of diamond prices by cut.
ggplot(diamonds, aes(x = price)) +
geom_histogram(color = "black", fill = "DarkOrange", binwidth = 50) +
scale_x_continuous(breaks = seq(0, 4000, 100)) +
theme(axis.text.x = element_text(angle = 90)) +
coord_cartesian(c(0,4000)) +
facet_grid(cut~.) +
xlab("Price") + ylab("Count")
# Which cut has the highest priced diamond ? Premium
by(diamonds$price, diamonds$cut, max)
# Which cut has the lowest price diamond? Very Good, Premium and Ideal
by(diamonds$price, diamonds$cut, min)
# Which cut has the lowest median price? Ideal
by(diamonds$price, diamonds$cut, median)
# In the last exercise, we looked at the summary statistics
# for diamond price by cut. If we look at the output table, the
# the median and quartiles are reasonably close to each other.
# diamonds$cut: Fair
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 337 2050 3282 4359 5206 18570
# ------------------------------------------------------------------------
# diamonds$cut: Good
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 327 1145 3050 3929 5028 18790
# ------------------------------------------------------------------------
# diamonds$cut: Very Good
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 336 912 2648 3982 5373 18820
# ------------------------------------------------------------------------
# diamonds$cut: Premium
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 326 1046 3185 4584 6296 18820
# ------------------------------------------------------------------------
# diamonds$cut: Ideal
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 326 878 1810 3458 4678 18810
# This means the distributions should be somewhat similar,
# but the histograms we created don't show that.
# The 'Fair' and 'Good' diamonds appear to have
# different distributions compared to the better
# cut diamonds. They seem somewhat uniform
# on the left with long tails on the right.
# Let's look in to this more.
# Look up the documentation for facet_wrap in R Studio.
# Then, scroll back up and add a parameter to facet_wrap so that
# the y-axis in the histograms is not fixed. You want the y-axis to
# be different for each histogram.
qplot(x = price, data = diamonds, binwidth = 100) +
facet_wrap(~cut, scales = "free")
# Create a histogram of price per carat
# and facet it by cut.
ggplot(diamonds, aes(x = price/carat)) +
geom_histogram(color = "black", fill = "DarkRed", binwidth = .05) +
theme(axis.text.x = element_text(angle = 0)) +
scale_x_log10(expression(paste(Log[10], " of Price")),
breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x))) +
facet_grid(cut~., scale = "free") + ylab("Count")
# Investigate the price of diamonds using box plots,
# numerical summaries, and one of the following categorical
# variables: cut, clarity, or color.
ggplot(diamonds, aes(x = cut, y = price)) +
geom_boxplot(varwidth = TRUE, outlier.colour = "red", outlier.shape = 1) +
theme(axis.text.x = element_text(angle = 0)) +
facet_grid(color~., margins = TRUE)
# a) What is the price range for the
#middle 50% of the diamonds with color D?
# b) What is the price range for the
#middle 50% of the diamonds with color J?
# c) What is the IQR for diamonds with the best color?
# d) What is the IQR for diamonds with the worst color?
by(diamonds$price, diamonds$color, summary)
IQR(subset(diamonds, color == 'D')$price)
IQR(subset(diamonds, color == 'J')$price)
# Investigate the price per carat of diamonds across
# the different colors of diamonds using boxplots.
ggplot(diamonds, aes(x = color, y = price/carat)) +
geom_boxplot(varwidth = TRUE, outlier.colour = "red", outlier.shape = 1) +
theme(axis.text.x = element_text(angle = 0)) +
xlab("Color") + ylab("Price per Carat")
# Investigate the weight of the diamonds (carat) using a
#frequency polygon. Use different bin widths to see how the
#frequency polygon changes. What carat size has a count greater
#than 2000? Check all that apply.
sizes = c(0.1, 0.3, 0.8, 1.01, 1.6, 2.0, 3.0, 5.0)
ggplot(diamonds, aes(x=carat)) +
geom_freqpoly(binwidth=0.1, alpha = 0.75) +
scale_x_continuous(breaks=sizes, expand = c(0,0)) +
scale_y_continuous(expand=c(0,0))+
  geom_vline(xintercept = sizes, color = "darkblue", alpha = 0.5) +
geom_hline(yintercept = 2000, color = "brown", alpha = 0.5) +
xlab("Carat Size") + ylab("Count") |
0693bb256f40dcb6cabfb13ec60fa774fb3d67c8 | 48fac7f36d8fc50191b8f6a362ebc6770c8a3560 | /code/2020W34_Extinct_Plants.R | f92cd0c58c7255e1a3f96c6223d9feb0a07bcdf4 | [] | no_license | dosullivan019/tidytuesday | eae0df9588d53434ce5f6dcf607a2b529bee778f | 37e14584179928455355dc8c24764580a9f1c3fb | refs/heads/master | 2023-08-05T23:10:22.386935 | 2021-09-08T15:20:00 | 2021-09-08T15:20:00 | 286,712,994 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,044 | r | 2020W34_Extinct_Plants.R | # Tidy Tuesday
# 18/08/2020: Plants in Danger
library(ggplot2)
library(dplyr)
library(rnaturalearth)
library(wesanderson)
# plants <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-08-18/plants.csv')
# actions <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-08-18/actions.csv')
threats <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-08-18/threats.csv')
sapply(threats, function(x) sum(is.na(x))) # year_last_seen is only variable with na's
# Calculating the percentage of plants classified as threatened in each country
threat_pct <- threats %>% group_by(country) %>%
summarise_at(., 'threatened', function(x) (sum(x)/length(x) * 100)) %>% arrange(-threatened)
colnames(threat_pct) <- c('name', 'pct_threatened')
# Creating map using rnaturalearth
rne_map <- ne_countries(scale = "medium", returnclass = "sf")
# Plotting % plants threatened
pct_threatened_map =
left_join(rne_map, threat_pct, by='name') %>%
ggplot(aes(fill =pct_threatened)) +
geom_sf(color = NA) +
scale_fill_gradient(high='navy',low='darkturquoise',na.value = 'lightgrey') +
theme_void() + labs(title = "Plants in Danger",
subtitle = "% of observed plants threatened per country",
fill = "% plants threatened",
caption = "Data source: IUCN Red List") +
theme(plot.title = element_text(hjust = 0.5,size=15,face='bold'),
plot.subtitle = element_text(hjust = 0.5,size=12),
legend.position = 'top', legend.title=element_text(vjust = 0.75, size=8,face='bold'),
legend.text = element_text(size=5), legend.key.height = unit(0.25, "cm"))
ggsave(filename='20200818_PlantsInDanger_PercentageThreatenedMap.png', plot=pct_threatened_map, path='./plots')
# Calculating the greatest threat_type of threatened plants in each country
# Need the number of plants threatened by country and threat type
# Then find the threat which has the max number of plants threatened for each country
threats_for_vz <- threats %>%
group_by(country) %>% mutate(total_cntry_sp = length(country)) %>%
group_by(country) %>% mutate(pct_cntry_sp_threatened = sum(threatened)/length(country) * 100) %>%
group_by(country, threat_type) %>% mutate(total_sp_threat_type_cntry=sum(threatened)) %>%
group_by(country) %>% mutate(greatest_threat_cntry=max(total_sp_threat_type_cntry))
threat_type_vz <-
unique(threats_for_vz[which(threats_for_vz$total_sp_threat_type_cntry==threats_for_vz$greatest_threat_cntry),
c('country', 'threat_type', "total_sp_threat_type_cntry", "greatest_threat_cntry")] ) %>%
group_by(country) %>% mutate(cnt_threat = length(country)) %>%
# if a country has more than one threat type being the biggest threat then rename as 'Multiple Factors'
mutate(greatest_threat=ifelse(cnt_threat>1, 'Multiple Factors', threat_type)) %>%
group_by(country) %>% select(country,greatest_threat)
colnames(threat_type_vz)=c('name','greatest_threat')
# Joining to map and plot
greatest_threat_map =
left_join(rne_map, threat_type_vz, by='name') %>%
ggplot(aes(fill =greatest_threat)) +
geom_sf(color = NA) +
scale_fill_manual(values=c(wes_palettes$Cavalcanti1, wes_palettes$Rushmore1),na.value='lightgrey') +
theme_void() + labs(title = "Plants in Danger",
subtitle = "Most common threat which has caused plants to become threatened",
fill = "Threat",
caption = "Data source: IUCN Red List") +
theme(plot.title = element_text(hjust = 0.5,size=15, face='bold'),
plot.subtitle = element_text(hjust = 0.5,size=12),
legend.position = 'top', legend.title=element_text(size=8,vjust = 0.75, face='bold'),
legend.text = element_text(size=6), legend.key.height = unit(0.25, "cm"))
ggsave(filename='20200818_PlantsInDanger_CauseOfThreatenedMap.png', plot=greatest_threat_map, path='./plots') |
de5dc2f08c3e979a436bfdad22760e2669af2052 | 8b982cdde20a781eb7289e051e7eef0f1aadbe24 | /DataAnalysis.R | abae7144b206b6aa1c6403e838fba59505e53160 | [] | no_license | roger555330/Working-Memory-Sums | 5542c5bc18a560252ab635017ded2f726f326e69 | b9a9faf5e3af0b52c1ef695e79f0b88778f3c922 | refs/heads/master | 2020-05-16T03:08:01.080993 | 2017-11-28T17:25:37 | 2017-11-28T17:25:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,411 | r | DataAnalysis.R | # data analysis for Working Memory Sums task
library(plotrix)
# initialize grouping variable vectors
allConSubjAcc = c()
allConMedRT = c()
allConSumTenAcc = c()
allConSumNotTenAcc = c()
allConNumPers = c()
allConRTSumTen = c()
allConRTSumNotTen = c()
allCussSubjAcc = c()
allCussMedRT = c()
allCussSumTenAcc = c()
allCussSumNotTenAcc = c()
allCussNumPers = c()
allCussRTSumTen = c()
allCussRTSumNotTen = c()
###################### con group ######################################
baseFolder = "/Users/rorden/Documents/MATLAB/Working-Memory-Sums-master (4)/data/con"
fileList = list.files(baseFolder, pattern = ".csv", full.names = TRUE)
numberOfFiles = length(fileList)
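# For each subject file compute: overall accuracy (%), accuracy split by trial
# type (sum-is-ten vs. not), mean/median RT excluding timeouts (coded RT = 999),
# perseveration count (responses faster than 100 ms), and median RT by trial type.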
for (i in 1:numberOfFiles) {
file = fileList[i]
data = read.csv(file)
subjAcc = round(mean(data$accuracy)*100, digits = 2)
sumIsTenTrials = round(mean(data$accuracy[data$trialType == 1])*100,digits=2)
sumNotTenTrials = round(mean(data$accuracy[data$trialType == 0])*100,digits=2)
averageRT = round(mean(data$RT[data$RT<999]), digits =2)
medianRT = round(median(data$RT[data$RT < 999]),digits=2)
numPers = length(data$RT[data$RT<0.1]) # perseverations are RT less than 100ms
RTSumTen = round(median(data$RT[data$trialType == 1 & data$RT < 999]),digits=2)
RTSumNotTen = round(median(data$RT[data$trialType == 0 & data$RT < 999]),digits=2)
allConSubjAcc = append(allConSubjAcc, subjAcc)
allConMedRT = append(allConMedRT, medianRT)
allConSumTenAcc = append(allConSumTenAcc, sumIsTenTrials)
allConSumNotTenAcc = append(allConSumNotTenAcc, sumNotTenTrials)
allConNumPers = append(allConNumPers, numPers)
allConRTSumTen = append(allConRTSumTen, RTSumTen)
allConRTSumNotTen = append(allConRTSumNotTen, RTSumNotTen)
}
dataForCard_zConWM = scale(allConSubjAcc)
###################### cuss group ######################################
baseFolder = "/Users/rorden/Documents/MATLAB/Working-Memory-Sums-master (4)/data/cuss"
fileList = list.files(baseFolder, pattern = ".csv", full.names = TRUE)
numberOfFiles = length(fileList)
for (i in 1:numberOfFiles) {
file = fileList[i]
data = read.csv(file)
subjAcc = round(mean(data$accuracy)*100, digits = 2)
sumIsTenTrials = round(mean(data$accuracy[data$trialType == 1])*100,digits=2)
sumNotTenTrials = round(mean(data$accuracy[data$trialType == 0])*100,digits=2)
averageRT = round(mean(data$RT[data$RT<999]), digits =2)
medianRT = round(median(data$RT[data$RT < 999]),digits=2)
numPers = length(data$RT[data$RT<0.1]) # perseverations are RT less than 100ms
RTSumTen = round(median(data$RT[data$trialType == 1 & data$RT < 999]),digits=2)
RTSumNotTen = round(median(data$RT[data$trialType == 0 & data$RT < 999]),digits=2)
allCussSubjAcc = append(allCussSubjAcc, subjAcc)
allCussMedRT = append(allCussMedRT, medianRT)
allCussSumTenAcc = append(allCussSumTenAcc, sumIsTenTrials)
allCussSumNotTenAcc = append(allCussSumNotTenAcc, sumNotTenTrials)
allCussNumPers = append(allCussNumPers, numPers)
allCussRTSumTen = append(allCussRTSumTen, RTSumTen)
allCussRTSumNotTen = append(allCussRTSumNotTen, RTSumNotTen)
}
dataForCard_zCussWM = scale(allCussSubjAcc)
t = t.test(allConSubjAcc, allCussSubjAcc, var.equal = TRUE)
t
t = t.test(allConMedRT, allCussMedRT, var.equal = TRUE)
t
t = t.test(allConSumTenAcc, allCussSumTenAcc, var.equal = TRUE)
t
t = t.test(allConSumNotTenAcc, allCussSumNotTenAcc, var.equal = TRUE)
t
t = t.test(allConNumPers, allCussNumPers, var.equal = TRUE)
t
t = t.test(allConRTSumTen, allCussRTSumTen, var.equal = TRUE)
t
t = t.test(allConRTSumNotTen, allCussRTSumNotTen, var.equal = TRUE)
t
t = t.test(allConRTSumTen, allConRTSumNotTen, paired = TRUE, var.equal = TRUE)
t
t = t.test(allCussRTSumTen, allCussRTSumNotTen, paired = TRUE, var.equal = TRUE)
t
myVarToPlotA = allConSumTenAcc
myVarToPlotB = allCussSumTenAcc
barCenters = barplot(c(mean(myVarToPlotA), mean(myVarToPlotB)),
                     names.arg = c("con", "cuss"),
                     main = "Subj Mean Acc Sum 10: Con vs. Cuss",
                     xlab = "Group",
                     ylab = "Accuracy (%)",
                     ylim = c(0, 100))
# segments(barCenters, mean(myVarToPlotA) - std.error(myVarToPlotA) * 2, barCenters,
# mean(myVarToPlotB) + std.error(myVarToPlotB) * 2, lwd = 1.5)
# arrows(barCenters, mean(myVarToPlotA) - std.error(myVarToPlotA) * 2, barCenters,
# mean(myVarToPlotB) + std.error(myVarToPlotB) * 2, lwd = 1.5, angle = 90,
# code = 3, length = 0.05)
|
1bb11800ede2640944eab10432b83fa06e386e57 | d420f963ab5ff604cc74a3037f9396cd826e606c | /R/new_zealand_deaths_reg.R | 00a3236b76e21bdaa096dbea160311393e038061 | [] | no_license | johnrbryant/bdefdata | d103f09bcd26cbfe677449d799ba9274cf0209e7 | 6c637e28d891c32cb0e8bd18d046c355287eadb8 | refs/heads/master | 2020-03-18T17:16:50.127404 | 2018-09-23T06:43:11 | 2018-09-23T06:43:11 | 135,018,297 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 399 | r | new_zealand_deaths_reg.R |
#' Registered deaths in New Zealand by region, 1997-2016.
#'
#' The years are June years. For instance, the year "1997" runs from 1 July 1996
#' to 30 June 1997.
#'
#' To preserve confidentiality, death counts have been randomly rounded to base 3.
#'
#' @format An array with dimensions "region" and "time".
#'
#' @source Custom tabulation from Statistics New Zealand.
#'
"new_zealand_deaths_reg"
|
5a6e28b2beeb0607bb44446cb7ca0c41fa51a6c6 | 4c32ee156e20f2ce888471a203a103dd226f5896 | /R/plot_coral_plots.R | 33fc342b6f9def5958f24a48d30f824a7dbdea91 | [] | no_license | kmichael08/ceterisParibus | 56d81d98b96c2b9fec18d54b06f718b1b4b37b4f | 4c9b1134f5fc84698ad864a751bbd50bab4ce03c | refs/heads/master | 2020-03-31T20:51:55.235812 | 2018-09-11T20:53:56 | 2018-09-11T20:53:56 | 152,558,313 | 0 | 1 | null | 2018-10-11T08:34:33 | 2018-10-11T08:34:32 | null | UTF-8 | R | false | false | 5,458 | r | plot_coral_plots.R | #' Local Fit Plots / Wangkardu Explanations
#'
#' Function 'plot.local_fit_explainer' plots Local Fit Plots for a single prediction / observation.
#'
#' @param x a local fit explainer produced with the 'local_fit' function
#' @param ... other explainers that shall be plotted together
#' @param plot_residuals if TRUE (default) then residuals are plotted as red/blue bars
#' @param palette color palette. Currently the choice is limited to 'wangkardu' and 'default'
#'
#' @return a ggplot2 object
#' @export
#' @importFrom stats na.omit
#'
#' @examples
#' library("DALEX")
#' \dontrun{
#' library("randomForest")
#' set.seed(59)
#'
#' apartments_rf_model <- randomForest(m2.price ~ construction.year + surface + floor +
#' no.rooms + district, data = apartments)
#'
#' explainer_rf <- explain(apartments_rf_model,
#' data = apartmentsTest[,2:6], y = apartmentsTest$m2.price)
#'
#' new_apartment <- apartmentsTest[1, ]
#' new_apartment
#'
#' cr_rf <- local_fit(explainer_rf, observation = new_apartment,
#' select_points = 0.002, selected_variable = "surface")
#' plot(cr_rf, plot_residuals = FALSE)
#' plot(cr_rf)
#'
#' cr_rf <- local_fit(explainer_rf, observation = new_apartment,
#' select_points = 0.002, selected_variable = "surface")
#' plot(cr_rf, plot_residuals = FALSE, palette = "wangkardu")
#' plot(cr_rf, palette = "wangkardu")
#'
#' new_apartment <- apartmentsTest[10, ]
#' cr_rf <- local_fit(explainer_rf, observation = new_apartment,
#' select_points = 0.002, selected_variable = "surface")
#' plot(cr_rf, plot_residuals = FALSE)
#' plot(cr_rf)
#'
#' new_apartment <- apartmentsTest[302, ]
#' cr_rf <- local_fit(explainer_rf, observation = new_apartment,
#' select_points = 0.002, selected_variable = "surface")
#' plot(cr_rf, plot_residuals = FALSE)
#' plot(cr_rf)
#'
#' new_apartment <- apartmentsTest[720, ]
#' cr_rf <- local_fit(explainer_rf, observation = new_apartment,
#' select_points = 0.002, selected_variable = "surface")
#' plot(cr_rf, plot_residuals = FALSE)
#' plot(cr_rf)
#' }
plot.local_fit_explainer <- function(x, ..., plot_residuals = TRUE, palette = "default") {
all_responses <- x
class(all_responses) <- "data.frame"
all_predictions <- attr(x, "prediction")
# Wangkardu palette
selected_palette <- switch(palette,
wangkardu = list(light = "#f5ffea", dark = "#e43d19", medium = "#de8131", up = "#de8131", down = "#de8131", background = "#f6d288", alpha = 0.6),
list(light = "black", dark = "black", medium = "#de8131", up = "red3", down = "blue3", background = "white", alpha = 0.1)
)
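  # any value other than 'wangkardu' falls through to the unnamed default
  # (black lines with red/blue residual bars on a white background)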
predicted_y <- all_predictions[1, "predictions"]
predicted_x <- all_predictions[1, "x"]
vname <- all_responses[1, "vname"]
  # dummy bindings to silence R CMD check NOTEs about non-standard evaluation
new_x <- obs_id <- predictions <- y <- y_hat <- NULL
pl <- ggplot(na.omit(all_responses), aes(new_x, group = obs_id)) +
geom_line(aes(y = y_hat), alpha = selected_palette$alpha, color = selected_palette$light) +
geom_line(aes(y = y_hat), data = all_responses[all_responses$obs_id == 0, ], lwd = 1, color = selected_palette$dark)
if (plot_residuals) {
pl <- pl + geom_linerange(data = na.omit(all_predictions), aes(x, ymin = predictions, ymax = y, color = predictions > y), alpha = 0.5) +
geom_point(data = na.omit(all_predictions), aes(x, y, color = predictions > y), alpha = 0.5)
} else {
pl <- pl +
geom_point(data = all_predictions, aes(x, predictions), alpha = 0.5, color = selected_palette$dark)
}
pl <- pl +
geom_point(data = all_predictions[1,], aes(x, predictions), size = 6, color = selected_palette$dark) +
geom_point(data = all_predictions[1,], aes(x, predictions), size = 4, color = selected_palette$background) +
ylab("Predicted y") + xlab(vname) + theme(legend.position = "none") +
scale_color_manual(values = c("TRUE" = selected_palette$down, "FALSE" = selected_palette$up))
pl + if(palette == "wangkardu") theme_wangkardu(selected_palette) else theme_mi2()
}
theme_wangkardu <- function(selected_palette) {
theme(axis.ticks = element_line(linetype = "blank"),
axis.text = element_text(family = "sans", color = selected_palette$medium),
axis.title = element_text(family = "sans", color = selected_palette$medium),
plot.title = element_text(family = "sans", color = selected_palette$medium),
legend.text = element_text(family = "sans", color = selected_palette$medium),
legend.title = element_text(family = "sans", color = selected_palette$medium),
panel.background = element_rect(fill = selected_palette$background),
panel.grid.minor.x = element_line(linetype = "dotted", colour = selected_palette$background),
panel.grid.minor.y = element_line(linetype = "dotted", colour = selected_palette$medium),
panel.grid.major.x = element_line(linetype = "dotted", colour = selected_palette$background),
panel.grid.major.y = element_line(linetype = "dotted", colour = selected_palette$medium),
legend.position = "none",
plot.background = element_rect(fill = selected_palette$background, colour = selected_palette$background,
size = 0.8, linetype = "dotted"),
strip.background = element_rect(fill = selected_palette$background),
strip.text = element_text(family = "sans", color = selected_palette$medium))
}
|
ce6c37d90e23bafe66c5c9f736e282e9e8652fd2 | 2a1d3d7d0d75c48d26a3e5b581af9f5b5537f355 | /R/knit2byrokrates.R | 84ccd740872225f768374b058f0d801e4d4db679 | [
"MIT"
] | permissive | petrbouchal/pbtools | 4b85a423f06dfc55c25254f84ff121a102cc4db8 | b983ac639fc8b23e836c7edd55cdbf81a1164f74 | refs/heads/master | 2021-01-21T12:49:41.109594 | 2016-05-28T20:31:21 | 2016-05-28T20:31:21 | 19,662,881 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,306 | r | knit2byrokrates.R | #' Rmd to Byrokrates
#'
#' Upload Rmd document as post to Byrokrates WordPress blog
#' @param filename name of the Rmd file
#' @param title Title of blog post
#' @param publish FALSE for draft, TRUE to publish. Defaults to FALSE
#' @param preview Whether to open preview in browser upon publication. Defaults to TRUE
#' @keywords wordpress blog
#' @export
knit2byrokrates <- function (filename, title=paste('R blog',format(Sys.time(), "%d/%m/%Y %H:%M")),
publish=F, preview=T) {
if (!require('RWordPress'))
install.packages('RWordPress', repos = 'http://www.omegahat.org/R', type = 'source')
library(RWordPress)
password <- readline("enter password: ")
  # (do not echo the password to the console)
options(WordpressLogin = c(admin=password),
WordpressURL = 'http://byrokrates.cz/xmlrpc.php')
# opts_knit$set(base.url = 'http://dl.dropboxusercontent.com/u/1998974/wp/',
# base.dir = '~/Dropbox/Public/wp/')
library(knitr)
wordpress.url = function(file) {
require(RWordPress)
uploadFile(file)$url
}
opts_knit$set(upload.fun=wordpress.url)
result <- knit2wp(filename, title = title, publish = publish)
print(result)
previewurl <- paste0('http://byrokrates.cz?p=',result[1],'&preview=true')
if(preview) {browseURL(previewurl)}
}
|
70542ef5f46b8ec62d1c3aae2b8c0244f1afc58c | b869c19db780a472d017b16a49122317bbdab603 | /scripts/ceps_correios_2014.R | 9e78968b75c809111a00376e2589a5a2fe61d269 | [] | no_license | kl3ber/ibge | 4d8b15a5cadc8e724cea0863ff64fe1516a4f46d | 032b97d217e979b28d65a234652e0a404fc6b75d | refs/heads/master | 2020-04-23T03:42:28.666297 | 2019-03-07T19:53:44 | 2019-03-07T19:53:44 | 170,886,173 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 460 | r | ceps_correios_2014.R |
### Fill in missing CEP (postal code) ranges
ceps = read.csv('data/input/ceps_correios/cep_correios_gpbe_2014.csv', colClasses=c("numeric",rep("character",4)))
ceps = ceps[order(ceps$CEP_INICIO),]
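# After sorting by starting CEP, extend each range whose end does not meet the
# next range's start; the fix below reuses the next range's CEP_INICIO as the
# new CEP_FINAL, so adjacent ranges end up sharing that boundary value.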
for (i in seq_len(nrow(ceps) - 1)) {
if (as.integer(ceps[i, 'CEP_FINAL']) + 1 != as.integer(ceps[i+1, 'CEP_INICIO'])) {
ceps[i, 'CEP_FINAL'] = ceps[i+1, 'CEP_INICIO']
}
}
write.csv(ceps, file='data/output/ceps_correios.csv', row.names=F)
remove(ceps, i)
|
95dd7e88a7650cba632a442699498e9e3ef5bd7e | 52ccdf59cd5b37d2a19d525a2ca9473d05f705ee | /man/knnRec.Rd | db0706b2e6dc2f41646a5db2effeb6e82a06d6c0 | [] | no_license | hugokce/regtools | 140cb80a74e516516a5f090c66ad17e1dbb82e96 | b2ebcbd38f65e642733cdc68c88e7ca8f5f33e14 | refs/heads/master | 2023-04-20T11:16:57.632591 | 2021-05-09T04:23:36 | 2021-05-09T04:23:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,321 | rd | knnRec.Rd | \name{knnRec,anovaRec,mfRec,predictMany}
\alias{knnRec,anovaRec,mfRec,predictMany}
\alias{predict.knnRec,predict.anovaRec,predict.mfRec}
\title{Tools for Recommender Systems}
\description{Functions for collaborative filtering.}
\usage{
knnRec(ratings)
predict.knnRec(object, user, item, k, minMatch = 1)
anovaRec(ratingsDF,userCvrs=NULL,itemCvrs=NULL)
predict.anovaRec(object,user,item,userCvrVals=NULL,itemCvrVals=NULL)
mfRec(ratings,rnk=10,nmf=FALSE,niter=20,lambda=0)
predict.mfRec(object,user,item)
predictMany(object,newxs)
}
\arguments{
\item{ratings}{A data frame whose first 3 columns are user ID, item
ID and rating. All must be numeric, though they need not be
consecutive nor begin with 1.}
\item{ratingsDF}{As with \code{ratings}.}
\item{object}{An object returned by one of the training set
functions, such as \code{knnRec}.}
\item{user}{Numeric user ID}
\item{item}{Numeric item ID. In the case of \code{mfRec}, specifying
a negative number -m will result in the m top-rated items for this
user.}
\item{k}{Number of nearest neighbors.}
\item{minMatch}{Minimum number of nearest neighbors to be counted in
neighborhood mean.}
\item{userCvrs}{A character vector, listing column names of the user
covariates.}
\item{itemCvrs}{A character vector, listing column names of the item
covariates.}
\item{userCvrVals}{An R list, with components of the form covariate
name = covariate value.}
\item{itemCvrVals}{An R list, with components of the form covariate
name = covariate value.}
\item{rnk}{Matrix rank.}
\item{nmf}{If TRUE, use NMF instead of SVD.}
\item{niter}{Number of iterations.}
\item{lambda}{L1 regularizer.}
\item{newxs}{Data frame consisting of a user column and an item
column.}
}
\details{
Several methods for collaborative filtering.
\itemize{
\item The function \code{knnRec} uses a k-Nearest Neighbor method.
If we desire a rating for user u of item i, the method first finds
all users who rated i, then averages the ratings of i by the k
nearest of these users to u.
Requires a large dataset.
\item The function \code{anovaRec} gets its name from its ANOVA-like
decompositions, e.g.
E(Y_ij) = mu + alpha_i + beta_j + (alphabeta)_ij
where Y_ij is the rating user i gives item j. Only 2-way
interactions are allowed.
The function \code{anovaRec} allows for covariates ("side
information"). These may be useful, say, for prediction of new cases
having very little rating data.
Very fast, almost no memory usage.
\item The function \code{mfRec} uses the matrix factorization method.
One can also request multiple top-rated items.
In some cases, this will be the most accurate method. Slower, and
somewhat memory-intensive.
}
The function \code{knnRec} does the preparation work on the training
set. Prediction is then done via \code{predict.knnRec}, via the generic
function \code{predict}. Similarly for \code{anovaRec} and the others.
For \code{anovaRec}, the covariates are divided into user covariates and
item covariates. To see why, consider the famous MovieLens data, say
using Genre as a covariate. Since knowledge of the item/film implies
knowledge of the genre, interaction between item and covariate in this
case makes no sense, but a user/genre interaction may be reasonable.
At present, only categorical covariates are allowed. Use
\code{discretize} to convert continuous variables.
The \code{predict.*} functions operate on a single (user,item) pair. If
many new pairs are to be predicted, use the wrapper \code{predictMany}.
}
\value{
The function \code{knnRec} returns an object of class \code{'knnRec'},
to be used as input to \code{predict}. The output of the latter is
the predicted value. Similarly for the others.
}
\examples{
m <- rbind( c(5,2,1), c(5,4,1), c(2,2,2), c(2,4,4), c(5,3,6), c(2,1,1), c(2,5,2), c(3,4,3), c(2,3,5), c(3,2,1))
colnames(m) <- c('u','v','r')
m <- as.data.frame(m)
z <- knnRec(m)  # training step (assumed; omitted in the original example)
predict(z,3,1,1) # 1
predict(z,3,1,2) # 1, with warning that k was reduced to 1
predict(z,3,3,1) # 5
predict(z,3,3,2) # 5.5
m <- rbind( c(5,2,1), c(5,4,1), c(2,2,2), c(2,4,4), c(5,3,6), c(2,1,1), c(2,5,2), c(3,4,3), c(2,3,5), c(3,2,1))
m <- as.data.frame(m)
set.seed(9999)
m <- cbind(m,sample(5:10,10,replace=T)) # add a user covar
colnames(m) <- c('u','v','r','x')
obj <- anovaRec(m,'x')
# userCvrXEffects$x['3','8'] = 0.6
predict(obj,3,3) # 4.9
predict(obj,3,3,list(x=6)) # 3.9
m <- cbind(m,sample(1:2,10,replace=T))
colnames(m) <- c('u','v','r','x','y') # add an item covar
obj <- anovaRec(m,userCvrs='x',itemCvrs='y')
predict(obj,3,3,list(x=6),list(y=1)) # 9.1
\dontrun{
library(dslabs)
data(movielens)
ml <- movielens[,c(5,1,6,4)]
z <- sapply(1:nrow(ml),function(rw) length(grep('Action',ml[rw,4])) > 0)
ml$action <- as.integer(z) # dummy for Action genre
set.seed(9999)
tstidxs <- sample(1:nrow(ml),1000)
trn <- ml[-tstidxs,]
tst <- ml[tstidxs,]
anovaout <- anovaRec(ml,userCvrs='action')
predtst <- function(i)
predict(anovaout,tst[i,1],tst[i,2],list(action=tst[i,5]))
preds <- sapply(1:1000,predtst)
mean(abs(preds - tst[,3])) # 0.636
}
}
\author{
Norm Matloff
}
|
ba432f2c661af332634349969f62e1dc0bf6c47b | 0f104ea64886750d6c5f7051810b4ee39fa91ba9 | /inst/test-data/specific-redcapr/log-read/2021-07-11-record3-user.R | d6fa14d0753fd898da598c41b327c0daa259a84b | [
"MIT"
] | permissive | OuhscBbmc/REDCapR | 3ca0c106e93b14d55e2c3e678f7178f0e925a83a | 34f2154852fb52fb99bccd8e8295df8171eb1c18 | refs/heads/main | 2023-07-24T02:44:12.211484 | 2023-07-15T23:03:31 | 2023-07-15T23:03:31 | 14,738,204 | 108 | 43 | NOASSERTION | 2023-09-04T23:07:30 | 2013-11-27T05:27:58 | R | UTF-8 | R | false | false | 1,680 | r | 2021-07-11-record3-user.R | structure(list(timestamp = structure(c(1626044880, 1626044880,
1626044640, 1626044640, 1626044160, 1626044160, 1625968020, 1625968020,
1625967780, 1625967780, 1625967240, 1625967240), class = c("POSIXct",
"POSIXt"), tzone = "UTC"), username = c("unittestphifree", "unittestphifree",
"unittestphifree", "unittestphifree", "unittestphifree", "unittestphifree",
"unittestphifree", "unittestphifree", "unittestphifree", "unittestphifree",
"unittestphifree", "unittestphifree"), action = c("Manage/Design",
"Manage/Design", "Manage/Design", "Manage/Design", "Manage/Design",
"Manage/Design", "Manage/Design", "Manage/Design", "Manage/Design",
"Manage/Design", "Manage/Design", "Manage/Design"), details = c("Download file (API)",
"Download file (API)", "Download file (API)", "Download file (API)",
"Download file (API)", "Download file (API)", "Download file (API)",
"Download file (API)", "Download file (API)", "Download file (API)",
"Download file (API)", "Download file (API)"), record = c(NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)), row.names = c(NA,
-12L), spec = structure(list(cols = list(timestamp = structure(list(
format = ""), class = c("collector_datetime", "collector"
)), username = structure(list(), class = c("collector_character",
"collector")), action = structure(list(), class = c("collector_character",
"collector")), details = structure(list(), class = c("collector_character",
"collector")), record = structure(list(), class = c("collector_logical",
"collector"))), default = structure(list(), class = c("collector_guess",
"collector")), delim = ","), class = "col_spec"), class = c("spec_tbl_df",
"tbl_df", "tbl", "data.frame"))
|
ccac115ca5a0d8f41025049cbd44cced99d32085 | 1c03917b86f5e47c4bf954afce910ce439fd552b | /ongoing_studies/multiple_pdfs_to_tokens.R | b0cfd8fb8d0a4cf6e2060c9924d06f3894f9ee6a | [] | no_license | sameerpadhye/Personal-projects | d7da869de225c48ce7c7b3ece8663bc10042c55e | d04fc8bbe723d90559f64ea7c8f8ca01eae17c56 | refs/heads/master | 2022-07-31T03:30:24.966932 | 2022-07-11T15:25:25 | 2022-07-11T15:25:25 | 179,165,892 | 1 | 1 | null | 2020-03-05T15:54:40 | 2019-04-02T22:06:24 | R | UTF-8 | R | false | false | 770 | r | multiple_pdfs_to_tokens.R |
require(pdftools)
require(tidyverse)
require(purrr)
library(tm)
library(quanteda)
fraud_data_path<-"C:/Research_data/Research data/Other groups/Fraud Faunistics correspondence/Faunistics checklist fraud pprs/fraud_pprs"
good_data_path<-"C:/Research_data/Research data/Other groups/Fraud Faunistics correspondence/Faunistics checklist fraud pprs/good_pprs"
file_list_1 <- list.files(fraud_data_path, pattern = '\\.pdf$', full.names = TRUE)
file_list_1[1]
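# helper (defined before first use so the script runs top-to-bottom):
# extract a PDF's text, build a quanteda corpus from it, and tokenise,
# dropping punctuation and numbers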
crpus_to_tokens <- function(x) {
  x %>%
    pdftools::pdf_text(.) %>%
    quanteda::corpus(.) %>%
    quanteda::tokens(.,
                     remove_punct = TRUE,
                     remove_numbers = TRUE)
}
fraud_text <- purrr::map(file_list_1, crpus_to_tokens)
View(fraud_text[[1]])
|
23b21b5f7250ac639c695c1084d845009d6570fc | fa70b2a61f6c1e760151c3c04d0c7d55098231af | /man/NoData.Rd | dbdf3a9d1d814ec7fa54d1e28deedb6f1a52a88b | [] | no_license | Displayr/flipData | 383735ede9065ceec10b84b9d63a8bece6122257 | 9c24004e6e8dc70f856efc773a6a4a6f95db1489 | refs/heads/master | 2023-06-22T18:27:09.861659 | 2023-06-19T03:06:09 | 2023-06-19T03:06:51 | 59,714,768 | 0 | 4 | null | 2023-08-23T04:43:16 | 2016-05-26T02:54:37 | R | UTF-8 | R | false | true | 330 | rd | NoData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/missingdata.R
\name{NoData}
\alias{NoData}
\title{\code{NoData}}
\usage{
NoData()
}
\description{
Error thrown when the "Exclude cases with missing data" option was
selected but every case contains missing data, so no data is available
for use.
}
|
f2d1f59b93b654b807896347f412cc4eb0cedce8 | dcccc84b47137433c5695fd6e1bbbb2789196f5e | /alg/backups/1 - exploratory.R | 9ebc7a2fd23a34075a06768f32ef63401c46d9fe | [] | no_license | joaopdalbino/data_analysis_spotify | ed0a7a418c4f31b704e0da740ccd50420c8fd6d3 | 8e54e7c43903ff77030c127830b4b841a09674bf | refs/heads/master | 2022-12-18T15:35:26.818846 | 2020-09-28T11:58:04 | 2020-09-28T11:58:04 | 262,198,950 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,779 | r | 1 - exploratory.R | library("readxl")
library(stringr)
library(ggplot2)
source <- getwd()
results_path <- paste(source,"/results/", sep = "", collapse = NULL)
path <- paste(source,"/data/timelines.csv", sep = "", collapse = NULL)
data <- read.csv(path)
data$X...date <- as.Date(data$X...date , format = "%Y-%m-%d")
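# keep only observations after 1 May 2020 (the string literal below is
# coerced to Date for the comparison, since the column is now of class Date)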
data <- data[data$X...date > "2020-05-01",]
"SIMPLE PLOT STREAMS AND DATE"
png(filename=paste(results_path,"SIMPLE_PLOT_STREAMS_AND_DATE.png",sep = "", collapse = NULL)
)
plot_var <- plot(data$X...date, data$streams,
main= "Streams",
xlab= "Date",
ylab= "Streams",
col= "blue", pch = 19, cex = 0.4, lty = "solid", lwd = 2)
plot_var <- text(data$X...date, data$streams, labels=data$streams, cex= 0.5, pos=1)
dev.off()
"SIMPLE PLOT STREAMS AND DATE"
"SIMPLE PLOT LISTENERS AND DATE"
png(filename=paste(results_path,"SIMPLE_PLOT_LISTENERS_AND_DATE.png",sep = "", collapse = NULL)
)
plot_var <- plot(data$X...date, data$listeners,
main= "Listeners",
xlab= "Date",
ylab= "Listeners",
col= "blue", pch = 19, cex = 0.4, lty = "solid", lwd = 2)
plot_var <- text(data$X...date, data$listeners, labels=data$listeners, cex= 0.5, pos=1)
dev.off()
"SIMPLE PLOT LISTENERS AND DATE"
"SIMPLE PLOT FOLLOWRERS AND DATE"
png(filename=paste(results_path,"SIMPLE_PLOT_FOLLOWERS_AND_DATE.png",sep = "", collapse = NULL)
)
plot_var <- plot(data$X...date, data$followers,
main= "Followers",
xlab= "Date",
ylab= "Followers",
col= "blue", pch = 19, cex = 0.4, lty = "solid", lwd = 2)
plot_var <- text(data$X...date, data$followers, labels=data$followers, cex= 0.5, pos=1)
dev.off()
"SIMPLE PLOT FOLLOWRERS AND DATE"
"PLOT FOLLOWRERS AND LISTENERS"
png(filename=paste(results_path,"PLOT_FOLLOWERS_AND_LISTENERS.png",sep = "", collapse = NULL)
)
plot_var <- plot(data$followers, data$listeners,
main= "Listeners x Followers",
xlab= "Followers",
ylab= "Listeners",
col= "blue", pch = 19, cex = 0.4, lty = "solid", lwd = 2)
plot_var <- text(data$followers, data$listeners, labels=data$listeners, cex= 0.5, pos=1)
dev.off()
"PLOT FOLLOWRERS AND DATE"
"PLOT STREAMS AND LISTENERS"
png(filename=paste(results_path,"PLOT_STREAMS_AND_LISTENERS.png",sep = "", collapse = NULL)
)
plot_var <- plot(data$listeners, data$streams,
main= "Listeners x Streams",
xlab= "Listeners",
ylab= "Streams",
col= "blue", pch = 19, cex = 0.4, lty = "solid", lwd = 2)
plot_var <- text(data$listeners, data$streams, labels=data$streams, cex= 0.5, pos=1)
dev.off()
"PLOT FOLLOWRERS AND DATE"
|
60063ebcfbda352c0b7af4a7a0a8afda3259ec93 | 75dea6f2b5ac6ae68b40fcd84ecec5bd93d6b0b0 | /server_data_processor.R | 40f639a2bb76682d79650df19b1394a1b27998a5 | [] | no_license | shenjixiaodao/ChineseStock_Visualization_Analysis | a1f9eed0c47978fc191f0e77f48d1ed1f49df3f3 | b1db97e2fd46f0b9c66dcf3412a17d13199cfdfc | refs/heads/master | 2021-01-21T09:52:48.365160 | 2016-01-11T13:49:07 | 2016-01-11T13:49:07 | 47,862,056 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 126 | r | server_data_processor.R | # Top 10 shareholders
top10_shareholder = read.delim("top10_shareholder/HLD_Shareholders.txt",header = TRUE, colClasses = "character")
|
b14f9deaf3a915265b129964e4aa93fac93f057c | 7398b8fe21f917e6ec35cf111984e87650f1b199 | /r4ds_wrangle.r | e0ac29c0822d6b54e973634e7e5963c3164a268f | [] | no_license | JSA10/r4ds | a8eac7d550dfbab5b4de9cddbb8546f12f8e5d99 | a7580b75e7edabb36bd8bb7bcb44859a60d2cf5b | refs/heads/master | 2021-01-21T17:46:59.742104 | 2017-05-21T21:41:16 | 2017-05-21T21:41:16 | 91,989,043 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,905 | r | r4ds_wrangle.r | #r4ds chapter 9 wrangle
#
#10 Tibbles
#tibbles are an updated version of data frames, with a few tweaks that iron
# out old bugs from base R versions that are circa 20 years old and work better
# in tidyverse
library(tidyverse)
#coerce data frames into tibbles
as_tibble(iris)
#You can create a new tibble from individual vectors with tibble()
#tibble() will automatically recycle inputs of length 1, and allows you to
#refer to variables that you just created, as shown below.
tibble(x = 1:5, y = 1, z = x ^ 2 + y)
#> # A tibble: 5 × 3
#> x y z
#> <int> <dbl> <dbl>
#> 1 1 1 2
#> 2 2 1 5
#> 3 3 1 10
#> 4 4 1 17
#> 5 5 1 26
"""
If you’re already familiar with data.frame(), note that tibble() does much
less: it never changes the type of the inputs (e.g. it never converts strings
to factors!), it never changes the names of variables, and it never creates
row names.
It’s possible for a tibble to have column names that are not valid R variable
names, aka non-syntactic names. For example, they might not start with a letter,
or they might contain unusual characters like a space. To refer to these
variables, you need to surround them with backticks, `:
"""
tb <- tibble(
`:)` = "smile",
` ` = "space",
`2000` = "number"
)
tb
#> # A tibble: 1 × 3
#> `:)` ` ` `2000`
#> <chr> <chr> <chr>
#> 1 smile space number
"""
You’ll also need the backticks when working with these variables in other
packages, like ggplot2, dplyr, and tidyr.
"""
#TRIBBLES
"""
Another way to create a tibble is with tribble(), short for transposed tibble.
tribble() is customised for data entry in code: column headings are defined by
formulas (i.e. they start with ~), and entries are separated by commas. This
makes it possible to lay out small amounts of data in easy to read form.
"""
tribble(
~x, ~y, ~z,
#--|--|----
"a", 2, 3.6,
"b", 1, 8.5
)
#> # A tibble: 2 × 3
#> x y z
#> <chr> <dbl> <dbl>
#> 1 a 2 3.6
#> 2 b 1 8.5
#I often add a comment (the line starting with #), to make it really clear
#where the header is.
#tibbles vs. data frames
# printing and subsetting are main differences
#printing
#printing tibbles by name shows only first 10 rows and columns that fit on page
# = easier to work with big data
# also each column displays data type - feature borrowed by str
#when need to display more data
# 1. use print and control rows and cols with n and width
nycflights13::flights %>%
print(n = 10, width = Inf)
# 2. use options to set default print behaviour
# options(tibble.print_max = n, tibble.print_min = m)
# if there are more than n rows, print only the first m rows.
#Use options(dplyr.print_min = Inf) to always show all rows.
#Use options(tibble.width = Inf) to always print all columns, regardless of the
#width of the screen.
#You can see a complete list of options by looking at the package help with
#package?tibble.
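# e.g. a concrete sketch: whenever a tibble has more than 30 rows, print only
# the first 15
options(tibble.print_max = 30, tibble.print_min = 15)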
# 3. A final option is to use RStudio’s built-in data viewer to get a scrollable
#view of the complete dataset. This is also often useful at the end of a long
#chain of manipulations.
nycflights13::flights %>%
View()
# remember can use commands like head etc. to control amount displayed using
# view()
#subsetting
#use same tools as dataframes - $ and [[]] but tibbles are stricter
# - never do partial matching and will generate a warning if the column doesn't
# exist
# remember:
# [[]] matches name or position,
# $ only matches by name but less typing
df <- tibble(
x = runif(5),
y = rnorm(5)
)
df$x
#> [1] 0.434 0.395 0.548 0.762 0.254
df[["x"]]
#> [1] 0.434 0.395 0.548 0.762 0.254
# Extract by position
df[[1]]
# To use these in a pipe, you’ll need to use the special placeholder .:
df %>% .$x
#> [1] 0.434 0.395 0.548 0.762 0.254
df %>% .[["x"]]
#> [1] 0.434 0.395 0.548 0.762 0.254
## 10.4 Interacting with older code
"""
Some older functions don’t work with tibbles. If you encounter one of these
functions, use as.data.frame() to turn a tibble back to a data.frame:
"""
class(as.data.frame(tb))
#> [1] "data.frame"
"""
The main reason that some older functions don’t work with tibble is the
[ function. We don’t use [ much in this book because dplyr::filter() and
dplyr::select() allow you to solve the same problems with clearer code
(but you will learn a little about it in vector subsetting). With base R
data frames, [ sometimes returns a data frame, and sometimes returns a vector.
With tibbles, [ always returns another tibble.
"""
#10.5 Exercises
# come back to if want to practise
# also if want to go deeper check out: vignette("tibble")
## 11 Data import
# using readr package
"""
11.2 Getting started
Most of readr’s functions are concerned with turning flat files into data
frames:
read_csv() reads comma delimited files, read_csv2() reads semicolon separated
files (common in countries where , is used as the decimal place), read_tsv()
reads tab delimited files, and read_delim() reads in files with any delimiter.
read_fwf() reads fixed width files. You can specify fields either by their
widths with fwf_widths() or their position with fwf_positions(). read_table()
reads a common variation of fixed width files where columns are separated by
white space.
read_log() reads Apache style log files. (But also check out webreadr which
is built on top of read_log() and provides many more helpful tools.)
These functions all have similar syntax: once you’ve mastered one, you can use
the others with ease. For the rest of this chapter we’ll focus on read_csv().
Not only are csv files one of the most common forms of data storage, but once
you understand read_csv(), you can easily apply your knowledge to all the other
functions in readr.
"""
#The first argument to read_csv() is the most important: it’s the path to the
#file to read.
heights <- read_csv("data/heights.csv")
"""
When you run read_csv() it prints out a column specification that gives the
name and type of each column. That’s an important part of readr, which we’ll
come back to in parsing a file.
"""
#you can also supply an inline csv file. This is useful for experimenting with
#readr and for creating reproducible examples to share with others:
read_csv("a,b,c
1,2,3
4,5,6")
#> # A tibble: 2 × 3
#> a b c
#> <int> <int> <int>
#> 1 1 2 3
#> 2 4 5 6
#two cases where might not want first line to be columns:
# 1. Some metadata at top of the file
# skip = n to skip n lines
# comment = "#" to drop any lines that start with #
#
read_csv("The first line of metadata
The second line of metadata
x,y,z
1,2,3", skip = 2)
read_csv("# A comment I want to skip
x,y,z
1,2,3", comment = "#")
# 2. Data might not have column names
# use col_names = FALSE
read_csv("1,2,3\n4,5,6", col_names = FALSE)
# NOTE: "\n" is a convenient shortcut for adding a new line. More in strings
# or can pass col_names a character vector with new names
read_csv("1,2,3\n4,5,6", col_names = c("x", "y", "z"))
# can identify the values in file to be imported that represent missing values
read_csv("a,b,c\n1,2,.", na = ".")
#> # A tibble: 1 × 3
#> a b c
#> <int> <int> <chr>
#> 1 1 2 <NA>
"""
This is all you need to know to read ~75% of CSV files that you’ll encounter in
practice. You can also easily adapt what you’ve learned to read tab separated
files with read_tsv() and fixed width files with read_fwf().
To read in more challenging files, you’ll need to learn more about how readr
parses each column, turning them into R vectors.
"""
"""
11.2.1 Compared to base R
If you’ve used R before, you might wonder why we’re not using read.csv(). There
are a few good reasons to favour readr functions over the base equivalents:
They are typically much faster (~10x) than their base equivalents. Long running
jobs have a progress bar, so you can see what’s happening. If you’re looking
for raw speed, try data.table::fread(). It doesn’t fit quite so well into the
tidyverse, but it can be quite a bit faster.
They produce tibbles, they don’t convert character vectors to factors, use row
names, or munge the column names. These are common sources of frustration with
the base R functions.
They are more reproducible. Base R functions inherit some behaviour from your
operating system and environment variables, so import code that works on your
computer might not work on someone else’s.
"""
## section on parsing vectors is useful reading for understanding how readr
## works
"""
Using parsers is mostly a matter of understanding what’s available and how they
deal with different types of input. There are eight particularly important
parsers:
parse_logical() and parse_integer() parse logicals and integers respectively.
There’s basically nothing that can go wrong with these parsers so I won’t
describe them here further.
parse_double() is a strict numeric parser, and parse_number() is a flexible
numeric parser. These are more complicated than you might expect because
different parts of the world write numbers in different ways.
parse_character() seems so simple that it shouldn’t be necessary. But one
complication makes it quite important: character encodings.
parse_factor() creates factors, the data structure that R uses to represent
categorical variables with fixed and known values.
parse_datetime(), parse_date(), and parse_time() allow you to parse various
date & time specifications. These are the most complicated because there are
so many different ways of writing dates.
"""
#for now just need basics - use the import section as a reference as you go
read_csv()
# In order to allocate character types automatically readr uses a strategy of
# analyzing first 1000 rows and then 'guessing' the col type using certain rules
# worth knowing as can lead to problems in a couple special examples.
"""
The heuristic tries each of the following types, stopping when it finds a match:
logical: contains only “F”, “T”, “FALSE”, or “TRUE”.
integer: contains only numeric characters (and -).
double: contains only valid doubles (including numbers like 4.5e-5).
number: contains valid doubles with the grouping mark inside.
time: matches the default time_format.
date: matches the default date_format.
date-time: any ISO8601 date.
If none of these rules apply, then the column will stay as a vector of strings.
"""
write_csv(object, "filename.csv")
#NOTE: writing to a csv is good for compatibility of sharing (UTF-8 and
#date/time in ISO8601 format), but doesn't preserve r formatting and metadata)
#use rds format for caching / saving interim data that will work on again
"""
write_rds() and read_rds() are uniform wrappers around the base functions
readRDS() and saveRDS(). These store data in R’s custom binary format called
RDS
"""
write_rds(challenge, "challenge.rds")
read_rds("challenge.rds")
#11.6 Other types of data
"""
To get other types of data into R, we recommend starting with the tidyverse
packages listed below. They’re certainly not perfect, but they are a good
place to start. For rectangular data:
haven reads SPSS, Stata, and SAS files.
readxl reads excel files (both .xls and .xlsx).
DBI, along with a database specific backend (e.g. RMySQL, RSQLite, RPostgreSQL
etc) allows you to run SQL queries against a database and return a data frame.
For hierarchical data: use jsonlite (by Jeroen Ooms) for json, and xml2 for
XML. Jenny Bryan has some excellent worked examples at
https://jennybc.github.io/purrr-tutorial/examples.html.
For other file types, try the R data import/export manual and the rio package.
"""
### 12. TIDY DATA
#Recap
#There are three interrelated rules which make a dataset tidy:
#1. Each variable must have its own column.
#2. Each observation must have its own row.
#3. Each value must have its own cell.
"""
These three rules are interrelated because it’s impossible to only satisfy
two of the three. That interrelationship leads to an even simpler set of
practical instructions:
*****
BROADLY THERE ARE TWO JOBS TO DO TO MAKE DATA TIDY:
Put each dataset in a tibble.
Put each variable in a column.
*****
"""
"""
Why ensure that your data is tidy? There are two main advantages:
1. There’s a general advantage to picking one consistent way of storing data. If
you have a consistent data structure, it’s easier to learn the tools that work
with it because they have an underlying uniformity.
2. There’s a specific advantage to placing variables in columns because it allows
R’s vectorised nature to shine. As you learned in mutate and summary functions,
most built-in R functions work with vectors of values. That makes transforming
tidy data feel particularly natural.
dplyr, ggplot2, and all the other packages in the tidyverse are designed to
work with tidy data.
"""
#examples
# Compute rate per 10,000
table1 %>%
mutate(rate = cases / population * 10000)
# Compute cases per year
table1 %>%
count(year, wt = cases)
# Visualise changes over time
library(ggplot2)
ggplot(table1, aes(year, cases)) +
geom_line(aes(group = country), colour = "grey50") +
geom_point(aes(colour = country))
"""
The principles of tidy data seem so obvious that you might wonder if you’ll
ever encounter a dataset that isn’t tidy. Unfortunately, however, most data
that you will encounter will be untidy. There are two main reasons:
1. Most people aren’t familiar with the principles of tidy data, and it’s hard to
derive them yourself unless you spend a lot of time working with data.
2. Data is often organised to facilitate some use other than analysis. For example,
data is often organised to make entry as easy as possible.
This means for most real analyses, you’ll need to do some tidying. The first
step is always to figure out what the variables and observations are. Sometimes
this is easy; other times you’ll need to consult with the people who originally
generated the data. The second step is to resolve one of two common problems:
One variable might be spread across multiple columns.
One observation might be scattered across multiple rows.
Typically a dataset will only suffer from one of these problems; it’ll only
suffer from both if you’re really unlucky! To fix these problems, you’ll need
the two most important functions in tidyr: gather() and spread().
"""
# GATHER
# gather() used when you have columns that aren't distinct variables, likely to
# be categories within a variable that could be combined. So rather than
# having 1999, 2000, 2001 as columns they can all be 'gathered' into 1 column
# called Years
gather('colnames', key = new_var_name, value = new_value_name)
#e.g.
table4a
#> # A tibble: 3 × 3
#> country `1999` `2000`
#> * <chr> <int> <int>
#> 1 Afghanistan 745 2666
#> 2 Brazil 37737 80488
#> 3 China 212258 213766
table4a %>%
gather(`1999`, `2000`, key = "year", value = "cases")
#this gathers the columns 1999 and 2000 into two new columns labelled year and
#cases
"""
Hadley version
To tidy a dataset like this, we need to gather those columns into a new pair of
variables. To describe that operation we need three parameters:
1. The set of columns that represent values, not variables. In this example, those
are the columns 1999 and 2000.
2.The name of the variable whose values form the column names. I call that the
key, and here it is year.
3. The name of the variable whose values are spread over the cells. I call that
value, and here it’s the number of cases.
"""
# SPREAD
# spread() solves the opposite problem, use when an observation is spread
# across multiple rows
#e.g
table2
#> # A tibble: 12 × 4
#> country year type count
#> <chr> <int> <chr> <int>
#> 1 Afghanistan 1999 cases 745
#> 2 Afghanistan 1999 population 19987071
#> 3 Afghanistan 2000 cases 2666
#> 4 Afghanistan 2000 population 20595360
#> 5 Brazil 1999 cases 37737
#> 6 Brazil 1999 population 172006362
#> # ... with 6 more rows
"""
Need two parameters:
1. The column that contains variable names, the key column. Here, it’s type.
2. The column that contains values forms multiple variables, the value column.
Here it’s count.
"""
spread(table2, key = type, value = count)
#> # A tibble: 6 × 4
#> country year cases population
#> * <chr> <int> <int> <int>
#> 1 Afghanistan 1999 745 19987071
#> 2 Afghanistan 2000 2666 20595360
#> 3 Brazil 1999 37737 172006362
#> 4 Brazil 2000 80488 174504898
#> 5 China 1999 212258 1272915272
#> 6 China 2000 213766 1280428583
# Separating & Uniting
"""
separate() pulls apart one column into multiple columns, by splitting wherever
a separator character appears. Take table3:
"""
table3
#> # A tibble: 6 × 3
#> country year rate
#> * <chr> <int> <chr>
#> 1 Afghanistan 1999 745/19987071
#> 2 Afghanistan 2000 2666/20595360
#> 3 Brazil 1999 37737/172006362
#> 4 Brazil 2000 80488/174504898
#> 5 China 1999 212258/1272915272
#> 6 China 2000 213766/1280428583
# problem looks very similar to an Excel 'text to columns' job
table3 %>%
separate(rate, into = c("cases", "population"))
# defaults to splitting on any non alpha numeric character it finds
# can specify the separator using sep argument
table3 %>%
separate(rate, into = c("cases", "population"), sep = "/")
# sep = a regular expression (more in strings)
# BE CAREFUL WITH COL FORMAT
# Separate leaves the column type as it was originally by default. In this case
# the column was characters, so both cases and population have adopted
# in order to parse to a more appropriate type, use convert argument:
table3 %>%
separate(rate, into = c("cases", "population"), convert = TRUE)
"""
You can also pass a vector of integers to sep. separate() will interpret the
integers as positions to split at. Positive values start at 1 on the far-left
of the strings; negative value start at -1 on the far-right of the strings.
When using integers to separate strings, the length of sep should be one less
than the number of names in into.
"""
#e.g. separate the last two digits of each year. This make this data less tidy,
#but is useful in other cases, as you’ll see in a little bit.
table3 %>%
separate(year, into = c("century", "year"), sep = 2)
#> # A tibble: 6 × 4
#> country century year rate
#> * <chr> <chr> <chr> <chr>
#> 1 Afghanistan 19 99 745/19987071
#> 2 Afghanistan 20 00 2666/20595360
#> 3 Brazil 19 99 37737/172006362
#> 4 Brazil 20 00 80488/174504898
#> 5 China 19 99 212258/1272915272
#> 6 China 20 00 213766/1280428583
## Unite
"""
unite() is the inverse of separate(): it combines multiple columns into a
single column. You’ll need it much less frequently than separate(), but it’s
still a useful tool to have in your back pocket.
"""
# Uniting `table5` makes it tidy
#We can use unite() to rejoin the century and year columns that we created in
#the last example. That data is saved as tidyr::table5. unite() takes a data
#frame, the name of the new variable to create, and a set of columns to
#combine, again specified in dplyr::select() style:
table5 %>%
unite(new, century, year)
#> # A tibble: 6 × 3
#> country new rate
#> * <chr> <chr> <chr>
#> 1 Afghanistan 19_99 745/19987071
#> 2 Afghanistan 20_00 2666/20595360
#> 3 Brazil 19_99 37737/172006362
#> 4 Brazil 20_00 80488/174504898
#> 5 China 19_99 212258/1272915272
#> 6 China 20_00 213766/1280428583
#in this case also need sep argument as default is to place an underscore btw
#columns
# we don't want any separator so use ""
table5 %>%
unite(new, century, year, sep = "")
#12.5 Missing values
"""
Changing the representation of a dataset brings up an important subtlety of
missing values. Surprisingly, a value can be missing in one of two possible
ways:
1. Explicitly, i.e. flagged with NA.
2. Implicitly, i.e. simply not present in the data.
"""
#Let’s illustrate this idea with a very simple data set:
stocks <- tibble(
year = c(2015, 2015, 2015, 2015, 2016, 2016, 2016),
qtr = c( 1, 2, 3, 4, 2, 3, 4),
return = c(1.88, 0.59, 0.35, NA, 0.92, 0.17, 2.66)
)
"""
There are two missing values in this dataset:
The return for the fourth quarter of 2015 is explicitly missing, because the
cell where its value should be instead contains NA.
The return for the first quarter of 2016 is implicitly missing, because it
simply does not appear in the dataset.
One way to think about the difference is with this Zen-like koan: An explicit
missing value is the presence of an absence; an implicit missing value is the
absence of a presence.
"""
# making implicit missing values explicit
#
# 1. display in a table format
stocks %>%
spread(year, return)
#> # A tibble: 4 × 3
#> qtr `2015` `2016`
#> * <dbl> <dbl> <dbl>
#> 1 1 1.88 NA
#> 2 2 0.59 0.92
#> 3 3 0.35 0.17
#> 4 4 NA 2.66
#Because these explicit missing values may not be important in other
#representations of the data, you can set na.rm = TRUE in gather() to turn
#explicit missing values implicit:
stocks %>%
spread(year, return) %>%
gather(year, return, `2015`:`2016`, na.rm = TRUE)
#> # A tibble: 6 × 3
#> qtr year return
#> * <dbl> <chr> <dbl>
#> 1 1 2015 1.88
#> 2 2 2015 0.59
#> 3 3 2015 0.35
#> 4 2 2016 0.92
#> 5 3 2016 0.17
#> 6 4 2016 2.66
"""
Another important tool for making missing values explicit in tidy data is
complete():
"""
stocks %>%
complete(year, qtr)
#> # A tibble: 8 × 3
#> year qtr return
#> <dbl> <dbl> <dbl>
#> 1 2015 1 1.88
#> 2 2015 2 0.59
#> 3 2015 3 0.35
#> 4 2015 4 NA
#> 5 2016 1 NA
#> 6 2016 2 0.92
#> # ... with 2 more rows
#complete() takes a set of columns, and finds all unique combinations. It then
#ensures the original dataset contains all those values, filling in explicit
#NAs where necessary.
"""
There’s one other important tool that you should know for working with missing
values. Sometimes when a data source has primarily been used for data entry,
missing values indicate that the previous value should be carried forward:
"""
treatment <- tribble(
~ person, ~ treatment, ~response,
"Derrick Whitmore", 1, 7,
NA, 2, 10,
NA, 3, 9,
"Katherine Burke", 1, 4
)
"""
You can fill in these missing values with fill(). It takes a set of columns
where you want missing values to be replaced by the most recent non-missing
value (sometimes called last observation carried forward).
"""
treatment %>%
fill(person)
#> # A tibble: 4 × 3
#> person treatment response
#> <chr> <dbl> <dbl>
#> 1 Derrick Whitmore 1 7
#> 2 Derrick Whitmore 2 10
#> 3 Derrick Whitmore 3 9
#> 4 Katherine Burke 1 4
## CASE STUDY - pulling it all together
# WHO data case study a useful worked through example of tidying a dataset
# final code pipeline
who %>%
gather(code, value, new_sp_m014:newrel_f65, na.rm = TRUE) %>%
mutate(code = stringr::str_replace(code, "newrel", "new_rel")) %>%
separate(code, c("new", "var", "sexage")) %>%
select(-new, -iso2, -iso3) %>%
separate(sexage, c("sex", "age"), sep = 1)
"""
12.7 Non-tidy data
Before we continue on to other topics, it’s worth talking briefly about non-tidy
data. Earlier in the chapter, I used the pejorative term “messy” to refer to
non-tidy data. That’s an oversimplification: there are lots of useful and
well-founded data structures that are not tidy data. There are two main reasons
to use other data structures:
Alternative representations may have substantial performance or space
advantages.
Specialised fields have evolved their own conventions for storing data that
may be quite different to the conventions of tidy data.
Either of these reasons means you’ll need something other than a tibble (or
data frame). If your data does fit naturally into a rectangular structure
composed of observations and variables, I think tidy data should be your
default choice. But there are good reasons to use other structures; tidy data
is not the only way.
If you’d like to learn more about non-tidy data, I’d highly recommend this
thoughtful blog post by Jeff Leek:
http://simplystatistics.org/2016/02/17/non-tidy-data/
"""
|
f53adced6506896a261b244ac2ded604ea66c64a | 72f0630c4897b91a652f100bbfad383e76cf77ee | /man/statemove.Rd | 762dc0d743d48d6a2f954a3680857377ee6de4ed | [] | no_license | kosukeimai/fastLink | da540df1cc5fa5dee6e806678948b12893d159ad | 7da6aeb29cafa6810a10e7978ae1f443fbc4bcdf | refs/heads/master | 2022-10-21T04:49:13.475862 | 2022-10-10T16:27:05 | 2022-10-10T16:27:05 | 84,469,064 | 248 | 49 | null | 2019-02-19T13:57:40 | 2017-03-09T17:17:16 | R | UTF-8 | R | false | true | 395 | rd | statemove.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{statemove}
\alias{statemove}
\title{In-state mover rates by state}
\format{A dataframe containing 51 observations.}
\usage{
statemove
}
\description{
This dataset contains in-state mover rates by state,
for imputation where within-county mover rates are
not available.
}
\keyword{dataset}
|
c2a3e3b3669a643c28287d4d53cce050675e9f49 | aef4e3933589c2cf1faae1e3e326b44925ade7a4 | /R/compute.R | 313ebcc677e1142bc764ec55e3c56f72ea5a6a03 | [] | no_license | tanaylab/repguide | 79f1f9058ceefcdefeebd29a83df5843e0d5aaf1 | d38d36902eb773f9b5f6f83cb22e990f862ff660 | refs/heads/master | 2020-07-26T07:18:35.733929 | 2020-06-24T12:15:55 | 2020-06-24T12:15:55 | 208,576,067 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 17,633 | r | compute.R | #' Cluster guideRNAs
#'
#' Clusters guides based on their target binding profile
#'
#' @param guideSet guideSet containing guide mappings
#' @param min_Son Numeric from 0 through 1. Only considers genomic target binding sites above \code{min_Son} score.
#' @param n_clust Integer from 1 to 20. Number of clusters to group guides into. Passed to \code{cutree()} function.
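#' @param alpha Numeric. Penalty coefficient forwarded to the internal best-guide selection (\code{.selBestKmers}); larger values weigh off-target scores more heavily (description inferred from usage).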
#' @return guideSet object with clustered guides.
#' @examples
#' \dontrun{
#' gs <- createGuideSet(Hsapiens, tes = te_annotation_df)
#' gs <- addTargets(gs, targets = 'LTR13A')
#' gs <- addGuides(gs, guide_length = 16)
#' gs <- clustGuides(gs, min_Son = 0.25, n_clust = 10)
#' gs <- plotGuides(gs)
#' }
#' @seealso [addGuides()] and [plotGuides()]
#' @export
clustGuides <- function(guideSet,
min_Son = 0,
n_clust = 15,
alpha = 10)
{
  if(n_clust > 20) { stop('A maximum of 20 clusters is currently supported') }
message('Clustering kmers')
set.seed(guideSet@.seed)
kmers <- as_tibble(guideSet@kmers) %>% select(-matches('kmer_clust|te_clust'))
kmers_filt <-
kmers %>%
filter(Son > min_Son & on_target == 1) %>%
filter(valid)
if (nrow(kmers_filt) == 0) { stop ('No valid guides found, try relaxing selection parameters of addGuides function') }
if (length(unique(kmers_filt$kmer_id)) < n_clust) { stop ('Less valid guides than number of clusters') }
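  # build a guide x locus matrix: rows = kmer_id, columns = te_id, entries = Son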
mat_full <-
kmers_filt %>%
select(kmer_id, te_id, Son) %>%
.tidyToSparse()
#mat_full = log2(mat_full+1)
# mat_slim <- kmers %>%
# filter(on_target >= 0) %>%
# mutate(on_target = on_target * Sbind) %>%
# select(kmer_id, te_id, on_target) %>%
# tidyToSparse()
print(paste0('Clustering ', nrow(mat_full), ' kmers into ', n_clust, ' groups'))
#kmer_cors <- as.matrix(qlcMatrix::cosSparse(t(mat_full)))
#kmer_cors <- tgs_cor(as.matrix(t(mat_full)), spearman = TRUE)
kmer_clusts <- tibble(kmer_id = as.numeric(rownames(mat_full)),
#kmer_clust = as.numeric(cutree(fastcluster::hclust(tgstat::tgs_dist(kmer_cors), 'ward.D2'), n_clust)))
kmer_clust = skmeans::skmeans(mat_full, k = n_clust, method = 'pclust')$cluster)
if (ncol(mat_full) > 2e4)
{
message ('Downsampling the matrix')
#vars <- matrixStats::colVars(mat_full)
vars <- apply(mat_full, 2, var)
mat_full <- mat_full[, tail(order(vars), 2e4)]
} else {
mat_full <- mat_full
}
print(paste0('Clustering ', ncol(mat_full), ' loci into ', n_clust, ' groups'))
#loci_cors <- as.matrix(qlcMatrix::cosSparse(mat_full))
loci_clusts <- tibble(te_id = as.numeric(colnames(mat_full)),
#te_clust = as.numeric(cutree(fastcluster::hclust(tgstat::tgs_dist(loci_cors), 'ward.D2'), n_clust)))
te_clust = skmeans::skmeans(t(mat_full), k = n_clust, method = 'pclust')$cluster)
kmers <- left_join(kmers, kmer_clusts, by = 'kmer_id') %>% left_join(., loci_clusts, by = 'te_id')
guideSet@kmers$kmer_clust <- kmers$kmer_clust
guideSet@kmers$te_clust <- kmers$te_clust
guideSet <- .selBestKmers(guideSet, alpha = alpha)
return(guideSet)
}
.compGuideScores <- function(guideSet,
                             blacklist_penalty = 10) # Sbind can be interpreted as the likelihood of binding to a site (0 - 1)
{
print('Computing guide scores')
kmers <- as_tibble(guideSet@kmers) %>% select(-matches('Sbind'))
PAM <- guideSet@PAM
guide_length <- guideSet@guide_length # + nchar(PAM)
max_mismatches <- max(kmers$n_mismatches)
###################
# Calculate Sbind #
###################
if (max_mismatches > 0)
{
mismatch_universe <- .compMismatchUniverse(guide_length, max_mismatches) # guidelength must be 1 based
mismatch_universe_scored <- .compMismatchScore(mismatch_universe, guide_length) # guidelength must be 1 based
# get kmer mismatches
mismatches <- stringr::str_extract_all(kmers$mismatches, '[0-9]+', simplify = TRUE) # could be parallelized!, quite fast already
    colnames(mismatches) <- c('first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth')[1:ncol(mismatches)] # labels must match .compMismatchUniverse() for the join below
# add mismatches to kmer data.frame
kmers <- bind_cols(kmers,
(as_tibble(mismatches) %>% mutate_all(as.numeric)))
# add mismatch score (Sbind), could be improved with data.table
kmers <-
suppressMessages(left_join(kmers,
mismatch_universe_scored)) %>%
mutate(Sbind = ifelse(n_mismatches == 0, 1, Sbind))
} else {
kmers$Sbind <- 1
}
###################
# Calculate Scis #
###################
# Compute cis score (Boltzmann sigmoid)
.scoreCis <- function(x, slope = 5, midpoint = 2000)
{
s <- 1 + ((0.01 - 1) / (1 + exp((midpoint - x)/(slope * 100)))) # Boltzmann
#s <- pmin(100, s)
return(s)
}
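  # Illustrative values under the defaults (slope = 5, midpoint = 2000):
  # .scoreCis(0) ~ 0.98, .scoreCis(2000) ~ 0.51, .scoreCis(4000) ~ 0.03,
  # i.e. the score decays towards the 0.01 floor for large cis distances.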
kmers$Scis = .scoreCis(kmers$cis_dist)
kmers <- kmers %>% mutate(Scis = ifelse(is.na(Scis), 0.01, Scis))
#######################################
# Compute Soff
kmers$Soff <- kmers$Sbind * kmers$Scis
kmers <-
kmers %>%
mutate(Soff = ifelse(on_target >= 0, 0, Soff),
Soff = ifelse(blacklisted, Soff * blacklist_penalty, Soff),
Soff = ifelse(is.na(Soff), 0, Soff), # if Soff(0) x blacklist_penalty(Inf) = NaN
Soff = ifelse(whitelisted, 0, Soff))
#kmers$Soff <- round(kmers$Soff, 2)
# Compute Son
kmers$Son <- kmers$Sbind
kmers <- kmers %>% mutate(Son = ifelse(on_target <= 0, 0, Son))
#kmers$Son <- round(kmers$Son, 2)
# Add results to guideSet
guideSet@kmers$Sbind <- kmers$Sbind
guideSet@kmers$Scis <- kmers$Scis
guideSet@kmers$Soff <- kmers$Soff
guideSet@kmers$Son <- kmers$Son
return(guideSet)
}
.compMismatchUniverse <- function(guide_length, max_mismatches)
{
  column_labels <- c('first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth')
liste <- list()
for (n_mism in 1:max_mismatches)
{
liste[[n_mism]] <-
combn(0:(guide_length -1), n_mism) %>%
t %>%
as.data.frame
}
mismatch_universe <- as_tibble(do.call(bind_rows, liste))
colnames(mismatch_universe) <- column_labels[1:max_mismatches]
return(mismatch_universe)
}
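# Example (illustrative): .compMismatchUniverse(guide_length = 4, max_mismatches = 2)
# enumerates every 1- and 2-mismatch position combination over positions 0..3,
# returning a tibble with columns 'first' and 'second'.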
.compMismatchScore <- function(mismatch_universe, guide_length)
{
guide_length <- guide_length - 1
# Formula adapted from Breaking-Cas: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4987939/
.scoring <- function(weight, d, m, guide_length)
{
s <- weight * (1 / (((guide_length - d) / guide_length) * 1 + 1)) * (1 / (m * m))
return(s)
}
# clean data.frame in case of repeated usage
mismatch_universe <- mismatch_universe %>% select(-which(colnames(mismatch_universe) %in% c('m', 'max', 'min', 'd', 'weight', 'Sbind')))
# total mismatches
mismatch_universe$m <- rowSums(!is.na(mismatch_universe %>% select(-which(colnames(mismatch_universe) %in% c('m')))))
# approximate of average pairwise distance between mismatches
mismatch_universe <-
mismatch_universe %>%
mutate(max = apply(mismatch_universe[, !colnames(mismatch_universe) %in% 'm'], 1, max, na.rm = TRUE),
min = apply(mismatch_universe[, !colnames(mismatch_universe) %in% 'm'], 1, min, na.rm = TRUE),
d = ifelse(m > 1, (max - min) / (m - 1), 0))
# custom weighting function based on position in guide
mismatch_universe <-
mismatch_universe %>%
mutate(weight = apply(mismatch_universe[, !colnames(mismatch_universe) %in% c('m', 'max', 'min', 'd', 'weight')], 1, function(x)
{
x <- 1 - x / (guide_length + 1)
weight <- prod(x, na.rm = TRUE)
})
)
mismatch_universe <-
mismatch_universe %>%
mutate(Sbind = .scoring(weight, d, m, guide_length)) %>%
select(-m, -max, -min, -d, -weight)
return(mismatch_universe)
}
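# Sketch of how the two helpers combine (not run here; values illustrative):
# universe <- .compMismatchUniverse(guide_length = 20, max_mismatches = 3)
# scored <- .compMismatchScore(universe, guide_length = 20)
# 'scored' is then joined onto the kmer table by the shared mismatch position
# columns to assign each hit its Sbind penalty.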
.compCombinations <- function(guideSet,
n_guides_max = 5,
method = 'cluster')
{
if (method == 'cluster')
{
if (is.null(guideSet@kmers$best)) { stop ('No best Kmer selection found') }
kmers <- as_tibble(guideSet@kmers)
kmers_best_per_clust <- kmers %>% filter(best) %>% pull(kmer_id) %>% unique
# Calc all combinations for up to max guides
combinatorics_df <-
lapply(
sapply(1:n_guides_max, function(x)
{
a <- combn(kmers_best_per_clust, x); b <- as_tibble(a); return(b)
}),
function(y)
{
y %>% gather('combi_id', 'kmer_id') %>% mutate(n_guides = nrow(y))
}) %>%
do.call(rbind, .) %>%
mutate(combi_id = paste0(n_guides, combi_id)) %>%
data.table::as.data.table(key = 'kmer_id')
# ===========================================
kmers_slim <- # max Son/Soff per kmer and unique_id
kmers %>%
filter(kmer_id %in% kmers_best_per_clust) %>%
select(kmer_id, unique_id, Son, Soff) %>%
distinct %>%
data.table::as.data.table(key = 'kmer_id')
# Calculate on/off targets per combo
combination_hits <- kmers_slim[combinatorics_df, on = 'kmer_id', allow.cartesian = TRUE, nomatch = 0]
combination_stats <- combination_hits[, .(Son = max(Son),
Soff = max(Soff)),
by = c('combi_id', 'unique_id')
][, .(Son_tot = sum(Son),
Soff_tot = sum(Soff),
on_tot = length(unique_id[Son > 0]),
off_tot = length(unique_id[Soff > 0])),
by = 'combi_id'] %>%
as_tibble %>%
left_join(., combinatorics_df, by = 'combi_id') %>%
arrange(n_guides) %>%
nest(kmer_id, .key = 'kmer_id') %>%
mutate(enr = ifelse(Soff_tot == 0, (Son_tot) / (Soff_tot + 0.01), Son_tot / Soff_tot)) # adding a pseudo-count to avoid Inf)
# ===================================
guideSet@combinations <- combination_stats
}
  if (method == 'rank')
  {
    # Greedy rank-based selection. NOTE: this branch was incomplete in the
    # original (it referenced undefined 'kmers', 'n_guides' and
    # 'max_off_target'); the definitions below are assumptions that make it
    # runnable. Its result (kmers_final) is not yet written back to
    # guideSet@combinations.
    kmers <- as_tibble(guideSet@kmers)
    max_off_target <- Inf # assumption: no off-target ceiling by default
    a <- kmers
    kmers_best <- list()
    for (i in 1:n_guides_max)
    {
      best_kmer <-
        a %>%
        count(kmer_id, on_target) %>%
        spread(on_target, n, fill = 0, sep = '_') %>%
        filter(on_target_FALSE <= max_off_target) %>%
        top_n(1, on_target_TRUE) %>%
        top_n(-1, on_target_FALSE) %>%
        sample_n(1) %>%
        pull(kmer_id)
      covered_loci <- a %>% filter(kmer_id == best_kmer & on_target) %>% pull(te_id)
      a <-
        a %>%
        mutate(on_target = ifelse(te_id %in% covered_loci, FALSE, on_target))
      kmers_best[[i]] <- best_kmer
      message('Selected guide ', i, ' of ', n_guides_max)
    }
    kmers_final <- unlist(kmers_best)
  }
return(guideSet)
}
# Check multi-processor performance
# Consider providing a substitution matrix
.compMSA <- function(seqs,
max_gap_freq = 0.8,
iterations,
refinements,
kmer_length = 5,
n_clust = 10,
clust_perc = 2,
seed = 19) # ultrafast but rough MSA
{
set.seed(seed)
  if (inherits(seqs, 'DNAStringSet'))
{
if (is.null(names(seqs)))
{
names(seqs) <- 1:length(seqs)
}
}
  if (inherits(seqs, 'GRanges'))
{
seqs_gr <- seqs
seqs <- seqs_gr$seq
names(seqs) <- seqs_gr$te_id
}
if (length(seqs) > 10000) { seqs <- sample(seqs, 10000) }
seqs <- DNAStringSet(seqs)
seqs <- seqs[width(seqs) < 9e4]
seqs_bin <- ape::as.DNAbin(seqs)
clusts <-
tibble(te_id = names(seqs),
clust = skmeans::skmeans(kmer::kcount(seqs_bin, k = kmer_length), k = n_clust, method = 'pclust')$cluster) %>%
add_count(clust, name = 'clust_size')
# sample ids per clust proportional to clust size
ids_sel <-
clusts %>%
nest(te_id) %>%
mutate(n_samp = ceiling(clust_size / 100 * clust_perc),
samp = purrr::map2(data, n_samp, sample_n)) %>%
select(clust, samp) %>%
unnest %>%
pull(te_id)
message(paste0('Aligning ', length(ids_sel), ' sequences'))
alignment <- DECIPHER::AlignSeqs(seqs[ids_sel],
#processors = NULL,
iterations = iterations,
refinements = refinements,
useStructures = FALSE,
verbose = FALSE)
alignment_wo_gaps <- .rmGaps(alignment, max_gap_freq = max_gap_freq)
return(alignment_wo_gaps)
}
.compGreedy <- function(guideSet,
alpha = 1,
iterations = 10)
{
  if (!is.finite(alpha)) { stop ('Alpha must be a finite number') }
kmers <- as_tibble(guideSet@kmers) %>% filter(valid) %>% mutate(kmer_id = as.character(kmer_id))
n_kmers_tot <- length(unique(kmers$kmer_id))
combinations <- guideSet@combinations %>% unnest
max_n_guides <- max(combinations$n_guides)
if (max_n_guides > 1)
{
# Create the score matrix
kmers <- as.data.table(kmers)
mat <- kmers[, .(score = max(Son, Soff)), by = c('unique_id', 'kmer_id')] %>% .tidyToSparse
on_indeces <- which(rownames(mat) %in% unique(kmers$unique_id[kmers$on_target == 1]))
off_indeces <- which(rownames(mat) %in% unique(kmers$unique_id[kmers$on_target == -1]))
message('Running greedy optimization on ', nrow(mat), ' x ', ncol(mat), ' dimensional matrix')
report <- foreach::foreach (nguides = 2:max_n_guides, .combine = rbind) %dopar%
{
if (nguides < n_kmers_tot)
{
set.seed(guideSet@.seed)
# Get current best combination and calc stats
kmers_best <- combinations %>% filter(n_guides == nguides & best) %>% pull(kmer_id) %>% as.character()
#kmers_best <- sample(rownames(mat), nguides)
score_onoff_best <- qlcMatrix::rowMax(mat[, kmers_best])
score_on_best <- sum(score_onoff_best[on_indeces])
score_off_best <- sum(score_onoff_best[off_indeces])
# create template df
df <- tibble(iterations = 1:iterations,
Son_tot = score_on_best,
Soff_tot = score_off_best,
n_guides = nguides,
kmer_id = list(kmers_best))
# Run greedy search
for (i in 1:iterations)
{
message(nguides, ' guides greedy iteration: ', i)
# Throw one kmer randomly
kmers_subs <- sort(sample(kmers_best, length(kmers_best) - 1))
# Score kmer subset
kmers_subs_score <- if (length(kmers_subs) == 1) { mat[, kmers_subs] } else { qlcMatrix::rowMax(mat[, kmers_subs]) }
# Score delta against all kmers
score_delta <- mat[, !colnames(mat) %in% kmers_subs] - kmers_subs_score
attr(score_delta, 'x')[attr(score_delta, 'x') < 0] <- 0
# Find and add best other kmer
if (is.null(ncol(score_delta))) # means that only 1 guide to chose from is left
{
score_on_delta <- sum(score_delta[on_indeces])
score_off_delta <- sum(score_delta[off_indeces])
} else {
score_on_delta <- Matrix::colSums(score_delta[on_indeces, ])
score_off_delta <- Matrix::colSums(score_delta[off_indeces, ])
}
kmers_new <- c(kmers_subs, names(sort(score_on_delta - score_off_delta * alpha, decreasing = TRUE)[1])) # add best kmer
#kmers_new <- c(kmers_subs, colnames(mat)[which.max(score_on_delta)])
# Score new subset
score_onoff_new <- qlcMatrix::rowMax(mat[, kmers_new])
score_on_new <- sum(score_onoff_new[on_indeces])
score_off_new <- sum(score_onoff_new[off_indeces])
# Update kmers if optimized
          if ((score_on_new - score_off_new * alpha) > (score_on_best - score_off_best * alpha)) # '-' matches the selection criterion above
{
kmers_best <- kmers_new
score_on_best <- score_on_new
score_off_best <- score_off_new
}
# Update results df
df[i, 'Son_tot'] <- score_on_best
df[i, 'Soff_tot'] <- score_off_best
df[i, 'kmer_id'][[1]][[1]] <- kmers_best
}
return(df)
}
}
#report %>% ggplot(aes(iterations, Son_tot)) + geom_point() + facet_wrap(~n_guides, scales = 'free')
# format report to match combinations for binding
greedy_res <-
report %>%
unnest %>%
group_by(iterations, n_guides) %>%
mutate(on_tot = sum(Matrix::rowSums(mat[on_indeces, kmer_id] != 0)!=0),
off_tot = sum(Matrix::rowSums(mat[off_indeces, kmer_id] != 0)!=0)) %>%
ungroup %>%
mutate(kmer_id = as.double(kmer_id),
enr = ifelse(Soff_tot == 0, (Son_tot) / (Soff_tot + 0.01), Son_tot / Soff_tot),
best = iterations == max(iterations),
combi_id = paste0(n_guides, 'V', iterations, '_greedy'))
# Update combinations
combinations <-
combinations %>%
filter(!best | n_guides == 1) %>%
bind_rows(., greedy_res) %>%
nest(kmer_id, .key = 'kmer_id')
}
guideSet@combinations <- combinations
return(guideSet)
}
|
d2d101c464137f111463d26cf38e363ecc6e4ccb | 3d6cb42436c103239398ed3ee3c3f7cdf9b6daf9 | /r/IBIname_match.r | 8ab667a5f8f9443f46743c6a442d9c26f8f214d0 | [] | no_license | mengeln/ibiwebapp | cdc9d54b2fbf89df6dc8995bccd8d9faef17f3ee | 03abeeac07f2c033463fb5911683f0f7e0068ea5 | refs/heads/master | 2021-01-16T21:00:53.945903 | 2012-08-28T21:07:32 | 2012-08-28T21:07:32 | 5,334,848 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,084 | r | IBIname_match.r | IBIname_match <- function(data, DistinctCode=F){
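  # Example usage (illustrative; file and column names are assumptions):
  # bugs <- read.csv("bug_data.csv") # must contain FinalID, BAResult, SampleID
  # matched <- IBIname_match(bugs, DistinctCode = TRUE)
  # (DistinctCode = TRUE additionally requires a DistinctCode column)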
colnames(data)[which(colnames(data) == "FinalID")] <- "Taxa"
colnames(data)[which(colnames(data) == "BAResult")] <- "Result"
data <- data[which(!is.na(data$Result)), ]
load("../Data/ibi.RData")
require(plyr)
ibi <- idata.frame(ibi)
load("../Data/taxonomy.RData")
taxonomy <- idata.frame(taxonomy)
###Convert FinalID to SAFIT1###
data$SAFIT <- rep(NA, length(data$Taxa))
data$SAFIT <- ibi$SAFIT1[match(data$Taxa, ibi$FinalID)]
data$SAFIT[is.na(data$SAFIT)] <- "Missing"
###Fix extra spaces
data$Taxa <- as.character(data$Taxa)
complex <- grep("Group", data$Taxa[data$SAFIT == "Missing"])
extraspace <- data$Taxa[intersect(which(data$SAFIT == "Missing"), which(!(data$Taxa %in% (data$Taxa[data$SAFIT == "Missing"][complex]))))]
data$Taxa[intersect(which(data$SAFIT == "Missing"), which(!(data$Taxa %in% (data$Taxa[data$SAFIT == "Missing"][complex]))))] <-
gsub("(\\w+)\\s+$", "\\1", extraspace)
data$SAFIT <- ibi$SAFIT1[match(data$Taxa, ibi$FinalID)]
data$SAFIT[is.na(data$SAFIT)] <- "Missing"
###Fix extra caps###
cap1 <- gsub("(^\\w)[[:alnum:][:space:]]+", "\\1", data$Taxa[data$SAFIT == "Missing"])
cap2 <- gsub("(^\\w)(\\w+)", "\\2", data$Taxa[data$SAFIT == "Missing"])
data$Taxa[data$SAFIT == "Missing"] <- paste0(cap1, tolower(cap2))
data$SAFIT <- ibi$SAFIT1[match(data$Taxa, ibi$FinalID)]
data$SAFIT[is.na(data$SAFIT)] <- "Missing"
###Label FinalIDs that match SAFIT1 as distinct###
data$distinct <- rep(NA, length(data$Taxa))
#data$distinct[which(data$Taxa == data$SAFIT)] <- "Distinct"
data$distinct[data$SAFIT == "Missing"] <- "Missing"
###Determine whether the rest of the FinalIDs are distinct###
todetermine <- as.character(data$SAFIT[which(is.na(data$distinct))])
todetermine <- as.data.frame(cbind(as.character(data$SampleID[which(is.na(data$distinct))]), todetermine))
todeterminebystation <- tapply(as.character(todetermine$todetermine), todetermine$V1, list)
for(j in 1:length(todeterminebystation)){
data$distinct[intersect(which(data$SampleID %in% names(todeterminebystation[j])), which(data$SAFIT %in% todeterminebystation[[j]]))] <- sapply(1:length(todeterminebystation[[j]]), function(i, determine, tax){
index <- which(tax$FinalID == todeterminebystation[[j]][i])
level <- as.numeric(tax[index, "TaxonomicLevelCode"])
levelname <- tax[index, "TaxonomicLevelName"]
if(level >= 60){"Distinct"} else{
criteron1 <- which(taxonomy$FinalID %in% todeterminebystation[[j]][which(!(unlist(todeterminebystation[j]) %in% unlist(todeterminebystation[[j]][i])))])
criteron2 <- which(tax[criteron1, levelname] == tax[index, levelname])
criteron3 <- which(tax[criteron2, "TaxonomicLevelCode"] > tax[index, "TaxonomicLevelCode"])
      if(length(criteron3) > 0){"Non-distinct"} else
{"Distinct"}}}, determine=todeterminebystation[[j]], tax=taxonomy)}
if(DistinctCode == T){
    data[data$distinct == "Non-distinct" & data$DistinctCode == "Yes", "distinct"] <- "Distinct" # label must match the assignment above
}
return(data)
} |
4f5597665388371c38b9e967675281b589403303 | fe3800d2b7553cec61c1a704a279d00e55a08a44 | /scripts/generate_scenarios.R | 5a382753b7ff43b1a0f7f379fa3edbd44f98c1d2 | [] | no_license | femeunier/LianaAlbedo | 07d8384342d3b2ee305103c9d848b10efe0df011 | fda0c6b1bac39556798c1dff717d3e80916867ad | refs/heads/master | 2023-01-29T12:00:56.463546 | 2020-12-09T13:26:49 | 2020-12-09T13:26:49 | 319,947,869 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,316 | r | generate_scenarios.R | rm(list = ls())
library(ED2scenarios)
library(PEcAn.ED2)
library(purrr)
ref_dir <- "/data/gent/vo/000/gvo00074/pecan/output/other_runs/albedo"
ed2in <- read_ed2in(file.path(ref_dir,"ED2IN"))
# No -T- Files
ed2in$ITOUTPUT <- 0
rundir <- "/data/gent/vo/000/gvo00074/pecan/output/other_runs/albedo/run"
outdir <- "/data/gent/vo/000/gvo00074/pecan/output/other_runs/albedo/out"
if(!dir.exists(rundir)) dir.create(rundir)
if(!dir.exists(outdir)) dir.create(outdir)
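# Workflow: build one reference ED2 run plus four liana scenarios (no_liana,
# liana_RTM, liana_other, liana_all), each with its own run/out directories,
# ED2IN, config.xml and job.sh, then a master submission script at the end.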
##############################################################################
# Default
PREFIX_XML <- "<?xml version=\"1.0\"?>\n<!DOCTYPE config SYSTEM \"ed.dtd\">\n"
defaults <- list_dir <- list()
# Default settings
settings <- list(model = list(revision = "git",
config.header = NULL),
pfts = list(pft = list(num = 2,
ed2_pft_number = 2,
name = "Early"),
pft = list(num = 3,
ed2_pft_number = 3,
name = "Mid"),
pft = list(num = 4,
ed2_pft_number = 4,
name = "Late"),
pft = list(num = 17,
ed2_pft_number = 17,
name = "Liana")))
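# Leaf optical and canopy structure parameters (suffix _T = trees, _L = lianas)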
leaf_trans_vis_T <- 0.01
leaf_trans_nir_T <- 0.22
leaf_reflect_vis_T <- 0.0516
leaf_reflect_nir_T <- 0.299
orient_factor_T <- -0.1
clumping_factor_T <- 0.6
leaf_trans_vis_L <- 0.0244
leaf_trans_nir_L <- 0.244
leaf_reflect_vis_L <- 0.0665
leaf_reflect_nir_L <- 0.300
orient_factor_L <- 0.4
clumping_factor_L <- 0.7
# Default config
config <- list()
config[["Early"]] <- unlist(list(clumping_factor = clumping_factor_T,
seedling_mortality = 0.98,
Vcmax = 16*2.4,
Vm0 = 16*2.4,
leaf_trans_vis = leaf_trans_vis_T,
leaf_trans_nir = leaf_trans_nir_T,
leaf_reflect_vis = leaf_reflect_vis_T,
leaf_reflect_nir = leaf_reflect_nir_T,
orient_factor = orient_factor_T))
config[["Mid"]] <- unlist(list(clumping_factor = clumping_factor_T,
Vcmax = 13*2.4,
Vm0 = 13*2.4,
leaf_trans_vis = leaf_trans_vis_T,
leaf_trans_nir = leaf_trans_nir_T,
leaf_reflect_vis = leaf_reflect_vis_T,
leaf_reflect_nir = leaf_reflect_nir_T,
orient_factor = orient_factor_T))
config[["Late"]] <- unlist(list(clumping_factor = clumping_factor_T,
Vcmax = 4.5*2.4,
Vm0 = 4.5*2.4,
wood_Kmax = 0.008,
leaf_trans_vis = leaf_trans_vis_T,
leaf_trans_nir = leaf_trans_nir_T,
leaf_reflect_vis = leaf_reflect_vis_T,
leaf_reflect_nir = leaf_reflect_nir_T,
orient_factor = orient_factor_T))
config[["Liana"]] <- unlist(
list(
rho = 0.462893312003502,
wood_Kexp = 2.06151664261015,
Vcmax = 21.0195095978388 * 2.4,
wood_Kmax = 0.118592088619329,
wood_water_cap = 0.00831146542859373*1000,
b1Rd = 0.251705611238744,
b2Rd = 0.251058588541278,
wood_psi50 = 122.88209151827,
growth_resp_factor = 0.352803405024027,
SLA = 22.9831799052029 * 0.48,
b1Bl_large = 0.0957164598030354,
stoma_psi_b = 160.017481634853,
root_respiration_factor = 0.280639319284819,
b2Ht = 0.868131191794218,
SRA = 48.1711743548512,
r_fract = 0.826262914185645,
stomatal_slope = 10.4797428731951,
root_beta = 0.0501418540509767,
b2Bl_large = 1.84721490377007,
b1Bs_large = 0.271899528000708,
b2Bs_large = 2.57118662996341,
b1Ht = 0.100034825515468,
q = 0.994400362018496,
mort2 = 15.3333587065344,
leaf_turnover_rate = 1.85273895977298,
root_turnover_rate = 1.27805201890461,
stoma_psi_c = 2.9926889645867,
dark_respiration_factor = 0.0279573623213031,
quantum_efficiency = 0.057162389334215,
mort3 = 0.0508703883618926,
leaf_psi_tlp = 204.690265902307,
leaf_water_cap = 0.00189950774801228*100,
Vm0 = 21.0195095978388 * 2.4,
clumping_factor = clumping_factor_T,
seedling_mortality = 0.98,
leaf_trans_vis = leaf_trans_vis_T,
leaf_trans_nir = leaf_trans_nir_T,
leaf_reflect_vis = leaf_reflect_vis_T,
leaf_reflect_nir = leaf_reflect_nir_T,
orient_factor = orient_factor_T))
##########################################################################################
# Reference simulation
run_name <- "ref"
run_ref <- file.path(rundir,run_name)
out_ref <- file.path(outdir,run_name)
if(!dir.exists(run_ref)) dir.create(run_ref)
if(!dir.exists(out_ref)) dir.create(out_ref)
if(!dir.exists(file.path(out_ref,"analy"))) dir.create(file.path(out_ref,"analy"))
if(!dir.exists(file.path(out_ref,"histo"))) dir.create(file.path(out_ref,"histo"))
# ED2IN
ed2in_scenar <- ed2in
ed2in_scenar$IEDCNFGF <- file.path(run_ref,"config.xml")
ed2in_scenar$FFILOUT = file.path(out_ref,"analy","analysis")
ed2in_scenar$SFILOUT = file.path(out_ref,"histo","history")
write_ed2in(ed2in_scenar,filename = file.path(run_ref,"ED2IN"))
# Config
config_default <- config
xml <- write.config.xml.ED2(defaults = defaults,
settings = settings,
trait.values = config_default)
XML::saveXML(xml, file = file.path(run_ref,"config.xml"), indent = TRUE,
prefix = PREFIX_XML)
# job.sh
write_job(file = file.path(run_ref,"job.sh"),
nodes = 1,ppn = 18,mem = 16,walltime = 4,
prerun = "ml UDUNITS/2.2.26-intel-2018a R/3.4.4-intel-2018a-X11-20180131 HDF5/1.10.1-intel-2018a; ulimit -s unlimited",
CD = run_ref,
ed_exec = "/user/scratchkyukon/gent/gvo000/gvo00074/felicien/ED2/ED/run/ed_2.1-opt",
ED2IN = "ED2IN")
list_dir[[run_name]] = run_ref
##########################################################################################
# Reference simulation no liana
run_name <- "no_liana"
run_ref <- file.path(rundir,run_name)
out_ref <- file.path(outdir,run_name)
if(!dir.exists(run_ref)) dir.create(run_ref)
if(!dir.exists(out_ref)) dir.create(out_ref)
if(!dir.exists(file.path(out_ref,"analy"))) dir.create(file.path(out_ref,"analy"))
if(!dir.exists(file.path(out_ref,"histo"))) dir.create(file.path(out_ref,"histo"))
# ED2IN
ed2in_scenar <- ed2in
ed2in_scenar$IEDCNFGF <- file.path(run_ref,"config.xml")
ed2in_scenar$FFILOUT = file.path(out_ref,"analy","analysis")
ed2in_scenar$SFILOUT = file.path(out_ref,"histo","history")
ed2in_scenar$INCLUDE_THESE_PFT = c(2,3,4)
write_ed2in(ed2in_scenar,filename = file.path(run_ref,"ED2IN"))
# Config
config_default <- config
config_default[["Liana"]] <- NULL
xml <- write.config.xml.ED2(defaults = defaults,
settings = settings,
trait.values = config_default)
XML::saveXML(xml, file = file.path(run_ref,"config.xml"), indent = TRUE,
prefix = PREFIX_XML)
# job.sh
write_job(file = file.path(run_ref,"job.sh"),
nodes = 1,ppn = 18,mem = 16,walltime = 4,
prerun = "ml UDUNITS/2.2.26-intel-2018a R/3.4.4-intel-2018a-X11-20180131 HDF5/1.10.1-intel-2018a; ulimit -s unlimited",
CD = run_ref,
ed_exec = "/user/scratchkyukon/gent/gvo000/gvo00074/felicien/ED2/ED/run/ed_2.1-opt",
ED2IN = "ED2IN")
list_dir[[run_name]] = run_ref
#######################################################################################
run_name <- "liana_RTM"
run_ref <- file.path(rundir,run_name)
out_ref <- file.path(outdir,run_name)
if(!dir.exists(run_ref)) dir.create(run_ref)
if(!dir.exists(out_ref)) dir.create(out_ref)
if(!dir.exists(file.path(out_ref,"analy"))) dir.create(file.path(out_ref,"analy"))
if(!dir.exists(file.path(out_ref,"histo"))) dir.create(file.path(out_ref,"histo"))
# ED2IN
ed2in_scenar <- ed2in
ed2in_scenar$IEDCNFGF <- file.path(run_ref,"config.xml")
ed2in_scenar$FFILOUT = file.path(out_ref,"analy","analysis")
ed2in_scenar$SFILOUT = file.path(out_ref,"histo","history")
write_ed2in(ed2in_scenar,filename = file.path(run_ref,"ED2IN"))
# Config
config_default <- config
config_default$Liana["leaf_reflect_vis"] <- leaf_reflect_vis_L
config_default$Liana["leaf_reflect_nir"] <- leaf_reflect_nir_L
config_default$Liana["leaf_trans_vis"] <- leaf_trans_vis_L
config_default$Liana["leaf_trans_nir"] <- leaf_trans_nir_L
xml <- write.config.xml.ED2(defaults = defaults,
settings = settings,
trait.values = config_default)
XML::saveXML(xml, file = file.path(run_ref,"config.xml"), indent = TRUE,
prefix = PREFIX_XML)
# job.sh
write_job(file = file.path(run_ref,"job.sh"),
nodes = 1,ppn = 18,mem = 16,walltime = 4,
prerun = "ml UDUNITS/2.2.26-intel-2018a R/3.4.4-intel-2018a-X11-20180131 HDF5/1.10.1-intel-2018a; ulimit -s unlimited",
CD = run_ref,
ed_exec = "/user/scratchkyukon/gent/gvo000/gvo00074/felicien/ED2/ED/run/ed_2.1-opt",
ED2IN = "ED2IN")
list_dir[[run_name]] = run_ref
#######################################################################################
run_name <- "liana_other"
run_ref <- file.path(rundir,run_name)
out_ref <- file.path(outdir,run_name)
if(!dir.exists(run_ref)) dir.create(run_ref)
if(!dir.exists(out_ref)) dir.create(out_ref)
if(!dir.exists(file.path(out_ref,"analy"))) dir.create(file.path(out_ref,"analy"))
if(!dir.exists(file.path(out_ref,"histo"))) dir.create(file.path(out_ref,"histo"))
# ED2IN
ed2in_scenar <- ed2in
ed2in_scenar$IEDCNFGF <- file.path(run_ref,"config.xml")
ed2in_scenar$FFILOUT = file.path(out_ref,"analy","analysis")
ed2in_scenar$SFILOUT = file.path(out_ref,"histo","history")
write_ed2in(ed2in_scenar,filename = file.path(run_ref,"ED2IN"))
# Config
config_default <- config
config_default$Liana["orient_factor"] <- orient_factor_L
config_default$Liana["clumping_factor"] <- clumping_factor_L
xml <- write.config.xml.ED2(defaults = defaults,
settings = settings,
trait.values = config_default)
XML::saveXML(xml, file = file.path(run_ref,"config.xml"), indent = TRUE,
prefix = PREFIX_XML)
# job.sh
write_job(file = file.path(run_ref,"job.sh"),
nodes = 1,ppn = 18,mem = 16,walltime = 4,
prerun = "ml UDUNITS/2.2.26-intel-2018a R/3.4.4-intel-2018a-X11-20180131 HDF5/1.10.1-intel-2018a; ulimit -s unlimited",
CD = run_ref,
ed_exec = "/user/scratchkyukon/gent/gvo000/gvo00074/felicien/ED2/ED/run/ed_2.1-opt",
ED2IN = "ED2IN")
list_dir[[run_name]] = run_ref
#######################################################################################
run_name <- "liana_all"
run_ref <- file.path(rundir,run_name)
out_ref <- file.path(outdir,run_name)
if(!dir.exists(run_ref)) dir.create(run_ref)
if(!dir.exists(out_ref)) dir.create(out_ref)
if(!dir.exists(file.path(out_ref,"analy"))) dir.create(file.path(out_ref,"analy"))
if(!dir.exists(file.path(out_ref,"histo"))) dir.create(file.path(out_ref,"histo"))
# ED2IN
ed2in_scenar <- ed2in
ed2in_scenar$IEDCNFGF <- file.path(run_ref,"config.xml")
ed2in_scenar$FFILOUT = file.path(out_ref,"analy","analysis")
ed2in_scenar$SFILOUT = file.path(out_ref,"histo","history")
write_ed2in(ed2in_scenar,filename = file.path(run_ref,"ED2IN"))
# Config
config_default <- config
config_default$Liana["orient_factor"] <- orient_factor_L
config_default$Liana["clumping_factor"] <- clumping_factor_L
config_default$Liana["leaf_reflect_vis"] <- leaf_reflect_vis_L
config_default$Liana["leaf_reflect_nir"] <- leaf_reflect_nir_L
config_default$Liana["leaf_trans_vis"] <- leaf_trans_vis_L
config_default$Liana["leaf_trans_nir"] <- leaf_trans_nir_L
xml <- write.config.xml.ED2(defaults = defaults,
settings = settings,
trait.values = config_default)
XML::saveXML(xml, file = file.path(run_ref,"config.xml"), indent = TRUE,
prefix = PREFIX_XML)
# job.sh
write_job(file = file.path(run_ref,"job.sh"),
nodes = 1,ppn = 18,mem = 16,walltime = 4,
prerun = "ml UDUNITS/2.2.26-intel-2018a R/3.4.4-intel-2018a-X11-20180131 HDF5/1.10.1-intel-2018a; ulimit -s unlimited",
CD = run_ref,
ed_exec = "/user/scratchkyukon/gent/gvo000/gvo00074/felicien/ED2/ED/run/ed_2.1-opt",
ED2IN = "ED2IN")
list_dir[[run_name]] = run_ref
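#######################################################################################
# NOTE: the five scenario blocks above are near-duplicates. The helper below is an
# untested sketch of how one scenario could be generated per function call; the
# argument names (run_name, liana_overrides, include_pfts) are illustrative and not
# part of the original workflow. It is defined here but not used above.
make_scenario <- function(run_name, liana_overrides = NULL, include_pfts = NULL) {
  run_ref <- file.path(rundir, run_name)
  out_ref <- file.path(outdir, run_name)
  for (d in c(run_ref, out_ref, file.path(out_ref, "analy"), file.path(out_ref, "histo"))) {
    if (!dir.exists(d)) dir.create(d, recursive = TRUE)
  }
  # ED2IN
  ed2in_scenar <- ed2in
  ed2in_scenar$IEDCNFGF <- file.path(run_ref, "config.xml")
  ed2in_scenar$FFILOUT <- file.path(out_ref, "analy", "analysis")
  ed2in_scenar$SFILOUT <- file.path(out_ref, "histo", "history")
  if (!is.null(include_pfts)) ed2in_scenar$INCLUDE_THESE_PFT <- include_pfts
  write_ed2in(ed2in_scenar, filename = file.path(run_ref, "ED2IN"))
  # Config: start from the default and apply liana trait overrides (if any);
  # the sentinel "remove" drops the liana PFT entirely
  config_scenar <- config
  if (identical(liana_overrides, "remove")) {
    config_scenar[["Liana"]] <- NULL
  } else if (!is.null(liana_overrides)) {
    config_scenar$Liana[names(liana_overrides)] <- liana_overrides
  }
  xml <- write.config.xml.ED2(defaults = defaults, settings = settings,
                              trait.values = config_scenar)
  XML::saveXML(xml, file = file.path(run_ref, "config.xml"), indent = TRUE,
               prefix = PREFIX_XML)
  # job.sh
  write_job(file = file.path(run_ref, "job.sh"),
            nodes = 1, ppn = 18, mem = 16, walltime = 4,
            prerun = "ml UDUNITS/2.2.26-intel-2018a R/3.4.4-intel-2018a-X11-20180131 HDF5/1.10.1-intel-2018a; ulimit -s unlimited",
            CD = run_ref,
            ed_exec = "/user/scratchkyukon/gent/gvo000/gvo00074/felicien/ED2/ED/run/ed_2.1-opt",
            ED2IN = "ED2IN")
  run_ref
}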
#######################################################################################
dumb <- write_bash_submission(file = file.path(rundir,"all_jobs.sh"),
list_files = list_dir,
job_name = "job.sh")
|
85ffe3256bca2b0c6aa692904a1fd54fd3512264 | 1959a1c0c11353b98f1091399d962d1f24a53d1a | /R/activeSet.R | 872c7821b1213daccb200e16379d54f0204e4c03 | [] | no_license | cran/isotone | 0b15803f73bc3bfb3c2bc513ae57e809a9368189 | 033c470f2851171b7cc1b187ceae8f143b0b368d | refs/heads/master | 2023-03-03T11:23:53.570874 | 2023-02-22T06:33:36 | 2023-02-22T06:33:36 | 17,696,842 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,333 | r | activeSet.R | #active set methods for different solver
activeSet <- function(isomat, mySolver = "LS", x0 = NULL, ups = 1e-12, check = TRUE, maxiter = 100, ...)
{
if (ncol(isomat) != 2) stop("isomat must have 2 columns!")
extra <- list(...)
if (length(x0) == 0) x0 <- rep(0,max(isomat))
n <- length(x0)
xold <- x0 #starting values
  a <- isomat[,c(2,1)] #order restrictions (columns swapped so that Ax >= 0 encodes the constraints)
ax <- aTx(a, xold) #difference between order restrictions
if (any(ax < 0)) stop("Starting solution not feasible. Ax must be >= 0!")
ia <- is.active(ax, ups = ups) #which constraints are active
iter <- 0
#--------------- solver specification --------------------
mySolverDB <- list()
mySolverDB$chebyshev <- mSolver
mySolverDB$LS <- lsSolver
mySolverDB$L1 <- dSolver
mySolverDB$quantile <- pSolver
mySolverDB$GLS <- lfSolver
mySolverDB$poisson <- sSolver
mySolverDB$Lp <- oSolver
mySolverDB$asyLS <- aSolver
mySolverDB$L1eps <- eSolver
mySolverDB$huber <- hSolver
mySolverDB$SILF <- iSolver
if(is.character(mySolver)) {
pos <- pmatch(tolower(mySolver),
tolower(names(mySolverDB)))
if(is.na(pos))
stop(gettextf("Invalid skmeans method '%s'.", mySolver))
mySolver <- mySolverDB[[pos]]
}
#------------ end solver specification -----------------
#-------------- start active set iterations ------------------------
repeat {
iter <- iter + 1
if (length(ia)==0) { #no set active (typically 1st iteration)
aia <- NULL
} else { #active set
aia <- a[ia,] #active constraint
}
yl <- mySolver(xold, aia, extra) #call solver
y <- yl$x #fitted values
lbd <- yl$lbd #Lagrange multiplier (KKT vector lambda)
fy <- yl$f #value target function
gy <- yl$gx #gradient
ay <- aTx(a,y) #compute Ax
iy <- which.min(ay) #restriction with the largest violation of Ax >= 0
my <- ay[iy] #value of this restriction (worst one)
if (length(lbd)==0) { #no lambda
ml <- Inf
} else {
il <- which.min(lbd) #index minimum Lagrange
ml <- lbd[il] #value minimum Lagrange (worst one)
}
if (is.pos(my, ups)) { #no violation of Ax >= 0 (feasible)
if (is.pos(ml, ups)) break() #convergence reached, all lambda >= 0
xnew <- y #Ax >= 0 ok; lamba >= 0 not
ax <- ay
ia <- ia[-il] #add constraint to active set with worst lambda
} else { #still constraint violations (infeasible)
k <- which((ax>-ups) & (ay<ups)) #index where we have violations
#k <- which((ax>0) & (ay<0)) #index where we have violations
rat <- -ay[k]/(ax[k]-ay[k]) #line search starts
ir <- which.max(rat)
alw <- rat[ir] #alpha
xnew <- y+alw*(xold-y) #update function values
ax <- aTx(a,xnew) #update constraints Ax
ia <- sort(c(ia,k[ir])) #collect active sets
}
xold <- xnew #end iteration, start new one
if (iter == maxiter) {
warning("Maximum number of iterations reached!")
break()
}
}
#---------------------- end active set iterations --------------------
options(warn = 0)
lup <- rep(0, length(ay))
lup[ia] <- lbd #final vector of lambdas (0 where there was no active set)
hl <- taTx(a, lup, n) #A'lambda (should be equal to gradient)
if (check) { #check KKT
ck <- checkSol(y, gy, a, ay, hl, lup, ups) #checks feasibility of 4 KKT conditions
ck <- as.list(ck)
names(ck) <- c("stationarity","primal.feasibility","dual.feasibility","complementary.slackness")
} else {
ck <- NULL
}
result <- list(x = y, y = extra$y, lambda = lup, fval = fy, constr.val = ay, Alambda = hl,
gradient = gy, isocheck = ck, niter = iter, call = match.call())
class(result) <- "activeset"
result
}
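# Minimal usage sketch (commented out; assumes the package's lsSolver and a
# total order x1 <= x2 <= ... <= x5 -- values are illustrative only):
# isomat <- cbind(1:4, 2:5)
# fit <- activeSet(isomat, "LS", y = c(1, 3, 2, 5, 4), weights = rep(1, 5))
# fit$x # monotone least-squares fit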
|
97b12788c670bc9b2da2e0370a02f2921ba94c5e | 5e1b775edcb7683f1039eb20f2e6f9594a59f8eb | /chapter 2/car.R | 7c603c0e16d17f7b70c5dd607100a71b86d33677 | [] | no_license | loufin/Business-Analytics | 1973a649fca58da8aa4092a212eeb15e1c240098 | e15f69154df19e7a017ba47ce9dd30f2c39d9b7d | refs/heads/main | 2023-04-28T22:56:29.092261 | 2021-05-12T14:05:35 | 2021-05-12T14:05:35 | 335,809,886 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 991 | r | car.R | car.df <- read.csv("ToyotaCorolla.csv", header=TRUE)
View(car.df)
car1.df <- subset(car.df, select=-Model)
View(car1.df)
#method-1 for dummies
car2.df <- fastDummies::dummy_cols(car1.df)
View(car2.df)
#method-2 for dummies
car2a <- model.matrix(~0 + Fuel_Type + Color, data=car1.df)
car2a.df <- as.data.frame(car2a)
View(car2a.df)
# in this method, only the variables selected will be included in the data frame
# which in this case means Fuel_Type and Color variables. This can be either
# convenient or inconvenient depending on what you are trying to do.
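# The splits below are random; for reproducible partitions, fix the RNG seed
# first (the value is arbitrary), e.g.: set.seed(42)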
train.rows <- sample(rownames(car2.df), dim(car2.df)[1]*0.5)
train.data <- car2.df[train.rows, ]
rest.rows <- setdiff(rownames(car2.df), train.rows)
rest.data <- car2.df[rest.rows, ]
valid.rows <- sample(rownames(rest.data), dim(rest.data)[1]*0.6)
valid.data <- car2.df[valid.rows, ]
test.rows <- setdiff(rownames(rest.data), valid.rows)
test.data <- car2.df[test.rows, ]
View(train.data)
View(valid.data)
View(test.data)
|
b76accce79205aa53bf819a61d10eada0f40cbed | d13e3550cc9e5c0c98396cf5032f2ee0d1b8ceef | /junk.R | 1b99f055d666f70ef8affa63d393d11b376d4695 | [] | no_license | JohnOrmerod/BayesianStarWarsLectures | d6fc2323fb461a10a2bbf1ff72b910fce0beafa2 | 17510e99a809fbeb0a52e1ee3d9979c342aeb1ff | refs/heads/main | 2023-01-06T15:04:19.861388 | 2020-11-11T11:25:03 | 2020-11-11T11:25:03 | 311,948,746 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,842 | r | junk.R |
---
class: segue
.white[
# Bayes theorem machinary
]
---
### Bayes theorem for discrete distributions
.small[
Suppose that we have two events $A$ and $B$, and that we know
* $P(B=b|A=a)$
* $P(A=a)$
* $A$ can take the values $a_1,\ldots,a_K$.
Suppose that we want to find $P(A=a|B=b)$.
Bayes theorem
$$
P(A=a|B=b) = \frac{P(B=b|A=a)P(A=a)}{P(B=b)}
$$
where the total probability for $B=b$ is obtained by summing up all possibilities of
values $A$ can take, i.e.,
$$P(B=b)=\sum_{k=1}^KP(B=b|A=a_k)P(A=a_k)=\sum_{k=1}^KP(B=b,A=a_k)$$
]
---
### From two events to three
.small[
Suppose that we have three events $A$, $B$ and $C$, that $B$ and $C$ are conditionally independent given $A$, and that we know
* $P(B=b|A=a)$
* $P(C=c|A=a)$
* $P(A=a)$
* $A$ can take the values $a_1,\ldots,a_K$.
Suppose that we want to find $P(A=a|B=b,C=c)$.
Bayes theorem (using the conditional independence) gives
$$
P(A=a|B=b,C=c) = \frac{P(B=b|A=a) P(C=c|A=a)P(A=a)}{P(B=b,C=c)}
$$
where the total probability for $B=b$ and $C=c$ is obtained by summing up all possibilities of
values $A$ can take, i.e.,
$$P(B=b,C=c)=\sum_{k=1}^KP(B=b|A=a_k)P(C=c|A=a_k)P(A=a_k)=\sum_{k=1}^KP(A=a_k,B=b,C=c)$$
]
---
### From discrete to continuous parameters
.small[
Suppose that we have three random variables $A$, $B$ and $C$ (with $B$ and $C$ conditionally independent given $A$) and that we have three densities
* $p(b|a)$
* $p(c|a)$
* $p(a)$
* $a$ can take all values in the set $\mathcal{A}$.
Suppose that we want to find $p(a|b,c)$.
Using Bayes theorem we have
$$
p(a|b,c) = \frac{p(b|a)p(c|a)p(a)}{p(b,c)}
$$
where the density $p(b,c)$ is obtained by integrating over all values $A$ can take, i.e.,
$$p(b,c)= \int_{a\in\mathcal{A}}p(b|a)p(c|a)p(a)da= \int_{a\in\mathcal{A}}p(a,b,c)da$$
]
---
class: segue
.white[
# Bayesian inference as learning from data
]
---
.pull-left-2[
### Posterior update formula
.small[
Let $\vx_n = (x_1,\ldots,x_n)$ be the data after collecting $n$ samples.
Let $p(x_1|\theta)$ denote the density of a single sample and $p(\theta)$ be the prior.
The posterior after collecting $n=0$ samples is
\begin{align}
p(\theta| \vx_{\emptyset})
& = \frac{ \prod_{i\in \emptyset} p(x_i|\theta) p(\theta) }{
\int \prod_{i\in \emptyset} p(x_i|\theta) p(\theta) d\theta } = p(\theta).
\end{align}
That is after collecting zero samples the "posterior distribution"
simplifies to the prior distribution. After collecting one sample
\begin{align}
\ds p(\theta|x_1)
& = \frac{\ds p(x_1|\theta) p(\theta) }{\ds \int p(x_1|\theta) p(\theta) d\theta }
\\
& = \frac{\ds p(x_1|\theta) p(\theta| \vx_{\emptyset}) }{\ds \int p(x_1|\theta) p(\theta| \vx_{\emptyset}) d\theta }
\end{align}
]
]
---
.pull-left-2[
### Posterior update formula
Note that
\begin{align}
\ds p(\theta|\vx_{n+1})
& = \frac{\ds p(x_{n+1}|\theta) p(\theta|\vx_n) }{\ds \int p(x_{n+1}|\theta) p(\theta|\vx_n) d\theta }
\\
& = \frac{\ds p(x_{n+1}|\theta) p(\vx_n|\theta)p(\theta)/p(\vx_n) }{\ds \int p(x_{n+1}|\theta) p(\vx_n|\theta)p(\theta)/p(\vx_n) d\theta }
\\
& = \frac{\ds p(\vx_{n+1},\theta) }{\ds \int p(\vx_{n+1},\theta) d\theta }
\end{align}
which implies that the posterior for $\theta$ when using $n+1$ samples
can be obtained by treating $p(x_{n+1}|\theta)$ as the contribution from
the "likelihood" and treating $p(\theta| \vx_n)$ as an "informative"
prior.
]
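
The update identity above can be checked numerically. A minimal sketch with a
Beta-Bernoulli model (all values illustrative):

```{r}
set.seed(1)
x <- rbinom(10, size = 1, prob = 0.7) # illustrative data
a0 <- 1; b0 <- 1                      # Beta(1, 1) prior
# Batch posterior from all samples at once:
a_batch <- a0 + sum(x); b_batch <- b0 + length(x) - sum(x)
# Sequential: treat each posterior as the prior for the next observation
a <- a0; b <- b0
for (xi in x) { a <- a + xi; b <- b + (1 - xi) }
c(a_batch, b_batch) # identical to c(a, b)
```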
|
6285dfccd35846155947f63ffb8ddb1e7b562dba | e9c51696f28668459b9229811374a5e528966d48 | /man/UnionExtractFields.Rd | 3097a5ba77c5ea1bd84a07324257e8270e33cf69 | [
"MIT"
] | permissive | piquelab/QuASAR | bf360a982c058293705f085d5f68d96a2cc7f36e | a458b8a1238ae9d94d8dced6534283be68d5cd9a | refs/heads/master | 2020-04-12T07:25:42.412678 | 2017-02-04T01:18:46 | 2017-02-04T01:18:46 | 20,872,613 | 22 | 10 | null | 2017-02-03T04:29:44 | 2014-06-16T04:22:14 | R | UTF-8 | R | false | false | 798 | rd | UnionExtractFields.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{UnionExtractFields}
\alias{UnionExtractFields}
\title{UnionExtractFields}
\usage{
UnionExtractFields(fileList, combine = FALSE)
}
\arguments{
\item{fileList}{List of files *.quasar.in.gz}
\item{combine}{Collapses all samples into a single sample if true. Default is false}
}
\value{
returns a R list with the following elements:
Ref: Matrix with number of reads matching the reference allele.
Alt: Matrix with number of reads matching the alternate allele.
Err: Matrix with number of reads matchine neither the ref. or alt. allele.
anno: Object with the annotation for the SNPs.
}
\description{
From a list of files (fileList), find the union of all loci and return a list with the data prepared to run QuASAR.
}
\author{
Chris Harvey
}
|
8e7a38ef9d61e2661b6d18fd1a5a1b5411627747 | cdbca901bbf564d9c94a74a4ab3aabe303691ce8 | /data-raw/cacao/cacao.R | 55f35d7afca73e5f1432cd6c3fca1e4defc12472 | [] | no_license | wilkelab/ungeviz | 6d57a5a69bf46efe35f5e4d04f3f413c47273fff | aeae12b014c718dcd01343548f2221b58ece63f6 | refs/heads/master | 2020-03-27T00:49:35.929110 | 2019-01-24T20:14:20 | 2019-01-24T20:14:20 | 145,660,189 | 95 | 8 | null | null | null | null | UTF-8 | R | false | false | 143 | r | cacao.R | library(here)
library(tidyverse)
cacao <- read_csv(here("data-raw", "cacao", "cacao_clean.csv"))
devtools::use_data(cacao, overwrite = TRUE)
|
00c819ef919701b4a3d1cc76c4e1e936128edb51 | a38668bb0993cfa9f634859543e858b7246acd7f | /man/milk_mir_spectra_factory.Rd | 974e4c54dfbd09ea042541fb8a0e1b68d0920698 | [] | no_license | youngtf/milkMiR | 24a6b0c4d4c913d0f10f07458213eb04fbb9b865 | 0f97ab3d406bf3c7b2034e669060cb28110ec30c | refs/heads/master | 2021-03-19T06:05:16.675528 | 2018-02-04T21:28:17 | 2018-02-04T21:28:17 | 91,848,887 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 367 | rd | milk_mir_spectra_factory.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mirSpectra.R
\docType{class}
\name{milk_mir_spectra_factory}
\alias{milk_mir_spectra_factory}
\title{Generate a MilkMirSpectra object}
\format{An \code{\link{R6Class}} generator object}
\usage{
milk_mir_spectra_factory
}
\description{
Generate a MilkMirSpectra object
}
\keyword{datasets}
|
738e728e0d16b569c70211f2975e0c9ea3fdfb2f | f0352034f8467e2c82a31443ae6e3125039879ac | /R/plotContrastHeatmap.R | c6cfefb3aa08a5dd7f52bd8f55cc2853ceaf8eda | [] | no_license | epurdom/clusterExperiment | 8d5d43a250a1a3c28d4745aae4b72285458ba1a2 | ae86ee09697c13ccd5d32f964e28ab7d82b455d6 | refs/heads/master | 2022-11-04T01:54:19.806886 | 2022-10-11T22:00:27 | 2022-10-11T22:00:27 | 47,139,877 | 39 | 15 | null | 2021-01-27T21:26:28 | 2015-11-30T19:06:53 | R | UTF-8 | R | false | false | 6,451 | r | plotContrastHeatmap.R | #' @rdname plotContrastHeatmap
#' @aliases plotContrastHeatmap
#' @title Plot heatmaps showing significant genes per contrast
#' @description Plots a heatmap of the data, with the genes grouped based on the
#' contrast for which they were significant.
#' @param object ClusterExperiment object on which biomarkers were found
#' @param signifTable A \code{data.frame} in the format of the result of
#'   \code{\link{getBestFeatures}}. It must minimally contain columns 'Contrast'
#'   and 'IndexInOriginal' giving the grouping and original index of the
#'   features in \code{assay(object)}.
#' @param whichCluster if not NULL, indicates cluster used in making the
#' significance table. Used to match to colors in \code{clusterLegend(object)}
#' (relevant for one-vs-all contrast so that color aligns). See description of
#' argument in \code{\link{getClusterIndex}} for futher details.
#' @param contrastColors vector of colors to be given to contrasts. Should match
#' the name of the contrasts in the 'Contrast' column of \code{signifTable} or
#' 'ContrastName', if given.. If missing, default colors given by match to
#' the cluster names of \code{whichCluster} (see above), or otherwise given a
#' default assignment.
#' @param ... Arguments passed to \code{\link{plotHeatmap}}
#' @details If the column 'ContrastName' is given in \code{signifTable}, these
#' names will be used to describe the contrast in the legend.
#' @details Within each contrast, the genes are sorted by log fold-change if the
#' column "logFC" is in the \code{signifTable} data.frame
#' @details Note that if \code{whichCluster} is NOT given (the default) then there is
#' no automatic match of colors with contrasts based on the information in
#' \code{object}.
#' @return A heatmap is created. The output of \code{plotHeatmap} is returned.
#' @seealso \code{\link{plotHeatmap}}, \code{\link{makeBlankData}},
#' \code{\link{getBestFeatures}}
#' @export
#'
#' @examples
#' data(simData)
#'
#' cl <- clusterSingle(simData, subsample=FALSE,
#' sequential=FALSE,
#' mainClusterArgs=list(clusterFunction="pam", clusterArgs=list(k=8)))
#'
#' #Do all pairwise, only return significant, try different adjustments:
#' pairsPerC <- getBestFeatures(cl, contrastType="Pairs", number=5,
#' p.value=0.05, DEMethod="limma")
#' plotContrastHeatmap(cl,pairsPerC)
setMethod(
f = "plotContrastHeatmap",
signature = "ClusterExperiment",
definition = function(object,signifTable,whichCluster=NULL,contrastColors=NULL,...) {
if(!all(c("IndexInOriginal","Contrast") %in% colnames(signifTable ))) stop("signifTable must have columns 'IndexInOriginal' and 'Contrast'")
if(!is.numeric(signifTable$IndexInOriginal)) stop("Column 'IndexInOriginal' Must consist of numeric values")
if(!all(signifTable$IndexInOriginal %in% seq_len(nrow(object)))) stop("Column 'IndexInOriginal' must consist of indices that match the row indices of 'object'")
#divide by contrast, sort by FC (if exists) and return index
geneByContrast<-by(signifTable,signifTable$Contrast,function(x){
if("logFC" %in% names(x)){
x<-x[order(x$logFC),]
}
x$IndexInOriginal
})
##Check names of contrastColors
if(!is.null(contrastColors)){
if(!all(sort(names(contrastColors)) == sort(unique(signifTable$Contrast))) ){
if("ContrastName" %in% colnames(signifTable)){
if(!all(sort(names(contrastColors)) == sort(unique(signifTable$ContrastName))) ){
warning("names of contrastColors do not match 'Contrast' or 'ContrastName' values; will be ignored.")
contrastColors<-NULL
}
else contrastMatch<-FALSE #FALSE because don't have to fix the names to match ContrastName, since already do...
}
else{
warning("names of contrastColors do not match 'Contrast' values; will be ignored.")
contrastColors<-NULL
}
}
else contrastMatch<-TRUE
}
if(is.null(contrastColors)) contrastMatch<-FALSE
if("ContrastName" %in% colnames(signifTable)){
#give names to be contrast names
internalNames<-names(geneByContrast) #incase need the Contrast name later in code
gpnames<-unique(signifTable[,c("Contrast", "ContrastName", "InternalName")])
if(nrow(gpnames)==length(geneByContrast)){
m<-match(names(geneByContrast),gpnames[,"Contrast"])
names(geneByContrast)<-gpnames[m,"ContrastName"]
internalNames<-gpnames[m,"InternalName"]
#give colors the new names so will match
if(!is.null(contrastColors) & contrastMatch){
m<-match(names(contrastColors),gpnames[,"Contrast"])
names(contrastColors)<-gpnames[m,"ContrastName"]
}
}
#fix so order is order of contrast names
order<-order(names(geneByContrast))
geneByContrast<-geneByContrast[order]
}
if(!is.null(whichCluster)){
## Assign colors to the contrasts
## (only if contrast is oneAgainstAll)
whichCluster<-getSingleClusterIndex(object,
whichCluster,passedArgs=list(...))
cl<-clusterMatrix(object)[,whichCluster]
clMat<-clusterLegend(object)[[whichCluster]]
clMat<-clMat[which(clMat[,"clusterIds"]>0),] #remove negatives
### If the contrast is one against all, should have name of cluster so give color
### Note that need to use InternalId, in case the names are not unique?
internalNames<-gsub("Cl","",internalNames)
# This can give warnings for NAs, and if option(warn=2) create error...
internalNames<-try(sort(as.numeric(internalNames)),silent=TRUE)
if(inherits(internalNames, "try-error") ||
any(is.na(internalNames))) isOneVAll<-FALSE
else isOneVAll<-TRUE
clIds<-unname(sort(as.numeric(clMat[,"clusterIds"])))
if(is.null(contrastColors) && isOneVAll &&
identical(internalNames, clIds)){
contrastColors<-clMat[,"color"]
names(contrastColors)<-names(geneByContrast)
}
}
if(is.null(contrastColors)){
#do it here so don't mess up above default assignment of colors
contrastColors<-tail(massivePalette,length(geneByContrast)) #least likely to be important colors by accident
names(contrastColors)<-names(geneByContrast)
}
plotHeatmap(object,
clusterFeaturesData=geneByContrast,
clusterLegend=list("Gene Group"=contrastColors),...)
}
)
|
4841ec3b3815ff91cb914b226ac5bdb6e650c430 | 51fa858515a15e3787f210ad24b7e6f912e3ab70 | /Subgroup1/code/PT_DiscardEstimation_H3.r | c2c22463a6f34bc321a42b153af42884ca780c80 | [
"MIT"
] | permissive | JoelKos/WKRATIO | a95385006e22feed378bdb1515b218db4f0ea45f | 60b41fd27cd2c9ad616d47723950b25875c56340 | refs/heads/main | 2023-04-26T05:26:06.096839 | 2021-06-04T10:11:40 | 2021-06-04T10:11:40 | 373,797,419 | 0 | 0 | MIT | 2021-06-04T10:11:41 | 2021-06-04T09:56:49 | null | UTF-8 | R | false | false | 8,984 | r | PT_DiscardEstimation_H3.r | #####################################################################################################################
## WKRATIO: Portuguese data testing
## National discard raising procedure using RDBES data format (Hierarchy 3)
## Ana Cláudia Fernandes, 31may - 4jun
#####################################################################################################################
## No national CE table available - "Logbook effort data" prepared according to RDBES format
## Biological data: subset for blue whiting (WHB)
# Load our functions
source("RDBES_Functions.R")
# IMPORTANT: Hack to stop write.csv changing numbers to scientific notation
options(scipen=500) # big number of digits
## Loads data
### Effort data prepared (CE table)
load('.../Data/effTable.rdata')
### HIERARCHY 3 - Data subset for WHB
load('.../Data/H3_WHB.rdata')
## data formats
myExchangeFileH3$FM$FMnumberAtUnit <- as.numeric(as.character(myExchangeFileH3$FM$FMnumberAtUnit))
myExchangeFileH3$SA$SAnumberSampled <- as.numeric(myExchangeFileH3$SA$SAnumberSampled)
myExchangeFileH3$SA$SAnumberTotal <- as.numeric(myExchangeFileH3$SA$SAnumberTotal)
myExchangeFileH3$SA$SAtotalWeightLive <- as.numeric(myExchangeFileH3$SA$SAtotalWeightLive)
myExchangeFileH3$SA$SAsampleWeightLive <- as.numeric(myExchangeFileH3$SA$SAsampleWeightLive)
myExchangeFileH3$SA$SAtotalWeightMeasured <- as.numeric(myExchangeFileH3$SA$SAtotalWeightMeasured)
myExchangeFileH3$SA$SAsampleWeightMeasured <- as.numeric(myExchangeFileH3$SA$SAsampleWeightMeasured)
myExchangeFileH3$SS$SSnumberSampled <- as.numeric(myExchangeFileH3$SS$SSnumberSampled)
myExchangeFileH3$SS$SSnumberTotal <- as.numeric(myExchangeFileH3$SS$SSnumberTotal)
myExchangeFileH3$FO$FOnumberSampled <- 1
myExchangeFileH3$FO$FOnumberTotal <- 1
myExchangeFileH3$FT$FTnumberSampled <- as.numeric(myExchangeFileH3$FT$FTnumberSampled)
myExchangeFileH3$FT$FTnumberTotal <- as.numeric(myExchangeFileH3$FT$FTnumberTotal)
myExchangeFileH3$VS$VSinclusionProb <- 0.1923077 ## ~ vessel coverage
# ## FROM LIZ CODE IN THE GITHUB ('WKRATIO estimates.R') #################################################################
## calculate inclusion probabilities assuming SRS within strata
myExchangeFileH3$SA$SAinclusionProb <- myExchangeFileH3$SA$SAnumberSampled/myExchangeFileH3$SA$SAnumberTotal
myExchangeFileH3$SA[is.na(myExchangeFileH3$SA$SAinclusionProb),]$SAinclusionProb <- 1
myExchangeFileH3$SS$SSinclusionProb <- myExchangeFileH3$SS$SSnumberSampled/myExchangeFileH3$SS$SSnumberTotal
myExchangeFileH3$FO$FOinclusionProb <- myExchangeFileH3$FO$FOnumberSampled/myExchangeFileH3$FO$FOnumberTotal
myExchangeFileH3$FT$FTinclusionProb <- myExchangeFileH3$FT$FTnumberSampled/myExchangeFileH3$FT$FTnumberTotal
# calculate quarter from date, and then make a domain of quarter combined with area
# allocate domains to FM as well
myExchangeFileH3$SA$FOid <- myExchangeFileH3$SS$FOid[match(myExchangeFileH3$SA$SSid,myExchangeFileH3$SS$SSid)]
myExchangeFileH3$SA$date <- myExchangeFileH3$FO$FOendDate[match(myExchangeFileH3$SA$FOid,myExchangeFileH3$FO$FOid)]
myExchangeFileH3$SA$quarter <- paste("Q",(as.numeric(substr(myExchangeFileH3$SA$date,6,7))-1) %/% 3 + 1,sep="")
myExchangeFileH3$SA$SAarea <- '27.9.a'
myExchangeFileH3$SA$domain <- paste(myExchangeFileH3$SA$quarter,myExchangeFileH3$SA$SAarea)
myExchangeFileH3$FM$domain <- myExchangeFileH3$SA$domain[match(myExchangeFileH3$FM$SAid,myExchangeFileH3$SA$SAid)]
myExchangeFileH3$BV$domain <- myExchangeFileH3$FM$domain[match(myExchangeFileH3$BV$FMid,myExchangeFileH3$FM$FMid)]
# a function to calculate inclusion probabilities for the units at the final stage
# of sampling given all the other stages
getIncProb <- function(RDB,stages){
#browser()
nStages <- length(stages)
if (any(stages %in% c("FM"))) {
RDB[["FM"]][["FMinclusionProb"]] <- 1
}
#browser()
RDB[[stages[[1]]]][["inclusionProb"]] <- RDB[[stages[[1]]]][[paste(stages[[1]],"inclusionProb",sep="")]]
for (i in 2:(nStages)) {
#browser()
indx <- RDB[[stages[[i]]]][[paste(stages[[i-1]],"id",sep="")]]
indxPrev <- RDB[[stages[[i-1]]]][[paste(stages[[i-1]],"id",sep="")]]
RDB[[stages[[i]]]][["incProbPrev"]] <- RDB[[stages[[i-1]]]][[paste("inclusionProb",sep="")]][match(indx,indxPrev)]
RDB[[stages[[i]]]][["inclusionProb"]] <- RDB[[stages[[i]]]][["incProbPrev"]]*RDB[[stages[[i]]]][[paste(stages[[i]],"inclusionProb",sep="")]]
}
return(RDB)
}
# first do Number-at-Length (Hierarchy 3)
stages <- list("VS","FT","FO","SS","SA","FM")
myExchangeFileH3 <- getIncProb(myExchangeFileH3,stages)
###################################################################################################################################
### Discards estimation
## To calculate discards per hour in each trip (DpueTrp)
trip_info <- myExchangeFileH3$FT %>% transmute(FTid, FTarrivalDate, FTquarter= quarters(as.Date(FTarrivalDate)),FishingDays=as.numeric((as.Date(FTarrivalDate)-as.Date(myExchangeFileH3$FT$FTdepartureDate))+1), FTnumberOfHauls=as.numeric(FTnumberOfHauls), FTnumberTotal=as.numeric(FTnumberTotal), FTnumberSampled=as.numeric(FTnumberSampled), FTinclusionProb, incProbPrev, inclusionProb, VSid)
trip_info <- droplevels(trip_info[trip_info$FishingDays<4,])
haul_info <- myExchangeFileH3$FO %>% transmute(FOid, FOendDate, FOduration=as.numeric(FOduration), FOarea, FOmetier5, FOsampled, incProbPrev, inclusionProb, FTid )
haul_info$FishingDays <- trip_info$FishingDays[match(haul_info$FTid, trip_info$FTid)]
samp <- myExchangeFileH3$SA[,c('SAid','SAspeciesCode','SAspeciesCodeFAO','SAcatchCategory','SAmetier5','SAtotalWeightLive','SAsampleWeightLive','SAnumberTotal','SAnumberSampled','SAinclusionProb','SAtotalWeightMeasured',
'SAsampleWeightMeasured', 'quarter', 'incProbPrev', 'inclusionProb', 'SSid', 'FOid')]
samp$FTid <- haul_info$FTid[match(samp$FOid, haul_info$FOid)]
samp$SAtotalWeightMeasured <- samp$SAtotalWeightMeasured/1000 # to have weight in Kg
haul_info_dis <- merge(haul_info[haul_info$FOsampled=='Y',], samp[samp$SAcatchCategory=='Dis',c('FOid','SAtotalWeightMeasured')], by='FOid', all.x=T)
haul_info_dis$FOduration <- round(haul_info_dis$FOduration/60,2) ## duration in hours
haul_info_dis$FishingDays <- trip_info$FishingDays[match(haul_info_dis$FTid, trip_info$FTid)]
haul_info_dis$dpue <- round(haul_info_dis$SAtotalWeightMeasured/haul_info_dis$FOduration,2)
haul_info_dis[is.na(haul_info_dis$dpue),]$dpue <- 0; haul_info_dis[is.na(haul_info_dis$SAtotalWeightMeasured),]$SAtotalWeightMeasured <- 0
## Estimation of DPUE at trip level
## DpueTrp, total duration and total discards for sampled hauls
DpueTrp <- data.frame(haul_info_dis %>% group_by(FOmetier5,FishingDays,FTid) %>% summarise(DpueTrp=sum(SAtotalWeightMeasured)/sum(FOduration),
totDuration=sum(FOduration), totDisc=sum(SAtotalWeightMeasured)))
## To raise discards
totDurationTrp <- data.frame(haul_info_dis %>% group_by(FTid) %>% summarise(totDurationTrp=sum(FOduration)))
trip_info_dis <- merge(trip_info, DpueTrp[,c('FTid','totDuration','totDisc','DpueTrp')], by='FTid', all.x=T)
trip_info_dis$totDurationTrp <- totDurationTrp$totDurationTrp[match(trip_info_dis$FTid, totDurationTrp$FTid)]
## Get metier of the trip from FO table (P.S.- in OTB fleet all hauls from a trip have the same metier5...)
trip_info_dis$FOmetier5 <- haul_info$FOmetier5[match(trip_info_dis$FTid, haul_info$FTid)]
## mean DPUE and variance estimation
meanDpueTrp <- tapply(trip_info_dis$totDisc, list(trip_info_dis$FOmetier5, trip_info_dis$FishingDays), sum, na.rm=T)/tapply(trip_info_dis$totDuration, list(trip_info_dis$FOmetier5, trip_info_dis$FishingDays), sum, na.rm=T)
# variance of mean DPUE
discTrp <- tapply(trip_info_dis$totDisc, list(trip_info_dis$FTid), sum, na.rm=T)
durTrp <- tapply(trip_info_dis$totDuration, list(trip_info_dis$FTid), sum, na.rm=T)
nDaysTrp <- tapply(trip_info_dis$FishingDays, list(trip_info_dis$FTid), unique)
metierTrp <- tapply(trip_info_dis$FOmetier5, list(trip_info_dis$FTid), unique)
meanDpueTrp2 <- tapply((discTrp/durTrp)^2, list(metierTrp, nDaysTrp), na.rm=TRUE, sum)
nTrp <- tapply(trip_info_dis$FTid, list(trip_info_dis$FOmetier5, trip_info_dis$FishingDays), function(x){length(unique(x))})
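# Sketch of the variance formula below: with per-trip rates r_i = disc_i/dur_i,
# var = (sum_i r_i^2 - n * rbar^2) / (n * (n - 1)) within each
# metier x fishing-days stratum. Note that meanDpueTrp above is a
# ratio-of-means (total discards / total duration), which this formula
# treats as the mean of the per-trip rates.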
varDpueTrp <- (meanDpueTrp2-nTrp*meanDpueTrp^2)/((nTrp-1)*nTrp) #
## Discard estimates for total duration by fishing days: popDiscPerDaysWght
## Total 'Fishing duration' from "CE table"
popDurationPerDays <- tapply(effTable$CEoffVesFishHour, list(effTable$CEfishTech, effTable$CEoffFishDay), sum, na.rm=TRUE)
## Discard estimates by fishing days
popDiscPerDays <- (popDurationPerDays * meanDpueTrp)/1000
## Variance estimates for total duration by fishing days (popVarPerDays)
popVarPerDays <- (popDurationPerDays^2 * varDpueTrp)/1000000
## Total discards and variance, by fleet
popDiscTotalMetier <- apply(popDiscPerDays, c(1), sum, na.rm=TRUE)
popVarTotalMetier <- apply(popVarPerDays, c(1), sum, na.rm=TRUE)
|
2ce64064db3bb723a63f5cf4fbc9c42f11b79c83 | fc2359c3a7cbf5f885e5e4a5135b6e7394715aaa | /R/alexnet_partition.R | 95b196a2eea42efefcd5249d55a22d12344a5945 | [] | no_license | parvezrana/alexnet | f677cf9030fd9042d0e8002f93ab6e69dd91f28a | a3e59fdf02a6614520cb4c4c034e9c47887fae61 | refs/heads/master | 2022-11-30T20:36:23.099001 | 2020-08-15T02:43:38 | 2020-08-15T02:43:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,285 | r | alexnet_partition.R | #' Retrieve ImageNet Partition
#'
#' Retrieves a partition to be used with \code{imagenet_train} as the \code{data}
#' parameter.
#'
#' @param partition The partition to retrieve.
#' @param total The total number of partitions.
#' @param url The url of the board containing imagenet.
#'
#' @export
imagenet_partition <- function(partition, total = 16, url = "https://storage.googleapis.com/r-imagenet/") {
pins::board_register(url, "imagenet")
categories <- pins::pin_get("categories", board = "imagenet")
partition_size <- ceiling(length(categories$id) / total)
categories <- categories$id[(partition_size * (partition - 1)) + (1:partition_size)]
procs <- lapply(categories, function(cat)
callr::r_bg(function(cat) {
library(pins)
board_register("https://storage.googleapis.com/r-imagenet/", "imagenet")
pin_get(cat, board = "imagenet", extract = TRUE)
}, args = list(cat))
)
while (any(sapply(procs, function(p) p$is_alive()))) Sys.sleep(1)
list(
image = unlist(lapply(categories, function(cat) {
pins::pin_get(cat, board = "imagenet", download = FALSE)
})),
category = unlist(lapply(categories, function(cat) {
rep(cat, length(pins::pin_get(cat, board = "imagenet", download = FALSE)))
})),
categories = categories
)
}
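# Example usage (illustrative): retrieve partition 1 of 16 from the default board
# part <- imagenet_partition(1)
# length(part$image); head(part$category)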
|
49916efbc27827e4ce106a3ccc6a61cdf0e6ef2e | d9e0c4f508481c333969039b36b149d5ed63a003 | /R/chisq_compute.R | 34b778dfed862d1327b927b65ea68201c4013be8 | [] | no_license | strategist922/CDM | 71d8b67514a316676c8e124ffcfa918596b96f16 | a18c0e1dbea443c9794924ac4aeadbe60f591a24 | refs/heads/master | 2021-01-21T12:27:47.351062 | 2017-08-07T16:06:28 | 2017-08-07T16:06:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 94 | r | chisq_compute.R |
chisq_compute <- function(obs, exp)
{
chisq <- sum( ( obs - exp)^2 / exp )
return(chisq)
} |
10641433caee7ec0805c1311a0eb20980bf55125 | 9b86e35e5058b988ffec1f93fda4d6393da2ab14 | /calculateRemoval.R | 90683a67f0e2bf5bcfc9be6ff3d3c86d90032d37 | [
"MIT"
] | permissive | jwolfand/sw-log-removal-model | ac86b3dd5548bbd30c920bb92346324489b68cd3 | c8c2049bfc2ffe8e66c221f855cd0af56fc4e848 | refs/heads/master | 2020-03-08T22:31:39.756590 | 2018-04-25T22:14:04 | 2018-04-25T22:14:04 | 128,433,256 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 587 | r | calculateRemoval.R | #################################################
# FUNCTION Calculate removal
# J. Wolfand Written 6/29/17 Updated 7/14/17
# Purpose: Function to take log removal of concentration during storm events
# Inputs:
# Cin = timeseries of concentration in
# storm_timeseries = timeseries noting when storms are occuring
# logRem = log removal
# Outputs:
# Cout = concentration after removal
calculateRemoval <- function(Cin, storm_timeseries, logRem) {
  # Note: storm_timeseries is currently unused; removal is applied to the
  # full timeseries (both dry and wet weather are routed through BMPs)
  Cout <- 10 ^ (log10(Cin) - logRem)
  return (Cout)
} |
b8a6faf56259dc1249bf493c2460604a63454b90 | fad36ba43fc642cc4f05a37ff41a2bfdeb13ff6b | /man/pr2_ex_data.Rd | 39172946276a06c6ecb7207afac7e8e74dc457bf | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | ctb/metacoder | fd7f4830189515bacd5101a35621e4087018f576 | f1203b17a0f90cb928cf176d040344fdfbbd0971 | refs/heads/master | 2022-06-04T09:11:45.810079 | 2017-08-22T20:14:49 | 2017-08-22T20:14:49 | 103,031,498 | 1 | 0 | null | 2017-09-10T13:29:41 | 2017-09-10T13:29:41 | null | UTF-8 | R | false | true | 1,020 | rd | pr2_ex_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset_documentation.R
\name{pr2_ex_data}
\alias{pr2_ex_data}
\title{Example of PR2 SSU data}
\format{An object of type \code{\link{taxmap}}}
\description{
A dataset containing information from 249 Stramenopile sequences from the PR2 reference database.
}
\examples{
\dontrun{
file_path <- system.file("extdata", "pr2_stramenopiles_gb203.fasta", package = "metacoder")
sequences <- ape::read.FASTA(file_path)
library(taxa) # The parsers in taxa are used
pr2_ex_data <- extract_tax_data(names(sequences),
regex = "^(.*\\\\..*?)\\\\|(.*)$",
key = c("info", "class"),
class_sep = "|")
}
}
\seealso{
Other example_datasets: \code{\link{bryophytes_ex_data}},
\code{\link{contaminants}},
\code{\link{genbank_ex_data}},
\code{\link{its1_ex_data}}, \code{\link{silva_ex_data}},
\code{\link{unite_ex_data_1}},
\code{\link{unite_ex_data_2}}
}
|
1da4295ee7322c15f486e6b86d5d6a884c1d80d4 | 87428e50ba0ae6e471d9bcf8a90fb496862876cb | /code/archive/03_explore_som_main_6_vars.R | dc7dcea389a936fb5aa50f3be28dd072b58e679d | [] | no_license | imarkonis/ROS | a85d3e5ae66a13f1a42b66e1478b0c9656dac092 | 68600bb4a1b166042932b6a01662c71a402af719 | refs/heads/master | 2023-04-25T05:06:30.493397 | 2021-05-08T08:33:31 | 2021-05-08T08:33:31 | 347,308,613 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,865 | r | 03_explore_som_main_6_vars.R | library(ggplot2); library(data.table); library(fmsb)
library(kohonen)
#############################
### Exploration
#############################
#Part 1: we examine node properties
#Hierarchical clustering for further classification set to 16 -> groups = nodes
load('./data/som_4x4_main_6_new_vars_rev.rdata')
my_palette <- colorRampPalette(c("#4C3F54", "#486824", "#F2C057", "#D13525"))
n_groups <- 16 # number of groups with similar properties | each node is a group
#ros_som_hc <- cutree(hclust(dist(ros_som$codes[[1]])), n_groups)
#ros_group <- ros_som_hc[ros_som$unit.classif] #group id of each ros event
#ros_subset$group <- as.factor(as.numeric(ros_group))
ros_subset$node <- as.factor(ros_som$unit.classif)
ros_subset[, n_ros_group := .N, by = node] #number of ros events per hclust group
#pdf('./results/plots/01_nodes_main_6_vars_hc.pdf', width = 6, height = 4)
#plot(ros_som,
# type = "mapping",
# main = "Cluster Map",
# bgcol = my_palette(groups)[ros_som_hc])
#add.cluster.boundaries(ros_som, ros_som_hc)
#dev.off()
#Summary statistics for groups
ros_events <- ros_subset[, c(2:18, 20:21)]#change this to match number of variables
group_medians <- ros_events[, lapply(.SD, median, na.rm = T), by = node]
group_medians <- group_medians[order(EventPrec, decreasing = T), ] #sets main variable for box plot order
group_medians$group <- factor(1:n_groups, ordered = T) #Adding group ids according to precipitation order
group_node <- group_medians[, .(node, group)]
ros_subset <- group_node[ros_subset, on = 'node']
group_types <- ros_subset[, .(Q_group_size = .N), .(group, Q_group)]
most_events_type <- group_types[group_types[, .I[Q_group_size == max(Q_group_size)], by = group]$V1]
most_events_type <- most_events_type[order(group)]
write.csv(ros_subset[,-1], file = 'ros_som_6_groups.csv')
#to_plot <- data.table(melt(group_medians, id.vars = c('node', 'group'))) #tidy up
to_plot_groups <- data.table(melt(ros_subset, id.vars = c('ID', 'node', 'group'))) #tidy up
aa <- most_events_type[, .(group, Q_group)]
to_plot_groups <- aa[to_plot_groups, on = 'group']
vars_to_plot <- unique(to_plot_groups$variable)[c(1:7, 10, 13, 15, 17)]
#groups_to_plot <- c(1:6, 8, 10, 13, 16)
groups_to_plot <- c(1:16)
to_plot_groups <- to_plot_groups[variable %in% vars_to_plot & group %in% groups_to_plot]
to_plot_groups$variable <- relevel(to_plot_groups$variable, "EventPrec")
to_plot_groups$variable <- droplevels(to_plot_groups$variable)
levels(to_plot_groups$variable) <-
c("P", "T", "Pint", "Qint", "C", "SWE", "SCA", "Snowmelt", "Water balance",
"Qmax", "RCA")
g1 <- ggplot(to_plot_groups,
             aes(x = group, y = value, fill = factor(Q_group))) + #plotting boxplots ordered by EventPrec medians
geom_boxplot() +
scale_x_discrete(labels = groups_to_plot) +
facet_wrap(~variable, scales = "free") +
scale_fill_manual(values = my_palette(4)) +
xlab("Group") +
ylab("Variable values") +
theme_minimal() +
theme(panel.spacing = unit(0.8, "cm")) +
guides(fill = guide_legend(title = "Main runoff \ntype"))
ggsave('./results/plots/01_nodes_main_6_new_vars_boxplot_10_Qtypes_rev.png', g1, 'png',
width = 20, height = 13, units = 'cm')
#Months with most events per node
ros_months <- ros_subset[ros[, .(ID,
month = as.factor(month))],
on = 'ID']
group_months <- ros_months[, .N, .(group, month)]
group_months <- group_months[complete.cases(group_months)]
most_events_month <- group_months[group_months[, .I[N == max(N)], by = group]$V1]
most_events_month <- most_events_month[order(group)]
# Node characteristics with radar chart
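# (assumption: n_var, the number of SOM input variables, is defined upstream or
# comes with the loaded .rdata; the six names below imply n_var = 6)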
colnames(group_medians)[2:(n_var + 1)] <-
c("T", "P", "Pint", "Qint", "C", "SWE")
group_var_range <- apply(group_medians, 2, function(x) range(as.numeric(x)))
n_ros_group <- unique(ros_subset[, .(group, n_ros_group)])
n_ros_group <- n_ros_group[order(group), ]
op <- par(mar = c(1, 1, 1, 1),
mfrow = c(4, 4))
for(group_id in 1:n_groups){
group_for_radar <- rbind(group_var_range[2:1, ],
group_medians[group == group_id, ])[, 2:(n_var + 1)]
radarchart(group_for_radar,
axistype = 4,
title = paste0("Group ", group_id, " (N: ", n_ros_group[group_id, 2],
", Q type: ", most_events_type[group_id]$Q_group, ")"),
pcol = rgb(0.2, 0.5, 0.5, 0.9),
pfcol = rgb(0.2, 0.5, 0.5, 0.5),
plwd = 4,
vlcex = 1.5,
cglcol = "grey",
cglty = 1,
axislabcol = "grey",
cglwd = 1.2)
}
dev.copy(png, './results/plots/01_nodes_main_6_vars_new_radar.png',
width = 800, height = 800)
dev.off()
#Months per group
# (stale join on order_id dropped: order_id is not created in this version of the script)
to_plot <- group_months[, perc := N / sum(N), group]
g2 <- ggplot(to_plot, aes(x = "", y = perc, fill = as.factor(month))) +
geom_bar(stat = 'identity') +
facet_wrap(~group) +
coord_polar("y", start = 0) +
scale_fill_manual(values = my_palette(8)) +
labs(fill = "Month") +
theme(legend.position = "none") +
theme_void()
ggsave('./results/plots/01_nodes_month_main_6_vars.pdf', g2, 'pdf',
width = 30, height = 20, units = 'cm')
#Runoff groups vs SOM nodes
to_plot <- ros_subset[, .N, .(group, Q_group)]
#to_plot <- group_order_id[to_plot, on = 'group']
to_plot <- to_plot[, perc := N / sum(N), group]
g3 <- ggplot(to_plot, aes(x = "", y = N, fill = as.factor(Q_group))) +
geom_bar(stat = 'identity', position = position_dodge2(width = 0.9, preserve = "single")) +
facet_wrap(~group, scales = 'free') +
# coord_polar("y", start = 0) +
scale_fill_manual(values = my_palette(6)[4:1]) +
theme(legend.position = "none") +
guides(fill=guide_legend(title="Q type")) +
theme_light()
ggsave('./results/plots/01_nodes_Qgroupmain_6_vars.pdf', g3, 'pdf',
width = 30, height = 20, units = 'cm')
#Climate properties
climate_som <- readRDS('./data/climate_groups.rds')
ros_subset <- ros_subset[climate_som, on = 'ID']
to_plot <- ros_subset[, .N, .(group, cl_group_label)] # cl_group_label lives on ros_subset, not to_plot_groups
to_plot <- to_plot[complete.cases(to_plot)]
to_plot <- to_plot[, perc := N / sum(N), group]
g4 <- ggplot(to_plot, aes(x = "", y = perc, fill = as.factor(cl_group_label))) +
geom_bar(stat = 'identity') +
facet_wrap(~group) +
coord_polar("y", start = 0) +
scale_fill_manual(values = my_palette(6)[6:1]) +
theme(legend.position = "none") +
guides(fill=guide_legend(title = "Climate group")) +
theme_void()
ggsave('./results/plots/01_nodes_climate_main_6_vars.pdf', g4, 'pdf',
width = 30, height = 20, units = 'cm')
#Catchment properties
catchments <- readRDS('./data/catchment_data.RDs')
ros_cat <- ros_cat[catchments[, .(catchment, elev_diff = Elevation.difference..m., area = Area..km2.)], on = "catchment"]
dummy <- ros_cat[ros_subset, on = 'ID']
dummy <- dummy[complete.cases(dummy)]
to_plot <- dummy[, .N, .(group, catchment)]
to_plot <- to_plot[, perc := N / sum(N), group]
g5 <- ggplot(to_plot, aes(x = "", y = perc, fill = as.factor(catchment))) +
geom_bar(stat = 'identity') +
facet_wrap(~group) +
coord_polar("y", start = 0) +
scale_fill_manual(values = my_palette(14)) +
theme(legend.position = "none") +
guides(fill=guide_legend(title="Catchment")) +
theme_void()
ggsave('./results/plots/01_nodes_catchment_6_vars.pdf', g5, 'pdf',
width = 30, height = 20, units = 'cm')
to_plot <- dummy[, .N, .(group, mountain)]
to_plot <- to_plot[, perc := N / sum(N), group]
# (group_order_id is not defined in this script; facet directly by group below)
g6 <- ggplot(to_plot, aes(x = "", y = perc, fill = as.factor(mountain))) +
geom_bar(stat = 'identity') +
  facet_wrap(~group) +
coord_polar("y", start = 0) +
scale_fill_manual(values = my_palette(14)[c(3,7)]) +
theme(legend.position = "none") +
guides(fill=guide_legend(title="Catchment")) +
theme_void()
ggsave('./results/plots/01_nodes_mountain_6_vars.pdf', g6, 'pdf',
width = 30, height = 20, units = 'cm')
############################################
#Part 2 we examine group properties
## THE END ##
## Rest of code is not used - Keep it for future experiments
#Hierarchical clustering for further classification set to 4 -> groups
load('./data/som_4x4_main_vars')
groups <- 6 # number of groups with similar properties
ros_som_hc <- cutree(hclust(dist(ros_som$codes[[1]])), groups)
pdf('./results/plots/02_groups_main_vars_hc.pdf', width = 6, height = 4)
plot(ros_som,
type = "mapping",
main = "Cluster Map",
bgcol = my_palette(groups)[ros_som_hc])
add.cluster.boundaries(ros_som, ros_som_hc)
dev.off()
ros_group <- ros_som_hc[ros_som$unit.classif] #group id of each ros event
ros_subset$group <- as.factor(as.numeric(ros_group))
ros_subset$node <- as.factor(ros_som$unit.classif)
#Summary statistics for groups
ros_subset[, n_ros_group := .N, by = group] #number of ros events per hclust group
to_plot_groups <- data.table(melt(ros_subset[, 2:(n_var + 1)],
id.vars = c('group'))) #tidy up
dummy <- ros_subset[, 2:(n_var + 1)]
group_medians <- dummy[, lapply(.SD, median, na.rm = T), by = group]
group_medians <- group_medians[order(EventMelt), ]
group_medians$order_id <- factor(1:nrow(group_medians))
to_plot <- data.table(melt(group_medians, id.vars = c('order_id', 'group'))) #tidy up
dummy <- unique(to_plot[, .(group, order_id)])
to_plot_groups <- dummy[to_plot_groups, on = c('group')]
g1 <- ggplot(to_plot_groups[variable != 'Q_group'],
             aes(x = order_id, y = value, fill = group)) + #plotting boxplots ordered by EventMelt medians
geom_boxplot() +
facet_wrap(~variable, scales = "free") +
theme(legend.position = "none") +
scale_fill_manual(values = my_palette(groups)) +
theme_minimal()
ggsave('./results/plots/02_groups_main_vars_boxplot.pdf', g1, 'pdf',
width = 30, height = 20, units = 'cm')
#Months with most events per group
my_palette <- colorRampPalette(c("#4C3F54", "#486824", "#F2C057", "#D13525"))
ros_months <- ros_subset[ros[, .(ID,
month = as.factor(month))],
on = 'ID']
group_months <- ros_months[, .N, .(group, month)]
group_months <- group_months[complete.cases(group_months)]
most_events_month <- group_months[group_months[, .I[N == max(N)], by=group]$V1]
most_events_month <- most_events_month[order(group)]
# Group characteristics with radar chart
group_var_range <- apply(group_medians, 2, function(x) range(as.numeric(x)))
n_ros_group <- unique(ros_subset[, .(group, n_ros_group)])
n_ros_group <- n_ros_group[order(group), ]
op <- par(mar = c(1, 1, 1, 1),
mfrow = c(3, 2))
for(group_id in 1:groups){
group_for_radar <- rbind(group_var_range[2:1, ], group_medians[group == group_id, ])[, 2:n_var]
radarchart(group_for_radar,
axistype = 4,
title = paste0("Group ", group_id, " (N: ", n_ros_group[group_id, 2],
", month: ", most_events_month[group_id]$month, ")"),
pcol = rgb(0.2, 0.5, 0.5, 0.9),
pfcol = rgb(0.2, 0.5, 0.5, 0.5),
plwd = 4,
vlcex = 1,
cglcol = "grey",
cglty = 1,
axislabcol = "grey",
cglwd = 1.2)
}
dev.copy(pdf, './results/plots/02_groups_main_vars_radar.pdf')
dev.off()
#Months per group
group_months <- group_months[unique(to_plot_groups[, .(group, order_id)]), on = 'group']
to_plot <- group_months[, perc := N / sum(N), group]
g2 <- ggplot(to_plot, aes(x = "", y = perc, fill = as.factor(month))) +
geom_bar(stat = 'identity') +
facet_wrap(~group) +
coord_polar("y", start = 0) +
scale_fill_manual(values = my_palette(8)) +
labs(fill = "Month") +
theme(legend.position = "none") +
theme_void()
ggsave('./results/plots/02_groups_month.pdf', g2, 'pdf',
width = 30, height = 20, units = 'cm')
#Runoff groups vs SOM groups
to_plot <- ros_subset[, .N, .(group, Q_group)]
to_plot <- to_plot[complete.cases(to_plot)]
to_plot <- to_plot[, perc := N / sum(N), group]
g3 <- ggplot(to_plot, aes(x = "", y = perc, fill = as.factor(Q_group))) +
geom_bar(stat = 'identity') +
facet_wrap(~group) +
coord_polar("y", start = 0) +
scale_fill_manual(values = my_palette(6)[4:1]) +
theme(legend.position = "none") +
guides(fill=guide_legend(title = "Q group")) +
theme_void()
ggsave('./results/plots/02_groups_Qgroup.pdf', g3, 'pdf',
width = 30, height = 20, units = 'cm')
#Climate properties
climate_som <- readRDS('./data/climate_groups.rds')
ros_subset <- ros_subset[climate_som, on = 'ID']
to_plot <- ros_subset[, .N, .(group, cl_group_label)]
to_plot <- to_plot[complete.cases(to_plot)]
to_plot <- to_plot[, perc := N / sum(N), group]
g4 <- ggplot(to_plot, aes(x = "", y = perc, fill = as.factor(cl_group_label))) +
geom_bar(stat = 'identity') +
facet_wrap(~group) +
coord_polar("y", start = 0) +
scale_fill_manual(values = my_palette(6)[6:1]) +
theme(legend.position = "none") +
guides(fill=guide_legend(title = "Climate group")) +
theme_void()
ggsave('./results/plots/02_groups_climate.pdf', g4, 'pdf',
width = 30, height = 20, units = 'cm')
#Checking nodes
ros_subset[, n_ros_node := .N, by = node] #number of ros events per som node
to_plot_nodes <- data.table(melt(ros_subset[, 2:(n_var + 2)],
id.vars = c('node', 'group'))) #tidy up
ggplot(to_plot_nodes, aes(x = node, y = value, fill = node)) +
geom_boxplot() +
stat_summary(aes(color = value), fun.y = median, geom = "point", shape = 20, size = 2) +
facet_wrap(~variable, scales = "free") +
theme(legend.position = "none") +
scale_fill_manual(values = my_palette(n_nodes)) +
theme_minimal()
#Summary statistics for nodes
dummy <- ros_subset[, 2:(n_var + 2)]
dummy[, group := NULL]
node_medians <- dummy[, lapply(.SD, median, na.rm = T), by = node]
node_medians <- node_medians[order(EventMelt), ]
node_medians$order_id <- 1:nrow(node_medians)
to_plot <- data.table(melt(node_medians, id.vars = c('order_id', 'node'))) #tidy up
ggplot(to_plot, aes(x = order_id, y = value, col = variable)) +
geom_line() +
geom_point() +
facet_wrap(~variable, scales = "free") +
theme(legend.position = "none") +
scale_color_manual(values = my_palette(n_var)) +
theme_minimal()
#Checking groups with properties that were not used in classification, for instance catchments
ros_catchments <- ros_subset[ros[, .(ID,
where = factor(where),
hora = factor(hora))],
on = 'ID']
ros_catchments[, no_events := .N, where]
dummy <- ros_subset[, 1:(n_var + 1)]
ros_Q <- dummy[ros[, .(ID, Total)], on = 'ID'] #Total volume of runout water per RuE event in mm
#keeping the major groups
ros_cat_group <- ros_catchments[no_events > 10, .N, .(group, where)]
ros_cat_group <- ros_cat_group[N > 10]
ggplot(ros_cat_group, aes(x = where, y = N, fill = group)) +
geom_bar(stat = 'identity') +
scale_fill_manual(values = my_palette(4)) +
theme_minimal()
to_plot <- data.table(melt(ros_Q,
id.vars = c('group', 'ID'))) #tidy up
to_plot <- to_plot[complete.cases(to_plot)]
ggplot(to_plot, aes(x = group, y = value, fill = group)) +
geom_boxplot() +
facet_wrap(~variable, scales = "free") +
theme(legend.position = "none") +
scale_fill_manual(values = my_palette(groups)) +
theme_minimal()
#checking these high TotalQ/low TimeToMaxPeak group 10
ros_catchments[group == 10, table(where)] #Checking where the appear
ros_merger <- ros_subset[, .(ID, group)]
ros_group_10 <- ros_merger[ros, on = 'ID']
ros_group_10 <- ros_group_10[group == 10]
|
97f974763666bd5f57b956345faa23cbc6fae950 | 29585dff702209dd446c0ab52ceea046c58e384e | /subspaceMOA/R/plot.R | d3112f3a570de890ab1dd2e153524c2f1720f84d | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,734 | r | plot.R | #'Animate Stream Clustering.
#'
#'A function to plot data streams and clusterings. The visualisation is based on
#'\link[shiny]{shiny} and \link[ggplot2]{ggplot}. Data is plotted as a
#'scatterplot matrix and individual scatterplots can be selected for a more
#'detailed view that includes tooltips. Please note that this function was
#'developed for the Streaming algorithms in the subspaceMOA package and may or may
#'not work for other streams and clustering algorithms.
#'
#'@param dsc a DSC object representing the clustering of a data stream.
#'@param dsd a DSD object representing a data stream.
#'@param step the step size used in \link{animate_stream_interactive}. This
#' regulates how many points will be taken out of the stream, clustered and
#' then plotted along with their clusters every time a step is performed.
#'@param delay time between two clustering steps
#'@param launch.browser will be passed on to \link[shiny]{runApp}, so that the
#' visualisation can be shown in e.g. RStudio's Viewer pane, if this is
#' desired.
#'@export
#'@import ggplot2
#'@import shiny
#'@import magrittr
#'@import stream
#'@import streamMOA
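#'@examples
#'\dontrun{
#'# Illustrative sketch; the particular stream and clusterer are assumptions:
#'dsd <- stream::DSD_Gaussians(k = 3, d = 2)
#'dsc <- streamMOA::DSC_DenStream(epsilon = 0.05)
#'animate_stream_interactive(dsc, dsd, step = 500, delay = 5000)
#'}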
animate_stream_interactive <- function(dsc,dsd,step=1500,delay=10000,launch.browser=getOption("shiny.launch.browser",interactive())) {
#Create a shiny UI in which to display the streaming data
ui <- makeUI(show_animate_buttons=T)
server <- makeServer(dsc,dsd,step,delay=delay)
onStart <- function(){}
app <- shinyApp(ui=ui,server=server,onStart=onStart)
runApp(app,launch.browser=launch.browser)
}
#'Show Stream Clustering.
#'
#'A non-animated version of \link{animate_stream_interactive}.
#'
#'@param dsc a DSC object representing the clustering of a data stream.
#'@param points a \link{data.frame} of points that will be plotted along with
#' the clustering.
#'@param launch.browser will be passed on to \link[shiny]{runApp}, so that the
#' visualisation can be shown in e.g. RStudio's Viewer pane, if this is
#' desired.
#'@export
plot_stream_interactive <- function(dsc,points,launch.browser=getOption("shiny.launch.browser",interactive())) {
ui <- makeUI(show_animate_buttons=F)
server <- makeServer(dsc,points)
onStart <- function(){}
app <- shinyApp(ui=ui,server=server,onStart=onStart)
runApp(app,launch.browser=launch.browser)
}
makeUI <- function(show_animate_buttons) {
ui <- fluidPage(
#This dummy input exists because a conditional panel can only depend on
#values in the input or the output object, so we encode part of the
#application's state in an invisible selectInput. This is, of course, a very
#horrible way of doing it, but it works.
conditionalPanel("false",
selectInput("dummyInput",
label="You should not be seeing this",
choices=c("matrix","detail"))),
conditionalPanel("input.dummyInput == 'matrix'",
plotOutput("plot_matrix",click="plot_matrix_click",
width="95%",
height="600px"),
conditionalPanel(r_logical_to_js_boolean_string(show_animate_buttons),
fluidRow(
column(4,actionButton(inputId="stop_button",label="Stop",
class="btn-danger btn-large btn-block")),
column(4,actionButton(inputId="step_button",label="Step",
class="btn-primary btn-large btn-block")),
column(4,actionButton(inputId="run_button",label="Run",
class="btn-success btn-large btn-block"))
))),
conditionalPanel("input.dummyInput=='detail'",
fluidRow(plotOutput("detail_plot",
hover="detail_plot_hover",
height="600px")),
#The button to go back to the plot matrix view
fluidRow(
actionButton(inputId="back_button",
label="",icon=icon(name="th"),
class="btn-primary btn-large btn-block")
),
#The text field in which information on the point that is hovered
#over is given.
fluidRow(
wellPanel(htmlOutput("tooltip"))
)
)
)
return(ui)
}
#Creates a Shiny server to handle the logic of which plots are shown
makeServer <- function(dsc,dsd,step=NULL,delay=5000) {
if(is.data.frame(dsd)) {
points <- dsd
initial_data_frame <- format_data_from_dsc(dsc,points=points)
if(is.null(points[["class"]])) {
number_of_dimensions <- ncol(points)
}
else {
number_of_dimensions <- ncol(points)-1
}
} else {
initial_data_frame <- format_data_from_dsc(dsc)
#Try to get the number of dimensions of the stream
number_of_dimensions <- dsd[["d"]]
#If that failed just take one point and find out how many dimensions the stream data has
if(is.null(number_of_dimensions)) {
number_of_dimensions <- ncol(get_points(dsd,1,cluster=F,class=F))
}
}
server <- function(input,output,session){
#A reactiveValues object to keep track of global application state. In this
#case, we keep track of whether we are showing the clustering as a
#scatterplot Matrix (display_mode=="matrix") or a detailed view in which two
#dimensions are plotted against each other (display_mode=="detail")
#Additionally we are keeping track of the data frame that is currently being
#shown as well as whether we are currently running the stream clustering
#continuously.
state <- reactiveValues(display_mode="matrix",current_data_frame=initial_data_frame,
running=F,
should_perform_step=F,
plot_was_recently_drawn=F)
#When the "back" button (looks like a grid of squares) is pressed, the
#display mode should be set to "matrix"
observeEvent(input$back_button, {
state$display_mode <- "matrix"
})
observeEvent(input$step_button, {
state$should_perform_step <- T
})
observeEvent(input$run_button, {
state$running <- T
})
observeEvent(input$stop_button, {
state$running <- F
})
#If the current state of the app is running then start performing a step and
#repeat this action after "delay"
observe({
if(state$running) {
isolate({state$should_perform_step <- T})
invalidateLater(delay,session)
}
})
#Whenever a step should be performed, get new data from the
#stream and push the data into the clusterer.
observe({
if(state$should_perform_step) {
isolate({
new_points <- get_points(dsd,step,class=T)
withProgress({
update(dsc,DSD_Memory(new_points[,1:(ncol(new_points)-1)]),step)
},message="Updating the Clustering")
res <- format_data_from_dsc(dsc,points=new_points)
state$current_data_frame <- res
})
state$should_perform_step <- F
}
})
#Always have the selected value in the dummyInput reflect the current state of the application
#This expression is executed every time state$display_mode changes.
observe({
updateSelectInput(session=session,inputId="dummyInput",selected=state$display_mode)
})
#Keep track of the last plot that was clicked on in the scatterplot matrix.
#Changes whenever the main plot is clicked on.
#A helper function in helper.R determines which plot was being clicked on.
last_plot_clicked_on_in_matrix <- reactive({
c <- input$plot_matrix_click
if(is.null(c)) {
return(NULL)
} else {
state$display_mode <- "detail"
return(from_coords_to_plot(x=c$x,y=c$y,domain=c$domain,number_of_dimensions=number_of_dimensions))
}
})
#Make sure that last_plot_clicked_on_in_matrix is always current whenever a
#click occurs. If it weren't for this observe block, its value would get
#recomputed only when the detail plot is being shown, which is not the behavior
#we want because it keeps the detail plot from ever being shown.
observe({
last_plot_clicked_on_in_matrix()
})
#Draw the plot matrix from the current data frame
output$plot_matrix <- renderPlot({
withProgress({
list_of_plots <- create_plot_matrix(state$current_data_frame)
incProgress(message="Creating Scatterplot Matrix")
plotmatrix <- make_plot_matrix(list_of_plots,ncol=number_of_dimensions)
incProgress(amount = 0.3,message="Displaying Scatterplot Matrix")
if(is.null(plotmatrix)) return()
grid::grid.draw(plotmatrix)
},message="Creating Scatterplots")
isolate({state$plot_was_recently_drawn <- T})
})
output$detail_plot <- renderPlot({
if(!is.null(last_plot_clicked_on_in_matrix())) {
res <- state$current_data_frame %>%
basic_plot_from_dataframe(last_plot_clicked_on_in_matrix()) %>%
style_plot_for_detail()
isolate({state$plot_was_recently_drawn <- T})
res
}
})
output$tooltip <- renderPrint({
row <- row_from_two_values(dataframe=state$current_data_frame,
hover_list=input$detail_plot_hover,
area_around_cursor=0.05)
res <- dataframe_row_to_html(row)
cat(res)
})
}
return(server)
}
|
e52ae483c973011ff3abe79aeb2f05119a1175e5 | 30954ed2c633319c48a9542b957dd82fbcbb636b | /SamTurner/code/model_functions.R | 4a23132c8ea22b85d021480086dd7b3c5110288a | [] | no_license | zongyi2020/CMEEProject | ae057530c39a637df2159e44efca6f4af917c0d5 | 1d86ed13218ab1e30e3da31b96321b71f38cea2a | refs/heads/master | 2023-08-02T16:12:43.159300 | 2021-09-20T11:21:49 | 2021-09-20T11:21:49 | 384,176,870 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,291 | r | model_functions.R | ## Script: model_functions.R
## Author: Sam Turner sat19@ic.ac.uk
## About: Model specifications.
# to fit in log space
Quadratic <- function(t, a, b,c){
return(c*t**2 + b*t + a)
}
Cubic <- function(t, a, b, c, d){
return(d*t**3 + c*t**2 + b*t + a)
}
Logistic <- function(t, N0, Nmax, r){
N <- N0 * Nmax / (N0 + (Nmax - N0) * exp(-r*t))
return( log10(N) )
}
Gompertz <- function(t, N_0, N_max, r_max, t_lag){ # Modified gompertz growth model (Zwietering 1990)
return(N_0 + (N_max - N_0) * exp(-exp(r_max * exp(1) * (t_lag - t)/((N_max - N_0) * log(10)) + 1)))
}
Baranyi <- function(t, N_0, N_max, r_max, t_lag){ # Baranyi model (Baranyi 1993)
return(N_max + log10((-1+exp(r_max*t_lag) + exp(r_max*t))/(exp(r_max*t) - 1 + exp(r_max*t_lag) * 10^(N_max-N_0))))
}
Buchanan <- function(t, N_0, N_max, r_max, t_lag){ # Buchanan model - three phase logistic (Buchanan 1997)
return(N_0 + (t >= t_lag) * (t <= (t_lag + (N_max - N_0) * log(10)/r_max)) * r_max * (t - t_lag)/log(10) + (t >= t_lag) * (t > (t_lag + (N_max - N_0) * log(10)/r_max)) * (N_max - N_0))
}
# to fit in linear space
Logistic.exp <- function(t, N0, Nmax, r){
return( 10^(Logistic(t, N0, Nmax, r) ))
}
Gompertz.exp <- function(t, N0, Nmax, r, t_lag){
return( 10^(Gompertz(t, N0, Nmax, r, t_lag) ))
}
Baranyi.exp <- function(t, N0, Nmax, r, t_lag){
return( 10^(Baranyi(t, N0, Nmax, r, t_lag) ))
}
Buchanan.exp <- function(t, N0, Nmax, r, t_lag){
return( 10^(Buchanan(t, N0, Nmax, r, t_lag) ))
}
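# Note: the polynomial forms below are textually identical to their log-space
# counterparts above; presumably polynomials are fitted directly in whichever
# space the response is supplied in, so no back-transform is applied.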
Quadratic.exp <- function(t, a, b,c){
return(c*t**2 + b*t + a)
}
Cubic.exp <- function(t, a, b, c, d){
return(d*t**3 + c*t**2 + b*t + a)
}
# lists and vectors of model names and functions for fitting and plotting
all_models <- c("Logistic", "Gompertz", "Baranyi", "Buchanan", "Quadratic", "Cubic")
all_models.linear <- c("Logistic.exp", "Gompertz.exp", "Baranyi.exp", "Buchanan.exp", "Quadratic.exp", "Cubic.exp")
non_linear_models <- c("Logistic", "Gompertz", "Baranyi", "Buchanan")
linear_space_models <- c("Logistic.exp", "Gompertz.exp", "Baranyi.exp", "Buchanan.exp", "Quadratic.exp", "Cubic.exp")
draw_functions <- list("Logistic" = Logistic , "Gompertz" = Gompertz, "Baranyi" = Baranyi, "Buchanan" = Buchanan, "Quadratic" = Quadratic, "Cubic" = Cubic,"Logistic.exp" = Logistic.exp , "Gompertz.exp" = Gompertz.exp, "Baranyi.exp" = Baranyi.exp, "Buchanan.exp" = Buchanan.exp, "Quadratic.exp" = Quadratic.exp, "Cubic.exp" = Cubic.exp)
fit_functions <- list("Logistic" = Logistic , "Gompertz" = Gompertz, "Baranyi" = Baranyi, "Buchanan" = Buchanan, "Quadratic" = 2, "Cubic" = 3, "Logistic.exp" = Logistic.exp , "Gompertz.exp" = Gompertz.exp, "Baranyi.exp" = Baranyi.exp, "Buchanan.exp" = Buchanan.exp, "Quadratic.exp" = 2, "Cubic.exp" = 3)
model_params <- list("Logistic" = 3 , "Gompertz" = 4, "Baranyi" = 4, "Buchanan" = 4, "Quadratic" = 3, "Cubic" = 4, "Logistic.exp" = 3 , "Gompertz.exp" = 4, "Baranyi.exp" = 4, "Buchanan.exp" = 4, "Quadratic.exp" = 3, "Cubic.exp" = 4)
colours <- c("Logistic" = 'black' , "Gompertz" = 'red', "Baranyi" = 'dark green', "Buchanan" = 'blue', "Quadratic" = '#66a61e', "Cubic" = '#e6ab02', "Logistic.exp" = 'black' , "Gompertz.exp" = 'red', "Baranyi.exp" = 'dark green', "Buchanan.exp" = 'blue', "Quadratic.exp" = '#66a61e', "Cubic.exp" = '#e6ab02')
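
# --- Usage sketch (not part of the original pipeline) -------------------------
# Minimal illustration of fitting one model with minpack.lm::nlsLM; the data
# and starting values below are invented. Kept as comments so sourcing this
# file still only defines the model functions.
# library(minpack.lm)
# t <- seq(0, 24, by = 2)
# logN <- Gompertz(t, N_0 = 3, N_max = 9, r_max = 1.2, t_lag = 4) +
#   rnorm(length(t), sd = 0.05)
# fit <- nlsLM(logN ~ Gompertz(t, N_0, N_max, r_max, t_lag),
#              start = list(N_0 = min(logN), N_max = max(logN),
#                           r_max = 1, t_lag = 2))
# summary(fit)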
|
addfd5c9594ab46b6e9fc6db6062117e544f1a20 | 09fe54286ef4bf3a4bae93f38cd3eba67858837b | /claret/plots/rawmix-tput-vs-lat.r | 7b088f400d691e73d98708e889a121357685098d | [] | no_license | bholt/claret-doc | 6add727969dc33e83686cc485ebbcad9b8aae424 | 6d91ceae0da53114810637286379f6d4cea93e07 | refs/heads/master | 2020-04-05T14:08:23.367486 | 2016-09-04T20:24:24 | 2016-09-04T20:24:24 | 32,820,492 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,641 | r | rawmix-tput-vs-lat.r | #!/usr/bin/env Rscript
source('common.r')
a <- parse.args()
# d <- data.or.csv('data/rawmix-tput-vs-lat.csv', gen = function() {
d <- data.rawmix(where="name like 'v0.28.1%' and nclients = 4 and duration = 30 and length = 4 and rate = 100 and total_time < 61")
d <- subset(d, commute_ratio == 0.5 & alpha == 0.6)
# d
# }
# )
d$label <- d$nthreads * num(d$nclients) + "x" + d$rate
d$x <- d$nthreads * num(d$nclients)
g.cc_ph <- guide_legend(nrow = 6)
# force all non-transactional ones to be considered 'rw' mode (messes up some groupings otherwise)
d[d$cc_ph == NOTXN,]$cc <- RW
# subset to just my selected set of lines
d <- subset(d, cc_ph %in% c(RW+BASE, COMM, COMM+PH, COMB+PH, NOTXN))
# d.mean <- mean_path(d, throughput, avg_latency_ms, .(nthreads,cc,phasing,cc_ph))
save(
ggplot(d, aes(
x = throughput,
y = avg_latency_ms,
group = cc_ph, fill = cc_ph, color = cc_ph, linetype = cc_ph
))+
xlab('Throughput (txn/s)')+ylab('Mean latency (ms)')+
# geom_point()+
# geom_path()+
geom_mean_path(d, throughput, avg_latency_ms, .(x,cc_ph,rate))+
expand_limits(y=0)+
coord_cartesian(ylim=c(0,4))+
# scale_y_continuous(breaks=c(2,4,6,8,10))+
scale_x_continuous(labels=function(x){ x/1000 + 'k' })+
cc_ph_scales(guide = guide_legend(nrow=5))+
theme.mine()+
theme(
panel.grid.major.x = element_line(color="grey80", size=0.2),
panel.grid.minor.x = element_line(color="grey90", size=0.2),
panel.grid.minor.y = element_line(color="grey90", size=0.2),
legend.key = element_rect(fill=NA, color=NA),
legend.text = element_text(lineheight=0.9),
legend.key.height = unit(32,'pt'),
legend.key.width = unit(20,'pt'),
legend.title.align = 0.5,
legend.margin = unit(0,'pt'),
legend.title = element_blank()
)
, w=5, h=2.5)
write.csv(ddply(d, .(cc_ph,x), summarize, throughput=mean(throughput)), file='rawmix-tput.csv')
save(
ggplot(subset(d), aes(
x = x,
y = throughput,
    group = cc_ph, fill = cc_ph, color = cc_ph, linetype = cc_ph
  ))+
xlab('Clients')+ylab('Throughput (txn/s)')+
stat_summary(geom='line', fun.y=mean)+
stat_summary(geom='point', fun.y=mean)+
scale_x_continuous(breaks=c(16,64,128,256,384))+
scale_y_continuous(labels=function(x){ x/1000 + 'k' }, breaks=c(10000,20000,30000,40000,50000,60000,70000))+
expand_limits(y=0)+
cc_ph_scales()+
theme.mine()+
theme(
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
legend.key = element_rect(fill=NA, color=NA),
legend.text = element_text(lineheight=0.9),
legend.key.height = unit(40,'pt'),
legend.key.width = unit(20,'pt'),
legend.title.align = 0.5,
legend.margin = unit(0,'pt'),
legend.title = element_blank()
)
, 'rawmix-tput', w=5, h=3.5)
# save(
# ggplot(d, aes(
# x = x,
# y = avg_latency_ms,
# group = cc_ph, fill = cc_ph, color = cc_ph, linetype = cc_ph,
# ))+
# xlab('Clients')+ylab('Mean transaction latency (ms)')+
# stat_summary(geom='line', fun.y=mean)+
# stat_summary(geom='point', fun.y=mean)+
# # stat_summary(geom='smooth', fun.data=mean_cl_normal)+
# # stat_summary(fun.data=mean_cl_normal, geom='errorbar', width=0.2, aes(color='black'))+
# scale_x_continuous(breaks=c(16,32,64,128,256,384))+
# scale_y_continuous(labels=k.labels)+
# expand_limits(y=0)+
# cc_ph_scales()+
# my_theme()
# , 'rawmix-latency', w=5, h=3.5)
# cl_normal_min <- function(r) { mean_cl_normal(r)$ymin }
# cl_normal_max <- function(r) { mean_cl_normal(r)$ymax }
#
# d.mean <- ddply(d, .(rate,nthreads,cc_ph,phasing,cc,timeout_scaling), summarise, y=mean(throughput), ymin=cl_normal_min(throughput), ymax=cl_normal_max(throughput))
#
# save(
# ggplot(d.max, aes(
# x = cc_ph,
# y = y,
# group = cc_ph, fill = cc_ph,
# ))+
# xlab('total clients')+ylab('throughput (txn/s)')+
# geom_bar(stat="identity")+
# geom_errorbar(aes(ymin=ymin,ymax=ymax), width=0.2)+
# # scale_x_continuous(trans=log2_trans(), breaks=c(16,32,64,128,256,384))+
# scale_y_continuous(labels=k.labels)+
# expand_limits(y=0)+
# cc_ph_scales()+
# my_theme()+theme(legend.position='none')
# , w=4.5, h=3.5)
# dr <- data.or.csv(
# csv = 'data/rawmix-retries.csv',
# gen = function() {
# d <- data.rawmix(where="name like 'v0.28%' and nclients = 4 and duration = 30 and length = 4 and rate = 50")
# dc <- adply(d, 1, function(r){
# c <- fromJSON(jsfix(r$server_txn_conflict_on_tag))
# c <- c[['0']][['s']]
# data.frame(conflicts = c$conflicts, conflicts_total = c$total)
# })
# subset(dc, select = c('name', 'nthreads', 'cc', 'phasing', 'disable_txns', 'cc_ph', 'txn_retries', 'txn_count', 'total_time', 'throughput', 'avg_latency_ms', 'conflicts', 'conflicts_total', 'server_acquire_attempts', 'server_acquire_first_success'))
# }
# )
#
# dr$cc_ph <- factor(dr$cc_ph, levels = rev(levels(dr$cc_ph)))
# # dr$retry_rate <- with(dr, txn_retries / txn_count * 100)
# dr$retry_rate <- with(dr, txn_retries / total_time)
#
# dr$acquire_rate <- with(dr,
# (server_acquire_first_success)/server_acquire_attempts * 100
# )
#
# save(
# ggplot(subset(dr, disable_txns == 0), aes(x = cc_ph, y = acquire_rate, group = cc_ph, color = cc_ph, fill = cc_ph))+
# xlab('mode')+
# ylab('lock acquire success (percentage)')+
# stat_summary(geom='bar', fun.y=mean)+
# scale_y_continuous(labels=function(x) x+"%")+
# expand_limits(y=0)+
# cc_ph_scales()+
# coord_flip()+
# my_theme()+theme(legend.position='none')
# , 'rawmix-retries', w=4.5, h=2.5)
|
504add888b4d151de8eca098e1cf9b7f52fb6e4a | 15a4c80cfe2686ebae020f6964a84df0f68f1381 | /models/jags/seedBagBurial/jagsNpConstantMortalityConstantGermination.R | 252876745d4a5b06c1ea06feb9bfe30b472e5845 | [
"MIT"
] | permissive | gregor-fausto/seed-bank-inference | 3bef67f9cb5092948b7d78902b4ac4d63d18cd4f | 4bf4b7056e6b76aaf2bc72fc38ec91d221d25809 | refs/heads/main | 2023-04-11T09:20:01.732744 | 2022-12-01T16:54:05 | 2022-12-01T16:54:05 | 436,662,955 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,033 | r | jagsNpConstantMortalityConstantGermination.R | model {
# - Priors ---------------------------------------------------------------
# - +Probability of mortality ----
p.m ~ dbeta(1,1)
# - +Probability of germination ----
p.g ~ dbeta(1,1)
# - Transformed parameters ---------------------------------------------------------------
# - +Mortality history ----
p.s[1] = (1-p.m)
p.s[2] = (1-p.m)*(1-p.m)
p.s[3] = (1-p.m)*(1-p.m)*(1-p.m)
# - +Germination history ----
theta_c[1] = 1
theta_c[2] = (1-p.g)
theta_c[3] = (1-p.g)^2
# - Likelihoods ---------------------------------------------------------------
# - +Intact seed observations ----
for(i in 1:n_s){
# - +Deterministic survival model ----
# multiplies mortality and germination histories
mu[i] <- theta_c[t_obs[i]]*p.s[t_obs[i]]
# - +Likelihood ----
y.s_obs[i] ~ dbinom(mu[i], n.s_obs[i])
}
# - +Seedling observations ----
for(i in 1:n_g){
# - +Likelihood ----
y.g_obs[i] ~ dbinom(p.g, n.g_obs[i])
}
}
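
# --- Usage sketch (R driver, kept as comments so this file remains valid JAGS
# model input) ------------------------------------------------------------
# Hypothetical rjags invocation; the data list names follow the model's nodes.
# library(rjags)
# jm <- jags.model("jagsNpConstantMortalityConstantGermination.R",
#                  data = list(n_s = n_s, t_obs = t_obs, y.s_obs = y.s_obs,
#                              n.s_obs = n.s_obs, n_g = n_g,
#                              y.g_obs = y.g_obs, n.g_obs = n.g_obs),
#                  n.chains = 3)
# post <- coda.samples(jm, variable.names = c("p.m", "p.g"), n.iter = 5000)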
|
72e6ce3dd0b2e1cf9842ef5b6620c88366eb792d | 1b79b4efcb658fde21cc26d048166c850dac8dbb | /RW/fairing.R | ffb370e96f4eaeb1ee03ab0e3acf96d9e11579ce | [] | no_license | shimaXX/Rsource | 669dc81632710ab9b7f3a48faba03c953e991799 | e2d16f6c88be29b02418965c63040aa70a3adf84 | refs/heads/master | 2021-01-17T17:05:11.132207 | 2014-05-31T10:40:34 | 2014-05-31T10:40:34 | null | 0 | 0 | null | null | null | null | SHIFT_JIS | R | false | false | 984 | r | fairing.R | # TODO: Add comment
#
# Author: n_shimada
###############################################################################
#library(som)
#library(mvpart)
setwd("C:/RW/galsterBIG")
data <- read.table("galster_verb_count.txt",sep="\t")
verb <- read.csv("galster_verb.csv",header=F)
usr <- read.csv("galster_usr.csv",header=F)
day <- read.table("galster_day.txt",header=F)
NoUsr <- nrow(usr)
Noverb <- nrow(verb)
NoRow <- nrow(data)
# Initialize loop counters
Uroop <- 1
myCol <- 1
# Prepare result containers
result <- matrix(0, NoUsr, Noverb) # counts only, without user names
resultInName <- matrix(0, NoUsr, Noverb+1) # counts with user names attached
for(u in 1:NoUsr){
while(data[Uroop,1] == usr[u,1]){
for(i in 1:Noverb){
if(data[Uroop,2] == verb[i,1]){
result[u, i] <- data[Uroop,3]
break
}
}
Uroop <- Uroop + 1
}
}
resultInName <- cbind(usr[,1],result)
write.table(result, "galster_data.txt")
write.table(resultInName, "galster_class_data.txt") |
8130e81f8125866b242b17c368b6f70c47d62744 | e2dca774ef7572091781ef0b393f59130e9b75c6 | /server.R | 733a5466104dcb38003fa728d5525099a15f8f73 | [
"CC-BY-4.0"
] | permissive | SonjaHahn/Cor_AO | e5bfb7c875ea06b2f16d85e90463f701d0031a38 | 04f5e06eb9ee280a28a4733bab559aa71bfdf51e | refs/heads/master | 2022-01-13T07:51:21.172625 | 2019-07-22T10:48:43 | 2019-07-22T10:48:43 | 198,184,521 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,081 | r | server.R |
library(shiny)
library(MASS)
library(ggplot2)
library(rmarkdown)
# Define server logic for the correlation scatterplot demo
shinyServer(function(input, output) {
daten <- reactive({
N <- input$Nges
m1 <- input$m1
sd1 <- input$sd1 # > 0
m2 <- input$m2
sd2 <- input$sd2 # > 0
cor12 <- input$corr
cov12 <- cor12*sd1*sd2
Sigma <- matrix(c(sd1^2,cov12,cov12,sd2^2),2,2)
daten <- data.frame(mvrnorm(n = N, mu=c(m1,m2), Sigma, empirical = TRUE))
names(daten) <- c("Variable1", "Variable2")
daten
})
output$corPlot <- renderPlot({
p <- ggplot(daten(), aes(x=Variable1, y=Variable2)) +
geom_point(shape=1) + # Use hollow circles
theme_minimal() +
theme(axis.title = element_text(size = 15),
axis.text = element_text(size=12)) +
coord_cartesian(xlim=c(-5,5), ylim=c(-5,5)) +
scale_y_continuous(breaks=seq(-5, 5)) +
scale_x_continuous(breaks=seq(-5, 5))
# if(input$RegLine == TRUE)
# {p = p + geom_smooth(method=lm, se=FALSE) }
if(input$MLine == TRUE)
{p = p +
geom_hline(aes(yintercept=input$m2), colour="deepskyblue", linetype="dashed", size=1) +
geom_vline(aes(xintercept=input$m1), colour="deepskyblue", linetype="dashed", size=1)}
if(input$Ellipse == TRUE)
{p = p + stat_ellipse(linetype = "dashed", colour="red", size=2)}
p
})
output$numbers <- renderTable({
daten <- daten()
pear <- cor.test(daten$Variable1, daten$Variable2, method = "pearson")
spear <- cor.test(daten$Variable1, daten$Variable2, method = "spearman")
tabelle <- data.frame(matrix(c(pear$estimate, spear$estimate,
pear$p.value, spear$p.value), ncol=2), row.names = c("Pearson","Spearman"))
names(tabelle) <- c("Korrelation","p-Wert")
tabelle
},
align = c("c"),
rownames = TRUE)
}
)
|
1d4cf5f305b2310400f761ffaa91442ce7741565 | 446b0f604c6eb71157118e4e69ee0e616d04c82a | /Chapter2/A5.R | a5f746cccf180ba6e8eb1bca21c5de03f1dd4b94 | [] | no_license | RWB0104/R | e5c4f1dd1e8f4b4f5ce6daa2ef5f6e2dee0af4de | f3f9053f2b6585bb84d290e07e8972353a1e8f74 | refs/heads/master | 2020-06-06T03:07:46.065207 | 2019-07-29T00:03:10 | 2019-07-29T00:03:10 | 192,621,110 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 51 | r | A5.R | exam = c(2 ,4 ,5 ,5 ,5 ,2 ,1)
which.max(exam)
|
c019b471ac2a8acfe4f1dc6102efff61b946bbc2 | 9f35ee6ed29a04bf6c5bafd7ff09a076f626dc01 | /ICP_score/4.11.clinical.mean-exp_of_ICP_all_posible_features.R | e8105c5a6ad789aeac3038d8c4d0bce95be1ad74 | [] | no_license | mywanuo/immune-cp | fb4fbb9ef20120a1d2cad65f524d0a34bc2b5d9d | 9fb0ee0eff8636008eb587b20d5d2c9e3da18dad | refs/heads/master | 2022-10-28T11:44:24.513357 | 2020-06-19T10:00:30 | 2020-06-19T10:00:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,959 | r | 4.11.clinical.mean-exp_of_ICP_all_posible_features.R | ### mean expression of each gene features in TCGA
library(magrittr)
library(tidyverse)
# data path ---------------------------------------------------------------
# server 1
basic_path <- file.path("/home/huff/project")
immune_res_path <- file.path(basic_path,"immune_checkpoint/result_20171025")
TCGA_path <- file.path("/home/huff/data/TCGA/TCGA_data")
gene_list_path <- file.path(basic_path,"immune_checkpoint/checkpoint/20171021_checkpoint")
res_path <- file.path(immune_res_path,"ICP_score.new")
# load data ---------------------------------------------------------------
# expression data
expr <- readr::read_tsv(file.path(basic_path,"immune_checkpoint/clinical_response_data/mRNA_exp","all_FPKM_expression_2.txt"))
gene_list <- readr::read_tsv(file.path(gene_list_path, "ICPs_all_info_class-new.tsv")) %>%
dplyr::mutate(Exp_site.1 = ifelse(Exp_site %in% c("N"),"Not_sure",Exp_site))
# gene id transfer --------------------------------------------------------
ensembl_gene <- readr::read_tsv(file.path(basic_path,"immune_checkpoint/clinical_response_data","Homo_sapiens.gene_info.geneid.symbol.ensembl"))
gene_list %>%
dplyr::inner_join(ensembl_gene, by = "GeneID") %>%
dplyr::filter(!is.na(ens_id)) -> gene_list
# 1.get mean expression of all possible features of ICP ----------------------------------------------
# 1.1. get gene feature from gene list ----
genelist <- list()
#### gene list feature by gene exp site #####
for(expsite in c("Tumor cell dominate","Immune and tumor cell almost","Immune cell dominate")){
genelist[[expsite]] <- gene_list %>%
dplyr::filter(Exp_site.1 == expsite) %>%
.$symbol
}
#### gene list feature by gene function in immune system #####
for(fun_class in unique(gene_list$functionWithImmune)){
genelist[[fun_class]] <- gene_list %>%
dplyr::filter(functionWithImmune == fun_class) %>%
.$symbol
}
#### gene list feature by ligand-receptor pairs #####
for(p in unique(gene_list$Recepter_pairs)){
if(!is.na(p)){
genelist[[p]] <- gene_list %>%
dplyr::filter(Recepter_pairs == p) %>%
.$symbol
}
}
#### gene list feature by gene family #####
for(f in unique(gene_list$family)){
if(f != "Other"){
genelist[[f]] <- gene_list %>%
dplyr::filter(family == f) %>%
.$symbol
}
}
genelist[["All_gene"]] <- gene_list$symbol
# 1.2 function to get mean expression ----
# fn_mean <- function(names,genelist,exp){
# genes <- genelist[[names]]
# exp %>%
# dplyr::filter(symbol %in% genes) %>%
# dplyr::group_by(barcode) %>%
# dplyr::mutate(value = mean(exp)) %>%
# dplyr::ungroup() %>%
# dplyr::mutate(Features = paste("Mean.",names,sep=""))%>%
# dplyr::select(barcode,value,Features) %>%
# unique()
# }
gene_list %>%
dplyr::select(symbol,ens_id) %>%
dplyr::rename("gene_id"="ens_id")-> gene_list_ensid
expr %>%
dplyr::inner_join(gene_list_ensid, by="gene_id") %>%
dplyr::select(-gene_id) %>%
tidyr::gather(-symbol,key="barcode",value="exp") %>%
dplyr::mutate(exp = ifelse(is.na(exp),0,exp)) -> expr_ready
# 1.3 calculation of mean exp-----
# tibble::tibble(names = names(genelist)) %>%
# dplyr::mutate(mean = purrr::map(names,fn_mean,genelist=genelist,exp=expr_ready)) -> clinical_mean_exp
# 2.get fold expression of all possible features of ICP ----------------------------------------------
# 2.1 function to get ratio of (geneA+geneB+..)/geneA ------
fn_ratio_A <- function(names,genelist,exp){
genes <- genelist[[names]]
exp %>%
dplyr::filter(symbol %in% genes) %>%
dplyr::group_by(barcode) %>%
dplyr::mutate(value = sum(exp)) %>%
dplyr::ungroup() %>%
dplyr::select(barcode,value) %>%
unique() -> exp.sum
exp %>%
dplyr::filter(symbol %in% genes) %>%
dplyr::inner_join(exp.sum,by="barcode") %>%
dplyr::mutate(value = (exp+1)/(value+1)) %>%
dplyr::mutate(Features = paste("Frac.",symbol,".",names,sep="")) %>%
dplyr::select(barcode,value,Features) %>%
unique()
}
# 2.2 calculation -------
tibble::tibble(names = names(genelist)) %>%
dplyr::mutate(fold = purrr::map(names,fn_ratio_A,genelist=genelist,exp=expr_ready)) -> clinical_fold_exp
# 3.get ratio expression of all possible features of ICP ----------------------------------------------
# 2.1 function to get ratio of geneA/geneB ------
fn_ratio_B <- function(names,genelist,exp){
genes <- genelist[[names]]
combn(genes,2) %>%
as.data.frame() %>%
dplyr::as.tbl() %>%
tidyr::gather(key="combn",value="symbol") %>%
tidyr::nest(-combn) -> .combn
.combn %>%
dplyr::mutate(ratio = purrr::map(data,.f=function(.x){
exp %>%
dplyr::filter(symbol %in% .x$symbol) %>%
tidyr::spread(key="symbol",value="exp") %>%
dplyr::rename("G1"=.x$symbol[1],"G2"=.x$symbol[2]) %>%
dplyr::mutate(value = (G1+1)/(G2+1)) %>%
dplyr::mutate(Features = paste("Ratio.",.x$symbol[1],".",.x$symbol[2],sep=""))%>%
dplyr::select(barcode,value,Features) %>%
unique()
})) %>%
dplyr::select(-data,-combn) %>%
tidyr::unnest()
}
# 3.2 calculation -------
lapply(genelist, function(x){length(x)}) %>%
t() %>%
as.data.frame() %>%
dplyr::as.tbl() %>%
tidyr::gather(key="names",value="count") %>%
tidyr::unnest() %>%
dplyr::filter(count < 5) %>%
.$names -> names
tibble::tibble(names = names) %>%
dplyr::mutate(fold = purrr::map(names,fn_ratio_B,genelist=genelist,exp=expr_ready)) -> clinical_ratio_exp
# 4.combine all score -----------------------------------------------------
clinical_ratio_exp %>%
tidyr::unnest() %>%
rbind(clinical_fold_exp %>% tidyr::unnest()) %>%
# rbind(clinical_mean_exp %>% tidyr::unnest()) %>%
dplyr::select(-names) %>%
tidyr::spread(key="Features",value="value") -> clinical_mean_fold_ratio_features_value
clinical_mean_fold_ratio_features_value %>%
readr::write_rds(file.path(res_path,"new191213-clinical_mean_fold_ratio_features_value.rds.gz"),compress = "gz")
|
9e945ab628cad14e3851d886e2b25a5c42fe3abd | 9f7a468f257895dd163b5d7d6619d6b160db78a5 | /R/R-class/script/graphics2-20160427.R | cd0e43f80f4cb155ff8462038dd30c7f8c989d7d | [] | no_license | hunterlinsq/R-in-SOE | 5657c7dc48f53cd64b6f03133be08e9f876b387b | 0bd7301988ea2510d158273d647fd9e2a8b305ed | refs/heads/master | 2021-06-14T12:42:53.622731 | 2017-03-13T04:15:18 | 2017-03-13T04:15:18 | 58,655,412 | 1 | 0 | null | 2017-03-13T04:15:19 | 2016-05-12T15:48:38 | HTML | UTF-8 | R | false | false | 6,146 | r | graphics2-20160427.R | #************************************************************
# Stem-and-leaf plots
x<-c(25, 45, 50, 54, 55, 61, 64, 68, 72, 75, 75,
78, 79, 81, 83, 84, 84, 84, 85, 86, 86, 86,
87, 89, 89, 89, 90, 91, 91, 92, 100)
stem(x)
##
## The decimal point is 1 digit(s) to the right of the |
##
## 2 | 5
## 3 |
## 4 | 5
## 5 | 045
## 6 | 148
## 7 | 25589
## 8 | 1344456667999
## 9 | 0112
## 10 | 0
stem(x,scale=2) # split each stem of 10 into two parts: 0-4 and 5-9
##
## The decimal point is 1 digit(s) to the right of the |
##
## 2 | 5
## 3 |
## 3 |
## 4 |
## 4 | 5
## 5 | 04
## 5 | 5
## 6 | 14
## 6 | 8
## 7 | 2
## 7 | 5589
## 8 | 13444
## 8 | 56667999
## 9 | 0112
## 9 |
## 10 | 0
stem(x,scale=.5) # halve the number of stems, i.e. bins of width 20
##
## The decimal point is 1 digit(s) to the right of the |
##
## 2 | 5
## 4 | 5045
## 6 | 14825589
## 8 | 13444566679990112
## 10 | 0
#***************************************************
library(vcd)
counts <- table(Arthritis$Improved)
counts
# One-dimensional bar plots (vertical and horizontal)
par(mfrow = c(1, 2))
barplot(counts, main = "Simple Bar Plot", xlab = "Improvement", ylab = "Frequency")
barplot(counts, main = "Horizontal Bar Plot", xlab = "Frequency",
ylab = "Improvement", horiz = TRUE,
names.arg=c("A","B","C"))
#
counts <- table(Arthritis$Improved, Arthritis$Treatment)
counts
# Grouped, colored bar plots; beside controls stacked vs. side-by-side
#opar<-par(no.readonly = TRUE)
par(mfrow = c(1, 2))
barplot(counts, main="Stacked Bar Plot", xlab="Treatment",
ylab="Frequency", col=c("red", "yellow","green"), legend=rownames(counts))
barplot(counts, main="Grouped Bar Plot", xlab="Treatment",
ylab="Frequency", col=c("red", "yellow", "green"),
legend=rownames(counts), beside=TRUE)
#
states <- data.frame(state.region, state.x77)
means <- aggregate(states$Illiteracy, by = list(state.region),
FUN = mean)
means
## Group.1 x
## 1 Northeast 1.000000
## 2 South 1.737500
## 3 North Central 0.700000
## 4 West 1.023077
means <- means[order(means$x), ]
means
## Group.1 x
## 3 North Central 0.700000
## 1 Northeast 1.000000
## 4 West 1.023077
## 2 South 1.737500
barplot(means$x, names.arg = means$Group.1)
title("Mean Illiteracy Rate")
#***************************************************
# Pie charts
par(mfrow = c(2, 2))
# 1. basic pie chart
slices <- c(10, 12, 4, 16, 8)
lbls <- c("US", "UK", "Australia", "Germany", "France")
pie(slices, labels = lbls, main = "Simple Pie Chart")
# 2. pie chart with percentage labels
pct <- round(slices/sum(slices) * 100)
lbls2 <- paste(lbls, " ", pct, "%", sep = "")
pie(slices, labels = lbls2, col = rainbow(length(lbls)),
main = "Pie Chart with Percentages")
# 3. 3D pie chart
library(plotrix)
pie3D(slices, labels = lbls, explode = 0.1, main = "3D Pie Chart ")
# 4. pie chart with counts from a table
mytable <- table(state.region)
lbls <- paste(names(mytable), "\n", mytable, sep = "")
pie(mytable, labels = lbls, main = "Pie Chart from a Table\n (with sample sizes)")
#***************************************************
# Kernel density plots
par(mfrow = c(2, 1))
d <- density(mtcars$mpg)
windows(7,7)
plot(d)
d <- density(mtcars$mpg)
plot(d, main = "Kernel Density of Miles Per Gallon")
polygon(d, col = "red", border = "blue") # red fill, blue border
rug(mtcars$mpg, col = "brown") # add a rug
#
par(lwd = 2)
library(sm)
attach(mtcars)
cyl.f <- factor(cyl, levels = c(4, 6, 8), labels = c("4 cylinder", "6 cylinder", "8 cylinder"))
sm.density.compare(mpg, cyl, xlab = "Miles Per Gallon")
title(main = "MPG Distribution by Car Cylinders")
colfill <- c(2:(1 + length(levels(cyl.f))))
cat("Use mouse to place legend...", "\n\n")
legend(locator(1), levels(cyl.f), fill = colfill) # click in the plot to place the legend
detach(mtcars)
par(lwd = 1)
#*****************************************************
# Histograms
windows(7,7)
par(mfrow = c(1, 2))
hist(mtcars$mpg, breaks = 7, col = "yellow", xlab = "Miles Per Gallon",
     main = "Colored histogram with 7 bins")
hist(mtcars$mpg, freq = FALSE, breaks = 12, col = "red", xlab = "Miles Per Gallon",
main = "Histogram, rug plot, density curve")
rug(mtcars$mpg)
lines(density(mtcars$mpg), col = "blue", lwd = 2)
box() # draw a frame around the plot
library(shiny) # interactive editing via the Shiny app (the "+" menu at top left)
#***************************************************
boxplot(mtcars$mpg, main="Box plot", ylab="Miles per Gallon")
boxplot.stats(mtcars$mpg)
## $stats
## [1] 10.40 15.35 19.20 22.80 33.90
##
## $n
## [1] 32
##
## $conf
## [1] 17.11916 21.28084
##
## $out
## numeric(0)
par(mfrow = c(1, 2))
boxplot(mpg ~ cyl, data = mtcars, main = "Car Milage Data",
xlab = "Number of Cylinders", ylab = "Miles Per Gallon")
mtcars$cyl.f <- factor(mtcars$cyl, levels = c(4, 6, 8),
labels = c("4", "6", "8"))
mtcars$am.f <- factor(mtcars$am, levels = c(0, 1),
labels = c("auto", "standard"))
# Grouped box plots with fill colors
boxplot(mpg ~ am.f * cyl.f, data = mtcars, varwidth = TRUE, col = c("gold", "darkgreen"),
main = "MPG Distribution by Auto Type", xlab = "Auto Type")
#*****************************************************
#windows(width = 7, height = 4)
par(mfrow = c(1, 2))
windows(7,7)
dotchart(mtcars$mpg, labels = row.names(mtcars), cex = 0.7,
main = "Gas Milage for Car Models", xlab = "Miles Per Gallon")
x <- mtcars[order(mtcars$mpg),]
x$cyl <- factor(x$cyl)
x$color[x$cyl == 4] <- "red"
x$color[x$cyl == 6] <- "blue"
x$color[x$cyl == 8] <- "darkgreen"
dotchart(x$mpg, labels = row.names(x), cex = 0.7, pch = 19,
groups = x$cyl, gcolor = "black", color = x$color,
main = "Gas Milage for Car Models\ngrouped by cylinder",
         xlab = "Miles Per Gallon") # groups: split by cyl
#******************************************************
windows(7,7)
attach(mtcars)
# Point size encodes the value (disp)
symbols(wt, mpg, circle=disp)
r <- sqrt(disp/pi) # radius from area, to soften the size differences between circles
symbols(wt, mpg, circle=r, inches=0.30,
        fg="white", bg="lightblue", # add fill color
main="Bubble Plot with point size proportional to displacement",
ylab="Miles Per Gallon", xlab="Weight of Car (lbs/1000)")
text(wt, mpg, rownames(mtcars), cex=0.6)
detach(mtcars)
|
b75d65b142d72e124def5be47e9780c66663f777 | 9b301684365bd519fe661756335e96ba44e71a68 | /tests/testthat/test_draic.r | 6f89858d0704e8f56b9a29be4ecaa7d44e94501d | [] | no_license | chjackson/msm | faa982e8c77f178ae860e0655d25263e70726e5b | e4831870ae139431de1723178343aca03d2d30ec | refs/heads/master | 2023-09-01T00:13:45.911442 | 2023-08-29T15:41:59 | 2023-08-29T15:41:59 | 70,476,569 | 45 | 19 | null | 2023-05-18T15:40:38 | 2016-10-10T10:14:36 | R | UTF-8 | R | false | false | 1,434 | r | test_draic.r | psor.msm <- msm(state ~ months, subject=ptnum, data=psor, qmatrix = psor.q,
covariates = ~ollwsdrt+hieffusn,
constraint = list(hieffusn=c(1,1,1),ollwsdrt=c(1,1,2)),
control=list(fnscale=1))
## Merge states 2 and 3
psor$state3 <- ifelse(psor$state==3, 2,
ifelse(psor$state==4, 3, psor$state))
psor3.q <- psor.q[1:3,1:3]
psor3.msm <- msm(state3 ~ months, subject=ptnum, data=psor, qmatrix = psor3.q,
covariates = ~ollwsdrt+hieffusn,
constraint = list(hieffusn=c(1,1),ollwsdrt=c(1,1)),
control=list(fnscale=1))
test_that("DRAIC",{
d <- draic.msm(psor.msm, psor3.msm)
expect_true(is.numeric(d$draic))
dl <- draic.msm(psor.msm, psor3.msm, likelihood.only=TRUE)
expect_equal(d$lik.restricted["complex","-LL"], dl[["complex"]])
expect_equal(as.numeric(dl[["complex"]]), 415.032735368145, tolerance=1e-06)
expect_true(is.numeric(draic.msm(psor.msm, psor3.msm, tl=0.99)$ti["Lower"]))
})
test_that("DRAIC with observed information", {
skip_on_cran()
expect_error(draic.msm(psor.msm, psor3.msm, information="observed"), NA)
})
test_that("DRLCV",{
psor.msm <- msm(state ~ months, subject=ptnum, data=psor[1:29,], qmatrix = psor.q)
psor3.msm <- msm(state3 ~ months, subject=ptnum, data=psor[1:29,], qmatrix = psor3.q)
expect_error(drlcv.msm(psor.msm, psor3.msm, verbose=FALSE), NA)
})
|
1c4c4de96b96c0c476dbdfceadbdf52a9c54c07a | 2960defa608b3fae5fe6dfec2d09553f889b2f91 | /04-exploratory-data-analysis/projects/project-02/code/plot3.R | fe95b3348dbf2e7f77b4a05368b809dea19064ac | [] | no_license | Pratiksapkota169/coursera-data-science-specialization | bff3f00f63c0eb1ff4970efa3174a12457db476c | dd88cc85fccc480c2c943f16f35fbb0874a6252d | refs/heads/master | 2020-04-13T20:14:14.638272 | 2018-01-14T18:52:42 | 2018-01-14T18:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,707 | r | plot3.R | ################################################################################
## Source: Coursera
## Specialization: Data Science
## Course: 4 Exploratory Data Analysis
## Week: Week 4
## Project: Project 2
## File: plot3.R
## Date: 2016-02-06
################################################################################
################################################################################
## Question 3:
## Of the four types of sources indicated by the type (point, nonpoint, onroad,
## nonroad) variable, which of these four sources have seen decreases
## in emissions from 1999–2008 for Baltimore City? Which have seen increases
## in emissions from 1999–2008?
## Use the ggplot2 plotting system to make a plot answer this question.
################################################################################
################################################################################
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")
library(dplyr)
baltimore_data <- subset(NEI, fips=="24510")
total_emissions_by_year_and_type <- baltimore_data %>%
group_by(year, type) %>%
summarise(total.emissions = sum(Emissions))
total_emissions_by_year_and_type
library(ggplot2)
ggplot(total_emissions_by_year_and_type,
aes(x=year, y=total.emissions)) +
geom_line() +
facet_grid(. ~ type) +
ggtitle("Total Emissions Per Source Type During 1999-2008") +
labs(x="Year", y="Total Emissions (in tons)") +
scale_x_continuous(breaks=seq(1999, 2008, by=3))
ggsave(file="output/plot3.png", width=9.5, height=4.5)
################################################################################ |
cd28f145ea3cc25765cae5d4ff56fa090fed1647 | 396866f2bd1ea9cc6b443f71a988c7da51fc5c1e | /build_hc_tables/code/r/dsgn/cond_PERS.R | 751841fd316f2a248bd5b52697e574e79c708dc3 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | RandomCriticalAnalysis/MEPS-summary-tables | 8d946594a42e6f1d9b924da9fd25fe8312b5c4cb | d2efcc5d7a93e4eedfcf08e9ee955b92e3702e18 | refs/heads/master | 2020-04-09T20:52:05.833235 | 2018-09-10T18:16:59 | 2018-09-10T18:16:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 366 | r | cond_PERS.R | # Sum by person, condition, across event
all_pers <- all_events %>%
group_by(.subgrps., DUPERSID, VARSTR, VARPSU, PERWT.yy.F, Condition, count) %>%
summarize_at(vars(SF.yy.X, PR.yy.X, MR.yy.X, MD.yy.X, OZ.yy.X, XP.yy.X),sum) %>% ungroup
PERSdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = all_pers,
nest = TRUE)
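# Example use of the design object (a sketch; assumes the survey package is
# attached and the .yy./.subgrps. template placeholders have been substituted):
# svyby(~XP.yy.X, ~Condition, PERSdsgn, svymean)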
|
be9fce2257bcd812cf6e020af497ade61def1126 | 0aec55cd48f860f7d51fc62ca1067f3d8224a711 | /R/likelihoodGamma.R | b5e5707bdd4e294b709c81c2a266e4811607c079 | [] | no_license | cran/MsdeParEst | a83a0fb3fe7dacd30954a2863610836b6c531343 | db23b5f42d636274f8ca0aaddfd79cc3e28e345d | refs/heads/master | 2022-11-28T18:09:36.867669 | 2017-09-16T20:46:51 | 2017-09-16T20:46:51 | 103,786,504 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,287 | r | likelihoodGamma.R | # MsdeParEst R package ; file likelihoodGamma.r (last modified: 2017-09-15)
# Authors: M. Delattre, C. Dion
# Copyright INRA 2017
contrastGamma <- function(a, lambda, U, V, S, K, drift.fixed) {
invlambda <- 1/lambda
M <- length(S)
loglik <- vector(length = M)
phi1 <- drift.fixed[1]
phi2 <- drift.fixed[2]
U1 <- rep(0, M)
U2 <- rep(0, M)
V11 <- rep(0, M)
V22 <- rep(0, M)
V12 <- rep(0, M)
ST <- rep(0, M)
for (j in 1:M) {
U1[j] <- U[1, j]
U2[j] <- U[2, j]
V11[j] <- V[[j]][1, 1]
V22[j] <- V[[j]][2, 2]
V12[j] <- V[[j]][1, 2]
}
for (j in 1:M) {
ST[j] <- (S[j] - 2 * (phi1 * U1[j] + phi2 * U2[j]) + phi1^2 * V11[j] + phi2^2 *
V22[j] + 2 * phi1 * phi2 * V12[j])
}
for (j in 1:M) {
loglik[j] <- -2 * ((1 - a) * log(2/K + ST[j]/K) - log(gamma(a)) + a * log(invlambda) -
(a + K/2) * log((2 * invlambda/K + ST[j]/K)/(2/K + S[j]/K)))
}
L <- sum(loglik)
return(L)
}
likelihoodGamma <- function(a, lambda, U, V, S, SigDelta, K, drift.fixed) {
if (is.infinite(log(gamma(a + K/2)))) {
L <- Inf
} else {
invlambda <- 1/lambda
M <- length(S)
loglik <- vector(length = M)
phi1 <- drift.fixed[1]
phi2 <- drift.fixed[2]
U1 <- rep(0, M)
U2 <- rep(0, M)
V11 <- rep(0, M)
V22 <- rep(0, M)
V12 <- rep(0, M)
ST <- rep(0, M)
for (j in 1:M) {
U1[j] <- U[1, j]
U2[j] <- U[2, j]
V11[j] <- V[[j]][1, 1]
V22[j] <- V[[j]][2, 2]
V12[j] <- V[[j]][1, 2]
}
for (j in 1:M) {
ST[j] <- (S[j] - 2 * (phi1 * U1[j] + phi2 * U2[j]) + phi1^2 * V11[j] + phi2^2 *
V22[j] + 2 * phi1 * phi2 * V12[j])
}
for (j in 1:M) {
loglik[j] <- K * log(2 * pi) + sum(SigDelta[j]) - 2 * (a * log(invlambda) +
log(gamma(a + K/2)) - log(gamma(a)) + (a + K/2) * log(invlambda + ST[j]/2))
}
L <- sum(loglik)
}
return(L)
}
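# Toy sanity check (a sketch, not from the package; assumed shapes: U is 2 x M,
# V a list of M 2 x 2 matrices, S a length-M numeric vector):
# M <- 3; K <- 10
# U <- matrix(rnorm(2 * M), nrow = 2)
# V <- replicate(M, diag(2), simplify = FALSE)
# S <- rexp(M, rate = 1/K)
# contrastGamma(a = 2, lambda = 0.5, U, V, S, K, drift.fixed = c(0, 0))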
|
e3e68d687a7f5621bac63768ff6748b542334c83 | eeb4249594b67f0564e8563ab83ecf641ef3ed8f | /R/nclusters.R | 4aeb540ce5e3a5239caec120a11510e6ed805afc | [] | no_license | cran/smerc | 6e81aaa86f2405364f2368079a07f949317848fc | aab00112b726a9392395b1937f3f92e1bbd3cb3e | refs/heads/master | 2023-07-23T05:53:39.316070 | 2023-07-15T19:30:02 | 2023-07-15T20:30:28 | 48,088,790 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 615 | r | nclusters.R | #' Number of clusters
#'
#' \code{nclusters} returns the number of clusters
#' identified in a \code{smerc_cluster} object.
#' @param x A \code{smerc_cluster} object
#'
#' @return A non-negative integer.
#' @export
#' @examples
#' data(nydf)
#' coords <- with(nydf, cbind(longitude, latitude))
#' out <- scan.test(
#' coords = coords, cases = floor(nydf$cases),
#' pop = nydf$pop, nsim = 19,
#' alpha = 0.3, longlat = TRUE
#' )
#' nclusters(out)
nclusters <- function(x) {
if (!is.element("smerc_cluster", class(x))) {
stop("x must be a smerc_cluster")
}
length(x$clusters)
}
|
103849ed959ff1f406021337f55f2d4cf2d37a3e | 35076f29694f5874a2ffab0a8b7d93c8773044b4 | /climate strike analysis.R | d90a5083fa3d5e58b99f0ec558d153a7682be3ca | [] | no_license | bblonder/climate_strike_2019 | 7b1e99c792c1b417fe67a4506c6382bbdde3c93e | f0722ede3ef782f8e70bf7f3da7455317a9ce53d | refs/heads/master | 2020-07-30T02:53:10.044482 | 2019-09-21T22:55:55 | 2019-09-21T22:55:55 | 210,062,899 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,645 | r | climate strike analysis.R | library(readxl)
library(dplyr)
library(ggplot2)
library(car)
library(DHARMa)
library(visreg)
library(ggrepel)
library(ggpubr)
data = read_excel('climate strike data.xlsx')
names(data) = make.names(names(data))
data$Turnout.estimate.factor = factor(data$Turnout.estimate,levels=c("hundreds","thousands","tens of thousands","hundred thousand"),ordered=TRUE)
# guess turnouts
data$EstimateQuantitative = 5*10^(1+as.numeric(data$Turnout.estimate.factor))
# nyc correction
data$EstimateQuantitative[data$Metro=="New York-Newark-Jersey City, NY-NJ-PA MSA"] = 60000
# per capita analyses
data$PerCapitaTurnout = data$EstimateQuantitative / data$Pop.2018
data$Students.allowed = factor(data$Students.allowed,ordered=TRUE,levels=c(0,1))
data$Weather.day.of = factor(data$Weather.day.of,levels=c("OK","Thunder / floods"),ordered=TRUE)
# linear model
m_lm_all = lm(log(PerCapitaTurnout)~
Air.temp.day.of +
Students.allowed +
Weather.day.of + Democrat.vote.2016 +
Income.2017 +
Income.ratio.1.to.99.2015,
data=data)
# diagnostics
simulateResiduals(fittedModel = m_lm_all, n = 1000) %>% plot
# significance
summary(m_lm_all)
Anova(m_lm_all,type=2)
# make turnout graph
g_pc = ggplot(data,aes(x=reorder(gsub(" MSA","",Metro), PerCapitaTurnout),y=PerCapitaTurnout)) +
geom_bar(stat='identity') +
xlab("Metropolitan statistical area") + ylab("Turnout (per capita)") +
coord_flip() +
theme_bw()
ggsave(g_pc,file='g_pc.png',width=6,height=6)
plot_partial_resid_with_label = function(model, data, var)
{
  # use the model argument here (the original hard-coded m_lm_all)
  v = visreg(model, var, gg=TRUE) + theme_bw()
df_with_name = data.frame(ggplot_build(v)$plot$data,Metro=data$Metro)
v_final = v + geom_label_repel(data=df_with_name,aes(x=x,y=y,label=Metro,col=(Metro=="Phoenix-Mesa-Chandler, AZ MSA")),size=1.25) + scale_color_manual(values=c("black","red")) + theme(legend.position='none')
return(v_final)
}
g_air = plot_partial_resid_with_label(m_lm_all,data,"Air.temp.day.of")
plot_partial_resid_with_label(m_lm_all,data,"Democrat.vote.2016")
g_income = plot_partial_resid_with_label(m_lm_all,data,"Income.2017")
plot_partial_resid_with_label(m_lm_all,data,"Income.ratio.1.to.99.2015")
ggsave(ggarrange(g_air, g_income, nrow=1,ncol=2),file='g_pred.png',width=9,height=5)
# obs vs pred
g_obspred = data.frame(pred=predict(m_lm_all),obs=log(data$PerCapitaTurnout),Metro=data$Metro) %>% ggplot(aes(x=obs,y=pred,label=Metro)) + geom_abline(slope=1,intercept=0,color='gray') + geom_label_repel(size=2,aes(color=(Metro=="Phoenix-Mesa-Chandler, AZ MSA")),show.legend=FALSE) + scale_color_manual(values=c("black","red")) + theme(legend.position='none') + theme_bw()
ggsave(g_obspred,file='g_obspred.pdf')
|
146eb33c6a04395fae1016228f78fd1ed5a138a4 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/vrtest/examples/Wright.Rd.R | 55603ea8e80f631e2a8ba31ad550ae2c143bb69f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 276 | r | Wright.Rd.R | library(vrtest)
### Name: Wright
### Title: Wright's Rank and Sign Tests
### Aliases: Wright
### Keywords: htest
### ** Examples
data(exrates)
y <- exrates$ca
nob <- length(y)
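# one-period log returns of the exchange-rate series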
r <- log(y[2:nob])-log(y[1:(nob-1)])
kvec <- c(2,5,10)
Wright(r,kvec)
|
f04426afc2691b63f7e6704841e7124c793e1c00 | 2416d7df345c4605edfad5b2f0b899ef10eb7980 | /Task 3 - sentiment dynamics.R | 3f60de36d24796d24a3acc3b386696bf8d534151 | [
"MIT"
] | permissive | oleksandrkarasov/test_task | 9e35f057cb334719146488035feec85e82ac3913 | d289114d07a2858155348e0e9bcf022046de93de | refs/heads/main | 2023-08-07T12:02:18.937911 | 2021-10-09T12:39:53 | 2021-10-09T12:39:53 | 413,596,848 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,573 | r | Task 3 - sentiment dynamics.R | # This code is created for sentiment polarity assessment
# with sentimentr package.
# Author: Oleksandr Karasov, R community and contributors
# install.packages("sentimentr")
# install.packages("tidyverse")
# install.packages("lubridate")
library(ggplot2)
library(dplyr)
library(hrbrthemes)
library(sentimentr)
library(tidyverse)
library(lubridate)
library(zoo)
# Open data (working directory is assumed)
twitter_df = read.csv("twitter_sample_data.csv", header= TRUE, encoding="UTF-8")
head(twitter_df)
sentiment=sentiment_by(twitter_df$text1)
summary(sentiment$ave_sentiment)
qplot(sentiment$ave_sentiment, geom="histogram",binwidth=0.1,main="Twitter Sentiment Histogram")
twitter_df$ave_sentiment<-sentiment$ave_sentiment
twitter_df$sd_sentiment<-sentiment$sd
str(twitter_df)
twitter_df$rand_id<-as.factor(twitter_df$rand_id)
# Median calculation for tweets by rand_id
twitter_df<-twitter_df%>%
group_by(rand_id)%>%
mutate(ave_sentiment_median=median(ave_sentiment,na.rm=FALSE))%>%
ungroup()
# Twitter users ranked by sentiment polarity of their tweets
windows(45,15)
ggplot(twitter_df, aes(x=reorder(rand_id,ave_sentiment_median)))+
geom_violin(aes(y=ave_sentiment),fill="purple", width=1.5, color="#502c6b")+
theme_minimal()+
theme(axis.text.x=element_text(size=8,angle=90),
axis.title=element_text(size=11))+
ggtitle("Twitter's sentiment polarity")+
xlab("User ID")+
ylab("Sentiment score")
# Plot by date
# Get the date column
twitter_df$date<-ymd_hms(twitter_df$created_at)
twitter_df$my<-format(twitter_df$date, format="%Y-%m")
twitter_df$my<-as.yearmon(twitter_df$my)
# Median ave_sentiment per month
twitter_df<-twitter_df%>%
group_by(my)%>%
mutate(median_sentiment=median(ave_sentiment,na.rm =TRUE),
sd_sentiment=sd(ave_sentiment,na.rm =TRUE))%>%
ungroup()
str(twitter_df)
# Get unique values for plotting
twitter_df2<-twitter_df%>%select(median_sentiment,my,sd_sentiment )%>%unique()
# Final visualisation
windows(30,20)
ggplot(data=twitter_df2,aes(x=my))+
geom_segment(aes(x = my, y = (median_sentiment-sd_sentiment), xend = my,
yend = (median_sentiment+sd_sentiment), colour = "segment"), size=3, color='#ededed', alpha=0.5)+
geom_point(aes(y=median_sentiment), color='#ed2b2b', size=2)+
geom_line(aes(y=median_sentiment),color='#ed2b2b', alpha=0.5)+
theme_minimal()+
scale_x_yearmon()+
xlab("Date") +
ylab("Median sentiment polarity per month")+
theme(axis.text=element_text(size=10),
axis.title=element_text(size=12))
|
4ad2303e37aa983a7b7105f07c37143f8877c96e | 5e6fce7ad124d9f2fa240496aad5eaf545c7f636 | /R/test5.R | 7c985cf326ee3bc9c2e32731ba19ac507763144e | [] | no_license | HeWei-imagineer/FormerCode | 656a121b3a381796d62ab5499453d8d21a957ba2 | a4989157b02f46ab0ae40105c4926265a41ae315 | refs/heads/master | 2021-09-11T15:28:07.384228 | 2018-04-09T08:45:23 | 2018-04-09T08:45:23 | 70,399,895 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 609 | r | test5.R | mushrooms <- read.csv("E:\\大三\\R语言与机器学习\\5\\mushrooms.csv",stringsAsFactors = TRUE)
str(mushrooms)
mushrooms$veil_type <-NULL
table(mushrooms$type)
install.packages("RWeka")
library(RWeka)
head(mushrooms)
mushroom_1R <- OneR(type~.,data = mushrooms)
mushroom_1R
summary(mushroom_1R)
# Why is the result the same as before?
mushroom_1R <- OneR(type~ odor + cap_color,data = mushrooms)
mushroom_1R
summary(mushroom_1R)
mushroom_JRiP <- JRip(type~.,data = mushrooms)
mushroom_JRiP
# Every JRip rule here picks out a poisonous class; this is a peculiarity of this dataset, and more complex data remain to be examined
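# A cross-validated error estimate is more trustworthy than resubstitution
# accuracy; RWeka provides an evaluator (10-fold CV on the JRip model above):
evaluate_Weka_classifier(mushroom_JRiP, numFolds = 10)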
|
367fb8bff286b22788430c7ed843a31e467704a8 | 348151912a328dde6ce0c1254bec5524da555590 | /other/test-rtweet.R | 0cec7c087c2fcc99cf409810d97fa53749b07926 | [
"MIT"
] | permissive | rwright88/rwmisc | ceabf91a13c3280da34c64579622c38fc61e66dc | 27245180b0ef94e1b4af4221e2c5558dc2d214c5 | refs/heads/master | 2020-04-24T11:48:13.618043 | 2019-12-11T19:27:18 | 2019-12-11T19:27:18 | 171,937,873 | 0 | 0 | NOASSERTION | 2019-12-11T19:27:20 | 2019-02-21T20:09:47 | R | UTF-8 | R | false | false | 805 | r | test-rtweet.R | #' Calculate twitter score
#'
#' @param user Screen name or user ID
#' @return Double
#' @export
twt_user_score <- function(user) {
followers <- rtweet::get_followers(user, n = 75000, retryonratelimit = TRUE)
n_follows <- nrow(followers)
limit <- 90000
if (n_follows <= limit) {
data <- rtweet::lookup_users(followers$user_id)
} else {
    n_batches <- ceiling(n_follows / limit)
    # pre-allocate one list element per batch (was vector("list", length(n_batches)),
    # which always allocated a single element)
    data <- vector("list", n_batches)
    for (i in seq_len(n_batches)) {
      first <- (i - 1) * limit + 1
      last <- min(first + limit - 1, n_follows)  # was n_files, which is undefined
ids <- followers$user_id[first:last]
data[[i]] <- rtweet::lookup_users(ids)
}
data <- data.table::rbindlist(data)
}
data <- data[data$friends_count > 0, ]
out <- sum(data$followers_count / data$friends_count)
out
}
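# Example (a sketch; needs rtweet authentication and, for large accounts, will
# wait on rate limits):
# twt_user_score("rstudio")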
|
85e634e75fbda0f92705f88e0b63f5a090d6acc6 | bc6c0bdfa801eabf8922ae5a33baa528ac1dea10 | /Hackathon1/Hackathon2.R | 21e98268f1462a3150f29982b2edabd92cac703d | [] | no_license | a12jain/R-Projects | e078631ebb1273579bbe9bf72974b3914d60a0fe | 81fc639076ea0d5d892df471957cecb835e6f337 | refs/heads/master | 2020-12-28T10:13:59.909650 | 2020-07-07T18:06:47 | 2020-07-07T18:06:47 | 238,285,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,241 | r | Hackathon2.R | library(caTools)
library(rpart)
library(rpart.plot)
setwd("D:/Great Learning/HackAll")
data.xl <- readxl::read_xlsx("train_set_v1.xlsx")
head(data.xl)
str(data.xl)
summary(data.xl)
colSums(is.na(data.xl))
sum(is.na(data.xl))
str(data.xl)
names(data.xl)
data_new <- data.xl[,-c(1,3,17)]
colSums(is.na(data_new))
sum(is.na(data_new))
str(data_new)
data_new <-na.omit(data_new)
str(data_new)
colSums(is.na(data_new))
# str(data.csv)   # data.csv is never defined in this script
# ??dummyfy       # interactive help searches, commented out so the file can be sourced
# ??prediction
data_new$marital <- as.factor(data_new$marital)
data_new$education <- as.factor(data_new$education)
data_new$default <- as.factor(data_new$default)
data_new$housing <- as.factor(data_new$housing)
data_new$loan <- as.factor(data_new$loan)
data_new$contact <- as.factor(data_new$contact)
data_new$month <- as.factor(data_new$month)
data_new$day_of_week <- as.factor(data_new$day_of_week)
data_new$poutcome <- as.factor(data_new$poutcome)
data_new$Term<- as.factor(data_new$Term)
str(data_new)
# Cart 1
cartModel <- rpart(data_new$Term~.,data = data_new,method = "class")
cartModel
prp(cartModel)
rpart.plot(cartModel)
cartTrain <- predict(cartModel, data_new, type = "class")
cartTrain
str(cartTrain)
caret::confusionMatrix(cartTrain,data_new$Term)
cartTest <- predict(cartModel, testData, type = "class")
caret::confusionMatrix(cartTest,testData$Term)
Test_data<- read.csv("test_set.csv")
Test_data.xl <- readxl::read_xlsx("test_set.xlsx")
TestData <- Test_data.xl[,-c(1,3,17)]
str(TestData)
TestData$marital <- as.factor(TestData$marital)
TestData$education <- as.factor(TestData$education)
TestData$default <- as.factor(TestData$default)
TestData$housing <- as.factor(TestData$housing)
TestData$loan <- as.factor(TestData$loan)
TestData$contact <- as.factor(TestData$contact)
TestData$month <- as.factor(TestData$month)
TestData$day_of_week <- as.factor(TestData$day_of_week)
TestData$poutcome <- as.factor(TestData$poutcome)
str(TestData)
str(data_new)
CartUnseen <- predict(cartModel, newdata = TestData ,type = "class")
View(CartUnseen)
predicted <-cbind(Test_data$ID,CartUnseen)
View(predicted)
predicted<-as.data.frame(predicted)
predicted$CartUnseen
predicted$CartUnseen <-ifelse(predicted$CartUnseen == "1",0,1)
colnames(predicted) <- c("ID","Term")
str(predicted)
#Cart 2
ctrl <- rpart.control(minsplit = 180,xval = 100)
cartModel2 <- rpart(trainData$Term~., data = trainData,control = ctrl,method = "class")
cartModel2
prp(cartModel2)
rpart.plot(cartModel2)
cartTrain2 <- predict(cartModel2, trainData, type = "class")
cartTrain2
caret::confusionMatrix(cartTrain2,trainData$Term)
str(cartTrain2)
cartTest2 <- predict(cartModel2, testData, type = "class")
caret::confusionMatrix(cartTest2,testData$Term)
CartUnseen2 <- predict(cartModel2, newdata = TestData ,type = "class")
View(CartUnseen2)
predicted2 <-cbind(Test_data$ID,CartUnseen2)
View(predicted2)
predicted2<-as.data.frame(predicted2)
predicted2$CartUnseen2
predicted2$CartUnseen2 <-ifelse(predicted2$CartUnseen2 == "1",0,1)
colnames(predicted2) <- c("ID","Term")
#Cart 3
ctrl <- rpart.control(minsplit = 100,xval = 100)
cartModel3 <- rpart(trainData$Term~., data = trainData,control = ctrl,method = "class")
cartModel3
prp(cartModel3)
rpart.plot(cartModel3)
cartTrain3 <- predict(cartModel3, trainData, type = "class")
cartTrain3
caret::confusionMatrix(cartTrain3,trainData$Term)
str(cartTrain3)
cartTest3 <- predict(cartModel3, testData, type = "class")
caret::confusionMatrix(cartTest3,testData$Term)
CartUnseen3 <- predict(cartModel3, newdata = TestData ,type = "class")
#View(CartUnseen3)
predicted3 <-cbind(Test_data$ID,CartUnseen3)
#View(predicted3)
predicted3<-as.data.frame(predicted3)
predicted3$CartUnseen3
predicted3$CartUnseen3 <-ifelse(predicted3$CartUnseen3 == "1",0,1)
colnames(predicted3) <- c("ID","Term")
write.csv(predicted3,"D:/Great Learning/HackAll/predicted3.csv")
#Cart4
printcp(cartModel3)
cartprune <- prune(cartModel3,cp = 0.010412,"CP")
prunepred <- predict(cartprune,newdata = TestData, type = "class")
predicted4 <-cbind(Test_data$ID,prunepred)
#View(predicted3)
predicted4<-as.data.frame(predicted4)
predicted4$prunepred <-ifelse(predicted4$prunepred == "1",0,1)
colnames(predicted4) <- c("ID","Term")
write.csv(predicted4,"D:/Great Learning/HackAll/predicted4.csv")
RFModel <- randomForest::randomForest(Term~.,data = trainData,type = "class", mtry = 3,
nodesize = 10, ntree= 1200, importance = FALSE)
summary(RFModel)
rfpred <- predict(RFModel, newdata = trainData,type = "class")
caret::confusionMatrix(rfpred,trainData$Term)
rfpred.Test <- predict(RFModel,newdata = testData,type = "class")
caret::confusionMatrix(rfpred.Test,testData$Term)
names(testData)
## build the unseen set used below; rbind-ing one training row and dropping it
## aligns TestData's factor levels with those the model saw
unseen_test <- testData[,-19]
unseen_test <- rbind(unseen_test[1,], TestData)
unseen_test <- unseen_test[-1,]
View(unseen_test)
rfpred.unseen <- predict(RFModel, newdata = unseen_test, type = "class")
View(rfpred.unseen)
rfFinal <-cbind(Test_data$ID,rfpred.unseen)
#View(predicted3)
rfFinal<-as.data.frame(rfFinal)
View(rfFinal)
rfFinal$rfpred.unseen
rfFinal$rfpred.unseen <-ifelse(rfFinal$rfpred.unseen == "1",0,1)
colnames(rfFinal) <- c("ID","Term")
View(rfFinal)
write.csv(rfFinal,"D:/Great Learning/HackAll/rffinal.csv")
library(randomForest)
names(trainData)
tRF <- tuneRF(x = trainData[,-19],
y=trainData$Term,
mtryStart = 3,
ntreeTry=15000,
stepFactor = 1.5,
improve = 0.0001,
trace=TRUE,
plot = TRUE,
doBest = TRUE,
nodesize = 50,
importance=TRUE
)
?tuneRF
summary(tRF)
summary(tRF$forest)
rfpred2 <- predict(tRF,trainData,type = "class")
caret::confusionMatrix(trainData$Term,rfpred2)
rfpredTest2 <- predict(tRF,testData, type = "class")
caret::confusionMatrix(testData$Term,rfpredTest2)
unseen_test <- rbind(testData[1,-19],completeDData)
unseen_test<-rbind(unseen_test[-1,])
rfFinal2 <- predict(tRF,newdata = unseen_test,type = "class")
rfFinal2.sol<- cbind(Test_data$ID,rfFinal2)
View(rfFinal2.sol)
rfFinal2.sol<- as.data.frame(rfFinal2.sol)
rfFinal2.sol$rfFinal2 <- ifelse(rfFinal2.sol$rfFinal2 ==1,0,1)
View(rfFinal2.sol)
colnames(rfFinal2.sol) <- c("ID","Term")
View(rfFinal2.sol)
write.csv(rfFinal2.sol,"D:/Great Learning/HackAll/rffinal2.1.csv")
#RF Using train
library(caret)
control <- trainControl(method = "repeatedcv", number = 10, repeats = 3)  # repeats only applies with method = "repeatedcv"
metric <- "Accuracy"
mtry <- 5
tunegrid <- expand.grid(mtry=mtry)
rf.Train <- train(Term~.,data=trainData,method = "rf",metric = metric, tuneGrid = tunegrid,trControl=control)
summary(rf.Train)
# XGBoost
str(trainData)
trainData$marital<- as.numeric(trainData$marital)
trainData$education<- as.numeric(trainData$education)
trainData$default<- as.numeric(trainData$default)
trainData$housing<- as.numeric(trainData$housing)
trainData$loan<- as.numeric(trainData$loan)
trainData$contact<- as.numeric(trainData$contact)
trainData$month<- as.numeric(trainData$month)
trainData$day_of_week<- as.numeric(trainData$day_of_week)
trainData$poutcome<- as.numeric(trainData$poutcome)
trainData$Term<- as.numeric(trainData$Term)
str(trainData)
testData$marital<- as.numeric(testData$marital)
testData$education<- as.numeric(testData$education)
testData$default<- as.numeric(testData$default)
testData$housing<- as.numeric(testData$housing)
testData$loan<- as.numeric(testData$loan)
testData$contact<- as.numeric(testData$contact)
testData$month<- as.numeric(testData$month)
testData$day_of_week<- as.numeric(testData$day_of_week)
testData$poutcome<- as.numeric(testData$poutcome)
testData$Term<- as.numeric(testData$Term)
str(trainData)
trainData$Term <- ifelse(trainData$Term =="1",0,1)
testData$Term <- ifelse(testData$Term =="1",0,1)
features_train<- as.matrix(trainData[,1:18])
label_train <- as.matrix(trainData[,19])
features_test<- as.matrix(testData[,1:18])
library(xgboost)
xgbModel <- xgboost(
data = features_train,
label = label_train,
eta = 0.01,
max_depth = 100,
min_child_weight = 3,
nrounds = 1000,
nfold = 10,
objective = "reg:logistic",
verbose = 0,
early_stopping_rounds = 10)
summary(xgbModel)
# Performing Prediction on Train data.
#str(label_train)
xgb.train.pred<- predict(xgbModel, newdata = features_train)
xgb.train.pred1 <- ifelse(xgb.train.pred<.4,0,1)
xgb.train.pred1<- as.factor(xgb.train.pred1)
trainData$Term <- as.factor(trainData$Term)
#str(trainData$Term)
#str(xgb.train.pred1)
#dim(trainData$Term)
#dim(p)
caret::confusionMatrix(xgb.train.pred1, trainData$Term)
xgb.test.pred <- predict(xgbModel,newdata = features_test)
xgb.test.pred1 <- ifelse(xgb.test.pred<.4,0,1)
xgb.test.pred1<- as.factor(xgb.test.pred1)
testData$Term <- as.factor(testData$Term)
#str(testData)
caret::confusionMatrix(xgb.test.pred1, testData$Term)
unseen_test <- rbind(testData[1,-19],completeDData)
unseen_test<-rbind(unseen_test[-1,])
str(unseen_test)
unseen_test$marital<- as.numeric(unseen_test$marital)
unseen_test$education<- as.numeric(unseen_test$education)
unseen_test$default<- as.numeric(unseen_test$default)
unseen_test$housing<- as.numeric(unseen_test$housing)
unseen_test$loan<- as.numeric(unseen_test$loan)
unseen_test$contact<- as.numeric(unseen_test$contact)
unseen_test$month<- as.numeric(unseen_test$month)
unseen_test$day_of_week<- as.numeric(unseen_test$day_of_week)
unseen_test$poutcome<- as.numeric(unseen_test$poutcome)
str(unseen_test)
mat.given <- as.matrix(unseen_test)
xgbFinal <- predict(xgbModel,newdata = mat.given)
xgbFinal1 <- ifelse(xgbFinal<.5,0,1)
xgbFinal.sol<- cbind(Test_data$ID,xgbFinal1)
View(xgbFinal.sol)
xgbFinal.sol<- as.data.frame(xgbFinal.sol)
xgbFinal.sol$xgbFinal1<- ifelse(xgbFinal.sol$xgbFinal1 ==1,0,1)
View(xgbFinal.sol)
colnames(xgbFinal.sol) <- c("ID","Term")
View(xgbFinal.sol)
write.csv(xgbFinal.sol,"D:/Great Learning/HackAll/xgbFinal.sol.csv")
str(trainData)
xgbModel2 <- train(Term~.,trainData,
trControl = trainControl("cv", number = 15),method = "xgbTree")
summary(xgbModel2)
xgb2.train.pred <- predict(xgbModel2,trainData)
caret::confusionMatrix(trainData$Term,xgb2.train.pred)
xgb2.test.pred <- predict(xgbModel2,testData)
caret::confusionMatrix(xgb2.test.pred, testData$Term)
xgbFinal2 <- predict(xgbModel2, unseen_test)
xgbFinal2.sol<- cbind(Test_data$ID,xgbFinal2)
View(xgbFinal2.sol)
xgbFinal2.sol<- as.data.frame(xgbFinal2.sol)
xgbFinal2.sol$xgbFinal2<- ifelse(xgbFinal2.sol$xgbFinal2 ==1,0,1)
View(xgbFinal2.sol)
colnames(xgbFinal2.sol) <- c("ID","Term")
View(xgbFinal2.sol)
write.csv(xgbFinal2.sol,"D:/Great Learning/HackAll/xgbFinal2.sol.csv")
logitModel<- glm(Term~., data = trainData,family = "binomial")
summary(logitModel)
logit.pred.train <- predict(logitModel, trainData,type = "response")
logit.pred.train1 <- ifelse(logit.pred.train<.5,0,1)
logit.pred.train1<- as.factor(logit.pred.train1)
caret::confusionMatrix(trainData$Term,logit.pred.train1)
logit.pred.test <- predict(logitModel, newdata = testData,type = "response")
logit.pred.test1 <- ifelse(logit.pred.test<.5,0,1)
logit.pred.test1<- as.factor(logit.pred.test1)
caret::confusionMatrix(logit.pred.test1,testData$Term)
View(completeDData)
str(trainData)
given.logit.pred <- predict(logitModel, newdata = unseen_test,type = "response")
given.logit.pred1 <- ifelse(given.logit.pred<.5,0,1)
logitFinal.sol<- cbind(Test_data$ID,given.logit.pred1)
View(logitFinal.sol)
logitFinal.sol<- as.data.frame(logitFinal.sol)
xgbFinal2.sol$xgbFinal2<- ifelse(xgbFinal2.sol$xgbFinal2 ==1,0,1)
View(logitFinal.sol)
colnames(logitFinal.sol) <- c("ID","Term")
View(logitFinal.sol)
write.csv(logitFinal.sol,"D:/Great Learning/HackAll/logitFinal.sol.csv")
library(neuralnet)  # neuralnet is used below but was never attached
nn.dev <- neuralnet(Term~.,
data = trainData,
#hidden = c(4,2),
err.fct = "sse",
linear.output = FALSE,
lifesign = "full",
lifesign.step = 1,
threshold = .09,
stepmax = 2000
)
plot(nn.dev)
quantile(nn.dev$net.result[[1]], c(0,1,5,10,25,50,75,90,95,99,100)/100)
misClassTable = data.frame(trainData$Term, Predict.score = nn.dev$net.result[[1]] )
misClassTable$Predict.class = ifelse(misClassTable$Predict.score>0.6,1,0)
with(misClassTable, table(trainData.Term, Predict.class)) |
018ff48a2c3061c597917d9a9232e3d831ad2d25 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/crmPack/examples/maxSize.Rd.R | 91b9d731f933e0b90792308f84a1c761a607b52c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 766 | r | maxSize.Rd.R | library(crmPack)
### Name: maxSize
### Title: "MAX" combination of cohort size rules
### Aliases: maxSize maxSize,CohortSize-method
### Keywords: methods
### ** Examples
# Here is the rule for:
# having cohort of size 1 for doses <30
# and having cohort of size 3 for doses >=30
mySize1 <- CohortSizeRange(intervals = c(0, 30),
cohortSize = c(1, 3))
# Here is the rule for:
# having cohort of size 1 as long as no DLTs have been observed
# and having cohort of size 3 as soon as 1 DLT is observed
mySize2 <- CohortSizeDLT(DLTintervals=c(0, 1),
cohortSize=c(1, 3))
# This is combining the two rules above by taking the maximum of the sample sizes of
# the single rules
mySize <- maxSize(mySize1, mySize2)
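# The combined rule can be queried like any CohortSize rule via crmPack's size()
# method (a sketch; `myData` stands in for a crmPack Data object):
# size(mySize, dose = 20, data = myData)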
|
fad0f7652558c195e2927ada7d905b814bd1f3b4 | a5b8881eed15254315a3309835f3061aca563d0d | /server.R | b86e2e1d119e4189dfb2b7e42e759a3cd3e60c08 | [] | no_license | finite2/phaseOne | 965b3cbf8d99e1024a5134f877771c573b0bdfae | 960f259ba54fdde048087c7f60bfb4e9067c8314 | refs/heads/master | 2020-07-22T22:42:02.946987 | 2016-08-25T11:01:21 | 2016-08-25T11:01:21 | 66,348,566 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 360 | r | server.R |
# Define server logic for random distribution application
library(shiny)
library(shinyjs)
shinyServer(function(input, output){
useShinyjs(html = TRUE)
extendShinyjs(script = "www/js/rjs.js")
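  # js$addPatients below is a custom JavaScript function defined in www/js/rjs.js
  # and exposed to R by the extendShinyjs() call above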
observe({
if(!is.null(input$button)){
if(input$button > 0){
isolate({
js$addPatients(10)
})
}
}
})
}) |
b7aae1ce57b7e8b6d656b815e993b1c91d576263 | 3fdfbd6728f19b4221129cc0d9637e2f1a4e0f3b | /R/translate_value.R | d72011702f35e738852ba2fb38032b2a85aa55e5 | [] | no_license | billdenney/bsd.report | 09e2b5642046ee51956d33199d6e1ae670b424ff | 778f0298da10f6369bd7ffaa1a5c20d3f8f691fa | refs/heads/main | 2023-02-19T22:49:25.273961 | 2023-02-15T21:54:41 | 2023-02-15T21:54:41 | 153,116,819 | 3 | 0 | null | 2023-02-15T22:57:48 | 2018-10-15T13:21:55 | R | UTF-8 | R | false | false | 1,254 | r | translate_value.R | #' Convert a value from something old to something new (using gsub)
#'
#' @param x The object to translate
#' @param old,new The old and new character strings
#' @param ... Passed to gsub
#' @param fixed Passed to gsub
#' @param exclude_col A vector of columns to exclude from data.frame translation
#' @return The object with old converted to new
#' @export
translate_value <- function(x, old, new, ...) {
UseMethod("translate_value")
}
#' @describeIn translate_value Convert all columns (unless excluded)
#' @export
#' @importFrom dplyr mutate_at
translate_value.data.frame <- function(x, old, new, ..., exclude_col=NULL) {
dplyr::mutate_at(
.tbl=x,
.vars=setdiff(names(x), exclude_col),
.funs=translate_value,
old=old, new=new, ...
)
}
#' @describeIn translate_value Convert the levels
#' @export
translate_value.factor <- function(x, old, new, ...) {
levels(x) <- translate_value(levels(x), old, new, ...)
x
}
#' @describeIn translate_value Use gsub.
#' @export
translate_value.character <- function(x, old, new, ..., fixed=TRUE) {
gsub(pattern=old, replacement=new, x=x, fixed=fixed, ...)
}
#' @describeIn translate_value No translation done.
#' @export
translate_value.default <- function(x, old, new, ...) {
x
}
|
8a37a5f8db575fcf5c8e24e9a2720895baf7679e | a50e8e51cc49dc6624fc5f9c35ecedc46a7ac2ed | /R/get_sample_files_all.R | ee058773fa9677ae6a80571fa1b9431e6daf668e | [] | no_license | HughParsonage/taxstats | 7a98779d3ec2b2c97b3a78b0a56bcfeb0389661e | 330e0df1e6242c2836afa4dc497b8454939ef53e | refs/heads/master | 2020-04-16T23:38:05.842037 | 2019-11-15T10:38:09 | 2019-11-15T10:38:09 | 51,502,209 | 4 | 1 | null | 2019-06-14T11:49:49 | 2016-02-11T08:24:44 | R | UTF-8 | R | false | false | 2,683 | r | get_sample_files_all.R | #' Get the object containing all sample files
#'
#' @return All sample files bound by row \code{sample_files_all}. See \code{?sample_files}.
#'
#' \code{get_sample_files_all2()} is different in two ways: (1) uses a \code{fy_year} name,
#' instead of \code{fy.year}, and (2) uses the same names as in \code{sample_file_1314}.
#'
#' @import data.table
#' @export get_sample_files_all
get_sample_files_all <- function(){
if (!requireNamespace("data.table", quietly = TRUE)) {
stop("Attach the data.table package.")
} else {
fy.year <- NULL
WEIGHT <- NULL
sample_files_all <-
rbindlist(lapply(list("2003-04" = sample_file_0304, # <-get_sample_file(2004)
"2004-05" = sample_file_0405, # <-get_sample_file(2005)
"2005-06" = sample_file_0506, # <-get_sample_file(2006)
"2006-07" = sample_file_0607, # <-get_sample_file(2007)
"2007-08" = sample_file_0708, # <-get_sample_file(2008)
"2008-09" = sample_file_0809, # <-get_sample_file(2009)
"2009-10" = sample_file_0910, # <-get_sample_file(2010)
"2010-11" = sample_file_1011, # <-get_sample_file(2011)
"2011-12" = sample_file_1112, # <-get_sample_file(2012)
"2012-13" = sample_file_1213, # <-get_sample_file(2013))
"2013-14" = sample_file_1314),
as.data.table),
use.names = TRUE,
fill = TRUE,
idcol = "fy.year")
wt_50 <- .subset2(sample_files_all, "fy.year") > "2010-11"
sample_files_all$WEIGHT <- ifelse(wt_50, 50L, 100L)
sample_files_all
}
}
#' @rdname get_sample_files_all
#' @export
get_sample_files_all2 <- function() {
standardize <- function(yr) {
suffix <- paste0(substr(yr - 1L, 3, 4),
substr(yr, 3, 4))
object_name <- paste0("sample_file_", suffix)
DT <- getExportedValue("taxstats", object_name)
out <- as.data.table(DT)
out[, "fy_year" := grattan::yr2fy(yr)]
if (yr > 2011L) {
out[, WEIGHT := 50L]
} else {
out[, WEIGHT := 100L]
}
do_setnames <- function(dt, old, new) {
if (old %in% names(dt)) {
setnames(dt, old, new)
}
dt
}
do_setnames(out, "Birth_year", "age_range")
do_setnames(out, "Marital_status", "Partner_status")
do_setnames(out, "HECS_accum_ind", "Help_debt")
out
}
rbindlist(lapply(2004:2014, standardize),
use.names = TRUE,
fill = TRUE)
}
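## Example usage (a sketch; requires the taxstats sample files to be installed):
## all_sf <- get_sample_files_all2()
## all_sf[, .N, keyby = fy_year]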
|
7364ef1171544ab2285c11427ce862072c11e949 | 0ec1de41844954d397da2553e03c7001e32109a6 | /R/simple_linear_regression.R | 84ddb4906a85168f9c8876650fc4147639a22975 | [] | no_license | Irving-Estrada/Machine-Learning | d5bf31dd49d2ff4ba4123a678eeac241ba312a8a | bbe6d0281d9990ec95c63fb4908d2db7ae79e684 | refs/heads/master | 2022-05-03T23:39:16.731933 | 2022-04-01T02:54:11 | 2022-04-01T02:54:11 | 252,332,262 | 0 | 0 | null | null | null | null | ISO-8859-10 | R | false | false | 1,753 | r | simple_linear_regression.R | #Regresion lineal simple
# Import the dataset
dataset = read.csv('Salary_Data.csv')
# Load the library and split into training and testing sets
library(caTools)
set.seed(123)
split = sample.split(dataset$Salary, SplitRatio = 2/3)
training_set = subset(dataset, split == TRUE)
testing_set = subset(dataset, split == FALSE)
# Using summary(regressor) we can see many details of our linear regression and
# how good it is; the most important part is the coefficients: statistical
# significance is marked next to each coefficient with asterisks, three (***) being the maximum
regressor = lm(formula = Salary ~ YearsExperience,
data = training_set)
# Predict results on the test set
# Here we use the fitted prediction model (regressor) and compare it against testing_set
# Visualization of the data in the training set
#install.packages("ggplot2")
library(ggplot2)
ggplot()+
geom_point(aes(x = training_set$YearsExperience, y = training_set$Salary),
colour = "red")+
geom_line(aes(x =training_set$YearsExperience,
y = predict(regressor, newdata = training_set)),
colour = "blue")+
ggtitle("Sueldo vs Aņos de experiencia ") +
xlab("Aņos de experiencia")+
ylab("Sueldo")
# Visualization of the testing set results
ggplot()+
geom_point(aes(x = testing_set$YearsExperience, y = testing_set$Salary),
colour = "red")+
geom_line(aes(x =training_set$YearsExperience,
y = predict(regressor, newdata = training_set)),
colour = "blue")+
ggtitle("Sueldo vs Aņos de experiencia (Testing)") +
xlab("Aņos de experiencia")+
ylab("Sueldo")
|
aa81861e7a039ff1515e79746a883541b87f0499 | bc5aa2493a04fab4ab76a54c135b6c91bb50a921 | /hw10/phoneme_svm.R | 03101a17773eaf28e3ff93514f57728f919cf23d | [] | no_license | reking/stat852 | b3caba3d5ecfbb1e61874e955cb3e43b2fbdcd1d | e704cfe9f4f49b43fda64211a78188bf9cfabc97 | refs/heads/master | 2021-01-10T05:04:34.782068 | 2016-01-08T06:08:18 | 2016-01-08T06:08:18 | 44,140,619 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,844 | r | phoneme_svm.R | phoneme <- read.table("~/stat852/data/phoneme.csv", header=TRUE, sep=",", na.strings=" ")
kernels <- cbind("aa","ao","dcl", "iy", "sh")
gammao <- 10^(-3:0)
costa <- 10^(-3:1)
degrea <- cbind(1,2,3)
coefa <- cbind(0,1,5)
iter=5
phoneme.val <- matrix(NA,nrow = length(degrea)* length(coefa)* length(gammao) * length(costa),ncol=iter+4)
phoneme.train <- matrix(NA,nrow = length(degrea)* length(coefa)* length(gammao) * length(costa),ncol=iter+4)
library(e1071)
for(i in 1:iter)
{
resamp <- sample.int(n=nrow(phoneme), size= nrow(phoneme), replace=TRUE)
x.r <- phoneme[resamp,2:257]
y.r <- phoneme[resamp,258]
x.p <- phoneme[-unique(resamp),2:257]
y.p <- phoneme[-unique(resamp),258]
print("Start working!")
ii = 1
for(gammap in gammao)
for(costo in costa)
for(degreo in degrea)
for(coefo in coefa)
{
phoneme.val[ii,1:4] <- c(gammap,costo,degreo,coefo)
phoneme.train[ii,1:4] <- c(gammap,costo,degreo,coefo)
phoneme.svm <- svm(data=data.frame(x.r,type=y.r), type ~ ., kernel="polynomial", gamma=gammap, cost=costo, degree=degreo, coef0 = coefo)
pred.train <- predict(phoneme.svm, newdata=x.r)
phoneme.train[ii,i+4] <- mean(ifelse(pred.train == y.r, yes=0, no=1))
pred.val <- predict(phoneme.svm, newdata=x.p)
phoneme.val[ii,i+4] <- mean(ifelse(pred.val == y.p, yes=0, no=1))
ii <- ii + 1
print("iteration finished once!")
}
print("finish one loop!")
Mean_val <- rowMeans(phoneme.val[,-c(1,4)])
best_index <- which.min(Mean_val)
best_para <- phoneme.val[best_index,1:4]
write.csv(phoneme.val,"~/stat852/hw10/svm_val.csv")
write.csv(phoneme.train,"~/stat852/hw10/svm_train.csv")
}
|
d0f3f675d044e67c3b22fe7594af441b885300e8 | 653680dc0e122ca7531e1f9ef67c697a09886981 | /src/data/Get_data.R | cbfa2a0d9fdb5fb65deb7d9f4948c88fd7306deb | [] | no_license | lifewatch/Tripod-frame_Performance-test | 00088d32f9319e0e26b7fee6afef117a0bcb00c1 | bf33ef4ec5da2a898ca2c03d28cf8edc25d01f77 | refs/heads/master | 2022-11-10T11:23:27.561626 | 2020-06-22T14:10:18 | 2020-06-22T14:10:18 | 252,498,984 | 0 | 0 | null | 2020-06-22T14:10:19 | 2020-04-02T15:48:28 | R | UTF-8 | R | false | false | 759 | r | Get_data.R |
#############################
# Get data and make folders #
#############################
# Jolien Goosens - Flanders Marine Institute (VLIZ) / Marine Biology Research Group, Ghent University (Marbiol)
# R version 3.6.2
#### Function to create directory ####
mkdirs <- function(fp) {
if(!dir.exists(fp)) {
dir.create(fp)
}
}
#### Create directories ####
mkdirs("data")
mkdirs("data/external")
mkdirs("data/interim")
mkdirs("data/processed")
mkdirs("data/raw")
mkdirs("reports")
mkdirs("reports/figures")
#### Get detection data, tilt and noise measurements and receiver metadata #####
# DOI: https://doi.org/10.14284/404
# Download data and save in data/raw
#### Mapping data ####
# Shape files originate from MarineRegions.org and EMODnet.
|
ed1812621474be55ea3adff0154ccfc446d60836 | 6464efbccd76256c3fb97fa4e50efb5d480b7c8c | /paws/man/imagebuilder_cancel_image_creation.Rd | 7cbfd78c99e31c3e8f9d534d5ca3155df0ee0601 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | johnnytommy/paws | 019b410ad8d4218199eb7349eb1844864bd45119 | a371a5f2207b534cf60735e693c809bd33ce3ccf | refs/heads/master | 2020-09-14T23:09:23.848860 | 2020-04-06T21:49:17 | 2020-04-06T21:49:17 | 223,286,996 | 1 | 0 | NOASSERTION | 2019-11-22T00:29:10 | 2019-11-21T23:56:19 | null | UTF-8 | R | false | true | 839 | rd | imagebuilder_cancel_image_creation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imagebuilder_operations.R
\name{imagebuilder_cancel_image_creation}
\alias{imagebuilder_cancel_image_creation}
\title{CancelImageCreation cancels the creation of Image}
\usage{
imagebuilder_cancel_image_creation(imageBuildVersionArn, clientToken)
}
\arguments{
\item{imageBuildVersionArn}{[required] The Amazon Resource Name (ARN) of the image whose creation you wish to
cancel.}
\item{clientToken}{[required] The idempotency token used to make this request idempotent.}
}
\description{
CancelImageCreation cancels the creation of Image. This operation may
only be used on images in a non-terminal state.
}
\section{Request syntax}{
\preformatted{svc$cancel_image_creation(
imageBuildVersionArn = "string",
clientToken = "string"
)
}
}
\keyword{internal}
|
3a3918a5bac5d1aef4e7522ede2a779043730374 | ed995d2f8a8e1bf3f5821cdb4ba424d70aca6e43 | /dashboard/server.R | f332f1e86fa118f35a58dc3cc65094f4ad1d762c | [
"MIT"
] | permissive | OHI-Baltic/bhi-shiny | ca71437e2ac9918cad8f77bd98b46c077006a75e | be4378b1308e5b7dc25756272a5b94c33d2ff022 | refs/heads/master | 2021-11-09T02:16:10.554772 | 2021-08-11T09:31:48 | 2021-08-11T09:31:48 | 221,214,807 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 33,135 | r | server.R | function(input, output, session){
spatial_unit <- reactive({input$spatial_unit})
dimension <- reactive({input$dimension})
view_year <- reactive({input$view_year})
# legend_title <-
## WELCOME ----
## flowerplot
values <- reactiveValues(flower_rgn = 0)
observeEvent(
eventExpr = input$flower_rgn, {
values$flower_rgn <- input$flower_rgn
## update flowerplot based on selection
callModule(
flowerplotCard,
"baltic_flowerplot",
dimension = "score",
flower_rgn_selected = reactive(values$flower_rgn)
)
}, ignoreNULL = FALSE
)
## video intro
output$iframe_video <- renderUI({
src = "https://www.youtube.com/embed/3g6Xfq9FOrU"
tags$iframe(src=src, width="515", height="290", frameborder="0", allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture")
})
## map with reactivity
output$index_map <- renderLeaflet({
## create leaflet map with popup text
result <- leaflet_map(
full_scores_lst,
spatial_unit(),
"Index",
dim = dimension(),
year = assess_year,
"Index Scores"
)
popup_text <- paste(
"<h5><strong>", "Score", "</strong>",
result$data_sf[["score"]], "</h5>",
"<h5><strong>", "Name", "</strong>",
result$data_sf[["Name"]], "</h5>", sep = " "
)
result$map %>% addPolygons(popup = popup_text, fillOpacity = 0, stroke = FALSE)
})
## selected-region overlay, based on spatial unit and flowerplot region
select_region <- reactive({
rgn_select <- values$flower_rgn
rgns_shp[rgns_shp@data$BHI_ID == rgn_select,]
})
observe({
select_region()
leafletProxy("index_map") %>%
addPolygons(
layerId = "selected",
data = select_region(),
stroke = FALSE, opacity = 0,
fillOpacity = 0.6, color = "magenta",
smoothFactor = 3
)
})
select_subbasin <- reactive({
rgn_select <- values$flower_rgn
subbasins_shp[subbasins_shp@data$HELCOM_ID == str_replace(rgn_select, "5", "SEA-0"),]
})
observe({
    select_subbasin()  # take the reactive dependency on the subbasin selection (was select_region())
leafletProxy("index_map") %>%
addPolygons(
layerId = "selected",
data = select_subbasin(),
stroke = FALSE, opacity = 0,
fillOpacity = 0.6, color = "magenta",
smoothFactor = 3
)
})
## INDEX CALC ----
output$method_figure <- renderImage({
list(
src = file.path(dir_main, "figures", "method_figure.png"),
contentType = "image/jpeg",
width = "675px",
height = "515px"
)
},
deleteFile = FALSE)
output$ohi_dims_figure <- renderImage({
list(
src = file.path(dir_main, "figures", "ohi_dimensions.png"),
contentType = "image/jpeg",
width = "320px",
height = "270px"
)
},
deleteFile = FALSE)
## info table about pressures layers matching to goals
output$prs_matrix = renderDataTable({
datatable(
prs_matrix,
class = "compact order-column strip row-border",
options = list(
dom = "t",
pageLength = 14
),
rownames = FALSE,
escape = FALSE
)
})
## info table about resilience components matching to goals
output$res_matrix = renderDataTable({
datatable(
res_matrix,
class = "compact order-column strip row-border",
options = list(
dom = "t",
pageLength = 14
),
rownames = FALSE,
escape = FALSE
)
})
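  ## NOTE (a sketch of the repeated structure, not code from the app): every goal
  ## section below follows one pattern -- scoreBox, mapCard, barplotCard, a data
  ## table, and a tsplotCard -- and could in principle be generated in a loop:
  # for (g in c("ao", "bd", "cs")) {
  #   callModule(scoreBox, paste0(g, "_infobox"), goal_code = toupper(g))
  # }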
## AO ----
## Artisanal Fishing Opportunity
## overall score box in top right
callModule(
scoreBox,
"ao_infobox",
goal_code = "AO"
)
## map
callModule(
mapCard,
"ao_map",
goal_code = "AO",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "ao_barplot",
goal_code = "AO",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$ao_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "AO") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "AO"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values <- reactiveValues(`ao_tsplot-select` = "cc_sst_bhi2019")
observeEvent(
eventExpr = input$`ao_tsplot-select`, {
values$`ao_tsplot-select` <- input$`ao_tsplot-select`
callModule(
tsplotCard,
"ao_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`ao_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## BD ----
## Biodiversity
## overall score box in top right
callModule(
scoreBox,
"bd_infobox",
goal_code = "BD"
)
## map
callModule(
mapCard,
"bd_map",
goal_code = "BD",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "bd_barplot",
goal_code = "BD",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$bd_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "BD") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "BD"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
  ## (no default timeseries layer is initialized for this goal; a bare "NA"
  ## placeholder stood here where other goal sections call reactiveValues())
observeEvent(
eventExpr = input$`bd_tsplot-select`, {
values$`bd_tsplot-select` <- input$`bd_tsplot-select`
callModule(
tsplotCard,
"bd_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`bd_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## CS ----
## Carbon Storage
## overall score box in top right
callModule(
scoreBox,
"cs_infobox",
goal_code = "CS"
)
## map
callModule(
mapCard,
"cs_map",
goal_code = "CS",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "cs_barplot",
goal_code = "CS",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$cs_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "CS") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "CS"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
  ## (no default timeseries layer is initialized for this goal; a bare "NA"
  ## placeholder stood here where other goal sections call reactiveValues())
observeEvent(
eventExpr = input$`cs_tsplot-select`, {
values$`cs_tsplot-select` <- input$`cs_tsplot-select`
callModule(
tsplotCard,
"cs_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`cs_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## CW ----
## Clean Waters
## overall score box in top right
callModule(
scoreBox,
"cw_infobox",
goal_code = "CW"
)
## map
callModule(
mapCard,
"cw_map",
goal_code = "CW",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "cw_barplot",
goal_code = "CW",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$cw_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "CW") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "CW"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values <- reactiveValues(`cw_tsplot-select` = "secchi_indicator_status")
observeEvent(
eventExpr = input$`cw_tsplot-select`, {
values$`cw_tsplot-select` <- input$`cw_tsplot-select`
callModule(
tsplotCard,
"cw_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`cw_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## CON ----
## Contaminants
## overall score box in top right
callModule(
scoreBox,
"con_infobox",
goal_code = "CON"
)
## map
callModule(
mapCard,
"con_map",
goal_code = "CON",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "con_barplot",
goal_code = "CON",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$con_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "CON") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "CON"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values <- reactiveValues(`con_tsplot-select` = "cw_con_pfos_bhi2019_bio")
observeEvent(
eventExpr = input$`con_tsplot-select`, {
values$`con_tsplot-select` <- input$`con_tsplot-select`
callModule(
tsplotCard,
"con_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`con_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## EUT ----
## Eutrophication
## overall score box in top right
callModule(
scoreBox,
"eut_infobox",
goal_code = "EUT"
)
## map
callModule(
mapCard,
"eut_map",
goal_code = "EUT",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "eut_barplot",
goal_code = "EUT",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$eut_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "EUT") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "EUT"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values <- reactiveValues(`eut_tsplot-select` = "po_pload_bhi2019")
observeEvent(
eventExpr = input$`eut_tsplot-select`, {
values$`eut_tsplot-select` <- input$`eut_tsplot-select`
callModule(
tsplotCard,
"eut_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`eut_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## TRA ----
## Trash
## overall score box in top right
callModule(
scoreBox,
"tra_infobox",
goal_code = "TRA"
)
## map
callModule(
mapCard,
"tra_map",
goal_code = "TRA",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "tra_barplot",
goal_code = "TRA",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$tra_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "TRA") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "TRA"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
  ## (no default timeseries layer is initialized for this goal; a bare "NA"
  ## placeholder stood here where other goal sections call reactiveValues())
observeEvent(
eventExpr = input$`tra_tsplot-select`, {
values$`tra_tsplot-select` <- input$`tra_tsplot-select`
callModule(
tsplotCard,
"tra_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`tra_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## FP ----
## Food Provision
## overall score box in top right
callModule(
scoreBox,
"fp_infobox",
goal_code = "FP"
)
## map
callModule(
mapCard,
"fp_map",
goal_code = "FP",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "fp_barplot",
goal_code = "FP",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$fp_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "FP") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "FP"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values <- reactiveValues(`fp_tsplot-select` = "wildcaught_weight")
observeEvent(
eventExpr = input$`fp_tsplot-select`, {
values$`fp_tsplot-select` <- input$`fp_tsplot-select`
callModule(
tsplotCard,
"fp_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`fp_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## FIS ----
## Fisheries
## overall score box in top right
callModule(
scoreBox,
"fis_infobox",
goal_code = "FIS"
)
## map
callModule(
mapCard,
"fis_map",
goal_code = "FIS",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "fis_barplot",
goal_code = "FIS",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$fis_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "FIS") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "FIS"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values <- reactiveValues(`fis_tsplot-select` = "fis_bbmsy_bhi2019_cod")
observeEvent(
eventExpr = input$`fis_tsplot-select`, {
values$`fis_tsplot-select` <- input$`fis_tsplot-select`
callModule(
tsplotCard,
"fis_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`fis_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## MAR ----
## Mariculture
## overall score box in top right
callModule(
scoreBox,
"mar_infobox",
goal_code = "MAR"
)
## map
callModule(
mapCard,
"mar_map",
goal_code = "MAR",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "mar_barplot",
goal_code = "MAR",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$mar_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "MAR") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "MAR"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values <- reactiveValues(`mar_tsplot-select` = "mar_harvest_bhi2019")
observeEvent(
eventExpr = input$`mar_tsplot-select`, {
values$`mar_tsplot-select` <- input$`mar_tsplot-select`
callModule(
tsplotCard,
"mar_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`mar_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## LE ----
## Coastal Livelihoods & Economies
## overall score box in top right
callModule(
scoreBox,
"le_infobox",
goal_code = "LE"
)
## map
callModule(
mapCard,
"le_map",
goal_code = "LE",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "le_barplot",
goal_code = "LE",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$le_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "LE") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "LE"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
  ## (no default timeseries layer is initialized for this goal; a bare "NA"
  ## placeholder stood here where other goal sections call reactiveValues())
observeEvent(
eventExpr = input$`le_tsplot-select`, {
values$`le_tsplot-select` <- input$`le_tsplot-select`
callModule(
tsplotCard,
"le_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`le_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## ECO ----
## Economies
## overall score box in top right
callModule(
scoreBox,
"eco_infobox",
goal_code = "ECO"
)
## map
callModule(
mapCard,
"eco_map",
goal_code = "ECO",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "eco_barplot",
goal_code = "ECO",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$eco_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "ECO") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "ECO"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values <- reactiveValues(`eco_tsplot-select` = "le_eco_yearly_gva_bhi2019_Coastal tourism")
observeEvent(
eventExpr = input$`eco_tsplot-select`, {
values$`eco_tsplot-select` <- input$`eco_tsplot-select`
callModule(
tsplotCard,
"eco_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`eco_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## LIV ----
## Livelihoods
## overall score box in top right
callModule(
scoreBox,
"liv_infobox",
goal_code = "LIV"
)
## map
callModule(
mapCard,
"liv_map",
goal_code = "LIV",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "liv_barplot",
goal_code = "LIV",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$liv_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "LIV") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "LIV"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values <- reactiveValues(`liv_tsplot-select` = "le_liv_regional_employ_bhi2019")
observeEvent(
eventExpr = input$`liv_tsplot-select`, {
values$`liv_tsplot-select` <- input$`liv_tsplot-select`
callModule(
tsplotCard,
"liv_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`liv_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## SP ----
## Sense of Place
## overall score box in top right
callModule(
scoreBox,
"sp_infobox",
goal_code = "SP"
)
## map
callModule(
mapCard,
"sp_map",
goal_code = "SP",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "sp_barplot",
goal_code = "SP",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$sp_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "SP") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "SP"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
## no default layer pre-selected for SP; the observer below stores the user's choice
observeEvent(
eventExpr = input$`sp_tsplot-select`, {
values$`sp_tsplot-select` <- input$`sp_tsplot-select`
callModule(
tsplotCard,
"sp_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`sp_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## ICO ----
## Iconic Species
## overall score box in top right
callModule(
scoreBox,
"ico_infobox",
goal_code = "ICO"
)
## map
callModule(
mapCard,
"ico_map",
goal_code = "ICO",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "ico_barplot",
goal_code = "ICO",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$ico_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "ICO") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "ICO"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values$`ico_tsplot-select` <- "sp_ico_assessments_bhi2019_Fish and Lamprey"
observeEvent(
eventExpr = input$`ico_tsplot-select`, {
values$`ico_tsplot-select` <- input$`ico_tsplot-select`
callModule(
tsplotCard,
"ico_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`ico_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## LSP ----
## Lasting Special Places
## overall score box in top right
callModule(
scoreBox,
"lsp_infobox",
goal_code = "LSP"
)
## map
callModule(
mapCard,
"lsp_map",
goal_code = "LSP",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "lsp_barplot",
goal_code = "LSP",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$lsp_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "LSP") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "LSP"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
## no default layer pre-selected for LSP; the observer below stores the user's choice
observeEvent(
eventExpr = input$`lsp_tsplot-select`, {
values$`lsp_tsplot-select` <- input$`lsp_tsplot-select`
callModule(
tsplotCard,
"lsp_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`lsp_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## NP ----
## Natural Products
## overall score box in top right
callModule(
scoreBox,
"np_infobox",
goal_code = "NP"
)
## map
callModule(
mapCard,
"np_map",
goal_code = "NP",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "np_barplot",
goal_code = "NP",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$np_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "NP") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "NP"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values$`np_tsplot-select` <- "np_landings_bhi2019_sprat"
observeEvent(
eventExpr = input$`np_tsplot-select`, {
values$`np_tsplot-select` <- input$`np_tsplot-select`
callModule(
tsplotCard,
"np_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`np_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## TR ----
## Tourism & Recreation
## overall score box in top right
callModule(
scoreBox,
"tr_infobox",
goal_code = "TR"
)
## map
callModule(
mapCard,
"tr_map",
goal_code = "TR",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit,
year_selected = view_year,
legend_title = dimension,
lyrs_latlon = c(),
lyrs_polygons = list(),
popup_title = "Score:",
popup_add_field = "Name",
popup_add_field_title = "Name:"
)
## barplot
callModule(
barplotCard, "tr_barplot",
goal_code = "TR",
dimension_selected = dimension,
spatial_unit_selected = spatial_unit
)
## info table about input data layers
output$tr_datatable = renderDataTable({
datatable(
data_info %>%
filter(goal == "TR") %>%
select(-goal),
options = list(
dom = "t",
pageLength = nrow(filter(data_info, goal == "TR"))
),
rownames = FALSE,
escape = FALSE
)
})
## layers timeseries plot
values$`tr_tsplot-select` <- "tr_coastal_tourism_gva_bhi2019"
observeEvent(
eventExpr = input$`tr_tsplot-select`, {
values$`tr_tsplot-select` <- input$`tr_tsplot-select`
callModule(
tsplotCard,
"tr_tsplot",
plot_type = "boxplot",
layer_selected = reactive(values$`tr_tsplot-select`),
spatial_unit_selected = spatial_unit
)
}, ignoreNULL = FALSE
)
## END ----
## signal end of goals pages stuff for rebuild functions
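## A possible refactor (sketch only, not wired in): the per-goal blocks above
## differ only in their goal code and default layer, so the score box, map,
## and barplot wiring could be generated in a loop, e.g.
# for (g in c("LE", "ECO", "LIV", "SP", "ICO", "LSP", "NP", "TR")) {
#   id <- tolower(g)
#   callModule(scoreBox, paste0(id, "_infobox"), goal_code = g)
#   callModule(
#     mapCard, paste0(id, "_map"), goal_code = g,
#     dimension_selected = dimension, spatial_unit_selected = spatial_unit,
#     year_selected = view_year, legend_title = dimension,
#     lyrs_latlon = c(), lyrs_polygons = list(),
#     popup_title = "Score:", popup_add_field = "Name",
#     popup_add_field_title = "Name:"
#   )
#   callModule(
#     barplotCard, paste0(id, "_barplot"), goal_code = g,
#     dimension_selected = dimension, spatial_unit_selected = spatial_unit
#   )
# }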
## SUMMARIZE AND COMPARE
## PRESSURES ----
output$pressure_ts <- renderPlotly({
# press_var <- input$press_var
press_dat <- readr::read_csv(file.path(dir_main, "data", "layers_data.csv")) %>%
dplyr::left_join(
select(thm$rgn_name_lookup, region_id, plot_title, subbasin),
by = "region_id"
) %>%
# dplyr::filter(layer == press_var) %>%
dplyr::filter(category == "pressure") %>%
dplyr::rename(Name = plot_title, Pressure = value, Year = year) %>%
dplyr::mutate(
layername = layer %>%
stringr::str_remove("_bhi2.*") %>%
stringr::str_replace_all("_", " ") %>%
stringr::str_to_upper()
)
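## when viewing subbasins, aggregate BHI-region pressures with an
## area-weighted mean so larger regions contribute proportionally more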
if(spatial_unit() == "subbasins"){
press_dat <- press_dat %>%
left_join(
readr::read_csv(file.path(dir_main, "data", "regions.csv")) %>%
select(region_id, area_km2),
by = "region_id"
) %>%
group_by(subbasin, layer, layername, Year) %>%
summarize(Pressure = weighted.mean(Pressure, area_km2) %>% round(3)) %>%
left_join(
readr::read_csv(file.path(dir_main, "data", "basins.csv")) %>%
select(subbasin, order),
by = "subbasin"
) %>%
arrange(order) %>%
ungroup() %>%
mutate(Name = as.factor(subbasin))
} else {
press_dat <- press_dat %>%
dplyr::filter(region_id %in% 1:42) %>%
select(Name, layer, layername, Year, Pressure) %>%
mutate(Pressure = round(Pressure, 3))
}
## create color palette: ramp seven Set2 colors out to the 16 distinct pressure layers
pressure_cols <- colorRampPalette(RColorBrewer::brewer.pal(8, "Set2")[1:7])(16)
## bar plot
## once have timeseries data, can make this a timeseries plot
plot_obj <- ggplot2::ggplot(data = press_dat) +
geom_col(
position = position_stack(),
aes(
x = Name,
y = Pressure,
fill = layer,
text = sprintf("%s\n%s\nScore (scale 0-1): %s", Name, layername, Pressure)
)
) +
theme_bw() +
theme(
axis.text.x = element_text(size = 8, angle = 40, color = "grey40"),
legend.position = "none"
) +
scale_fill_manual(values = pressure_cols) +
labs(x = NULL, y = NULL, title = "Cumulative Pressures") ## labs() has no `main` argument; `title` is the correct name
plotly::ggplotly(plot_obj, tooltip = "text")
})
## DATA LAYERS ----
## scatter plot
# output$layers_scatter <- renderPlot({
#
# gh_lyrs <- "https://raw.githubusercontent.com/OHI-Science/bhi-1.0-archive/draft/baltic2015/layers/"
# dat_x <- readr::read_csv(paste0(gh_lyrs, input$layerscatter_var_x))
# dat_y <- readr::read_csv(paste0(gh_lyrs, input$layerscatter_var_y))
#
# x_name <- str_to_upper(str_remove(input$layerscatter_var_x, ".csv"))
# y_name <- str_to_upper(str_remove(input$layerscatter_var_y, ".csv"))
#
# df <- left_join(dat_x, dat_y, by = "rgn_id")
# colnames(df) <- c("region_id", x_name, y_name)
#
# ggplot(data = df) +
# geom_point(aes_string(x_name, y_name)) +
# theme_minimal() +
# theme(
# axis.title.x = element_blank(),
# axis.title.y = element_blank()
# )
# })
## make datatable of data layers from bhi-prep
## will eventually read from bhi-prep repo, and won't need all filters...
# output$layers_datatab <- DT::renderDataTable({
# gh_lyrs <- "https://raw.githubusercontent.com/OHI-Science/bhi-1.0-archive/draft/baltic2015/layers/"
# # all_lyrs <- bhiprep_github_layers()
# all_lyrs <- bhiprep_github_layers("https://api.github.com/repos/OHI-Science/bhi-1.0-archive/git/trees/draft?recursive=1") %>% # a func defined in common.R
# dplyr::mutate(fn = str_extract(., pattern = "/[a-z0-9_].*.csv")) %>%
# dplyr::mutate(fn = str_remove(fn, pattern = "/layers/")) %>%
# dplyr::filter(!str_detect(., pattern = "without_social")) %>%
# dplyr::filter(!str_detect(fn, pattern = "gl2014")) %>%
# dplyr::filter(!str_detect(fn, pattern = "trend")) %>%
# dplyr::filter(!str_detect(fn, pattern = "slope")) %>%
# dplyr::filter(!str_detect(fn, pattern = "status")) %>%
# dplyr::filter(!str_detect(fn, pattern = "res_reg")) %>%
# dplyr::filter(!is.na(fn))
#
# lyrs_df <- readr::read_csv(paste0(gh_lyrs, "/", all_lyrs$fn[1])) # 2 cols, one is 'rgn_id' but really should use while...
# colnames(lyrs_df) <- c("rgn_id", str_remove(all_lyrs$fn[1], ".csv"))
# for(lyr in all_lyrs$fn[-1]){
# tmp <- readr::read_csv(paste0(gh_lyrs, "/", lyr))
# if(ncol(tmp) == 2 & "rgn_id" %in% colnames(tmp)){
# colnames(tmp) <- c("rgn_id", str_remove(lyr, ".csv"))
# lyrs_df <- dplyr::left_join(lyrs_df, tmp, by = "rgn_id") # c("region_id", "year")
# }
# }
# datatable(
# lyrs_df,
# extensions = "Buttons",
# options = list(
# dom = "Bfrtip",
# buttons = c("csv", "excel")
# )
# )
# })
## SHARE FEEDBACK ----
output$iframe <- renderUI({
src = "https://docs.google.com/forms/d/e/1FAIpQLSca7FSR3qy1kohCrh3uqkVBpTjCEKoS1wlB6DPkMrrB6w95fA/viewform?embedded=true"
tags$iframe(src=src, width="640", height="1660", frameborder="0", marginheight="0", marginwidth="0")
})
}
|
f3b2122f1deaffef55ea9ffea246ab0d1e51a94c | e1cbbf8791b0ac6d40f6d5b397785560105441d9 | /man/lmomTLgpa.Rd | 6f5cfca121a35dd2352a0a255b5f2f766041beb3 | [] | no_license | wasquith/lmomco | 96a783dc88b67017a315e51da3326dfc8af0c831 | 8d7cc8497702536f162d7114a4b0a4ad88f72048 | refs/heads/master | 2023-09-02T07:48:53.169644 | 2023-08-30T02:40:09 | 2023-08-30T02:40:09 | 108,880,810 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,930 | rd | lmomTLgpa.Rd | \name{lmomTLgpa}
\alias{lmomTLgpa}
\title{Trimmed L-moments of the Generalized Pareto Distribution}
\description{
This function estimates the symmetrical trimmed L-moments (TL-moments) for \eqn{t=1} of the Generalized Pareto distribution given the parameters (\eqn{\xi}, \eqn{\alpha}, and \eqn{\kappa}) from \code{\link{parTLgpa}}.
The TL-moments in terms of the parameters are
\deqn{\lambda^{(1)}_1 = \xi + \frac{\alpha(\kappa+5)}{(\kappa+3)(\kappa+2)} \mbox{,}}
\deqn{\lambda^{(1)}_2 = \frac{6\alpha}{(\kappa+4)(\kappa+3)(\kappa+2)} \mbox{,}}
\deqn{\tau^{(1)}_3 = \frac{10(1-\kappa)}{9(\kappa+5)} \mbox{, and}}
\deqn{\tau^{(1)}_4 = \frac{5(\kappa-1)(\kappa-2)}{4(\kappa+6)(\kappa+5)} \mbox{.}}
}
\usage{
lmomTLgpa(para)
}
\arguments{
\item{para}{The parameters of the distribution.}
}
\value{
An \R \code{list} is returned.
\item{lambdas}{Vector of the trimmed L-moments. First element is
\eqn{\lambda^{(1)}_1}, second element is \eqn{\lambda^{(1)}_2}, and so on.}
\item{ratios}{Vector of the L-moment ratios. Second element is
\eqn{\tau^{(1)}}, third element is \eqn{\tau^{(1)}_3} and so on. }
\item{trim}{Level of symmetrical trimming used in the computation, which is unity.}
\item{leftrim}{Level of left-tail trimming used in the computation, which is unity.}
\item{rightrim}{Level of right-tail trimming used in the computation, which is unity.}
\item{source}{An attribute identifying the computational source of the TL-moments: \dQuote{lmomTLgpa}.}
}
\references{
Elamir, E.A.H., and Seheult, A.H., 2003, Trimmed L-moments: Computational Statistics and Data Analysis, v. 43, pp. 299--314.
}
\author{W.H. Asquith}
\seealso{\code{\link{lmomgpa}}, \code{\link{parTLgpa}}, \code{\link{cdfgpa}}, \code{\link{pdfgpa}}, \code{\link{quagpa}} }
\examples{
TL <- TLmoms(c(123,34,4,654,37,78,21,3400),trim=1)
TL
lmomTLgpa(parTLgpa(TL))
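# Illustrative check (not from the original documentation): sample
# TL-moments of data simulated from the fitted GPA should approximate
# the theoretical TL-moments computed above.
\dontrun{
para <- parTLgpa(TL)
sim <- quagpa(runif(5000), para)
TLmoms(sim, trim = 1)$lambdas[1:2]
}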
}
\keyword{L-moment (distribution)}
\keyword{Distribution: Generalized Pareto}
|
f3ab18904f2bf9dbf47b385663a3a193b8c48110 | c6ed46f34454d3febb7f284a946c8fcd39a7a1b0 | /man/norm_dist_ci_data.Rd | 6d2c43fc25ad5c37315864c61fa26f29b5499c6d | [
"MIT"
] | permissive | simonecollier/lizardHMM | e1bf4559a5ea23a995a6e5da4a3284ddd5e2aa1b | 269d4c6c863bf5385acc8183c45d7f8e379ac952 | refs/heads/master | 2023-08-24T11:21:37.176643 | 2021-11-07T05:13:14 | 2021-11-07T05:13:14 | 383,538,271 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,609 | rd | norm_dist_ci_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/norm_ci_functions.R
\name{norm_dist_ci_data}
\alias{norm_dist_ci_data}
\title{Compute the confidence intervals of fitted normal distributions}
\usage{
norm_dist_ci_data(
x,
num_states,
num_variables,
num_subjects,
sample,
state_dep_dist_pooled = FALSE,
x_step = 0.2,
n = 100,
level = 0.975
)
}
\arguments{
\item{x}{The data to be fit with an HMM in the form of a 3D array. The
first index (row) corresponds to time, the second (column) to the
variable number, and the third (matrix number) to the subject number.}
\item{num_states}{The number of states in the desired HMM.}
\item{num_variables}{The number of variables in the data.}
\item{num_subjects}{The number of subjects/trials that generated the data.}
\item{sample}{A list of \code{mu} and \code{sigma} sampled according to \code{norm_ci()}.}
\item{state_dep_dist_pooled}{A logical variable indiacting whether the
state dependent distribution parameters \code{mu} and \code{sigma} should be
treated as equal for all subjects.}
\item{x_step}{A value indicating the step length for the range of
observation values.}
\item{n}{The number of samples in the Monte Carlo fitting.}
\item{level}{A number indicating the level of confidence for the desired
interval.}
}
\value{
A list containing the upper and lower confidence intervals and the
parameter estimates.
}
\description{
This is a helper function for \code{norm_hist_ci()}; it computes the confidence
intervals for the fitted normal state-dependent distributions using
\code{norm_ci()}.
}
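\examples{
\dontrun{
# Illustrative sketch, not from the package documentation: `fit` and the
# norm_ci() call are hypothetical stand-ins for a fitted lizardHMM model
# and its parameter sample; argument names follow the signature above.
samp <- norm_ci(fit, x, n = 100)
ci <- norm_dist_ci_data(x, num_states = 2, num_variables = 1,
                        num_subjects = 1, sample = samp)
str(ci) # list with upper/lower confidence intervals and parameter estimates
}
}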
|
da45bd404ede99786c2f0b62be539eb2c479930a | 454b3e35e84069026d4af58a62439e85822faff6 | /Models_each_predictor/Random_intercepts_alltypes.R | ae93e34de63558b4456d22c8cf5d6ce410eca963 | [] | no_license | COINtoolbox/NB_GCs | 14acdf8426a4012c40020378e67ff70cab98a3cb | e4d0b6cc8df371ed3a7b6e90c132de4c83d24364 | refs/heads/master | 2020-06-15T06:06:04.787271 | 2015-06-30T06:38:00 | 2015-06-30T06:38:00 | 27,399,804 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,285 | r | Random_intercepts_alltypes.R | # R script GLMM
# Copyright (C) 2014 Rafael S. de Souza, Bart Buelens, Ewan Cameron
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License version 3 as published by
#the Free Software Foundation.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#
# Required libraries
library(rjags)
library(ggmcmc)
library(ggplot2)
library(ggthemes)
library(pander)
library(Cairo)
library(plyr)
library(MASS)
library(scales)
library(runjags)
# Function to allow parse labels in facet_wrap
facet_wrap_labeller <- function(gg.plot,labels=NULL) {
#works with R 3.0.1 and ggplot2 0.9.3.1
require(gridExtra)
g <- ggplotGrob(gg.plot)
gg <- g$grobs
strips <- grep("strip_t", names(gg))
for(ii in seq_along(labels)) {
modgrob <- getGrob(gg[[strips[ii]]], "strip.text",
grep=TRUE, global=TRUE)
gg[[strips[ii]]]$children[[modgrob$name]] <- editGrob(modgrob,label=labels[ii])
}
g$grobs <- gg
class(g) = c("arrange", "ggplot",class(g))
g
}
give.n <- function(x){
return(c(y = 0.5, label = length(x)))
# experiment with the multiplier to find the perfect position
}
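# give.n is unused below; a typical (hypothetical) use would annotate each
# boxplot group with its sample size, e.g.
# ... + stat_summary(fun.data = give.n, geom = "text")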
################
# Script starts here
# Read data
GCS = read.csv(file="..//Dataset//GCs_full.csv",header=TRUE,dec=".",sep="")
Full_type<-read.table(file="..//Dataset//fulltype.dat",header=TRUE)
GCS$alltype <- Full_type$fulltype
GCS = subset(GCS, !is.na(MV_T))
#dim(GCS)
N_err<-GCS$N_GC_err
err_MV_T<-GCS$err_MV_T
N = nrow(GCS)
#type<-match(GCS$alltype,unique(GCS$alltype))
type<-match(Full_type$fulltype,unique(Full_type$fulltype))
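# `type` maps each galaxy to an integer 1..Ntype that indexes the random
# intercepts; this assumes fulltype.dat rows align one-to-one with the
# rows of GCs_full.csv that survive the MV_T filter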
Ntype<-length(unique(GCS$alltype))
######## NB with errors ########################################################
MV_Tx = seq(from = 1.05 * min(GCS$MV_T),
to = 0.95 * max(GCS$MV_T),
length.out = 500)
jags.data <- list(
N_GC = GCS$N_GC,
MV_T = GCS$MV_T,
errN_GC = GCS$N_GC_err,
N = nrow(GCS),
err_MV_T = err_MV_T,
MV_Tx = MV_Tx,
M = 500,
type=type,
Ntype=Ntype
)
model.NB <- "model{
# Priors for regression coefficients
beta.0~dnorm(0,0.000001)
beta.1~dnorm(0,0.000001)
beta.2~dnorm(0,0.000001)
tau.R~dgamma(0.01,0.01)
# Prior for size
size~dunif(0.001,5)
#
for (i in 1:N){
MV_T_true[i]~dunif(-26,-10)
}
for (j in 1:Ntype){
ranef[j]~dnorm(0,tau.R)
}
# Likelihood function
for (i in 1:N){
MV_T[i]~dnorm(MV_T_true[i],1/err_MV_T[i]^2);
errorN[i]~dbin(0.5,2*errN_GC[i])
eta[i]<-beta.0+beta.1*MV_T_true[i]+ranef[type[i]]
log(mu[i])<-log(exp(eta[i])+errorN[i]-errN_GC[i])
p[i]<-size/(size+mu[i])
N_GC[i]~dnegbin(p[i],size)
# Prediction
etaTrue[i]<-beta.0+beta.1*MV_T_true[i]
log(muTrue[i])<-max(-20,min(20,etaTrue[i]))
pTrue[i]<-size/(size+muTrue[i])
prediction.NB[i]~dnegbin(pTrue[i],size)
#prediction.NB[i]~dnegbin(p[i],size)
# Discrepancy measures
YNew[i] ~ dnegbin(p[i],size)
expY[i] <- mu[i]
varY[i] <- mu[i] + pow(mu[i],2) / size
PRes[i] <-(N_GC[i] - expY[i])/sqrt(varY[i])
PResNew[i] <-(YNew[i] - expY[i])/sqrt(varY[i])
D[i]<-pow(PRes[i],2)
DNew[i]<-pow(PResNew[i],2)
}
Fit<-sum(D[1:N])
New<-sum(DNew[1:N])
# Prediction for new data
for (j in 1:M){
etax[j]<-beta.0+beta.1*MV_Tx[j]
log(mux[j])<-max(-20,min(20,etax[j]))
px[j]<-size/(size+mux[j])
prediction.NBx[j]~dnegbin(px[j],size)
}
}"
inits1 <- list(beta.0=rnorm(1,0,0.1),beta.1=rnorm(1,0,0.1),size=runif(1,0.1,5))
inits2 <- list(beta.0=rnorm(1,0,0.1),beta.1=rnorm(1,0,0.1),size=runif(1,0.1,5))
inits3 <- list(beta.0=rnorm(1,0,0.1),beta.1=rnorm(1,0,0.1),size=runif(1,0.1,5))
params <- c("beta.0","beta.1","size","ranef","PRes","MV_T_true","Fit","New","prediction.NBx")
library(parallel)
cl <- makeCluster(3)
jags.neg <- run.jags(method="rjparallel", method.options=list(cl=cl),
data = jags.data,
inits = list(inits1,inits2,inits3),
model=model.NB,
n.chains = 3,
adapt=1500,
monitor=c(params),
burnin=25000,
sample=45000,
summarise=FALSE,
thin=2,
plots=FALSE
)
jagssamples.nb <- as.mcmc.list(jags.neg )
## renamed from `summary` to avoid masking base::summary()
jags.summary <- extend.jags(jags.neg, drop.monitor=c("PRes","MV_T_true","Fit","New","prediction.NBx"), summarise=TRUE)
MV_T_true<-summary(as.mcmc.list(jags.neg,vars="MV_T_true"),quantiles=0.5)
#pred.NBerr<-summary(as.mcmc.list(jagssamples.nb, vars="prediction.NB"),quantiles=c(0.005,0.025,0.25,0.5,0.75,0.975, 0.995))
#pred.NB2err<-data.frame(Type=GCS$Type,NGC=GCS$N_GC,MV_T_true=MV_T_true$quantiles,MV_T=GCS$MV_T,mean=pred.NBerr$statistics[,1],lwr1=pred.NBerr$quantiles[,3],lwr2=pred.NBerr$quantiles[,2],lwr3=pred.NBerr$quantiles[,1],upr1=pred.NBerr$quantiles[,5],upr2=pred.NBerr$quantiles[,6],upr3=pred.NBerr$quantiles[,7])
pred.NBerrx<-summary(as.mcmc.list(jags.neg, vars="prediction.NBx"),quantiles=c(0.005,0.025,0.25,0.5,0.75,0.975, 0.995))
pred.NB2errx<-data.frame(MV_Tx=MV_Tx,mean=pred.NBerrx$statistics[,1],lwr1=pred.NBerrx$quantiles[,3],lwr2=pred.NBerrx$quantiles[,2],lwr3=pred.NBerrx$quantiles[,1],upr1=pred.NBerrx$quantiles[,5],upr2=pred.NBerrx$quantiles[,6],upr3=pred.NBerrx$quantiles[,7])
N_low<-GCS$N_GC-N_err
N_low[N_low<0]<-0
asinh_trans <- function(){
trans_new(name = 'asinh', transform = function(x) asinh(x),
inverse = function(x) sinh(x))
}
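# asinh behaves like log for large counts but is defined at zero, which is
# why it is used for the N_GC axis (some galaxies host no globular clusters)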
cairo_pdf("..//Figures/M_Vx_random.pdf",height=8,width=9)
ggplot(GCS,aes(x=MV_T,y=N_GC))+
## dropped a stray `method` argument; geom_ribbon has none (it belongs to geom_smooth)
geom_ribbon(data=pred.NB2errx,aes(x=MV_Tx,y=mean,ymin=lwr1, ymax=upr1), alpha=0.45, fill="gray") +
geom_ribbon(data=pred.NB2errx,aes(x=MV_Tx,y=mean,ymin=lwr2, ymax=upr2), alpha=0.35, fill="gray") +
geom_ribbon(data=pred.NB2errx,aes(x=MV_Tx,y=mean,ymin=lwr3, ymax=upr3), alpha=0.25, fill="gray") +
geom_point(aes(colour=Type,shape=Type),size=3.25,alpha=0.8)+
## `guide` is not a geom parameter; show.legend = FALSE gives the intended effect
geom_errorbar(show.legend = FALSE, aes(colour=Type,ymin=N_low,ymax=N_GC+N_err),alpha=0.7,width=0.05)+
geom_errorbarh(show.legend = FALSE, aes(colour=Type,xmin=MV_T-GCS$err_MV_T,
                                        xmax=MV_T+err_MV_T),alpha=0.7,height=0.05)+
geom_line(data=pred.NB2errx,aes(x=MV_Tx,y=mean),colour="gray25",linetype="dashed",size=1.2)+
scale_y_continuous(trans = 'asinh',breaks=c(0,10,100,1000,10000,100000),labels=c("0",expression(10^1),expression(10^2),
expression(10^3),expression(10^4),expression(10^5)))+
scale_colour_gdocs()+
scale_shape_manual(values=c(19,2,8,10))+scale_x_reverse()+
# theme_economist_white(gray_bg = F, base_size = 11, base_family = "sans")+
theme_hc()+
ylab(expression(N[GC]))+
xlab(expression(M[V]))+theme(legend.position="top",plot.title = element_text(hjust=0.5),
axis.title.y=element_text(vjust=0.75),
axis.title.x=element_text(vjust=-0.25),
text = element_text(size=25))
dev.off()
## labels for the random intercepts; use unique() order so labels line up
## with how `type` was built above (levels() sorts alphabetically and
## could misassign galaxy types)
ranef.labels <- data.frame(
  Parameter = paste("ranef[", seq_len(Ntype), "]", sep = ""),
  Label = unique(as.character(Full_type$fulltype)))
head(ranef.labels)
S.full <- ggs(jagssamples.nb, par_labels = ranef.labels, family = c("ranef"))
library(RColorBrewer)
blues_fun <- colorRampPalette(brewer.pal(9,"Blues")[4:9])
blues <- blues_fun(Ntype) # one shade per galaxy type
pdf("..//Figures/random.pdf",height=14,width=9)
ggs_caterpillar(S.full)+geom_vline(aes(yintercept=0),color="gray80",size=1,linetype="dashed")+
theme_hc()+
theme(legend.position="none",plot.title = element_text(hjust=0.5),
axis.title.y=element_text(size=25,vjust=0.75),
axis.title.x=element_text(size=25,vjust=-0.25),axis.text.x =element_text(size=25),
text = element_text(size=17))+aes(color=Parameter)+
scale_color_manual(guide="none",values = blues)+ylab("Galaxy Type")+
xlab(expression(paste(zeta[j]," Highest Posterior Density"," ")))
dev.off()
pdf("..//Figures/JAGS_NB_M_V.pdf",height=8,width=9)
ggplot(pred.NB2err,aes(x=MV_T,y=NGC))+
geom_ribbon(aes(x=MV_T_true,y=mean,ymin=lwr1, ymax=upr1), alpha=0.3, fill="gray") +
geom_ribbon(aes(x=MV_T_true,y=mean,ymin=lwr2, ymax=upr2), alpha=0.2, fill="gray") +
geom_ribbon(aes(x=MV_T_true,y=mean,ymin=lwr3, ymax=upr3), alpha=0.1, fill="gray") +
geom_point(aes(colour=Type,shape=Type),size=3.25)+
geom_errorbar(guide="none",aes(colour=Type,ymin=NGC-N_low,ymax=NGC+N_err),alpha=0.7)+
geom_errorbarh(guide="none",aes(colour=Type,xmin=MV_T-GCS$err_MV_T,
xmax=MV_T+err_MV_T),alpha=0.7)+
geom_line(aes(x=MV_T_true,y=mean),colour="gray25",linetype="dashed",size=1.2)+
scale_y_continuous(trans = 'asinh',breaks=c(0,10,100,1000,10000,100000),labels=c("0",expression(10^1),expression(10^2),
expression(10^3),expression(10^4),expression(10^5)))+
scale_colour_gdocs()+
scale_shape_manual(values=c(19,2,8,10))+scale_x_reverse()+
# theme_economist_white(gray_bg = F, base_size = 11, base_family = "sans")+
theme_hc()+
ylab(expression(N[GC]))+
xlab(expression(M[V]))+theme(legend.position="top",plot.title = element_text(hjust=0.5),
axis.title.y=element_text(vjust=0.75),
axis.title.x=element_text(vjust=-0.25),
text = element_text(size=25))
dev.off()
# Diagnostics
S.NB1<-ggs(jagssamples.nb ,family=c("beta"))
S.NB2<-ggs(jagssamples.nb,family=c("size"))
S.NB<-rbind(S.NB1,S.NB2,deparse.level=2)
S.NB$Parameter<-revalue(S.NB$Parameter, c("beta.0"=expression(beta[0]), "beta.1"=expression(beta[1]),
"size"="k"))
g1<-ggs_density(S.NB)+
scale_colour_economist(guide="none")+
theme_hc()+
scale_fill_economist()+
# theme_economist_white(gray_bg = F, base_size = 11, base_family = "sans")+
theme(strip.background = element_rect(fill="gray95"),plot.background = element_rect(fill = 'white', colour = 'white'),
legend.position="none",plot.title = element_text(hjust=0.5),
axis.title.y=element_text(vjust=0.75),axis.text.x=element_text(size=25),
strip.text.x=element_text(size=25),
axis.title.x=element_text(vjust=-0.25),
text = element_text(size=25))+xlab("Parameter value")+ylab("Density")
CairoPDF("..//Figures/posterior_MV_full.pdf",height=10,width=8)
facet_wrap_labeller(g1,labels=c(expression(beta[0]),expression(beta[1]),"k"))
dev.off()
# random intercepts
S.ran<-ggs(jagssamples.nb ,family=c("ranef"))
gran<-ggs_density(S.ran)+
scale_colour_economist(guide="none")+
theme_hc()+
scale_fill_economist()+
# theme_economist_white(gray_bg = F, base_size = 11, base_family = "sans")+
theme(strip.background = element_rect(fill="gray95"),plot.background = element_rect(fill = 'white', colour = 'white'),
legend.position="none",plot.title = element_text(hjust=0.5),
axis.title.y=element_text(vjust=0.75),axis.text.x=element_text(size=25),
strip.text.x=element_text(size=25),
axis.title.x=element_text(vjust=-0.25),
text = element_text(size=25))+xlab("Parameter value")+ylab("Density")
CairoPDF("..//Figures/posterior_MV_full.pdf",height=10,width=8)
facet_wrap_labeller(g1,labels=c(expression(beta[0]),expression(beta[1]),"k"))
dev.off()
# Dispersion parameter
Pres<-summary(as.mcmc.list(jags.neg, vars="PRes"),quantiles=0.5)$quantiles
Dispersion <- sum(Pres^2)/(N - 72) # beta.0, beta.1 and k (3 parameters) + 69 random intercepts
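# a dispersion statistic close to 1 indicates the negative binomial variance
# adequately captures the spread; values well above 1 would signal remaining
# overdispersion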
# Model comparison
## posterior predictive RMSE between simulated and observed discrepancies;
## the mcmc.list object is jagssamples.nb (codasamples.nb was never defined)
Pred<-ggs(jagssamples.nb,family=c("New"))[["value"]]
Obs<-ggs(jagssamples.nb,family=c("Fit"))[["value"]]
sqrt(mean((Pred-Obs)^2))
## dic.samples() is an rjags function and needs a JAGS model object, so
## convert the runjags fit first with runjags::as.jags()
dicsamples.nb <- dic.samples(as.jags(jags.neg), n.iter = 50000, type = "pD")
# Plot residuals vs galaxy type
clus_data<-data.frame(Pres=Pres,type=GCS$alltype)
p <- ggplot(clus_data, aes(x=type, y=Pres),group=type)+ xlab("Galaxy Type") +
ylab("Pearson Residuals")
pdf("..//Figures/Pres_random.pdf",height=6,width=14)
p + stat_boxplot(colour="gray",geom ='errorbar')+geom_boxplot(aes(group=type,colour=type,fill=type),outlier.shape = 19,colour="gray",fatten=2,size=1,outlier.size=2,outlier.colour = "gray",notchwidth = 0.35,notch=F,data=clus_data)+
theme_hc()+
scale_fill_manual(guide="none",values = blues)+
theme(strip.background = element_rect(fill="gray95"),plot.background = element_rect(fill = 'white', colour = 'white'),
legend.position="none",plot.title = element_text(hjust=0.5),
axis.title.y=element_text(vjust=0.75),axis.text.x=element_text(angle=-90,size=12.5),
strip.text.x=element_text(size=25),
axis.title.x=element_text(vjust=-0.25),
text = element_text(size=25))
dev.off()
|