blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
163162e9f429346c988683c385b67dfa5763c562 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/geoR/examples/nearloc.Rd.R | 349fe916cca32aa6921382099ebfc4d2c9095a34 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 394 | r | nearloc.Rd.R | library(geoR)
### Name: nearloc
### Title: Near location to a point
### Aliases: nearloc
### Keywords: spatial

### ** Examples

# Demonstrates geoR::nearloc(): for each point, find the nearest of a set
# of candidate locations.
set.seed(276)  # make the runif() draws below reproducible
# 11 x 11 regular grid of candidate locations on the unit square
gr <- expand.grid(seq(0,1, l=11), seq(0,1, l=11))
plot(gr, asp=1)  # asp=1 keeps the square aspect ratio
# five random query points (10 uniform draws filled into a 5 x 2 matrix;
# `nc` partially matches the `ncol` argument of matrix())
pts <- matrix(runif(10), nc=2)
points(pts, pch=19)  # query points as filled circles
# nearest grid location for each query point
near <- nearloc(points=pts, locations=gr)
points(near, pch=19, col=2)  # highlight the matched locations in red
rownames(near)
# pos=TRUE returns the row positions of the matched locations instead
# of their coordinates
nearloc(points=pts, locations=gr, pos=TRUE)
|
f19d1ef39ca8aa71f757dfd97c53f1d0ea40c4bc | b93f14b970fe61ed7ffa4592654a027adc19b3fc | /R/fars_functions.R | cedd093e820bc6e394b8ec8c6f9ee5fb03e95b87 | [] | no_license | yuriygdv/farsfunctions | 1093001e2349400a18c2c800f158d88b090058fd | 0560cc087832a01c45e4014d8ecb3e6a837d31f0 | refs/heads/master | 2021-04-30T03:58:30.600541 | 2018-02-14T15:30:02 | 2018-02-14T15:30:02 | 121,523,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,640 | r | fars_functions.R | #' Read a csv data file into a table dataframe (tbl_df)
#' Read a CSV data file into a tibble
#'
#' Reads the CSV file named by \code{filename} (readr transparently
#' decompresses \code{.bz2} archives such as the FARS yearly files) and
#' returns its contents as a tibble.
#'
#' @param filename A character string giving the path of the dataset to read.
#'
#' @return A tibble with the file's contents.
#'
#' @examples \dontrun{
#' fars_read("accident_2015.csv.bz2")
#' }
#'
#' @importFrom readr read_csv
#' @importFrom dplyr as_tibble
#'
#' @note If no file with the given filename exists, the function stops with
#'   the error "file '<filename>' does not exist".
#'
#' @export
fars_read <- function(filename) {
  if (!file.exists(filename))
    stop("file '", filename, "' does not exist")
  data <- suppressMessages({
    readr::read_csv(filename, progress = FALSE)
  })
  # tbl_df() is deprecated in current dplyr; as_tibble() is the supported
  # equivalent and returns the same tbl_df/tbl/data.frame class.
  dplyr::as_tibble(data)
}
#' Make a filename in the format used for FARS data files
#'
#' Builds the name of the compressed FARS data file for the given year(s),
#' e.g. \code{"accident_2015.csv.bz2"}.
#'
#' @param year A year (or vector of years) coercible to integer, e.g.
#'   \code{2015} or \code{"2015"}.
#'
#' @return A character vector, the same length as \code{year}, with the
#'   FARS data-file name for each year.
#'
#' @examples
#' make_filename(2015)
#'
#' @note Stops with an error when \code{year} cannot be coerced to an
#'   integer (the original silently produced "accident_NA.csv.bz2").
#'
#' @export
make_filename <- function(year) {
  year <- as.integer(year)
  # Guard against inputs like "20 15" or "abc" that coerce to NA.
  if (any(is.na(year)))
    stop("'year' must be coercible to an integer")
  sprintf("accident_%d.csv.bz2", year)
}
#' Read the MONTH variable from FARS data files for several years
#'
#' For every element of \code{years} the matching FARS file name is built
#' with \code{make_filename} and loaded with \code{fars_read}; only the
#' \code{MONTH} column plus a \code{year} column are kept.  A year whose
#' data file cannot be read yields a warning and a \code{NULL} element.
#'
#' @param years A year in numeric format, or a list/vector of such years.
#'
#' @return A list with one element per requested year: a two-column tibble
#'   (\code{MONTH}, \code{year}), or \code{NULL} for an invalid year.
#'
#' @examples \dontrun{
#' fars_read_years(2015)
#' fars_read_years(list(2014, 2015))
#' }
#'
#' @importFrom dplyr mutate select
#'
#' @export
fars_read_years <- function(years) {
  read_one <- function(year) {
    file <- make_filename(year)
    tryCatch({
      dat <- fars_read(file)
      with_year <- dplyr::mutate(dat, year = year)
      dplyr::select(with_year, MONTH, year)
    }, error = function(e) {
      warning("invalid year: ", year)
      NULL
    })
  }
  lapply(years, read_one)
}
#' Summarize monthly FARS observation counts for multiple years
#'
#' Loads the FARS data for the requested years via \code{fars_read_years}
#' and tabulates the number of observations per month, one column per year.
#'
#' @param years A year in numeric format, or a list/vector of such years.
#'
#' @return A tibble with a \code{MONTH} column and one column per year,
#'   holding the number of FARS records for that month/year.
#'
#' @examples \dontrun{
#' fars_summarize_years(2015)
#' fars_summarize_years(list(2013, 2014, 2015))
#' }
#'
#' @importFrom dplyr bind_rows group_by summarize
#' @importFrom tidyr spread
#'
#' @note A year whose dataset is unavailable produces a NULL element
#'   (with a warning) and is simply dropped by bind_rows().
#'
#' @export
fars_summarize_years <- function(years) {
  dat_list <- fars_read_years(years)
  combined <- dplyr::bind_rows(dat_list)
  by_month <- dplyr::group_by(combined, year, MONTH)
  counts <- dplyr::summarize(by_month, n = n())
  tidyr::spread(counts, year, n)
}
#' Plot a map of accidents in a given state for a given year
#'
#' Loads the FARS data file for \code{year} (via \code{make_filename} and
#' \code{fars_read}) and draws the accidents recorded for state
#' \code{state.num} as points on that state's map.
#'
#' @param state.num A FARS state code, coercible to integer.
#' @param year A year in numeric format.
#'
#' @return Draws on the active graphics device.  Returns \code{NULL}
#'   invisibly (with the message "no accidents to plot") when the state has
#'   no records; stops with an error when \code{state.num} is not present
#'   in the data.
#'
#' @examples \dontrun{
#' fars_map_state(6, 2015)
#' }
#'
#' @importFrom dplyr filter
#' @importFrom maps map
#' @importFrom graphics points
#'
#' @note Requires the maps package. For some states the map call fails with
#'   "nothing to draw: all regions out of bounds", e.g. for state.num = 2.
#'
#' @export
fars_map_state <- function(state.num, year) {
  filename <- make_filename(year)
  data <- fars_read(filename)
  state.num <- as.integer(state.num)
  if(!(state.num %in% unique(data$STATE)))
    stop("invalid STATE number: ", state.num)
  data.sub <- dplyr::filter(data, STATE == state.num)
  if(nrow(data.sub) == 0L) {
    message("no accidents to plot")
    return(invisible(NULL))
  }
  # Treat out-of-range coordinates (longitude > 900, latitude > 90) as
  # missing so they don't distort the plotting range below.
  # (The `is.na(x) <- cond` form sets the flagged elements to NA in place.)
  is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
  is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
  with(data.sub, {
    maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
              xlim = range(LONGITUD, na.rm = TRUE))
    graphics::points(LONGITUD, LATITUDE, pch = 46)  # '.' glyph per accident
  })
}
|
1205677b96af4af8d5043274cfb905faf61394fb | fab563b952c80f6a90acdfdec77f082b60337dc3 | /dist/AWS_customBuild/packages_bioc_2.R | d6036589718b8c7dae5009d618fffe170ff69eb7 | [
"MIT"
] | permissive | wangdi2014/OmicsPipe2.0 | efa9d1399373ca9f8085ceb60037e6ffaa3ceb77 | 50bfe098d21bd4b22936ad754235e371c63f3797 | refs/heads/master | 2021-05-16T16:05:08.526516 | 2016-10-03T21:39:18 | 2016-10-03T21:39:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 229 | r | packages_bioc_2.R | source("~/.Rprofile")
# Install the Bioconductor packages required by the pipeline.
# NOTE(review): biocLite() is the legacy (pre-BiocManager) Bioconductor
# installer, sourced from the network below.
source("http://bioconductor.org/biocLite.R")
# First update any already-installed Bioconductor packages, non-interactively.
biocLite(ask=FALSE)
# Then install the annotation and pathway-analysis packages used downstream.
biocLite(c("IRanges",
           "KEGGgraph",
           "KEGGREST",
           "limma",
           "org.Hs.eg.db",
           "pathview",
           "ReactomePA",
           "SPIA",
           "XVector"
           ),
         ask=FALSE
) |
59559488a8e80dc96289d49f551e481c1967e3b8 | 2b9823b63a4856696e9d4ead7795310b2c72ad79 | /media/G3data/fdr18/trans/zero_gene_peaks/top2002/analysis.R | 4d79d037c66ea122d8a077716f2006f0cd23e411 | [] | no_license | anykine/radiationHybrid | 1e4645f67661805a71a64fa830e158d9d3dd3a3c | 6ef1732127b90c039ce865dcf22ccb32ffbd263f | refs/heads/master | 2020-04-29T06:03:07.692782 | 2019-03-15T23:20:07 | 2019-03-15T23:20:07 | 175,903,410 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 858 | r | analysis.R | # do a correlation of the genes regulated by the "same"
# Correlate the statistics of genes regulated by the "same" 0-gene eQTL
# peaks. Each file data<n>.txt holds four columns; columns 1 & 3 appear to
# be the alpha statistic and columns 2 & 4 the -log p statistic for the
# paired eQTLs (inferred from the original variable names — confirm).
total <- 2001
# Preallocate one row per file: alpha p-value, alpha estimate,
# nlp p-value, nlp estimate.
results <- matrix(0, total + 1, 4)
for (n in 0:total) {
  fname <- paste0("data", n, ".txt")
  x <- read.table(fname)
  # Pearson correlation of the alpha columns (1 vs 3)
  cor.alpha <- cor.test(x[, 1], x[, 3], method = "pearson")
  # Pearson correlation of the -log p columns (2 vs 4)
  cor.nlp <- cor.test(x[, 2], x[, 4], method = "pearson")
  results[n + 1, 1] <- cor.alpha$p.value
  results[n + 1, 2] <- cor.alpha$estimate
  results[n + 1, 3] <- cor.nlp$p.value
  results[n + 1, 4] <- cor.nlp$estimate
}
# Write the correlation table, tab-separated, without quoting or names.
outfile <- paste0("correlations", total, ".txt")
write.table(results, file = outfile, sep = "\t", quote = FALSE,
            row.names = FALSE, col.names = FALSE)
|
9dd14be166591bb8dfe49ae9a761dfb0f5f839b9 | 6ec14566e49b07c600432588a4a392619b4860ba | /US_Arrests.R.R | a1a8bf81c50ad221d38e8e8b1d149d5002bb7004 | [] | no_license | fall2018-saltz/ananth_gv_ggmap | 834a2ac34bf231aac8b2b21695804a3489d96c11 | dca56f206aac919723f1268c36916ad29c24b9a9 | refs/heads/master | 2020-04-01T19:36:31.032572 | 2018-10-19T20:57:16 | 2018-10-19T20:57:16 | 153,562,642 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 196 | r | US_Arrests.R.R |
# 2) Copy the USArrests dataset into a local variable (similar to HW 2).
# `arrests` is consumed as the input of the next R module in the workflow,
# so its name must not change.
arrests <- USArrests
|
fbdd90191591d63f769e7b478b667f0c63944cc5 | 98a0bd2de4836b813642df0faf5f0b5bd31f7617 | /man/tophatInstallation.Rd | 061932663663d96028c906c8dfd90d984ae3d55d | [] | no_license | inambioinfo/chimera | 7bf3834f72464e546b83f52704354acbc9c329bc | 17e0580ccd842a57f519fd968bc9df3d9ec29a0f | refs/heads/master | 2021-06-25T06:56:13.520654 | 2017-04-24T19:50:57 | 2017-04-24T19:50:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 637 | rd | tophatInstallation.Rd | \name{tophatInstallation}
\alias{tophatInstallation}
\title{A function to download tophat, bowtie and samtools}
\description{A function allowing the download and installation of tophat, bowtie and samtools in chimera package folder. The function also creates soft links in the user bin folder to allow the call of the above mentioned programs.
}
\usage{tophatInstallation(binDir, os=c("unix","mac"))}
\arguments{
\item{binDir}{The user bin folder}
\item{os}{The supported operating systems}
}
\author{Raffaele A Calogero}
\examples{
#tophatInstallation(binDir="/somewhere/inyourpc/bin", os="mac")
}
\keyword{utilities}
|
eb4af76c72b8c5d87dc3844762a88a710faa00f1 | 935b769a7ce00644143176a52e72401914a0c75e | /man/loo.moultmcmc.Rd | 92f72e9bb1e9031ae6298f0599c683efda7b8003 | [
"MIT"
] | permissive | pboesu/moultmcmc | 1b21a2701b85237763e9d9b8bee997280e5e3835 | 0a343e0589955fd917239590339acf7e73d6940d | refs/heads/master | 2023-04-16T22:51:20.054204 | 2023-02-15T15:33:32 | 2023-02-15T15:33:32 | 282,736,473 | 4 | 1 | MIT | 2022-12-27T15:27:53 | 2020-07-26T21:30:00 | R | UTF-8 | R | false | true | 1,125 | rd | loo.moultmcmc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{loo.moultmcmc}
\alias{loo.moultmcmc}
\title{Efficient approximate leave-one-out cross-validation (LOO)}
\usage{
\method{loo}{moultmcmc}(x, cores = getOption("mc.cores", 1))
}
\arguments{
\item{x}{A moultmcmc model object}
\item{cores}{The number of cores to use for parallelization. This defaults to the option mc.cores which can be set for an entire R session by options(mc.cores = NUMBER). The old option loo.cores is now deprecated but will be given precedence over mc.cores until loo.cores is removed in a future release. As of version 2.0.0 the default is now 1 core if mc.cores is not set, but we recommend using as many (or close to as many) cores as possible. Note for Windows 10 users: it is strongly recommended to avoid using the .Rprofile file to set mc.cores (using the cores argument or setting mc.cores interactively or in a script is fine).}
}
\value{
The loo() methods return a named list with class c("psis_loo", "loo"). See ?loo::loo
}
\description{
Efficient approximate leave-one-out cross-validation (LOO)
}
|
e1b4bebbe49ba99d7acdc1fbeedf1a306964f3d1 | e87fb65db88b9886eb8e1e0b7a2d7980bec57a69 | /R/getCurrentUser.R | 57ad05681c7d36fa3577a3149532ede9f11e2cd5 | [] | no_license | SantoshSrinivas79/Rspotify | 999aba6e81836e4d61cffd135e50df3e282feb42 | 94b90ff441e62d99fdce8405b1f00207b12ea7dc | refs/heads/master | 2021-01-23T18:00:54.405050 | 2017-02-25T20:07:11 | 2017-02-25T20:07:11 | 82,991,513 | 0 | 0 | null | 2017-02-24T02:12:45 | 2017-02-24T02:12:44 | null | UTF-8 | R | false | false | 933 | r | getCurrentUser.R | #' Get basic info of the current user
#'
#' Retrieves the Spotify profile of the user that authorized the supplied
#' OAuth token via the Web API endpoint
#' \url{https://developer.spotify.com/web-api/get-current-users-profile/}.
#'
#' @param token An OAuth token created with \code{spotifyOAuth}.
#'
#' @return A one-row data frame with columns \code{display_name}, \code{id}
#'   and \code{followers} (the user's follower count).
#'
#' @examples \dontrun{
#' ## Example
#' my_oauth <- spotifyOAuth(app_id="xxxx",client_id="yyyy",client_secret="zzzz")
#' save(my_oauth, file="my_oauth")
#' load("my_oauth")
#' me <- getCurrentUser(my_oauth)
#' }
#'
#' @export
getCurrentUser <- function(token) {
  # NOTE(review): no HTTP status check is performed; a failed request will
  # surface as NULL fields in the returned data frame.
  req <- httr::GET("https://api.spotify.com/v1/me", httr::config(token = token))
  json1 <- httr::content(req)
  data.frame(display_name = json1$display_name,
             id = json1$id,
             followers = json1$followers$total,
             stringsAsFactors = FALSE)
}
|
adfa3f68a79cdc879c70e6479aabc6b6c7d8a135 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /Rrelperm/man/kr3p_StoneII_SwSg.Rd | f0ddedc2f771ca72027df16bd25eba2222d2d067 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 2,104 | rd | kr3p_StoneII_SwSg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rrelperm-package.R
\name{kr3p_StoneII_SwSg}
\alias{kr3p_StoneII_SwSg}
\title{Generate a matrix of three-phase relative permeability data for the water-gas-oil system using the modified Stone II model}
\usage{
kr3p_StoneII_SwSg(
SWCON,
SWCRIT,
SOIRW,
SORW,
SOIRG,
SORG,
SGCON,
SGCRIT,
KRWIRO,
KROCW,
KRGCL,
NW,
NOW,
NG,
NOG,
NP
)
}
\arguments{
\item{SWCON}{connate water saturation, fraction}
\item{SWCRIT}{critical water saturation, fraction}
\item{SOIRW}{irreducible oil saturation, fraction}
\item{SORW}{residual oil saturation, fraction}
\item{SOIRG}{irreducible oil saturation, fraction}
\item{SORG}{residual oil saturation, fraction}
\item{SGCON}{connate gas saturation, fraction}
\item{SGCRIT}{critical gas saturation, fraction}
\item{KRWIRO}{water relative permeability at irreducible oil}
\item{KROCW}{oil relative permeability at connate water}
\item{KRGCL}{gas relative permeability at connate liquid}
\item{NW}{exponent term for calculating krw}
\item{NOW}{exponent term for calculating krow}
\item{NG}{exponent term for calculating krg}
\item{NOG}{exponent term for calculating krog}
\item{NP}{number of saturation points in the two-phase relative permeability tables, the maximum acceptable value is 501. The number of data points in the three-phase relative permeability table is (0.5 * NP * (NP + 1))}
}
\value{
A matrix with water saturation, gas saturation, oil saturation, and oil relative permeability values, respectively.
}
\description{
The 'kr3p_StoneII_SwSg()' function creates a table of three-phase oil relative permeability data for water, gas, and oil saturation values between zero and one. This model passes the water and gas saturations of the three-phase region directly as the water and gas saturation inputs of the underlying two-phase relative permeability models.
}
\examples{
rel_perm_wgo <- kr3p_StoneII_SwSg(0.15, 0.2, 0.15, 0.15, 0.2, 0.2, 0.05, 0.05,
0.4, 1, 0.3, 3, 2, 4, 2.5, 101)
}
\references{
\insertRef{Stone1970}{Rrelperm}
\insertRef{Fayers1984}{Rrelperm}
}
|
81d4c77ad295aae16e141700bca92cd594ebe5b6 | 30a78aca3c329cb27bf68dcc0ae022179ccad18b | /Italy/code/utils/read-data-subnational.r | 9a68e4d96b3e0c3fc417c072dfbc327b7042ebf9 | [
"MIT"
] | permissive | wyuill/covid19model | 9c04ae4053a3aaaa59ff1527ea538d3504b9e8db | b98ffcde59cb307d359ae999d2c5e182d74116ba | refs/heads/master | 2022-09-28T17:54:44.960892 | 2020-06-02T08:54:55 | 2020-06-02T08:54:55 | 262,770,417 | 0 | 0 | MIT | 2020-06-02T08:54:56 | 2020-05-10T11:18:10 | Jupyter Notebook | UTF-8 | R | false | false | 16,619 | r | read-data-subnational.r | library(tidyr)
library(lubridate)
library(stringr)
library(tidyverse)
library(dplyr)
library(magrittr)
# Read the Italian regional COVID-19 case/death time series (Protezione
# Civile export) and return a daily long table with columns
# country (region name), DateRep, Cases, Deaths.  Cumulative counts are
# differenced into daily counts, padded back to 2019-12-31 with zeros, and
# region names are remapped via the province_name_translation lookup.
read_obs_data_italy <- function(){
  # Read the deaths and cases data (cumulative per region per day)
  d <- read.csv("Italy/data/dpc-covid19-ita-regioni.csv")
  # Original column names:
  # [1] "data" "stato" "codice_regione" "denominazione_regione" "lat"
  # [6] "long" "ricoverati_con_sintomi" "terapia_intensiva" "totale_ospedalizzati" "isolamento_domiciliare"
  # [11] "totale_positivi" "variazione_totale_positivi" "nuovi_positivi" "dimessi_guariti" "deceduti"
  # [16] "totale_casi" "tamponi" "note_it" "note_en"
  d <- d %>% select(data,denominazione_regione,totale_casi, deceduti)
  colnames(d)<-c("DateRep","country","Cases","Deaths")
  d$DateRep<-as.Date(d$DateRep)
  d$country<-str_replace_all(d$country, " ", "_")
  d$country<-as.factor(d$country)
  # Convert cumulative totals to daily increments within each region.
  d= d %>% group_by(country) %>%
    arrange(DateRep) %>%
    mutate(Deaths = Deaths - lag(Deaths,default=0)) %>%
    mutate(Cases = Cases - lag(Cases,default=0)) %>%
    ungroup()
  # Pad every region with zero-count rows from 2019-12-31 up to the first
  # observed date, so all series share a common start.
  regions<-levels(d$country)
  days<-length(seq(as.Date('2019/12/31'),as.Date(d$DateRep[1]-1),"days"))
  zeroes<-data.frame(DateRep=rep(seq(as.Date('2019/12/31'),as.Date(d$DateRep[1]-1),"days"),length(regions)),
                     country=regions[rep(seq_len(length(regions)), each = days)],
                     Cases=0,Deaths=0)
  d=bind_rows(zeroes,d)
  # Clamp negative daily increments (data corrections) to zero.
  d$Cases[d$Cases<0] <- 0
  d$Deaths[d$Deaths<0] <- 0
  # Remap region names via the translation table (adds an "Italy" row so
  # national aggregates survive the join).
  nametrans <- read.csv("Italy/data/province_name_translation.csv")
  colnames(nametrans)[which(colnames(nametrans)=="denominazione_regione")]<-"country"
  nametrans$country<-as.factor(nametrans$country)
  nametrans$country<-str_replace_all(nametrans$country, " ", "_")
  d_Italy<-data.frame(country="Italy",google_county="Italy",county="Italy")
  nametrans <- bind_rows(nametrans,d_Italy)
  # NOTE(review): dplyr joins take `by=`, not `by.x`/`by.y`; those arguments
  # are silently ignored and the join keys on the common "country" column —
  # confirm this is the intended behavior.
  d <- inner_join(d,nametrans,by.x="country",by.y="country") # fix names of regions
  # Replace the original name column with the "county" translation.
  d <- d[,-which(colnames(d) %in% c("country","google_county"))]
  colnames(d)[which(colnames(d)=="county")] <- "country"
  d$country<-str_replace_all(d$country, " ", "_")
  d <- d %>% select("country","DateRep","Cases","Deaths")
  return(d)
}
# Read UK cases (daily) and deaths (weekly) and return a daily long table
# with columns country ("<ONS code> - <area name>"), DateRep, Cases, Deaths.
# Weekly death totals are randomly disaggregated into daily counts, so the
# output is NOT deterministic unless the caller sets a seed beforehand.
read_obs_data <- function(){
  # Read the deaths and cases data
  cases_raw <- read.csv("Italy/data/uk_cases.csv", stringsAsFactors = FALSE)
  deaths_raw <- read.csv("Italy/data/uk_deaths.csv", stringsAsFactors = FALSE)
  deaths_weeks <- read.csv("Italy/data/uk_deaths_Weeks.csv", stringsAsFactors = FALSE)
  cases_raw$Date <- as.Date(cases_raw$Date, "%d/%m/%y")
  deaths_weeks$Week.start.date <- as.Date(deaths_weeks$Week.start.date, "%d-%b-%y")
  areas <- select(cases_raw, Area.code, Area.name) %>% distinct()
  deaths <- left_join(deaths_raw, deaths_weeks)
  # Draw N non-negative integers summing to M (used to spread a weekly death
  # total across the days of the week). Relies on sample(), hence random.
  rand_vect <- function(N, M, sd = 1, pos.only = TRUE) {
    # adapted from https://stackoverflow.com/questions/24845909/generate-n-random-integers-that-sum-to-m-in-r
    vec <- rep(1/N, N)
    if (abs(sum(vec)) < 0.01) vec <- vec + 1
    vec <- round(vec / sum(vec) * M)
    # Distribute any rounding deviation one unit at a time at random indices.
    deviation <- M - sum(vec)
    for (. in seq_len(abs(deviation))) {
      vec[i] <- vec[i <- sample(N, 1)] + sign(deviation)
    }
    # Repair negatives by moving units from positive entries, if requested.
    if (pos.only) while (any(vec < 0)) {
      negs <- vec < 0
      pos <- vec > 0
      vec[negs][i] <- vec[negs][i <- sample(sum(negs), 1)] + 1
      vec[pos][i] <- vec[pos][i <- sample(sum(pos), 1)] - 1
    }
    vec
  }
  # Build one row per area per day across the full span of death weeks,
  # then split each area-week death total randomly over its 7 days.
  d <- data.frame(Date = as.Date(min(deaths$Week.start.date):(max(deaths$Week.start.date)+6), origin="1970-01-01")) %>%
    crossing(areas) %>%
    left_join(., deaths, by=c("Date"="Week.start.date", "Area.code"="UTLA19CD")) %>%
    select(-UTLA19NM) %>%
    fill("Week.number") %>%
    replace_na(list(deaths = 0)) %>%
    group_by(Area.name, Week.number) %>%
    mutate(deaths_sum = sum(deaths),
           deaths = rand_vect(n(), max(deaths_sum)))
  # Attach daily cases and normalize to the shared output schema.
  d <- left_join(d, cases_raw) %>%
    mutate(country = paste(Area.code, "-", Area.name),
           DateRep = Date,
           Cases = cases,
           Deaths = deaths) %>%
    replace_na(list(Cases = 0)) %>%
    select(country, DateRep, Cases, Deaths)
  return(d)
}
# Read age-weighted infection fatality ratios (IFR) and population for the
# Italian regions, harmonize region names with the rest of the pipeline,
# and append a national "Italy" row (IFR from data/popt_ifr.csv, population
# hard-coded, index X fixed to 23). The `regions` argument is unused.
read_ifr_data_italy <- function(regions){
  ifr.Italy.regional <- read.csv("Italy/data/weighted_ifrs_italy.csv")
  colnames(ifr.Italy.regional)[which(colnames(ifr.Italy.regional)=="state")]<-"country"
  colnames(ifr.Italy.regional)[which(colnames(ifr.Italy.regional)=="IFR")]<-"ifr"
  ifr.Italy.regional$country <- str_replace_all(ifr.Italy.regional$country, " ", "_")
  # Fix naming mismatches between the IFR file and the rest of the data.
  ifr.Italy.regional$country[which( ifr.Italy.regional$country=="Friuli_Venezia-Giulia")]<-"Friuli_Venezia_Giulia"
  ifr.Italy.regional$country[which( ifr.Italy.regional$country=="Provincia_Autonoma_Bolzano")]<-"P.A._Bolzano"
  ifr.Italy.regional$country[which( ifr.Italy.regional$country=="Provincia_Autonoma_Trento")]<-"P.A._Trento"
  ifr.Italy.regional$country[which( ifr.Italy.regional$country=="Valle_D'Aosta")]<-"Valle_d'Aosta"
  colnames(ifr.Italy.regional)[which(colnames(ifr.Italy.regional)=="total_pop")] <- "popt"
  # Country-level IFR/population (source of the national "Italy" row below).
  ifr.by.country = read.csv("data/popt_ifr.csv")
  ifr.by.country$country = as.character(ifr.by.country[,2])
  # Remap region names via the translation table.
  nametrans <- read.csv("Italy/data/province_name_translation.csv")
  colnames(nametrans)[which(colnames(nametrans)=="denominazione_regione")]<-"country"
  nametrans$country<-as.factor(nametrans$country)
  nametrans$country<-str_replace_all(nametrans$country, " ", "_")
  # NOTE(review): dplyr joins ignore `by.x`/`by.y`; the join keys on the
  # common "country" column — confirm intended.
  ifr.Italy.regional <- inner_join(ifr.Italy.regional,nametrans,by.x="country",by.y="country") # fix names of regions
  ifr.Italy.regional <- ifr.Italy.regional[,-which(colnames(ifr.Italy.regional) %in% c("country","google_county"))]
  colnames(ifr.Italy.regional)[which(colnames(ifr.Italy.regional)=="county")] <- "country"
  ifr.Italy.regional$country<-str_replace_all(ifr.Italy.regional$country, " ", "_")
  ifr.Italy.regional <- ifr.Italy.regional %>% select("country","X","ifr","popt")
  # Append the national row, indexed as region 23.
  ifr.national<-ifr.by.country[which(ifr.by.country$country=="Italy"),]$ifr
  ifr.Italy.regional <- bind_rows(ifr.Italy.regional,data.frame(country="Italy",ifr=ifr.national,popt=60359546))
  ifr.Italy.regional$X[which(ifr.Italy.regional$country=="Italy")]<-23
  return(ifr.Italy.regional)
}
# Build the UK IFR/population table: one row per upper-tier local authority
# with columns country ("<code> - <name>"), X (row index), ifr, popt.
# The `region` argument is accepted for interface compatibility but unused.
read_ifr_data <- function(region){
  # IFR estimates, one row per area (row order matches the areas below).
  ifr_tab <- read.csv("Italy/data/uk_ifr.csv", stringsAsFactors = FALSE)
  # Mid-year population; real data starts after 6 header rows, first column dropped.
  pop_tab <- read.csv("Italy/data/uk_pop.csv", stringsAsFactors = FALSE, skip = 6)
  pop_tab <- select(pop_tab, -1)
  case_tab <- read.csv("Italy/data/uk_cases.csv", stringsAsFactors = FALSE)
  # Distinct (code, name) pairs define the set of areas.
  area_tab <- distinct(select(case_tab, Area.code, Area.name))
  joined <- left_join(area_tab, pop_tab, by = c("Area.code" = "X"))
  joined <- mutate(joined,
                   country = paste(Area.code, "-", Area.name),
                   X = row_number(),
                   ifr = ifr_tab$ifr,
                   popt = X2019)
  select(joined, country, X, ifr, popt)
}
# Read the Google Global Mobility Report and return the Italian regional
# series with columns country, date, and six mobility covariates, rescaled
# to fractions and sign-flipped (except residential) so that a positive
# value means a reduction in mobility. The `Country` argument is unused.
read_google_mobility_italy <- function(Country){
  google_mobility <- read.csv('Italy/data/Global_Mobility_Report.csv', stringsAsFactors = FALSE)
  google_mobility$date = as.Date(google_mobility$date, format = '%Y-%m-%d')
  # Percent changes -> fractions; flip sign of all but residential (col 11)
  # so reductions are positive.
  google_mobility[, c(6,7,8,9,10,11)] <- google_mobility[, c(6,7,8,9,10,11)]/100
  google_mobility[, c(6,7,8,9,10)] <- google_mobility[, c(6,7,8,9,10)] * -1
  google_mobility<-google_mobility[,c(2,3,5,6,7,8,9,10,11)]
  colnames(google_mobility)[which(colnames(google_mobility)=="country_region")]<-"state"
  colnames(google_mobility)[which(colnames(google_mobility)=="sub_region_1")]<-"country"
  colnames(google_mobility)[which(colnames(google_mobility)=="grocery_and_pharmacy_percent_change_from_baseline")]<-"grocery.pharmacy"
  colnames(google_mobility)[which(colnames(google_mobility)=="parks_percent_change_from_baseline")]<-"parks"
  colnames(google_mobility)[which(colnames(google_mobility)=="transit_stations_percent_change_from_baseline")]<-"transitstations"
  colnames(google_mobility)[which(colnames(google_mobility)=="workplaces_percent_change_from_baseline")]<-"workplace"
  colnames(google_mobility)[which(colnames(google_mobility)=="residential_percent_change_from_baseline")]<-"residential"
  colnames(google_mobility)[which(colnames(google_mobility)=="retail_and_recreation_percent_change_from_baseline")]<-"retail.recreation"
  # Rows with a blank sub-region are the national aggregate.
  google_mobility$country[which(google_mobility$country =="")]<-"Italy"
  mobility <- google_mobility
  # First pass: map Google's region names to the dataset's Italian names.
  nametrans <- read.csv("Italy/data/province_name_translation.csv")
  Italy<-data.frame(denominazione_regione="Italy",google_county="Italy",county="Italy")
  nametrans<-bind_rows(nametrans,Italy)
  mobility$country<-as.factor(mobility$country)
  nametrans$google_county<-as.factor(nametrans$google_county)
  colnames(nametrans)[which(colnames(nametrans)=="google_county")]<-"country"
  # NOTE(review): dplyr joins ignore `by.x`/`by.y`; the join keys on the
  # common "country" column — confirm intended.
  mobility <- inner_join(mobility,nametrans,by.x="country",by.y="country") # fix names of regions
  mobility$country<-str_replace_all(mobility$denominazione_regione, " ", "_")
  mobility <- mobility %>% select(country,date,grocery.pharmacy,parks,residential,retail.recreation,transitstations,workplace)
  # Second pass: replace the region names with the "county" names used by
  # the rest of the pipeline; the national "Italy" rows are dropped here.
  nametrans <- read.csv("Italy/data/province_name_translation.csv")
  colnames(nametrans)[which(colnames(nametrans)=="denominazione_regione")]<-"country"
  nametrans$country<-as.factor(nametrans$country)
  nametrans$country<-str_replace_all(nametrans$country, " ", "_")
  mobility <- mobility %>% filter(country !="Italy")
  mobility <- inner_join(mobility,nametrans,by.x="country",by.y="country") # fix names of regions
  mobility <- mobility[,-which(colnames(mobility) %in% c("country","google_county"))]
  colnames(mobility)[which(colnames(mobility)=="county")] <- "country"
  mobility$country<-str_replace_all(mobility$country, " ", "_")
  mobility <- mobility %>% select("country","date","grocery.pharmacy","parks","residential","retail.recreation","transitstations","workplace")
  return(mobility)
}
# Read the Google Global Mobility Report and return the UK local-authority
# series with columns country ("<ONS code> - <ONS name>"), date, and six
# mobility covariates, rescaled to fractions with reductions positive
# (residential keeps its sign). Areas with 30+ missing values are dropped.
read_google_mobility <- function(){
  google_mobility <- read.csv('Italy/data/Global_Mobility_Report.csv', stringsAsFactors = FALSE)
  google_mobility$date = as.Date(google_mobility$date, format = '%Y-%m-%d')
  # Percent changes -> fractions; flip sign of all but residential (col 11)
  # so reductions are positive.
  google_mobility[, c(6,7,8,9,10,11)] <- google_mobility[, c(6,7,8,9,10,11)]/100
  google_mobility[, c(6,7,8,9,10)] <- google_mobility[, c(6,7,8,9,10)] * -1
  google_mobility<-google_mobility[,c(2,3,5,6,7,8,9,10,11)]
  colnames(google_mobility)[which(colnames(google_mobility)=="country_region")]<-"state"
  colnames(google_mobility)[which(colnames(google_mobility)=="sub_region_1")]<-"country"
  colnames(google_mobility)[which(colnames(google_mobility)=="grocery_and_pharmacy_percent_change_from_baseline")]<-"grocery.pharmacy"
  colnames(google_mobility)[which(colnames(google_mobility)=="parks_percent_change_from_baseline")]<-"parks"
  colnames(google_mobility)[which(colnames(google_mobility)=="transit_stations_percent_change_from_baseline")]<-"transitstations"
  colnames(google_mobility)[which(colnames(google_mobility)=="workplaces_percent_change_from_baseline")]<-"workplace"
  colnames(google_mobility)[which(colnames(google_mobility)=="residential_percent_change_from_baseline")]<-"residential"
  colnames(google_mobility)[which(colnames(google_mobility)=="retail_and_recreation_percent_change_from_baseline")]<-"retail.recreation"
  mobility <- google_mobility
  # Keep only Google regions with a known ONS code and relabel them.
  mobility_matching <- read.csv("Italy/data/google_ons_lookup.csv", stringsAsFactors = FALSE) %>%
    filter(!is.na(ONS.Code))
  mobility <- inner_join(mobility_matching, mobility, by=c("Google"="country")) %>%
    mutate(country = paste(ONS.Code, "-", ONS)) %>%
    select("country","date","grocery.pharmacy","parks","residential","retail.recreation","transitstations","workplace")
  # remove areas with nas - May not need this as real issue is covariate_list is different lengths per area
  # however if fully blank or very blank issues with imputing so set a limit of <30 NA for now
  area_without_na <- mobility %>%
    group_by(country) %>%
    summarise_all(~sum(is.na(.))) %>%
    transmute(country, sumNA = rowSums(.[-1])) %>%
    filter(sumNA < 30)
  mobility <- mobility %>%
    filter(country %in% area_without_na$country)
  return(mobility)
}
read_interventions_italy <- function(){
  # Build a table of Italian non-pharmaceutical intervention dates, one row
  # per region and one column per intervention type.
  #
  # Reads:  Italy/data/Italy_events.csv, data/interventions.csv,
  #         Italy/data/province_name_translation.csv
  # Writes: Italy/data/Italy_interventions.csv (intermediate region table)
  # Returns: data.frame with columns Country, schools_universities,
  #          public_events, lockdown, social_distancing_encouraged,
  #          self_isolating_if_ill (all Date-valued).
  covariates<-read.csv("Italy/data/Italy_events.csv")
  # NOTE(review): row/column ranges are hard-coded to the current layout of
  # Italy_events.csv -- confirm whenever that file changes.
  covariates=covariates[c(1:105),c(1:5)]
  # Spread to one column per intervention; cells hold effective-date strings.
  covariates=covariates %>% select(Regions,Intervention,Effective_date) %>%
    pivot_wider(names_from=Intervention,values_from=Effective_date)
  colnames(covariates)=c("country","School_closures","Case_based_measures","Social_distancing","Public_events","Lockdown")
  # Parse day-month-year date strings (lubridate::dmy).
  covariates$School_closures=dmy(covariates$School_closures)
  covariates$Case_based_measures=dmy(covariates$Case_based_measures)
  covariates$Social_distancing=dmy(covariates$Social_distancing)
  covariates$Lockdown=dmy(covariates$Lockdown)
  covariates$Public_events=dmy(covariates$Public_events)
  # Rename to the column scheme used elsewhere in the pipeline.
  colnames(covariates)=c( "Country","schools_universities","self_isolating_if_ill","social_distancing_encouraged","public_events","lockdown" )
  covariates=as.data.frame(covariates)
  #covariates$country <- factor(covariates$country)
  covariates$Country <- factor(covariates$Country)
  write.csv(covariates,"Italy/data/Italy_interventions.csv")
  # add all of Italy: take the national row from the cross-country
  # interventions file and bind it on top of the regional rows.
  # NOTE(review): this national row survives the inner join below only if it
  # also appears in the translation table -- TODO confirm.
  covariates_countries <- read_csv("data/interventions.csv")
  covariates_countries <- covariates_countries[which(covariates_countries$Country=="Italy"), c('Country', 'Type', 'Date effective')]
  covariates_countries <- spread(covariates_countries, Type, 'Date effective')
  colnames(covariates_countries)=c("Country","schools_universities","public_events","lockdown","social_distancing_encouraged","self_isolating_if_ill")
  covariates_countries$schools_universities<-as.Date(covariates_countries$schools_universities,format="%Y-%m-%d")
  covariates_countries$public_events<-as.Date(covariates_countries$public_events,format="%Y-%m-%d")
  covariates_countries$lockdown<-as.Date(covariates_countries$lockdown,format="%Y-%m-%d")
  covariates_countries$social_distancing_encouraged<-as.Date(covariates_countries$social_distancing_encouraged,format="%Y-%m-%d")
  covariates_countries$self_isolating_if_ill<-as.Date(covariates_countries$self_isolating_if_ill,format="%Y-%m-%d")
  covariates<-bind_rows(covariates_countries,covariates)
  # Changing region names: map the Italian region names onto the county
  # names used by the mobility data.
  nametrans <- read.csv("Italy/data/province_name_translation.csv")
  colnames(nametrans)[which(colnames(nametrans)=="denominazione_regione")]<-"country"
  nametrans$country<-as.factor(nametrans$country)
  nametrans$country<-str_replace_all(nametrans$country, " ", "_")
  colnames(nametrans)[which(colnames(nametrans)=="country")]<-"Country"
  # NOTE(review): inner_join() has no by.x/by.y arguments (that is merge()
  # syntax); they are swallowed by ... and the call falls back to a natural
  # join, which here matches on the shared "Country" column. Consider
  # by = "Country" to make this explicit. TODO confirm.
  covariates <- inner_join(covariates,nametrans,by.x="Country",by.y="Country") # fix names of regions
  covariates <- covariates[,-which(colnames(covariates) %in% c("Country","google_county"))]
  colnames(covariates)[which(colnames(covariates)=="county")] <- "Country"
  covariates$Country<-str_replace_all(covariates$Country, " ", "_")
  covariates <- covariates %>% select("Country","schools_universities","public_events","lockdown","social_distancing_encouraged","self_isolating_if_ill")
  return(covariates)
}
read_interventions <- function(){
  # Expand the single UK-wide set of intervention dates to every local area
  # in the UK case file: one row per area, dates parsed from d/m/yy strings.
  # Returns a data.frame with Country ("<Area.code> - <Area.name>") plus the
  # five intervention date columns.
  covariates <- read.csv("Italy/data/uk_interventions.csv")
  cases_raw <- read.csv("Italy/data/uk_cases.csv", stringsAsFactors = FALSE)
  # crossing() pairs each distinct area with the national intervention row.
  covariates <- select(cases_raw, Area.code, Area.name) %>%
    distinct() %>%
    crossing(covariates) %>%
    mutate(Country = paste(Area.code, "-", Area.name),
           schools_universities = as.Date(schools_universities, "%d/%m/%y"),
           public_events = as.Date(public_events, "%d/%m/%y"),
           lockdown = as.Date(lockdown, "%d/%m/%y"),
           social_distancing_encouraged = as.Date(social_distancing_encouraged, "%d/%m/%y"),
           self_isolating_if_ill = as.Date(self_isolating_if_ill, "%d/%m/%y")) %>%
    select("Country","schools_universities","public_events","lockdown","social_distancing_encouraged","self_isolating_if_ill")
  return(covariates)
}
|
2c40660da6beaa9aaa11e8d6eb67c0e7984b25cb | f457f42f9d7881977fb9c2f36648561557c976ba | /man/portpirie.Rd | 5816249bba5b0fca787cdc7638d8d8990a6763c3 | [] | no_license | cran/ismev | cb20d114b2617cd8f105b0fbc9fc796a48645ba7 | ffe568d732e72e071012a989d672996be6d8cc88 | refs/heads/master | 2021-05-16T02:43:11.393201 | 2018-05-10T21:08:29 | 2018-05-10T21:08:29 | 17,696,836 | 1 | 5 | null | null | null | null | UTF-8 | R | false | false | 704 | rd | portpirie.Rd | \name{portpirie}
\alias{portpirie}
\title{Annual Maximum Sea Levels at Port Pirie, South Australia}
\usage{data(portpirie)}
\description{
The \code{portpirie} data frame has 65 rows and 2 columns.
The second column gives annual maximum sea levels recorded
at Port Pirie, South Australia, from 1923 to 1987.
The first column gives the corresponding years.
}
\format{
This data frame contains the following columns:
\describe{
\item{Year}{A numeric vector of years.}
\item{SeaLevel}{A numeric vector of annual sea level
maxima.}
}
}
\source{
Coles, S. G. (2001)
\emph{An Introduction to Statistical Modelling of
Extreme Values.} London: Springer.
}
\keyword{datasets}
|
b1b3bf260f1898f68de2945e3b285cfe353bc050 | 3b3a209f4af98de119a5b70f569a3357b9a0370f | /man/load_supermarket_takslist.Rd | 22d778ab7b2dcc5b1c8ca33b385d3f72a0f74178 | [] | no_license | BrainVR/brainvr-supermarket | 190741a73548b97ccd71a5bec9c86e7c0323f151 | 5ada23205dc731f1f71f03174967f10614a9a158 | refs/heads/master | 2021-09-23T03:54:31.279411 | 2021-09-12T00:35:37 | 2021-09-12T00:35:37 | 216,592,182 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 710 | rd | load_supermarket_takslist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/super-loading.R
\name{load_supermarket_takslist}
\alias{load_supermarket_takslist}
\title{Loads tasklist from .json file}
\usage{
load_supermarket_takslist(filepath, language = "CZ")
}
\arguments{
\item{filepath}{path to the .json tasklist file. In newer logging versions,
this is already included in the header, but in older versions it
needs to be loaded separately}
\item{language}{language of the tasklist. Only important if you are
not logging item codes.
See language options in [item_translations]. Default is "CZ".}
}
\value{
data.frame with supermarket task progression
}
\description{
Loads tasklist from .json file
}
|
83a9b2a89c52bedc61b18c396b93ccd1e45277e7 | bbde2405b9ff4a77d0f5156b31465622889ee38d | /tutorial.R | 82d9a40f75ed1d0d96627e28c57988ca55170cba | [] | no_license | cstatzer/LearningR | aa8004bfeb23a8967625616734d3002fd57a6c99 | 4263a6ba60adc9e610e6e94bc9694c08c68e8858 | refs/heads/master | 2022-11-20T15:12:28.181024 | 2020-07-12T20:13:38 | 2020-07-12T20:13:38 | 279,138,254 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,055 | r | tutorial.R | # Install additional libraries to be able to generate all plots (run install functions once and then you can delete them):
# install.packages("viridis")
# install.packages("ggforce")
# install.packages("ggrepel")
# install.packages("survminer")
# install.packages("survival")
# install.packages("magrittr")
# install.packages("scatterpie")
library(ggExtra)
#################################
########### R basics ############
#################################
library(tidyverse)
# Scalars and vectors; c() combines values into a vector.
Kelvin <- 273.15
tempsC <- c(5,20,30)
feeling <- c("cold","medium","hot")
# Vectorised summary functions.
mean(tempsC)
max(tempsC)
length(feeling)
# Two equivalent ways of rounding the mean temperature:
# option A
temp <- mean(tempsC)
round(temp)
# option B
tempsC %>% mean() %>% round()
#################################
######## Data workflow ##########
#################################
# Gapminder data: one row per country per year.
world <- read_csv("./data/gapminder.csv")
world
View(world)
# Keep only rows for the Americas.
world %>%
  filter(continent == "Americas")
# Filter, pick columns, and sort by descending life expectancy.
world %>%
  filter(continent == "Asia") %>%
  select(country,year,lifeExp) %>%
  arrange(desc(lifeExp))
# Mean life expectancy per continent and year.
world %>%
  group_by(continent, year) %>%
  summarize(mean_life_exp = mean(lifeExp)) %>%
  arrange(year,continent)
##################################################
######## Data visualization with ggplot ##########
##################################################
# Distribution of life expectancy across all rows.
ggplot(data = world, mapping = aes(x = lifeExp)) +
  geom_histogram()
# The LEGO principle: a ggplot object can be stored and geoms added later.
plot <- ggplot(data = world,mapping = aes(x = continent, y = lifeExp))
plot
plot + geom_point()
plot + geom_boxplot()
plot + geom_violin() + geom_boxplot(width = 0.1)
# Data exploration: GDP per capita vs life expectancy with a smoothed trend.
plot <- ggplot(data = world, mapping = aes(x = gdpPercap, y = lifeExp)) +
  geom_point(alpha = 0.5) +
  geom_smooth() +
  ggtitle("Do rich countries have a higher life expectancy?")
plot
# ggMarginal (ggExtra) adds marginal boxplots to the stored scatter plot.
ggMarginal(plot,type = "boxplot")
# Same scatter plot, coloured (and smoothed) per continent.
ggplot(data = world, mapping = aes(x = gdpPercap, y = lifeExp, color = continent)) +
  geom_point() +
  geom_smooth() +
  ggtitle("Do rich countries have a higher life expectancy?")
# ggplot(data = world, mapping = aes(x = year, y = country, fill = gdpPercap)) +
#   geom_tile()
# The Economist: bubble chart for 2002; population rescaled to millions,
# African countries with GDP per capita > 3000 get labels.
library(viridis)
library(ggforce)
library(ggrepel)
world_2002 <- world %>%
  filter(year == 2002) %>%
  mutate(pop=pop/1000000)
africa_leading <- world_2002 %>%
  filter(continent == "Africa",
         gdpPercap > 3000)
# NOTE(review): guide = FALSE is deprecated in newer ggplot2 (use "none") --
# confirm against the installed version.
plot <- ggplot(data = world_2002, aes(x=gdpPercap, y=lifeExp, size = pop, color = continent, label = country)) +
  geom_point(alpha=0.8) +
  scale_size(range = c(1, 20), name="Population (mio)") +
  scale_color_viridis(discrete=TRUE, guide=FALSE) +
  facet_zoom(x = continent == "Africa") +
  geom_label_repel(data = africa_leading,size = 3) +
  theme_bw() +
  labs(title = "World demographics in 2002",x = "GDP per capita [US dollars per inhabitant]", y = "Life expectancy [years]")
plot
#################################
########### Survival ############
#################################
library(tidyverse)
library(survminer)
library(survival)
# Lifespan data with columns Time, Censored, Strain, Repeat.
data <- read_csv(file = "./data/lifespan.csv")
# Kaplan-Meier fit per strain.
fit<- survfit(Surv(Time, Censored) ~ Strain, data = data)
# option A
ggsurvplot(fit = fit, data = data)
# option B
ggsurvplot(fit = fit, data = data,conf.int = TRUE,risk.table = TRUE)
# option C
ggsurvplot(fit = fit, data = data,conf.int = TRUE,risk.table = TRUE,surv.median.line = "hv")
# Stratify additionally by experimental repeat and facet the curves.
fit_repeat<- survfit(Surv(Time, Censored) ~ Strain + Repeat, data = data)
ggsurvplot_facet(fit = fit_repeat, data = data,facet.by = "Repeat",conf.int = TRUE,risk.table = TRUE,surv.median.line = "hv")
#################################
########## Microscopy ###########
#################################
library(tidyverse)
library(waffle)
colors <- c("grey50","bisque1", "darkorange2", "red","darkred")
data <- read_csv(file = "./data/microscopy.csv")
# Named vector of counts per category (columns Frequency / category).
absolute <- data$Frequency
names(absolute) <- data$category
waffle(absolute, rows = 10, size = 1, colors = colors, legend_pos = "bottom",title = "Absolute observations")
# Rescale to 25 squares total so the waffle shows relative fractions.
relative <- round(absolute/sum(absolute) * 25)
waffle(relative, rows = 1, size = 1, colors = colors, legend_pos = "bottom",title = "Relative fractions")
#################################
####### Merging datasets ########
#################################
chip <- read_csv("./data/chip.csv")
rnaseq <- read_csv("./data/rna_seq.csv")
# Keep every gene found in either dataset (full outer join on "gene").
full_join(chip,rnaseq, by = "gene")
#################################
######## Parsing strings ########
#################################
library(tidyverse)
input <- read_csv("./data/aa_sequence.csv")
input
# Collapse all cells into one string, then strip digits, commas and
# whitespace to leave the bare sequence characters.
str <- input %>% unlist() %>% paste(., collapse=" ")
str_proc <- str_replace_all(string = str,pattern = "[[:digit:],[:space:]]",replacement = "")
str_proc
#################################
#### Data cleaning RNA seq ######
#################################
library(tidyverse)
data <- read_csv(file = "./data/RNAseq.csv")
# Coerce fold changes to numeric and drop rows that failed to parse.
data <- data %>% mutate(logFC = as.numeric(logFC)) %>% filter(!is.na(logFC))
# Split "prefix_ID" gene identifiers into a readable ID column.
readable <- data %>% separate(col = Gene_id,into = c("Junk","ID"),sep = "_")
readable <- readable %>% filter(!is.na(ID))
# Volcano plot: effect size vs significance.
ggplot(data = data, aes(x = logFC,y = -log10(padj))) +
  geom_point(alpha = 0.5)
#################################
##### 96-well plate scoring #####
#################################
library(tidyverse)
library(ggplot2)
library(magrittr)
library(scatterpie)
# Plate rows bottom-to-top (H..A) so row A appears at the top of the plot.
lets<- c("H","G","F","E","D","C","B","A")
df <- read_csv("./data/96well.csv")
# Label each well (e.g. "A1") and convert row letters to numeric positions.
df <- df %>% mutate(region = paste0(Row,Column), Row = match(Row, lets))
# Divide the score columns by 80 (presumably the count per well -- TODO
# confirm) so pie slices are fractions.
df[,4:9] <- df[,4:9] / 80
# Grey outline for every well position, then a pie chart per scored well.
p <- ggplot() +
  geom_point(data=expand.grid(seq(1, 12), seq(1,8)), aes(x=Var1, y=Var2),
             color="grey80", fill="white", shape=21, size=6) +
  geom_scatterpie(aes(x= Column, y= Row, r = Total, group = region), data=df,
                  cols=c("Extremely high","High", "Intermediate", "Low", "None" ))
# NOTE(review): the title string below is missing its closing ")" and
# "Promotor" should probably read "Promoter".
p + scale_x_discrete(name ="Columns", limits=c(1:12)) +
  scale_y_discrete(name ="Rows", limits=lets) +
  scale_fill_manual(values = c("red","darkgreen","darkolivegreen3","darkseagreen1","grey")) +
  coord_fixed(ratio=12/12) + facet_grid(Day ~ .) +
  ggtitle("Promotor activity (day 1 and 8")
|
4598f58f7fb647c7e19da519d6663f54911bee20 | af545d1594c0aca08e84a6bc2742df962521695e | /Chapter 15/Exercise 15.44.r | fde4f8b799cc856f9169e10635fb89489f454dff | [] | no_license | kmahoski/Statistical-Data-Analysis-in-R | 9da43ae5339d568cd98f19b8c8ad1c472f5c98b2 | 309f85c1284e4691e5670172f2619b75a292dd12 | refs/heads/master | 2021-01-22T11:37:36.858625 | 2014-11-10T21:53:17 | 2014-11-10T21:53:17 | 25,942,516 | 1 | 0 | null | 2014-11-10T21:53:17 | 2014-10-29T21:35:08 | R | UTF-8 | R | false | false | 260 | r | Exercise 15.44.r | surfaceFinishes <- c(10.32, 9.68, 9.92, 10.10, 10.20, 9.87, 10.14, 9.74, 9.80, 10.26)
# Exercise 15.44: two-sided sign test (normal approximation) of
# H0: median surface finish = 10, at the 95% confidence level.
hypothesizedMedian <- 10
ciLevel <- 0.95
signTestNormalApproximation(surfaceFinishes,
                            md = hypothesizedMedian,
                            alternative = "two.sided",
                            conf.level = ciLevel)
|
402265793be7de7eb285f20f6e6ad79e1a0c8ece | 1415327fe6a3200b61987f6579abbda343fb024f | /man/mass_transport_coef.Rd | 44c733721e9f8a507a0a7c3543d4c70740410f94 | [] | no_license | masspern/rAedesSim | 6b093bd98b6a8de9195bf0993c2f1bbf03593a7d | ee532108bed9ca3b93739edf84a58d1d2b5336fc | refs/heads/master | 2021-01-16T21:00:37.492290 | 2015-11-08T08:41:58 | 2015-11-08T08:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 701 | rd | mass_transport_coef.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/psychro_function.r
\name{mass_transport_coef}
\alias{mass_transport_coef}
\title{mass_transport_coef}
\usage{
mass_transport_coef(tair = 23, twater = 21, urel = 50, v_air = 0.5)
}
\arguments{
\item{tair}{numeric: Air temperature in Celsius}
\item{twater}{numeric: Water temperature in Celsius}
\item{urel}{numeric: Air relative humidity (\%)}
\item{v_air}{numeric: Velocity of air movement (m/sec)}
}
\value{
numeric: Mass transfer coefficient for water. Only vapour pressure gradient is considered.
}
\description{
Mass transfer coefficient as a function of air temperature in forced convection (v_air > 0.1).
}
|
c61b359604d6d6c11e36bca3aa6ed67ef9231817 | 7054e31dd0a145ef392b1fb7565bd6c24a7b72b8 | /R/Gamma.R | 778e54949dc3b9742fb1cb473beaf9beaed7e0b9 | [] | no_license | cran/ExtDist | babe7363f31a019fd88aed030450b44346ff3d21 | ace084bbc5b4557d043418c24ea64bc29293cb42 | refs/heads/master | 2023-08-31T00:17:52.828260 | 2023-08-21T09:02:36 | 2023-08-21T10:31:36 | 19,884,385 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 7,671 | r | Gamma.R | #' @title The Gamma Distribution.
#' @description Density, distribution, quantile, random number
#' generation, and parameter estimation functions for the gamma distribution with parameters \code{shape} and
#' \code{scale}. Parameter estimation can be based on a weighted or unweighted i.i.d sample and can be carried out
#' numerically.
#' @rdname Gamma
#' @name Gamma
#'
#' @aliases dGamma
#' @aliases pGamma
#' @aliases qGamma
#' @aliases rGamma
#' @aliases eGamma
#' @aliases lGamma
#'
#' @param params A list that includes all named parameters
#' @param x,q A vector of quantiles.
#' @param w An optional vector of sample weights.
#' @param p A vector of probabilities.
#' @param n Number of observations.
#' @param X Sample observations.
#' @param shape Shape parameter.
#' @param scale Scale parameter.
#' @param method Parameter estimation method.
#' @param logL logical; if TRUE, lBeta_ab gives the log-likelihood, otherwise the likelihood is given.
#' @param ... Additional parameters.
#'
#' @details The \code{dGamma()}, \code{pGamma()}, \code{qGamma()},and \code{rGamma()} functions serve as wrappers of the standard
#' \code{\link[stats]{dgamma}}, \code{\link[stats]{pgamma}}, \code{\link[stats]{qgamma}}, and \code{\link[stats]{rgamma}} functions
#' in the \pkg{\link{stats}} package. They allow for the parameters to be declared not only as
#' individual numerical values, but also as a list so parameter estimation can be carried out. \cr
#' \cr
#' The gamma distribution with parameter \code{shape}=\eqn{\alpha} and \code{scale}=\eqn{\beta} has probability density function,
#' \deqn{f(x)= (1/\beta^\alpha \Gamma(\alpha))x^{\alpha-1}e^{-x/\beta}}
#' where \eqn{\alpha > 0} and \eqn{\beta > 0}. Parameter estimation can be performed using the method of moments
#' as given by Johnson et.al (pp.356-357). \cr
#' \cr
#' The log-likelihood function of the gamma distribution is given by,
#' \deqn{l(\alpha, \beta |x) = (\alpha -1) \sum_i ln(x_i) - \sum_i(x_i/\beta) -n\alpha ln(\beta) + n ln \Gamma(\alpha)}
#' where \eqn{\Gamma} is the \link[base]{gamma} function. The score function is provided by Rice (2007), p.270.
#' @return dGamma gives the density, pGamma the distribution function,
#' qGamma the quantile function, rGamma generates random deviates, and
#' eGamma estimates the distribution parameters. lGamma provides the log-likelihood function.
#'
#' @references Johnson, N. L., Kotz, S. and Balakrishnan, N. (1995) Continuous Univariate Distributions,
#' volume 1, chapter 17, Wiley, New York.\cr
#' \cr
#' Bury, K. (1999) Statistical Distributions in Engineering, Chapter 13, pp.225-226,
#' Cambridge University Press.\cr
#' \cr
#' Rice, J.A. (2007) Mathematical Statistics and Data Analysis, 3rd Ed, Brookes/Cole.
#' @seealso \pkg{\link{ExtDist}} for other standard distributions.
#' @author Haizhen Wu and A. Jonathan R. Godfrey. \cr
#' Updates and bug fixes by Sarah Pirikahu, Oleksii Nikolaienko.
#' @examples
#' # Parameter estimation for a distribution with known shape parameters
#' X <- rGamma(n=500, shape=1.5, scale=0.5)
#' est.par <- eGamma(X, method="numerical.MLE"); est.par
#' plot(est.par)
#'
#' # Fitted density curve and histogram
#' den.x <- seq(min(X),max(X),length=100)
#' den.y <- dGamma(den.x,shape=est.par$shape,scale=est.par$scale)
#' hist(X, breaks=10, probability=TRUE, ylim = c(0,1.1*max(den.y)))
#' lines(den.x, den.y, col="blue")
#' lines(density(X), lty=2)
#'
#' # Extracting shape or scale parameters
#' est.par[attributes(est.par)$par.type=="shape"]
#' est.par[attributes(est.par)$par.type=="scale"]
#'
#' # Parameter estimation for a distribution with unknown shape parameters
#' # Example from: Bury(1999) pp.225-226, parameter estimates as given by Bury are
#' # shape = 6.40 and scale=2.54.
#' data <- c(16, 11.6, 19.9, 18.6, 18, 13.1, 29.1, 10.3, 12.2, 15.6, 12.7, 13.1,
#' 19.2, 19.5, 23, 6.7, 7.1, 14.3, 20.6, 25.6, 8.2, 34.4, 16.1, 10.2, 12.3)
#' est.par <- eGamma(data, method="numerical.MLE"); est.par
#' plot(est.par)
#'
#' # log-likelihood
#' lGamma(data,param = est.par)
#'
#' # Evaluating the precision of the parameter estimates by the Hessian matrix
#' H <- attributes(est.par)$nll.hessian
#' var <- solve(H)
#' se <- sqrt(diag(var));se
#' @rdname Gamma
#' @export dGamma
dGamma <- function(x, shape = 2, scale = 2, params = list(shape = 2, scale = 2), ...) {
  # Density of the gamma distribution; thin wrapper around stats::dgamma().
  # An explicitly supplied `params` list overrides the individual
  # `shape`/`scale` arguments.
  if (!missing(params)) {
    shape <- params$shape
    scale <- params$scale
  }
  stats::dgamma(x, shape = shape, scale = scale)
}
#' @rdname Gamma
#' @export pGamma
pGamma <- function(q, shape = 2, scale = 2, params = list(shape = 2, scale = 2), ...) {
  # Cumulative distribution function of the gamma distribution; thin wrapper
  # around stats::pgamma(). A supplied `params` list wins over the separate
  # shape/scale arguments.
  if (!missing(params)) {
    shape <- params$shape
    scale <- params$scale
  }
  stats::pgamma(q, shape = shape, scale = scale)
}
#' @rdname Gamma
#' @export qGamma
qGamma <- function(p, shape = 2, scale = 2, params = list(shape = 2, scale = 2), ...) {
  # Quantile function of the gamma distribution; thin wrapper around
  # stats::qgamma(). A supplied `params` list wins over the separate
  # shape/scale arguments.
  if (!missing(params)) {
    shape <- params$shape
    scale <- params$scale
  }
  stats::qgamma(p, shape = shape, scale = scale)
}
#' @rdname Gamma
#' @export rGamma
rGamma <- function(n, shape = 2, scale = 2, params = list(shape = 2, scale = 2), ...) {
  # Random deviates from the gamma distribution; thin wrapper around
  # stats::rgamma(). A supplied `params` list wins over the separate
  # shape/scale arguments.
  if (!missing(params)) {
    shape <- params$shape
    scale <- params$scale
  }
  stats::rgamma(n, shape = shape, scale = scale)
}
#' @rdname Gamma
#' @export eGamma
eGamma <- function(X,w, method =c("moments","numerical.MLE"),...){
  # Estimate the shape and scale parameters of a gamma distribution from an
  # (optionally weighted) i.i.d. sample.
  #
  # Args:
  #   X:      numeric vector of observations.
  #   w:      optional sample weights; normalised internally to sum to n.
  #   method: "moments" (default) or "numerical.MLE" (weighted MLE via the
  #           package's wmle() helper).
  #
  # Returns: a list of class "eDist" with elements `shape` and `scale`, plus
  #   attributes recording the data, weights, method and standard errors.
  method <- match.arg(method)
  # Normalise the weights so they sum to n (uniform weights when missing).
  n <- length(X)
  if(missing(w)){
    w <- rep(1,n)
  } else {
    w <- n*w/sum(w)
  }
  if(method == "moments") {
    # Method-of-moments estimation. NOTE(review): the weights `w` are not
    # used in this branch; only the numerical MLE is weighted.
    Smean <- mean(X)
    Ssd <- stats::sd(X)
    # For Gamma(shape, scale): mean = shape*scale and var = shape*scale^2,
    # hence shape = (mean/sd)^2 and scale = var/mean
    # (NIST/SEMATECH Engineering Statistics Handbook).
    shape <- (Smean/Ssd)^2
    # Bug fix: previously Smean/(Ssd^2), which is the *rate* (1/scale);
    # the moment estimator of the scale is var/mean.
    scale <- (Ssd^2)/Smean
    # Asymptotic standard errors of the estimates (Bury pg 216).
    SE.shape <- Ssd*sqrt(trigamma(shape)/(n*(shape*trigamma(shape)-1)))
    SE.scale <- sqrt(shape/(n*(shape*trigamma(shape)-1)))
    est.par <- list(shape = shape, scale = scale)
    est.par.se <- c(SE.shape, SE.scale)
  }
  else
  {method <- "numerical.MLE"
   # Weighted maximum-likelihood estimation, started from shape = 1 and
   # scale = mean(X).
   est.par <- wmle(X=X, w=w, distname = "Gamma",
                   initial=list(shape = 1, scale = mean(X) ),
                   lower=list(shape = 0, scale = 0),
                   upper=list(shape = Inf, scale = Inf))
   # Standard errors from the inverse Hessian of the negative
   # log-likelihood; fall back to NA when the Hessian is singular.
   est.par.se <- try(sqrt(diag(solve(attributes(est.par)$nll.hessian))),silent=TRUE)
   if(inherits(est.par.se, "try-error")) {
     est.par.se <- rep(NA, length(est.par))
   }
  }
  # Attach the metadata used by the eDist print/plot methods.
  attributes(est.par)$ob <- X
  attributes(est.par)$weights <- w
  attributes(est.par)$distname <- "Gamma"
  attributes(est.par)$method <- method
  attributes(est.par)$par.name <- c("shape","scale")
  attributes(est.par)$par.type <- c("shape","scale")
  attributes(est.par)$par.vals <- c(est.par$shape, est.par$scale)
  attributes(est.par)$par.s.e <- est.par.se
  class(est.par) <- "eDist"
  return(est.par)
}
#' @rdname Gamma
#' @export lGamma
## (weighted) (log) likelihood function
lGamma <-
  function(X, w, shape = 2, scale = 2, params = list(shape = 2, scale = 2), logL = TRUE,...){
    # (Weighted) likelihood / log-likelihood of a gamma sample.
    #
    # Args:
    #   X:           numeric vector of observations.
    #   w:           optional weights; normalised internally to sum to n.
    #   shape/scale: gamma parameters; overridden by `params` when supplied.
    #   logL:        if TRUE return the log-likelihood, else the likelihood.
    if(!missing(params)){
      shape <- params$shape
      scale <- params$scale
    }
    n <- length(X)
    if(missing(w)){
      w <- rep(1,n)
    } else {
      w <- n*w/sum(w)
    }
    # Bug fix: the default `params` list (shape = 2, scale = 2) used to be
    # forwarded to dGamma() even when the user supplied `shape`/`scale`
    # directly, so those arguments were silently ignored. Using the resolved
    # values fixes this; log = TRUE is also numerically more stable than
    # taking log() of the density.
    ll <- sum(w * stats::dgamma(X, shape = shape, scale = scale, log = TRUE))
    if(logL) {return(ll)} else{return(exp(ll))}
  }
|
0575c8a044d2e54502911dbbb5674bfc2ab058e7 | 513a8f7525b71e23e47ca647201e1a8da9980b2c | /kladda_1.R | 6aad8d052ed1f3ab9165511a7d3ec577817d5d13 | [] | no_license | jonjoensen/demo-shiny-statbank | 690beff3928164fcf7816350ffe22a00c680dbce | 5cb87996d83c42350a4358318285059965c5fbd4 | refs/heads/master | 2020-03-15T00:20:18.112357 | 2018-05-02T14:35:12 | 2018-05-02T14:41:40 | 131,865,799 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 636 | r | kladda_1.R | # install.packages("pxweb")
library(pxweb)
# Print the catalogue of PX-Web APIs known to the pxweb package.
api_catalogue()
# Register a custom API entry for an internal staging PX-Web server
# (Statistics Faroe Islands) and persist it in the local catalogue.
staging_api <- pxweb_api$new(api = "pxweb/staging",
                      url = "http://pxweb/staging/api/[version]/[lang]",
                      description = "Staging - Statistics Faroe Islands",
                      languages = "en",
                      versions = "v1",
                      calls_per_period = 30,
                      period_in_seconds = 10,
                      max_values_to_download = 100000)
staging_api$write_to_catalogue()
# Launch the interactive query browser. The bare values below are the menu
# answers typed during the interactive session (this is a scratch/"kladda"
# file); they do nothing useful when the file is sourced non-interactively.
interactive_pxweb()
17
1
2
5
1
'n'
'n'
'y'
1
4
1
'a'
'a'
'a'
40
|
bf79963ccb42ab5c6f98386c5ff31d367027aee5 | cb90a14067d0a0638dac4065f97bd43f37328185 | /extractSSlocdata.R | 0545174468dc08ad168c3428b5f3e206c7c9524f | [] | no_license | RowanHowell/CLM-R | ace86119a8adb0a28c2e9c450a2528f88657cc93 | 3aefa6dc71b23fa99444b8e3288d7a314113db61 | refs/heads/master | 2022-11-23T00:22:56.326806 | 2020-07-20T09:12:14 | 2020-07-20T09:12:14 | 243,314,095 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,351 | r | extractSSlocdata.R |
# Summarise Boolean-network steady states for a set of proteins at given
# subcellular locations.
#
# Args:
#   data:  list of attractor state matrices (rows = node names, columns
#          labelled "AttrN.state"); matrices may have different widths.
#   prots: character vector of protein names to extract.
#   locs:  character vector of location suffixes (the text after "L_" in a
#          node name) to keep.
#   model: network model; only model$genes (the node names) is used here.
#
# Returns: character matrix, rows = nodes (plus SACA_Nucleus, SpindleAlign
#   and ME), columns = unique steady states, entries from
#   {"OFF","LOW","HIGH","ON","LOW/OFF","HIGH/LOW","MEON/OFF"}.
extractSSlocdata = function(data, prots, locs, model){
  if(length(data)>1){ # combine SSs of varying length
    K = length(data)
    data2 = data[[1]]
    for(k in 2:K){
      data2 = cbind(data2, data[[k]])
    }
    data = data2
  } else{
    data = data[[1]]
  }
  SSs = colnames(data) # determine number and length of SSs
  # Split "AttrN.state" labels into attractor id and state index.
  SSsnum = as.tibble(str_split_fixed(SSs, "\\.", n =2))
  colnames(SSsnum) = c("SS", "state")
  N = length(unique(SSsnum$SS))
  lengths = c()
  for(j in 1:N){
    lengths[j] = max(as.numeric(filter(SSsnum, SS == paste0("Attr", j))$state))
  }
  SSslength = tibble(SS = paste0("Attr",1:N), length = lengths)
  nodes =c() # wrangle node names
  # Keep only nodes matching a requested protein whose "L_<loc>" suffix is
  # one of the requested locations.
  for(p in prots){
    nodest = grep(p, model$genes, value = TRUE)
    for(n in nodest){
      if(str_split(n, "L_")[[1]][2] %in% locs){
        nodes = c(nodes, n)
      }
    }
  }
  lownodes = grep("low", nodes, value = TRUE) # declare various useful collection of nodes
  highnodes = grep("high", nodes, value = TRUE)
  multinodes = sub("low","", lownodes)
  singlenodes = setdiff(nodes,c(lownodes,highnodes))
  allnodes = c(setdiff(nodes,c(lownodes,highnodes)), multinodes)
  allData = as_tibble(t(data[c("SACA_Nucleus", "SpindleAlign",nodes, "ME"),])) #transverse to make easier to use dplyr methods
  if(length(lownodes)>length(highnodes)){ # in case eg low at SPB but not high
    # Create the missing "high" columns as all-zero placeholders.
    extra = sub("low","high",setdiff(lownodes, sub("high", "low", highnodes)))
    for(j in 1:length(extra)){
      allData = mutate(allData, !!extra[j] := 0)
    }
  }
  if(length(lownodes)>0){ # combine low/high nodes where necessary
    # Combined level: 0 = off, 1 = low only, 2 = low + high.
    for(j in 1:length(lownodes)){
      allData = mutate(allData, !!multinodes[j] := allData[[lownodes[j]]] + allData[[sub("low","high",lownodes[j])]])
    }
  }
  if(length(singlenodes)>0){# double single level nodes to match HIGH
    for(j in 1:length(singlenodes)){
      allData = mutate(allData, !!singlenodes[j] := 2*allData[[singlenodes[j]]])
    }
  }
  allData = mutate(allData, ME = ME*3, SpindleAlign = 3*SpindleAlign, SACA_Nucleus = 3*SACA_Nucleus) # triple input/output nodes to distinguish
  allData = select(allData,SACA_Nucleus, SpindleAlign, !!allnodes, ME) # select relevant variables
  summaryData = data.frame(matrix(, nrow = 0, ncol = ncol(allData)))
  colnames(summaryData) = colnames(allData)
  # NOTE(review): the which(SSsnum == ...) calls below rely on matches
  # occurring only in the first column of the two-column SSsnum tibble, so
  # the linear indices coincide with row numbers -- fragile; TODO confirm.
  for(j in 1:N){ # iterate over SSs
    if(SSslength$length[j]>1){
      subDF = allData[which(SSsnum == paste0("Attr",j)),]
      cSums = colSums(subDF)/as.numeric(SSslength$length[j]) # take mean value over all of the states in a cyclic attractor
      summaryData[j, ] = cSums
    } else{
      summaryData[j,] = allData[which(SSsnum == paste0("Attr",j)),]
    }
  }
  # Map numeric levels to labels. NOTE(review): after the first assignment
  # the affected columns become character, so the range comparisons below
  # are lexical string comparisons; they happen to classify "0.x"/"1.x"/
  # "2.x" strings correctly, but the ordering of these statements matters.
  summaryData[summaryData==0] = "OFF"
  summaryData[summaryData==1] = "LOW"
  summaryData[summaryData==2] = "HIGH"
  summaryData[summaryData==3] = "ON"
  summaryData[summaryData<1 & summaryData>0] = "LOW/OFF" # note this cannot handle a scenario where oscillations occur over OFF/LOW/HIGH
  summaryData[summaryData<2 & summaryData>1] = "HIGH/LOW"
  summaryData[summaryData<3 & summaryData>2] = "MEON/OFF"
  summaryData = distinct(summaryData) # only keeps unique variables
  summaryData = t(summaryData)
  summaryData = summaryData[nrow(summaryData):1,]
  # Order columns: mitotic-exit only, neither, spindle-align only, both.
  M = which(as.character(summaryData["SACA_Nucleus",])=="ON")
  SA = which(as.character(summaryData["SpindleAlign",])=="ON")
  order = c(setdiff(M,SA),setdiff(1:ncol(summaryData), union(M,SA)), setdiff(SA,M), intersect(M,SA))
  summaryData = summaryData[,order]
  return(summaryData)
}
# Render a steady-state summary matrix (e.g. from extractSSlocdata) as a
# tiled heatmap and save it as SSPlots/<filename> (TIFF, 2000x400 px).
#
# Args:
#   summaryData: character matrix of node states (rows = nodes,
#                columns = steady states).
#   filename:    output file name, created inside the "SSPlots" directory.
#
# Fixes over the previous version: the working directory is restored via
# on.exit() even if plotting fails (previously an error between
# setwd("SSPlots") and setwd("..") left the session stranded), and the
# graphics device is closed on error as well.
PlotSSData = function(summaryData, filename){
  summaryDataM = melt(summaryData)
  summaryDataM$Var2 = as.factor(summaryDataM$Var2)
  oldwd <- setwd("SSPlots")
  on.exit(setwd(oldwd), add = TRUE)
  tiff(filename, height = 400, width = 2000)
  # Close the device before the working directory is restored.
  on.exit(dev.off(), add = TRUE, after = FALSE)
  print(ggplot(summaryDataM, aes(x = Var2, y = Var1, fill = value)) + geom_tile(colour="grey",size=0.25) + coord_fixed() + theme_bw() +theme(axis.text.x = element_blank(), axis.ticks.x = element_blank(),text=element_text(size=16, family="Helvetica"), legend.title = element_blank(),legend.key.size =unit(1.5, "cm"),panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_fill_manual(values = c("OFF" = "white","ON" = "black","LOW" = "#DF8646","HIGH" = "#C43012", "HIGH/LOW" = "#F28835", "LOW/OFF" = "#FEE8AF", "MEON/OFF" = "grey")) + xlab("") + ylab("") )
}
25a0e8f754861908e7489669930a7d46fead2a7c | 378fd0edb0456d4999b73da2f1bf3970ffdb29ac | /R/Evaluate.R | fe1e288ee5cd4330bba29c884c76ee71939af763 | [
"Apache-2.0"
] | permissive | prabinrs/PatientLevelPrediction | 2752c83da2195ef9bf28d59420718581fccfed62 | f9cc776ee39c5ec6c6ab57a244f4a11e829b247f | refs/heads/master | 2021-01-14T12:45:19.432919 | 2015-05-15T23:26:10 | 2015-05-15T23:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,021 | r | Evaluate.R | # @file Evaluate.R
#
# Copyright 2015 Observational Health Data Sciences and Informatics
#
# This file is part of PatientLevelPrediction
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Compute the area under the ROC curve
#'
#' @details Computes the area under the ROC curve for the predicted probabilities, given the
#' true observed outcomes.
#'
#' @param prediction A prediction object as generated using the \code{\link{predictProbabilities}} function.
#' @param outcomeData An object of type \code{outcomeData}.
#' @param confidenceInterval    Should 95 percent confidence intervals be computed?
#'
#' @export
computeAuc <- function(prediction,
                       outcomeData,
                       confidenceInterval = FALSE) {
  if (attr(prediction, "modelType") != "logistic")
    stop("Computing AUC is only implemented for logistic models")
  # Bug fix: copy the attributes into locals whose names differ from the
  # ffdf column names. subset.ffdf evaluates its filter expression inside
  # the data frame, so the previous `cohortConceptId == cohortConceptId`
  # compared the column with itself (always TRUE) and the cohort filter was
  # silently a no-op.
  targetCohortConceptId <- attr(prediction, "cohortConceptId")
  targetOutcomeConceptId <- attr(prediction, "outcomeConceptId")
  outcomes <- ffbase::subset.ffdf(outcomeData$outcomes, cohortConceptId == targetCohortConceptId & outcomeId == targetOutcomeConceptId, select = c("personId", "cohortStartDate", "outcomeId", "outcomeCount", "timeToEvent"))
  # Left join outcomes onto predictions and binarise: any matched outcome
  # row counts as 1, unmatched persons as 0.
  prediction <- merge(prediction, ff::as.ram(outcomes), all.x = TRUE)
  prediction$outcomeCount[!is.na(prediction$outcomeCount)] <- 1
  prediction$outcomeCount[is.na(prediction$outcomeCount)] <- 0
  if (confidenceInterval){
    # Native routine returns c(auc, lower 95% bound, upper 95% bound).
    auc <- .Call('PatientLevelPrediction_aucWithCi', PACKAGE = 'PatientLevelPrediction', prediction$value, prediction$outcomeCount)
    # Bug fix: the upper bound used to be labelled auc_lb95ci as well.
    return(data.frame(auc = auc[1], auc_lb95ci = auc[2], auc_ub95ci = auc[3]))
  } else {
    auc <- .Call('PatientLevelPrediction_auc', PACKAGE = 'PatientLevelPrediction', prediction$value, prediction$outcomeCount)
    return(auc)
  }
}
#' Compute the area under the ROC curve
#'
#' @details Computes the area under the ROC curve for the predicted probabilities, given the
#' true observed outcomes.
#'
#' @param prediction A vector with the predicted hazard rate.
#' @param status A vector with the status of 1 (event) or 0 (no event).
#' @param time Only for survival models: a vector with the time to event or censor (which ever comes first).
#' @param confidenceInterval  Should 95 percent confidence intervals be computed?
#' @param timePoint Only for survival models: time point when the AUC should be evaluated
#' @param modelType Type of model. Currently supported are "logistic" and "survival".
#'
#' @export
computeAucFromDataFrames <- function(prediction, status, time = NULL, confidenceInterval = FALSE, timePoint, modelType = "logistic"){
  # Compute the AUC from plain vectors rather than prediction/outcomeData
  # objects.
  #
  # prediction: predicted probability (logistic) or hazard rate (survival).
  # status:     observed outcome, 1 (event) or 0 (no event).
  # time:       survival models only: time to event or censoring.
  # timePoint:  survival models only: evaluation time; defaults to the last
  #             observed event time.
  # modelType:  "logistic" or "survival".
  if (modelType == "survival" && confidenceInterval)
    stop("Currently not supporting confidence intervals for survival models")
  if (modelType == "survival"){
    # Uno's estimator of the time-dependent AUC, using the same sample as
    # both training and test response.
    Surv.rsp <- Surv(time, status)
    Surv.rsp.new <- Surv.rsp
    if (missing(timePoint))
      timePoint <- max(time[status == 1])
    auc <- AUC.uno(Surv.rsp, Surv.rsp.new, prediction, timePoint)$auc
    # NOTE(review): the AUC is squared here and the intent is not documented
    # in this file -- confirm against AUC.uno's return value before relying
    # on this number.
    return(auc*auc)
  } else {
    if (confidenceInterval){
      # Native routine returns c(auc, lower 95% bound, upper 95% bound).
      auc <- .Call('PatientLevelPrediction_aucWithCi', PACKAGE = 'PatientLevelPrediction', prediction, status)
      # Bug fix: the upper bound used to be labelled auc_lb95ci as well,
      # causing data.frame() to rename it to auc_lb95ci.1.
      return(data.frame(auc = auc[1], auc_lb95ci = auc[2], auc_ub95ci = auc[3]))
    } else {
      auc <- .Call('PatientLevelPrediction_auc', PACKAGE = 'PatientLevelPrediction', prediction, status)
      return(auc)
    }
  }
}
#' Plot the calibration
#'
#' @details Create a plot showing the predicted probabilities and the observed fractions. Predictions
#' are stratified into equally sized bins of predicted probabilities.
#'
#' @param prediction A prediction object as generated using the \code{\link{predictProbabilities}} function.
#' @param outcomeData An object of type \code{outcomeData}.
#' @param numberOfStrata The number of strata in the plot.
#' @param fileName Name of the file where the plot should be saved, for example 'plot.png'. See the
#' function \code{ggsave} in the ggplot2 package for supported file formats.
#'
#' @return A ggplot object. Use the \code{\link[ggplot2]{ggsave}} function to save to file in a different format.
#'
#' @export
plotCalibration <- function(prediction, outcomeData, numberOfStrata = 5, fileName = NULL){
  # Plot observed outcome fractions against predicted probabilities,
  # stratified into `numberOfStrata` (approximately) equally sized bins.
  if (attr(prediction, "modelType") != "logistic")
    stop("Plotting the calibration is only implemented for logistic models")
  # Bug fix: use local names that differ from the ffdf column names.
  # subset.ffdf evaluates the filter expression inside the data frame, so
  # the previous `cohortConceptId == cohortConceptId` compared the column
  # with itself (always TRUE) and the cohort filter was silently a no-op.
  targetCohortConceptId <- attr(prediction, "cohortConceptId")
  targetOutcomeConceptId <- attr(prediction, "outcomeConceptId")
  outcomes <- ffbase::subset.ffdf(outcomeData$outcomes, cohortConceptId == targetCohortConceptId & outcomeId == targetOutcomeConceptId, select = c("personId", "cohortStartDate", "outcomeId", "outcomeCount", "timeToEvent"))
  # Left join outcomes onto predictions; any matched outcome counts as 1.
  prediction <- merge(prediction, ff::as.ram(outcomes), all.x = TRUE)
  prediction$outcomeCount[!is.na(prediction$outcomeCount)] <- 1
  prediction$outcomeCount[is.na(prediction$outcomeCount)] <- 0
  # Assign each prediction to a quantile-based stratum of predicted value.
  q <- quantile(prediction$value, (1:(numberOfStrata-1))/numberOfStrata)
  prediction$strata <- cut(prediction$value, breaks = c(0, q, max(prediction$value)), labels = FALSE)
  # Per-stratum outcome counts, person counts and predicted-value range.
  # (An unused helper and a commented-out plyr::ddply call were removed.)
  counts <- aggregate(outcomeCount ~ strata, data = prediction, sum)
  names(counts)[2] <- "counts"
  backgroundCounts <- aggregate(personId ~ strata, data = prediction, length)
  names(backgroundCounts)[2] <- "backgroundCounts"
  minx <- aggregate(value ~ strata, data = prediction, min)
  names(minx)[2] <- "minx"
  maxx <- aggregate(value ~ strata, data = prediction, max)
  names(maxx)[2] <- "maxx"
  strataData <- merge(counts, backgroundCounts)
  strataData <- merge(strataData, minx)
  strataData <- merge(strataData, maxx)
  strataData$fraction <- strataData$counts / strataData$backgroundCounts
  # One rectangle per stratum; the diagonal marks perfect calibration.
  plot <- ggplot2::ggplot(strataData, ggplot2::aes(xmin = minx, xmax = maxx, ymin = 0, ymax = fraction)) +
    ggplot2::geom_abline() +
    ggplot2::geom_rect(color = rgb(0, 0, 0.8, alpha = 0.8), fill = rgb(0, 0, 0.8, alpha = 0.5)) +
    ggplot2::scale_x_continuous("Predicted probability") +
    ggplot2::scale_y_continuous("Observed fraction")
  if (!is.null(fileName))
    ggplot2::ggsave(fileName, plot, width = 5, height = 3.5, dpi = 400)
  return(plot)
}
#' Plot the ROC curve
#'
#' @details Create a plot showing the Receiver Operator Characteristics (ROC) curve.
#'
#' @param prediction A prediction object as generated using the \code{\link{predictProbabilities}} function.
#' @param outcomeData An object of type \code{outcomeData}.
#' @param fileName Name of the file where the plot should be saved, for example 'plot.png'. See the
#' function \code{ggsave} in the ggplot2 package for supported file formats.
#'
#' @return A ggplot object. Use the \code{\link[ggplot2]{ggsave}} function to save to file in a different format.
#'
#' @export
plotRoc <- function(prediction, outcomeData, fileName = NULL){
  if (attr(prediction, "modelType") != "logistic")
    stop("Plotting the ROC curve is only implemented for logistic models")
  # Bug fixed: as in plotCalibration, subset.ffdf evaluates the filter inside
  # the ffdf, so the original `cohortConceptId == cohortConceptId` compared a
  # column with itself and never filtered by cohort. Locals are renamed so
  # they cannot be shadowed by columns of the same name.
  targetCohortId <- attr(prediction, "cohortConceptId")
  targetOutcomeId <- attr(prediction, "outcomeConceptId")
  outcomes <- ffbase::subset.ffdf(outcomeData$outcomes, cohortConceptId == targetCohortId & outcomeId == targetOutcomeId, select = c("personId", "cohortStartDate", "outcomeId", "outcomeCount"))
  # Left-join outcomes and recode to a 0/1 outcome indicator.
  prediction <- merge(prediction, ff::as.ram(outcomes), all.x = TRUE)
  prediction$outcomeCount[!is.na(prediction$outcomeCount)] <- 1
  prediction$outcomeCount[is.na(prediction$outcomeCount)] <- 0
  # Sweep the decision threshold by sorting on descending predicted value;
  # running sums then give sensitivity and the false-positive rate.
  prediction <- prediction[order(-prediction$value),c("value", "outcomeCount")]
  prediction$sens <- cumsum(prediction$outcomeCount) / sum(prediction$outcomeCount)
  prediction$fpRate <- cumsum(prediction$outcomeCount == 0) / sum(prediction$outcomeCount == 0)
  # Thin the curve to one point per (sens, fpRate) step before plotting.
  data <- aggregate(fpRate ~ sens, data = prediction, min)
  data <- aggregate(sens ~ fpRate, data = data, min)
  plot <- ggplot2::ggplot(data, ggplot2::aes(x = fpRate, y = sens)) +
    ggplot2::geom_area(color = rgb(0, 0, 0.8, alpha = 0.8), fill = rgb(0, 0, 0.8, alpha = 0.4)) +
    ggplot2::scale_x_continuous("1 - specificity") +
    ggplot2::scale_y_continuous("Sensitivity")
  if (!is.null(fileName))
    ggplot2::ggsave(fileName, plot, width = 5, height = 4.5, dpi = 400)
  return(plot)
}
|
0d1806a5019925575f76e88be8757d72612c4fca | b9d5e16cf702ee87d3553d98e849c9b15ba3922e | /Curso02/Modulo II/code/script-c2m2.R | dbb85fb0e8f3b2e829adb27347f4131ab1b5fcf9 | [] | no_license | Manuelfjr/CursoLivre | 83b73f2462f932623d4868d65efbb0a51b279f36 | 39b7fc4a8f40c1e98524d53c6b2591809ee3bf75 | refs/heads/master | 2022-12-07T20:12:22.425148 | 2020-08-19T20:02:41 | 2020-08-19T20:02:41 | 277,863,734 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 564 | r | script-c2m2.R | # Questão 01
# Exercise: simple linear regression of tree volume on girth using the
# built-in `trees` dataset. Rewritten to avoid attach(): columns are
# referenced explicitly, and the model is fitted with `data = trees`.
data(trees)
trees
# A) Scatterplot of volume against girth (labels kept as the column names).
plot(trees$Girth, trees$Volume, xlab = "Girth", ylab = "Volume")
title('Volume ~ Girth')
# B) Pearson correlation between girth and volume.
cor(trees$Girth, trees$Volume)
# C) Test whether the correlation differs from zero.
cor.test(trees$Volume, trees$Girth)
# D) Fit the simple linear model Volume ~ Girth.
gvmodel <- lm(Volume ~ Girth, data = trees)
gvmodel
# E) For this model the explanatory variable is the tree girth (diameter)
#    and the response variable is the tree volume.
# F) Estimated coefficients (intercept and slope).
coefs <- gvmodel$coefficients
coefs
# G) Scatterplot with the fitted regression line overlaid.
plot(trees$Girth, trees$Volume, xlab = "Girth", ylab = "Volume")
abline(gvmodel, col="gray")
# H)
# I) Model summary: coefficients, standard errors, t-tests, R-squared.
summary(gvmodel)
# J) Prediction interval for a tree with girth 13.
predict(gvmodel, newdata=data.frame(Girth=c(13)),
        interval="prediction")
|
63d0f0cc2626636b70d88dada58b05db5142270a | 4236a34307c064b968910bdd43354fe0de692c19 | /cachematrix.R | 341fb907dba943934952fd34a4851d9a3df941aa | [] | no_license | ragank715/ProgrammingAssignment2-master | e211853588ee10959cd6a3d3cf1ceeb36c6bc496 | cd57413ea069c87810504cfd24ebe9f412c456f0 | refs/heads/master | 2021-01-02T22:18:00.541718 | 2015-04-26T18:03:16 | 2015-04-26T18:03:16 | 34,536,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,131 | r | cachematrix.R | ## The flow of function is very similar to what was provided
## for the example with the mean
## Chnages implemented are as follows:
## 1) a variable n is introduced defining the dimension of square matrix
## 2) Instead of NULL, NA is being used to fill in the inverted matrix for
## the time inverse is not computed
## 3) mean fucntion has been replaced by solve
## 4) variable names have been chaged to represent the inversion of materix
## Create a "cache matrix": a list of accessor closures around a matrix `x`
## and a cached copy of its inverse.
##
## Bug fixed: the original used matrix(NA, n, n) as the "not computed yet"
## sentinel, but `n` was never defined inside the function, so construction
## failed unless a global `n` happened to exist. A scalar NA sentinel (and a
## valid default for `x`) removes that dependency while keeping the
## set/get/setinverse/getinverse interface unchanged.
makeCacheMatrix <- function(x = matrix()) {
  xinv <- NA  # sentinel: inverse not computed yet
  set <- function(y) {
    x <<- y
    xinv <<- NA  # invalidate the cached inverse when the matrix changes
  }
  get <- function() x
  setinverse <- function(solve) xinv <<- solve
  getinverse <- function() xinv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of a "cache matrix" created by makeCacheMatrix(),
## computing it with solve() on the first call and serving the cached copy
## afterwards.
##
## Bug fixed: the original tested the cache with `if (!is.na(xinv))`, which
## is not a scalar condition once a real inverse (a matrix) is cached and
## also breaks if the sentinel is NULL. The check below treats NULL, a
## scalar NA, and an all-NA placeholder matrix as "not cached yet"; a
## genuine inverse can never be all-NA because solve() errors instead.
##
## `n` was accepted but never used by the original; it is kept (with a
## default) so existing calls that pass it keep working.
cacheSolve <- function(x, n = NULL, ...) {
  xinv <- x$getinverse()
  if (!is.null(xinv) && !all(is.na(xinv))) {
    message("getting cached data")
    return(xinv)
  }
  data <- x$get()
  xinv <- solve(data, ...)
  x$setinverse(xinv)
  xinv
}
|
c2c6cb41ec7cdf16fc87703cf797e8061e49bd57 | a0dc8970520126b2ea0234c2ed265264c144df2e | /Scripts/Functions/Lag_X.R | 279cf7d70c009f1bf598a073c4f7373be594e9dc | [] | no_license | AtefOuni/CaterpillaR | b1a0703b85b5009af2c421e58e2697dd66e91a67 | 7c0cafa990f01ea085ab21e8349a616c0c799aeb | refs/heads/master | 2020-05-31T04:10:35.543952 | 2015-04-20T15:08:35 | 2015-04-20T15:08:35 | 31,979,495 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 415 | r | Lag_X.R |
# Build a multivariate time series containing X and its first `Lags` lags.
#
# X:        a ts object (anything stats::lag/cbind can align).
# Lags:     number of lags to append; 0 returns X unchanged.
# Var_Name: prefix for the generated column names (Var_Name_0 ... Var_Name_Lags).
#
# Returns a ts matrix whose columns are X_0 (the unshifted series), X_1
# (lagged once), ..., X_Lags, aligned on the union of their time ranges;
# positions outside a column's range are NA.
Lag_X <- function(X, Lags = 12, Var_Name = "X") {
  if (Lags == 0) {
    return(X)
  }
  # lag(X, k = 1 - k) shifts the series back by (k - 1) periods, so the
  # first list element is the unshifted series itself.
  lagged <- lapply(seq_len(Lags + 1), function(k) stats::lag(X, k = 1 - k))
  # Bind all columns in one call instead of growing the matrix inside a
  # loop (the original rebuilt X_res with cbind on every iteration).
  X_res <- do.call(cbind, lagged)
  dimnames(X_res)[[2]] <- paste(Var_Name, 0:Lags, sep = "_")
  return(X_res)
}
|
a73cfbca92c0b10bd1d3d962f029cd0d9429138e | bd8f6f04dbf8bf1b82d5c020e1b24ed7eed20046 | /collapsed_opps.R | d88ed3d37095152d0b71166a608e681f99ee33d9 | [] | no_license | mbreecher/sales_analysis | b9c18bc3a2a347ab53b82ca3f289ce75dc5f4f0d | 76198f8fd39a34a2527450fcf8d236bcc5150f4f | refs/heads/master | 2021-01-21T07:39:01.380032 | 2014-12-12T19:46:54 | 2014-12-12T19:46:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,146 | r | collapsed_opps.R | library(RMySQL)
# Sales analysis: pull completed professional-services rows from MySQL,
# join them to Salesforce opportunity data, and write plots of services
# closed per period and package-vs-single opportunities.
library(reshape2)
library(plyr)
library(ggplot2)
setwd("C:/R/workspace/shared")
source("get_query.r")
load("db_creds.Rdata")
setwd("C:/R/workspace/sales_analysis/source")
opps <- read.csv("opportunities_and_products_for_psh.csv", header = TRUE, stringsAsFactors = FALSE)
#grab project and collapsed time data from mysql database
con <- dbConnect(dbDriver("MySQL"), user = username, password = password, dbname = "revenue_analysis")
sql <- paste("select subcloud.service_id, subcloud.opportunity_id, subcloud.account_name, subcloud.cik, subcloud.registrant_type,
             subcloud.solution, subcloud.SrPSM, subcloud.PSM, subcloud.CSM, subcloud.Sr_CSM, subcloud.service_name, subcloud.cs_ps,
             subcloud.service_type, subcloud.form, subcloud.quarter_end, subcloud.filing_date, subcloud.filing_deadline, subcloud.filing_deadline_recalc,
             subcloud.service_status, subcloud.customer_status, subcloud.year_end, subcloud.reporting_period, subcloud.service_period, subcloud.list_price,
             subcloud.sales_price, subcloud.filing_week_num
             from subcloud left join opportunities
             on subcloud.opportunity_id collate latin1_bin = opportunities.opportunity_line_item_id collate latin1_bin
             where subcloud.service_id like 'a0%' and service_status = 'Completed' and not subcloud.cs_ps = 'CS'
             group by subcloud.service_id, subcloud.account_name", sep = "")
query <- dbGetQuery(con, sql)
dbDisconnect(con)
# Aggregate prices per service/opportunity/period (the duplicated
# service_type term in the original formula was redundant and is removed).
agg_prices <- aggregate(cbind(sales_price, list_price) ~ service_id + opportunity_id +
                          service_name + service_type + reporting_period, data = query, FUN = sum)
# Keep only Closed Won opportunities, then drop the Stage column.
# Bug fixed: the original re-derived opps_data from `opps` after filtering,
# silently discarding the Closed Won filter.
opps_data <- opps[,names(opps) %in% c("Line.Item.18.Digit.Id", "Created.Date", "Close.Date", "Stage")]
opps_data <- opps_data[opps_data$Stage %in% c("Closed Won"),]
opps_data <- opps_data[,names(opps_data) %in% c("Line.Item.18.Digit.Id", "Created.Date", "Close.Date")]
names(opps_data)[names(opps_data) %in% "Line.Item.18.Digit.Id"] <- c("opportunity_id")
agg_prices <- merge(agg_prices, opps_data, by = "opportunity_id", all.x = TRUE)
# Derive calendar/quarter indices relative to 2012.
# NOTE(review): year()/month() are not base R (lubridate); presumably
# loaded via get_query.r -- verify.
agg_prices$Close.Date <- as.Date(agg_prices$Close.Date, format = "%m/%d/%Y"); agg_prices$Created.Date <- as.Date(agg_prices$Created.Date, format = "%m/%d/%Y")
agg_prices$closed_period <- paste(year(agg_prices$Close.Date), ceiling(month(agg_prices$Close.Date)/3), sep = "")
agg_prices$created_period <- paste(year(agg_prices$Created.Date), ceiling(month(agg_prices$Created.Date)/3), sep = "")
agg_prices$month <- (year(agg_prices$Close.Date) - 2012)* 12 + month(agg_prices$Close.Date)
agg_prices$month_name <- format(agg_prices$Close.Date, format = "%b-%y")
agg_prices$month <- as.numeric(agg_prices$month)
agg_prices$closed_int = (year(agg_prices$Close.Date) - 2012)* 4 + floor(month(agg_prices$Close.Date)/3)
agg_prices$report_int = (as.numeric(substr(agg_prices$reporting_period,1,4))-2012)*4 + as.numeric(substr(agg_prices$reporting_period,6,6))
agg_prices_counts <- ddply(agg_prices, .var = c("service_type", "closed_period", "reporting_period","service_id"),
                           .fun = function(x){
                             data.frame(count = length(unique(x$service_id)),
                                        closed_int = (year(x$Close.Date) - 2012)* 4 + floor(month(x$Close.Date)/3),
                                        report_int = (as.numeric(substr(x$reporting_period,1,4))-2012)*4 + as.numeric(substr(x$reporting_period,6,6))
                             )
                           })
excluded <- c("Rush Charges", "Auditor Review", "Migration")
#loop to plot all periods (skipping periods with 100 or fewer rows)
for (loop in unique(agg_prices$reporting_period)){
  if(dim(agg_prices[agg_prices$reporting_period %in% loop,])[1] > 100){
    closed_loop_plot <- ggplot(agg_prices[!(agg_prices$service_type %in% excluded) &
                                            !is.na(agg_prices$closed_int) &
                                            agg_prices$reporting_period %in% loop,]) +
      geom_bar(aes(x = closed_int, fill = service_type)) +
      geom_segment(aes(x = report_int + 1, xend = report_int+ 1, y = 0, yend = 300), color = "red") +
      scale_x_continuous(labels=unique(agg_prices$reporting_period),
                         breaks =unique(agg_prices$report_int)) +
      theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
      ggtitle(paste(loop," services closed by period", sep = ""))
    setwd("C:/R/workspace/sales_analysis/output")
    ggsave(paste("opportunity_close_by_reporting_period",loop,".png", sep = "")
           , closed_loop_plot, width = 14, height = 8.5)
  }
}
#plot services sold by individual contracts vs as a package
valid_svc <- c("Roll Forward", "Full Service Roll Forward", "Standard Import", "Detail Tagging",
               "Full Review", "Maintenance", "Full Service Standard Import")
valid_agg_prices <- agg_prices[agg_prices$service_type %in% valid_svc,]
counter <- ddply(valid_agg_prices, .var = c("opportunity_id"), .fun = function(x){
  data.frame(line_item_count = length(unique(x$service_id)))
})
valid_agg_prices <- merge(valid_agg_prices, counter, by = c("opportunity_id"), all.x = TRUE)
valid_agg_prices$opp_type <- "package"
valid_agg_prices[valid_agg_prices$line_item_count %in% "1",]$opp_type <- "single"
valid_agg_prices <- valid_agg_prices[valid_agg_prices$reporting_period %in% c("2013Q1","2013Q2","2013Q3","2013Q4","2014Q1","2014Q2","2014Q3") ,]
opp_type_plot <- ggplot(valid_agg_prices) +
  geom_bar(aes(x = report_int, fill = factor(opp_type)), position = "dodge", size = 3) +
  scale_x_continuous(labels=unique(valid_agg_prices$reporting_period),
                     breaks =unique(valid_agg_prices$report_int)) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  facet_wrap(~service_type) +
  ggtitle("services sold as part of a package vs single")
setwd("C:/R/workspace/sales_analysis/output")
ggsave("services_from_opportunity_type.png", opp_type_plot, width = 11, height = 8.5)
# discount trend
# (Bug fixed: the line above was the bare text `discount trend`, a parse
# error that prevented the whole script from being sourced.)
agg_opps <- aggregate(cbind(sales_price) ~ opportunity_id + Close.Date ,data = valid_agg_prices, FUN = sum)
#recommendation accuracy
d59f64e721a6eb7c5ca9b8f632d9c302585eef71 | 1f27ad535035612c2731e95cbca8e86b43e043c1 | /R/inla.climate.R | ec14e8fb0de755284bcdd5dfa493f83ce86eebc0 | [] | no_license | eirikmn/INLA.climate | 8417e699d97b24b4d408174c03b8a6896e30d9ee | c467e6e2f9fd2e4341cbc1bf15b9629b82eb7089 | refs/heads/master | 2023-02-08T15:06:14.385088 | 2023-01-27T09:52:09 | 2023-01-27T09:52:09 | 168,170,133 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,892 | r | inla.climate.R |
# Fit a latent Gaussian model linking a temperature-like series `data` to a
# `forcing` series using the INLA package with a user-defined rgeneric model
# ('fgn' or 'arfima' for long-range dependence, or 'ar1'). Optionally derives
# the transient climate response (via Qco2), the forcing response mu, and
# AR(1) summaries from the fitted model.
#
# Arguments (as used below):
#   data          numeric/ts vector or data.frame (first column used).
#   forcing       numeric/ts forcing series; may be longer than data.
#   Qco2          if non-NULL, triggers TCR computation.
#   compute.mu    "quick"/"fast"/1 or "full"/"complete"/2 to compute mu.
#   stepLength    step length h passed to INLA's control.inla.
#   m             number of components N for the rgeneric approximation.
#   model         "fgn", "arfima" or "ar1".
#   formula       optional INLA formula; a default is built if NULL.
#   *.options     named lists merged with defaults via set.options().
#
# Note on naming: variables prefixed `tid` hold elapsed-time bookkeeping
# ("tid" is Norwegian for "time"); `feil` is an error-hint string.
inla.climate = function(data, forcing, Qco2=NULL,compute.mu=NULL, stepLength=0.01,restart.inla=FALSE,
                        m = 4,model="fgn", formula=NULL, print.progress=FALSE,
                        inla.options = list(),
                        tcr.options = list(),
                        mu.options = list(),
                        ar1.options = list()){
  # INLA is not a hard dependency of the package; attach it if available and
  # stop with installation instructions otherwise.
  catch = tryCatch(attachNamespace("INLA"),error=function(x){})
  if(length(find.package("INLA",quiet=TRUE))==0){
    stop("This function requires INLA. Please install at www.R-INLA.org or by calling 'install.packages(\"INLA\", repos=c(getOption(\"repos\"), INLA=\"https://inla.r-inla-download.org/R/testing\"), dep=TRUE)' from R.")
  }
  if(print.progress){
    print("Initiating inla.climate..")
  }
  tid.start = proc.time()[[3]]
  inla.climate.call = sys.call(which=1)
  if(sum(is.na(forcing))>0) stop("Forcing contains NA values")
  # Temporarily raise print precision to at least 7 digits; restored on exit.
  old.digits=getOption("digits")
  if(old.digits < 7){
    options(digits = 7)
  }
  # Defaults for the INLA call; user-supplied inla.options take precedence
  # via set.options(). DIC is only requested when data has no NAs.
  default.inla.options = list(
    num.threads=1,
    control.compute = list(cpo=FALSE,dic=(sum(is.na(data))==0),config=TRUE),
    control.inla = list(reordering="metis",h=stepLength[1],restart=restart.inla),
    control.family = list(hyper = list(prec = list(initial = 12, fixed=TRUE))) )
  inla.options=set.options(inla.options,default.inla.options)
  # Merge option defaults for the optional post-processing steps.
  if(length(Qco2)>0){
    default.tcr.options = list(nsamples = 100000, seed = 1234)
    # temp = default.tcr.options
    tcr.options=set.options(tcr.options,default.tcr.options)
  }
  if(length(compute.mu)==0){
    compute.mu="No"
  }
  if(compute.mu %in% c(1,2,"full","complete","quick","fast")){
    default.mu.options = list(nsamples = 100000, seed = 1234)
    # temp = default.mu.options
    mu.options=set.options(mu.options,default.mu.options)
  }
  if(model == "ar1"){
    default.ar1.options = list(nsamples = 100000, seed = 1234)
    # temp = default.mu.options
    ar1.options=set.options(ar1.options,default.ar1.options)
  }
  # Coerce `data` to a plain vector y0 of length n.
  # NOTE(review): inherits() would be more robust than class(...)== here.
  if(class(data)=="numeric" || class(data)=="ts"){
    n = length(data)
    y0 = data
  }else if (class(data)=="data.frame"){
    n = length(data[,1])
    y0 = data[,1]
  }else{
    stop("'data' input not recognized. Only objects of class 'numeric', 'ts' and 'data.frame' are valid.")
  }
  if(class(forcing)=="numeric" || class(forcing)=="ts"){
    # Pad data with NAs when the forcing series is longer (forecast region).
    if(length(forcing)>n){
      y0 = c(y0,rep(NA,length(forcing)-n))
      n = length(forcing)
    }
    # Baseline T0: mean of the non-NA values among the first 20 observations.
    T0 = mean(data[1:20][!is.na(data[1:20])])
    # NOTE(review): mean() of an empty vector is NaN, not NULL, so this
    # guard can never fire as written -- consider is.nan(T0).
    if(is.null(T0)){
      stop("There are no non-NA values among the first 20 of data, please remove these before repeating.")
    }
    y=y0-T0
    df=data.frame(y=y,idy=1:n)
  }else{
    stop("'forcing' must be a 'numeric' object.")
  }
  # DIC is unavailable when the (possibly padded) response contains NAs.
  if(sum(is.na(df[1:n,1]))>0){
    inla.options$control.compute$dic = FALSE
  }
  # Build the rgeneric latent model: long-range dependent (fgn/arfima) with
  # a prior on the Hurst-type parameter H, or a forced AR(1) alternative.
  is.lrd = model %in% c("arfima","fgn")
  if(is.lrd){
    lagmax = 1000L
    funks = h.map.maker(m,lagmax,model)
    lprior.fun.H = compute.Hprior(50,0.9,0.1,persistent=TRUE,model=model)
    model.approx = INLA::inla.rgeneric.define(rgeneric.lrd,lprior.fun.H = lprior.fun.H,
                                              n=n,N=m,forcing=forcing,funks=funks)
  }else if(model == "ar1"){
    #m = 1 #only one component is available so far
    model.approx = INLA::inla.rgeneric.define(rgeneric.ar1,n=n,N=m,forcing=forcing)
    #model.approx = INLA::inla.rgeneric.define(rgeneric.forcing.3AR1.free,n=n,N=m,forcing=forcing)
  }
  # Default formula: no fixed effects, only the rgeneric latent field.
  if(is.null(formula)){
    formula = y ~ -1+ f(idy, model=model.approx)
  }
  if(print.progress){
    print("Starting INLA..")
  }
  tid.approx.start = proc.time()[[3]]
  # Run INLA; an error is downgraded to a warning by tryCatch, detected below
  # (warning() returns the message string), and re-raised with a hint.
  result.approx <- tryCatch(
    do.call(INLA::inla, c(list(formula = formula,data = df,family = "gaussian"),inla.options) ),
    error=warning
  )
  if(is.character(result.approx)){
    feil = "\n Convergence can sometimes be improved by changing the step length h."
    stop(paste0(result.approx,feil))
  }
  tid.approx.slutt = proc.time()[[3]]
  tid.approx = tid.approx.slutt-tid.approx.start
  if(print.progress){
    cat("INLA completed in ",tid.approx," seconds\n",sep="")
  }
  # Optional post-processing: AR(1) summaries, transient climate response,
  # and the forcing response mu ("quick" vs "full" sampling modes).
  if(model == "ar1"){
    ar1.result = inla.climate.ar1(result.approx, m=m, nsamples=ar1.options$nsamples,
                                  seed=ar1.options$seed, print.progress=print.progress)
  }
  if(!is.null(Qco2)){
    tcr.result = inla.climate.tcr(result.approx,Qco2,nsamples=tcr.options$nsamples,
                                  seed=tcr.options$seed, print.progress=print.progress,model=model)
  }
  if(compute.mu %in% c(1,2,"full","complete","quick","fast") ){
    if(compute.mu %in% c(2,"full","complete")){
      mu.quick = FALSE
    }else{
      mu.quick=TRUE
    }
    mu.result = inla.climate.mu(result.approx, forcing, quick=mu.quick, T0.corr = T0, nsamples=mu.options$nsamples,
                                seed=mu.options$seed, print.progress=print.progress,model=model)
  }
  if(print.progress){
    cat("Finishing up...\n",sep="")
  }
  # Bookkeeping carried into the result object (note: data is re-centred
  # back by adding T0).
  misc = list(call=inla.climate.call, INLA.options=inla.options, TCR.options=tcr.options, mu.options=mu.options,
              data = data.frame(y=(df$y+T0),idy=(df$idy)), forcing=forcing, n=n, m=m, model=model, T0=T0,
              stepLength=stepLength, restart.inla=restart.inla, Qco2=Qco2, compute.mu=compute.mu,
              time.inla = tid.approx)
  results = process.inla(result.approx, misc)
  if(length(Qco2) > 0){
    results = process.tcr(results,tcr.result)
  }
  if(compute.mu %in% c(1,2,"full","complete","quick","fast")){
    results = process.mu(results,mu.result)
  }
  if(model == "ar1"){
    misc$ar1.options = ar1.options
    results = process.ar1(results,ar1.result)
  }
  if(print.progress){
    cat("Finishing up..\n",sep="")
  }
  tid.slutt = proc.time()[[3]]
  results$time$Total = tid.slutt-tid.start
  class(results) <- "inla.climate"
  options(digits=old.digits)
  return(results)
}
|
a134346bcc54f317ef988fe35d6aaac076bf616a | cbcb137a4b1537c6e07f7b792f1b78affab44af7 | /inst/shiny-examples/maniTools/app.R | 7b063d21ce7c76c7a6df7539c2534da601be2042 | [] | no_license | kcf-jackson/maniTools | 07c1dceb39ce4f52d88a52a190c6974a8a56b9a3 | 1653112e263e44fc2ec4bdb225848ce05f33ad3d | refs/heads/master | 2020-12-24T21:21:42.946003 | 2020-04-07T09:46:37 | 2020-04-07T09:46:37 | 58,609,852 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 12,653 | r | app.R | library(shiny)
library(plotly)
library(maniTools)
# Run one dimensionality-reduction algorithm on sim_data$data and return
# list(plotly_figure, run_time), where run_time is the system.time() of the
# embedding step. Returns NULL when any input is missing or the algorithm
# name is not recognised.
#
# sim_data: list with $data (numeric matrix, one point per row) and $colors.
# algor:    algorithm name, matched case-insensitively (e.g. "pca", "tsne").
# k:        neighbourhood size for the neighbour-based methods.
# d:        target dimension of the embedding (the UI fixes this at 2).
# kernel:   kernel name, used only by kernel PCA.
dr_demo <- function(sim_data, algor, k, d, kernel = 'rbfdot') {
  # Scalar guards, so use the short-circuiting || rather than elementwise |.
  if (is.null(algor) || is.null(sim_data) || is.null(k) || is.null(d)) return(NULL)
  if (is.na(algor)) return(NULL)
  algor <- toupper(algor)
  proj_data <- NULL  # stays NULL if no branch below matches
  if (algor == "PCA") {
    # NOTE(review): the rotation is computed from centred/standardised data
    # but applied to the raw data -- confirm this asymmetry is intentional.
    run_time <- system.time({
      pca_dr <- sim_data$data %>% center_and_standardise() %>% prcomp()
      proj_data <- sim_data$data %*% pca_dr$rotation[,1:2]
    })
  }
  # Classical multidimensional scaling
  if (algor == "MDS")
    run_time <- system.time({ proj_data <- cmdscale(dist(sim_data$data), k = d) })
  # Isomap
  if (algor == "ISOMAP")
    run_time <- system.time({ proj_data <- RDRToolbox::Isomap(sim_data$data, dims = d, k = k)$dim2 })
  # Locally linear embedding
  if (algor == "LLE")
    run_time <- system.time({ proj_data <- LLE2(sim_data$data, dim = d, k = k) })
  # Diffusion map
  if (algor == "DIFFUSIONMAP")
    run_time <- system.time({ proj_data <- diffusionMap::diffuse(dist(sim_data$data), neigen = d)$X })
  # t-SNE
  if (algor == "TSNE")
    run_time <- system.time({ proj_data <- tsne::tsne(sim_data$data, k = d) })
  # Kernel PCA
  if (algor == "KPCA")
    run_time <- system.time({ proj_data <- kernlab::kpca(sim_data$data, kernel = kernel, features = d)@pcv })
  # Stochastic proximity embedding
  if (algor == "SPE")
    run_time <- system.time({ proj_data <- spe::spe(sim_data$data, edim = d)$x })
  # Laplacian eigenmaps
  if (algor == "LE")
    run_time <- system.time({ proj_data <- Laplacian_Eigenmaps(sim_data$data, k = k, d = d)$eigenvectors })
  # Hessian LLE
  if (algor == 'HLLE')
    run_time <- system.time({ proj_data <- Hessian_LLE(sim_data$data, k = k, d = d)$projection })
  # Local tangent space alignment
  if (algor == 'LTSA')
    run_time <- system.time({ proj_data <- Local_TSA(sim_data$data, k = k, d = d) })
  # Unrecognised algorithm: keep the NULL contract used by the guards above
  # (previously this fell through and errored on the undefined proj_data).
  if (is.null(proj_data)) return(NULL)
  p1 <- plotly_2D(proj_data, sim_data$colors)
  plot_title <- paste(algor, ". Time taken: ", round(run_time[[1]], 3), "s.", sep = "")
  p1 <- layout(p1, title = plot_title)
  list(p1, run_time)
}
# Define UI for application that draws a histogram
ui <- shinyUI(fluidPage(
# Application title
titlePanel("Manifold learning in R"),
tabsetPanel(
tabPanel("Demo",
fluidRow(
column(6,
plotlyOutput("plot_3d"),
plotlyOutput("plot_2d")
),
column(6,
wellPanel(
style = "background-color: #ff6666;",
h3("Manifold"),
fluidRow(
column(4, fileInput("file_input", "Load File", accept = c(
'text/csv', 'text/comma-separated-values',
'text/tab-separated-values', 'text/plain', '.csv', '.tsv')),
checkboxInput('header', 'Header', TRUE)
),
column(4, radioButtons('sep', 'Separator',
c(Comma=',', Semicolon=';', Tab='\t'),
',')
),
column(4, radioButtons('quote', 'Quote',
c(None='', 'Double Quote'='"', 'Single Quote'="'"),
'"')
)
),
textOutput("file_text"),
hr(),
fluidRow(
column(4,
selectInput("data_input", "Examples",
choice = c("Swiss Roll", "Swiss Hole", "Corner Planes",
"Punctured Sphere", "Twin Peaks", "Clusters",
"Toroidal Helix", "Gaussian"), selected = "Swiss Roll")
),
column(4, numericInput("num_pts", "#Points", 800)),
column(4, uiOutput("ui"))
)
#submitButton("Load Example")
),
wellPanel(
style = "background-color: #00b300;",
h3("Parameters"),
fluidRow(
column(4, numericInput("d", "Target Dimension d", 2, min = 2, max = 2)),
column(4, numericInput("k", "Nearest Neighbors k", 8, min = 1)),
column(4, selectInput("kernel", "Kernel",
choices = c("rbfdot", #"polydot", "vanilladot", "tanhdot",
"laplacedot", "besseldot", "anovadot"#, "splinedot"
)))
#column(3, numericInput("sigma", "Sigma", 10.0)),
#column(3, numericInput("alpha", "Alpha", 1.0))
),
textOutput("comment_text")
),
wellPanel(
style = "background-color: #4d94ff;",
h3("Algorithm"),
fluidRow(
radioButtons("algor", label = "",
choices = list("MDS" = "mds", "PCA" = "pca", "Kernel PCA" = "kpca",
"ISOMAP" = "isomap", "Diffusion Map" = "diffusionMap",
"Laplacian Eigenmaps" = "le", "Locally Linear Embedding (LLE)" = "lle",
"Hessian-LLE (HLLE)" = "hlle", "t-SNE" = "tsne",
"Stochastic Proximity Embedding (SPE)" = "spe",
"Local Tangent Space Alignment (LTSA)" = "ltsa"),
inline = TRUE)
),
textOutput("plot_text")
)
)
)
),
tabPanel("Comparison",
fluidRow(
checkboxGroupInput("algor_group", label = h3("Algorithms"),
choices = list("PCA" = 'pca', "MDS" = 'mds', "ISOMAP" = 'isomap',
"LLE" = 'lle', "Diffusion Map" = 'diffusionMap',
"t-SNE" = 'tsne', "KPCA" = 'kpca', "SPE" = 'spe',
"Laplacian Eigenmaps" = 'le', "HLLE" = 'hlle', "LTSA" = 'ltsa'),
inline = TRUE)
),
fluidRow(
column(1, actionButton("button", "Update")),
column(11, textOutput("info_text"))
),
fluidRow(
column(4, plotlyOutput("c_plot_1")),
column(8, uiOutput("plot_first_row"))
),
fluidRow(
uiOutput("plot_second_row")
),
fluidRow(
uiOutput("plot_third_row")
),
fluidRow(
uiOutput("plot_fourth_row")
)
)
)
))
# Shiny server: wires the example/file data sources to the 3-D preview, the
# single-algorithm demo plot, and the comparison grid of embeddings.
server <- shinyServer(function(input, output) {
  # Parse the uploaded CSV, or return NULL when nothing has been loaded.
  # Returns list(data = numeric matrix, colors = per-point colour values).
  data_from_file <- reactive({
    inFile <- input$file_input
    if (is.null(inFile)) return(NULL)
    sim_data <- read.csv( inFile$datapath, header = input$header,
                          sep = input$sep, quote = input$quote )
    if (ncol(sim_data) >= 4) {
      scale <- sim_data[,4]
    } else {
      # Bug fixed: this branch referenced an undefined `z`. Per the help
      # text shown in the UI, the 3rd dimension supplies the colours when
      # no 4th column is available.
      scale <- sim_data[, 3]
    }
    list(data = as.matrix(sim_data), colors = scale)
  })
  # Uploaded data restricted to its first three columns for 3-D plotting.
  reduce_to_3d <- reactive({
    sim_data <- data_from_file()
    sim_data$data <- sim_data$data[,1:3]
    sim_data
  })
  # Shared mutable state: the current data set and the last run time.
  DR_data <- reactiveValues(simulation = NULL)
  total_time <- reactiveValues(time_taken = NULL)
  # Per-example parameter input (label and default depend on the example).
  output$ui <- renderUI({
    data_param_label <- switch(input$data_input,
                               "Swiss Roll" = "Height",
                               "Swiss Hole" = "Height",
                               "Corner Planes" = "Angles",
                               "Punctured Sphere" = "Z scale",
                               "Twin Peaks" = "Z scale",
                               "Clusters" = "Number of clusters",
                               "Toroidal Helix" = "Sample rate",
                               "Gaussian" = "Sigma")
    initial_value <- switch(input$data_input,
                            "Swiss Roll" = 1,
                            "Swiss Hole" = 1,
                            "Corner Planes" = 45,
                            "Punctured Sphere" = 1,
                            "Twin Peaks" = 1,
                            "Clusters" = 3,
                            "Toroidal Helix" = 1,
                            "Gaussian" = 1)
    numericInput("data_parameter", data_param_label, value = initial_value)
  })
  # First tab ================================================================================
  # 3-D view of the current manifold (uploaded file takes precedence over
  # the simulated example).
  output$plot_3d <- renderPlotly({
    if (!is.null(data_from_file())) {
      sim_data <- reduce_to_3d()
    } else {
      data_f <- switch(input$data_input,
                       "Swiss Roll" = swiss_roll,
                       "Swiss Hole" = swiss_hole,
                       "Corner Planes" = corner_planes,
                       "Punctured Sphere" = punctured_sphere,
                       "Twin Peaks" = twin_peaks,
                       "Clusters" = clusters_3d,
                       "Toroidal Helix" = toroidal_helix,
                       "Gaussian" = gaussian_random_samples)
      sim_data <- data_f(input$num_pts, input$data_parameter)
      DR_data$simulation <- sim_data
    }
    # Bug fixed: with elementwise |, a NULL data slot made the condition
    # logical(0) and if() errored; || short-circuits to TRUE instead.
    if (is.null(sim_data$data) || (ncol(sim_data$data) < 3)) {
      plotly_empty(type = "scatter", mode = "markers")
    } else {
      plotly_3D(sim_data)
    }
  })
  # 2-D embedding produced by the algorithm selected on the Demo tab.
  output$plot_2d <- renderPlotly({
    if (!is.null(data_from_file())) {
      sim_data <- data_from_file()
      DR_data$simulation <- sim_data
    }
    # Same ||-for-| fix as above: guard must stay scalar when the
    # simulation is NULL.
    if (is.null(DR_data$simulation) || (ncol(DR_data$simulation$data) < 3)) {
      plotly_empty(type = "scatter", mode = "markers")
    } else {
      res <- dr_demo(DR_data$simulation, algor = input$algor,
                     k = input$k, d = input$d, kernel = input$kernel)
      total_time$time_taken <- res[[2]]
      res[[1]]
    }
  })
  output$file_text <- renderText({"Only plots the first 3 dimensions of the data.
    The 4th dimension is used as colors if available; otherwise, the 3rd dimension is used."})
  output$comment_text <- renderText({"The target dimension is fixed at 2."})
  output$plot_text <- renderPrint({
    cat("Time taken:", total_time$time_taken[[1]], "s. \n")
  })
  # Second tab ================================================================================
  output$info_text <- renderText({"(Note: some algorithms may take long to run (e.g. Isomap and t-SNE),
    please avoid clicking the 'Update' button while the calculation is being performed.)"})
  # First comparison panel always shows the source manifold itself.
  output$c_plot_1 <- renderPlotly({
    if (!is.null(data_from_file())) {
      sim_data <- reduce_to_3d()
    } else {
      data_f <- switch(input$data_input,
                       "Swiss Roll" = swiss_roll,
                       "Swiss Hole" = swiss_hole,
                       "Corner Planes" = corner_planes,
                       "Punctured Sphere" = punctured_sphere,
                       "Twin Peaks" = twin_peaks,
                       "Clusters" = clusters_3d,
                       "Toroidal Helix" = toroidal_helix,
                       "Gaussian" = gaussian_random_samples)
      sim_data <- data_f(input$num_pts, input$data_parameter)
      DR_data$simulation <- sim_data
    }
    plotly_3D(sim_data)
  })
  # Layout containers for c_plot_2 .. c_plot_12 (11 algorithm panels).
  output$plot_first_row <- renderUI({
    plot_output_list <- lapply(1:2, function(i) {
      column(6, plotlyOutput(paste0("c_plot_", i + 1)))
    })
    do.call(tagList, plot_output_list)
  })
  output$plot_second_row <- renderUI({
    plot_output_list <- lapply(3:5, function(i) {
      column(4, plotlyOutput(paste0("c_plot_", i + 1)))
    })
    do.call(tagList, plot_output_list)
  })
  output$plot_third_row <- renderUI({
    plot_output_list <- lapply(6:8, function(i) {
      column(4, plotlyOutput(paste0("c_plot_", i + 1)))
    })
    do.call(tagList, plot_output_list)
  })
  output$plot_fourth_row <- renderUI({
    plot_output_list <- lapply(9:11, function(i) {
      column(4, plotlyOutput(paste0("c_plot_", i + 1)))
    })
    do.call(tagList, plot_output_list)
  })
  # On "Update": fill one panel per selected algorithm, leave the rest empty.
  # local() captures a private copy of the loop index for each render closure.
  observeEvent(input$button, {
    algor_list <- input$algor_group
    for (i in 1:11) {
      local({
        local_i <- i + 1
        output[[paste0("c_plot_", local_i)]] <-
          renderPlotly({
            if ((local_i - 1) %in% seq_along(algor_list)) {
              if (!is.null(data_from_file())) {
                DR_data$simulation <- data_from_file()
              }
              res <- dr_demo(DR_data$simulation, algor = algor_list[local_i - 1],
                             k = input$k, d = input$d, kernel = input$kernel)
              total_time$time_taken <- res[[2]]
              res[[1]]
            } else {
              plotly_empty(type = "scatter", mode = "markers")
            }
          })
      })
    }
  })
})
# Run the application: launch Shiny with the UI and server defined above.
shinyApp(ui = ui, server = server)
|
9e10d263676b8c2ac4ecda5fac522cc27bdb15c8 | 01f9fbcab3e94e85bd3e74b24e0fcd7dcab87583 | /TwitterVerse Shiny/Predict.R | 90cb285684c661a66dd58cbf9cd9c71f1cc9cce5 | [] | no_license | jayurbain/TwitterVerse | f564bc88f3c50fe37bb217165d6db8b0c5a786f5 | 09f108d7271fc3009c5df301def3f21770cc4d53 | refs/heads/master | 2021-01-10T08:55:12.709555 | 2016-01-16T23:55:55 | 2016-01-16T23:55:55 | 49,349,286 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,179 | r | Predict.R | require(stringr);
# Collapse runs of spaces to a single space and strip leading/trailing
# spaces from each element of a character vector.
#
# The regex matches (a) leading spaces, (b) any space preceded by another
# space (a Perl lookbehind, hence perl = TRUE), and (c) trailing spaces.
# Idiom fix: TRUE instead of the reassignable shorthand T.
trim <- function(x) {
  return(gsub("^ *|(?<= ) | *$", "", x, perl = TRUE))
}
# Normalise free text into a token vector: strip everything but letters,
# newlines and apostrophes, lowercase, collapse extra whitespace, then
# split on single spaces.
#
# Bug fixed: the original split the untouched input `s`, so the cleaned
# string `ss` was computed and then discarded; the split now uses `ss`.
preprocessText <- function(s) {
  # replace digits and punctuation with spaces
  ss <- gsub("[^a-zA-Z\n\']", " ", s)
  ss <- tolower(ss)
  # collapse repeated spaces and trim the ends
  ss <- trim(ss)
  tokens <- unlist(str_split(ss, " "))
  return(tokens)
}
# predict next ngram using Katz Backoff
predictNgramKB <- function(input, sortPS=FALSE){
tokens <- preprocessText(input)
len <- length(tokens)
predictions <- c()
if(len==1 && tokens[1]=='') {
len=0
}
# trigram input (or more)
if (len >= 3) {
# predict quadgram from trigram
t3 <- tokens[len-2]; t2 <- tokens[len-1]; t1 <- tokens[len]
str<-paste(t3, t2, t1);
s<-paste('^', str, sep='')
ngrams<-df_quadGram[ grep(s, rownames(df_quadGram)), ]
if( dim(ngrams)[1] > 0 ) {
ngrams<-ngrams[with(ngrams, order(p, decreasing=T)),]
ngrams<-specificity(ngrams, 4, sortPS)
predictions<-c(predictions, word(rownames(ngrams), 4))
}
# predict trigram from bigram
if(length(predictions) < 10) {
str<-paste(t2, t1);
s<-paste('^', str, sep='')
ngrams<-df_triGram[ grep(s, rownames(df_triGram)), ]
if( dim(ngrams)[1] > 0 ) {
ngrams<-ngrams[with(ngrams, order(p, decreasing=T)),]
ngrams<-specificity(ngrams, 3, sortPS)
predictions<-c(predictions, word(rownames(ngrams), 3))
}
}
# predict bigram from unigram
if(length(predictions) < 10 ) {
str<-paste(t1);
s<-paste('^', str, sep='')
ngrams<-df_biGram[ grep(s, rownames(df_biGram)), ]
if( dim(ngrams)[1] > 0 ) {
ngrams<-ngrams[with(ngrams, order(p, decreasing=T)),]
ngrams<-specificity(ngrams, 2, sortPS)
predictions<-c(predictions, word(rownames(ngrams), 2))
}
}
predictions<-predictions[!duplicated(predictions)]
}
else if(len == 2) { # bigram input
# predict trigram from bigram
t2 <- tokens[len-1]; t1 <- tokens[len]
str<-paste(t2, t1);
s<-paste('^', str, sep='')
ngrams<-df_triGram[ grep(s, rownames(df_triGram)), ]
if( dim(ngrams)[1] > 0 ) {
ngrams<-ngrams[with(ngrams, order(p, decreasing=T)),]
ngrams<-specificity(ngrams, 3, sortPS)
predictions<-c(predictions, word(rownames(ngrams), 3))
}
# predict bigram from unigram
if(length(predictions) < 10 ) {
str<-paste(t1);
s<-paste('^', str, sep='')
ngrams<-df_biGram[ grep(s, rownames(df_biGram)), ]
if( dim(ngrams)[1] > 0 ) {
ngrams<-ngrams[with(ngrams, order(p, decreasing=T)),]
ngrams<-specificity(ngrams, 2, sortPS)
predictions<-c(predictions, word(rownames(ngrams), 2))
}
}
predictions<-predictions[!duplicated(predictions)]
} else if(len==1) {
# predict bigram from unigram
t1 <- tokens[len]
str<-paste(t1);
s<-paste('^', str, sep='')
ngrams<-df_biGram[ grep(s, rownames(df_biGram)), ]
if( dim(ngrams)[1] > 0 ) {
ngrams<-ngrams[with(ngrams, order(p, decreasing=T)),]
ngrams<-specificity(ngrams, 2, sortPS)
predictions<-c(predictions, word(rownames(ngrams), 2))
}
# predict unigram from nothing
if(length(predictions) < 10 ) {
ngrams<-head(rownames(df_biGram))
#ngrams<-specificity(ngrams, 4, sortPS)
predictions <- c(predictions, ngrams, 10)
}
if(length(predictions) > 0) {
predictions<-predictions[!duplicated(predictions)]
predictions <- predictions[!is.na(predictions)]
}
} else if(len==0) {
# predict unigram
predictions<-head(rownames(df_uniGram), 10)
}
return(head(predictions, 10))
}
specificity <- function(ngrams, n, sortPS = FALSE) {
  # Score candidate n-grams by how "specific" their final word is.
  #
  # For each candidate, countTotal is the number of rows in the global
  # df_quadGram table whose key ends with the candidate's n-th word; the
  # specificity score is then pS = count / countTotal.
  #
  # ngrams: data.frame of candidates (rownames = the n-gram text; must
  #         contain a `count` column).
  # n:      position of the word to match (the candidate's last word).
  # sortPS: if TRUE, return the rows ordered by descending pS.
  #
  # Depends on the globals df_quadGram and stringr::word().
  final_words <- word(rownames(ngrams), n)
  quad_keys <- rownames(df_quadGram)
  ngrams$countTotal <- vapply(
    final_words,
    function(w) length(grep(paste0(w, "$"), quad_keys)),
    numeric(1),
    USE.NAMES = FALSE
  )
  ngrams$pS <- ngrams$count / ngrams$countTotal
  if (sortPS == TRUE) {
    ngrams <- ngrams[order(ngrams$pS, decreasing = TRUE), ]
  }
  return(ngrams)
}
|
ea262c73ba3d718c27687b40bff13cb8db2db91d | c163af509749f2291b3954e4acdf61e08f6c15b0 | /simu.R | a60c483c9b4625eb022697857c548a9cb3014c63 | [] | no_license | haoluns/mixspa | 9093f0f644e8761d6743ee51ad17b3fd7e3777d6 | 367ae6050ab4ddf1de5e0dac931f92f45a79ef35 | refs/heads/master | 2020-09-16T19:04:48.388125 | 2019-11-25T04:51:56 | 2019-11-25T04:51:56 | 223,861,853 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,897 | r | simu.R | load("first3_sparse.RData")
library(fda);library(dplyr)
library(Rmpfr)
library(dplyr)
library(reshape2)
library(data.table)
library(lsei)
source("funs_mixspa.R")
library(fda)
# Global B-spline setup shared by all fits below: observations live on [1, 10].
timepts=1:10;
norder=4 ## cubic B-spline
# NOTE(review): nbasis computed here is norder + 10 - 2 = 12, but the basis
# created below hard-codes nbasis = 6 -- confirm which value was intended.
nbasis=norder+length(timepts)-2;
# 6-function cubic B-spline system on [1, 10]; read as a global by
# soapfpca() and mixFPCA() below.
spline_basis=create.bspline.basis(rangeval=c(1,10),nbasis=6,norder=4)
library(fda)
library(fdapace)
# Estimate the first three functional principal components of one cluster's
# (already centered) curves.
#
# observedx:   list of numeric vectors, one sparse curve per subject.
# timepointsx: list of numeric vectors, the observation times for observedx.
#
# Pipeline: a PACE fit (fdapace::FPCA) supplies starting values; FPC 1 is
# refined with the project-local first_FPC(), and FPCs 2-3 are fitted
# sequentially on the previous step's residuals with the project-local
# third_FPC_conditional2() (presumably conditioning on the earlier betas via
# `betalist` -- TODO confirm in that helper).  Reads the global
# `spline_basis` and draws rnorm() starting vectors, so results depend on
# the RNG state at call time.
soapfpca<-function(observedx,timepointsx){
# PACE fit with GCV-chosen bandwidths; used only for initialization.
res_pace<- FPCA(observedx, timepointsx,list(dataType='Sparse',error=TRUE, kernel='epan',
verbose=FALSE,methodBwCov="GCV",methodBwMu="GCV"))
# Project the PACE eigenfunctions onto the global spline basis.
coef_mat0 = coef(Data2fd(argvals = res_pace$workGrid,y=res_pace$phi,spline_basis))
# FPC 1, initialized at the first PACE eigenfunction's coefficients.
pc1s = first_FPC(coef_mat0[,1],observed=observedx, timepoints=timepointsx,minit=6,gamma=1e1,threshold=1e-5)
previous_beta = list()
previous_beta[[1]] = pc1s$beta
# FPC 2: random start, fitted on FPC 1's residuals given previous_beta.
pc2s = third_FPC_conditional2(rnorm(spline_basis$nbasis), observed=pc1s$residual, timepoints=timepointsx, gamma=3e4,betalist =previous_beta,threshold=1e-4,range_score=max(abs(pc1s$sfit)))
previous_beta[[2]] = pc2s$beta
# FPC 3, given the first two.
pc3s = third_FPC_conditional2(rnorm(spline_basis$nbasis), observed=pc2s$residual, timepoints=timepointsx, gamma=1e2,betalist =previous_beta,threshold=1e-3,range_score=max(abs(pc2s$sfit)))
return(list(pc1=pc1s,pc2=pc2s,pc3=pc3s))
}
mixFPCA<-function(observed,timepoints){
  # Fit a two-component mixture of sparse functional PCA models.
  #
  # observed:   list of numeric vectors (one sparse curve per subject).
  # timepoints: list of numeric vectors, the observation times for `observed`.
  #
  # Returns the two estimated cluster means (mu1 = higher-mean cluster), the
  # first three FPCs per cluster from the soap estimator, the mixture
  # mean-fit object, and PACE FPC estimates for comparison.  Depends on the
  # global `spline_basis`, on soapfpca() above, and on the project-local
  # helper findmean().
  sigma = 10  # NOTE(review): never used below -- confirm whether it was
              # meant to be passed to findmean().

  # Step 1: mixture mean fit.
  fmeanfit = findmean(observed, timepoints, minit=6,gamma=0,threshold=5e-3)

  # Evaluate both candidate means on each subject's grid, only to decide
  # which component has the higher average level.
  observedmu1 = lapply(1:length(observed),function(i){
    (eval.fd(timepoints[[i]], fmeanfit$pc_fit1))[,1]
  })
  observedmu2 = lapply(1:length(observed),function(i){
    (eval.fd(timepoints[[i]], fmeanfit$pc_fit2))[,1]
  })

  # Label components so that mu1 is always the higher-mean cluster;
  # reverseflag records whether the fit's original order was swapped.
  # (The unused swap of observedmu1/observedmu2 and the never-read
  # fmeanfit_sigmak locals of the original were removed.)
  if (mean(sapply(observedmu1,mean))<mean(sapply(observedmu2,mean))){
    mu1 = fmeanfit$pc_fit2
    mu2 = fmeanfit$pc_fit1
    reverseflag = 1
  }else{
    mu1 = fmeanfit$pc_fit1
    mu2 = fmeanfit$pc_fit2
    reverseflag = 0
  }

  # Center every curve under each cluster mean.
  observedcentered1 = lapply(1:length(observed),function(i){
    observed[[i]]-(eval.fd(timepoints[[i]], mu1))[,1]
  })
  observedcentered2 = lapply(1:length(observed),function(i){
    observed[[i]]-(eval.fd(timepoints[[i]], mu2))[,1]
  })

  # BUG FIX: use the posterior weights of the fit computed above (fmeanfit).
  # The original read `meanfit$wikmat`, i.e. the global true-model object
  # from the calling environment, by accident -- note that this function
  # itself returns `meanfit = fmeanfit` below.
  owik = fmeanfit$wikmat
  # Hard classification from the posterior weights, honoring the label swap.
  if(reverseflag == 0){
    classest = ifelse(owik[,2]>0.5,1,2)
  }else{
    classest = ifelse(owik[,2]>0.5,2,1)
  }

  # Split the centered curves by estimated cluster.
  observed1 = observedcentered1[which(classest==1)]
  timepoints1 = timepoints[which(classest==1)]
  observed2 = observedcentered2[which(classest==2)]
  timepoints2 = timepoints[which(classest==2)]

  # PACE fits per cluster (first three eigenfunctions), for comparison.
  res_pace1<- FPCA(observed1, timepoints1,list(dataType='Sparse',error=TRUE, kernel='epan', verbose=TRUE,methodBwCov="GCV",methodBwMu="GCV",methodSelectK = 3))
  res_pace2<- FPCA(observed2, timepoints2,list(dataType='Sparse',error=TRUE, kernel='epan', verbose=TRUE,methodBwCov="GCV",methodBwMu="GCV",methodSelectK = 3))
  # Express the PACE eigenfunctions in the global spline basis.
  pacefpc11 = Data2fd(argvals = res_pace1$workGrid,y=res_pace1$phi[,1],spline_basis)
  pacefpc12 = Data2fd(argvals = res_pace1$workGrid,y=res_pace1$phi[,2],spline_basis)
  pacefpc13 = Data2fd(argvals = res_pace1$workGrid,y=res_pace1$phi[,3],spline_basis)
  pacefpc21 = Data2fd(argvals = res_pace2$workGrid,y=res_pace2$phi[,1],spline_basis)
  pacefpc22 = Data2fd(argvals = res_pace2$workGrid,y=res_pace2$phi[,2],spline_basis)
  pacefpc23 = Data2fd(argvals = res_pace2$workGrid,y=res_pace2$phi[,3],spline_basis)

  # Soap FPC estimates per cluster.
  fpc1x = soapfpca(observed1,timepoints1)
  fpc2x = soapfpca(observed2,timepoints2)

  return(list(
    mu1 = mu1,
    mu2 = mu2,
    pc11 = fpc1x$pc1$pc_fit,
    pc12 = fpc2x$pc1$pc_fit,
    pc21 = fpc1x$pc2$pc_fit,
    pc22 = fpc2x$pc2$pc_fit,
    pc31 = fpc1x$pc3$pc_fit,
    pc32 = fpc2x$pc3$pc_fit,
    meanfit = fmeanfit,
    pacepc11 = pacefpc11,
    pacepc12 = pacefpc12,
    pacepc13 = pacefpc13,
    pacepc21 = pacefpc21,
    pacepc22 = pacefpc22,
    pacepc23 = pacefpc23
  ))
}
# ---------------------------------------------------------------------------
# Simulation study: generate two-cluster sparse functional data from the
# "true" model (globals meanfit, fpc1, fpc2 loaded from first3_sparse.RData),
# fit mixFPCA(), and record estimation errors for the soap and PACE FPCs
# plus the classification accuracy.
# ---------------------------------------------------------------------------
ssize = 150                   # subjects per replicate
timegrid = seq(1,10,by=0.05)  # dense candidate observation grid
pp=c(0.3,0.4,0.5)             # mixing probabilities to try
sparse=TRUE                   # TRUE: 20 random time points per subject
for(jj in 1:3){
for(it in 1:100){
# Cluster label per subject: 1 with probability pp[jj], otherwise 2.
clusters = lapply(1:ssize,function(i){
x= rbinom(1,1,pp[jj])
ifelse(x==1,1,2)
})
# Sparse design: 20 sorted time points sampled per subject.
timepoints = lapply(1:ssize,function(i){
curtimegrid = sample(timegrid,20)
sort(curtimegrid)
})
# Simulate each curve: cluster mean + 3 FPCs with N(0, 50/25/10)-sd scores
# + N(0, 0.1) measurement noise.
observed = lapply(1:ssize,function(i){
if(sparse==TRUE){
tm=timepoints[[i]]
}else{
tm = timegrid
}
mu1 = eval.fd(tm, meanfit$pc_fit1)
mu2 = eval.fd(tm, meanfit$pc_fit2)
pc1add1 = eval.fd(tm, fpc1$pc1$pc_fit)*rnorm(1,0,50)
pc1add2 = eval.fd(tm, fpc2$pc1$pc_fit)*rnorm(1,0,50)
pc2add1 = eval.fd(tm, fpc1$pc2$pc_fit)*rnorm(1,0,25)
pc2add2 = eval.fd(tm, fpc2$pc2$pc_fit)*rnorm(1,0,25)
pc3add1 = eval.fd(tm, fpc1$pc3$pc_fit)*rnorm(1,0,10)
pc3add2 = eval.fd(tm, fpc2$pc3$pc_fit)*rnorm(1,0,10)
err1=rnorm(1,mean=0,sd=0.1)
err2=rnorm(1,mean=0,sd=0.1)
# [[i]] extracts the scalar label (the original's [i] returned a
# length-1 list, which only compared correctly by coercion).
if(clusters[[i]]==1){
return(as.numeric(mu1+pc1add1+pc2add1+pc3add1+err1))
}else{
# BUG FIX: cluster-2 curves previously added pc3add1 (cluster 1's
# third component) instead of pc3add2.
return(as.numeric(mu2+pc1add2+pc2add2+pc3add2+err2))
}
})
# Dense design: every subject observed on the full grid.
if(sparse==FALSE){
timepoints = lapply(1:ssize,function(i){
timegrid
})
}
#inprod(pacemu-meanfit$pc_fit,pacemu-meanfit$pc_fit)
res_cfpca = mixFPCA(observed,timepoints)
# Squared L2 errors of the estimated cluster means.
errorcfpca_mu1 = inprod(res_cfpca$mu1-meanfit$pc_fit1,res_cfpca$mu1-meanfit$pc_fit1)
errorcfpca_mu2 = inprod(res_cfpca$mu2-meanfit$pc_fit2,res_cfpca$mu2-meanfit$pc_fit2)
# Soap FPC errors, minimized over the eigenfunction sign ambiguity.
errorcfpca_pc11= min(inprod(res_cfpca$pc11-fpc1$pc1$pc_fit,res_cfpca$pc11-fpc1$pc1$pc_fit),inprod(res_cfpca$pc11+fpc1$pc1$pc_fit,res_cfpca$pc11+fpc1$pc1$pc_fit))
errorcfpca_pc12= min(inprod(res_cfpca$pc12-fpc2$pc1$pc_fit,res_cfpca$pc12-fpc2$pc1$pc_fit),inprod(res_cfpca$pc12+fpc2$pc1$pc_fit,res_cfpca$pc12+fpc2$pc1$pc_fit))
errorcfpca_pc21 = min(inprod(res_cfpca$pc21-fpc1$pc2$pc_fit,res_cfpca$pc21-fpc1$pc2$pc_fit),inprod(res_cfpca$pc21+fpc1$pc2$pc_fit,res_cfpca$pc21+fpc1$pc2$pc_fit))
errorcfpca_pc22 = min(inprod(res_cfpca$pc22-fpc2$pc2$pc_fit,res_cfpca$pc22-fpc2$pc2$pc_fit),inprod(res_cfpca$pc22+fpc2$pc2$pc_fit,res_cfpca$pc22+fpc2$pc2$pc_fit))
errorcfpca_pc31 = min(inprod(res_cfpca$pc31-fpc1$pc3$pc_fit,res_cfpca$pc31-fpc1$pc3$pc_fit),inprod(res_cfpca$pc31+fpc1$pc3$pc_fit,res_cfpca$pc31+fpc1$pc3$pc_fit))
errorcfpca_pc32 = min(inprod(res_cfpca$pc32-fpc2$pc3$pc_fit,res_cfpca$pc32-fpc2$pc3$pc_fit),inprod(res_cfpca$pc32+fpc2$pc3$pc_fit,res_cfpca$pc32+fpc2$pc3$pc_fit))
# PACE FPC errors (pacepc{cluster}{component}), same sign handling.
errorpace_pc11= min(inprod(res_cfpca$pacepc11-fpc1$pc1$pc_fit,res_cfpca$pacepc11-fpc1$pc1$pc_fit),inprod(res_cfpca$pacepc11+fpc1$pc1$pc_fit,res_cfpca$pacepc11+fpc1$pc1$pc_fit))
errorpace_pc12= min(inprod(res_cfpca$pacepc21-fpc2$pc1$pc_fit,res_cfpca$pacepc21-fpc2$pc1$pc_fit),inprod(res_cfpca$pacepc21+fpc2$pc1$pc_fit,res_cfpca$pacepc21+fpc2$pc1$pc_fit))
errorpace_pc21 = min(inprod(res_cfpca$pacepc12-fpc1$pc2$pc_fit,res_cfpca$pacepc12-fpc1$pc2$pc_fit),inprod(res_cfpca$pacepc12+fpc1$pc2$pc_fit,res_cfpca$pacepc12+fpc1$pc2$pc_fit))
errorpace_pc22 = min(inprod(res_cfpca$pacepc22-fpc2$pc2$pc_fit,res_cfpca$pacepc22-fpc2$pc2$pc_fit),inprod(res_cfpca$pacepc22+fpc2$pc2$pc_fit,res_cfpca$pacepc22+fpc2$pc2$pc_fit))
errorpace_pc31 = min(inprod(res_cfpca$pacepc13-fpc1$pc3$pc_fit,res_cfpca$pacepc13-fpc1$pc3$pc_fit),inprod(res_cfpca$pacepc13+fpc1$pc3$pc_fit,res_cfpca$pacepc13+fpc1$pc3$pc_fit))
errorpace_pc32 = min(inprod(res_cfpca$pacepc23-fpc2$pc3$pc_fit,res_cfpca$pacepc23-fpc2$pc3$pc_fit),inprod(res_cfpca$pacepc23+fpc2$pc3$pc_fit,res_cfpca$pacepc23+fpc2$pc3$pc_fit))
o_sigma = res_cfpca$meanfit$sigma
o_pik = res_cfpca$meanfit$pik
owik = res_cfpca$meanfit$wikmat
# Classification accuracy; max(p, 1 - p) makes it label-switching invariant.
classest = ifelse(owik[,2]>0.5,1,2)
classtrue = clusters%>%do.call(c,.)
correctrate = max(length(which(classest==classtrue))/ssize,1-length(which(classest==classtrue))/ssize)
output = c(
errorcfpca_mu1,errorcfpca_mu2,
errorcfpca_pc11,errorcfpca_pc12,
errorcfpca_pc21,errorcfpca_pc22,
errorcfpca_pc31,errorcfpca_pc32,
errorpace_pc11,errorpace_pc12,
errorpace_pc21,errorpace_pc22,
errorpace_pc31,errorpace_pc32,
o_sigma,o_pik,correctrate
)
# NOTE(review): outputmat is re-initialized at it == 1 of every jj, so the
# results of earlier jj settings are overwritten; save outputmat per jj if
# all three settings are needed.
if (it==1){
outputmat = output
}else{
outputmat = rbind(outputmat,output)
}
}
}
|
8288dc56c96fc47068933c25a5c46ffe90e38d40 | 79b54712f13b3113630b91b71433418acc6dc934 | /server.R | c8215e32dbea71607809bed2d4fb263b1fb29233 | [] | no_license | Geekiac/Developing_Data_Products_Course_Project | 866abb10c37943e5ede45cececcd49a71ac96e6e | db8bafc04201a4c177d31967e0c86ba82b9caf38 | refs/heads/master | 2021-01-17T18:03:45.866786 | 2016-07-12T20:34:45 | 2016-07-12T20:34:45 | 63,190,377 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,471 | r | server.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Globals holding the most recently simulated random walks; shared between
# the plot and summary outputs via <<- assignments.
# NOTE(review): sharing via globals is a Shiny anti-pattern -- the summary
# outputs below read these variables but depend on no reactive values, so
# they render once and are never invalidated when the inputs change.  A
# reactive() expression would keep plot and summary in sync.
y_1d <- c()
x_2d <- c()
y_2d <- c()
# Server logic: simulate and plot 1-D and 2-D random walks driven by the
# stepSize and numSteps inputs.
shinyServer(function(input, output) {
stepSize <- reactive({
input$stepSize
})
numSteps <- reactive({
input$numSteps
})
# 1-D walk: cumulative sum of rounded Uniform(-stepSize, stepSize) steps.
output$walk_1d <- renderPlot({
y_1d <<- c(0, runif(numSteps()-1, -stepSize(), stepSize()))
y_1d <<- round(y_1d)
# NOTE(review): 2:numSteps() counts DOWN (c(2, 1)) when numSteps() < 2;
# seq_len()/seq() would guard this edge case.
for(i in 2:numSteps()) {
y_1d[i] <<- y_1d[i] + y_1d[i-1]
}
plot(y_1d, type="l", col="blue")
abline(h=0, col="red")
})
output$summary_1d <- renderPrint({
# NOTE(review): this bare string is evaluated and discarded; only the
# summary() below is printed.
"summary of y"
summary(y_1d)
})
# 2-D walk: alternate steps between the x and y axes.
# NOTE(review): unlike the 1-D walk, the step values here are not rounded
# -- confirm whether that asymmetry is intended.
output$walk_2d <- renderPlot({
vals <- c(0, runif(numSteps()-1, -stepSize(), stepSize()))
x_2d <<- c(0)
y_2d <<- c(0)
for(i in 2:numSteps()) {
if(i %% 2 == 0) {
x_2d[i] <<- x_2d[i-1] + vals[i]
y_2d[i] <<- y_2d[i-1]
} else {
x_2d[i] <<- x_2d[i-1]
y_2d[i] <<- y_2d[i-1] + vals[i]
}
}
plot(x_2d, y_2d, type="l", col="blue")
abline(h=0, col="red")
abline(v=0, col="red")
})
output$summary_2d_x <- renderPrint({
summary(x_2d)
})
output$summary_2d_y <- renderPrint({
summary(y_2d)
})
})
|
48d8ba3a17f5c8a5e086cab36d9a86377b76a82a | 257c46df84db47f4dbd61e289852cd0e80fb6c99 | /data preprocessing/r_Supervised_Classification.r | 8384108e43b337c55b1a98f25bbd015128ff5d6f | [] | no_license | bordenchen/DBSCAN | 175f3578bdb0eac93d4cfb07a488dc27bdbc323a | 54c7459d641eda56a9e68f022dbbb05efbaba0de | refs/heads/master | 2020-03-30T12:13:49.236185 | 2018-10-08T06:27:08 | 2018-10-08T06:27:08 | 151,215,248 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 692 | r | r_Supervised_Classification.r | require(rpart)
require(tree)
library(rpart.plot)
ecoli.df = read.csv("C:/Users/USER/Desktop/dataprocessing/result.csv")
ecoli.rpart1 = rpart(class ~ R+G+B,data = ecoli.df)
summary(ecoli.rpart1)
plotcp(ecoli.rpart1)
rpart.plot(ecoli.rpart1)
ecoli.tree1 = tree(class ~ R+G+B,data = ecoli.df)
summary(ecoli.tree1)
plot(ecoli.tree1)
text(ecoli.tree1, all = T)
Cv_Tree<-cv.tree(ecoli.tree1,FUN = prune.misclass)
plot(Cv_Tree)
treePrunedMod <- prune.misclass(ecoli.tree1, best = 4)
plot(treePrunedMod)
data<-ecoli.df[,1:3]
R<-0.1
G<-0.5
B<-0.3
dad<-as.data.frame(cbind(R,G,B))
out <- predict(ecoli.tree1 ,ecoli.df[,1:3] , type = "class")
out_df<-as.data.frame(out)
out_df$class<-ecoli.df[,4]
|
290fc87e8433ac111bbe1d6cc7af05cfa8c413a9 | 97ac03d8fd78c0ed5fcd60fcb0beed5ac873124d | /analysis.R | 17257969e484caa01421deedbb5685688a15b23f | [
"CC0-1.0"
] | permissive | AbbyHeron/Workshop1 | 13ec5381364b6f24749c2d9ebf2254c2bbceda5f | 42df74e4e2ef47cc191edcf8a2cc1811695c862d | refs/heads/main | 2023-01-01T21:03:29.318917 | 2020-10-22T16:12:22 | 2020-10-22T16:12:22 | 306,366,593 | 0 | 1 | CC0-1.0 | 2020-10-22T16:12:23 | 2020-10-22T14:39:02 | R | UTF-8 | R | false | false | 2,110 | r | analysis.R | #Reading the data into R
chaff <- read.table("chaff.txt", header = T)
#Loading in Tidyverse
library(tidyverse)
#The data is in two columns of males and females so it is important to put the data into tidy format -
#males and females in one column (sex) and the value in another column (max)
chaff2 <- gather(data = chaff, key = sex, value = max)
#Analysing the data to see if there is a difference in mass between males and females
#Running a one way ANOVA of the data to see if there is a significant difference
mod <- aov(max ~ sex, data = chaff2)
mod
t.test(data = chaff2, max~sex)
#There is no significant difference between the mass of males and females
#Loading in RMisc to generate a summary of the data
library(Rmisc)
#Generating a summary of the data
chaffsummary <- summarySE(chaff2, measurevar = "max", groupvars = "sex")
chaffsummary
#Creating a plot to see if there is a difference in mass between males and females
#Loading ggplot to create a plot
library(ggplot2)
#Creating a plot
fig1 <- ggplot(data = chaffsummary, aes(x = sex, y = max, fill = sex))+
geom_bar(stat = "identity", colour = "black")+
geom_errorbar(aes(ymin = max-se, ymax = max+se), width = 0.2, size = 1)+
theme_classic()+
scale_fill_manual(values=c("dark grey", "#EA3C3C")) +
theme(axis.line = element_line(size = 1.25),
axis.ticks = element_line(size = 1.25),
axis.text.x = element_text(size = rel(1.2), colour = "black"),
axis.text.y = element_text(size = rel(1.2), colour = "black"),
legend.text = element_text(size = rel(1)),
legend.position = "none")+
theme(text = element_text(size=14))+
labs(x = "Sex", y = "Mass (g)")+
scale_x_discrete(labels = parse(text=c("females" = "Females", "males" = "Males")))
#Creating figure saving settings to make it easier to save figures
units <- "in"
fig_w <- 3.5
fig_h <- fig_w
dpi <- 300
device <- "tiff"
#Saving the figure to a file
ggsave("fig1.tiff",
plot = fig1,
device = device,
width = fig_w,
height = fig_h,
units = units,
dpi = dpi)
#adding comment "Hi, its Taryn" |
4664e21048e477dcdebc197f42c3d17803dac21a | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /wk/man/wkb_translate_wkt.Rd | 7e9d45fa4460f268ddbeacf72993aed3f18778fb | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 3,282 | rd | wkb_translate_wkt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/translate.R
\name{wkb_translate_wkt}
\alias{wkb_translate_wkt}
\alias{wkb_translate_wkb}
\alias{wkb_translate_wksxp}
\alias{wkt_translate_wkt}
\alias{wkt_translate_wkb}
\alias{wkt_translate_wksxp}
\alias{wksxp_translate_wkt}
\alias{wksxp_translate_wkb}
\alias{wksxp_translate_wksxp}
\alias{wk_platform_endian}
\title{Translate between WKB and WKT}
\usage{
wkb_translate_wkt(
wkb,
include_z = NA,
include_m = NA,
include_srid = NA,
precision = 16,
trim = TRUE
)
wkb_translate_wkb(
wkb,
include_z = NA,
include_m = NA,
include_srid = NA,
endian = wk_platform_endian(),
buffer_size = 2048
)
wkb_translate_wksxp(wkb, include_z = NA, include_m = NA, include_srid = NA)
wkt_translate_wkt(
wkt,
include_z = NA,
include_m = NA,
include_srid = NA,
precision = 16,
trim = TRUE
)
wkt_translate_wkb(
wkt,
include_z = NA,
include_m = NA,
include_srid = NA,
endian = wk_platform_endian(),
buffer_size = 2048
)
wkt_translate_wksxp(wkt, include_z = NA, include_m = NA, include_srid = NA)
wksxp_translate_wkt(
wksxp,
include_z = NA,
include_m = NA,
include_srid = NA,
precision = 16,
trim = TRUE
)
wksxp_translate_wkb(
wksxp,
include_z = NA,
include_m = NA,
include_srid = NA,
endian = wk_platform_endian(),
buffer_size = 2048
)
wksxp_translate_wksxp(wksxp, include_z = NA, include_m = NA, include_srid = NA)
wk_platform_endian()
}
\arguments{
\item{wkb}{A \code{list()} of \code{\link[=raw]{raw()}} vectors, such as that
returned by \code{sf::st_as_binary()}.}
\item{include_z, include_m, include_srid}{Include the
values of the Z and M coordinates and/or SRID
in the output? Use \code{FALSE} to omit, \code{TRUE} to include, or \code{NA} to include
only if present. Note that using \code{TRUE} may result in an error if there
is no value present in the original.}
\item{precision}{The rounding precision to use when writing
(number of decimal places).}
\item{trim}{Trim unnecessary zeroes in the output?}
\item{endian}{For WKB writing, 0 for big endian, 1 for little endian.
Defaults to \code{\link[=wk_platform_endian]{wk_platform_endian()}} (slightly faster).}
\item{buffer_size}{For WKB writing, the initial buffer size to use for
each feature, in bytes. This will be extended when needed, but if you
are calling this repeatedly with huge geometries, setting this value
to a larger number may result in less copying.}
\item{wkt}{A character vector containing well-known text.}
\item{wksxp}{A \code{list()} of classed objects}
}
\value{
\verb{*_translate_wkt()} returns a character vector of
well-known text; \verb{*_translate_wkb()} returns a list
of raw vectors, and \verb{*_translate_wksxp()} returns an unclassed
list of \code{\link[=wksxp]{wksxp()}} geometries. Unlike \code{\link[=as_wkb]{as_wkb()}}, \code{\link[=as_wkt]{as_wkt()}}, and
\code{\link[=as_wksxp]{as_wksxp()}}, these functions do not attach
a class to the output.
}
\description{
Translate between WKB and WKT
}
\examples{
# translate between WKT and WKB
(wkb <- wkt_translate_wkb("POINT (30 10)"))
wkb_translate_wkt(wkb)
# some basic creation options are also available
wkt_translate_wkt("POINT (30 10)", trim = FALSE)
wkb_translate_wkb(wkb, endian = 0)
}
|
d20dee242bbce83ef94da7809cd141c0f56d39de | 8ba77da6aec29d1677a16d8f72d02fe30383566f | /CTD_graph.R | 6755eca6b8ee345f3f7f5a731f3757a51a0e7055 | [] | no_license | mattgrobelny/ThermalGrad_Analysis | 2d11805599194cf80042feb2f8c612ec26c24088 | c9384817c76ff8156d0ec0ec3665c367b4996278 | refs/heads/master | 2021-06-15T14:49:47.149637 | 2017-04-23T02:04:53 | 2017-04-23T02:04:53 | 65,787,720 | 0 | 0 | null | 2017-04-18T19:59:34 | 2016-08-16T04:33:31 | R | UTF-8 | R | false | false | 2,535 | r | CTD_graph.R | # Data reanalysis for the thermal gradient project
# By: Mateusz Grobelny
########################################################################################################################################
# Libraries
library(ggplot2)
library(RColorBrewer)
library(fields)
library(MASS)
library(FSA)
library(dunn.test)
library(lattice)
library(coin)
# Import data set
#Master_species_gradient_logger_data <- read.csv("~/Documents/OneDrive/Antarctica Files/Data/Gradient Project/Hobodata/Large Tank/DATA_reanalysis/Master_species_gradient_logger_data.csv")
#set wd
setwd("~/Documents/OneDrive/Antarctica Files/Data/Gradient Project/Hobodata/Large Tank/DATA_reanalysis")
XBT_R <- read.csv("~/Documents/OneDrive/Antarctica Files/Data/Gradient Project/XBT/XBT_R.csv")
head(XBT_R)
#Set Depth limit
N<-250
#set up data frames
S1<-XBT_R$Site1[3:N]
S2<-XBT_R$Site2[3:N]
S3<-XBT_R$Site3[3:N]
SS_S<-data.frame(S1,S2,S3)
D1<-XBT_R$Depth[3:N]
D2<-XBT_R$Depth[3:N]
D3<-XBT_R$Depth[3:N]
DD_D<-data.frame(D1,D2,D3)
#plot
matplot(y=DD_D,x=SS_S,type="l",lty=c(1,1,1),ylim = rev(range(-10:max(D1))),xlim=c(-2,2),xlab="Seawater Temperature (°C) ", ylab="Depth (m)", col=c("gray8","gray44","gray64"),lwd=2.5,axes = F)
box()
axis(side = 1, tck = .01, labels = NA)
axis(side = 2, tck = .01, labels = NA)
axis(side = 4, tck = .01, labels = NA)
axis(side = 3, tck = .01, labels = NA)
axis(side = 1, lwd = 0, line = -.7)
axis(side = 2, lwd = 0, line = -.7, las = 1)
#add gradient lines which show the range of the experimental tank
# abline(v=-1.3,col = "blue",lty=2)
# abline(v=1.3,col = "red",lty=2)
abline(h=0,col = "black",lty=3)
###mtext(side = 2, "Outcome Variable",font=3 line = 2)
### Redo in ggplot2
N<-1400
ggplot_XBT_2014 = ggplot(data=na.omit(XBT_R[3:N,1:4]), aes(y=Depth)) +
geom_path(aes(x=as.numeric(Site3),group=1, color="Gerlache Strait"), size =1.1)+
geom_path(aes(x=Site1,group=1, color="Bismark Strait"), size =1.1)+
geom_path(aes(x=Site2,group=1, color="Dallman Bay"), size =1.1)+
theme_bw()+
xlim(-2,2) +
scale_y_reverse()+
geom_hline(yintercept = 0, color = "black",
linetype = "dashed", alpha = 0.7) +
xlab(expression("Temperature "*~degree*C))+
ylab("Depth (m)")+
scale_colour_manual(name="Location", values=c("Bismark Strait" = "red", "Dallman Bay" = "red4", "Gerlache Strait" = "blue4"),
guide = guide_legend(fill = NULL,colour = NULL)) + theme(legend.position = "bottom")
ggplot_XBT_2014
ggsave(ggplot_XBT_2014, file = "ggplot_XBT_2014.png", dpi = 500)
|
87dcdd8fb86ba7bbb87403d4c5ddaaeb1fe8f7cf | befeec107b70db0ea26bdcb2cfc6498bd68b0b05 | /src/postp.R | d87c8009d35c473ee5f73a85ad03028150bedc1b | [
"Apache-2.0"
] | permissive | vuw-sim-stia/lit-cascades | a93302e81fe87f1b05d9336e686fa57c97622eed | 3d56ffa6f7703e09ed1a5dec8eec021ce6825ab5 | refs/heads/master | 2021-01-13T09:37:36.179435 | 2018-08-27T23:20:01 | 2018-08-27T23:20:01 | 72,040,702 | 2 | 2 | Apache-2.0 | 2018-06-26T03:31:26 | 2016-10-26T20:14:46 | HTML | UTF-8 | R | false | false | 3,309 | r | postp.R | #set working directoy
setwd("/Users/mlr/OneDrive - Victoria University of Wellington - STAFF/Git/lit-cascades/src/")
#slice_sizes <- c(1000,500,250)
slice_sizes <- c(1000)
options(scipen=10000)
dat <- fread(paste0("TLit/www/output/",sliceSize,"/",theSource,"_temporal_statistics.csv"),sep=" ", header = T,fill = T)
dat<-dat[,-1]
colnames(dat)<-c("dpub","specificity","diversity","ShannonWiener","Pielou","Richness","Diameter","Density","Modularity")
dat$cnt <- c(1:nrow(dat))
# MDS on social net extracted
links <- read.table("TLit/www/output/1000/Great_Expectations_socialnetwork_links.csv",sep = " ",header = T,stringsAsFactors = F)
chars <- unique(c(links$id1,links$id2))
mat <- matrix(data = 0,nrow=length(chars),ncol=length(chars),dimnames = list(chars,chars))
for(i in 1:nrow(links)){
mat[links[i,1],links[i,2]]<-links[i,3]
mat[links[i,2],links[i,1]]<-links[i,3]
}
mat_dis <- max(mat)-mat
d <- dist(mat) # euclidean distances between the rows
fit <- cmdscale(d,eig=TRUE, k=2) # k is the number of dim
#fit # view results
# plot solution
x <- fit$points[,1]
y <- fit$points[,2]
plot(x, y, xlab="x", ylab="y",pch='.')
text(x, y, labels = row.names(mat), cex=.5, pos = 4)
#3D
library(rgl)
library(RColorBrewer)
data.mds <- cmdscale(d, k=3)
#Create x,y refs
data.x <- data.mds[,1]
data.y <- data.mds[,2]
data.z <- data.mds[,3]
#Plot
plot3d(data.x, data.y, data.z, col=brewer.pal(nrow(mat), "BrBG"),xlab="x", ylab="y",zlab="z")
text3d(data.x, data.y, data.z,rownames(mat),cex=.8)
#MDS on all network featues extracted
netstat <- read.table("TLit/www/output/1000/netstat_combined.csv",header = F,stringsAsFactors = F)
netstatmat <- netstat[,c(2,5,6,7)]
d <- dist(netstatmat) # euclidean distances between the rows
fit <- cmdscale(d,eig=TRUE, k=2) # k is the number of dim
# plot solution
x <- fit$points[,1]
y <- fit$points[,2]
plot(x, y, xlab="x", ylab="y",pch='.')
text(x, y, labels = netstat$V1, cex=.5, pos = 4)
netstat <- read.table("TLit/www/output/1000/socnetstat_combined.csv",header = F,stringsAsFactors = F)
netstatmat <- netstat[,c(2,5,6,7)]
d <- dist(netstatmat) # euclidean distances between the rows
fit <- cmdscale(d,eig=TRUE, k=2) # k is the number of dim
# plot solution
x <- fit$points[,1]
y <- fit$points[,2]
plot(x, y, xlab="x", ylab="y",pch='.')
text(x, y, labels = netstat$V1, cex=.5, pos = 4)
allsocialnets <- list.files(paste0("TLit/www/output/1000/"),
pattern = "(.*)_socialnetwork_links.csv",
full.names = T)
for(net in allsocialnets){
links <- read.table(net,header = T,stringsAsFactors = F, sep=" ")
chars <- unique(c(links$id1,links$id2))
mat <- matrix(data = 0,nrow=length(chars),ncol=length(chars),dimnames = list(chars,chars))
for(i in 1:nrow(links)){
mat[links[i,1],links[i,2]]<-links[i,3]
mat[links[i,2],links[i,1]]<-links[i,3]
}
d <- dist(mat) # euclidean distances between the rows
fit <- cmdscale(d,eig=TRUE, k=2) # k is the number of dim
# plot solution
x <- fit$points[,1]
y <- fit$points[,2]
jpeg(paste0("TLit/www/output/1000/",gsub("_socialnetwork_links.csv","",gsub("TLit/www/output/1000//","",net)),"_mds_social_network.jpg"))
plot(x, y, xlab="x", ylab="y",pch='.')
text(x, y, labels = chars, cex=.5, pos = 4)
dev.off()
}
|
5c51f243589c1b3009ac0dec7fba8fefaa58fcd2 | 34a1ec578fe8a27cca9f3e6dd4b8218ff5a7d11b | /R/recode_grid.R | 66cf10000c0f04448ba08c8fca8194d436b96c97 | [
"MIT"
] | permissive | electorArg/polAr | 4c50e700b0452c99a4347160fd1b4f70806ab91f | e8e8b88ed74a98eb17dce6e9c74edb2e62a00f2e | refs/heads/master | 2023-04-08T00:47:44.651990 | 2021-04-28T12:56:14 | 2021-04-28T12:56:14 | 256,862,665 | 10 | 2 | null | null | null | null | UTF-8 | R | false | false | 5,837 | r | recode_grid.R | #'Recodifica id asignados a provincias o departamentos de Argentina
#' (\emph{Recode Argentina's districs id})
#'@description
#'
#'Función que permite recodificar etiquetas para utilización de grillas de Argentina y de departamentos para los 24 distritos
#' subnacionales. Ello permite hacer mas sencilla la vinculación con bases de datos de diversas fuentes.
#'
#'@return Los valores por defecto en \code{\link{get_grid}} son \code{codprov} para provincia y \code{coddepto} para departamentos, respectivamente.
#' Estos corresponden a la codificación de los escrutinios provisorios de elecciones nacionales y se etiquetaron como \code{'indra'} .
#' Se puede optar por la nomenclautra de \code{'indec'}, con la familia \code{\*_censo}, para ambos niveles, o la de \code{'iso'} con \code{\*_iso} ,
#' para el nivel provincial.
#'
#'@details Respecto al origen de los datos se puede consultar la documentación de
#' \href{https://www.iso.org/obp/ui/#iso:code:3166:AR}{\code{ISO 3166-2} - International Organization for Standardization} y
#' del \href{https://www.iso.org/obp/ui/#iso:code:3166:AR}{\emph{INDEC}}.
#'
#'@details \code{codprov} y \code{coddepto} son las codificaciones de las bases de datos de \emph{INDRA}, empresa encargada por
#' muchos años de la tarea del escrutinio provisorio y utilizados en \href{https://electorarg.github.io/polAr/}{polAr}.
#'
#'@param type la variante del código que se quiere definir para la grilla. Las opciones son \code{'indra'}, \code{'indec'} o \code{'iso'}.
#'
#'@param data data.frame obtenido con \code{\link{get_grid}}. Las grillas disponibles
#' se pueden chequear con \code{\link{show_arg_codes}}.
#'
#'
#'
#' @examples
#'
#' get_grid("ARGENTINA")
#'
#'
#' get_grid("ARGENTINA") %>%
#' recode_grid(type = "iso")
#'
#'
#'@export
recode_grid<- function(data,
                       type = NULL){
  # Recode the `code` column of a grid returned by get_grid() into another
  # coding scheme.  Province-level grids (detected by the presence of
  # "CABA") accept "indra", "indec" or "iso"; department-level grids accept
  # "indra" or "indec".  See the roxygen block above for the schemes.

  # --- Input checks shared by both branches -------------------------------
  assertthat::assert_that(is.data.frame(data),
                          msg = glue::glue("{data} debe ser un 'data.frame' obtenido con la funcion get_grid()"))
  assertthat::assert_that(dim(data)[2] == 5,
                          msg = glue::glue("{data} debe ser un 'data.frame' obtenido con la funcion get_grid()"))
  # `type` is checked once here; the original repeated this exact assertion
  # inside each branch, which was redundant.
  assertthat::assert_that(is.character(type),
                          msg = "type debe ser del tipo 'character'")

  if("CABA" %in% data$name){
    # --- Province-level grid ---------------------------------------------
    assertthat::assert_that(length(type) == 1,
                            msg = glue::glue("{type} no es una opcion valida para recodificar grillas de provincias.
                                             Debe elegir una opcion entre 'indra', 'indec' o 'iso'"))
    assertthat::assert_that(type %in% c("indra", "indec", "iso"),
                            msg = glue::glue("{type} no es una opcion valida ('indra', 'indec', 'iso')"))

    # Unique province-level code table (indra vs censo vs iso).
    full_codes <- full_geo_metadata %>%
      dplyr::select(codprov, codprov_censo, codprov_iso) %>%
      dplyr::distinct()

    if(type == "indec"){
      data %>%
        dplyr::left_join(full_codes, by = c("code" = "codprov")) %>%
        dplyr::select(2, 3, 5, code = codprov_censo)
    } else if(type == "iso"){
      data %>%
        dplyr::left_join(full_codes, by = c("code" = "codprov")) %>%
        dplyr::select(2, 3, 5, code = codprov_iso)
    } else{
      # type == "indra": grids are stored in this coding already.
      data
    }
  }else{
    # --- Department-level grid -------------------------------------------
    assertthat::assert_that(length(type) == 1,
                            msg = glue::glue("{type} no es una opcion valida para recodificar grillas de departamentos.
                                             Debe elegir una opcion entre 'indra'o 'indec'"))
    assertthat::assert_that(type %in% c("indra", "indec"),
                            msg = glue::glue("{type} no es una opcion valida para recodificar grillas de departamentos.
                                             Debe elegir una opcion entre 'indra'o 'indec'"))
    # Hack to identify which department grid is being recoded: build the
    # same unique id (code + whitespace-stripped name + province) on both
    # the metadata and the stored grids, then match on it.
    full_codes <- full_geo_metadata %>%
      dplyr::select(coddepto, nomdepto_censo,coddepto_censo, name_prov) %>%
      dplyr::mutate(nomdepto_censo = stringr::str_to_upper(nomdepto_censo),
                    id = paste0(coddepto, stringr::str_remove_all(nomdepto_censo, " "), name_prov))
    # All department grids, flattened, skipping the 24 province entries.
    grillas_depto_id <- grillas_geofacet %>%
      dplyr::bind_rows(.id = "name_prov") %>%
      tibble::as_tibble()%>%
      dplyr::slice(25:dim(.)[1]) %>%
      dplyr::mutate(code = stringr::str_pad(code, 3, "left", 0),
                    id = paste0(code, stringr::str_remove_all(name, " "), name_prov))
    # Match the incoming grid to its metadata rows (joins by all common
    # columns; dplyr will message which keys it used).
    filtro_provincia <- data %>%
      dplyr::left_join(grillas_depto_id) %>%
      dplyr::left_join(full_codes)
    filtro_id <- filtro_provincia %>%
      dplyr::pull(id)
    # The grid of interest, in the canonical column order.
    data <- grillas_depto_id %>%
      dplyr::filter(id %in% filtro_id) %>%
      dplyr::select(name, code, row, col)
    #######################################################################
    if(type == "indec"){
      # NOTE(review): this case_when relies on `data` and filtro_provincia
      # being row-aligned -- confirm ordering is stable after the joins.
      data %>%
        dplyr::mutate(code = dplyr::case_when(
          code == filtro_provincia$coddepto ~ filtro_provincia$coddepto_censo
        ))
    } else{
      data
    }
  }
}
b9102b90ec276c8061eba35873df37d2105fe714 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.customer.engagement/man/sesv2_test_render_email_template.Rd | d8203c97ef7928505cca2e5b1c175f122628a0de | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 919 | rd | sesv2_test_render_email_template.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sesv2_operations.R
\name{sesv2_test_render_email_template}
\alias{sesv2_test_render_email_template}
\title{Creates a preview of the MIME content of an email when provided with a
template and a set of replacement data}
\usage{
sesv2_test_render_email_template(TemplateName, TemplateData)
}
\arguments{
\item{TemplateName}{[required] The name of the template.}
\item{TemplateData}{[required] A list of replacement values to apply to the template. This parameter is
a JSON object, typically consisting of key-value pairs in which the keys
correspond to replacement tags in the email template.}
}
\description{
Creates a preview of the MIME content of an email when provided with a template and a set of replacement data.
See \url{https://www.paws-r-sdk.com/docs/sesv2_test_render_email_template/} for full documentation.
}
\keyword{internal}
|
25ff9effe35593a5d02a5a102e87feb477f76924 | e44a5584d57becf8db393c1fc35dda419e775d9e | /inst/examples/data.R | e1298dc99cc91ce382a5773083e01d7fcd73c03f | [] | no_license | duncantl/CGIwithR | 2c39fb18f304b5ad6802155730703f02692e8821 | e1afc34ced53eb6d638a7c1735222877ff97d20b | refs/heads/master | 2016-09-06T18:26:36.846291 | 2014-05-30T19:01:35 | 2014-05-30T19:01:35 | 20,338,698 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 479 | r | data.R | #
#
# Minimal CGI example: emit a Content-type other than text/html.
#
# The Content-type header has to be the very first output the script
# produces; when R.cgi sees one it skips adding its own header.
#
# The mtcars data set is then printed with no markup at all, so a
# client can parse the response body directly with functions such as
# read.table().
#
cat("Content-type: text/plain\n\n")
data("mtcars")
print(mtcars)
|
0c226f5307289ceabbb0b422dcef6a2dbf627a7c | 66503e3a888729d37f5796192602f2d684893563 | /man/member-if.Rd | b854711109e142ad5e3660ff71fa54b3e0a301c1 | [
"MIT"
] | permissive | wwbrannon/schemeR | 79e4c2791fc77cd25ac9fbcc63fff4161809c6b7 | 4089b074d8636b9ced2716ac84b74c1ef90c110d | refs/heads/master | 2021-04-28T22:01:09.946266 | 2017-01-26T10:40:13 | 2017-01-26T10:40:13 | 77,758,545 | 5 | 0 | null | 2017-01-19T03:58:23 | 2016-12-31T22:49:25 | R | UTF-8 | R | false | true | 1,453 | rd | member-if.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functional.R
\name{member-if}
\alias{member-if}
\alias{member.if}
\alias{member.if.not}
\title{Searching for tails of sequences}
\usage{
member.if(f, x, k = identity)
member.if.not(f, x, k = identity)
}
\arguments{
\item{f}{The filter predicate to use on the sequence x.}
\item{x}{The sequence to search for a satisfying element.}
\item{k}{The "key" function to pre-apply to elements of x. Defaults to
the identity function.}
}
\value{
The tail of the sequence \code{map(k, x)} beginning with the first
element that satisfies the predicate f, or NULL if no element did.
}
\description{
\code{member.if} and \code{member.if.not} search "sequences" (by which we
mean lists, other vectors or pairlists) for the first element satisfying
some predicate function, and return the sub-sequence beginning with that
element. \code{member.if.not} uses Negate(f) as its predicate.
}
\details{
The sequence searched is actually \code{\link{map}(k, x)} rather than x,
which makes it easier to avoid defining short anonymous functions.
}
\examples{
f <- 20:40
#The first element divisible by 3 and all afterward
member.if(is.zero, f, k=function(x) x \%\% 3) == 21:40
#Trimming by the presence of a sentinel value
member.if.not(function(x) x < 30, f) == 30:40
}
\seealso{
The functional-programming functions in base, especially \code{Filter},
under \code{\link[=Filter]{funprog}}.
}
|
42685f6ecbbf79d385f83608d6951b06469ada46 | 85f13d7969dfd03641a200efd19257947d5fe2a5 | /man/tween_states.Rd | 4c4579393d7e1b1464955ff33c33d33f0f879514 | [] | no_license | arturocm/tweenr | ae03a615d797f365cccb814f7e80909d80496e98 | 62ae8d9437c91c02a24e442ffc0705e9addf7473 | refs/heads/master | 2021-01-16T21:05:49.673910 | 2016-02-10T10:37:08 | 2016-02-10T10:37:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,628 | rd | tween_states.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tween_states.R
\name{tween_states}
\alias{tween_states}
\title{Tween a list of data.frames representing states}
\usage{
tween_states(data, tweenlength, statelength, ease, nframes)
}
\arguments{
\item{data}{A list of data.frames. Each data.frame must contain the same
number of rows, but only the first data.frame needs to contain all columns.
Subsequent data.frames need only contain the columns that shows change.}
\item{tweenlength}{The lengths of the transitions between each state.}
\item{statelength}{The length of the pause at each state.}
\item{ease}{The easing functions to use for the transitions. See details.}
\item{nframes}{The number of frames to generate. The actual number of frames
might end up being higher depending on the regularity of \code{tweenlength}
and \code{statelength}.}
}
\value{
A data.frame with the same columns as the first data.frame in
\code{data}, but replicated \code{nframes} times. An additional column called
\code{.frame} will be added giving the frame number.
}
\description{
This function is intended to create smooth transitions between states of
data. States are defined as full data.frames or data.frames containing only
the columns with change. Each state can have a defined period of pause, the
transition length between each states can be defined as well as the easing
function.
}
\details{
How transitions proceed between states are defined by an easing function. The
easing function converts the parameterized progression from one state to the
next to a new number between 0 and 1. \code{linear} easing is equivalent to
an identity function that returns the input unchanged. In addition there are
a range of additional easers available, each with three modifiers.
\strong{Easing modifiers:}
\describe{
\item{-in}{The easing function is applied as-is}
\item{-out}{The easing function is applied in reverse}
\item{-in-out}{The first half of the transition it is applied as-is, while
in the last half it is reversed}
}
\strong{Easing functions}
\describe{
\item{quadratic}{Models a power-of-2 function}
\item{cubic}{Models a power-of-3 function}
\item{quartic}{Models a power-of-4 function}
\item{quintic}{Models a power-of-5 function}
\item{sine}{Models a sine function}
\item{circular}{Models a pi/2 circle arc}
\item{exponential}{Models an exponential function}
\item{elastic}{Models an elastic release of energy}
\item{back}{Models a pullback and relase}
\item{bounce}{Models the bouncing of a ball}
}
A good visual explanation can be found \href{http://easings.net}{here}.
}
|
9e79583925c4ac6b2006b1a604dd96727007a621 | 405a4148597fe9b412680b3e479d7b1e328167ba | /Code/Evolution_No_Density_Dependence.R | 620f00358530148d4994334207269a802a5d4061 | [] | no_license | swalker1998/BIOL-3295 | 68bd49e4947166e247e572755c67daf8dcd3d42c | cfd145967c4f4fac6cab31496cfd050cbced4714 | refs/heads/master | 2020-09-03T04:25:29.993967 | 2019-10-31T12:50:50 | 2019-10-31T12:50:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 742 | r | Evolution_No_Density_Dependence.R | # EVOLUTION WITH NO DENSITY DEPENDENCE IN POPULATION DYNAMICS
# Remove all objects - always start with this
rm(list=ls())
# times
t = seq(0,100)
# intrinsic growth rates for genotype 1 and 2
r1 = 0.1
r2 = 0.2
# initial populatin size for genotypes 1 and 2
n10 = 1
n20 = 1
# population size for genotypes 1 and 2
n1 = n10*exp(r1*t)
n2 = n20*exp(r2*t)
par(mfrow = c(2,1), mar = c(4,4,1,1))
# Graph of the population sizes for genotypes 1 and 2
plot(t,n1, col = "red", typ="l", xlab = "time, t", ylab = "population size")
lines(t,n2, col = "blue")
# Graphs of the relative abundance for genotypes 1 and 2
plot(t,n1/(n1+n2), col = "red", typ="l", ylim = c(0,1), xlab = "time, t", ylab = "relative abundance")
lines(t,n2/(n1+n2), col = "blue") |
b90fb28f68052e6a1b6155dc1c623f3bd5d4792d | a732fbe391bad266b513ac2d932ce352bd25e619 | /TARA_scripts/read_counts.R | 44087f524007f917e1e6e59f900b3e7755303bfb | [] | no_license | alkaZeltser/garud_lab | c574dd9379ad7f2364306f0067ce87e15bed8f4f | 71164a8b0014cdf4b4841c7c9eb002211f948f05 | refs/heads/main | 2023-02-12T20:57:36.733060 | 2021-01-03T20:09:46 | 2021-01-03T20:09:46 | 319,797,197 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 105 | r | read_counts.R | setwd("/Users/nikaz/Downloads")
line_counts = read.table(file="counts.txt")
read_counts = line_counts/4
|
490813cb5fc418a1b8581190c3e6ff9523467210 | eb1667dd25aa5afc0ccbb1e5bc9bbd37f7177d65 | /scripts/brouillons/heatmaps_rplotly.R | ead3fb7dec96cce8dea246145ebf966efb552c24 | [] | no_license | Mataivic/AdaptSearch_visualization_tools | e44dc22b0595e2e983a5ca885715af83e4c6825a | 7e8cf9ad4c88bb94e9a7b395c9879af3161e7bf8 | refs/heads/master | 2020-03-18T19:27:33.914939 | 2018-07-31T11:52:50 | 2018-07-31T11:52:50 | 135,155,576 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,334 | r | heatmaps_rplotly.R | library(plotly)
library(ggplot2)
library(ggdendro)
setwd("~/Documents/Fork_AdaptSearch/adaptsearch/galaxy_wrappers/07_MutCount/test-data/OUT_concat")
data <- read.table("codons_freqs.csv", header=TRUE, dec=".", sep=",", row.names=1)
counts <- data[seq(1, nrow(data), 3),]
freqs <- data[seq(2, nrow(data), 3),]
substrLeft <- function(x, n){
sapply(x, function(xx)
substr(xx, 0, n)
)
}
row.names(freqs) <- substrLeft(row.names(freqs),2)
row.names(counts) <- substrLeft(row.names(counts),2)
dd.col <- as.dendrogram(hclust(dist(freqs)))
dd.row <- as.dendrogram(hclust(dist(t(freqs))))
dx <- dendro_data(dd.row)
dy <- dendro_data(dd.col)
# helper function for creating dendograms
ggdend <- function(df) {
ggplot() +
geom_segment(data = df, aes(x=x, y=y, xend=xend, yend=yend)) +
labs(x = "", y = "") + theme_minimal() +
theme(axis.text = element_blank(), axis.ticks = element_blank(),
panel.grid = element_blank())
}
# x/y dendograms
px <- ggdend(dx$segments)
py <- ggdend(dy$segments) + coord_flip()
px1 <- ggplot(segment(dx)) + geom_segment(aes(x=x, y=y, xend=xend, yend=yend))
#+ geom_text(data=label(dx), aes(label=label, x=x, y=0))
py2 <- ggplot(segment(dy)) + geom_segment(aes(x=x, y=y, xend=xend, yend=yend)) + coord_flip() + scale_y_reverse(expand=c(0.2, 0))
#+ geom_text(data=label(dy), aes(label=label, x=x, y=0))
# heatmap
col.ord <- order.dendrogram(dd.col)
row.ord <- order.dendrogram(dd.row)
xx <- scale(freqs)[col.ord, row.ord]
xx_names <- attr(xx, "dimnames")
df <- as.data.frame(xx)
colnames(df) <- xx_names[[2]]
df$spec <- xx_names[[1]]
df$spec <- with(df, factor(spec, levels=spec, ordered=TRUE))
mdf <- reshape2::melt(df, id.vars="spec")
p <- ggplot(mdf, aes(x = variable, y = spec)) + geom_tile(aes(fill = value)) +
scale_fill_distiller(palette = "Spectral")
# hide axis ticks and grid lines
eaxis <- list(
showticklabels = FALSE,
showgrid = FALSE,
zeroline = FALSE
)
p_empty <- plot_ly(filename="r-docs/dendrogram") %>%
# note that margin applies to entire plot, so we can
# add it here to make tick labels more readable
layout(margin = list(l = 200),
xaxis = eaxis,
yaxis = eaxis)
subplot(px1, p_empty, p, py2, nrows = 2, margin = 0.02, heights = c(0.25,0.75), widths=c(0.75,0.25))
subplot(py2, p, margin=0.03, widths=c(0.25,0.75))
|
2f20907f8bb4c7017049c8b5a7c78ab09865c1e0 | bdc8d86d43651127bc79602785377d6504599161 | /scripts/R/two_periods_compare.R | cd76ccbbbaf304b7705e411eb15ba271c1d0b0a5 | [] | no_license | drmarcogir/phenology | 5c002d779d7c482d505056b456e1c4684c740ed1 | 402ede6f9688818fa82a3c5a416957d9a0000179 | refs/heads/main | 2023-04-23T20:51:15.136883 | 2021-05-21T18:48:58 | 2021-05-21T18:48:58 | 312,090,652 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,258 | r | two_periods_compare.R | library(snow)
library()
st_read("/home/marco/Desktop/terr-ecoregions-TNC/tnc_terr_ecoregions.shp")->dat
phenochange<-raster("/home/marco/Desktop/dif8.tif")
unique(dat$WWF_MHTNAM) %>%
.[str_detect(.,"For|for")]->forlist
dat %>%
filter(WWF_MHTNAM %in% forlist) %>%
st_write(.,"tmp.shp",delete_layer=T)
###########################################################
# ---Mean results (difference between two time slices)
###########################################################
res<-NULL
for (i in 1:length(forlist)){
dat %>%
dplyr::filter(WWF_MHTNAM==forlist[i])->tmpbiome
st_write(tmpbiome,"tmp.shp",delete_layer = T)
system("v.in.ogr in=tmp.shp out=tmp --o")
system("g.region vector=tmp res=0.01")
system("v.to.rast in=tmp out=tmp use=cat --o")
system("r.mapcalc 'tmp1 = int(tmp/tmp)' --o")
system("r.mask raster=tmp1 maskcats=1")
system("g.region raster=dif8")
system("r.stats -nA dif8 > stats")
read.delim("stats",col.names = "value") %>%
pull(value) %>%
mean()->meanres
tibble(mean = meanres,biome = forlist[i])->tmp1
bind_rows(tmp1,res)->res
system("r.mask -r")
}
res %>%
arrange(mean) %>%
mutate(biome = factor(biome,levels = biome)) %>%
ggplot()+geom_bar(aes(x=mean,y=biome),stat = "identity",fill="brown3",color="black")+
theme_minimal()+theme(axis.title = element_text(size=20,face="bold"),
axis.text = element_text(size=15,colour = "black"))+
ylab("Biome")+xlab("Mean change")
###########################################################
# ---Original results for period 1 and period (ANOVA style)
###########################################################
res<-NULL
for (i in 1:length(forlist)){
system("rm -f stats")
dat %>%
dplyr::filter(WWF_MHTNAM==forlist[i])->tmpbiome
st_write(tmpbiome,"tmp.shp",delete_layer = T)
system("v.in.ogr in=tmp.shp out=tmp --o")
system("g.region vector=tmp res=0.01")
system("v.to.rast in=tmp out=tmp use=cat --o")
system("r.mapcalc 'tmp1 = int(tmp/tmp)' --o")
system("r.mask raster=tmp1 maskcats=1")
system("g.region raster=dif8")
# statistics for first period
system("r.stats -nA GUP_slice_8_sd > stats")
read.delim("stats",col.names = "value") %>%
mutate(period = "2001-2008",biome=forlist[i])->tmpres1
# statistics for second period
system("rm -f stats")
system("r.stats -nA GUP_slice2_8_sd2 > stats")
read.delim("stats",col.names = "value") %>%
mutate(period = "2009-2016",biome=forlist[i])->tmpres2
# store results
bind_rows(tmpres1,res)->res
bind_rows(tmpres2,res)->res
system("r.mask -r")
system("rm -f stats")
}
res %>%
ggplot()+geom_boxplot(aes(x=period,y=value))+facet_wrap(~biome)+
theme_minimal()+theme(axis.title = element_text(size=20,face="bold"),
axis.text = element_text(size=15,colour = "black"))
res %>%
as_tibble() %>%
mutate(period = factor(period,levels=c("2001-2008","2009-2016"))) %>% {.->>res1} %>%
group_by(biome) %>%
nest() %>%
mutate(mod = map(data,~lm(value~period,data=.)),tidied = map(mod, tidy)) %>%
unnest(tidied) %>%
filter(p.value < 0.05)
########################
# ---Everything together
########################
# statistics for first period
system("rm -f stats")
system("r.mask raster=natural_habs maskcats=4")
system("g.region raster=GUP_slice2_8_sd2")
system("r.stats -ngA GUP_slice2_8_sd2,GUP_slice_8_sd > stats")
read_delim("stats",col_names =FALSE,delim = " ") %>%
dplyr::select(X3,X4) %>%
rename(p2 = X3,p1=X4) %>%
gather(key="name",value="value") %>%
mutate(period = factor(name,levels = c("p1","p2")))->dat1
system("r.mask -r")
mod<-lm(value~period,data=dat1)
bind_rows(tmpres1,tmpres2)->dat1
system("r.in.gdal in=/home/marco/Desktop/WCMC_natural_modified_habitat_screening_layer/natural_modified_habitat_screening_layer.tif out=natural_habs")
system("r.mask raster=natural_habs maskcats=4")
read_delim("/media/marco/marcodata19/Phenology_original/tmp1/forcover1",delim=",",col_names = F) %>%
rename(cat=X1,area=X2) %>%
group_by(cat) %>%
summarise(area = sum(area))
as_tibble(rastertodf(raster("/mnt/data1tb/phenology/gridGEE/grid_pheno_grass.tif")))->dd
read_delim("/media/marco/marcodata19/Phenology_original/tmp1/forcover2",delim=",",col_names = F)
|
c5b8eac7f052d5814819ec191516c22f6d435fc9 | 39a60bfc5d613829f548285ea2e57432556f67d1 | /conf/install-2021-packages-1.R | b389623d66d9ca1149d92a12aba38b8085e024f8 | [
"MIT"
] | permissive | marqueslarissa/docker-rstudio | 3e0c5dadbe2be7dbc82bd4f1d97f71a609135802 | decf1bf4467a678fd0465b02a567c693a481dfcb | refs/heads/main | 2023-03-10T11:39:51.611172 | 2021-03-01T20:35:45 | 2021-03-01T20:35:45 | 342,003,812 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 739 | r | install-2021-packages-1.R | r <- getOption("repos")
r["CRAN"] <- "http://cran.r-project.org"
options(repos=r)
utils::install.packages("shinythemes", dependencies=TRUE )
utils::install.packages("shiny.semantic", dependencies=TRUE )
utils::install.packages("shinymaterial", dependencies=TRUE )
utils::install.packages("shinydashboard", dependencies=TRUE )
utils::install.packages("shinyWidgets", dependencies=TRUE )
utils::install.packages("shinydashboardPlus", dependencies=TRUE )
utils::install.packages("shinybusy", dependencies=TRUE )
utils::install.packages("shinystan", dependencies=TRUE )
utils::install.packages("yonder", dependencies=TRUE )
utils::install.packages("pool", dependencies=TRUE )
# for more https://github.com/nanxstats/awesome-shiny-extensions
|
cc0d3889803a34a2f2d4ada9b86d7672c5b6b78d | 50d1dab9a35ff494456b4aca2fd5ab0c6e73c63d | /run_analysis.R | eafb89a5a979cb8e5b73174343d4bf56d25c3708 | [] | no_license | samchow/GettingCleaningData | 3279990d1a5abd6f9aaddc2cab80cedec16123c2 | bef854b766c28498dc96d7271492db283533f5e1 | refs/heads/master | 2020-05-05T00:28:57.205397 | 2015-02-21T00:10:42 | 2015-02-21T00:10:42 | 29,845,789 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,146 | r | run_analysis.R | # Setup. Not we assume you are in the UCI HAR Dataset directory
###### Common Routines
renameCols <- function(cols) {
x <- NULL
x <- gsub("[[:punct:]]", "", cols)
x <- gsub("^t", "", x)
x <- gsub("^f", "Freq", x)
x <- gsub("([[:alpha:]]+)mean([[:alpha:]])", "avg\\1.\\2", x)
x <- gsub("([[:alpha:]]+)mean", "avg\\1", x)
x <- gsub("([[:alpha:]]+)std([[:alpha:]])", "sdev\\1.\\2", x)
x <- gsub("([[:alpha:]]+)std", "sdev\\1", x)
x <- gsub("Mag", "Magnitude", x)
x <- gsub("Acc", "Accel", x)
x
}
############### Getting Meta Info #####
## Get the meta information
# Read in the complete 561 features
features <- read.table("features.txt",
col.names=c("index", "label"))
# Only interested in the mean() and std()
sel_features <- features[grep("(mean|std)\\(", features$label), ]
sel_labels <- renameCols(sel_features$label)
# Read in activity labels
activities <- read.table("activity_labels.txt",
col.names=c("activity.code", "activity"))
################ Read in test data
test_data <- read.table("test/X_test.txt")
# filter out unneeded cols
sel_test_data <- test_data[, sel_features$index]
colnames(sel_test_data) <- sel_labels
# get test subject
temp <- read.table("test/subject_test.txt", col.names="subject")
sel_test_data <- cbind(sel_test_data, temp)
# get activity
temp <- read.table("test/y_test.txt", col.names="activity.code")
sel_test_data <- cbind(sel_test_data, temp)
##############Read in train data
train_data <- read.table("train/X_train.txt")
# filter oun uneeded col
sel_train_data <- train_data[, sel_features$index]
colnames(sel_train_data) <- sel_labels
# get subject data
temp <- read.table("train/subject_train.txt", col.names="subject")
sel_train_data <- cbind(sel_train_data, temp)
# get activity data
temp <- read.table("train/y_train.txt", col.names="activity.code")
sel_train_data <- cbind(sel_train_data, temp)
## Combine the 2 data fame
sel_data1 <- rbind(sel_train_data, sel_test_data)
sel_data2 <- merge(sel_data1, activities, by="activity.code")
################## Now create final tiny_data
library(dplyr)
sel_sum <- group_by(sel_data2, subject, activity)
tiny_data <- summarize(sel_sum,
avgBodyAccel.X = mean(avgBodyAccel.X),
avgBodyAccel.Y = mean(avgBodyAccel.X),
avgBodyAccel.Z = mean(avgBodyAccel.Z),
sdevBodyAccel.X = mean(sdevBodyAccel.X),
sdevBodyAccel.Y = mean(sdevBodyAccel.Y),
sdevBodyAccel.Z = mean(sdevBodyAccel.Z),
avgGravityAccel.X = mean(avgGravityAccel.X),
avgGravityAccel.Y = mean(avgGravityAccel.Y),
avgGravityAccel.Z = mean(avgGravityAccel.Z),
sdevGravityAccel.X = mean(sdevGravityAccel.X),
sdevGravityAccel.Y = mean(sdevGravityAccel.Y),
sdevGravityAccel.Z = mean(sdevGravityAccel.Z),
avgBodyAccelJerk.X = mean(avgBodyAccelJerk.X),
avgBodyAccelJerk.Y = mean(avgBodyAccelJerk.Y),
avgBodyAccelJerk.Z = mean(avgBodyAccelJerk.Z),
sdevBodyAccelJerk.X = mean(sdevBodyAccelJerk.X),
sdevBodyAccelJerk.Y = mean(sdevBodyAccelJerk.Y),
sdevBodyAccelJerk.Z = mean(sdevBodyAccelJerk.Z),
avgBodyGyro.X = mean(avgBodyGyro.X),
avgBodyGyro.Y = mean(avgBodyGyro.Y),
avgBodyGyro.Z = mean(avgBodyGyro.Z),
sdevBodyGyro.X = mean(sdevBodyGyro.X),
sdevBodyGyro.Y = mean(sdevBodyGyro.Y),
sdevBodyGyro.Z = mean(sdevBodyGyro.Z),
avgBodyGyroJerk.X = mean(avgBodyGyroJerk.X),
avgBodyGyroJerk.Y = mean(avgBodyGyroJerk.Y),
avgBodyGyroJerk.Z = mean(avgBodyGyroJerk.Z),
sdevBodyGyroJerk.X = mean(sdevBodyGyroJerk.X),
sdevBodyGyroJerk.Y = mean(sdevBodyGyroJerk.Y),
sdevBodyGyroJerk.Z = mean(sdevBodyGyroJerk.Z),
avgBodyAccelMagnitude = mean(avgBodyAccelMagnitude),
sdevBodyAccelMagnitude = mean(sdevBodyAccelMagnitude),
avgGravityAccelMagnitude = mean(avgGravityAccelMagnitude),
sdevGravityAccelMagnitude = mean(sdevGravityAccelMagnitude),
avgBodyAccelJerkMagnitude = mean(avgBodyAccelJerkMagnitude),
sdevBodyAccelJerkMagnitude = mean(sdevBodyAccelJerkMagnitude),
avgBodyGyroMagnitude = mean(avgBodyGyroMagnitude),
sdevBodyGyroMagnitude = mean(sdevBodyGyroMagnitude),
avgBodyGyroJerkMagnitude = mean(avgBodyGyroJerkMagnitude),
sdevBodyGyroJerkMagnitude = mean(sdevBodyGyroJerkMagnitude),
avgFreqBodyAccel.X = mean(avgFreqBodyAccel.X),
avgFreqBodyAccel.Y = mean(avgFreqBodyAccel.Y),
avgFreqBodyAccel.Z = mean(avgFreqBodyAccel.Z),
sdevFreqBodyAccel.X = mean(sdevFreqBodyAccel.X),
sdevFreqBodyAccel.Y = mean(sdevFreqBodyAccel.Y),
sdevFreqBodyAccel.Z = mean(sdevFreqBodyAccel.Z),
avgFreqBodyAccelJerk.X = mean(avgFreqBodyAccelJerk.X),
avgFreqBodyAccelJerk.Y = mean(avgFreqBodyAccelJerk.Y),
avgFreqBodyAccelJerk.Z = mean(avgFreqBodyAccelJerk.Z),
sdevFreqBodyAccelJerk.X = mean(sdevFreqBodyAccelJerk.X),
sdevFreqBodyAccelJerk.Y = mean(sdevFreqBodyAccelJerk.Y),
sdevFreqBodyAccelJerk.Z = mean(sdevFreqBodyAccelJerk.Z),
avgFreqBodyGyro.X = mean(avgFreqBodyGyro.X),
avgFreqBodyGyro.Y = mean(avgFreqBodyGyro.Y),
avgFreqBodyGyro.Z = mean(avgFreqBodyGyro.Z),
sdevFreqBodyGyro.X = mean(sdevFreqBodyGyro.X),
sdevFreqBodyGyro.Y = mean(sdevFreqBodyGyro.Y),
sdevFreqBodyGyro.Z = mean(sdevFreqBodyGyro.Z),
avgFreqBodyAccelMagnitude = mean(avgFreqBodyAccelMagnitude),
sdevFreqBodyAccelMagnitude = mean(sdevFreqBodyAccelMagnitude),
avgFreqBodyBodyAccelJerkMagnitude = mean(avgFreqBodyBodyAccelJerkMagnitude),
sdevFreqBodyBodyAccelJerkMagnitude = mean(sdevFreqBodyBodyAccelJerkMagnitude),
avgFreqBodyBodyGyroMagnitude = mean(avgFreqBodyBodyGyroMagnitude),
sdevFreqBodyBodyGyroMagnitude = mean(sdevFreqBodyBodyGyroMagnitude),
avgFreqBodyBodyGyroJerkMagnitude = mean(avgFreqBodyBodyGyroJerkMagnitude),
sdevFreqBodyBodyGyroJerkMagnitude = mean(sdevFreqBodyBodyGyroJerkMagnitude)
)
write.table(tiny_data, file="tiny_data.txt", row.names=FALSE) |
ceab68737590369e4647898198e01c9d202dfc04 | 46cd1c7bb79fb7f1abb2dd029f05294df5042d46 | /warm_exposure.R | e522cc71e8616cebfd1f79ac28beeed5a1462482 | [] | no_license | jtokolyi/Hydra_oligactis_SexSeason | df40d649241e5d20b09af35506698c3f300e8710 | b8305d881a2520b5a94aecc0bb329212fca417eb | refs/heads/main | 2023-04-11T03:05:35.960088 | 2021-04-09T04:09:19 | 2021-04-09T04:09:19 | 356,116,460 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,207 | r | warm_exposure.R | library(readxl); library(ggplot2)
x=as.data.frame(read_excel("Warm_exposure2.xlsx",col_types=c("text","date","text","text","text","text","date","text","text")))
x <- x[-which(x$Final_condition=="dead"),]
x$Sex <- ifelse(grepl("C2/7",x$ID),"Male strain","Female strain")
x$Lineage <- sapply(lapply(strsplit(x$ID,"-"), "[", 1:2),paste,collapse="-")
x$repr.mode.binary <- ifelse(x$Final_condition=="asexual", 0, 1)
x$ExpGroup <- factor(x$ExpGroup, levels=c("control", "warmexposed_1week", "warmexposed_4weeks"))
tiff("warm_exposure.tif",width=6, height=6, units="in",res=600,compression="lzw")
ggplot(x, aes(fill=Final_condition, x=ExpGroup)) + geom_bar(color="black") + facet_wrap(~Sex)+
theme_classic() +
scale_fill_manual(labels=c("Sexual","Asexual"),breaks=c("sexual","asexual"),values=c("white", "grey"))+
labs(fill="Reproductive mode")+
theme(strip.background = element_blank(), strip.text.x=element_text(size=20, face="bold", colour="red"),
text=element_text(size=15),legend.position="top")+
ylab("No. polyps")+xlab("Warm exposure") + scale_x_discrete(labels=c("None", "1 week","4 weeks"))
dev.off()
fisher.test(table(x$Final_condition[x$Sex=="Male strain"], x$ExpGroup[x$Sex=="Male strain"])) # P<0.001
fisher.test(table(x$Final_condition[x$Sex=="Female strain"], x$ExpGroup[x$Sex=="Female strain"])) # P<0.001
length(as.Date(x$Final_date[x$Sex=="Female strain" & x$Final_condition=="sexual" & x$ExpGroup=="warmexposed_1week"]) - as.Date("2019-07-15"))
median(as.Date(x$Final_date[x$Sex=="Female strain" & x$Final_condition=="sexual" & x$ExpGroup=="warmexposed_1week"]) - as.Date("2019-07-15"))
median(as.Date(x$Final_date[x$Sex=="Male strain" & x$Final_condition=="sexual" & x$ExpGroup=="warmexposed_4weeks"]) - as.Date("2019-08-06"))
length(as.Date(x$Final_date[x$Sex=="Male strain" & x$Final_condition=="sexual" & x$ExpGroup=="warmexposed_4weeks"]) - as.Date("2019-08-06"))
median(as.Date(x$Final_date[x$Sex=="Female strain" & x$Final_condition=="sexual" & x$ExpGroup=="warmexposed_4weeks"]) - as.Date("2019-08-06"))
length(as.Date(x$Final_date[x$Sex=="Female strain" & x$Final_condition=="sexual" & x$ExpGroup=="warmexposed_4weeks"]) - as.Date("2019-08-06"))
|
74f72c63d7173a727839a8baf78fc1547051a971 | 1bfadb58b266929c359a1a4c3afd9f9ce694228d | /man/internal2.Rd | 46f996c3f29a2c1836b804c4a33dec20a7ecaa4e | [] | no_license | adeckmyn/maps | 25c3396c94adfb70262449f29f1fd77ff9583f29 | dc1eefb14d155fca33ace2333c490f5fd52eeb34 | refs/heads/master | 2022-11-11T17:56:03.194633 | 2022-11-01T10:28:10 | 2022-11-01T10:28:10 | 40,031,227 | 29 | 10 | null | 2021-09-24T14:20:18 | 2015-08-01T01:27:16 | R | UTF-8 | R | false | false | 1,196 | rd | internal2.Rd | \name{internal2}
\alias{internal2}
\alias{char.to.ascii}
\alias{is.regexp}
\alias{indicators.factor}
\alias{insert}
\alias{match.map.slow}
\alias{match.map.grep}
\alias{map.poly}
\alias{map.wrap}
\alias{map.wrap.poly}
\alias{map.clip.poly}
\alias{subgroup}
\alias{gp.smooth}
\alias{kernel.smooth}
\alias{kernel.region.region}
\alias{kernel.region.x}
\title{Internally Required Functions}
\usage{
char.to.ascii(s)
is.regexp(s)
indicators.factor(y)
insert(x, i, v)
match.map.slow(nam, regions, warn = FALSE)
match.map.grep(nam, regions, warn = FALSE)
map.poly(database, regions = ".", exact = FALSE, xlim = NULL, ylim = NULL,
boundary = TRUE, interior = TRUE, fill = FALSE, as.polygon = FALSE,
namefield="name")
map.wrap(p, xlim=NULL)
map.wrap.poly(data, xlim, poly = FALSE, antarctica = -89.5)
map.clip.poly(data, xlim = c(NA, NA), ylim = c(NA, NA), poly = FALSE)
subgroup(x, i)
gp.smooth(x, z, xo, lambda, r)
kernel.smooth(x, z, xo, lambda, region = NULL, normalize = TRUE)
kernel.region.region(x, region, lambda)
kernel.region.x(x, region, z, lambda)
}
\description{
These functions are called internally and will generally not be
required by the user.
}
\keyword{internal}
|
47943433acfe0ce0ec46a8e55e24bc5d43d8c722 | 3c110a8bd8c4523c0155ad9a691ffa33f3d77f6c | /AA.IDT/R/switch.value.R | 715d9da712d310542aa477e75ab428eaf1b00059 | [] | no_license | rodrieja/R-Projects | 93c17c2203530dc4c162a9da261ec4b0857f8337 | 17e64e69952b759e2dcb611056f6eb0c98a118e4 | refs/heads/master | 2020-03-14T08:49:24.141062 | 2018-06-30T20:20:39 | 2018-06-30T20:20:39 | 131,533,286 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 279 | r | switch.value.R | switch.value <- function(x) {
cls.values = levels(x)
if (length(x) > 0) {
for (idx in 1:length(x)) {
if (x[idx] == cls.values[1]) {
x[idx] = cls.values[2]
}
else {
x[idx] = cls.values[1]
}
}
}
return(x)
}
|
fb04bf0472a1231017eec39647833468208f0c26 | 2357c463919a61fa1250efb2fc71c8c8fb04cf68 | /thesis/code/R/rmongodb/doc/rmongodb_introduction.R | 4c318496aa683e50a667d903f451a802da864eda | [] | no_license | tnat1031/thesis | 0f960740590e9661211d9a95d9513ec8d584dbaf | bf05cf661c2fbe77fc06f5417a8bd8afe25cde2d | refs/heads/master | 2021-01-21T12:49:56.035890 | 2018-01-11T18:39:05 | 2018-01-11T18:39:05 | 13,260,116 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,379 | r | rmongodb_introduction.R |
## ----installRmongodb, eval=FALSE-----------------------------------------
## install.packages("rmongodb")
## ----installDEV, eval=FALSE----------------------------------------------
## library(devtools)
## install_github("rmongodb", "mongosoup")
## ----loadRmongodb--------------------------------------------------------
library(rmongodb)
## ----connect2Mongo-------------------------------------------------------
help("mongo.create")
mongo <- mongo.create()
mongo
mongo.is.connected(mongo)
## ----importZIPdata, echo=FALSE, warning=FALSE, results='hide'------------
if(mongo.is.connected(mongo) == TRUE){
# load some data
library(jsonlite)
data(zips)
# rename _id field. The original zips data set holds duplicate _id values which will fale during the import
colnames(zips)[5] <- "orig_id"
ziplist <- list()
ziplist <- apply( zips, 1, function(x) c( ziplist, x ) )
res <- lapply( ziplist, function(x) mongo.bson.from.list(x) )
mongo.insert.batch(mongo, "rmongodb.zips", res )
}
## ----getDBs--------------------------------------------------------------
if(mongo.is.connected(mongo) == TRUE){
mongo.get.databases(mongo)
}
## ----getColls------------------------------------------------------------
if(mongo.is.connected(mongo) == TRUE){
db <- "rmongodb"
mongo.get.database.collections(mongo, db)
}
coll <- "rmongodb.zips"
## ----count, echo=TRUE----------------------------------------------------
if(mongo.is.connected(mongo) == TRUE){
help("mongo.count")
mongo.count(mongo, coll)
}
## ----findOneFirst, echo=TRUE---------------------------------------------
if(mongo.is.connected(mongo) == TRUE){
mongo.find.one(mongo, coll)
}
## ----Distinct, echo=TRUE-------------------------------------------------
if(mongo.is.connected(mongo) == TRUE){
res <- mongo.distinct(mongo, coll, "city")
head( res )
}
## ----findOne-------------------------------------------------------------
if(mongo.is.connected(mongo) == TRUE){
cityone <- mongo.find.one(mongo, coll, '{"city":"COLORADO CITY"}')
print( cityone )
mongo.bson.to.list(cityone)
}
## ----createBSON----------------------------------------------------------
buf <- mongo.bson.buffer.create()
mongo.bson.buffer.append(buf, "city", "COLORADO CITY")
query <- mongo.bson.from.buffer(buf)
query
## ----createBSONoneLine---------------------------------------------------
mongo.bson.from.JSON('{"city":"COLORADO CITY"}')
## ----findMore, warning=FALSE---------------------------------------------
if(mongo.is.connected(mongo) == TRUE){
pop <- mongo.distinct(mongo, coll, "pop")
hist(pop)
boxplot(pop)
nr <- mongo.count(mongo, coll, '{"pop":{"$lte":2}}')
print( nr )
pops <- mongo.find.all(mongo, coll, '{"pop":{"$lte":2}}')
print( dim(pops) )
head(pops)
}
## ----compleyQuery--------------------------------------------------------
library(jsonlite)
json <- '{"pop":{"$lte":2}, "pop":{"$gte":1}}'
cat(prettify(json))
validate(json)
if(mongo.is.connected(mongo) == TRUE){
pops <- mongo.find.all(mongo, coll, json)
print( dim(pops) )
head(pops)
}
## ----inefficient---------------------------------------------------------
mongo.cursor.to.data.frame
## ----insert--------------------------------------------------------------
# insert data
a <- mongo.bson.from.JSON( '{"ident":"a", "name":"Markus", "age":33}' )
b <- mongo.bson.from.JSON( '{"ident":"b", "name":"MongoSoup", "age":1}' )
c <- mongo.bson.from.JSON( '{"ident":"c", "name":"UseR", "age":18}' )
if(mongo.is.connected(mongo) == TRUE){
icoll <- paste(db, "test", sep=".")
mongo.insert.batch(mongo, icoll, list(a,b,c) )
dbs <- mongo.get.database.collections(mongo, db)
print(dbs)
mongo.find.all(mongo, icoll)
}
## ----update--------------------------------------------------------------
if(mongo.is.connected(mongo) == TRUE){
mongo.update(mongo, icoll, '{"ident":"b"}', '{"$inc":{"age":3}}' )
res <- mongo.find.all(mongo, icoll)
print(res)
# Creating an index for the field 'ident'
mongo.index.create(mongo, icoll, '{"ident":1}')
# check mongoshell!
}
## ----dropColls-----------------------------------------------------------
if(mongo.is.connected(mongo) == TRUE){
mongo.drop(mongo, icoll)
mongo.drop.database(mongo, db)
res <- mongo.get.database.collections(mongo, db)
print(res)
# close connection
mongo.destroy(mongo)
}
|
ca32e4c4bab0cac4f47a02be5d147da64a18a801 | 8760ee3228bc3d5642e819b3ae8eb7a1cef0be01 | /plot1.R | caa2f8494d9395407d2cfa476ff4731ffc7f587c | [] | no_license | EviBak/ExData_Plotting1 | a560e6fc71a55b194140751c1551b9d8d75aefd4 | 1a58f27df7ea211f09a1c53a6fb3d9965ec47740 | refs/heads/master | 2021-01-21T23:33:44.413113 | 2014-12-04T08:18:41 | 2014-12-04T08:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 407 | r | plot1.R | hh1 <- read.csv2.sql("household_power_consumption.txt", sql = "select * from file
where Date = '1/2/2007'")
hh2 <- read.csv2.sql("household_power_consumption.txt", sql="select * from file
where Date ='2/2/2007'")
par(cex = 0.8)
pl <- rbind(hh1, hh2)
hist(pl$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)",
main = "Global Active Power") |
70e27772483e17c15e5506895e098ed1dbaeabc2 | 00f097cc4f660d0b668da99a86afa5e0e4d6746f | /estimator/search_tau_lm_huber.R | 4da8bd44f85b9563dd5a14d9d8d32073c330479f | [] | no_license | didi10384/Homogeneity-structure-learning-in-panel-data | 52d03db5cf0e3f640fd855f7a2b7162d149cfec1 | f270a36a510e3a1fb595886ee3bc1a33395b4b85 | refs/heads/master | 2020-09-25T05:30:25.857674 | 2019-12-04T20:44:29 | 2019-12-04T20:44:29 | 225,928,143 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,029 | r | search_tau_lm_huber.R | ################ search tau for best huber regression #######################
# search for best tau producing best huber mean for a list of input
# input: x (vector), search method, options
search.tau.lm.huber = function(X,Y,fit.intcp=TRUE,
opt=list(method='default')){
if (opt$method == 'default') {
min_tau = mad(lm(Y~X)$residuals)/2
tau.set = seq(log(min_tau),log(max(Y)-min(Y)),by=0.1)
tau.set = exp(tau.set)
return( search.tau.lm.huber(X,Y,fit.intcp=fit.intcp,
opt=list(method='set',tau_set=tau.set)))
}
if(opt$method == 'set') {
tau.set = opt$tau_set
scores = vector(mode='numeric',length=length(tau.set))
for (i in 1:length(tau.set))
scores[i] = cv.lm.huber(X,Y,tau.set[i],fit.intcp=fit.intcp,
nfold=5, nrepeat=1)
return(max(tau.set[min(scores)==scores]))
}
}
#############################################################################
|
f9da063e87174ae05765559edd8213a9c54d2c5f | 27e977f1403330f29cba2f820f5edcadf53f2d16 | /extract.r | 6f401988a86ac23f2d6fcd968da4cb069001920e | [
"MIT"
] | permissive | san123i/ms-project-607_1 | 591ac7ec5f60688a0b241ea58f219f073100e6f6 | 3a2fd23969d7a82b385fcbd92cee989824399225 | refs/heads/master | 2020-04-23T11:57:35.349249 | 2019-02-15T03:37:33 | 2019-02-15T03:37:33 | 171,153,626 | 0 | 0 | null | 2019-02-17T17:57:27 | 2019-02-17T17:57:27 | null | UTF-8 | R | false | false | 1,178 | r | extract.r |
# Skip the 1st row "----------"
# Read the data
cd2 <-read_lines("tournamentinfo.txt",skip = 1)
cd2
# Repalce all "------" by NA
# Read only data apart from header
cd2_data <- unlist(str_replace_all(cd2,"(-{89})","NA"))[-c(1:3)]
cd2_head <- unlist(str_replace_all(cd2,"(-{89})","NA"))[c(1:2)]
# Creating a smple to work with small data
tempdata <- unlist(str_replace_all(cd2,"(-{89})","NA"))[c(4:5,7:8)]
#player -ID
playerID <- trimws(unlist(str_extract_all(cd2_data," \\d{1,2} ")))
head(playerID)
# [1] " 1 | GARY HUA |6.0 |W 39|W 21|W 18|W 14|W 7|D 12|D 4|"
# [2] " ON | 15445895 / R: 1794 ->1817 |N:2 |W |B |W |B |W |B |W |"
# [3] " 2 | DAKSHESH DARURI |6.0 |W 63|W 58|L 4|W 17|W 16|W 20|W 7|"
# [4] " MI | 14598900 / R: 1553 ->1663 |N:2 |B |W |B |W |B |W |B |"
tempFull_Data <- trimws(unlist(str_extract_all(cd2_data,"[[:alpha:] ?]{2,}|[\\d.?\\d? +]{2,}|[\\d +]{1,2}|[\\w:?\\d? +]{2,}",simplify = TRUE)))
tempFull_Data <- as.data.frame(tempFull_Data)
tempFull_Data <- tempFull_Data[which(!tempFull_Data$V1=="NA"),]
View(tempFull_Data)
|
f01722d20aeab6d35fe0a3d373c529a3f1857d9a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/etable/examples/corr_p_cell.Rd.R | ab22221bc99b67903b2a6590e2cbf105746771f2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 598 | r | corr_p_cell.Rd.R | library(etable)
### Name: corr_p_cell
### Title: Correlation Cell FUN
### Aliases: corr_p_cell
### Keywords: correlation
### ** Examples
sex <- factor(rbinom(1000, 1, 0.4), labels=c('Men', 'Women'))
height <- rnorm(1000, mean=1.70, sd=0.1)
weight <- rnorm(1000, mean=70, sd=5)
bmi <- weight/height^2
d<-data.frame(sex, bmi, height, weight)
tabular.ade(x_vars=c('bmi','height','weight'), xname=c('BMI','Height','Weight'),
y_vars=c('bmi','height','weight'), yname=c('BMI','Height','Weight'),
rows=c('sex','ALL'), rnames=c('Gender'), data=d, FUN=corr_p_cell)
|
85d8999e8c97e897111d9fe2d1fb9e4649f01300 | f3ba5c556dfc50ca1bce1c0dfe5b4cee5e3d3363 | /R/CORRECTLY_SPELLED_WORDS_CASE_SENSITIVE.R | 09d7e96ae1173719b5a0095a6f365b03ea418d22 | [] | no_license | HughParsonage/TeXCheckR | 48b9ae8f94b2801f984a66e9f3ecb6c7f1f831f4 | 09826878215cf56bc24a7e273084bfda3954a73b | refs/heads/master | 2023-02-22T10:02:17.707587 | 2023-02-10T09:30:48 | 2023-02-10T09:30:48 | 87,679,432 | 8 | 2 | null | 2020-09-18T04:35:17 | 2017-04-09T03:12:11 | TeX | UTF-8 | R | false | false | 202 | r | CORRECTLY_SPELLED_WORDS_CASE_SENSITIVE.R | #' List of correctly spelled, case-sensitive words
#' @format A character vector of words as perl-regex case-sensitive patterns to skip during the spell check.
"CORRECTLY_SPELLED_WORDS_CASE_SENSITIVE"
|
c51943e7551cd997126fb419d871a3a053182b5c | d7c974e8fe4ec9c4283bb89c733dcfee764bcc37 | /man/rb.kpix.errors.Rd | f2dfd8c1c69f4b34ee3166f366cf790afea4bb38 | [] | no_license | cran/sifds | 2f1de8dc1dca624e8fc50993a6787b12c2e1b56b | de37cdf8e6a7fe29208dcd40339c28f80bcd53f9 | refs/heads/master | 2021-01-02T22:51:15.766594 | 2010-08-23T00:00:00 | 2010-08-23T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 785 | rd | rb.kpix.errors.Rd | \name{rb.kpix.errors}
\alias{rb.kpix.errors}
\title{Generates forecast errors for the RB forecasts for KPIX}
\description{Generates a sequence of 25 forecast errors 1999:Q2-2005:Q2 for the RB forecasts for KPIX for a forecast horizon 1-25 months ahead. The default horizon is one month.}
\usage{rb.kpix.errors(horizon=1)}
\arguments{
\item{horizon}{An integer within the feasible forecast horizon 1-25 months. If the forecast horizon is infeasible, no output is produced and a warning is issued.}
}
\value{A time series object with 25 values, starting date 1999:Q2 and frequency 4.}
\author{Michael Lundholm}
\keyword{ sifds }
\examples{
# Load the sifds
data(sifds)
# Produce one month ahead forecast errors
rb.kpix.errors()
# Produce 12 months ahead forecast errors
rb.kpix.errors(12)
}
|
22feb0f46595a1540244fd1a894e49338bbd09fe | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/dwapi/examples/upload_data_frame.Rd.R | c93566840ac721b8f93e1619d3deffb99dd0ca73 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 332 | r | upload_data_frame.Rd.R | library(dwapi)
### Name: upload_data_frame
### Title: Upload a data frame as a file to a dataset.
### Aliases: upload_data_frame
### ** Examples
df = data.frame(a = c(1,2,3),b = c(4,5,6))
## Not run:
##D dwapi::upload_data_frame(file_name = 'sample.csv',
##D data_frame = df, dataset = 'user/dataset')
## End(Not run)
|
7a4683a3f9ebd82dadc4a3982e923a9e3b73f26d | 0bc7b27b4ecdf338211f763915e498afbd076f19 | /man/Depositos.Rd | fbefcf027843f1853bccb07e8387c8158bee962f | [] | no_license | cran/RcmdrPlugin.TeachStat | f42fd6b05a5e351d3f77e7204daabeae93bc93f1 | 702e87f2c3e6e7036a50d547f529f20ea915d369 | refs/heads/master | 2022-08-01T00:58:27.010966 | 2022-06-22T11:00:02 | 2022-06-22T11:00:02 | 162,720,733 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,358 | rd | Depositos.Rd | \name{Depositos}
\alias{Depositos}
\docType{data}
\title{
Deposits with credit institutions in Ourense
}
\description{
Private sector deposits (in millions of euro) with credit institutions in the province of Ourense (Spain) in 2002-2018.
}
\usage{data("Depositos")}
\format{
A data frame with 17 observations on the following 4 variables.
\describe{
\item{\code{year}}{a factor, year}
\item{\code{quantity}}{a numeric vector, deposit (in millions of euro) with credit institutions}
\item{\code{E_IPC_2016}}{a numeric vector, Consumer Price Index (CPI) with base 2016 in Spain}
\item{\code{G_IPC_2016}}{a numeric vector, Consumer Price Index (CPI) with base 2016 in Galicia}
}
}
\source{
Galician Institute of Statistics (2019):
- \url{http://www.ige.eu/igebdt/esqv.jsp?ruta=verTabla.jsp?OP=1&B=1&M=&COD=462&R=2\%5B2002:2003:2004:2005:2006:2007:2008:2009:2010:2011:2012:2013:2014:2015:2016:2017:2018\%5D&C=9928\%5B32\%5D;0\%5B3\%5D;1\%5B3\%5D&F=&S=&SCF=#}
- \url{http://www.ige.eu/igebdt/esqv.jsp?ruta=verTabla.jsp?OP=1&B=1&M=&COD=8547&R=0\%5Ball\%5D&C=2\%5B0\%5D;1\%5B0\%5D;9928\%5B108:12\%5D&F=&S=&SCF=}
}
\examples{
data(Depositos)
.Sindex <- Sindex(Depositos, "year", "quantity", "2010")*100
print(.Sindex)
Deflat(Depositos, "year", "quantity", "E_IPC_2016", "2011")
}
\keyword{datasets}
|
004384bbfd29d50eb3585027af93ee2fca01fd85 | 68dd09d21317c18cc2d53ad458eb696623393d5f | /InundationDurations_Exp.R | 9982eba36215ff881e14f10768c3445b1524a030 | [] | no_license | ppgibson/WarnerPoint | 4961ec295462c441d716d1d8d6778f6f45c216b1 | 55d63ffab17e0c0d178f7df6d90b3446d511f8b0 | refs/heads/master | 2021-01-10T04:39:42.002727 | 2016-02-19T22:02:14 | 2016-02-19T22:02:14 | 47,857,068 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,525 | r | InundationDurations_Exp.R | ###########################################################
## CALCULATE EXPONENTIALLY-WEIGHTED INUNDATION DURATIONS ##
###########################################################
## Based entirely on empirical values, not interpolation.
#### READ IN AND PREPARE DATA ####
# 1. Time series discharge data, all possible years (50 yrs before 1st survey to yr of last survey)
raw.flowdata <- read.csv("Gunn_DailyMeanCFS_WY1939-2013.csv")
# 2. Inundating discharge data
inundatingQs.df <- read.table("WP_InundatingDischarges.txt",
col.names=c("plot", "q.inund", "elevation", "unknown"))
# Create a new/cleaner version of inundatingQs.df, which will be used to collect
# the calculated inundation durations
plotdata <- select(inundatingQs.df, -unknown) #Get rid of unknown column
plotdata$plot <- paste("P", plotdata$plot, sep="") #Add Ps to plot numbers
plotdata <- plotdata[plotdata$plot!="P129", ] #Get rid of P129
plotdata <- plotdata[plotdata$plot!="P999", ] #...and get rid of P999
#### EXAMINE FLOW HISTORY ####
## Plot flow history for the period in question
months.all <- as.numeric(substr(raw.flowdata$date, 6, 7))
months.all.ind <- (months.all > 4 & months.all < 10)
raw.flowdata <- cbind(raw.flowdata, months.all.ind)
surv.yrs <- c(1990, 1994, 2000, 2006, 2013) #the set of survey years
for(i in surv.yrs) {
print(i)
flowhist.plot <- ggplot(data=raw.flowdata[raw.flowdata$wyear>(i-5) & raw.flowdata$wyear < (i+1), ]) + #show 5 yrs prior to and 1 yr after survye
geom_point(aes(x=as.POSIXct(date), y=discharge, color=months.all.ind)) +
geom_hline(aes(yintercept=300)) +
ggtitle(i)
print(flowhist.plot)
}
rm(months.all)
rm(months.all.ind)
rm(i)
#### INUNDUR FUNCTION ####
# Select some lambda values #larger values = faster decay! = heavier weighting of recent values
# Function to calculate inundation durations for a specified
# start yr/length of record/decay rate:
# (GA suggests calculating inundur once, then match plot q.inund -
# but so far I can't see any way to do this that would involve
# less calculation than the current loop through plots.)
inundur.exp <- function(year=1990, length = 50, lambda=0.005) { #default values
# Extract out only desired years of data for given sample year
start.year <- year - length
# end.year <- year - 1
enddate.ind <- which(raw.flowdata$date == paste(year, "-07-17", sep="")) #Use July 17 as the last date, in all years.
flow.data <- raw.flowdata[1:enddate.ind, ]
flow.data <- flow.data[(flow.data$wyear >= start.year), ]
# Print selected date, 1st date of flow record, and last date of flow record
# print(paste(year, flow.data$date[1], flow.data$date[nrow(flow.data)]))
# Create an index to limit flow data to growing season
# (index will be applied after calculation of exponential decay)
months <- as.numeric(substr(flow.data$date, 6, 7))
months.index <- (months>4 & months <10)
rm(months)
# Calculate flow duration curve for the extracted years of data
n.days <- nrow(flow.data)
decay <- exp(-lambda*(0:(n.days-1))) #exponentially decreasing sequence, as long as number of flow records.
wts <- rev(decay*n.days) #reverse so that strongest weight is for end of flow sequence!(i.e., most recent flows) #not really necessary, but keeps #s from getting so small
# Now, reduce wts to only include
sum.wts <- sum(wts*months.index) #only wts from the growing season days should be included in the total
inundurations <- inundurations #create a df copy to fill, leaving the original unchanged
inundurations$inundur <- NA #blank column to fill
# Loop through the plots
for(i in 1:nrow(inundurations)) {
ex.days.cur <- flow.data$discharge >= inundurations$q.inund[i] #days when flow EQUALS OR EXCEEDS given inundating discharge.
inundurations$inundur[i] <- sum(wts*ex.days.cur*months.index) / sum.wts
}
# Give the new inunduration column (newcol) a better name
colname <- paste("inundur.", format(lambda, scientific=FALSE), sep="")
# colname <- paste("inundur.e", log10(lambda), sep="")
colnames(inundurations)[ncol(inundurations)] <- colname
# colname <- paste("inundur.", year, sep="")
# colnames(inundurations)[ncol(inundurations)] <- colname
return(inundurations)
}
#### CALL THE FUNCTION TO CALCULATE INUNDUR ####
## Two options:
## (A) Loop through sample years; or
## (B) Loop through pre-set lambda values.
## !! Note that going back and forth between these options requires editing
## the end of the function code? (the part that sets column names in the output data frame.)
# ## (A) Call the function for each sample year
# # The set of sample years
# smp.yrs <- c(1990, 1994, 2001, 2006, 2013)
#
# # Now, run the function once for each sample year
# # Output is new columns (one for each year) in the /inundurations/ df.
# # The printed numbers in the console list the sample year, the start water
# # year, and the end water year for each call of the inunduration function
# # (to make sure that the function is in fact calculating inundation duration
# # based on the correct data).
# for (i in 1:length(smp.yrs)) {
# inundur.exp(smp.yrs[i], length=50, lambda=0.001) -> inundurations
# }
## (B) Call the function for each pre-set lambda value
# The set of lambda values
# # 'Large' lambda values (... _LgLm_ ...)
# lambdas <- c(0, 0.00001, 0.00005, 0.0001, 0.0005, 0.001, 0.003, 0.005,
# 0.007, 0.01, 0.02, 0.03, 0.04, 0.05, 0.1)
# # 'Small' lambda values (... _SmLm_ ...)
# # An exponential/geometric sequence between 1e-5 and 1e-3 (evenly distributed on log scale)
# lambdas <- 10^(seq(from=-5, to=-3, by=0.1))
# 'All' lambda values (... _AllLm_ ...)
lambdas <- 10^(seq(from=-5, to=-1, by=0.1)) #10^-1=0.1, the endpoint of LargeLms
lambdas <- c(0, lambdas) #42 lambda values!
# Assess half-life of selected lambda values
halflife <- data.frame(cbind(decay.const=lambdas, halflife.indays=(log(2)/lambdas)))
halflife <- mutate(halflife,
halflife.inyrs=halflife.indays/365,
hundredth.indays=(log(100)/lambdas),
hundredth.inyrs=(log(100)/lambdas)/365)
halflife <- mutate(halflife, expo=log10(decay.const))
halflife <- halflife[, c(6, 1:5)]
surv.yrs <- c(1990, 1994, 2001, 2006, 2013)
inundur.allyrs <- matrix(nrow=0, ncol=(4+length(lambdas))) #4 data columns (including yr, to be assigned below), plus one column for each lambda value to be computed.
## Double-for loop to calculate inundurs with each lambda value, in
## each survey year.
for(k in surv.yrs) {
print(k)
# Need a clean version of inundurations each time, in order to add columns during the for loop.
inundurations <- plotdata
# Run the inundur.exp function, which calculates an inundur value for each plot,
# once for each specified value of lambda
for(i in 1:length(lambdas)) {
inundurations <- inundur.exp(year=k, length=50, lambda=lambdas[i])
}
# Add a year column
inundurations$year <- k
# Merge current year's results with the compilation df
inundur.allyrs <- rbind(inundur.allyrs, inundurations)
}
## Examine distribution of inundur values
for (i in 1:(ncol(inundur.allyrs)-3)) {
print(i)
print(colnames(inundur.allyrs)[i+3])
hist(inundur.allyrs[, (i+3)], main=paste("AllYrs", colnames(inundur.allyrs)[i+3]))
}
# (inundur.allyrs$year==1990)
#### WRITE OUTPUT FILE ####
inundur.allyrs <- inundur.allyrs[, c(ncol(inundur.allyrs), 1:(ncol(inundur.allyrs) - 1))] #move [year] column to the front
write.csv(inundur.allyrs, "Inundurations_lambdas_allyrs_all.csv", row.names=FALSE)
#### REFORMAT OUPUT ####
# Convert \inundurations\ to long form - one record for each plot in
# each year (i.e., each plot sample).
durations <- melt(data = inundurations,
id.vars = c("plot", "q.inund", "elevation"),
variable.name = "year",
value.name = "inundur",
na.rm=FALSE)
durations$year <- substr(durations$year, 9, 12) #covert [year] column to plain numbers
durations$year <- as.numeric(durations$year)
#### END SCRIPT (FOR NOW) #### |
5a8bf869b447c099826ed2a01455d096b66ee371 | 72029d0e774d595a98a890f1e6baab69735618dc | /modelmatrix.R | e1cfc9f85fa2fef3fdba42a293cf9b711c0c04cd | [] | no_license | effyhuihui/DandB_R | 15ba7fff22971c65bef2f197b85ccd0b6ee00ae5 | bc63ab17470ac0a7d82c143476273a1617d5c845 | refs/heads/master | 2021-01-16T21:57:51.469293 | 2013-11-26T23:01:36 | 2013-11-26T23:01:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 446 | r | modelmatrix.R | library(caret)
# Build dummy-variable (one-hot) model matrices for the Titanic data set.
#read in data (expects titanic.csv in the working directory)
titanic <- read.csv(file="titanic.csv", header=T,as.is=T)
#subset: drop columns 3, 8 and 10
titanic <- titanic[-c(3,8,10)]
#create factors from the categorical columns
titanic[c(1:3,5:6,8)] <- lapply(titanic[c(1:3,5:6,8)], as.factor)
str(titanic)
#create dummy vars
titanic_matrix <- as.data.frame(model.matrix(survived~.-1,data=titanic)) #-1 removes the intercept term
# NOTE(review): the next line overwrites titanic_matrix with the
# two-way-interaction version (~ .^2); only the second result survives.
titanic_matrix <- as.data.frame(model.matrix(survived~.^2-1,data=titanic))#adds interaction terms
1375a40b79bb369b852f33b120caa3ca183cccb3 | 5f2d54247d3e4c79d6aa9eb2e213611bc824425d | /man/extract_ic.Rd | c4b855a1cd5350a85802379a5af51c3332d27a3c | [] | no_license | youngahn/hBayesDM | c0df8299b0e02e7a424dbd95487052e9f9d75b12 | 09050aa476773e1c554e1a3276728c05f1bfff56 | refs/heads/master | 2021-04-27T00:25:04.052467 | 2018-03-03T11:04:12 | 2018-03-03T11:04:12 | 123,809,033 | 2 | 0 | null | 2018-03-19T07:41:14 | 2018-03-04T16:53:42 | C++ | UTF-8 | R | false | true | 867 | rd | extract_ic.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_ic.R
\name{extract_ic}
\alias{extract_ic}
\title{Extract Model Comparison Estimates}
\usage{
extract_ic(modelData = NULL, ic = "looic", core = 2)
}
\arguments{
\item{modelData}{Object returned by \code{'hBayesDM'} model function}
\item{ic}{Information Criterion. 'looic', 'waic', or 'both'}
\item{core}{Number of cores to use for leave-one-out estimation}
}
\value{
IC Leave-One-Out and/or Watanabe-Akaike information criterion estimates.
}
\description{
Extract Model Comparison Estimates
}
\examples{
\dontrun{
library(hBayesDM)
output = bandit2arm_delta("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 1)
# To show the LOOIC model fit estimates
extract_ic(output)
# To show the WAIC model fit estimates
extract_ic(output, ic = "waic")
}
}
|
6a8945f2d9dd589afadefe3e6ac3914ec8085317 | 29585dff702209dd446c0ab52ceea046c58e384e | /HWEBayes/R/HWEImportSamp.R | 5bdf84cb2cf7743f2aba934877f2ec8c056b6607 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,133 | r | HWEImportSamp.R | HWEImportSamp <-
function(nsim,nvec,ischoice,lambdamu,lambdasd,alpha,
         gmu=rep(0,length(alpha)),
         gsigma=diag(0,nrow=length(alpha),
         ncol=length(alpha)))
{
  # Importance-sampling estimate of the normalizing constant (marginal
  # likelihood) Pr(n | H1) for a Hardy-Weinberg disequilibrium model.
  #
  # Args:
  #   nsim      - number of importance samples to draw.
  #   nvec      - genotype counts; must have length k*(k+1)/2 for k alleles.
  #   ischoice  - 1: sample baseline logits + lambda from a multivariate
  #               normal proposal (gmu, gsigma); 2: sample p from a
  #               Dirichlet(alpha) and lambda from Normal(lambdamu, lambdasd).
  #   lambdamu, lambdasd - prior mean/sd for the disequilibrium parameter.
  #   alpha     - Dirichlet prior parameters for the allele frequencies.
  #   gmu, gsigma - proposal mean/covariance, required when ischoice == 1.
  # Returns: list(PrnH1 = estimate, varest = its Monte Carlo variance).
  #
  # NOTE(review): rmvnorm/dmvnorm, ddirichlet/rdirichlet, invbaselogit and
  # MultLogLikP come from package dependencies / elsewhere in this package.
##priorint <- varprior <- 0
  k <- length(alpha)
  if (length(nvec) != k*(k+1)/2) {
    stop("length mismatch between alpha and nvec")
  }
  # Multinomial normalizing constant, shared by every likelihood evaluation.
  liknorm <- lfactorial(sum(nvec)) - sum(lfactorial(nvec))
  if (ischoice==1) {
    PrnH1 <- varterm1 <- 0
    if(gsigma[1,1]==0) {
      stop("HWImportSamp: You need to supply gmu and gsigma")
    }
    ## we simulate for the baseline logits and lambda
    phisamp <- rmvnorm(n=nsim, mean=gmu, sigma=gsigma)
    for (i in 1:nsim){
      # Map the baseline logits back to allele probabilities, and lambda
      # back to the constrained disequilibrium coefficient f.
      pval <- invbaselogit(phisamp[i,-k])$probs
      lambda <- phisamp[i,k]
      pmin <- min(pval)
      fmin <- -pmin/(1-pmin)
      f <- (exp(lambda)+fmin)/(exp(lambda)+1)
      likterm <- MultLogLikP(pval, f, nvec) + liknorm
      ##
      ## Log of the determinant of the (k-1)x(k-1) Jacobean, derivs are:
      ## partial p_1/partial phi_{1},...partial p_1/partial phi_{k-1}
      ## ........
      ## partial p_{k-1}/partial phi1,...partial p_{k-1}/partial phi_{k-1}
      ##
      jac <- diag(pval[-k]) - outer(pval[-k], pval[-k])
      ljack <- log(det(jac))
      ##
      ## NB We do not need to calculate a Jacobian term for
      ## lambda = phisamp[i,k] as this is generated on the correct
      ## scale for the prior.
      ##
      prterm1 <- log(ddirichlet(pval, alpha=alpha)) + ljack
      prterm2 <- dnorm(lambda, mean=lambdamu, sd=lambdasd, log=TRUE)
      gterm <- dmvnorm(phisamp[i,1:k],mean=gmu,sigma=gsigma,log=TRUE)
      # Importance weight: likelihood x prior / proposal density.
      expterm <- exp(likterm+prterm1+prterm2-gterm)
      ## expprior <- exp(prterm1+prterm2-gterm)
      PrnH1 <- PrnH1 + expterm
      ## priorint <- priorint + expprior
      varterm1 <- varterm1 + expterm^2
      ## varprior <- varprior + expprior^2
      if (i %% 1000 == 0) cat("Samples = ",i,"\n")
    }
    ## priorint <- priorint/nsim
    ## varprior <- (varprior/nsim - priorint^2)/nsim
    ##cat("nsim prior constant (se) 95% interval = ",nsim,priorint,"(",sqrt(varprior),")",priorint-1.96*sqrt(varprior),priorint+1.96*sqrt(varprior),"\n")
  }
  if (ischoice==2){
    # Vectorized version: the prior itself is the proposal, so the weight
    # reduces to the likelihood.
    pval <- rdirichlet(nsim, alpha=alpha)
    lambdaval <- rnorm(nsim, mean=lambdamu, sd=lambdasd)
    minp <- apply(pval, 1, min)
    minf <- -minp/(1-minp)
    f <- (exp(lambdaval)+minf)/(exp(lambdaval)+1)
    X <- cbind(f, pval)
    LLFUN <- function(x) MultLogLikP(x[-1], x[1], nvec)
    likterm <- apply(X, 1, LLFUN) + liknorm
    expterm <- exp(likterm)
    PrnH1 <- sum(expterm)
    varterm1 <- sum(expterm^2)
  }
  # Monte Carlo mean and variance of the estimator, plus a 95% interval.
  PrnH1 <- PrnH1/nsim
  varest <- (varterm1/nsim - PrnH1^2)/nsim
  cat("nsim norm constant (se) 95% interval:\n")
  cat(nsim,PrnH1,"(",sqrt(varest),")",PrnH1-1.96*sqrt(varest),PrnH1+1.96*sqrt(varest),"\n")
  list(PrnH1=PrnH1,varest=varest)
}
|
cfbcb6186aa91c1367ec00714c4f994a988c1985 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ssdtools/examples/boron_data.Rd.R | ca36d9c6003af005790f5b17833b36d9563aa5a6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 176 | r | boron_data.Rd.R | library(ssdtools)
### Name: boron_data
### Title: CCME Species Sensitivity Data for Boron
### Aliases: boron_data
### Keywords: datasets
### ** Examples
head(ccme_data)
|
c464088fed72928f6123c042657ad01b3fe96083 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.application.integration/man/sfn_describe_state_machine.Rd | b5d8dd8552cbb7d68a5ca9c24cdbb4eab42d1bf4 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 1,241 | rd | sfn_describe_state_machine.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sfn_operations.R
\name{sfn_describe_state_machine}
\alias{sfn_describe_state_machine}
\title{Describes a state machine}
\usage{
sfn_describe_state_machine(stateMachineArn)
}
\arguments{
\item{stateMachineArn}{[required] The Amazon Resource Name (ARN) of the state machine to describe.}
}
\value{
A list with the following syntax:\preformatted{list(
stateMachineArn = "string",
name = "string",
status = "ACTIVE"|"DELETING",
definition = "string",
roleArn = "string",
type = "STANDARD"|"EXPRESS",
creationDate = as.POSIXct(
"2015-01-01"
),
loggingConfiguration = list(
level = "ALL"|"ERROR"|"FATAL"|"OFF",
includeExecutionData = TRUE|FALSE,
destinations = list(
list(
cloudWatchLogsLogGroup = list(
logGroupArn = "string"
)
)
)
),
tracingConfiguration = list(
enabled = TRUE|FALSE
)
)
}
}
\description{
Describes a state machine.
This operation is eventually consistent. The results are best effort and
may not reflect very recent updates and changes.
}
\section{Request syntax}{
\preformatted{svc$describe_state_machine(
stateMachineArn = "string"
)
}
}
\keyword{internal}
|
fd7ba88ff865a4a069e673fa91d664fc3dae7337 | 2645fbe19fbc84d1b50ed567573632dd7c952dd0 | /Helpers/Helpers.R | e6846103d39abed0eebd80f1f9f23555785d742c | [] | no_license | pulkitjain1004/ApproxMap_Shiny | 8847ab068bcc5bbc3c7d81b8b4e0f41fb6639460 | ee08e9c36c6daad60ec79e3b8444f0525c3ed45e | refs/heads/master | 2021-01-17T07:57:21.684561 | 2017-03-03T16:36:36 | 2017-03-03T16:36:36 | 83,064,739 | 0 | 0 | null | 2017-02-24T17:10:59 | 2017-02-24T17:10:59 | null | UTF-8 | R | false | false | 3,587 | r | Helpers.R | format_output = function(approxmap_obj) {
clusters = approxmap_obj$clusters
form_cons = approxmap_obj$formatted_results$consensus
form_wseq = approxmap_obj$formatted_results$weighted_seq
for(i in 1:length(clusters)) {
cat(paste("Cluster ",i,":",sep = ""),"\n","Sequence IDs: ", clusters[[i]], "\n", "Weighted Sequence: ", form_wseq[[i]], "\n", "Consensus Pattern: ", form_cons[[i]],"\n\n")
}
}
# Flatten a weighted sequence into a two-column data frame.
#
# The "n" entry of the list is the sequence-level support count, not an
# itemset, so it is removed first. Every remaining itemset contributes
# its $elements and $element_weights, concatenated in order.
#
# Args:
#   weighted_seq - list of itemsets (each with $elements and
#                  $element_weights) plus an "n" entry.
# Returns: data.frame(elements, element_weights).
extract_freq = function(weighted_seq) {
  weighted_seq$n <- NULL
  pull_field <- function(field) {
    unlist(lapply(weighted_seq, function(itemset) itemset[[field]]))
  }
  data.frame(
    elements = pull_field("elements"),
    element_weights = pull_field("element_weights")
  )
}
# Plot element weights of a weighted sequence in element order, with
# horizontal reference lines for the consensus and variation cutoffs.
#
# Args:
#   weighted_seq        - weighted sequence list, incl. $n (support count).
#   cons_threshhold     - fraction of n drawn as the consensus cutoff (dashed).
#   noise_threshold     - elements with weight <= this are dropped from the plot.
#   variation_threshold - fraction of n drawn as the variation cutoff (dot-dash).
# Returns: a ggplot object (uses dplyr/ggplot2; %>% assumes magrittr attached).
plot_frequency = function(weighted_seq, cons_threshhold =0.5, noise_threshold = 0, variation_threshold = 0.2) {
  #n_thresh = threshhold * (length(weighted_seq)-1)
  n_thresh = cons_threshhold * weighted_seq$n
  v_thresh = variation_threshold * weighted_seq$n
  fq = extract_freq(weighted_seq)
  freq_plot <- fq %>% dplyr::mutate(element_number = 1:nrow(fq)) %>% filter(element_weights > noise_threshold) %>%
    ggplot2::ggplot(aes(x = element_number, y = element_weights, text = elements)) +
    ggplot2::geom_point(size = 0.75) +
    ggplot2::geom_path(group = 1, size=0.1) +
    ggplot2::geom_hline(yintercept = n_thresh, linetype = 2) +
    ggplot2::geom_hline(yintercept = v_thresh, linetype = 4) +
    ggplot2::theme(legend.position="none") +
    ggplot2::geom_label(aes(label = elements,size = element_weights))
  return(freq_plot)
}
# Render one tagged itemset as an HTML-ish string.
#
# Each item becomes "<tag>element : weight</tag>"; the items are joined
# with ", " and wrapped in parentheses. When add_itemset_weight is TRUE
# the itemset's own weight is appended after the closing parenthesis,
# e.g. "( <p1>a : 1</p1> ) : 3".
#
# Args:
#   W_itemset_html     - list with $elements, $element_weights, $start_tag,
#                        $end_tag and $itemset_weight.
#   add_itemset_weight - scalar flag; append " : <itemset_weight>"?
# Returns: a single character string.
get_Itemset_Formatted_HTML = function(W_itemset_html, add_itemset_weight = T) {
  tagged <- paste0(
    W_itemset_html$start_tag,
    paste(W_itemset_html$elements, W_itemset_html$element_weights, sep = " : "),
    W_itemset_html$end_tag
  )
  body <- paste(tagged, collapse = ", ")
  if (add_itemset_weight) {
    paste("( ", body, " ) : ", W_itemset_html$itemset_weight, sep = "")
  } else {
    paste("(", body, ")", sep = "")
  }
}
# Render a whole weighted sequence as "< itemset1 itemset2 ... > : n",
# with priority tags attached to every element.
#
# Args:
#   W_seq              - weighted sequence list, incl. $n (support count).
#   add_itemset_weight - forwarded to get_Itemset_Formatted_HTML.
#   no_white_space     - if TRUE, strip ALL spaces from the final string
#                        (including those inside element names).
# Returns: a single character string.
get_Wseq_Formatted_HTML = function(W_seq, add_itemset_weight = T, no_white_space=T) {
  n = W_seq$n
  W_seq$n = NULL
  W_seq_html = get_tagged_itemsets_from_wseq(W_seq)
  formatted_itemsets = lapply(W_seq_html,get_Itemset_Formatted_HTML, add_itemset_weight)
  formatted_itemsets = paste(formatted_itemsets, collapse = " ")
  result = paste("< ", formatted_itemsets," > : " , n,sep = "")
  if(no_white_space) result = gsub(" ","",result)
  return(result)
}
# Assign each element weight to one of five equal-width priority bins and
# attach matching HTML-style tags ("<priorityN>"/"</priorityN>") to every
# itemset. Bin 1 holds the lowest weights, bin 5 the highest.
#
# Args:
#   wseq - weighted sequence: a list of itemsets (each with
#          $element_weights) plus an "n" entry holding the sequence
#          support count, which is dropped before binning.
# Returns: the list of itemsets (without "n"), each augmented with
#          $start_tag and $end_tag character vectors.
get_tagged_itemsets_from_wseq = function(wseq) {
  wseq$n = NULL
  weights = unlist(lapply(wseq, function(x) x$element_weights))
  weight_range = range(weights)
  block_size = diff(weight_range)/5
  if (block_size == 0) {
    # FIX: when all weights are identical, seq(..., by = 0) errors; there
    # is only one meaningful bin anyway, so everything is priority 1.
    block_no_for = function(w) rep(1L, length(w))
  } else {
    blocks = seq(weight_range[1], weight_range[2], by = block_size)
    block_no_for = function(w) cut(w, breaks = blocks, labels = FALSE, include.lowest = TRUE)
  }
  wseq = lapply(wseq, function(w_itemset) {
    block_no = block_no_for(w_itemset$element_weights)
    w_itemset$start_tag = paste0("<priority", block_no, ">")
    w_itemset$end_tag = paste0("</priority", block_no, ">")
    return(w_itemset)
  })
  return(wseq)
}
# Wrap every element name in its pre-computed priority tags.
#
# Takes an itemset list carrying parallel $elements, $start_tag and
# $end_tag vectors and returns the same list with $elements replaced by
# the tagged form "<tag>element</tag>". All other fields pass through.
tag_items = function(itemset_with_tags) {
  wrapped <- paste0(
    itemset_with_tags$start_tag,
    itemset_with_tags$elements,
    itemset_with_tags$end_tag
  )
  itemset_with_tags$elements <- wrapped
  itemset_with_tags
}
# Build the HTML-formatted consensus pattern for a weighted sequence:
# tag every element with its priority bin, then extract and format the
# consensus pattern at the given strength cutoff.
#
# Args:
#   weighted_seq - weighted sequence list, incl. $n (support count).
#   strength     - cutoff passed to get_consensus_pattern (defined
#                  elsewhere in this project).
# Returns: whatever get_consensus_formatted returns -- presumably a
#          formatted string; confirm against its definition.
get_consensus_formatted_HTML <- function(weighted_seq, strength) {
  n = weighted_seq$n
  weighted_seq$n = NULL
  W_seq_html = get_tagged_itemsets_from_wseq(weighted_seq)
  tagged_items_wseq <- lapply(W_seq_html, tag_items)
  # Restore the support count: get_consensus_pattern expects it present.
  tagged_items_wseq$n = n
  return(get_consensus_formatted(get_consensus_pattern(tagged_items_wseq,strength)))
}
|
294118f62dd079e276a9db5c7931bcd087cec2bc | 901e5267c684f8ca2606ba5797b9d8daca1010a3 | /Script/Apriori_Rules_Script.R | 18c48db8ac7e5937c6607d8f2ff8c6f2acaa2678 | [] | no_license | rijalanita/Market_Basket_Analysis | eed6f19ae1095d7dfc17d423b7d5138135299bc3 | c46e63d6907fe0e6cc7af15c95e5a3ff62041bac | refs/heads/master | 2022-11-19T03:28:25.292101 | 2020-07-17T14:33:24 | 2020-07-17T14:33:24 | 280,438,778 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,109 | r | Apriori_Rules_Script.R | #Author: Anita Rijal
#Version: 1
#Date: March 6, 2020
#Description: Market Basket Analysis to understand association between product types using apriori rules
# 1. Library
library(RMySQL)
library(dplyr)
library(tidyverse)
library(arules)
library(arulesViz)
#Read the csv file that you created using the read.transactions from arules.
sku_order<- read.transactions("sku_order.csv", format = "single",
sep = "," , header = TRUE,
cols= c("id_order","sku"))
sku_order2<- read.transactions("sku_order2.csv", format = "single",
sep = "," , header = TRUE,
cols= c("id_order","sku"))
sku_order3<- read.transactions("sku_order3.csv", format = "single",
sep = "," , header = TRUE,
cols= c("id_order","brand_accesories"))
#Investigating frequency of products by top 20 products
itemFrequency(sku_order, type = "absolute")
#visualising product frequency by top 20
itemFrequencyPlot(sku_order , topN=20 , type = "absolute")
#top 5 categories
itemFrequencyPlot(sku_order3 , topN=5 , type = "absolute")
#least frequent products and categories
barplot(sort(itemFrequency(sku_order2), decreasing=F))
barplot(sort(itemFrequency(sku_order3), decreasing=F))
#APRIORI rules
rule1 <- apriori(sku_order2, parameter = list(support = 0.0006, confidence = 0.3))
summary(rule1)
inspect (rule1)
summary(sku_order)
#visualise rules
plot(rule1)
plot(rule1, method = "graph")
# FIX: this call was missing its closing parenthesis, which made the next
# assignment parse as an argument to plot() and broke the whole script.
plot(rule1, method = "paracoord", control = list(reorder = TRUE))
rule2 <- apriori(sku_order2, parameter = list(supp=0.0004, conf=0.2,maxlen=10))
summary(rule2)
inspect(rule2)
plot(rule2, method = "graph")
plot(rule2, method = "paracoord", control = list(reorder = TRUE))
#categories association rule
rule4 <- apriori(sku_order3, parameter = list(support = 0.0006, confidence = 0.4))
inspect(rule4)
plot(rule4, method = "graph")
plot(rule4, method = "paracoord", control = list(reorder = TRUE))
|
726c1ea2374ca7086964da5c57cbc75f9316ccf8 | 2741826483417f28990d21a414821f0d741b811c | /R/every_int32.R | e5347452e02a0c66c4980c6c58abef19fd2cba05 | [] | no_license | HughParsonage/hutilscpp | 3262201a11d2026d37284f709e4e0e6fbb53d3a9 | 143f9e2dca2c0f25e6e64388fdfd8f6db64477d9 | refs/heads/master | 2022-10-20T22:12:58.646043 | 2022-10-07T07:20:58 | 2022-10-07T07:20:58 | 155,201,062 | 8 | 3 | null | 2022-09-29T15:58:30 | 2018-10-29T11:33:08 | R | UTF-8 | R | false | false | 266 | r | every_int32.R | #' Every integer
#' @param nThread Number of threads.
#' @param na Value for \code{NA_INTEGER}.
#' @export
every_int <- function(nThread = getOption("hutilsc.nThread", 1L), na = NA_integer_) {
.Call("Cevery_int32", nThread, na, PACKAGE = "hutilscpp") # nocov
}
|
db78733c445cf88fcec4c20d7086472b803c6efc | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/heuristica/examples/cueValidity.Rd.R | 2998414ece0eb68e15661ed80975c6b7fcf95495 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 311 | r | cueValidity.Rd.R | library(heuristica)
### Name: cueValidity
### Title: Calculate the cue validity.
### Aliases: cueValidity
### ** Examples
cueValidity(c(5,1), c(1,0))
# Returns 1.
cueValidity(c(5,2,1), c(1,0,0))
# Also returns 1
cueValidity(c(5,2,1), c(0,0,1))
# Returns 0.
cueValidity(c(5,2,1), c(1,0,1))
# Returns 0.5.
|
adb95cf75e062d2c161389c5d3b0e986867b5bf3 | 3b3bb9d7be4125a7790ec0282bc3501e494498ec | /podstawy_programowania/2022_2023/ppr20221127_2023014 _przetwarzanie.R | 1ca14afc60addb86f89960d4f9ea98655c92f094 | [] | no_license | lwawrowski/cdv_bigdata | ee4c198c739ff6d8fdda966376ccf3be96987ce1 | e09183d300d56e0448ba853bb6165f9ed3c68126 | refs/heads/master | 2023-05-24T11:50:41.290672 | 2023-05-14T10:19:39 | 2023-05-14T10:19:39 | 183,685,383 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,087 | r | ppr20221127_2023014 _przetwarzanie.R | library(tidyverse)
load("data/fifa.RData") # 1 sposób
mecze <- read_csv2("data/fifa.csv") # 2 sposób
# filtrowanie -------------------------------------------------------------
# Ile razy Włochy grały w finale MŚ?
fifa_wlochy <- fifa %>%
filter(stage == "Final" & (home_team_name == "Italy" | away_team_name == "Italy"))
fifa %>%
filter(attendance > 100000)
fifa %>%
filter(city == "Rome" | city == "Paris" | city == "Berlin")
# wszystkie mecze w miastach "Rome", "Paris", "Berlin"
fifa %>%
filter(city %in% c("Rome", "Paris", "Berlin"))
# wszystkie mecze w miastach poza "Rome", "Paris", "Berlin"
fifa %>%
filter(!city %in% c("Rome", "Paris", "Berlin"))
# braki danych ------------------------------------------------------------
fifa_na <- fifa %>%
filter(!is.na(attendance))
complete.cases(fifa)
# argument z nazwą zbioru
fifa_bez_brakow <- fifa %>%
filter(complete.cases(fifa))
# argument jako kropka wskazująca na aktualny zbiór
fifa_bez_brakow <- fifa %>%
filter(complete.cases(.))
# nowa zmienna - niepoprawne działanie
fifa_z_brakami <- fifa %>%
mutate(braki=NA) %>%
filter(complete.cases(fifa))
# nowa zmienna - poprawne działanie
fifa_z_brakami <- fifa %>%
mutate(braki=NA) %>%
filter(complete.cases(.))
# nie bierzemy pod uwagę jednej zmiennej przy usuwaniu braków
fifa_bez_brakow <- fifa %>%
filter(complete.cases(
select(., -win_conditions)
))
# wybieranie kolumn -------------------------------------------------------
fifa_wlochy %>%
select(year, home_team_name, home_team_goals, away_team_goals, away_team_name)
fifa_wlochy %>%
select(year, home_team_name:away_team_name)
fifa_wlochy %>%
select(year_of_match=year, home_team_name:away_team_name)
# nowa kolumna ------------------------------------------------------------
# Jaka jest największa liczba bramek w jednym meczu?
fifa <- fifa %>%
mutate(goals_sum=home_team_goals+away_team_goals)
fifa %>%
select(year, home_team_name, away_team_name, goals_sum) %>%
arrange(desc(goals_sum))
fifa %>%
select(year, home_team_name, away_team_name, goals_sum) %>%
top_n(5, goals_sum)
# W zbiorze stwórz nową zmienną, która będzie zawierała bezwzględną różnicę pomiędzy wynikami zespołów.
fifa <- fifa %>%
mutate(goals_abs_diff=abs(home_team_goals-away_team_goals))
# podsumowanie ------------------------------------------------------------
# Jaka była średnia liczba widzów?
fifa %>%
summarise(mean_attendance=mean(attendance, na.rm = TRUE))
fifa_stats <- fifa %>%
summarise(mean_attendance=round(mean(attendance, na.rm = TRUE)),
size=n())
fifa %>%
summarise(mean_goals=mean(goals_sum),
median_goals=median(goals_sum),
sd_goals=sd(goals_sum))
fifa %>%
summarise_at(vars(goals_sum), list(mean = mean, median = median, sd = sd))
fifa_stats_goals <- fifa %>%
summarise_at(vars(home_team_goals, away_team_goals), list(mean = mean, median = median, sd = sd))
# grupowanie --------------------------------------------------------------
fifa_stats <- fifa %>%
group_by(stage) %>%
summarise(mean_attendance=round(mean(attendance, na.rm = TRUE)),
size=n())
fifa_stats <- fifa %>%
group_by(year) %>%
summarise(mean_attendance=round(mean(attendance, na.rm = TRUE)),
size=n()) %>%
mutate(attendance_per_match=mean_attendance/size)
fifa %>%
group_by(year, stage) %>%
summarise(mean_goals=mean(goals_sum),
median_goals=median(goals_sum),
sd_goals=sd(goals_sum))
# liczebności -------------------------------------------------------------
# Jakie miasto najczęściej gościło piłkarzy?
cites <- fifa %>%
group_by(city) %>%
summarise(n=n())
cites <- fifa %>%
count(city)
cities_year <- fifa %>%
count(year, city)
cities_goals <- fifa %>%
group_by(city) %>%
summarise(n=n(),
mean_goals=mean(goals_sum)) %>%
arrange(desc(mean_goals))
# Ile było meczów, w których drużyna prowadząca po pierwszej połowie ostatecznie przegrywała?
|
8758454c1a7a5768f2c9208e3b191b41239a18bc | 8865cd340e5325efb50cdac5758d52ffcfa4b447 | /scripts/runOutdegree.R | 9633151d7f76976f9398633e6a083157eb94e3f9 | [] | no_license | LaurensRietveld/RGraphAnalysis | 26471a2ad6c1c44a142aaa9c52db47a04107823e | 3a4b21eda23e060890dbb10e5d06f5b37dd34180 | refs/heads/master | 2016-09-06T18:55:44.393049 | 2013-04-12T12:33:06 | 2013-04-12T12:33:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 307 | r | runOutdegree.R | print("running outdegree")
degree <- degree(graph, normalized=TRUE, mode="out")
print("storing outdegree")
write.table(degree, file = "output/directed_outdegree", append = FALSE, quote = FALSE, sep = "\t", row.names = TRUE, col.names = FALSE) #row names to true: takes attribute value (i.e. uri) from vector |
5545d38f40c0e1552978adb53c980ef995d517a1 | b69331d2278322507da0560ae1ec61a665c0753f | /NetLogoDataDemo.R | 96603f5cd3bb1ea20a4938722797f251f518a3fd | [] | no_license | leoIE3/SEN | e322bfeacc237a046f9c4d6763cf9b21c32817ed | 84cb9bef22b386ddc6048064adf880b51c2f6a66 | refs/heads/master | 2021-08-22T13:34:05.074032 | 2017-11-30T09:29:57 | 2017-11-30T09:29:57 | 112,592,248 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,162 | r | NetLogoDataDemo.R | ############ !!!!!!!!!!!!!!!!!!!!!!!!!! ##########################
#Put this at the top of every file, unless you really really want to work with factors
#this will save you a lot of confusion
options(stringsAsFactors = FALSE)
#The main wiki page around these examples is at:
#http://wiki.tudelft.nl/bin/view/Education/SPM955xABMofCAS/LectureDataAnalysis
#code + data we will be using is at https://github.com/cbdavis/Demo-Analyzing-Netlogo-Data-with-R
# The contents are:
# * R code
# * the nlogo file used to generate the data.
# This also includes methods that can write the network structure to a file (included as network.txt)
# * TeamAssemblyModelData.csv - the data from last year's exam
# Put all these into the same directory, open up RStudio
# and set your working directory to this directory containing the files.
# Click on "Tools -> Set Working Directory -> Choose Directory" to do this.
# On a mac, this may also be:
# "Session -> Set Working Directory -> Choose Directory" to do this.
#Then install the necessary packages for R
# * In the bottom right quadrant, there is a "Packages" tab, with an option to "Install Packages".
# Click on this to install the following libraries:
# * ggplot2 - this will be used to do most of the plotting.
# * reshape2
# * sqldf - used for querying data, performing aggregations, filtering, etc.
#####Basic R examples
#See http://www.statmethods.net/index.html for a good overview of R
#Main components of what you're seeing:
# RStudio has four panels, and in the default config these are:
### Top Left - Code - here you can open and work on different script files
### Top Right - Workspace/History
# This shows all the variables and functions that are currently loaded in the workspace
# You can click on the variables to see their values, which can be useful to inspect
# how the code is operating.
# The history shows all of the commands that you have run.
### Bottom Left - Console
# commands can be run here
# You can also find documentation for commands by typing in ?commandName to get help, i.e. ?sum
### Bottom Right - Files/Plots/Packages/Help
# Files - This shows everything in your current working directory
# Plots - Once you start plotting, multiple plots will be stored here.
# There are arrows in this view that allow you to navigate between multiple plots
# Packages - Shows all the packages installed and currently loaded
# Help - Shows documentation for varous functions.
#You can run lines of code by highlighting them, and then clicking on "Run" above.
#You can run all the code at once by doing Code -> Run Region -> Run All
### Introduction to operations:
#Add two numbers, this will only show the value in the console output
1 + 1
#Assign a variable, this way the answer is stored
a = 1 + 1
#Same as above, just using "<-" instead of "="
a <- 1 + 1
#now add up two different variables
b = 3
c = a + b
#make a vector, c() is the function for putting elements into a vector
d = c(3,2,1,4)
#find the length
length(d)
#calculate the average and standard deviation
mean(d)
sd(d)
#do a simple plot
plot(d)
#make another vector e, which is filled with random numbers ranging from 0 to 1, and contains the same number of elements as the d vector
e = runif(length(d), 0, 1)
#combine these two vectors into a matrix, where d is the left column and e is the right column
f = cbind(d, e)
#combine these two vectors into a matrix, where d is the top row and e is the bottom row
g = rbind(d, e)
#See http://www.statmethods.net/advstats/matrix.html for more information about working with matrices
#transpose the matrix that you just made above
t(g)
#element-wise multiplication of two vectors
h = d * e
#matrix multiplication
d %*% t(e)
#Also, when you save R, it will request if you want to save the workspace
#This means that it will save all the variables currently loaded in the workspace
#use rm(variableName) to remove variables from the workspace
######Load in libraries needed for plotting
#TODO: Make sure that you have these packages installed where you see "library(something)"
#Load the ggplot2 library which is used for most of the visualizations here
#to understand why this library is cool, just do a google image search: https://www.google.com/search?q=ggplot2&tbm=isch
#See http://docs.ggplot2.org/current/ for documentation
#Also http://cran.r-project.org/web/packages/ggplot2/ggplot2.pdf
#and http://had.co.nz/ggplot2/book.pdf
#Note that plot() is not the same as ggplot()
#these are from two separate packages
library(ggplot2)
# needed for reshaping data frames
library(reshape2)
#used for querying data, performing aggregations, filtering, etc.
library(sqldf)
############### MAKE SURE THAT THE WORKING DIRECTORY IS SET ###############
#this line below sets the current working directory
#setwd("/home/cbdavis/Demo-Analyzing-Netlogo-Data-with-R")
# You can also do something like Session -> Set Working Directory -> To Source File Location
#### Make sure to specify the "Table" output for Netlogo
#Read in the data. skip the first 6 lines, the line after that is the header, and the columns are separated by commas
#You can either specify the full path to the file,
# or make sure that the working directory for R points to the directory containing the file
myDataFrame = read.table("Wolf Sheep Predation experiment-table.csv", skip = 6, sep = ",", head=TRUE)
#This gives you a quick summary of what's in the data
#This is especially important since it tells you what the column names are.
#For example, if you see "newcomer.incumbent" then you can access this column using myDataFrame$newcomer.incumbent
#Note: characters that are not letters or numbers (A-Z, a-z, 0-9) may be encoded as periods,
#So a header that looks like "[run number]" will be accessible using myDataFrame$X.run.number.
#What you'll see here is that you're working with a data frame.
#It isn't a matrix, but it's an object containing several columns along with some
#additional properties.
summary(myDataFrame)
#this will also show you the names of the column names
colnames = colnames(myDataFrame)
##### Don't worry about what this means, it cleans up the column names #####
##### You can just copy/paste it and re-use it, just make sure that #####
##### if your data frame isn't called myDataFrame, then update that part #####
# Some colnames start with "X.", get rid of this
colnames(myDataFrame) = gsub("X\\.", "", colnames(myDataFrame))
# Get rid of periods at the start and end of the names
colnames(myDataFrame) = gsub("^\\.|\\.$", "", colnames(myDataFrame))
# Convert all periods into underscores
colnames(myDataFrame) = gsub("\\.", "_", colnames(myDataFrame))
############################################################################
# you can also just rename the columns yourself
colnames(myDataFrame)[1] = "runNumber" # change "run_number" to "runNumber"
colnames(myDataFrame)[2] = "grass_is_on" # used to be ?grass in the data file
colnames(myDataFrame)[11] = "tick" # change "step" to "tick"
colnames(myDataFrame)[14] = "count_grass" # there are two columns named grass, we need to distinguish them.
#These are now the data columns that I can work with
# myDataFrame$runNumber
# myDataFrame$grass_is_on
# myDataFrame$sheep_reproduce
# myDataFrame$initial_number_sheep
# myDataFrame$grass_regrowth_time
# myDataFrame$sheep_gain_from_food
# myDataFrame$show_energy
# myDataFrame$initial_number_wolves
# myDataFrame$wolf_reproduce
# myDataFrame$wolf_gain_from_food
# myDataFrame$tick
# myDataFrame$count_sheep
# myDataFrame$count_wolves
# myDataFrame$count_grass
# In the top right panel, you can now click on the "Environment" tab,
# then click on "myDataFrame" to bring up a table view of it.
#find the maximum number of sheep encountered
max(myDataFrame$count_sheep)
#which index has the maximum number of sheep
indexWithMaxNumSheep = which.max(myDataFrame$count_sheep)
myDataFrame$count_sheep[indexWithMaxNumSheep]
#plot a sorted vector of the number of turtles over all time for all the simulations
#this gives you an idea of how often you encounter high, low, medium values
plot(sort(myDataFrame$count_sheep))
#just give me a quick scatterplot
scatterplot = ggplot(data=myDataFrame, aes(x=tick, y=count_sheep)) + #use myDataFrame for the data, columns for x and y
geom_point() + #we want to use points
xlab("tick") + #specify x and y labels
ylab("number of sheep") +
ggtitle("Number of sheep over time") #give the plot a title
print(scatterplot) #display the scatterplot
# Something's going on, but it's useful to distinguish between the different runs,
# So color the dots by runNumber
scatterplot = ggplot(data=myDataFrame, aes(x=tick, y=count_sheep)) + #use myDataFrame for the data, columns for x and y
geom_point(aes(colour = runNumber)) + #we want to use points, colored by runNumber
xlab("tick") + #specify x and y labels
ylab("number of sheep") +
ggtitle("Number of sheep over time") #give the plot a title
print(scatterplot) #display the scatterplot
#Note: if you just do "ggplot(...)" instead of "something = ggplot(...)" then the image will be drawn automatically,
#but you won't have a way to save it, except by clicking on the GUI for the image.
#Now save the plot
ggsave(scatterplot, file="scatter.png")
#do the same with lines. The only change from above is the addition of "group=runNumber"
# and geom_line is used instead of geom_point
ggplot(data=myDataFrame, aes(x=tick, y=count_sheep, group=runNumber)) + #use myDataFrame for the data, columns for x and y
geom_line(aes(colour = runNumber)) + #we want to use points, colored by runNumber
xlab("tick") + #specify x and y labels
ylab("number of sheep") +
ggtitle("Number of sheep over time") #give the plot a title
#You can navigate back and forth between different graphs by using the left/right arrows in the "Plots" window
#To have multiple graph windows open, you need to tell R specifically to open a new window
#If you're using Windows, this will looks something like this
#
# windows()
# put your code for plot 1 here
#
# windows()
# put your code for plot 2 here
#
#For mac, you would use macintosh() instead of windows. For Unix/Linux, you would use X11()
#See http://www.statmethods.net/graphs/creating.html for more info
#Give me a heatmap, without the scatter plot
# This can be useful if you have a HUGE number of points that are all in a sort of cloud
simpleHeatMapOfScatterPlot = ggplot(data=myDataFrame, aes(x=tick, y=count_sheep)) +
stat_density2d(geom="tile", aes(fill = ..density..), contour = FALSE)
print(simpleHeatMapOfScatterPlot)
ggsave(simpleHeatMapOfScatterPlot, file="simpleHeatMapOfScatterPlot.png")
# make a scatter plot
ggplot(data=myDataFrame, aes(x=count_grass, y=count_sheep)) + geom_point()
# make a scatter plot with density contours drawn on it
ggplot(data=myDataFrame, aes(x=count_grass, y=count_sheep)) + geom_point() + geom_density2d()
# instead of contours, show dots that are proportionally sized to the density
ggplot(data=myDataFrame, aes(x=count_grass, y=count_sheep)) + stat_density2d(geom="point", aes(size = ..density..), contour = FALSE)
# This is how to create filenames automatically
# You can use these in loops to create lots of graphs
fileprefix="histogram"
val = 3
# filename is now "histogram3.png"
filename = paste(fileprefix, val, ".png", sep="")
#actually, I'm in the mood for a histogram
simpleHistogram = ggplot(data=myDataFrame, aes(x=count_sheep)) + geom_histogram()
print(simpleHistogram)
# save the image with the file name we just created
ggsave(simpleHistogram, file=filename)
#now just give me a boxplot
# If we just say "group=tick", then we would have 500 boxes, which fills up the whole plot
# and is hard to read
ggplot(data=myDataFrame, aes(x=tick, y=count_sheep, group=tick)) +
geom_boxplot()
# "group=round(tick/25)" means that we group all the data into boxes 25 steps wide
boxplot = ggplot(data=myDataFrame, aes(x=tick, y=count_sheep, group=round(tick/25))) +
geom_boxplot()
print(boxplot)
ggsave(boxplot, file="boxplot.png")
#show a matrix (i.e. a "facet grid") of individual graphs where every single
#graph show the values encountered for a single permutation of grass_regrowth_time & initial_number_sheep values
ggplot(data=myDataFrame, aes(x=tick, y=count_sheep, group=round(tick/25))) +
geom_boxplot() +
facet_grid(grass_regrowth_time ~ initial_number_sheep)
# Same, but make the y scales independent per row to stretch things out a bit
boxplot = ggplot(data=myDataFrame, aes(x=tick, y=count_sheep, group=round(tick/25))) +
geom_boxplot() +
facet_grid(grass_regrowth_time ~ initial_number_sheep, scales="free_y")
print(boxplot)
#do the same, now just with lines
ggplot(data=myDataFrame, aes(x=tick, y=count_sheep)) +
geom_line() +
facet_grid(grass_regrowth_time ~ initial_number_sheep, scales="free_y")
# This looks weird since there are two repetitions, we should separate these out
facetGridWithLines = ggplot(data=myDataFrame, aes(x=tick, y=count_sheep, group=runNumber)) +
geom_line(aes(colour = runNumber)) +
facet_grid(grass_regrowth_time ~ initial_number_sheep, scales="free_y")
print(facetGridWithLines)
ggsave(facetGridWithLines, file="facetGridWithLines.png")
# just show single graphs per runNumber
ggplot(data=myDataFrame, aes(x=tick, y=count_sheep)) +
geom_point() +
facet_wrap(~runNumber)
#Now need to create a stacked area chart based on the columns below
# myDataFrame$count_sheep
# myDataFrame$count_wolves
# myDataFrame$count_grass
# To do this, we need to stack up those columns into a single column.
# We will use a column beside it to indicate what is being counted
# So instead of:
# count_sheep count_wolves count_grass
# 50 10 100
#
# We'll have:
# count_sheep 50
# count_wolves 10
# count_grass 100
# The rest of the values in the columns will be duplicated for each of these new rows
#See http://www.statmethods.net/management/reshape.html for what's happening here
# Note that "count_sheep", "count_wolves", "count_grass" are not in the list
data2 = melt(myDataFrame, id=c("runNumber", "grass_is_on", "sheep_reproduce", "initial_number_sheep", "grass_regrowth_time", "sheep_gain_from_food", "show_energy", "initial_number_wolves", "wolf_reproduce", "wolf_gain_from_food", "tick"))
# Two new columns are introduced- "variable" and "value":
# variable value
# count_sheep 50
# count_wolves 10
# count_grass 100
areaplot = ggplot(data=data2, aes(x=tick, y=value)) +
geom_area(aes(fill=variable)) +
facet_grid(grass_regrowth_time ~ initial_number_sheep, scales="free")
print(areaplot)
ggsave(areaplot, file="areaplot.png")
##### Querying data using SQLDF package #####
# With this part of the tutorial, you're using SQL (http://en.wikipedia.org/wiki/SQL) to run queries over your data
# This is more advanced, but it will give you awesome superpowers
# Whenever you are frustrated by something that is difficult to do in Excel, remember this
# This visualization uses the sqldf package + a scatter plot:
# Time lapse of 860,000 photovoltaic systems installed across Germany
# https://www.youtube.com/watch?v=XpvQNn0n_Qw
# The data I have lists the capacity per post code, but to make the visualization, I need
# to calculate the cumulative capacity up to the date the I am visualizing, for all of the
# distinct postcodes. This becomes awesomely easy when using sqldf.
# instructions for the package - http://code.google.com/p/sqldf/
# The best place to start is via tutorials online. Just search for something like "sqlite tutorial queries"
# i.e. http://sqlite.awardspace.info/syntax/sqlitepg03.htm
# Official documentation: http://www.sqlite.org/lang.html
# This is way more than you need, and tutorials are easier to understand,
# but this shows you everything that you can do with the query language.
# The uppercase terms below are some of the more popular commands that you may find useful.
# You can also use lowercase in the queries. Examples of their use are further below.
# SELECT
# FROM
# WHERE
# AS
# DISTINCT
# COUNT
# ORDER BY
# DESC
# GROUP BY
# BETWEEN
# AND
# OR
# MAX
# MIN
# JOIN
#### NOTE - column names should not contain any punctionation, otherwise the queries may not work.
# You need to change column names like "team.size" to something like "teamsize"
# just get me one row
x = sqldf("SELECT * FROM myDataFrame LIMIT 1")
# count the number of rows where the value for runNumber is equal to 1
sqldf("SELECT COUNT(*) FROM myDataFrame WHERE runNumber=1")
# the same, but where runNumber < 10
sqldf("SELECT COUNT(*) FROM myDataFrame WHERE runNumber<10")
# find the average count_sheep for each runNumber (averaged over all ticks)
sqldf("SELECT AVG(count_sheep) AS avgSheep FROM myDataFrame GROUP BY runNumber")
# same, but order the values for avgSheep descending
sqldf("SELECT AVG(count_sheep) AS avgSheep FROM myDataFrame GROUP BY runNumber ORDER BY avgSheep DESC")
# same, but also give me the runNumber that corresponds to each value
x = sqldf("SELECT runNumber, AVG(count_wolves) AS avgWolves, AVG(count_sheep) AS avgSheep FROM myDataFrame GROUP BY runNumber ORDER BY avgSheep DESC")
# plot stuff
plot(x$avgWolves, x$count_sheep)
# find the distinct values for initial_number_sheep
sqldf("SELECT DISTINCT initial_number_sheep FROM myDataFrame")
# get me the distinct combinations of initial_number_sheep and grass_regrowth_time that were used
sqldf("SELECT DISTINCT initial_number_sheep, grass_regrowth_time FROM myDataFrame")
# select a subset of the original data, and then run a query on that subset
dataSubSet = sqldf("SELECT * FROM myDataFrame WHERE runNumber<10")
sqldf("SELECT count(*) FROM dataSubSet")
# get all rows where 20 <= p <= 60
sqldf("SELECT * FROM myDataFrame WHERE count_sheep BETWEEN 20 AND 60")
# get all data where two conditions are met
sqldf("SELECT * FROM myDataFrame WHERE count_wolves > count_sheep")
# get me the row with the maximum value for the maximum average component size
sqldf("SELECT *, MAX(count_sheep) FROM myDataFrame") |
0256e3cc6322293d6f824d0a67ab2b45cc7d2405 | df9950d0ba2ab039eeb1cb3b3aa4778528b34eb0 | /R/mMRFfit.R | 0b9ec22f6a2e0d4b4223f1ebb5b609e1b7a91f5e | [] | no_license | jmbh/mMRF | 077a758b7e2b5f87c9b4c6f438f1a5536211984a | 9b59148cd8c87cf24fd735eb3e478eb8c3362efe | refs/heads/master | 2016-09-06T07:01:56.723492 | 2015-08-27T15:02:37 | 2015-08-27T15:02:37 | 34,259,406 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,966 | r | mMRFfit.R |
# mMRFfit: estimate a mixed Markov random field (mMRF) via node-wise
# penalized (neighborhood) regression -- one lasso-penalized glmnet fit per
# variable. The function body continues beyond this chunk; the return value
# is constructed further down.
mMRFfit <- function(
data, # data matrix, columns = variables (categoricals coded as integers)
type, # data type for col 1:ncol; c=categorical, g=gaussian, p=poisson, e=exponential
lev, # number of categories of categorical variables; continuous variables have level=1
lambda.sel="CV", # method for penalization parameter (lambda) selection: "CV" or "EBIC"
folds=10, # folds in case CV is used for lambda selection
gam=.25, # tuning parameter gamma for EBIC, in case EBIC is used for lambda selection
d=1, # maximal degree of the true graph (order of interactions in the design matrix)
rule.reg="AND", # parameter-aggregation rule of categorical variables ("AND"/"OR"); applied later in the function
rule.cat="OR", # either "OR" (conditional independence) or a matrix that specifies customized rules
pbar = TRUE # shows a progress bar if TRUE
)
{
# step 1: sanity checks & info from data
stopifnot(ncol(data)==length(type)) # type vector has to match data
stopifnot(ncol(data)==length(lev)) # level vector has to match data
# all columns must be numeric on input (categoricals are coded as integers)
if(sum(apply(data, 2, function(x) class(x)!="numeric"))>0) stop("Only numeric values permitted!")
# Poisson columns must be integer-valued (checked via x - round(x)); the
# cbind with a constant column keeps apply() two-dimensional when there are
# zero or one Poisson variables
if(sum(apply(cbind(data[,type=="p"],rep(1,nrow(data))), 2, function(x) sum(x-round(x))))>0) stop("Only integers permitted for Poisson random variables!")
n <- nrow(data) # number of observations
nNode <- ncol(data) # number of variables (nodes in the graph)
# step 2: prepare data
#data[,type!="c" & type!="p"] <- scale(data[,type!="c" & type!="p"]) #standardize continuous variables
data <- as.data.frame(data) #necessary for formula input
colnames(data) <- paste("V",1:nNode, sep="") #necessary for formula input
# determine the empirical number of levels per variable (compared with the
# user-entered `lev` right below this block)
emp_lev <- numeric(nNode) + 1 # continuous variables get level 1
for(z in 1:nNode)
{
if(type[z]=="c") {
emp_lev[z] <- length(unique(data[,z]))
data[,z] <- as.factor(data[,z]) #turn categoricals into factors (for formula function)
}
}
# warn if the entered levels disagree with the empirical levels (empirical levels are used)
if(sum(emp_lev[type=="c"]!=lev[type=="c"])) warning("Entered levels are not equal to empirical levels. Empirical levels are used.")
# indexing dummies that locate parameters involving categorical variables in
# the parameter matrix: glmnet estimates m-1 (not m) coefficients for an
# m-level categorical predictor, so we must know which slots get filled
dummy_par.sort <- logical() # TRUE for each slot of a row that receives an estimated parameter
dummy_levels <- numeric() # number of estimated parameters per variable (m-1 for categoricals, 1 otherwise)
for(i in 1:nNode)
{
lev.e <- 1
tar.e <- TRUE
if(emp_lev[i]>1)
{
# the first category acts as reference level: no parameter for it
tar.e <- c(FALSE,rep(TRUE,emp_lev[i]-1))
lev.e <- emp_lev[i]-1
}
dummy_par.sort <- c(dummy_par.sort, tar.e)
dummy_levels <- c(dummy_levels, lev.e)
}
# dummy.ind: variable index for every *estimated* parameter slot (length sum(dummy_levels))
dummy_matrix.int <- cbind(dummy_levels, 1:length(dummy_levels))
dummy.ind <- unlist(apply(dummy_matrix.int,1,function(x) { rep(x[2],x[1])}))
# ind: variable index for every row/column of the full parameter matrix (length sum(emp_lev))
dummy_matrix <- cbind(emp_lev, 1:length(emp_lev))
ind <- as.numeric(unlist(apply(dummy_matrix,1,function(x) { rep(x[2],x[1])})))
# step 3: create storage for parameters
# full parameter matrix: one row/column per category level (one per continuous variable)
model.par.matrix <- matrix(0, sum(emp_lev), sum(emp_lev))
m_lambdas <- matrix(0,nNode,2) # per node: [,1] selected lambda, [,2] tau threshold
# progress bar over the nNode node-wise regressions
if(pbar==TRUE) {
pb <- txtProgressBar(min = 0, max=nNode, initial=0, char="-", style = 3)
}
# step 4: estimation -- regress each variable v on all other variables
for(v in seq_len(nNode))
{
# step 4.1: compute design matrix (adding interactions as a function of d)
# d = 1 gives main effects only; d > 1 adds interaction terms up to order d
if(d>(nNode-1)) {
stop("Order of interactions can be maximal the number of predictors!")
} else if (d==1){ form <- as.formula(paste(colnames(data)[v],"~ (.)"))
} else { form <- as.formula(paste(colnames(data)[v],"~ (.)^",d)) }
X <- model.matrix(form, data=data)[,-1] # drop intercept column; factors are dummy-coded here
# define the glmnet family (link function) from the type of the response variable
if(type[v]=="c") {
fam <- "multinomial"
} else if(type[v]=="g" | type[v]=="e") { #should be inverse link for "e", but currently not avail. for glmnet
fam <- "gaussian"
} else if(type[v]=="p") {
fam <- "poisson"
}
# step 4.2: fit the lasso path with glmnet and select lambda
# lambda selection with EBIC
if(lambda.sel=="EBIC") {
fit <- glmnet(X, data[,v], family=fam, alpha=1)
# glmnet does not report the (pseudo) log likelihood, so we reconstruct it
# from the deviance: first the intercept-only (null) log likelihood ...
if(type[v]=="g") {
mean.i <- coef(fit, s=1)[1] # mean given by intercept-only model (large penalty s)
LL_null <- sum(dnorm(data[,v],mean.i,1, log=TRUE))
} else if(type[v]=="e") {
mean.i <- coef(fit, s=1)[1] # mean given by intercept-only model
LL_null <- sum(dnorm(data[,v],mean.i,1, log=TRUE)) # exponential handled as Gaussian (see link-function note above)
} else if(type[v]=="p") {
mean.i <- coef(fit, s=1)[1] # intercept on the log scale
LL_null <- sum(dpois(data[,v],exp(mean.i), log=TRUE))
} else if(type[v]=="c") {
n_cats <- emp_lev[v]
# build an indicator matrix with a 1 in the column of the observed category.
# The category code sits in column n_cats+1 of ind_dum.
# BUG FIX: this index was hard-coded as x[4], which is only correct for
# exactly 3 categories (out-of-bounds / no-op for other category counts).
ind_dum <- cbind(matrix(0,n,n_cats), as.numeric(data[,v]))
ind_mat <- t(apply(ind_dum, 1, function(x) {x[x[n_cats+1]] <- 1; return(x[1:n_cats])} ))
int_mat <- matrix(0,n,n_cats)
for(ca in 1:n_cats) { int_mat[,ca] <- coef(fit, s=1)[[ca]][1] }
LL_null <- 1/n * (sum(ind_mat * int_mat) - n*log(sum(exp(int_mat[1,]))) ) #LL multinomial from glmnet paper
}
# ... then the saturated log likelihood via the null deviance ...
LL_sat <- 1/2 * fit$nulldev + LL_null
# ... and the log likelihood along the whole lambda path.
# deviance() dispatches to glmnet's method; no need for the unexported
# glmnet:::deviance.glmnet
dev <- deviance(fit)
LL <- - 1/2 * dev + LL_sat
n_lambdas <- length(fit$lambda)
# size of the nonzero neighborhood per lambda (the EBIC model-size term)
if(type[v]!="c") { #continuous case
coefs_bin <- as.matrix(coef(fit)[-1,]) != 0 #nonzero?
n_neighbors <- colSums(coefs_bin)
}
if(type[v]=="c"){ #categorical case
m_neighbors <- matrix(0,ncol=n_lambdas, nrow=n_cats) # NOTE(review): appears unused in this chunk -- kept in case later code reads it
coefs_bin <- vector("list", length=n_cats)
for(ca in 1:n_cats){
coefs_bin[[ca]] <- as.matrix(coef(fit)[[ca]][-1,]) != 0 #nonzero?
}
n_neighbors <- colSums(Reduce('+', coefs_bin)!=0) #rule: a predictor has a nonzero parameter with 1 category of the y, then we have a neighborhood relation
}
# compute EBIC over the whole lambda path and pick the minimizing lambda.
# which.min() always returns a single index; the previous
# which(EBIC_lambda==min(EBIC_lambda)) could return several indices on ties,
# making lambda_select a vector and breaking coef(fit, s=...) downstream.
EBIC_lambda <- -2*LL + n_neighbors * log(n) + 2*gam*n_neighbors*log(ncol(X))
lambda_select <- fit$lambda[which.min(EBIC_lambda)]
coefs <- coef(fit, s=lambda_select) # coefficients at the EBIC-optimal lambda
# lambda selection with CV
} else {
fit <- cv.glmnet(X, data[,v], family=fam, alpha=1, nfolds=folds, type.measure = "deviance")
lambda_select <- fit$lambda.min
coefs <- coef(fit, s=lambda_select)
} # end of estimation;
# reshape the coefficient list into a matrix and cut out the intercept column
coefsm <- matrix(do.call(rbind,lapply(coefs, as.numeric)),nrow=emp_lev[v])[,-1]
# step 4.3: save lambda + save & apply the tau threshold
# tau = sqrt(d) * ||beta||_2 * sqrt(log(p)/n); coefficients below it are zeroed
m_lambdas[v,2] <- bound <- sqrt(d) * sqrt(sum(coefsm^2)) * sqrt(log(nNode)/n)
m_lambdas[v,1] <- lambda_select
coefsm[abs(coefsm)<bound]<-0 # apply tau threshold
# step 4.4: write the thresholded coefficients into model.par.matrix
# dummy_par.sort.v marks, within row v's off-diagonal slots, which columns
# actually received an estimated parameter (reference categories get none)
dummy_par.sort.v <- dummy_par.sort[ind!=v]
# fill the row(s) of the parameter matrix belonging to node v
# (nested `[<-` replacement: R writes the modified submatrix back in place)
# continuous response: a single row
if(emp_lev[v]==1) {
exp.n.c <- length(model.par.matrix[ind==v,ind!=v][dummy_par.sort.v]) #number of coefficients
model.par.matrix[ind==v,ind!=v][dummy_par.sort.v] <- coefsm[1:(exp.n.c)]
} else { # categorical response: one row per category
for(L in 1:emp_lev[v])
{
exp.n.c <- length(model.par.matrix[ind==v,ind!=v][,dummy_par.sort.v][L,])
model.par.matrix[ind==v,ind!=v][,dummy_par.sort.v][L,] <- coefsm[L,1:(exp.n.c)]
}
}
# progress bar update (NOTE(review): no close(pb) visible in this chunk -- confirm it is closed after the loop)
if(pbar==TRUE) {
setTxtProgressBar(pb, v)
}
} # end variable-loop
# step 5: derivates on model parameter matrix
# 5.1: aggregate on within categories
# f_agg_cats: collapse the dummy-coded parameter matrix into one nNode x nNode
# matrix of edge weights by averaging absolute coefficients -- first across the
# dummy columns belonging to each predictor variable, then across the rows
# (categories) belonging to each response variable.
# Free variables taken from the enclosing scope (assumed, not visible in this
# chunk): dummy_par.sort, dummy.ind, ind, nNode -- TODO confirm definitions.
# rule == "AND" zeroes an aggregated entry unless *all* contributing
# coefficients are nonzero; any other value behaves like "OR" (plain mean).
f_agg_cats <- function(model.par.matrix, rule) {
#select only columns where parameters are actually estimated (glmnet estimates k-1 not k parameters)
m.p.m <- model.par.matrix[,dummy_par.sort]
# averaging over columns: one aggregated value per (row, predictor variable)
m.p.m.1 <- t(apply(m.p.m, 1, function(x) {
out <- numeric(0)
for(i in 1:nNode)
{
out.n <- mean(abs(x[dummy.ind==i])) #without abs, this keeps the sign; but because of the glmnet parameterization in categoricals it burries nonzero coefficients in the binary case
if(rule=="AND") {
out.n <- out.n * (sum(x[dummy.ind==i]==0)<1) #the second term = 0 when not all coefficients are nonzero
}
out <- rbind(out, out.n)
}
out <- matrix(out, nrow=1) # last expression: returned as this row's result
}))
# averaging over rows: one aggregated value per (response variable, variable)
m.p.m.2 <- apply(m.p.m.1, 2, function(x) {
out <- numeric()
for(i in 1:nNode)
{
out.n <- mean(abs(x[ind==i]))
if(rule=="AND") {
out.n <- out.n * (sum(x[ind==i]==0)<1) #the second term = 0 when not all coefficients are nonzero
}
out <- rbind(out, out.n)
}
out <- matrix(out, ncol=1)
})
# value of the last assignment (the aggregated nNode x nNode matrix) is returned
} #end of function
#costumized rule
if(rule.cat!="OR")
{
#check on matrix
if(sum(dim(rule.cat) == dim(model.par.matrix))!=2) stop("rule.cat must have the same dimension as the parameter matrix!")
#apply rule
ind_costcat <- ((model.par.matrix + rule.cat)!=1)*1
diag(ind_costcat)<-0
ind_costcat_agg <- f_agg_cats(ind_costcat, "AND")
m.p.m.2 <- f_agg_cats(model.par.matrix, "OR") * ind_costcat_agg
#standard rule: OR which leads to edges that indicate conditional independence
} else {
m.p.m.2 <- f_agg_cats(model.par.matrix, "OR")
}
### 5.3: aggregate across two regressions
if(rule.reg=="AND") {
m.p.m.2_nonzero <- m.p.m.2!=0
m.p.m.2_nonzero <- m.p.m.2_nonzero * t(m.p.m.2_nonzero)
m.p.m.2 <- m.p.m.2 * m.p.m.2_nonzero
}
#make matrices symmetric (taking the average)
wadj <- (m.p.m.2 + t(m.p.m.2))/2 #adjacency matrix
mpar.matrix.sym <- (model.par.matrix+t(model.par.matrix)) / 2
#create list mapping: parameters <-> variables as input for qgraph "group"-argument
parvar.map <- parvar.map.label <- vector("list", length=nNode)
ind_map <- 1
for(m in 1:nNode){
#create indices list for qgraph
parvar.map[[m]] <- ind_map:(ind_map+lev[m]-1)
#create labels for qgraph
if(lev[m]==1)
{
parvar.map.label[[m]] <- m
} else {
parvar.map.label[[m]] <- paste(m, 1:lev[m], sep = ".")
}
ind_map <- ind_map + lev[m]
}
parvar.map.label_all <- do.call(c, parvar.map.label)
#dichotomize
adj <- (wadj!=0)*1
# step 6: output
output_list <- list("adj"=adj, "wadj"=wadj, "wpar.matrix" = model.par.matrix,
"wpar.matrix.sym"=mpar.matrix.sym, "parvar.map"=parvar.map,
"parvar.map.labels"=parvar.map.label_all, "lambda"=m_lambdas[,1])
class(output_list) <- "mMRF"
return(output_list)
}
|
a9a861c409403cd61ff31c61915a7e51e5ada9fd | 4680f495ab20b619ddf824584939a1e0356a0ed3 | /scripts/STG/STG_GAME.R | a8fc2126c75fe38b9fea69c716d55019393de75a | [] | no_license | Laurigit/flAImme | 7ca1de5e4dd82177653872f50e90e58aed5968f7 | 9d4b0381d4eedc928d88d0774c0376ba9341774b | refs/heads/master | 2023-05-24T17:06:58.416499 | 2023-04-28T08:10:30 | 2023-04-28T08:10:30 | 251,082,000 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 62 | r | STG_GAME.R | #STG_GAME
# Load the source-layer game table needed by this staging step.
# required_data() is presumably a project-level loader that makes SRC_GAME
# available in the calling environment -- TODO confirm against project utils.
required_data(c("SRC_GAME"))
# The staging table is currently a straight, unmodified copy of the source table.
STG_GAME <- SRC_GAME
|
a3be98ef3fb8b70a80ed44c89e79e81889f5377f | d9aa2ca5271e6c882b86ea43d0db4a7e0a2b3c8f | /SHINYAPP/ui.R | 67e51478a85ce509967bc41ea4df2ddc44b73639 | [] | no_license | papaluigi/CAPSTONE | 1556a03b74f0e502fa015613ba3aac84ab577fec | 65e3ede2bffd73a3cce56232c45278e6ad7b935e | refs/heads/master | 2021-01-01T05:16:01.695856 | 2016-05-31T06:29:42 | 2016-05-31T06:29:42 | 59,186,122 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,085 | r | ui.R | library(shiny)
# Single-page Shiny UI for the next-word prediction app: a sidebar on the left
# holding the text input and dynamically rendered suggestions, and a tabset
# (Main / Plots / Code) on the right.
shinyUI(fluidPage(
# Application title
titlePanel("Capstone Project"),
# Sidebar: branding, the free-text input, and the server-rendered suggestion
# area (the server fills output$dynamic as the user types).
sidebarLayout(
sidebarPanel(
h2("Next Word Prediction App"),
p("by Louis-Ferdinand Goffin, aka Papaluigi"),
br(),
img(src = "press.jpg", height=100),
br(),
br(),
# Input the server watches; suggestions update on the fly as text changes.
textInput(inputId="sent1", label="Type your text here"),
br(),
#p("Next word proposed is :"),
uiOutput("dynamic")
),
# Output: tabbed main area. NOTE(review): the displayed strings below contain
# typos ("algoritm", "prefered samartphone") -- fix the literals if user-facing
# polish matters; left byte-identical here.
mainPanel(
tabsetPanel(
# Instructions and app description (static text only).
tabPanel("Main",
h2("Instructions"),
p("Just type any sequence of words in the input field on the left panel. The next words proposed by the algoritm will appear below the input field."),
p("The list of proposed words in this panel is limited to 3, from the most probable to the less probable. You can switch to the Plots panel if you want to display the most relevant N-grams with their respective frequencies."),
p("The App aims to predict the next word ", strong("on the fly"), ", ie as long as you type (same feature as on your prefered samartphone)."),
h2("App Principles"),
p("Algorithm is based upon a ", strong("Katz's Back-off"), " process which uses Tri-grams, Bi-grams and Uni-grams analysis in order to identify the most probable word to come. Analysis starts with tri-gram match if possible, switches to bi-grams if not, and to uni-grams if not match is found in bi-grams."),
p("These N-grams have been computed using a 1.4 millions words sample extracted from the blog dataset provided by the course, after various text cleaning operations."),
h2("Optimizations performed"),
p("Special attention has been brought to ", strong("App weight"),". N-grams files initially weight 1.5MB, 19MB and 47MB for Uni-grams, Bi-grams and Tri-grams respectively. Inspired by Paul & Klein, I realized an indexation of Bi-grams and Tri-grams files, using Uni-grams as a reference for index. This led to a significant reduction of the size of the files to 10MB and 15MB respectively."),
p("Focus has also been set on ", strong("User Experience, especially App speed."), " Table lookup instructions have been optimized, and an additional speed gain has been realized by keeping in the Uni-grams table only the words cumulating 90% of the words usage in the training set. This compromise does not significantly impact accuracy."),
p("Last but not least, UI is ", strong("dynamic"), " : depending on a next word is found or not, then UI is updated on the left panel and in the Plot panel."),
p("The App currently does not allow to select identified words in order to update the input.")
#verbatimTextOutput("word1")
#dataTableOutput("DT1")
), #End of Main tabpanel
# N-gram frequency plot rendered by the server into output$plot1.
tabPanel("Plots",
plotOutput("plot1")
#dataTableOutput("DT1")
), # End of Plots tabpanel
tabPanel("Code",
h2("Code can be found on my GitHub")
#dataTableOutput("DT1")
)
) #End of tabsetPanel
) # End of mainPanel
)
))
|
455d2e224dfc7e9aab42d752aa4ee2f8e7d7827d | 4f843f261146f3b70a57623dc1750939d78a1aaf | /.svn/pristine/45/455d2e224dfc7e9aab42d752aa4ee2f8e7d7827d.svn-base | b8d36135f5d7dfe622dbbd9d6b9865d3007bbc7a | [] | no_license | xiang-chen-git/ecomove | 502d3985583241782ac62641a0e8e9ff8865f0ba | 97281cc7f2e3bc31ef175e5330fe918bf5a92237 | refs/heads/master | 2021-01-10T01:17:37.723055 | 2015-10-26T16:37:25 | 2015-10-26T16:37:25 | 44,982,305 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,484 | 455d2e224dfc7e9aab42d752aa4ee2f8e7d7827d.svn-base | ##' Get iteration similar to mode of multi-state chains
##'
##' Get the assigned states from the iteration from the chain most similar to the mode state at each step. If the data used
##' to fit the JAGS model is provided (a data frame generated by \code{GetVT} in \code{\link[bcpa:bcpa-package]{bcpa}}), then a data frame containing the state and time for
##' each step in the track is returned. Otherwise, just the vector of states is returned.
##'
##' @param jagsResults An \code{\link[=rjags-class]{rjags}} result
##' @param data (optional) The data used to fit the \code{jagsResults} containing a \code{T.POSIX} column.
##' @return Return a vector (or data frame including time) of the states from the iteration most closely matching the mode states.
##' @seealso \link{getModeStates}
##' @author Chloe Bracis
getIterationSimilarModeStates <- function(jagsResults, data = NULL)
{
	# Mode state at each step: the reference every sampled iteration is compared to.
	modeStates <- getModeStates(jagsResults)
	stateDraws <- .getIdxArray(jagsResults)
	# Stack the chains on top of each other: (iterations * chains) x steps matrix.
	arrayDims <- dim(stateDraws)
	stateDraws <- matrix(stateDraws, nrow = arrayDims[1] * arrayDims[2], ncol = arrayDims[3])
	# Squared distance of every iteration from the mode states; keep the closest
	# (ties resolved by the first match, as which.min does).
	distances <- apply(stateDraws, 1, function(draw) sum((draw - modeStates)^2))
	bestIteration <- stateDraws[which.min(distances), ]
	# Without accompanying data, return just the state vector.
	if (is.null(data)) {
		return(bestIteration)
	}
	if (nrow(data) != length(bestIteration)) stop("Provided data must be same length as data used to fit jagsResults object")
	# With data, pair each step's state with its observation time.
	data.frame(State = bestIteration, Time = data$T.POSIX)
}
| |
c0f3b49c8ebfe981f56cc77ebde62302200d0f09 | 5355ce6341489f05dc5894a70cf5cff1f951a194 | /man/DFA.Rd | 7a33604dfdafc6a91919722b135c0c69b1b7b664 | [] | no_license | AndreMikulec/econModel | 5032565f1722275425f75b55811493d45bf87f8c | 22b8507838116d3e33b6e40cf891988ad104ac7b | refs/heads/master | 2023-06-03T19:23:00.544927 | 2021-06-26T07:38:20 | 2021-06-26T07:38:20 | 303,683,123 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,195 | rd | DFA.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{DFA}
\alias{DFA}
\title{Differences by using Absolute Change}
\usage{
DFA(x, l = 1, d = 1, ...)
}
\arguments{
\item{x}{xts object}
\item{l}{lag}
\item{d}{differences}
\item{...}{further arguments passed on}
}
\value{
xts object
}
\description{
\preformatted{
Differences by using Absolute Change
}
}
\examples{
\dontrun{
# DFA(Differences by using Absolute Change) examples
xts(matrix(c(1,-2,-4,8,16,32), ncol = 2), zoo::as.Date(0:2))
[,1] [,2]
1970-01-01 1 8
1970-01-02 -2 16
1970-01-03 -4 32
DFA(xts(matrix(c(1,-2,-4,8,16,32), ncol = 2), zoo::as.Date(0:2)))
V1dfa.2.1 V2dfa.2.1
1970-01-01 NA NA
1970-01-02 -3 8
1970-01-03 -2 16
DFA(xts(matrix(c(1,-2,-4,8,16,32), ncol = 2), zoo::as.Date(0:2)), d = 2)
V1dfa.2.1 V2dfa.2.1
1970-01-01 NA NA
1970-01-02 NA NA
1970-01-03 1 8
DFA(xts(matrix(c(1,-2,-4,8,16,32), ncol = 2), zoo::as.Date(0:2)), l = 2)
V1dfa.2.1 V2dfa.2.1
1970-01-01 NA NA
1970-01-02 NA NA
1970-01-03 -5 24
}
}
|
83801476d64897c556a3051b26f4478204f91658 | b7cbca382716af544696bb782f2faeca6ee7caf6 | /heatmap_corr_matrix.R | be3683075d3ec2aa3e036d50dbcdca263ec28afb | [] | no_license | WPierrick/Multitrait_GWAS | 88e4b28e025e5fccbe49a345fd7195da94d0a181 | 0b1ddcb39ac7c97293991689dc5eb44f2c45e214 | refs/heads/master | 2021-07-22T03:55:30.386767 | 2017-10-31T06:34:55 | 2017-10-31T06:34:55 | 108,954,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 586 | r | heatmap_corr_matrix.R | install.packages("d3heatmap")
# Plot a trait-correlation matrix as both an interactive (d3heatmap) and a
# static (heatmap.2) heatmap, with no clustering/reordering of rows or columns.
library("d3heatmap")
# Tab-separated file despite read.csv(); sep = "\t" overrides the comma default.
# NOTE(review): hard-coded absolute path -- only works on the author's machine.
corrmat <- read.csv("/Users/p.wainschtein/Documents/Multivariate_GWAS/correlation_matrix.csv", header = TRUE, sep = "\t")
# First column holds the row labels; promote it to row names, then drop it.
row.names(corrmat) <- corrmat[,1]
corrmat$X <- NULL
# Strip the sumstats-file suffix from the trait names on both axes.
colnames(corrmat) <- gsub("_colsubset.txt_munged.sumstats.gz", "", paste(colnames(corrmat)))
rownames(corrmat) <- gsub("_colsubset.txt_munged.sumstats.gz", "", paste(rownames(corrmat)))
# Interactive heatmap; dendrogram/Rowv/Colv disabled so the matrix order is kept.
d3heatmap(as.matrix(corrmat), dendrogram='none', Rowv=FALSE, Colv=FALSE,trace='none')
# NOTE(review): heatmap.2() comes from the gplots package, which is never
# loaded in this script -- this line fails unless gplots is attached elsewhere.
heatmap.2(as.matrix(corrmat), dendrogram='none', Rowv=FALSE, Colv=FALSE,trace='none')
|
5cde3ffa69370becd11afa60621e9d0d2ca054d2 | 1380a6e8ffa98cea38e39a006e79cd364463dab4 | /R/iso_gapfill.R | eeec3252a376b215d2290ebe0e14f14b5800946f | [] | no_license | cvoter/CSLSiso | 7ec1227c474e9691b3ac5733b6871fa77513c142 | d86f068a9b5b0ce35e098ef84c08929bec9d14b0 | refs/heads/master | 2021-05-20T09:05:07.354843 | 2020-03-24T22:29:16 | 2020-03-24T22:29:16 | 206,828,195 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,015 | r | iso_gapfill.R | #' Fill Gaps in Stable Isotope Measurements
#'
#' Linearly interolates lake and GW values for dates in timeseries with missing
#' measurements. Depending on arguments, fills in precipitation values using 1)
#' measurments collected that month, and 2) Maribeth Kniffin's data, also from
#' Hancock station.
#'
#' @param monthly_isotopes a data frame with monthly isotope measurements for
#' all dates of desired timeseries (with NAs for months
#' with no measurements)
#'
#' @return monthly_isotopes - the same data frame provided to the function, but
#' with d18O_lake values filled in for all months
#' (except maybe the very first and last months).
#'
#' @importFrom magrittr %>%
#' @importFrom dplyr filter select summarise
#' @import lubridate
#' @importFrom rlang .data
#' @importFrom zoo read.zoo na.approx
#'
#' @export
iso_gapfill <- function(monthly_isotopes) {
  # 1. Lake & GW: linearly interpolate interior NAs; na.approx rule = 2 also
  #    carries the first/last observed value out to the ends of the series.
  zoo_iso <- read.zoo(monthly_isotopes, index.name = "date")
  zoo_iso <- as.data.frame(na.approx(zoo_iso, rule = 2))
  for (col in c("d18O_lake", "d2H_lake", "d18O_GWin", "d2H_GWin",
                "d18O_GWout", "d2H_GWout")) {
    monthly_isotopes[[col]] <- zoo_iso[[col]]
  }
  # 2. Precip: replace every value with the mean of all CSLS measurements from
  #    the same calendar month (pooled across years). Month means are computed
  #    once from the *original* measurements, so the result no longer depends
  #    on row order -- the previous row-by-row overwrite fed already-filled
  #    rows back into the means of later rows of the same calendar month.
  month_num <- month(monthly_isotopes$date)
  for (col in c("d18O_pcpn", "d2H_pcpn")) {
    month_means <- tapply(monthly_isotopes[[col]], month_num, mean, na.rm = TRUE)
    filled <- as.numeric(month_means[as.character(month_num)])
    filled[is.nan(filled)] <- NA # calendar months with no measurements at all
    monthly_isotopes[[col]] <- filled
  }
  # 3. Precip: where step 2 left an NA (no CSLS measurement in that calendar
  #    month), fall back to the calendar-month mean of Maribeth Kniffin's
  #    Hancock "PRECIP" site data. As before, both d18O and d2H are replaced
  #    only when d18O is missing.
  kniffin_isotopes <- CSLSdata::kniffin$isotopes
  for (i in seq_len(nrow(monthly_isotopes))) {
    if (!is.na(monthly_isotopes$d18O_pcpn[i])) next
    this_kniffin <- kniffin_isotopes %>%
                    filter(month(.data$date) == month_num[i],
                           .data$site_id == "PRECIP")
    if (nrow(this_kniffin) == 0) {
      # No Kniffin data for this calendar month either: stays missing.
      monthly_isotopes$d18O_pcpn[i] <- NA
      monthly_isotopes$d2H_pcpn[i] <- NA
    } else {
      monthly_isotopes$d18O_pcpn[i] <- mean(this_kniffin$d18O, na.rm = TRUE)
      monthly_isotopes$d2H_pcpn[i] <- mean(this_kniffin$d2H, na.rm = TRUE)
    }
  }
  return(monthly_isotopes)
}
|
1dc03cc6baddde81faf3eba30ee29b3d38323fd7 | 436ace74a695893aad73229b723fac6be6814129 | /man/sobolowen.Rd | 6adfa072a68aeba571fab6f62def127399f17e8f | [] | no_license | cran/sensitivity | 18657169c915857dcde8af872e0048fef77107f4 | 2b2cbcb7f1bebecfd05e589e459fdf4334df3af1 | refs/heads/master | 2023-04-06T05:36:54.290801 | 2023-03-19T18:10:02 | 2023-03-19T18:10:02 | 17,699,584 | 17 | 17 | null | 2021-04-07T00:57:30 | 2014-03-13T06:16:44 | R | UTF-8 | R | false | false | 4,181 | rd | sobolowen.Rd | \name{sobolowen}
\alias{sobolowen}
\alias{tell.sobolowen}
\alias{print.sobolowen}
\alias{plot.sobolowen}
\alias{ggplot.sobolowen}
\title{Monte Carlo Estimation of Sobol' Indices (improved formulas of Owen (2013)}
\description{
\code{sobolowen} implements the Monte Carlo estimation of
the Sobol' indices for both first-order and total indices at the same
time (alltogether \eqn{2p}{2p} indices). Take as input 3 independent matrices.
These are called the Owen estimators.
}
\usage{
sobolowen(model = NULL, X1, X2, X3, nboot = 0, conf = 0.95, varest = 2, \dots)
\method{tell}{sobolowen}(x, y = NULL, return.var = NULL, varest = 2, \dots)
\method{print}{sobolowen}(x, \dots)
\method{plot}{sobolowen}(x, ylim = c(0, 1), \dots)
\method{ggplot}{sobolowen}(data, mapping = aes(), ylim = c(0, 1), \dots, environment
= parent.frame())
}
\arguments{
\item{model}{a function, or a model with a \code{predict} method,
defining the model to analyze.}
\item{X1}{the first random sample.}
\item{X2}{the second random sample.}
\item{X3}{the third random sample.}
\item{nboot}{the number of bootstrap replicates.}
\item{conf}{the confidence level for bootstrap confidence intervals.}
\item{varest}{choice for the variance estimator for the denominator of
the Sobol' indices. varest=1 is for a classical estimator.
varest=2 (default) is for the estimator proposed in Janon et al. (2012).}
\item{x}{a list of class \code{"sobolowen"} storing the state of the
sensitivity study (parameters, data, estimates).}
\item{data}{a list of class \code{"sobolowen"} storing the state of the
sensitivity study (parameters, data, estimates).}
\item{y}{a vector of model responses.}
\item{return.var}{a vector of character strings giving further
internal variables names to store in the output object \code{x}.}
\item{ylim}{y-coordinate plotting limits.}
\item{mapping}{Default list of aesthetic mappings to use for plot. If not specified,
must be supplied in each layer added to the plot.}
\item{environment}{[Deprecated] Used prior to tidy evaluation.}
\item{\dots}{any other arguments for \code{model} which are passed
unchanged each time it is called}
}
\value{
\code{sobolowen} returns a list of class \code{"sobolowen"}, containing all
the input arguments detailed before, plus the following components:
\item{call}{the matched call.}
\item{X}{a \code{data.frame} containing the design of experiments.}
\item{y}{the response used}
\item{V}{the estimations of Variances of the Conditional Expectations
(VCE) with respect to each factor and also with respect to the
complementary set of each factor ("all but \eqn{X_i}{Xi}").}
\item{S}{the estimations of the Sobol' first-order indices.}
\item{T}{the estimations of the Sobol' total sensitivity indices.}
  Users can ask for more output variables with the argument
\code{return.var} (for example, bootstrap outputs \code{V.boot},
\code{S.boot} and \code{T.boot}).
}
\references{
A. Owen, 2013, \emph{Better estimations of small Sobol' sensitivity indices},
ACM Transactions on Modeling and Computer Simulations (TOMACS), 23(2), 11.
Janon, A., Klein T., Lagnoux A., Nodet M., Prieur C. (2012), Asymptotic
normality and efficiency of two Sobol index estimators. Accepted in
ESAIM: Probability and Statistics.
}
\author{
Taieb Touati and Bernardo Ramos
}
\seealso{
\code{\link{sobol}, \link{sobol2002}, \link{sobolSalt}, \link{sobol2007}, \link{soboljansen}, \link{sobolmartinez}, \link{sobolEff}}
}
\examples{
# Test case : the non-monotonic Sobol g-function
# The method of sobolowen requires 3 samples
# There are 8 factors, all following the uniform distribution
# on [0,1]
library(boot)
n <- 1000
X1 <- data.frame(matrix(runif(8 * n), nrow = n))
X2 <- data.frame(matrix(runif(8 * n), nrow = n))
X3 <- data.frame(matrix(runif(8 * n), nrow = n))
# sensitivity analysis
\donttest{
x <- sobolowen(model = sobol.fun, X1, X2, X3, nboot = 10) # put nboot=100
print(x)
plot(x)
library(ggplot2)
ggplot(x)
}
}
\keyword{design}
|
1b66497e0eadef22a31febd20a90b99225bbf9e7 | 4a51ae8d7a874a7a087fd3c0fe9ac3f000b5ae79 | /IHWStatsPaper/man/tau_weighted_bh.Rd | b59ffca0f3a2b6755bc18c59030e57b4a2194bd3 | [
"MIT"
] | permissive | Huber-group-EMBL/covariate-powered-cross-weighted-multiple-testing | bf6feaba28c35360c42a9dbe6194272d5f8570c8 | 40d485f972fd2c03b371b98d3cad3d7f1a48520c | refs/heads/master | 2023-07-02T14:29:13.421945 | 2021-08-08T17:19:12 | 2021-08-08T17:19:12 | 233,483,127 | 1 | 3 | null | null | null | null | UTF-8 | R | false | true | 548 | rd | tau_weighted_bh.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ihw_and_weighting.R
\name{tau_weighted_bh}
\alias{tau_weighted_bh}
\title{The tau-weighted BH multiple testing procedure}
\usage{
tau_weighted_bh(Ps, ws, tau = 0.5)
}
\arguments{
\item{Ps}{Numeric vector of unadjusted p-values.}
\item{ws}{Numeric vector of multiple testing weights}
\item{tau}{Numeric (default = 0.5), the level at which tau-censoring is applied.}
}
\value{
Vector of adjusted p-values
}
\description{
The tau-weighted BH multiple testing procedure
}
|
9805dda69e48faf2eb2feb17ea98c6ff61edb9d8 | 9505d8ab047a5d6e35e10745d99ee1c268e2be4a | /ard_var.R | 32c49adc63008dee8be5be5e9cd3ceb54b65c282 | [] | no_license | mloop/r-scripts | 9a9326f802f8e4ce1d3ae2c76a18674d79199ba8 | da01ff94360c9fdff1395c08b2e3e0694b490875 | refs/heads/master | 2021-01-10T06:45:08.457307 | 2016-04-01T17:04:41 | 2016-04-01T17:04:41 | 52,898,889 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 235 | r | ard_var.R | # Purpose: calculate variance of an absolute rate difference
# Author: Matthew Shane Loop
# Variance of the difference of two incidence rates:
#   Var(rate1 - rate2) = events1/person_time1^2 + events2/person_time2^2
# (consistent with treating each event count as approximately Poisson --
# TODO confirm the intended model with the author).
# Arguments are numeric scalars or vectors (combined elementwise, with the
# usual R recycling):
#   events1, events2           -- event counts in the two groups
#   person_time1, person_time2 -- person-time at risk in the two groups
# Returns the variance of the absolute rate difference.
ard_var <- function(events1, events2, person_time1, person_time2){
  # named 'variance' (not 'var') so base stats::var() is not shadowed;
  # the trailing explicit return() was dropped -- last expression is returned
  variance <- events1/(person_time1)^2 + events2/(person_time2)^2
  variance
}
d5409b3622a31f2f5b4fc10649f43bfc67e68ee0 | 0484ddd6f392fecfa542747f550248bba6a9bf2a | /man/filter_args.Rd | 21494768cbfc148d7602cd8c70e228aeb9a9c7e0 | [] | no_license | lengning/gClinBiomarker | d0115d4a699ca12866b9776c6a3835d9c0ece6c9 | 726d3bb9edbd8ecc450fc650ea7ab9922737629b | refs/heads/master | 2021-10-24T06:16:06.064819 | 2019-03-22T18:25:07 | 2019-03-22T18:25:07 | 125,939,464 | 5 | 1 | null | null | null | null | UTF-8 | R | false | true | 699 | rd | filter_args.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot-ggpackets-.r
\name{filter_args}
\alias{filter_args}
\title{Helper function for ggpack to filter arguments based on a prefix}
\usage{
filter_args(prefix, args, sep = "\\\\.")
}
\arguments{
\item{prefix}{a string specifying the prefix to be used to filter args}
\item{args}{a list of arguments to be subset by the prefix match}
\item{sep}{a regex joining string between prefix and args.
Defaults to \code{"\\\\."}.}
}
\value{
a list of arguments that originally were prefaced by the
specified prefix, now with that prefix removed.
}
\description{
Helper function for ggpack to filter arguments based on a prefix
}
|
baa4edcc8d8a1d49f29f22ded06e0fd2b8548481 | f29e1bbb05d7cf6c9136a6eb413038e2353d40f7 | /tests/testthat/testplotbetas.R | bad99c3941d65c8117c0b09ba4fb900b3ef0cf0f | [] | no_license | LiamDBailey/climwin | 46fdb4e668e125a8064de090473864d3aedd0c5e | 3c28479c04ba858e83f6d6f3dcab8758d40e5751 | refs/heads/master | 2023-02-02T21:35:12.772236 | 2020-05-25T09:55:21 | 2020-05-25T09:55:21 | 32,844,500 | 12 | 10 | null | 2023-01-24T15:13:52 | 2015-03-25T05:29:40 | R | UTF-8 | R | false | false | 1,357 | r | testplotbetas.R | # Test function plotbetas #
# Smoke test: plotbetas() should return a ggplot object for a linear fit and,
# when plotall = TRUE, write additional beta plots ("beta2"/"beta3") into the
# supplied environment for quadratic and cubic fits.
test_that("plotbetas produces a graph", {
  # Example datasets shipped with the package, loaded into this test's scope.
  data(Mass, envir = environment())
  data(MassClimate, envir = environment())
  # Minimal sliding-window fit (tiny range) to get a Dataset for plotting.
  testdata <- slidingwin(xvar = list(MassClimate$Temp), cdate = MassClimate$Date, bdate = Mass$Date,
                         baseline = lm(Mass ~ 1, data = Mass), range = c(3, 2),
                         type = "relative", stat = "max", func = "lin", cmissing = FALSE)
  testenv <- environment()
  test <- plotbetas(dataset = testdata[[1]]$Dataset)
  # Test that a ggplot object is produced
  expect_true(attr(test, "class")[1] == "gg")
  # Reuse the linear betas as quadratic betas so the func = "quad" branch runs.
  testdata[[1]]$Dataset$ModelBetaQ <- testdata[[1]]$Dataset$ModelBeta
  testdata[[1]]$Dataset$Function <- "quad"
  test <- plotbetas(dataset = testdata[[1]]$Dataset, plotall = TRUE, plotallenv = testenv)
  # Test that a second graph is produced when func = quad
  expect_true(exists("beta2", envir = testenv))
  # Add cubic betas so the func = "cub" branch produces a third plot.
  testdata[[1]]$Dataset$ModelBetaC <- testdata[[1]]$Dataset$ModelBeta
  testdata[[1]]$Dataset$Function <- "cub"
  test <- plotbetas(dataset = testdata[[1]]$Dataset, plotall = TRUE, plotallenv = testenv)
  # Test that a second graph is produced when func = cub
  expect_true(exists("beta2", envir = testenv))
  # Test that a third graph is produced when func = cub
  expect_true(exists("beta3", envir = testenv))
})
fd02cfe4f030ed7631c1afe57d6e6dc7995ba1ed | c1817ca808304d6eb606c3b38097568180a4d6a2 | /7.2a.r | 9a1cec6228b33dcafac23740be04aeb9543eeea7 | [] | no_license | Achu-nair/assignment-7.2 | 854ce03febc97ff9ecb21a2f75c9854b9863b3af | 591caf6f5335b827abab403fe50a632303310fc8 | refs/heads/master | 2020-04-30T02:32:54.640634 | 2019-04-10T14:19:48 | 2019-04-10T14:19:48 | 176,562,826 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 385 | r | 7.2a.r | colnames(mtcars)
# Build a data frame of the discrete mtcars variables and draw a boxplot of
# each one in a single multi-panel plotting window.
car <- data.frame("cyl"= mtcars$cyl,
                  "vs"= mtcars$vs,
                  "am"= mtcars$am,
                  "gear"= mtcars$gear,
                  "carb"= mtcars$carb)
colnames(car)
# NOTE(review): windows() only exists on Windows builds of R; dev.new() would
# be the platform-independent way to open a device.
windows()
# 3 x 4 grid of panels (only 5 panels are used by the loop below).
par(mfrow=c(3,4))
# seq_along() instead of 1:length(car): avoids the 1:0 trap on an empty frame.
for(i in seq_along(car)){
  boxplot(car[,i],main = paste("Boxplot of ",colnames(car)[i]),varwidth = TRUE,xlab=colnames(car)[i])
}
bf7f2a88c8e58552b4b84f9b848464f4affc36e2 | a341f6d7b93e12ef3ff6dcfd31e5f135725c59f6 | /man/P2C2M-package.Rd | b5e97828e1440a255f4c9bfbedcca2f3e494da9a | [] | no_license | michaelgruenstaeudl/P2C2M_Code | 14aefa4661c4ed97a1f512c89fa3ebf12f3b7800 | 3f1d8679fd02284abec917ff3ae1dddc1db3afcb | refs/heads/master | 2016-09-07T18:27:10.136490 | 2015-06-08T16:10:57 | 2015-06-08T16:10:57 | 37,077,580 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,966 | rd | P2C2M-package.Rd | \name{P2C2M-package}
\alias{P2C2M-package}
\alias{P2C2M}
\docType{package}
\title{
Posterior Predictive Checks of Coalescent Models
}
\description{
\pkg{P2C2M} provides functions to read default output from BEAST
(Drummond and Rambaut 2007) and *BEAST (Heled and Drummond 2010) and
conduct posterior predictive checks of coalescent models (Reid et al.
2014) with the help of data simulation and summary statistics under
various settings.
}
\note{
\bold{Installation Instructions}
To use \pkg{P2C2M}, the default version of Python must be set to
  Python 2.7. Users of unix-like operating systems can ensure that
this requirement is fulfilled by setting the following alias:
\code{echo 'alias python=python2.7' >> ~/.bashrc}
Mandatory and optional dependencies of \pkg{P2C2M} can be installed
automatically via two installation scripts that are co-supplied with
the package. These scripts were designed for unix-like operating
systems and are located in folder /exec. To use these installation
scripts, a correct configuration of python2-setuptools is required.
  Users of unix-like operating systems can ensure a correct
configuration by setting the following alias:
\code{echo 'alias python-config=python2-config' >> ~/.bashrc}
To execute the R installer, please run the following commands in R:
\code{source('/path_to_P2C2M/exec/P2C2M.installRlibs.R')};
\code{p2c2m.install()}
To execute the Python installer, please run the following command in
a terminal:
\code{python /path_to_P2C2M/exec/P2C2M.installPylibs.py}
\emph{Special Note for MacOS}
Users of the MacOS operating system need to install the dependencies
manually. Prior to their installation, please confirm that file
'/usr/bin/python2-config' exists in your file system and that it
points to the Python 2.7 executable. Please refer to \url{http://cran.r-project.org/bin/macosx/RMacOSX-FAQ.html}
on how to install R packages manually. For the manual installation of
Python libraries, please refer to
\url{http://docs.python.org/2/using/mac.html}
\bold{Study Design Requirements}
In the user-supplied data set, every species should be represented by
at least two alleles. Species that are represented by only a single
allele, by contrast, must be specified via option
\code{"single.allele"} and thereby are not included in the
calculation of the summary statistic 'GSI'; misspecifications
causes \pkg{P2C2M} to print the error message \emph{'Error: given
group represents one or fewer taxa. Cannot compute index.'}).
\bold{Input File Requirements}
In order to execute \pkg{P2C2M}, a user must provide a directory with
three different types of input files: (a) a file that contains
species trees, (b) a file that contains gene trees for each gene
under study, and (c) an XML-formatted file generated by BEAUTi, the
input generator of BEAST (Drummond and Rambaut 2007). A species tree
file contains a draw of s generations from the posterior distribution
of species trees. Each gene tree file contains an equally large draw
from the respective posterior distribution of ultrametric
genealogies. Please note that the generations recorded in the species
tree file must match those in the gene tree files exactly. The input
file generated by BEAUTi is formatted in XML markup language and
represents the starting point for a species tree inference in *BEAST.
Here, it provides information on allele and species names, the
association between alleles and species, and ploidy levels to
\pkg{P2C2M}.
\bold{File Name Requirements}
The following requirements for input file names are in place: The
species tree file must be named 'species.trees'. Each gene tree file
must be named 'g.trees', where the letter g is substituted with the
actual name of the gene. The name of the xml-formatted input file is
not constrained and at the discretion of the user. Please be aware
that \pkg{P2C2M} uses the name of the xml-formatted input file name
to label all subsequent output of the package.
}
\author{
Michael Gruenstaeudl, Noah Reid
Maintainer: Michael Gruenstaeudl \email{mi.gruenstaeudl@gmail.com}
}
\references{
Drummond, A.J. and Rambaut, A. (2007) BEAST: Bayesian evolutionary analysis by sampling trees. \emph{BMC Evolutionary Biology}, \bold{7}, 214.
Gruenstaeudl, M., Reid, N.M., Wheeler, G.R. and Carstens, B.C., submitted. Posterior Predictive Checks of Coalescent Models: P2C2M, an R package.
Heled, J. and Drummond, A.J. (2010) Bayesian inference of species trees from multilocus data. \emph{Molecular Biology And Evolution}, \bold{27}, 570--580.
Reid, N.M., Brown, J.M., Satler, J.D., Pelletier, T.A., McVay, J.D., Hird, S.M. and Carstens, B.C. (2014) Poor fit to the multi-species coalescent model is widely detectable in empirical data. \emph{Systematic Biology}, \bold{63}, 322--333.
}
|
8e49e341d8b8eb3c378a9886b058b7971b0fea42 | 83ae358d90cb1c54c8be380bc7bd628a2f6ed530 | /man/college.Rd | d1fd06b2ad7533642893940bb4c1782888a8bf6b | [] | no_license | cran/Rlab | c7963e1210e2140fc6d397ff6a2cf289f0dd3bd2 | c72e630626f6df15cf75ffd8b9ee7c85322aeda8 | refs/heads/master | 2022-05-28T16:35:40.306539 | 2022-05-04T22:10:02 | 2022-05-04T22:10:02 | 17,693,343 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,172 | rd | college.Rd | \name{college}
\alias{college}
\title{
Statistics on colleges in 15 states.
}
\description{
The data frame college contains statistics relating
to colleges from 15 states. This is a sample of
fifteen states and certain statistics taken from
the Chronicle of Higher Education (most data is for
1992). All entries are in thousands so that Arkansas
(first row) has a population of 2,399,000, a yearly
per capita income of \$15,400, 85,700 undergraduate
students, 7,000 graduate students, and average cost
of tuition and fees at public universities of \$1,540,
and is located in the south (s for south).
}
\format{
A data frame with 15 observations on the following 7 variables (all data in thousands).
\describe{
\item{school:}{State in which school is located.}
\item{pop:}{State population.}
\item{inc:}{Yearly per capita income.}
\item{undergrad:}{Total number of undergraduate students.}
\item{graduate:}{Total number of graduate students.}
\item{fees:}{Average cost of tuition and fees.}
\item{loc:}{Area of the country (s for south, w for west, ne for northeast, mw for midwest).}
}
}
\keyword{datasets}
|
fef0e9948510d850e79c5154bcda3954ecca4430 | af9e48f7a5f4a2ff9547122d866ba7f5dc63a89b | /plot-data.R | 498645fe06bd57217edd0d564990d7509992c26d | [
"MIT"
] | permissive | joethorley/bioRxiv-028274 | 3844d9461755f6d2afd9d2647b7efcf52b8007c2 | 37002f17a9ec7732b25cf91ecf4560caa3d5adeb | refs/heads/master | 2021-01-02T08:36:01.044493 | 2018-07-04T00:57:46 | 2018-07-04T00:57:46 | 99,027,969 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,168 | r | plot-data.R | source("header.R")
# Read the cleaned inputs produced by the upstream cleaning step.
# Plotting helpers (ggplot2, scales' comma, dpi, st_* functions) come from
# header.R, sourced above.
pdo <- readRDS("output/clean/pdo.rds")
leks <- readRDS("output/clean/leks.rds")
wells <- readRDS("output/clean/wells.rds")
groups <- readRDS("output/clean/groups.rds")

# Figure 1: Pacific Decadal Oscillation index by year.
gg_pdo <- ggplot(data = pdo, aes(x = Year, y = PDO)) +
  geom_line() +
  scale_y_continuous(name = "PDO Index")
print(gg_pdo)
ggsave("output/plots/pdo-data.png", width = 3, height = 2, dpi = dpi)

# Convert the spatial objects with st_fortify() for plotting.
# Group centroids (used for labels) are taken from `groups` BEFORE
# `groups` itself is converted -- keep this order.
leks <- st_fortify(leks)
centroids <- st_fortify(st_centroid(groups))
groups <- st_fortify(groups)
wells <- st_fortify(wells)

# Figure 2: map of wells (grey), leks (blue), group outlines and group
# labels, with axes in kilometres.
gg_map <- ggplot(data = leks, aes(x = x/1000, y = y/1000)) +
  geom_point(data = wells, size = 1/20, alpha = 1/3, color = "grey") +
  geom_point(size = 1/5, alpha = 1/2, color = "blue") +
  geom_path(data = groups, aes(group = group), color = "black") +
  geom_text(data = centroids, aes(label = Group), color = "black", size = 4) +
  coord_fixed() +
  scale_x_continuous("Easting (km)", labels = comma) +
  scale_y_continuous("Northing (km)", labels = comma) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
print(gg_map)
ggsave("output/plots/wyoming.png", width = 6, height = 5, dpi = dpi)
|
d29e50c2c850209dc3cb83ed655cf4195e1aaace | a3c78700a65f10714471a0d307ab984e8a71644d | /models/ed/man/prepare_ed_veg_filename.Rd | 72197989520f94e0e2308436ab85134173270ac0 | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | PecanProject/pecan | e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f | ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c | refs/heads/develop | 2023-08-31T23:30:32.388665 | 2023-08-28T13:53:32 | 2023-08-28T13:53:32 | 6,857,384 | 187 | 217 | NOASSERTION | 2023-09-14T01:40:24 | 2012-11-25T23:48:26 | R | UTF-8 | R | false | true | 897 | rd | prepare_ed_veg_filename.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_ed_veg.R
\name{prepare_ed_veg_filename}
\alias{prepare_ed_veg_filename}
\title{Format file name for ED vegetation inputs}
\usage{
prepare_ed_veg_filename(path_prefix, suffix, latitude = NULL, longitude = NULL)
}
\arguments{
\item{path_prefix}{Desired path and prefix (without latitude and longitude)}
\item{suffix}{Character string of filename suffix.}
\item{latitude}{Site latitude coordinate (default = \code{NULL})}
\item{longitude}{Site longitude coordinate (default = \code{NULL})}
}
\value{
Character string of full formatted file path
}
\description{
Adds the latitude and longitude, or checks if they are formatted correctly.
Then, splits the prefix into the directory and base name, appends the suffix
to the base name (adding a starting dot, if necessary), and returns the
filename as a character.
}
|
a32eea8a213f98ca40161e5859e755c1dd745dfb | 85b42853b507be01f6b855558ea691295928ac5f | /man/prim_invoke.Rd | 85e256309a77533f83cd306ab2dd759a90a46553 | [] | no_license | lionelhenry/robin | 8fb1c2827ffaee05613cda63caec879f693fad0b | 0ce31fdef6f71f8c59410e30e90eaaab2e66b65c | refs/heads/master | 2020-04-20T22:27:40.554870 | 2017-08-15T16:22:11 | 2017-08-15T16:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 718 | rd | prim_invoke.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/primitive.R
\name{prim_invoke}
\alias{prim_invoke}
\alias{prim_ptr_invoke}
\title{Invoke a primitive function}
\usage{
prim_invoke(prim, call, args = NULL, rho = caller_env())
prim_ptr_invoke(ptr, call, args = NULL, rho = caller_env())
}
\arguments{
\item{prim}{A primitive function.}
\item{call}{The quoted call that the primitive will think it was
invoked with.}
\item{args}{A pairlist of arguments.}
\item{rho}{The environment that the primitive will think it was
invoked in.}
\item{ptr}{A pointer object to a primitive function.}
}
\description{
Invoke a primitive function
}
\examples{
prim_invoke(list, NULL, pairlist(1, "a"))
}
|
3583fadb69ce6a4854cc05988df37d5004fe9316 | 1a254eae510f03743fd90a001e97f228a2e17fb6 | /Deep_Dives_DL_category.R | d291062cd5dcf8ad8b1980e45b068aca67b06f98 | [] | no_license | jacinthd/Analysis-of-app-data | 9edae7370165f4424b9a1aae342435b42e128484 | f4209837a76c14eb5ba35075729ad7453a7b7336 | refs/heads/master | 2021-04-06T19:39:04.659132 | 2018-03-14T23:54:03 | 2018-03-14T23:54:03 | 125,288,539 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 825 | r | Deep_Dives_DL_category.R | top.1000.apps.by.DL<-head(category_dataset[order(-category_dataset$DTO_DL),])
#top 100 apps from each category
# NOTE(review): the `top.1000.apps.by.DL` assignment above uses head() with
# its default of 6 rows despite the "1000" in its name -- TODO confirm
# whether head(..., 1000) was intended.
# NOTE(review): `test`/`test2` are scratch variables, overwritten per
# category; only the last assignment of each survives.
test<-subset(DL_REV_FULL,Category=="KidsAndFamily")
test2<-head(test[order(-test$DTO_DL),],100)
test<-subset(category_data_DLs_morethan10,Category=="Social")
test2<-head(test[order(-test$DTO_DL),],100)
#get number of apps from each category
# Frequency table of apps per category, plus each category's share of all apps
test<-as.data.frame(table(DL_REV_FULL$Category))
test$contr_to_total<-test$Freq/sum(test$Freq)
#Total DLs
# Total downloads (DTO_DL) per category and each category's share of the total
total.category<-aggregate(DTO_DL~Category,data=DL_REV_FULL,FUN=sum)
total.category$contr_to_total<-total.category$DTO_DL/sum(total.category$DTO_DL)
#get the means by each category
# Mean downloads per category, sorted in decreasing order
means.category.all<-aggregate(DTO_DL~Category,data=DL_REV_FULL,FUN=mean)
means.category.all<-means.category.all[order(-means.category.all$DTO_DL),]
|
27779535200f273b45f82054d61da3f0b7c7c94b | 6fbc0b4555e94e5dbbb1b6ce58ca2cbd98340b43 | /Simulación de pago de pensiones.R | a753324a9a840ac911dc709d04a09ce49ab2a248 | [] | no_license | CEAC333/R-repository | c6dc9b43c89ffe2a50fd280c32068bdc4a36bac0 | 79d8d2640c6f29cc92f8d1d1d005f1600c6dbe6b | refs/heads/master | 2021-05-13T19:02:53.602809 | 2018-01-08T01:32:00 | 2018-01-08T01:32:00 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 4,196 | r | Simulación de pago de pensiones.R | #Simulación de pago de pensiones
# Single-server pension-payment queue simulation.
# Clients arrive with uniform inter-arrival gaps in [TCI, TCF] minutes and
# are served with uniform service times in [TAI, TAF] minutes. The office
# opens at HAM o'clock; every client arriving within HCM minutes of opening
# is served (so closing may happen after the nominal closing hour).
n <- 100    # number of clients to simulate
TCI <- 15   # minimum inter-arrival time between clients (minutes)
TCF <- 30   # maximum inter-arrival time between clients (minutes)
TAI <- 5    # minimum service time per client (minutes)
TAF <- 15   # maximum service time per client (minutes)
HAM <- 8    # opening hour (doors open at 08:00)
HCM <- 420  # last admission, minutes after opening (08:00-15:00)
# Main table C, one row per client:
# client / inter-arrival gap / arrival minute / service time /
# waiting time / departure minute
C <- matrix(0, nrow = n, ncol = 6)
for (i in 1:n) {
  C[i, 1] <- i  # client number
  # Inter-arrival gap: uniform on [TCI, TCF].
  # FIX: the original used runif(1, 0, 1) * (TCF - TCI), which yields values
  # in [0, TCF - TCI] and ignores the documented minimum gap TCI.
  C[i, 2] <- runif(1, min = TCI, max = TCF)
  if (i == 1) {
    C[i, 3] <- C[i, 2]                # arrival minute of the first client
  } else {
    C[i, 3] <- C[i - 1, 3] + C[i, 2]  # arrival minute of client i (i > 1)
  }
  # Service time: uniform on [TAI, TAF] (same fix as above).
  C[i, 4] <- runif(1, min = TAI, max = TAF)
  if (i == 1) {
    C[i, 5] <- 0                      # the first client never waits
  } else if (C[i - 1, 6] > C[i, 3]) {
    C[i, 5] <- C[i - 1, 6] - C[i, 3]  # server still busy: wait in queue
  } else {
    C[i, 5] <- 0                      # server idle: no wait
  }
  C[i, 6] <- C[i, 3] + C[i, 4] + C[i, 5]  # departure minute
}
# Table D: client number / arrival minute / departure minute
D <- matrix(0, nrow = n, ncol = 3)
for (i in 1:n) {
  D[i, 1] <- C[i, 1]
  D[i, 2] <- C[i, 3]
  D[i, 3] <- C[i, 6]
}
# Table E: arrival time of each client as (client, hour, minute, second)
E <- matrix(0, nrow = n, ncol = 4)
for (i in 1:n) {
  E[i, 1] <- C[i, 1]
  E[i, 2] <- floor(D[i, 2] / 60) + HAM                  # hour of arrival
  E[i, 3] <- floor(D[i, 2] - 60 * floor(D[i, 2] / 60))  # residual minutes
  E[i, 4] <- round((D[i, 2] - 60 * floor(D[i, 2] / 60) -
                    floor(D[i, 2] - 60 * floor(D[i, 2] / 60))) * 60,
                   digits = 0)                          # residual seconds
}
# Last client admitted: every client arriving no later than HCM minutes
# after opening (i.e. before 15:00) is served.
UCRH <- matrix(0, nrow = 1, ncol = 4)
for (i in 1:n) {
  if (C[i, 3] <= HCM) {
    UCR <- C[i, 1]
    UCRH[1, 1] <- E[i, 1]
    UCRH[1, 2] <- E[i, 2]
    UCRH[1, 3] <- E[i, 3]
    UCRH[1, 4] <- E[i, 4]
  }
}
# Departure times as (client, hour, minute, second).
# Renamed from `F` to `Fdep`: `F` masks the FALSE shorthand.
Fdep <- matrix(0, nrow = n, ncol = 4)
for (i in 1:n) {
  Fdep[i, 1] <- C[i, 1]
  Fdep[i, 2] <- floor(D[i, 3] / 60) + HAM
  Fdep[i, 3] <- floor(D[i, 3] - 60 * floor(D[i, 3] / 60))
  Fdep[i, 4] <- round((D[i, 3] - 60 * floor(D[i, 3] / 60) -
                       floor(D[i, 3] - 60 * floor(D[i, 3] / 60))) * 60,
                      digits = 0)
}
# Last client served and the time service finished.
UCAH <- matrix(0, nrow = 1, ncol = 4)
UCA <- UCR
UCAH[1, 1] <- Fdep[UCR, 1]
UCAH[1, 2] <- Fdep[UCR, 2]
UCAH[1, 3] <- Fdep[UCR, 3]
UCAH[1, 4] <- Fdep[UCR, 4]
# Key simulation results:
UCR  # number of clients that arrived (and were served) before closing
UCRH # arrival time of the last admitted client (client/hr/min/sec)
UCAH # departure time of the last served client (client/hr/min/sec)
4dff34d1cb98d3fb8d8a45e4813b21fb5e281009 | fab90001189c6512ea178a46539bd6558f035944 | /code/code_3_lwa_and_wage_loss.R | 0ca8b90666b453d6bad0ffc4648e128f2c291820 | [] | no_license | Juannadie/lwa_europe | 89428f68bf7c4a6ab6f2d9c5cf39c165b188a747 | bae066ef5bd3f5bd38957743c9db20ff4a83ba44 | refs/heads/master | 2022-12-20T05:34:37.451665 | 2020-10-02T10:06:42 | 2020-10-02T10:06:42 | 300,573,202 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,318 | r | code_3_lwa_and_wage_loss.R |
countries <- readRDS(file = "countries_2.rds") #loads data from previous step
#we rename the essential and closed indices variables for shorter code
countries <- lapply(countries, function(X) {
  X[["e_i"]] <- X[["essential_index"]]
  X[["c_i"]] <- X[["closed_index"]]
  X[["tw"]] <- X[["teleworking"]]
  X
})
#### Calculation of the Lockdown Working Ability Index for each worker, based on their tw, closure and essentiality scores ####
# lwa defaults to the teleworking score; essential workers (e_i > 0) keep the
# essential share of their work plus the teleworkable part of the rest;
# workers in closed sectors (c_i > 0) can only telework the non-closed share.
countries <- lapply(countries, function(X) {
  #for non essential and non closed (default)
  X[["lwa"]] <- X[["tw"]]
  #for essential (e_i > 0)
  X[["lwa"]][X[["e_i"]] > 0] <- (X[["e_i"]][X[["e_i"]] > 0] + (1-X[["e_i"]][X[["e_i"]] > 0])*X[["tw"]][X[["e_i"]] > 0])
  #for closed (c_i > 0)
  X[["lwa"]][X[["c_i"]] > 0] <- (1-X[["c_i"]][X[["c_i"]] > 0])*X[["tw"]][X[["c_i"]] > 0]
  X
})
#### NOW THE WAGE LOSS FOR EACH SCENARIO #####
# 2 months
# NOTE(review): the bare `df` lines between scenarios below are no-ops inside
# the function; only the final `df` (the return value) matters.
# NOTE(review): scenario columns (wage2l6c20, ...) are created by sub-assigning
# into a not-yet-existing column; this relies on every element having
# c_i == 0 or c_i > 0 (no NAs) -- TODO confirm the upstream data guarantees this.
countries <- lapply(countries, function(df) {
  #2 month lockdown: annual wage loss is the non-workable share (1 - lwa)
  #we set the number of months of the lockdown
  lcklenght <- 2
  df[["wloss2l"]] <- df[["wage"]]*(lcklenght/12)*(1-(df[["lwa"]]))
  df[["wage2l"]] <- df[["wage"]] - df[["wloss2l"]]
  #2 month plus 6 closure with a 20% capacity decrease
  # we set the closure at 20% (activity at 80% of capacity) for 6 months
  capacitydecr <- 0.2 #capacity decrease
  clslenght <- 6 #number of months of closure
  #non-closed activities (c_i = 0)
  df[["wage2l6c20"]][df[["c_i"]] == 0] <- df[["wage2l"]][df[["c_i"]] == 0] #same wage as 2 month of lockdown
  #closed activities (c_i > 0)
  df[["wage2l6c20"]][df[["c_i"]] > 0] <- df[["wage2l"]][df[["c_i"]] > 0] - ( df[["wage"]][df[["c_i"]] > 0]*(clslenght/12)*(capacitydecr)*df[["c_i"]][df[["c_i"]] > 0])
  df
  #2 month plus 6 closure with a 30% capacity decrease
  capacitydecr <- 0.3 #capacity decrease
  clslenght <- 6 #number of months of closure
  #non-closed activities (c_i = 0)
  df[["wage2l6c30"]][df[["c_i"]] == 0] <- df[["wage2l"]][df[["c_i"]] == 0] #same wage as 2 month of lockdown
  #closed activities (c_i > 0)
  df[["wage2l6c30"]][df[["c_i"]] > 0] <- df[["wage2l"]][df[["c_i"]] > 0] - ( df[["wage"]][df[["c_i"]] > 0]*(clslenght/12)*(capacitydecr)*df[["c_i"]][df[["c_i"]] > 0])
  df
  #2 month plus 6 closure with a 40% capacity decrease
  capacitydecr <- 0.4 #capacity decrease
  clslenght <- 6 #number of months of closure
  #non-closed activities (c_i = 0)
  df[["wage2l6c40"]][df[["c_i"]] == 0] <- df[["wage2l"]][df[["c_i"]] == 0] #same wage as 2 month of lockdown
  #closed activities (c_i > 0)
  df[["wage2l6c40"]][df[["c_i"]] > 0] <- df[["wage2l"]][df[["c_i"]] > 0] - ( df[["wage"]][df[["c_i"]] > 0]*(clslenght/12)*(capacitydecr)*df[["c_i"]][df[["c_i"]] > 0])
  df
  #now with 9 months (alternative simulation)
  #2 month plus 9 closure with a 20% capacity decrease
  capacitydecr <- 0.2 #capacity decrease
  clslenght <- 9 #number of months of closure
  #non-closed activities (c_i = 0)
  df[["wage2l9c20"]][df[["c_i"]] == 0] <- df[["wage2l"]][df[["c_i"]] == 0] #same wage as 2 month of lockdown
  #closed activities (c_i > 0)
  df[["wage2l9c20"]][df[["c_i"]] > 0] <- df[["wage2l"]][df[["c_i"]] > 0] - ( df[["wage"]][df[["c_i"]] > 0]*(clslenght/12)*(capacitydecr)*df[["c_i"]][df[["c_i"]] > 0])
  df
  #2 month plus 9 closure with a 30% capacity decrease
  capacitydecr <- 0.3 #capacity decrease
  clslenght <- 9 #number of months of closure
  #non-closed activities (c_i = 0)
  df[["wage2l9c30"]][df[["c_i"]] == 0] <- df[["wage2l"]][df[["c_i"]] == 0] #same wage as 2 month of lockdown
  #closed activities (c_i > 0)
  df[["wage2l9c30"]][df[["c_i"]] > 0] <- df[["wage2l"]][df[["c_i"]] > 0] - ( df[["wage"]][df[["c_i"]] > 0]*(clslenght/12)*(capacitydecr)*df[["c_i"]][df[["c_i"]] > 0])
  df
  #2 month plus 9 closure with a 40% capacity decrease
  capacitydecr <- 0.4 #capacity decrease
  clslenght <- 9 #number of months of closure
  #non-closed activities (c_i = 0)
  df[["wage2l9c40"]][df[["c_i"]] == 0] <- df[["wage2l"]][df[["c_i"]] == 0] #same wage as 2 month of lockdown
  #closed activities (c_i > 0)
  df[["wage2l9c40"]][df[["c_i"]] > 0] <- df[["wage2l"]][df[["c_i"]] > 0] - ( df[["wage"]][df[["c_i"]] > 0]*(clslenght/12)*(capacitydecr)*df[["c_i"]][df[["c_i"]] > 0])
  df
})
saveRDS(countries, file = "countries_3.rds") #saves file in current directory
|
9b86c1b53642d0c01a39735427586cec489fedcf | 040113c16d38a2ce1854c95c8d0b3c82481fc33c | /man/extract_country_code.Rd | 51695c35e3bf9e40afb76eaa56d7a23afdc0f49c | [] | no_license | sakrejda/pdhs | 9dc199648c89fc6ac036cd61d174e41a5335dedb | 0e32ea50e6db2abb7ab20e676c3457bd6e66c93c | refs/heads/master | 2020-03-17T22:25:43.022686 | 2018-05-18T20:27:55 | 2018-05-18T20:27:55 | 134,002,989 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 409 | rd | extract_country_code.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract-codes.R
\name{extract_country_code}
\alias{extract_country_code}
\title{For a filename 'file', extract the country code (first two characters).}
\usage{
extract_country_code(file)
}
\arguments{
\item{file}{filename to extract from.}
}
\description{
For a filename 'file', extract the country code (first two characters).
}
|
c033f2e0946ff4e65eaaee1ba8fcdcf5ecdfbb69 | 8c47a74d723b31f6097839364eea96085041725f | /man/export2cross.Rd | edcb78c20ab547cee160e80d18f14a25d6ef975d | [] | no_license | xuzhougeng/binmapr | e1afd9f0476e6353047c8123e2be457c0ee33ae2 | 41c45896374513eb3dceddbc531ebaeebe88dc72 | refs/heads/master | 2023-08-19T10:48:13.064347 | 2023-08-03T04:12:42 | 2023-08-03T04:12:42 | 212,075,720 | 10 | 4 | null | null | null | null | UTF-8 | R | false | true | 683 | rd | export2cross.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export.R
\name{export2cross}
\alias{export2cross}
\title{Generate cross object for R/qtl}
\usage{
export2cross(x, BC.gen = 0, F.gen = 0, alleles = c("A", "B"), parents = NULL)
}
\arguments{
\item{x}{}
\item{BC.gen}{Used only for cross type "bcsft"}
\item{F.gen}{Used only for cross type "bcsft"}
\item{alleles}{A vector of two one-letter character strings
(or four, for the four-way cross), to be used as labels for the two alleles.}
\item{parents}{a vector with the position or name of two parents}
}
\value{
a binmapr object
}
\description{
Generate cross object for R/qtl
}
\author{
Zhougeng Xu
}
|
92777789ecaff52c99df2593c2b1457c1b3c55e3 | 92bc39ef76d365bdc137dc56b976e510d897b4a8 | /man/IShares.Rd | 7c49eaaa4093fb439c4f00bd4c42e231ef812783 | [] | no_license | skiamu/ETF | e18ecc62dee90ba82afd7b56d58e1d8e8029dff5 | 9295efe221eb8785a773ee54b1cdc95bb3523d7a | refs/heads/master | 2021-01-01T01:29:48.132929 | 2020-04-23T14:11:10 | 2020-04-23T14:11:10 | 208,893,785 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 809 | rd | IShares.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IShares-class.R
\name{IShares}
\alias{IShares}
\title{IShares class constructor}
\usage{
IShares(summary_link, tickers_to_keep)
}
\arguments{
\item{summary_link}{link where the IShares summary data (in JSON format) is downloaded from.
This is the data summarizing all the ETF of the IShares provider}
\item{tickers_to_keep}{character vector containing the tickers to keep. One might
want to keep just a subset of tickers due to storage constraints}
}
\value{
a new \code{IShares} object
}
\description{
The \code{IShares} class is a "virtual" class in the sense that it's never
actually instantiated but used to give the interface to its subclasses.
For instace at the \code{IShares} class level we implement the getter
methods
}
|
d8084ea17911c6f15254539682418fc42a61818c | dfc271928415b959127a00bf73ef58dfba7ac834 | /src/cdsomics/man/ensembl_hgnc_gene_pairing.Rd | c9ea4e45f9fa08830338851a2554a84656784401 | [] | no_license | Luigi-Ferraro/ccle_processing | c9eb2177a69342595091d81f7e5fea71ffc43fcf | 6af18b21784d1fc1fc5e782d3dfb6940c7914c5a | refs/heads/master | 2023-01-19T22:51:02.815771 | 2020-11-10T15:49:14 | 2020-11-10T15:49:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 532 | rd | ensembl_hgnc_gene_pairing.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_gene_annotations.R
\name{ensembl_hgnc_gene_pairing}
\alias{ensembl_hgnc_gene_pairing}
\title{Pair Ensembl gene ids with the correct HGNC gene symbol}
\usage{
ensembl_hgnc_gene_pairing(filename)
}
\arguments{
\item{filename}{Path to the GENCODE gene annotations file; possible files are gencode.v19.annotation.gtf or gencode.v29.annotation.gff3}
}
\description{
Parses the GENCODE gene annotations file to get the Ensembl ID and HGNC symbols for genes
}
|
a1ef1019dc7c88909b4f2ebc288ca54c8d323134 | ea344f5ef7fda2ae1fbb1390367c12f754c23f26 | /cachematrix.R | af7b954d5b3e23eb26a8eaee66062b68a78ef67c | [] | no_license | KamolwanP/ProgrammingAssignment2 | 9589a1fc14af76bd40d95bb6a9d541b532f8fba6 | cc296937d105b864e4dc7109e4b30eedb127bd19 | refs/heads/master | 2020-04-22T05:00:03.447146 | 2019-02-11T16:08:01 | 2019-02-11T16:08:01 | 170,143,869 | 0 | 0 | null | 2019-02-11T14:36:30 | 2019-02-11T14:36:30 | null | UTF-8 | R | false | false | 625 | r | cachematrix.R |
## Create a "cache matrix": a list of closures around the matrix `x` that
## stores the matrix together with a lazily-computed, cached inverse.
## Interface: set(y), get(), setinverse(solve), getinverse().
## Setting a new matrix clears any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL  # cached inverse; NULL until setinverse() is called
  list(
    set = function(y) {
      x <<- y
      inverse <<- NULL  # invalidate the cache when the data changes
    },
    get = function() x,
    setinverse = function(solve) inverse <<- solve,
    getinverse = function() inverse
  )
}
## Return the inverse of the "cache matrix" `x` created by makeCacheMatrix().
## On the first call the inverse is computed with solve() and stored back
## into `x`; later calls reuse the cached value and announce the cache hit
## with a message. Extra arguments in `...` are accepted but unused.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: compute the inverse, store it, and fall through to return.
    inv <- solve(x$get())
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
7b6117cb0b0cb3bb7f2204a61853f5540a5546e1 | ac3264ff143e8bf99c6e9e2a77613a3f1499dc72 | /fastq_pooled/snakemake_ChIPseq_t2t-col.20210610/mapped/geneProfiles/quantiles/group_features_into_quantiles_noheatmaps.R | f40afd853c8d5b45c41504d2011749c130341230 | [] | no_license | ajtock/20190819_dh580_Athaliana_ChIPseq_MTOPVIB | 0291f5bf58ba760ff369263bc8f20773e685b9d9 | 2ff455aefc503757f97ec53a17fc1e21a7676f50 | refs/heads/master | 2021-07-30T10:34:56.081334 | 2021-07-22T15:01:53 | 2021-07-22T15:01:53 | 205,167,693 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 48,413 | r | group_features_into_quantiles_noheatmaps.R | #!/applications/R/R-4.0.0/bin/Rscript
#
# Divide features into quantiles based on mean log2(libName ChIP/control)
# in a given feature region (e.g., promoters).
# Extract and save feature IDs for each quantile for further analyses
# (e.g., GO enrichment and average + 95% CI profile plotting).
# Calculate mean winName-scaled recombination rate (cM/Mb) from
# the promoter to the terminator of each feature.
# Plot feature quantile heatmaps for various genomics datasets
# Plot feature quantile recombination rate densities in a
# heat map or violin plot
#
# Usage:
# /applications/R/R-4.0.0/bin/Rscript group_features_into_quantiles_noheatmaps.R WT_MTOPVIB_HA_Rep1_ChIP '20190819_dh580_Athaliana_ChIPseq_MTOPVIB/fastq_pooled/snakemake_ChIPseq_t2t-col.20210610' both 'Chr1,Chr2,Chr3,Chr4,Chr5' 2200 2000 2kb '2 kb' 10 10bp promoters 4 t2t-col.20210610
# Example argument values for interactive testing (kept commented out):
#libName <- "WT_MTOPVIB_HA_Rep1_ChIP"
#dirName <- "20190819_dh580_Athaliana_ChIPseq_MTOPVIB/fastq_pooled/snakemake_ChIPseq_t2t-col.20210610"
#align <- "both"
#chrName <- unlist(strsplit("Chr1,Chr2,Chr3,Chr4,Chr5",
#                           split = ","))
#bodyLength <- 2200
#upstream <- 2000
#downstream <- 2000
#flankName <- "2kb"
#flankNamePlot <- "2 kb"
#binSize <- 10
#binName <- "10bp"
#region <- "promoters"
#quantiles <- 4
#refbase <- "t2t-col.20210610"
# Parse the positional command-line arguments (order as in Usage above)
args <- commandArgs(trailingOnly = T)
libName <- args[1]
dirName <- args[2]
align <- args[3]
chrName <- unlist(strsplit(args[4],
                           split = ","))
bodyLength <- as.numeric(args[5])
# upstream and downstream both read args[6]: the flanks are symmetric
upstream <- as.numeric(args[6])
downstream <- as.numeric(args[6])
flankName <- args[7]
flankNamePlot <- args[8]
binSize <- as.numeric(args[9])
binName <- args[10]
region <- args[11]
quantiles <- as.numeric(args[12])
refbase <- args[13]
options(stringsAsFactors = F)
library(EnrichedHeatmap)
library(png)
#library(Cairo)
library(RColorBrewer)
library(circlize)
library(GenomicRanges)
library(dplyr)
library(parallel)
library(doParallel)
# Register a doParallel backend on all available cores and report it
registerDoParallel(cores = detectCores())
print("Currently registered parallel backend name, version and cores")
print(getDoParName())
print(getDoParVersion())
print(getDoParWorkers())
outDir <- paste0("quantiles_by_log2_", libName,
                 "_control_in_", region, "/")
plotDir <- paste0(outDir, "plots/")
# Create the output directories if they do not already exist
system(paste0("[ -d ", outDir, " ] || mkdir ", outDir))
system(paste0("[ -d ", plotDir, " ] || mkdir ", plotDir))
# Genomic definitions: chromosome names and lengths from the FASTA index
fai <- read.table(paste0("/home/ajt200/analysis/nanopore/", refbase, "/", refbase, ".fa.fai"), header = F)
chrs <- fai$V1[which(fai$V1 %in% chrName)]
chrLens <- fai$V2[which(fai$V1 %in% chrName)]
# Whole-chromosome GRanges for the selected chromosomes
regionGR <- GRanges(seqnames = chrs,
                    ranges = IRanges(start = 1,
                                     end = chrLens),
                    strand = "*")
# Define centromere GRanges
# NOTE(review): centromere start/end coordinates are hard-coded literals,
# presumably specific to the t2t-col.20210610 assembly -- confirm before
# reusing with a different refbase
CENstart <- c(14841110,3823792,13597188,4203902,11784131)[which(fai$V1 %in% chrName)]
CENend <- c(17559778,6045243,15733925,6977949,14551809)[which(fai$V1 %in% chrName)]
CENGR <- GRanges(seqnames = chrs,
                 ranges = IRanges(start = CENstart,
                                  end = CENend),
                 strand = "*")
# Load ChIP matrix
# Per-chromosome feature coverage matrices (one row per gene, one column
# per binSize-bp bin across upstream flank, body and downstream flank)
# feature
ChIP_featureMat <- lapply(seq_along(chrName), function(y) {
  as.matrix(read.table(paste0("/home/ajt200/analysis/",
                              dirName,
                              "/mapped/geneProfiles/matrices/",
                              libName,
                              "_MappedOn_", refbase, "_lowXM_", align, "_sort_norm_genes_in_",
                              chrName[y], "_matrix_bin", binName,
                              "_flank", flankName, ".tab"),
                       header = F, skip = 3))
})
# If features from multiple chromosomes are to be analysed,
# concatenate the corresponding feature coverage matrices
if(length(chrName) > 1) {
  ChIP_featureMat <- do.call(rbind, ChIP_featureMat)
} else {
  ChIP_featureMat <- ChIP_featureMat[[1]]
}
# ranLoc
# Matching matrices for random loci (negative control regions)
ChIP_ranLocMat <- lapply(seq_along(chrName), function(y) {
  as.matrix(read.table(paste0("/home/ajt200/analysis/",
                              dirName,
                              "/mapped/geneProfiles/matrices/",
                              libName,
                              "_MappedOn_", refbase, "_lowXM_", align, "_sort_norm_genes_in_",
                              chrName[y], "_ranLoc_matrix_bin", binName,
                              "_flank", flankName, ".tab"),
                       header = F, skip = 3))
})
# If ranLocs from multiple chromosomes are to be analysed,
# concatenate the corresponding ranLoc coverage matrices
if(length(chrName) > 1) {
  ChIP_ranLocMat <- do.call(rbind, ChIP_ranLocMat)
} else {
  ChIP_ranLocMat <- ChIP_ranLocMat[[1]]
}
# Load control matrices
# NOTE(review): all entries except the REC8 input are commented out, so
# controlNames, controlNamesDir and controlNamesPlot each currently have
# length 1; the three vectors must stay in matching order if entries are
# re-enabled.
controlNames <- c(
                  "WT_REC8_Myc_Rep1_input"
#                  "WT_CENH3_Rep1_input_SRR4430555",
#                  "WT_H3K9me2_Rep1_input"
#                  "H2AW_input_SRR5298544",
#                  "WT_gDNA_Rep1",
#                  "WT_gDNA_Rep1_R1",
#                  "map_K40_E2",
#                  "map_K45_E2",
#                  "map_K50_E2",
#                  "map_K150_E4",
#                  "map_K200_E4",
#                  "map_K300_E4"
                 )
controlNamesDir <- c(
                     paste0("REC8_pooled/snakemake_ChIPseq_", refbase)
#                     paste0("CENH3_seedlings_Maheshwari_Comai_2017_GenomeRes/snakemake_ChIPseq", refbase),
#                     paste0("170101_Chris_H3K9me2_ChIP/WT/snakemake_ChIPseq", refbase),
#                     paste0("HTA6_HTA7_leaf_Lorkovic_Berger_2017_CurrBiol/snakemake_ChIPseq", refbase),
#                     paste0("150701_Natasha_gDNA/WT/snakemake_ChIPseq", refbase),
#                     paste0("150701_Natasha_gDNA/WT/R1/snakemake_SPO11oligos", refbase),
#                     rep(paste0("nanopore/", refbase, "/genmap_mappability"), 6)
                    )
controlNamesPlot <- c(
                      "Input (REC8)"
#                      "Input (CENH3)",
#                      "Input (H3K9me2)"
#                      "Input (MNase)",
#                      "PE gDNA",
#                      "SE gDNA",
#                      "k=40 e=2",
#                      "k=45 e=2",
#                      "k=50 e=2",
#                      "k=150 e=4",
#                      "k=200 e=4",
#                      "k=300 e=4"
                     )
controlDirs <- sapply(seq_along(controlNamesDir), function(x) {
  paste0("/home/ajt200/analysis/", controlNamesDir[x],
         "/mapped/")
})
## control
# feature
# Per-control, per-chromosome coverage matrices; genmap mappability tracks
# ("map_K*") use a different file-name pattern from sequencing libraries
control_featureMats <- mclapply(seq_along(controlNames), function(x) {
  lapply(seq_along(chrName), function(y) {
    if( grepl("map_K", controlNames[x]) ) {
      as.matrix(read.table(paste0(controlDirs[x], "geneProfiles/matrices/",
                                  controlNames[x],
                                  "_MappedOn_", refbase, "_genes_in_",
                                  chrName[y], "_matrix_bin", binName, "_flank", flankName, ".tab"),
                           header = F, skip = 3))
    } else {
      as.matrix(read.table(paste0(controlDirs[x], "geneProfiles/matrices/",
                                  controlNames[x],
                                  "_MappedOn_", refbase, "_lowXM_both_sort_norm_genes_in_",
                                  chrName[y], "_matrix_bin", binName, "_flank", flankName, ".tab"),
                           header = F, skip = 3))
    }
  })
}, mc.cores = length(controlNames))
# If features from multiple chromosomes are to be analysed,
# concatenate the corresponding feature coverage matrices
control_featureMats <- mclapply(seq_along(control_featureMats), function(x) {
  if(length(chrName) > 1) {
    do.call(rbind, control_featureMats[[x]])
  } else {
    control_featureMats[[x]][[1]]
  }
}, mc.cores = length(control_featureMats))
# ranLoc
# Same loading scheme for the random-locus (negative control) matrices
control_ranLocMats <- mclapply(seq_along(controlNames), function(x) {
  lapply(seq_along(chrName), function(y) {
    if( grepl("map_K", controlNames[x]) ) {
      as.matrix(read.table(paste0(controlDirs[x], "geneProfiles/matrices/",
                                  controlNames[x],
                                  "_MappedOn_", refbase, "_genes_in_",
                                  chrName[y], "_ranLoc_matrix_bin", binName, "_flank", flankName, ".tab"),
                           header = F, skip = 3))
    } else {
      as.matrix(read.table(paste0(controlDirs[x], "geneProfiles/matrices/",
                                  controlNames[x],
                                  "_MappedOn_", refbase, "_lowXM_both_sort_norm_genes_in_",
                                  chrName[y], "_ranLoc_matrix_bin", binName, "_flank", flankName, ".tab"),
                           header = F, skip = 3))
    }
  })
}, mc.cores = length(controlNames))
# If features from multiple chromosomes are to be analysed,
# concatenate the corresponding feature coverage matrices
control_ranLocMats <- mclapply(seq_along(control_ranLocMats), function(x) {
  if(length(chrName) > 1) {
    do.call(rbind, control_ranLocMats[[x]])
  } else {
    control_ranLocMats[[x]][[1]]
  }
}, mc.cores = length(control_ranLocMats))
# Conditionally calculate log2(ChIP/control)
# for each matrix depending on library
# NOTE(review): the indices [20], [30], [40] and [50] used in the CENH3,
# H3K9me2, MNase and SPO11oligos branches look stale -- with the current
# length-1 controlNames/control_featureMats only the final else branch
# (index [1]) is valid; the other branches would fail if ever taken.
# TODO confirm these indices against a fuller control list.
# feature
log2ChIP_featureMat <-
  if ( grepl("CENH3", libName) ) {
    print(paste0(libName, " library; using ", controlNames[20], " for log2((ChIP+1)/(input+1)) calculation"))
    log2((ChIP_featureMat+1)/(control_featureMats[[20]]+1))
  } else if ( grepl("H3K9me2", libName) ) {
    print(paste0(libName, " library; using ", controlNames[30], " for log2((ChIP+1)/(input+1)) calculation"))
    log2((ChIP_featureMat+1)/(control_featureMats[[30]]+1))
  } else if ( grepl("MNase", libName) ) {
    print(paste0(libName, " library; using ", controlNames[40], " for log2((MNase+1)/(input+1)) calculation"))
    log2((ChIP_featureMat+1)/(control_featureMats[[40]]+1))
  } else if ( grepl("SPO11oligos", libName) ) {
    print(paste0(libName, " library; using ", controlNames[50], " for log2((SPO11-1-oligos+1)/(input+1)) calculation"))
    log2((ChIP_featureMat+1)/(control_featureMats[[50]]+1))
  } else {
    print(paste0(libName, " library; using ", controlNames[1], " for log2((ChIP+1)/(input+1)) calculation"))
    log2((ChIP_featureMat+1)/(control_featureMats[[1]]+1))
  }
# ranLoc
# Same control selection applied to the random-locus matrices
log2ChIP_ranLocMat <-
  if ( grepl("CENH3", libName) ) {
    print(paste0(libName, " library; using ", controlNames[20], " for log2((ChIP+1)/(input+1)) calculation"))
    log2((ChIP_ranLocMat+1)/(control_ranLocMats[[20]]+1))
  } else if ( grepl("H3K9me2", libName) ) {
    print(paste0(libName, " library; using ", controlNames[30], " for log2((ChIP+1)/(input+1)) calculation"))
    log2((ChIP_ranLocMat+1)/(control_ranLocMats[[30]]+1))
  } else if ( grepl("MNase", libName) ) {
    print(paste0(libName, " library; using ", controlNames[40], " for log2((MNase+1)/(input+1)) calculation"))
    log2((ChIP_ranLocMat+1)/(control_ranLocMats[[40]]+1))
  } else if ( grepl("SPO11oligos", libName) ) {
    print(paste0(libName, " library; using ", controlNames[50], " for log2((SPO11-1-oligos+1)/(input+1)) calculation"))
    log2((ChIP_ranLocMat+1)/(control_ranLocMats[[50]]+1))
  } else {
    print(paste0(libName, " library; using ", controlNames[1], " for log2((ChIP+1)/(input+1)) calculation"))
    log2((ChIP_ranLocMat+1)/(control_ranLocMats[[1]]+1))
  }
# Extract region for ordering of features (adjust promoter/terminator size as necessary)
if( region == "promoters" ) {
log2ChIP_featureMatRegion <- log2ChIP_featureMat[,(((upstream-1000)/binSize)+1):(upstream/binSize)]
log2ChIP_ranLocMatRegion <- log2ChIP_ranLocMat[,(((upstream-1000)/binSize)+1):(upstream/binSize)]
} else if ( region == "terminators" ) {
log2ChIP_featureMatRegion <- log2ChIP_featureMat[,(((upstream+bodyLength)/binSize)+1):(((upstream+bodyLength)/binSize)+(1000/binSize))]
log2ChIP_ranLocMatRegion <- log2ChIP_ranLocMat[,(((upstream+bodyLength)/binSize)+1):(((upstream+bodyLength)/binSize)+(1000/binSize))]
} else if ( region == "bodies" ) {
log2ChIP_featureMatRegion <- log2ChIP_featureMat[,((upstream/binSize)+1):((upstream+bodyLength)/binSize)]
log2ChIP_ranLocMatRegion <- log2ChIP_ranLocMat[,((upstream/binSize)+1):((upstream+bodyLength)/binSize)]
} else if ( region == "genes" ) {
log2ChIP_featureMatRegion <- log2ChIP_featureMat[,(((upstream-1000)/binSize)+1):(((upstream+bodyLength)/binSize)+(1000/binSize))]
log2ChIP_ranLocMatRegion <- log2ChIP_ranLocMat[,(((upstream-1000)/binSize)+1):(((upstream+bodyLength)/binSize)+(1000/binSize))]
} else {
print("The region name provided does not match 'promoters', 'terminators', 'bodies', or 'genes'")
}
log2ChIP_featureMatRegionRowMeans <- rowMeans(log2ChIP_featureMatRegion, na.rm = T)
#log2ChIP_featureMatRegionRowMeansSorted <- sort.int(log2ChIP_featureMatRegionRowMeans,
# decreasing = T,
# index.return = T,
# na.last = T)
#log2ChIP_featureMatRegionSorted <- log2ChIP_featureMatRegion[sort.int(log2ChIP_featureMatRegionRowMeans,
# decreasing = T,
# index.return = T,
# na.last = T)$ix,]
#log2ChIP_featureMatSorted <- log2ChIP_featureMat[sort.int(log2ChIP_featureMatRegionRowMeans,
# decreasing = T,
# index.return = T,
# na.last = T)$ix,]
## Replace NAs in log2ChIP_featureMatRegion with 0
#log2ChIP_featureMatRegion[which(is.na(log2ChIP_featureMatRegion))] <- 0
# Per-random-locus mean log2(ChIP/control) across the matched region's bins
# (NA bins ignored); used only for the percentile column of the ranLoc table
log2ChIP_ranLocMatRegionRowMeans <- rowMeans(log2ChIP_ranLocMatRegion, na.rm = T)
#log2ChIP_ranLocMatRegionRowMeansSorted <- sort.int(log2ChIP_ranLocMatRegionRowMeans,
# decreasing = T,
# index.return = T,
# na.last = T)
#log2ChIP_ranLocMatRegionSorted <- log2ChIP_ranLocMatRegion[sort.int(log2ChIP_ranLocMatRegionRowMeans,
# decreasing = T,
# index.return = T,
# na.last = T)$ix,]
#log2ChIP_ranLocMatSorted <- log2ChIP_ranLocMat[sort.int(log2ChIP_ranLocMatRegionRowMeans,
# decreasing = T,
# index.return = T,
# na.last = T)$ix,]
## Replace NAs in log2ChIP_ranLocMatRegion with 0
#log2ChIP_ranLocMatRegion[which(is.na(log2ChIP_ranLocMatRegion))] <- 0
# Load table of feature coordinates in BED format
# (one file per chromosome in chrName; columns: chr, start, end, ID, score, strand)
features <- lapply(seq_along(chrName), function(x) {
  read.table(paste0("/home/ajt200/analysis/nanopore/", refbase, "/annotation/genes/", refbase, "_representative_mRNA_",
                    chrName[x], ".bed"),
             header = F)
})
# Concatenate the per-chromosome tables when more than one chromosome was requested
if(length(chrName) > 1) {
  features <- do.call(rbind, features)
} else {
  features <- features[[1]]
}
# Convert 0-based start coordinates (BED)
# into 1-based start coordinates (for output as TSV below)
features[,2] <- features[,2]+1
colnames(features) <- c("chr", "start", "end", "featureID", "score", "strand")
# GRanges representation; start is already 1-based after the conversion above,
# matching the 1-based closed-interval convention of IRanges
featuresGR <- GRanges(seqnames = features$chr,
                      ranges = IRanges(start = features$start,
                                       end = features$end),
                      strand = features$strand,
                      featureID = features$featureID)
# Load table of ranLoc (random locus) coordinates in BED format
# (one file per chromosome in chrName; columns: chr, start, end, ID, score, strand)
ranLocs <- lapply(seq_along(chrName), function(x) {
  read.table(paste0("/home/ajt200/analysis/nanopore/", refbase, "/annotation/genes/", refbase, "_representative_mRNA_",
                    chrName[x], "_randomLoci.bed"),
             header = F)
})
# Concatenate the per-chromosome tables when more than one chromosome was requested
if(length(chrName) > 1) {
  ranLocs <- do.call(rbind, ranLocs)
} else {
  ranLocs <- ranLocs[[1]]
}
# Convert 0-based start coordinates (BED)
# into 1-based start coordinates (for output as TSV below)
ranLocs[,2] <- ranLocs[,2]+1
colnames(ranLocs) <- c("chr", "start", "end", "ranLocID", "score", "strand")
# GRanges representation. NOTE: start was previously "ranLocs$start+1" here,
# which incremented the already-1-based starts a second time (off by one) and
# was inconsistent with featuresGR above; use the converted start as-is.
ranLocsGR <- GRanges(seqnames = ranLocs$chr,
                     ranges = IRanges(start = ranLocs$start,
                                      end = ranLocs$end),
                     strand = ranLocs$strand,
                     ranLocID = ranLocs$ranLocID)
# Append the per-row mean log2(ChIP/control) signal (computed above) to each table
features <- data.frame(features,
                       log2ChIP_featureMatRegionRowMeans = log2ChIP_featureMatRegionRowMeans)
ranLocs <- data.frame(ranLocs,
                      log2ChIP_ranLocMatRegionRowMeans = log2ChIP_ranLocMatRegionRowMeans)
# Group features into quantiles according to decreasing orderingFactor
orderingFactor <- "log2ChIP_featureMatRegionRowMeans"
print(orderingFactor)
# Add a rank-based percentile (rank / n) for each feature within the ordering
# factor; the quantile label column starts empty and is filled by the loop below
features_DF <- data.frame(features,
                          percentile = rank(features[[orderingFactor]]) / nrow(features),
                          quantile = "")
# Assign each feature to one of `quantiles` rank-based groups by decreasing
# orderingFactor, write each group to its own TSV, and accumulate per-group
# summary statistics. Quantile 1 holds the highest-ranked features; rows whose
# orderingFactor is NA are excluded and keep an empty quantile label.
quantilesStats <- data.frame()
for(k in 1:quantiles) {
  # First quantile should span 1 to greater than, e.g., 0.75 proportions of features
  if(k < quantiles) {
    features_DF[ !is.na(features_DF[,which(colnames(features_DF) == orderingFactor)]) &
                 rank(features_DF[,which(colnames(features_DF) == orderingFactor)]) /
                 length(features_DF[,which(colnames(features_DF) == orderingFactor)]) <=
                 1-((k-1)/quantiles) &
                 rank(features_DF[,which(colnames(features_DF) == orderingFactor)]) /
                 length(features_DF[,which(colnames(features_DF) == orderingFactor)]) >
                 1-(k/quantiles), ]$quantile <- paste0("Quantile ", k)
  } else {
    # Final quantile should span 0 to, e.g., 0.25 proportions of features
    # (>= rather than > at the lower bound so the lowest-ranked feature is included)
    features_DF[ !is.na(features_DF[,which(colnames(features_DF) == orderingFactor)]) &
                 rank(features_DF[,which(colnames(features_DF) == orderingFactor)]) /
                 length(features_DF[,which(colnames(features_DF) == orderingFactor)]) <=
                 1-((k-1)/quantiles) &
                 rank(features_DF[,which(colnames(features_DF) == orderingFactor)]) /
                 length(features_DF[,which(colnames(features_DF) == orderingFactor)]) >=
                 1-(k/quantiles), ]$quantile <- paste0("Quantile ", k)
  }
  # Write the members of this quantile to their own TSV in outDir
  write.table(features_DF[features_DF$quantile == paste0("Quantile ", k),],
              file = paste0(outDir,
                            "quantile", k, "_of_", quantiles,
                            "_by_log2_", libName, "_control_in_", region,
                            "_of_genes_in_t2t-col.20210610_",
                            paste0(chrName, collapse = "_"), ".tsv"),
              quote = FALSE, sep = "\t", row.names = FALSE)
  # Per-quantile summary row: member count, mean and total feature width (bp),
  # and mean orderingFactor signal
  stats <- data.frame(quantile = as.integer(k),
                      n = as.integer(dim(features_DF[features_DF$quantile == paste0("Quantile ", k),])[1]),
                      mean_width = as.integer(round(mean(
                        (features_DF[features_DF$quantile == paste0("Quantile ", k),]$end -
                         features_DF[features_DF$quantile == paste0("Quantile ", k),]$start) + 1, na.rm = T))),
                      total_width = as.integer(sum(
                        (features_DF[features_DF$quantile == paste0("Quantile ", k),]$end -
                         features_DF[features_DF$quantile == paste0("Quantile ", k),]$start) + 1, na.rm = T)),
                      mean_orderingFactor = as.numeric(mean(features_DF[features_DF$quantile == paste0("Quantile ", k),][,which(colnames(features_DF) == orderingFactor)], na.rm = T)))
  quantilesStats <- rbind(quantilesStats, stats)
}
# Write the per-quantile summary statistics accumulated in the loop above
write.table(quantilesStats,
            file = paste0(outDir,
                          "summary_", quantiles, "quantiles",
                          "_by_log2_", libName, "_control_in_", region,
                          "_of_genes_in_t2t-col.20210610_",
                          paste0(chrName, collapse = "_"), ".tsv"),
            quote = FALSE, sep = "\t", row.names = FALSE)
# Write the full feature table, including the percentile and quantile columns
write.table(features_DF,
            file = paste0(outDir,
                          "features_", quantiles, "quantiles",
                          "_by_log2_", libName, "_control_in_", region,
                          "_of_genes_in_t2t-col.20210610_",
                          paste0(chrName, collapse = "_"), ".tsv"),
            quote = FALSE, sep = "\t", row.names = FALSE)
# Divide ranLocs into quantiles based on feature quantile indices
# NOTE(review): this assumes ranLocs rows correspond 1:1, in order, with
# features_DF rows (one random locus generated per feature) — TODO confirm
# against the randomLoci.bed generation script
ranLocs_DF <- data.frame(ranLocs,
                         percentile = rank(ranLocs[,which(colnames(ranLocs) == "log2ChIP_ranLocMatRegionRowMeans")]) /
                                      length(ranLocs[,which(colnames(ranLocs) == "log2ChIP_ranLocMatRegionRowMeans")]),
                         random = as.character(""))
# Get row indices for each feature quantile
quantileIndices <- lapply(1:quantiles, function(k) {
  which(features_DF$quantile == paste0("Quantile ", k))
})
# Label each ranLoc row with the "Random k" group matching its feature's quantile
for(k in 1:quantiles) {
  ranLocs_DF[quantileIndices[[k]],]$random <- paste0("Random ", k)
}
# Write the full ranLoc table, including the percentile and random-group columns
write.table(ranLocs_DF,
            file = paste0(outDir,
                          "features_", quantiles, "quantiles",
                          "_by_log2_", libName, "_control_in_", region,
                          "_of_genes_in_t2t-col.20210610_",
                          paste0(chrName, collapse = "_"), "_ranLocs.tsv"),
            quote = FALSE, sep = "\t", row.names = FALSE)
## Order features in each quantile by decreasing log2ChIP_featureMatRegion levels
## to define "row_order" for heatmaps
#combineRowOrders <- function(quantile_bool_list) {
# do.call("c", lapply(quantile_bool_list, function(x) {
# quantile_log2ChIP_featureMatRegionRowMeans <- rowMeans(log2ChIP_featureMatRegion[x,], na.rm = T)
# quantile_log2ChIP_featureMatRegionRowMeans[which(is.na(quantile_log2ChIP_featureMatRegionRowMeans))] <- 0
# which(x)[order(quantile_log2ChIP_featureMatRegionRowMeans, decreasing = T)]
# }))
#}
#row_order <- combineRowOrders(quantile_bool_list =
# lapply(seq_along(1:quantiles), function(k) {
# featuresDF$quantile == paste0("Quantile ", k)
# })
#)
## Confirm row_order is as would be obtained by alternative method
## Note that this alternative
#stopifnot(identical(row_order,
# order(featuresDF$log2ChIP_featureMatRegionRowMeans,
# decreasing=T)))
#
## Order feature IDs in each quantile by decreasing log2ChIP_featureMatRegion levels
## for use in GO term enrichment analysis
#listCombineRowOrders <- function(quantile_bool_list) {
# do.call(list, lapply(quantile_bool_list, function(x) {
# quantile_log2ChIP_featureMatRegionRowMeans <- rowMeans(log2ChIP_featureMatRegion[x,], na.rm = T)
# quantile_log2ChIP_featureMatRegionRowMeans[which(is.na(quantile_log2ChIP_featureMatRegionRowMeans))] <- 0
# which(x)[order(quantile_log2ChIP_featureMatRegionRowMeans, decreasing = T)]
# }))
#}
#featureIndicesList <- listCombineRowOrders(quantile_bool_list =
# lapply(seq_along(1:quantiles), function(k) {
# featuresDF$quantile == paste0("Quantile ", k)
# })
#)
#stopifnot(identical(row_order,
# do.call(c, lapply(featureIndicesList,
# function(x) x))))
## Alternatively, with original ordering:
### Get feature indices for each quantile
##featureIndicesList <- lapply(seq_along(1:quantiles), function(k) {
## which(featuresDF$quantile == paste0("Quantile ", k))
##})
#
#featureIDsQuantileList <- lapply(seq_along(1:quantiles), function(k) {
# sub(pattern = "\\.\\d+", replacement = "",
# x = as.vector(featuresDF[featureIndicesList[[k]],]$featureID))
#})
#sapply(seq_along(featureIDsQuantileList), function(k) {
# write.table(featureIDsQuantileList[[k]],
# file = paste0(outDir,
# "featureIDs_quantile", k, "_of_", quantiles,
# "_by_log2_", libName, "_control_in_",
# region, "_of_",
# substring(chrName[1][1], first = 1, last = 5), "_in_",
# paste0(substring(chrName, first = 10, last = 16),
# collapse = "_"), "_",
# substring(chrName[1][1], first = 18), ".txt"),
# quote = F, row.names = F, col.names = F)
#})
## Load feature matrices for each chromatin dataset, calculate log2(ChIP/control),
## and sort by decreasing log2mat1RegionRowMeans
#ChIPNames <- c(
# "WT_MTOPVIB_HA_Rep1_ChIP",
# "DMC1_Rep1_ChIP",
# "H2AZ_Rep1_ChIP",
# "H3K4me3_Rep1_ChIP",
# "H3K4me1_Rep1_ChIP_SRR8126618",
# "H3K27ac_Rep1_ChIP_SRR8126621",
# "H3K27me3_ChIP_SRR6350666",
# "H3K9me2_Rep1_ChIP",
# "H3K27me1_Rep1_ChIP"
# )
#ChIPNamesDir <- c(
# "20190819_dh580_Athaliana_ChIPseq_MTOPVIB/fastq_pooled/snakemake_ChIPseq_t2t-col.20210610",
# "DMC1",
# "H2AZ",
# "H3K4me3",
# "H3K4me1",
# "H3K27ac",
# "H3K27me3",
# "H3K9me2",
# "H3K27me1"
# )
#ChIPNamesPlot <- c(
# "ASY1",
# "DMC1",
# "H2A.Z",
# "H3K4me3",
# "H3K4me1",
# "H3K27ac",
# "H3K27me3",
# "H3K9me2",
# "H3K27me1"
# )
#ChIPColours <- c(
# "purple4",
# "green2",
# "dodgerblue",
# "forestgreen",
# "goldenrod1",
# "orange",
# "navy",
# "magenta3",
# "firebrick1"
# )
#otherNames <- c(
# "MNase_Rep1",
# "DNaseI_Rep1_SRR8447247",
# "WT_RNAseq_Rep1_ERR2402974",
# "WT_RNAseq_Rep2_ERR2402973",
# "WT_RNAseq_Rep3_ERR2402972"
# )
#otherNamesDir <- c(
# "MNase",
# "DNaseI",
# "RNAseq_meiocyte_Martin_Moore_2018_FrontPlantSci",
# "RNAseq_meiocyte_Martin_Moore_2018_FrontPlantSci",
# "RNAseq_meiocyte_Martin_Moore_2018_FrontPlantSci"
# )
#otherNamesPlot <- c(
# "MNase",
# "DNaseI",
# "RNA-seq Rep1",
# "RNA-seq Rep2",
# "RNA-seq Rep3"
# )
#otherColours <- c(
# "darkcyan",
# "purple",
# "red4",
# "red4",
# "red4"
# )
#sRNANames <- c(
# "CS+_2_LIB18613_LDI16228"
# )
#sRNANamesDir <- c(
# "sRNAseq_meiocyte_Martin_Moore"
# )
#sRNANamesPlot <- c(
# "20-nt sRNAs",
# "21-nt sRNAs",
# "22-nt sRNAs",
# "23-nt sRNAs",
# "24-nt sRNAs",
# "34-nt sRNAs"
# )
#sRNAsizes <- c(
# "20nt",
# "21nt",
# "22nt",
# "23nt",
# "24nt",
# "33nt",
# "34nt"
# )
#sRNAColours <- c(
# "red",
# "blue",
# "green2",
# "darkorange2",
# "purple3",
# "darkgreen",
# "deeppink"
# )
#DNAmethNames <- c(
# "BSseq_Rep8a_SRR6792678"
# )
#DNAmethNamesDir <- c(
# "BSseq"
# )
#DNAmethContexts <- c(
# "CpG",
# "CHG",
# "CHH"
# )
#DNAmethNamesPlot <- c(
# "mCG",
# "mCHG",
# "mCHH"
# )
#DNAmethColours <- c(
# "navy",
# "blue",
# "deepskyblue1"
# )
#
#ChIPDirs <- sapply(seq_along(ChIPNames), function(x) {
# if(ChIPNames[x] %in% c("H3K4me3_ChIP_SRR6350668",
# "H3K27me3_ChIP_SRR6350666",
# "H3K36me3_ChIP_SRR6350670",
# "H3K9ac_ChIP_SRR6350667",
# "CENH3_ChIP_SRR1686799")) {
# paste0("/home/ajt200/analysis/wheat/epigenomics_shoot_leaf_IWGSC_2018_Science/",
# ChIPNamesDir[x], "/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/matrices/")
# } else if(ChIPNames[x] %in% c("H3K4me1_Rep1_ChIP_SRR8126618",
# "H3K27ac_Rep1_ChIP_SRR8126621")) {
# paste0("/home/ajt200/analysis/wheat/epigenomics_seedlings_Li_2019_Genome_Biol/",
# ChIPNamesDir[x], "/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/matrices/")
# } else {
# paste0("/home/ajt200/analysis/wheat/",
# ChIPNamesDir[x], "/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/matrices/")
# }
#})
#otherDirs <- sapply(seq_along(otherNames), function(x) {
# if(otherNames[x] %in% c("MNase_Rep1")) {
# paste0("/home/ajt200/analysis/wheat/",
# otherNamesDir[x], "/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/matrices/")
# } else if(otherNames[x] %in% c("DNaseI_Rep1_SRR8447247")) {
# paste0("/home/ajt200/analysis/wheat/epigenomics_seedlings_Li_2019_Genome_Biol/",
# otherNamesDir[x], "/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/matrices/")
# } else if(grepl("RNAseq", otherNames[x])) {
# paste0("/home/ajt200/analysis/wheat/",
# otherNamesDir[x], "/snakemake_RNAseq_HISAT2/mapped/geneProfiles_subgenomes/matrices/")
# } else {
# stop(paste0("otherNames[", x, "] is not compatible with the specified coverage matrix paths"))
# }
#})
#sRNADirs <- sapply(seq_along(sRNANames), function(x) {
# if(sRNANames[x] %in% c("CS+_2_LIB18613_LDI16228")) {
# paste0("/home/ajt200/analysis/wheat/",
# sRNANamesDir[x], "/snakemake_sRNAseq/mapped/geneProfiles_subgenomes/matrices/")
# } else {
# stop(paste0("sRNANames[", x, "] is not compatible with the specified coverage matrix paths"))
# }
#})
#DNAmethDirs <- sapply(seq_along(DNAmethNames), function(x) {
# if(DNAmethNames[x] %in% c("BSseq_Rep8a_SRR6792678")) {
# paste0("/home/ajt200/analysis/wheat/epigenomics_shoot_leaf_IWGSC_2018_Science/",
# DNAmethNamesDir[x],
# "/snakemake_BSseq/coverage/geneProfiles_subgenomes/matrices/")
# } else {
# stop(paste0("DNAmethNames[", x, "] is not compatible with the specified coverage matrix paths"))
# }
#})
#
## ChIP
#ChIP_featureMats <- mclapply(seq_along(ChIPNames), function(x) {
# lapply(seq_along(chrName), function(y) {
# as.matrix(read.table(paste0(ChIPDirs[x],
# ChIPNames[x],
# "_MappedOn_wheat_v1.0_lowXM_", align, "_sort_norm_",
# chrName[y], "_matrix_bin", binName,
# "_flank", flankName, ".tab"),
# header = F, skip = 3))
# })
#}, mc.cores = length(ChIPNames))
## If features from all 3 subgenomes are to be analysed,
## concatenate the 3 corresponding feature coverage matrices
#ChIP_featureMats <- mclapply(seq_along(ChIP_featureMats), function(x) {
# if(length(chrName) == 3) {
# do.call(rbind, ChIP_featureMats[[x]])
# } else {
# ChIP_featureMats[[x]][[1]]
# }
#}, mc.cores = length(ChIP_featureMats))
#
## Conditionally calculate log2(ChIP/input) or log2(ChIP/MNase)
## for each matrix depending on library
#log2ChIP_featureMats <- mclapply(seq_along(ChIP_featureMats), function(x) {
# if(ChIPNames[x] %in% c(
# "WT_MTOPVIB_HA_Rep1_ChIP",
# "DMC1_Rep1_ChIP",
# "H3K4me3_ChIP_SRR6350668",
# "H3K27me3_ChIP_SRR6350666",
# "H3K36me3_ChIP_SRR6350670",
# "H3K9ac_ChIP_SRR6350667",
# "H3K4me1_Rep1_ChIP_SRR8126618",
# "H3K27ac_Rep1_ChIP_SRR8126621"
# )) {
# print(paste0(ChIPNames[x], " was sonication-based; using ", controlNames[1], " for log2((ChIP+1)/(control+1)) calculation"))
# log2((ChIP_featureMats[[x]]+1)/(control_ranLocMats[[1]]+1))
# } else {
# print(paste0(ChIPNames[x], " was MNase-based; using ", controlNames[2], " for log2((ChIP+1)/(control+1)) calculation"))
# log2((ChIP_featureMats[[x]]+1)/(control_ranLocMats[[2]]+1))
# }
#}, mc.cores = length(ChIP_featureMats))
#
#for(x in seq_along(log2ChIP_featureMats)) {
# attr(log2ChIP_featureMats[[x]], "upstream_index") = 1:(upstream/binSize)
# attr(log2ChIP_featureMats[[x]], "target_index") = ((upstream/binSize)+1):((upstream+bodyLength)/binSize)
# attr(log2ChIP_featureMats[[x]], "downstream_index") = (((upstream+bodyLength)/binSize)+1):(((upstream+bodyLength)/binSize)+(downstream/binSize))
# attr(log2ChIP_featureMats[[x]], "extend") = c(upstream, downstream)
# attr(log2ChIP_featureMats[[x]], "smooth") = FALSE
# attr(log2ChIP_featureMats[[x]], "signal_name") = ChIPNamesPlot[x]
# attr(log2ChIP_featureMats[[x]], "target_name") = chrName
# attr(log2ChIP_featureMats[[x]], "target_is_single_point") = FALSE
# attr(log2ChIP_featureMats[[x]], "background") = 0
# attr(log2ChIP_featureMats[[x]], "signal_is_categorical") = FALSE
# class(log2ChIP_featureMats[[x]]) = c("normalizedMatrix", "matrix")
#}
#
#for(x in seq_along(control_ranLocMats)) {
# attr(control_ranLocMats[[x]], "upstream_index") = 1:(upstream/binSize)
# attr(control_ranLocMats[[x]], "target_index") = ((upstream/binSize)+1):((upstream+bodyLength)/binSize)
# attr(control_ranLocMats[[x]], "downstream_index") = (((upstream+bodyLength)/binSize)+1):(((upstream+bodyLength)/binSize)+(downstream/binSize))
# attr(control_ranLocMats[[x]], "extend") = c(upstream, downstream)
# attr(control_ranLocMats[[x]], "smooth") = FALSE
# attr(control_ranLocMats[[x]], "signal_name") = controlNamesPlot[x]
# attr(control_ranLocMats[[x]], "target_name") = chrName
# attr(control_ranLocMats[[x]], "target_is_single_point") = FALSE
# attr(control_ranLocMats[[x]], "background") = 0
# attr(control_ranLocMats[[x]], "signal_is_categorical") = FALSE
# class(control_ranLocMats[[x]]) = c("normalizedMatrix", "matrix")
#}
#
## other
#othermats <- mclapply(seq_along(otherNames), function(x) {
# lapply(seq_along(chrName), function(y) {
# otherFile <- system(paste0("ls ", otherDirs[x],
# otherNames[x],
# "_MappedOn_wheat_v1.0*", align, "_sort_norm_",
# chrName[y], "_matrix_bin", binName,
# "_flank", flankName, ".tab"),
# intern = T)
# as.matrix(read.table(otherFile,
# header = F, skip = 3))
# })
#}, mc.cores = length(otherNames))
## If features from all 3 subgenomes are to be analysed,
## concatenate the 3 corresponding feature coverage matrices
#othermats <- mclapply(seq_along(othermats), function(x) {
# if(length(chrName) == 3) {
# do.call(rbind, othermats[[x]])
# } else {
# othermats[[x]][[1]]
# }
#}, mc.cores = length(othermats))
#
#for(x in seq_along(othermats)) {
# attr(othermats[[x]], "upstream_index") = 1:(upstream/binSize)
# attr(othermats[[x]], "target_index") = ((upstream/binSize)+1):((upstream+bodyLength)/binSize)
# attr(othermats[[x]], "downstream_index") = (((upstream+bodyLength)/binSize)+1):(((upstream+bodyLength)/binSize)+(downstream/binSize))
# attr(othermats[[x]], "extend") = c(upstream, downstream)
# attr(othermats[[x]], "smooth") = FALSE
# attr(othermats[[x]], "signal_name") = otherNamesPlot[x]
# attr(othermats[[x]], "target_name") = chrName
# attr(othermats[[x]], "target_is_single_point") = FALSE
# attr(othermats[[x]], "background") = 0
# attr(othermats[[x]], "signal_is_categorical") = FALSE
# class(othermats[[x]]) = c("normalizedMatrix", "matrix")
#}
#
## sRNA
#sRNAmats <- mclapply(seq_along(sRNAsizes), function(x) {
# lapply(seq_along(chrName), function(y) {
# as.matrix(read.table(paste0(sRNADirs,
# sRNANames,
# "_MappedOn_wheat_v1.0_", align, "_", sRNAsizes[x], "_sort_norm_",
# chrName[y], "_matrix_bin", binName,
# "_flank", flankName, ".tab"),
# header = F, skip = 3))
# })
#}, mc.cores = length(sRNAsizes))
## If features from all 3 subgenomes are to be analysed,
## concatenate the 3 corresponding feature coverage matrices
#sRNAmats <- mclapply(seq_along(sRNAmats), function(x) {
# if(length(chrName) == 3) {
# do.call(rbind, sRNAmats[[x]])
# } else {
# sRNAmats[[x]][[1]]
# }
#}, mc.cores = length(sRNAmats))
#
#for(x in seq_along(sRNAmats)) {
# attr(sRNAmats[[x]], "upstream_index") = 1:(upstream/binSize)
# attr(sRNAmats[[x]], "target_index") = ((upstream/binSize)+1):((upstream+bodyLength)/binSize)
# attr(sRNAmats[[x]], "downstream_index") = (((upstream+bodyLength)/binSize)+1):(((upstream+bodyLength)/binSize)+(downstream/binSize))
# attr(sRNAmats[[x]], "extend") = c(upstream, downstream)
# attr(sRNAmats[[x]], "smooth") = FALSE
# attr(sRNAmats[[x]], "signal_name") = sRNANamesPlot[x]
# attr(sRNAmats[[x]], "target_name") = chrName
# attr(sRNAmats[[x]], "target_is_single_point") = FALSE
# attr(sRNAmats[[x]], "background") = 0
# attr(sRNAmats[[x]], "signal_is_categorical") = FALSE
# class(sRNAmats[[x]]) = c("normalizedMatrix", "matrix")
#}
#
## DNAmeth
#DNAmethmats <- mclapply(seq_along(DNAmethContexts), function(x) {
# lapply(seq_along(chrName), function(y) {
# as.matrix(read.table(paste0(DNAmethDirs,
# DNAmethNames,
# "_MappedOn_wheat_v1.0_incl_organelles_controls_dedup_", DNAmethContexts[x], "_",
# chrName[y], "_matrix_bin", binName,
# "_flank", flankName, ".tab"),
# header = F, skip = 3))
# })
#}, mc.cores = length(DNAmethContexts))
## If features from all 3 subgenomes are to be analysed,
## concatenate the 3 corresponding feature coverage matrices
#DNAmethmats <- mclapply(seq_along(DNAmethmats), function(x) {
# if(length(chrName) == 3) {
# do.call(rbind, DNAmethmats[[x]])
# } else {
# DNAmethmats[[x]][[1]]
# }
#}, mc.cores = length(DNAmethmats))
#
#for(x in seq_along(DNAmethmats)) {
# attr(DNAmethmats[[x]], "upstream_index") = 1:(upstream/binSize)
# attr(DNAmethmats[[x]], "target_index") = ((upstream/binSize)+1):((upstream+bodyLength)/binSize)
# attr(DNAmethmats[[x]], "downstream_index") = (((upstream+bodyLength)/binSize)+1):(((upstream+bodyLength)/binSize)+(downstream/binSize))
# attr(DNAmethmats[[x]], "extend") = c(upstream, downstream)
# attr(DNAmethmats[[x]], "smooth") = FALSE
# attr(DNAmethmats[[x]], "signal_name") = DNAmethNamesPlot[x]
# attr(DNAmethmats[[x]], "target_name") = chrName
# attr(DNAmethmats[[x]], "target_is_single_point") = FALSE
# attr(DNAmethmats[[x]], "background") = "NA"
# attr(DNAmethmats[[x]], "signal_is_categorical") = FALSE
# class(DNAmethmats[[x]]) = c("normalizedMatrix", "matrix")
#}
#
#
#if(grepl("genes", chrName)) {
# featureStartLab <- "TSS"
# featureEndLab <- "TTS"
#} else {
# featureStartLab <- "Start"
# featureEndLab <- "End"
#}
#
## Heatmap plotting function
## Note that for plotting heatmaps for individual datasets in separate PDFs,
## must edit this function - print(EnrichedHeatmap(...))
#featureHeatmap <- function(mat,
# col_fun,
# colour,
# datName) {
# EnrichedHeatmap(mat = mat,
# col = col_fun,
# column_title = datName,
# top_annotation = HeatmapAnnotation(enriched = anno_enriched(gp = gpar(col = colour,
# lwd = 2),
# yaxis_side = "left",
# yaxis_facing = "left",
# yaxis_gp = gpar(fontsize = 10),
# pos_line_gp = gpar(col = "black",
# lty = 2,
# lwd = 2))),
# top_annotation_height = unit(2, "cm"),
# width = unit(6, "cm"),
# name = datName,
# heatmap_legend_param = list(title = datName,
# title_position = "topcenter",
# title_gp = gpar(font = 2, fontsize = 12),
# legend_direction = "horizontal",
# labels_gp = gpar(fontsize = 10)),
# axis_name = c(paste0("-", flankNamePlot),
# featureStartLab, featureEndLab,
# paste0("+", flankNamePlot)),
# axis_name_gp = gpar(fontsize = 12),
# border = FALSE,
# pos_line_gp = gpar(col = "white", lty = 2, lwd = 2),
# # If converting into png with pdfTotiffTopng.sh,
# # set use_raster to FALSE
# #use_raster = FALSE)
# use_raster = TRUE, raster_device = "png", raster_quality = 10)
#}
#
## Define heatmap colours
#rich8to6equal <- c("#0000CB", "#0081FF", "#87CEFA", "#FDEE02", "#FFAB00", "#FF3300")
##quantileColours <- c("darkorange1", "green2", "purple3", "deepskyblue")
##quantileColours <- colorRampPalette(c("red", "blue"))(4)
#quantileColours <- c("red", "purple", "blue", "navy")
#
## Create quantile colour block "heatmap"
#quantileBlockhtmp <- Heatmap(featuresDF$quantile,
# col = structure(quantileColours,
# names = paste0("Quantile ", 1:quantiles)),
# show_row_names = FALSE, show_heatmap_legend = FALSE,
# width = unit(3, "mm"), name = "")
#quantilecMMbheatmap <- Heatmap(featuresDF$cMMb,
# cluster_rows = FALSE,
# col = colorRamp2(quantile(featuresDF$cMMb,
# c(0.60, 0.50, 0.40, 0.30),
# na.rm = T),
# quantileColours),
# na_col = "grey40",
# show_row_names = FALSE, show_heatmap_legend = TRUE,
# heatmap_legend_param = list(title = "cM/Mb",
# title_position = "topcenter",
# title_gp = gpar(font = 2, fontsize = 12),
# legend_direction = "horizontal",
# labels_gp = gpar(fontsize = 10)),
# width = unit(3, "cm"), name = "")
#quantilecMMbrowAnno <- rowAnnotation(cMMb = anno_points(featuresDF$cMMb,
# which = "row",
# size = unit(1, "mm"),
# gp = gpar(col = "black"),
# axis_param = list(at = c(0, 1.5), labels = c("0", "1.5")),
# width = unit(3, "cm")))
## Plot together
#log2ChIPhtmpList <- mclapply(seq_along(ChIPNames), function(x) {
# ChIP_col_fun <- colorRamp2(quantile(log2ChIP_featureMats[[x]],
# c(0.5, 0.6, 0.7, 0.8, 0.9, 0.95),
# na.rm = T),
# rich8to6equal)
# featureHeatmap(mat = log2ChIP_featureMats[[x]],
# col_fun = ChIP_col_fun,
# colour = quantileColours,
# datName = ChIPNamesPlot[x])
#}, mc.cores = length(log2ChIP_featureMats))
#otherhtmpList <- mclapply(seq_along(otherNames), function(x) {
# ChIP_col_fun <- colorRamp2(quantile(othermats[[x]],
# c(0.5, 0.6, 0.7, 0.8, 0.9, 0.95),
# na.rm = T),
# rich8to6equal)
# featureHeatmap(mat = othermats[[x]],
# col_fun = ChIP_col_fun,
# colour = quantileColours,
# datName = otherNamesPlot[x])
#}, mc.cores = length(othermats))
#controlhtmpList <- mclapply(seq_along(controlNames), function(x) {
# ChIP_col_fun <- colorRamp2(quantile(control_ranLocMats[[x]],
# c(0.5, 0.6, 0.7, 0.8, 0.9, 0.95),
# na.rm = T),
# rich8to6equal)
# featureHeatmap(mat = control_ranLocMats[[x]],
# col_fun = ChIP_col_fun,
# colour = quantileColours,
# datName = controlNamesPlot[x])
#}, mc.cores = length(control_ranLocMats))
##sRNAhtmpList <- mclapply(seq_along(sRNANamesPlot), function(x) {
## ChIP_col_fun <- colorRamp2(quantile(sRNAmats[[x]],
## c(0.998, 0.9982, 0.9984, 0.9986, 0.9988, 0.999),
## na.rm = T),
## rich8to6equal)
## featureHeatmap(mat = sRNAmats[[x]],
## col_fun = ChIP_col_fun,
## colour = quantileColours,
## datName = sRNANamesPlot[x])
##}, mc.cores = length(sRNAmats))
#DNAmethhtmpList <- mclapply(seq_along(DNAmethNamesPlot), function(x) {
# ChIP_col_fun <- colorRamp2(quantile(DNAmethmats[[x]],
# c(0.50, 0.60, 0.70, 0.80, 0.90, 0.95),
# na.rm = T),
# rich8to6equal)
# featureHeatmap(mat = DNAmethmats[[x]],
# col_fun = ChIP_col_fun,
# colour = quantileColours,
# datName = DNAmethNamesPlot[x])
#}, mc.cores = length(DNAmethmats))
#
#htmpList <- c(quantileBlockhtmp,
# quantilecMMbheatmap,
# log2ChIPhtmpList,
# controlhtmpList[[1]],
# otherhtmpList,
## sRNAhtmpList,
# DNAmethhtmpList)
#htmps <- NULL
#for(x in 1:length(htmpList)) {
# htmps <- htmps + htmpList[[x]]
#}
#pdf(paste0(plotDir, "log2ChIPcontrol_around_",
# substring(chrName[1][1], first = 1, last = 5), "_in_",
# paste0(substring(chrName, first = 10, last = 16),
# collapse = "_"), "_",
# substring(chrName[1][1], first = 18),
# "_heatmaps_quantiled_by_log2_", libName, "_control_in_", region, ".pdf"),
# width = 3*length(htmpList),
# height = 10)
#draw(htmps,
# split = featuresDF$quantile,
# row_order = row_order,
# heatmap_legend_side = "bottom",
# gap = unit(c(1, 1, rep(14, length(htmpList)-2)), "mm")
# )
#dev.off()
|
8fbae1ba0692afb647cc4449529f109e91b55dd0 | d7fd880e6f42afe9911b597b843d11b9035a801a | /cachematrix.R | 1ab7c74547c4ffc8db32728732468146b77b548f | [] | no_license | LaurenDembeck/ProgrammingAssignment2 | 289aaeda48823594c0f64bfa8f9d63135bbc5172 | cd7db4dd1c6d01c9efb1ed195d9389089e2154c2 | refs/heads/master | 2020-04-15T08:52:11.507386 | 2015-12-27T10:03:19 | 2015-12-27T10:03:19 | 48,567,335 | 0 | 0 | null | 2015-12-25T05:10:06 | 2015-12-25T05:10:05 | null | UTF-8 | R | false | false | 938 | r | cachematrix.R | #The makeCacheMatrix function will generate a list of functions(set, get, setinverse, and getinverse), which can be assigned to a variable
#It also creates an environment holding the matrix x and its cached inverse, shared by those functions
# Create a special "matrix" object that can cache its inverse.
#
# Args:
#   x: a (presumed invertible) matrix; defaults to an empty 1x1 matrix so
#      makeCacheMatrix() can be called with no arguments (backward compatible).
#
# Returns a list of four accessor functions closing over `x` and the cached
# inverse `inv`:
#   set(y)          : replace the stored matrix and invalidate the cache
#   get()           : return the stored matrix
#   setinverse(inv) : store a computed inverse in the cache
#   getinverse()    : return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix, so any previously cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# Compute (or retrieve) the inverse of the matrix held by a makeCacheMatrix object.
#
# Args:
#   x:   the list of accessor functions returned by makeCacheMatrix()
#   ...: further arguments forwarded to solve() (backward-compatible addition)
#
# If the inverse has already been computed (getinverse() is non-NULL), the
# cached value is returned with a message; otherwise it is computed with
# solve(), stored via setinverse() for future calls, and returned.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if(!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)  # cache for subsequent calls
  inv
}
729e432baf449709c8eec0577302a0f74902b584 | 6b116d9cbd762360cefbde5bbdd85c3591f5d0c6 | /man/get_biooracle_temperature.Rd | 9b1a9f32b68f9fd21c7f0631ac07894d7bcc67b5 | [] | no_license | iobis/findingdemo | 05b01fb5e577858bd316b379f218937f20ab7fbd | 46defee7990f1225bd7f4ccec69070e9e8c9949e | refs/heads/master | 2021-07-05T17:04:55.516572 | 2020-11-30T11:29:32 | 2020-11-30T11:29:32 | 206,312,569 | 0 | 3 | null | 2019-09-05T15:02:13 | 2019-09-04T12:20:40 | R | UTF-8 | R | false | true | 311 | rd | get_biooracle_temperature.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raster.R
\name{get_biooracle_temperature}
\alias{get_biooracle_temperature}
\title{Reads bio-oracle temperature data.}
\usage{
get_biooracle_temperature(bbox = c(-40, 50, 20, 90))
}
\description{
Reads bio-oracle temperature data.
}
|
99d051090730ad61429e5a553cea4324fc3b38cf | ee3d81df33936bccdf3c4a795fb2ba57cfbb2ab1 | /server.R | 49f961455d442dbc4329491a97716c658e89fe11 | [] | no_license | rikku1983/data_product | 85482ef8d03cd7e28d559107f21cc070eb43c056 | 76d836f6c8e226b7b7beaadc82ccfcc227a001e2 | refs/heads/master | 2021-01-01T19:46:14.722602 | 2015-08-09T16:37:00 | 2015-08-09T16:37:00 | 40,442,564 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 507 | r | server.R | library(shiny)
# Predict miles-per-gallon from weight (lbs), quarter-mile time (qsec, s) and
# transmission (0 = automatic, 1 = manual), using fixed regression coefficients
# (mtcars model with a weight-by-transmission interaction; weight in 1000 lbs).
prempg <- function(weight, qsec, transmission) {
  wt_klb <- weight / 1000
  base_term <- 9.7231 - 2.936531 * wt_klb + 1.016974 * qsec
  trans_term <- 14.079428 * transmission - 4.141376 * transmission * wt_klb
  base_term + trans_term
}
# Shiny server: renders the predicted MPG whenever any input changes.
shinyServer(
function(input, output){
# Earlier inline version of the computation, kept for reference:
#w <- as.numeric(input$weight)/1000
#q <- as.numeric(input$qsec)
#output$mpg <- 9.7231 -2.936531*w + 1.016974*q + 14.079428 * t - 4.141376 * t * w
# Reactive output: UI inputs arrive as strings, hence as.numeric() on each
# before passing them to prempg() (defined above)
output$mpg <- renderPrint(
prempg(as.numeric(input$weight), as.numeric(input$qsec), as.numeric(input$t)))
}
)
d6e4861bba30c9ef8ab8c719921e693b5dc17ef6 | d84f3a58168994f55530b8a5d5b9ba2837db4fc3 | /plot1.R | e3229b68678defdcc1e299a73a91946bb800fe76 | [] | no_license | Palrich/Exploratory_Data_Analysis_Project2 | a26e48b426d227fcceca8f4cb982284fe7bf0be9 | 2a8a63b98e5bfdcf73bacfa843d2d276123df937 | refs/heads/master | 2016-09-13T03:47:17.499789 | 2016-05-23T06:12:57 | 2016-05-23T06:12:57 | 59,456,230 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 737 | r | plot1.R | ## Reading the data (make sure you are in the directory where the RDS files have been extracted)
# Read the NEI data (run from the directory holding the extracted RDS files)
pmData <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Total PM2.5 emissions (tons) per year, summed across all sources.
# (The with(pmData, ...) wrappers were removed: EmissionSumsYear is not a
# column of pmData, so they were misleading no-ops.)
EmissionSumsYear <- tapply(pmData$Emissions, pmData$year, sum)
# tapply() names the result by year; convert the names to numeric explicitly
# so plotting and regression operate on numbers, not character strings
years <- as.numeric(names(EmissionSumsYear))

# Plot the yearly totals with a linear trend line, then close the device
png("plot1.png")
plot(years, EmissionSumsYear, xlab = "Year",
     ylab = expression("Total PM"[2.5] * " Emissions (Tons)"),
     main = expression("Total PM"[2.5] * " Emissions From 1999-2008"), pch = 19)
# Regression line (the previous data = pmData argument was unused: both
# variables come from the calling environment, not from pmData)
regLine <- lm(EmissionSumsYear ~ years)
abline(regLine)
dev.off()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.