blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23ba54e4657b411f25904d9be7d052fbd26c17cd | 48518d18d28d52e25ec6e26dd7d9e0eb1019d10a | /man/get_summary_of_ticker.Rd | 636a56228f6dc7b9dedd3472c6655d2a263d0618 | [] | no_license | misrori/getrich | 68da281341203d12a8386043b801309852eb32a7 | 6e6c49cac0f7e86575dbf7dfdee6eba0eeeacf40 | refs/heads/master | 2023-02-12T22:49:19.980733 | 2021-01-02T12:23:24 | 2021-01-02T12:23:24 | 265,784,961 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 343 | rd | get_summary_of_ticker.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slack_report.R
\name{get_summary_of_ticker}
\alias{get_summary_of_ticker}
\title{Get info of a ticker from tradingview}
\usage{
get_summary_of_ticker(ticker)
}
\arguments{
\item{Ticker}{The id of the company}
}
\description{
Get info of a ticker from tradingview
}
|
13cfcf8c4266d790976c7d92129cd1894b5f886d | ba3025381d302e867d07ce7cb1852cc3fc927007 | /man/addins.Rd | 8e34d000dcb351c695800bb363c6d971469b50cf | [] | no_license | stephenturner/Tmisc | 01a2021c0526a1ad11eb04d659fdd0c34f593d07 | c2302beb4c7219157a5362d95ca15ae0f9eb06e6 | refs/heads/master | 2023-01-13T01:16:33.279211 | 2021-03-09T20:25:28 | 2021-03-09T20:25:28 | 18,997,792 | 8 | 13 | null | 2020-08-29T21:52:48 | 2014-04-21T16:04:38 | R | UTF-8 | R | false | true | 418 | rd | addins.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/addins.R
\name{addins}
\alias{addins}
\alias{insertInAddin}
\alias{insertEqual}
\title{Insert text at current position.}
\description{
Call these function as an addin to insert desired text at the
cursor position. After installing Tmisc, hit the Addins menu, and
optionally add a keyboard shortcut, e.g., Command+Shift+I, Alt+-, etc.
}
|
993f203e20f5e3568d40878ee3f5e006925a214f | 30975dc286e36555ae827345d8df7b4cc22ac345 | /man/plotKinrespDiagnostics.kinrespList.Rd | 9988a164aaf711738f34334183a530b0ba0c0e8a | [] | no_license | bgctw/twKinresp | c70eec323028176b340681f6c307103e7d397bbc | 94234c83ba34d9a69203f162586028a78f9a33c7 | refs/heads/master | 2020-05-15T18:59:22.958201 | 2019-04-20T21:14:19 | 2019-04-20T21:14:19 | 182,444,867 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 869 | rd | plotKinrespDiagnostics.kinrespList.Rd | \name{plotKinrespDiagnostics.kinrespList}
\alias{plotKinrespDiagnostics.kinrespList}
\title{plotKinrespDiagnostics kinrespList}
\description{Diagnostic plots for confining unlimited growth phase for each replicate.}
\usage{\method{plotKinrespDiagnostics}{kinrespList}(kinrespRes,
plotFileBasename = "", ...)}
\arguments{
\item{kinrespRes}{object of class kinrespList from \code{\link{kinrespGrowthphaseExperiment}} to plot diagnostics for.}
\item{plotFileBasename}{basename of figure files for diagnostic plots}
\item{\dots}{further argument to \code{\link{plotKinrespDiagnostics.kinresp}}}
}
\value{invisible: list of all results of \code{\link{plotKinrespDiagnostics.kinresp}}}
\author{Thomas Wutzler <thomas.wutzler@web.de>}
\seealso{\code{\link{kinrespGrowthphaseExperiment}}
,\code{\link{plotKinrespDiagnostics.kinresp}}
,\code{\link{twKinresp}}}
|
684acd0ede6aa5117f4a4a5e615e8d5aa5ebf597 | deff237d47e608a32138f144d510998e861c43e7 | /5 Association Rule Learning/Eclat/eclat_associative_rule_learn.r | fbc931afe4f4c8e6328812c2008d76ebeec8b41c | [] | no_license | uminmay/udemy_ML_R | e5069f23421f9ea4c79fb8569276d3b619d3fba2 | 331bd937bc29850c84f3a0ff69c147ff46dff564 | refs/heads/master | 2023-06-24T13:28:43.083045 | 2021-07-19T12:11:33 | 2021-07-19T12:11:33 | 379,516,068 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 777 | r | eclat_associative_rule_learn.r | # Apriori
# Data Preprocessing
dataset = read.csv("Market_Basket_Optimisation.csv", header=FALSE)
## Library to be used needs sparse matrix as input
#install.packages("arules")
##Create sparse matrix
library(arules)
dataset_sparse = read.transactions("Market_Basket_Optimisation.csv",
sep = ',',
rm.duplicates =TRUE)
#summary(dataset_sparse)
#itemFrequencyPlot(dataset_sparse, topN = 100)
## Training apriori on the data
rules = eclat(data = dataset_sparse,
parameter = list(support = 0.004,minlen = 2))
## Visualizing the results
inspect(sort(rules, by='support')[1:10])
#chocolate has high support, among most purchased products <-> water
#so change confidence from 0.4 to 0.2
|
43a9327b509f89c01e65a3b7445871ef8a7eda08 | ff7ad43cf16fe7f45349783391e229d6be212df4 | /app/global.R | d1f10cd109d9f7c8498fec398bfadfdde575cf5f | [] | no_license | carlosespino11/landis | ec1cd6daa32215d316aae2622136c19f7358a4f3 | d6ad529d6b5da9d1448bc1f563365a3a578a2d6d | refs/heads/master | 2020-03-21T01:56:55.856985 | 2018-06-20T02:45:43 | 2018-06-20T02:45:43 | 137,971,072 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 408 | r | global.R | library(shiny)
library(leaflet)
library(maptools)
library(dplyr)
library(ggthemes)
library(ggplot2)
library(ggthemes)
library(shinyBS)
library(shinyjs)
library(tidyr)
library(RColorBrewer)
library(magrittr)
library(tidyverse)
library(sf)
#' Read datasets
features_bg = read_rds('./data/fetures_bg.rds') %>%dplyr::select(-GEOID)%>%
st_transform(st_crs('+proj=longlat +datum=WGS84'))
source("app-utils.R") |
3cf06d904401a0da2216628bd6bac2102f32f393 | d4ffa0e79c4a6f61baae5d2d8ce61481ca3d00d9 | /man/lsem_kernel_weights.Rd | c57c05ef6e3f96cad49456aadd794b130be55c34 | [
"MIT"
] | permissive | xanthematthijssen/lsembandwidth | a35d819fe078ffffd1e5e17caab1075b00333eee | fed4fa8c6362003bef30ff7e7484eb1bd3211684 | refs/heads/master | 2022-12-29T09:48:50.704064 | 2020-10-20T14:55:51 | 2020-10-20T14:55:51 | 242,844,323 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 619 | rd | lsem_kernel_weights.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lsem_kernel_weights.R
\name{lsem_kernel_weights}
\alias{lsem_kernel_weights}
\title{Function from sirt that calculates kernel weights for certain bandwidth and moderator value}
\usage{
lsem_kernel_weights(x, x0, bw, kernel = "gaussian")
}
\arguments{
\item{x}{vector of datapoints to calculate weights}
\item{x0}{moderator value}
\item{bw}{bandwidth}
\item{kernel}{kernel type, defaults to gaussion}
}
\value{
vector of weights
}
\description{
Function from sirt that calculates kernel weights for certain bandwidth and moderator value
}
|
50cdcddcb980268110d89571e9aacf300b0996ad | 0c21028ce68d0ebd7e0f6698e97521d457f3e9ee | /DEB_IBM.R | b59364542a94a6489afaa478aa81093fd9d8c94f | [] | no_license | xocloo/SchistoIBM | da150240c8c4c4abfdde2bbabecb607e06503c7d | 728e741e1042b974aa22be5162e5e7162d2e543f | refs/heads/master | 2020-04-24T17:10:38.993360 | 2019-02-22T15:32:51 | 2019-02-22T15:32:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 36,369 | r | DEB_IBM.R | ## DEB IBM
# check bottom of page for diagnostics for running netlogo in rstudio
# version 1.2
# DEB_INF_GUTS_IBM_1.1.nlogo
# IndividualModel_IBM3.c (generates .o and .so files)
# FullStarve_shrink_dilute_damage3.Rda
# version 1.1
# DEB_INF_GUTS_IBM_1.1.nlogo
# IndividualModel_IBM2.c
# ILL_shrink_damageA5.Rda
# 11-2-19
# added harwell script
# updated Fh and K pars
# merged snab version with mac version
# 4-2-19
# added multi-panel plots for sim outputs
# 28-1-19
# reverted back to Food=environment[1] in snail update (eating by sum(L2)^2 snails is in C script)
# 25-1-19 (v.1.2)
# MCMC DEB params (full starve model Rda)
# new detritus input
# new food growth in C file
# new mcmc deb estimates
# 23-1-19
# fixed individual snail deb update when using detritus: Food=environment[1]*(snail.update[,2]^2)/sum(snail.update[,2]^2),
# added detritus supply as food source
# added detritus param: pars["d_Z"] = 0.2 # detritus mg L-1 day-1
# removed resource cycle toggle from within sim loop to define param section (now alpha and rho of 0 = no cycle)
# 22-1-19
# added host length and parasite biomass to sim outputs
# 15-1-19
# set LHS parameter space
# 31-12-18
# added molluscide events (me_pars) for 95% host adult and eggs in env mortality rate (per day)
# 21-12-18
# added adult, juv, and infected host pop that sheds to sim results output
# fixed install packages section for windows
# 20-12-18
# added host length and parasite biomass to model outputs
# removed .so .o and .dll files from github and added to .gitignore and sensitive files dir
#18-12-18
# changed p to rho
# added rgrowth (pars[“r”]) (rg)
# added master lists for total and infected hosts
# changed resource wave eq to r = r + alpha * r * sin(2 * pi * t/rho)
#16-12-18
# new damage density deb params (IndividualModel_IBM2.c, ILL_shrink_damageA5.Rda)
# added alpha and periodicity (p) param space to resource dynamics
# 11-12-18
# changed overdamped and periodic food dynamics to cyclical resource dynamics
# fixed cyclical resource dynamics
# 28-11-18
# added overdamped and periodic food dynamics to nl loop
# 27-11-18
# fixed NAs in 'create snails' command (Env_G param)
# list to check if 'create snails' command isn't producing NAs from Env_G
# set pop density outputs in NL loop to integer to pass into Env_G and rbinom func
# 23-11-18
# all user inputs at beginning of doc
# 22-11-19
# added debfunction.txt and pars.txt for defining params
# 19-11-18
# added "DEB_INF_GUTS_IBM_1.1.nlogo" as test model
###### TO DO ######
# for new snail deb feeding
## in new C file
### redefine dFdf (dfdt = -iM * f * sum(L^2) + rF(1-K/F) + Det)
# LHC sampling of r, alpha, and rho for different host sizes (for cyclical food)
# get ratio of infected hosts and shedding infected hosts
# change r and food
# - plot food against r as heatmap
# define
# - starvation-related hazard rate
# - pars and debfunction
# find papers on periodicity in resource loads in pops (Nisbet, Gurney, daphnia, etc)
# E.g. Nisbet Gurney Population dynamics in a periodically varying environment
# verify what volume of food density is reasonable (F)
# - sin wave of resource change
# - step function of resources (on/off season) (most unrealistic)
# - non-regenerative detritus (event-based)
# R = algae supply rate, don't vary K
# heat map of where peaks or resources and peaks of cercariae occur
# fix days parameter in NL
# output NL plots to R
# OUTPUTS
# survival and shell length results from DEBstep
# plot of rP vs. P/V (parasite biomass outcome)
#############################################################################################
#################################### Mac OSX test ###########################################
#############################################################################################
# Search "@netlogo" for netlogo code in file
### Files required
# "DEB_IBM.R"
# "DEB_INF_GUTS_IBM.nlogo"
# "FullStarve_shrink_dilute_damage3.Rda"
# "IndividualModel_IBM3.c"
# "IndividualModel_IBM3.so" # Mac OSX. generated from C
# "IndividualModel_IBM3.o" # Mac OSX. generated from C
# "IndividualModel_IBM.dll" # Windows. generated from C
test.java <- 0 # 1 = run java diagnostics
# run java test
# NOTE(review): this install.packages() call runs unconditionally on every
# source of the script — consider moving it inside the if(test.java==1) block.
install.packages("RCurl")
if(test.java==1){
# pull the remote java diagnostics script from GitHub and execute it here
require(RCurl)
script <- getURL("https://raw.githubusercontent.com/darwinanddavis/SchistoIBM/master/mac/java_test.R", ssl.verifypeer = FALSE)
eval(parse(text = script))
# check rJava version
.jinit()
.jcall("java/lang/System", "S", "getProperty", "java.runtime.version")
# get latest Java/Oracle version: https://www.oracle.com/technetwork/java/javase/downloads/index-jsp-138363.html
}
# :three: [GCC compiler in R (unconfirmed)](https://stackoverflow.com/questions/1616983/building-r-packages-using-alternate-gcc)
# [Running Netlogo 6.0.+](https://github.com/NetLogo/NetLogo/issues/1282)
################################# Running NetLogo in Mac ##################################
# if using Mac OSX El Capitan+ and not already in JQR, download and open JGR
# launch JGR (Java GUI for R); used on Mac when RStudio cannot host NetLogo's JVM
mac <- 0
if(mac==1){
install.packages('JGR',,'http://www.rforge.net/')
library(JGR)
JGR::JGR()
}
#############################################################################################
############################# Windows or JGR onwards ########################################
#############################################################################################
#################################### set user inputs #######################################
# isolate sensitive data:
# "FullStarve_shrink_dilute_damage3.Rda"
# set user outputs
# master switches controlling how the rest of the script behaves
snab <- 0 # 1 = use remote access (snab comp), 0 = run model on your comp
# NOTE(review): mac <- 1 here silently overrides mac <- 0 set in the JGR
# section above — confirm which value is intended before sourcing end-to-end.
mac <- 1 # mac or windows system? 1 = mac, 0 = windows
gui <- 0 # display the gui? 1 = yes, 0 = no
pck <- 0 # if not already, install rnetlogo and rjava from source? 1 = yes, 0 = already installed
save_to_file <- 0 # 1 = save simulation outputs to local dir, 0 = plot in current R session
mcmcplot <- 0 # 1 = save mcmc plots to dir
traceplot <- 0 # 1 = include traceplot in mcmc plots? intensive!!!
# set dir paths (for "/" for both Windows and Mac)
if(snab==1){
# remote-access (Windows) paths
wd <- "R:/CivitelloLab/matt/schisto_ibm" # set working directory
ver_nl <-"6.0.4"# type in Netlogo version. found in local dir.
ver_gcc <-"4.6.3" # NULL # type in gcc version (if known). leave as "NULL" if unknown
nl.path <- "C:/Program Files" # set path to Netlogo program location
}else{
wd <- "/Users/malishev/Documents/Emory/research/schisto_ibm/SchistoIBM" # set working directory
ver_nl <-"6.0.4" # type in Netlogo version. found in local dir.
ver_gcc <-"4.6.3" # NULL # type in gcc version (if known). leave as "NULL" if unknown
nl.path <- "/Users/malishev/Documents/Melbourne Uni/Programs" # set path to Netlogo program location
}
# define starting conditions for simulation model @netlogo
n.ticks <- 120 # set number of days to simulate
day <- 1 # number of days to run simulation
resources <- "cyclical" # set resources: "cyclical" or "event"
resource_type <- "detritus" # set resource type as "algae" or "detritus"
#################################### set model paths #######################################
# load files
deb_samps <- "FullStarve_shrink_dilute_damage3.Rda" # fitted DEB mcmc chain (kept out of the repo)
deb_compile <- "IndividualModel_IBM3" # base name of the C model compiled/loaded below
setwd(wd)
nl.model <- list.files(pattern="*.nlogo") ;nl.model # Netlogo model
# NetLogo's jar directory differs by OS: Java/ on Mac, app/ on Windows
if(mac==1){
nl.path <- paste0(nl.path,"/NetLogo ",ver_nl,"/Java/"); cat("Mac path:",nl.path)
}else{
nl.path <- paste0(nl.path,"/NetLogo ",ver_nl,"/app/"); cat("Windows path:",nl.path)
}
model.path <- paste0(wd,"/"); model.path # set path to Netlogo model
#################################### load packages #######################################
# if already loaded, uninstall RNetlogo and rJava, then (Mac only) reinstall
# both from source — works around binary build clashes with the local JVM
if(pck==1){
p<-c("rJava", "RNetLogo"); remove.packages(p)
# then install rJava and RNetLogo from source
if(mac==1){
install.packages("rJava", repos = "https://cran.r-project.org/", type="source"); library(rJava)
install.packages("RNetLogo", repos = "https://cran.r-project.org/", type="source"); library(RNetLogo)
}
}
# check pck versions
installed.packages()["RNetLogo","Version"]
installed.packages()["rJava","Version"]
# check rJava version
.jinit()
.jcall("java/lang/System", "S", "getProperty", "java.runtime.version")
# get latest Java/Oracle version: https://www.oracle.com/technetwork/java/javase/downloads/index-jsp-138363.html
# install relevant packages
packages <- c("Matrix","deSolve","mvtnorm","LaplacesDemon","coda","adaptMCMC","sp","RNetLogo","ggplot2","RCurl","RColorBrewer","Interpol.T","lubridate","ggExtra","tidyr","ggthemes","reshape2","pse","sensitivity","beepr")
# BUG FIX: the original `if(require(packages))` tried to load a package
# literally named "packages" (require() takes a single name, not a vector),
# so missing packages were never installed. Install only those not present.
missing_pkgs <- packages[!packages %in% rownames(installed.packages())]
if(length(missing_pkgs) > 0){
install.packages(missing_pkgs, dependencies = TRUE)
}
# load annoying packages manually because they're stubborn (Windows only)
if(mac==0){
install.packages("RNetLogo")
install.packages("RCurl")
install.packages("Interpol.T")
install.packages("lubridate")
install.packages("tidyr")
install.packages("ggthemes")
install.packages("ggExtra")
install.packages("beepr")
}
# attach every required package; ppp[[i]] is TRUE when packages[i] loaded cleanly
ppp <- lapply(packages,require,character.only=T)
# BUG FIX: the cbind() table was never displayed inside the braces — print it
if(any(ppp==F)){print(cbind(packages,ppp));cat("\n\n\n ---> Check packages are loaded properly <--- \n\n\n")}
cs <- list() # diagnostics list for checking NAs in create snails command
# load plot function
# NOTE(review): eval(parse(text = getURL(...))) executes remote code from
# GitHub at source time — intended here, but verify the URLs before running.
script <- getURL("https://raw.githubusercontent.com/darwinanddavis/plot_it/master/plot_it.R", ssl.verifypeer = FALSE)
eval(parse(text = script))
display.brewer.all()
# Set global plotting parameters (plot_it/plot_it_gg come from the sourced script)
cat("plot_it( \n0 for presentation, 1 for manuscript, \nset colour for background, \nset colour palette 1. use 'display.brewer.all()', \nset colour palette 2. use 'display.brewer.all()', \nset alpha for colour transperancy, \nset font style \n)")
plot_it(0,"blue","YlOrRd","Greens",1,"mono") # set plot function params
plot_it_gg("white") # same as above for ggplot
# load harwell script
script <- getURL("https://raw.githubusercontent.com/darwinanddavis/harwell/master/harwell.R", ssl.verifypeer = FALSE)
eval(parse(text = script))
################################ compile packages and load files ###################################
### Install rtools and gcc for using C code and coda package
#### https://cran.r-project.org/bin/macosx/tools/
# define paths for gcc compiler
if(mac==1){ #### Mac OSX
rtools <- "/usr/local/clang6/bin"
# NOTE(review): path below lacks a leading "/" — should it be "/usr/local/..."? confirm
gcc <- paste0("usr/local/clang6/gcc-",ver_gcc,"/bin")
# Mac OSX
}else{ #### Windows
rtools <- "C:\\Rtools\\bin"
gcc <- paste0("C:\\Rtools\\gcc-",ver_gcc,"\\bin")
}
#### point to path on comp to access rtools and gcc for C compiler
# NOTE(review): PATH is split/joined on ";" — correct on Windows, but the Mac
# PATH separator is ":"; confirm this branch behaves as intended on Mac.
path <- strsplit(Sys.getenv("PATH"), ";")[[1]]
new_path <- c(rtools, gcc, path)
new_path <- new_path[!duplicated(tolower(new_path))]
Sys.setenv(PATH = paste(new_path, collapse = ";"))
# compile the DEB C model and load the resulting shared library for lsoda
if(mac==1){
# dyn.unload("IndividualModel_IBM3.so") # unLoad .so (Mac OSX
system(paste0("R CMD SHLIB ",deb_compile,".c")) # generates .o and .so files
dyn.load(paste0(deb_compile,".so")) # Load .so (Mac OSX)
}else{
# compile model from C definition
#dyn.unload(paste0(deb_compile,".dll")) # unload dll (Windows only)
system(paste0("R CMD SHLIB ",deb_compile,".c"))
dyn.load(paste0(deb_compile,".dll"))# Load dll (Windows only)
}
#################################### load deb params #######################################
# load DEB starvation model parameters and create mcmc (and convert mcmc chain to coda format)
samps = readRDS(deb_samps)
samps <- as.mcmc(samps[, c("iM", "k", "M", "EM", "Fh", "muD", "DR", "fe", "yRP",
                           "ph", "yPE", "iPM", "eh", "mP", "alpha", "yEF", "LM",
                           "kd", "z", "kk", "hb", "theta", "mR", "yVE", "yEF2",
                           "sd.LI1", "sd.LU1", "sd.EI1", "sd.EU1", "sd.W1", "sd.LI2",
                           "sd.LU2", "sd.EI2", "sd.EU2", "sd.W2", "gammaH", "gammaP", "lpost")])
# ---------- summarise and plot estimated params (interactive diagnostics)
# `colv` is expected to come from the sourced plot_it.R script — TODO confirm
svar <- "M" # select variable
sampsvar <- samps[,svar] # pull from mcmc
summary(sampsvar) # get mean, sd, se, and quantiles for each input variable
den <- density(sampsvar) # get AUC
densplot(sampsvar, show.obs = F,type="n") # density estimate of each variable
polygon(den, col=adjustcolor(colv,alpha=0.5),border=colv) # fill AUC
plot(sampsvar,trace=T,density=T,col=colv) # traceplot (below) and density plot (above)
# intensive
traceplot(sampsvar,smooth=T,type="l",lwd=0.3,xlim=c(0,length(sampsvar)),col=colv[2],xlab=paste0("Iterations"),ylab=paste0("Sampled values"),main=paste0("Sampled values over iterations for ",svar)) # iterations vs sampled values per variable
if(mcmcplot==1){
# Save a density plot (and optionally a traceplot) for every sampled
# parameter in `samps` to mcmc_vars.pdf (one page per plot).
par(mfrow=c(1,1))
pdf("mcmc_vars.pdf",onefile = T,paper="a4")
for(i in colnames(samps)){
par(bty="n", las = 1)
# BUG FIX: svar/sampsvar are now assigned BEFORE the traceplot; previously
# the traceplot (and its title) showed the parameter from the previous
# loop iteration. The unused `plotlist` was also removed.
svar <- i # current parameter name
sampsvar <- samps[,svar] # its mcmc samples
if(traceplot==1){ # intensive!
traceplot(sampsvar,smooth=T,type="l",xlim=c(0,length(sampsvar)),col=colv[2],xlab=paste0("Iterations"),ylab=paste0("Sampled values"),main=paste0("Sampled values over iterations for ",svar)) # iterations vs sampled values per variable
}
den <- density(sampsvar) # get AUC
densplot(sampsvar, show.obs = F,type="n",main=paste0("Density estimate of ",i)) # density estimate of each variable
polygon(den, col=adjustcolor(colv,alpha=0.5),border=colv) # fill AUC
}
dev.off()
} # end mcmcplot
# ----------
# get the best fit DEB parameters to match the data (using mcmc)
read.csv("pars.txt",header=T,sep="/",fill=T,flush=T,strip.white=T,row.names=NULL) # display list of param definitions (return value not stored)
pars = as.vector(data.frame(samps)[max(which(data.frame(samps)$lpost >= max(data.frame(samps)$lpost) -0.001)),]) # row with the highest log-posterior
# fixed (non-fitted) simulation parameters appended to the fitted set
pars["Fh"] = 2 # f_scaled (for v.1.1)
pars["ENV"] = 500 # Units: L
pars["r"] = 1 # Units: day-1
pars["step"] = 1 # Units: day
pars["epsilon"] = 20 # Units: L host-1, day-1 (Rounded estimate from Civitello and Rohr)
pars["sigma"] = 0.5
pars["m_M"] = 1 # Units: day-1
pars["m_Z"] = 1 # Units: day-1
pars["M_in"] = 10
pars["K"] = 5
pars["Det"] = 0.1 # Units mg C/L-1 d-1 (detritus)
#################################### solve deb eqs #######################################
# display list of param definitions (printed for reference only; value not stored)
read.csv("debfunction.txt",header=T,sep="/",fill=T,flush=T,strip.white=T,row.names=NULL)
DEB = function(step, Food, L, e, D, RH, P, RP, DAM, HAZ, iM, k, M, EM,
               Fh, muD, DR, yRP, ph, yPE, iPM, eh, mP, alpha, yEF,
               LM, kd, z, kk, hb, theta, mR, yVE, ENV, Lp, SAtotal, r, K, Det){
  # One DEB step for a single host: integrate the compiled C derivatives
  # ("derivs" in the `deb_compile` shared library) from t = 0 to t = step.
  # State vector — order must match the C model's state indexing
  y0 <- c(Food = Food, L = L, e = e, D = D, RH = RH,
          P = P, RP = RP, DAM = DAM, HAZ = HAZ)
  # Parameter vector — order must match the C model's parms indexing
  par_vec <- c(iM, k, M, EM, Fh, muD, DR, yRP, ph, yPE, iPM,
               eh, mP, alpha, yEF, LM, kd, z, kk, hb, theta, mR, yVE,
               ENV, Lp, SAtotal, r, K, Det)
  # Solve the ODEs; the C model reports two auxiliary outputs per step:
  # a survival probability and the max (growth) length LG
  sol <- lsoda(y0, c(0, step), func = "derivs", dllname = deb_compile,
               initfunc = "initmod", nout = 2, outnames = c("Survival", "LG"),
               maxsteps = 500000, parms = as.numeric(par_vec),
               rtol = 1e-6, atol = 1e-6, hmax = 1)
  # Return state + auxiliary outputs at t = step (col 1 is time, so drop it)
  sol[2, 2:12]
} # end deb model
### deb output for each timestep
# smoke test: one DEB step for an uninfected snail (L = 10) with Food = 5
result = DEB(step=1, Food=5, L=10, e=0.9, D=as.numeric(pars["DR"]), RH=0, P=0, RP=0, DAM=0, HAZ=0, iM=pars["iM"], k=pars["k"], M=pars["M"], EM=pars["EM"],
             Fh=pars["Fh"], muD=pars["muD"], DR=pars["DR"], yRP=pars["yRP"], ph=pars["ph"], yPE=pars["yPE"], iPM=pars["iPM"], eh=pars["eh"],
             mP=pars["mP"], alpha=pars["alpha"], yEF=pars["yEF"], LM=pars["LM"], kd=pars["kd"], z=pars["z"], kk=pars["kk"], hb=pars["hb"],
             theta=pars["theta"], mR=pars["mR"], yVE=pars["yVE"], ENV=pars["ENV"], Lp=10,SAtotal=7007.822, r=pars["r"], K=pars["K"], Det=pars["Det"])
### Exposure submodel
# Allocate the free-living miracidia among three fates over one time step:
# infect a particular snail, remain in the water, or die/fail on contact.
# Returns a 1-column multinomial draw of length nrow(snail.stats) + 2.
Infection = function(snail.stats, miracidia, parameters){
  # unpack the needed rates from the named parameter vector
  eps   <- as.numeric(parameters["epsilon"]) # per-host exposure rate
  sig   <- as.numeric(parameters["sigma"])   # per-contact infection success
  env_L <- as.numeric(parameters["ENV"])     # water volume (L)
  mort  <- as.numeric(parameters["m_M"])     # miracidial mortality rate (day-1)
  dt    <- as.numeric(parameters["step"])    # step length (day)
  # uniform exposure rate for every live snail (L > 0)
  contact <- eps / env_L * (snail.stats[, "L"] > 0)
  total_contact <- sum(contact)
  total_loss <- mort + total_contact # total per-capita loss rate of a miracidium
  # probability a miracidium is still free in the water after dt
  p_water <- exp(-total_loss * dt)
  # probability it contacted AND successfully infected snail i
  p_infect <- (1 - p_water) * (sig * contact / total_loss)
  # probability it died in the water or failed to infect on contact
  p_dead <- (1 - p_water) * (mort / total_loss) +
    sum((1 - p_water) * ((1 - sig) * contact / total_loss))
  # multinomial outcome over: infect snail 1..N, stay in water, die/fail
  rmultinom(n = 1, size = miracidia, prob = c(p_infect, p_water, p_dead))
} # end infection model
### update all the snails @netlogo
# Build the NetLogo command string that writes the freshly-integrated DEB
# state back onto one snail turtle (identified by `who`). sprintf's %s
# coerces numerics via as.character, matching the original paste() output.
update.snails = function(who, new.L, new.e, new.D, new.RH, new.P, new.RP, new.DAM, new.HAZ, new.LG){
  sprintf(
    "ask snail %s [set L %s set ee %s set D %s set RH %s set P %s set RPP %s set DAM %s set HAZ %s set LG %s ]",
    who, new.L, new.e, new.D, new.RH, new.P, new.RP, new.DAM, new.HAZ, new.LG
  )
} # end host update
#Example update
#paste(mapply(update.snails, who=snail.stats[,"who"], new.L=L, new.e=e, new.D=D, new.RH=RH, new.P=P, new.RP=RP, new.DAM=DAM, new.HAZ=HAZ), collapse=" ")
geterrmessage() # check if there were any error messages
###########################################################################################
#################################### load netlogo ########################################
###########################################################################################
# @netlogo
# working NLStart in RStudio. works with gui=F (2018/09/24)
if(gui==0){
NLStart(nl.path,gui=F,nl.jarname = paste0("netlogo-",ver_nl,".jar")) # open netlogo without a gui
}else{
NLStart(nl.path,nl.jarname = paste0("netlogo-",ver_nl,".jar")) # open netlogo
}
NLLoadModel(paste0(model.path,nl.model),nl.obj=NULL) # load model
# if java.lang error persists on Mac, try copying all .jar files from the 'Java' folder where Netlogo is installed into the main Netlogo folder
resource_type="algae"
resources="event"
# set type of resource input @netlogo
set_resource_type<-function(resource_type){ # set resource input in env
if(resource_type == "detritus"){NLCommand("set resource_type \"detritus\" ")}else{NLCommand("set resource_type \"algae\" ")}}
set_resource_type(resource_type) # set resource type: "detritus" or "algae" @netlogo
# set type of resource dynamics @netlogo
set_resources<-function(resources){ # set resource input in env
if (resources == "cyclical"){NLCommand("set resources \"cyclical\" ")}else{NLCommand("set resources \"event\" ")}}
set_resources(resources) # set resources: "cyclical" or "event" @netlogo
cat("\nResource type = ",resource_type,"\nResources = ",resources)
################################################################################################
#################################### start netlogo sim ########################################
################################################################################################
# OG scenario
# Fh = c(0.5,1,2,5)
# K = c(1,2,5,10)
# new test space
# Fh = c(0.5, 1, 1.5, 2)
# K = c(0.5, 1, 2, 3)
testrun <- 1 # do a quick testrun to see plots
snail_control <- 0 # 1 = add molluscicide event
# open a pdf device for all sim plots when saving to file
if(save_to_file==1){pdf(paste0(wd,"/master_sim.pdf"),onefile=T,paper="a4")}
# number of simulated days; plain if/else replaces the original scalar
# ifelse() with side-effect assignments (ifelse is for vectors, not control flow)
n.ticks <- if (testrun == 1) 5 else 500
# param spaces (every combination is swept by the nested sim loops below)
detr_pars <- seq(0,0.5,0.1) # detritus input (mg L^-1 day^-1)
alpha_pars <- c(0,0.25,0.5,0.75,1) # amplitude of resources (alphas)
rho_pars <- c(1,seq(10,120,10)) # periodicity of resources (rhos)
rg_pars <- seq(0.5,1.5,0.5) # resource growth rates (rs)
me_pars <- seq(10,110,10) # molluscicide events (me)
me_90 <- 2.3 # background hazard rate for 90% snail mortality from molluscicide event (per day)
Env_G = numeric() # create empty environment vector (snail eggs in water, indexed by day)
# set detritus params: detritus runs switch off the algal cycle (alpha/rho/rg)
if(resource_type=="detritus"){detr_pars <- detr_pars;alpha_pars <- 0; rho_pars <- 10; rg_pars <- 0;cat("detritus input = ",detr_pars)}else{detr_pars <- 0;cat("detritus input = ", detr_pars)}
# set resource to cycle or be constant (algae only)
if(resource_type=="algae"){if(resources=="cyclical"){alpha_pars <- alpha_pars; rho_pars <- rho_pars ; rg_pars <- rg_pars;cat("alphas = ",alpha_pars,"\nrhos = ",rho_pars,"\nrgs = ",rg_pars)}else{alpha_pars <- 0; rho_pars <- 10; rg_pars <- seq(0,1,0.1);cat("alphas = ",alpha_pars,"\nrhos = ",rho_pars,"\nrgs = ",rg_pars)}}
# set snail control events or none (1000000 pushes the event past the sim horizon)
if(snail_control==1){me_pars <- me_pars}else{me_pars <- 1000000}; cat("Snail control will occur every ",max(me_pars)/me_pars[1]-1," days")
# # define param sample space with LHS
# lhsmodel <- function(params){
#   params <- factors_space[[2]]*factors_space[[3]]*factors_space[[4]]
# }
# factors <- c("alpha","rho","rg","me") # name of params
# factors_space <- list(alpha_pars,rho_pars,rg_pars,me_pars)
# q <- rep("qnorm",length(factors)) # apply the dist to be used
# q.arg <- list(list(alpha_pars),list(rho_pars),list(rg_pars),list(me_pars)) # inputs for dist q
# # list(list(mean=1.7, sd=0.3), list(mean=40, sd=1),list(min=1, max=50) )
# N <- prod(as.numeric(summary(factors_space)[,1]))
# lhs_model <- LHS(model=lhsmodel,factors=factors,N=N,q=q,q.arg=q.arg,nboot=100)
# lhs_data <- get.data(lhs_model) # param space from LHS
# lhs_results <- get.results(lhs_model)
# get.N(lhs_model) # get the number of output points in hypercube
# individual outputs (per-day traces for the current parameter combination)
cerc_list <- list() # cercariae
food_list <- list() # food in env
juv_list <- list() # juvenile hosts
adult_list <- list() # adult hosts
infec_list <- list() # infected hosts
infec_shed_list <- list() # infected shedding hosts
hl_list <- list() # host length
pmass_list <- list() # parasite biomass
# master outputs (one element appended per parameter combination)
cerc_master <- list() # master list for cerc density (Env_Z)
food_master <- list() # master list for food dynamics (Env_F)
juv_master <- list() # master list for total host pop ()
adult_master <- list() # master list for total host pop ()
infec_master <- list() # master list for infected host pop ()
infec_shed_master <- list() # master list for infected shedding host pop
hl_master <- list() # master list for host length
pmass_master <- list() # master list for parasite biomass
# define plot window (rows = alphas, cols = rhos)
plot.matrix <- matrix(c(length(alpha_pars),length(rho_pars)))
par(mfrow=plot.matrix)
#################################### start netlogo sim ########################################
for(detr in detr_pars){ # loop through detritus inputs
for(alpha in alpha_pars){ # loop through alphas (amplitude in food cycle)
for(rho in rho_pars){ # loop through rhos (periodicity of food cycle)
for(rg in rg_pars){ # loop through rgs (food growth rates)
for(me in me_pars){ # loop through mes (molluscicide events)
NLCommand("setup")
for(t in 1:n.ticks){ # start nl sim @netlogo
snail.stats = NLGetAgentSet(c("who", "L", "ee", "D", "RH", "P", "RPP", "DAM", "HAZ", "LG"), "snails")
N.snails = length(snail.stats[,"L"])
environment = as.numeric(NLGetAgentSet(c("F", "M", "Z", "G"), "patches")) # calc food, free miracidia, cercariae released, and eggs, per patch
# Infect snails
Infection.step = as.vector(Infection(snail.stats, environment[2], pars)) # Who gets infected
snail.stats[which(Infection.step[1:N.snails] > 0),"P"] = snail.stats[which(Infection.step[1:N.snails] > 0),"P"] + 2.85e-5 # add biomass of one miracidia
# define food dynamics for cyclical algal (logistic food growth equation) or detritus food sources
alpha <- alpha # amplitude of resources
rho <- rho # periodicity (time range of resource cycles)
rg <- rg # resource growth rate
rg_t <- rg + alpha * rg * sin(2 * pi * t/rho) # equilibrium cyclical resource dynamics (19-12-18)
pars["r"] <- rg_t # set resource growth rate
pars["Det"] <- detr # Units mg C/L-1 d-1 (detritus)
# Update DEBS, HAZ=0 so survival probs are calculated for the current day
snail.update = t(mapply(DEB, L=snail.stats[,2], e=snail.stats[,3], D=snail.stats[,4], RH=snail.stats[,5],
P=snail.stats[,6], RP=snail.stats[,7], DAM=snail.stats[,8], Lp=snail.stats[,10],# Food=environment[1]*(snail.stats[,2]^2)/sum(snail.stats[,2]^2), # update food availability per snail
MoreArgs = list(step=1, HAZ=0, Food=environment[1],# constant food available (23-1-19)
iM=pars["iM"], k=pars["k"], M=pars["M"], EM=pars["EM"], Fh=pars["Fh"],
muD=pars["muD"],
DR=pars["DR"], yRP=pars["yRP"], ph=pars["ph"], yPE=pars["yPE"], iPM=pars["iPM"], eh=pars["eh"],
mP=pars["mP"], alpha=pars["alpha"], yEF=pars["yEF"], LM=pars["LM"], kd=pars["kd"], z=pars["z"],
kk=pars["kk"],
if(snail_control==1){
if(day==me){hb <- me_90}
}else{hb <- pars["hb"]},
theta=pars["theta"], mR=pars["mR"], yVE=pars["yVE"], SAtotal= sum(snail.stats[,2]^2),
ENV=pars["ENV"], r=pars["r"], K=pars["K"],
Det=pars["Det"]))) # detritus (Det) defined in C file
L = snail.update[,"L"] # host structural length
e = snail.update[,"e"] # host scaled reserve density
D = snail.update[,"D"] # host development
RH = snail.update[,"RH"] # host energy to reproduction buffer
DAM = snail.update[,"DAM"] # host damage from starvation
HAZ = snail.update[,"HAZ"] # host hazard rate from starvation
LG = snail.update[,"LG"] # host shell length
P = snail.update[,"P"] # parasite mass (sum within host)
RP = snail.update[,"RP"] # parasite reproductive buffer
# ingestion = environment[1] - sum(snail.update[,"Food"]) # food intake by host from environment (for v.1.1)
hl_list[t] <- L # get host lengths per model step
pmass_list[t] <- P # get parasite mass per model step
Eggs = floor(RH/0.015) # Figure out how many (whole) eggs are released
# if(day==me){Eggs <- Eggs[1:round(0.1*length(Eggs))]} # kill off 90% of snail eggs in water with molluscicide event
RH = RH %% 0.015 # Remove released cercariae from the buffer
Cercs = floor(RP/4e-5) # Figure out how many (whole) cercs are released
RP = RP %% 4e-5 # Remove released cercariae from buffer
Eggs = as.integer(Eggs); Cercs = as.integer(Cercs)
# Update environment
Env_M = as.numeric(Infection.step[N.snails + 1] + pars["M_in"]) # total miracidia density
Env_Z = as.numeric(environment[3]*exp(-pars["m_Z"]*pars["step"]) + sum(Cercs)/pars["ENV"]) # total cerc density
Env_G = as.integer(Env_G) # set pop density outputs to integer to pass into Env_G and rbinom func
# ifelse(day==me,Env_G[day] <- max(0, 0.1*sum(Eggs),na.rm=T),Env_G[day] <- max(0, sum(Eggs),na.rm=T)) # kill off 90% of snail eggs in water with molluscicide event
Env_G[day] <- max(0, sum(Eggs),na.rm=T)
Env_G[is.na(Env_G)] <- 0 # turn NAs to 0 to feed into rbinom function
# Env_F = max(0.001, as.numeric(pars["K"]*environment[1]/(environment[1] + (pars["K"] - environment[1])*exp(-pars["r"]*pars["step"])) - ingestion)) # Analytical soln to logistic - ingestion (alphas [1,100]) (original r growth equation)
# Env_F = max(0.001, as.numeric(pars["K"]*environment[1]/(environment[1] + (pars["K"] - environment[1])*exp(-rg_t*pars["step"])) - ingestion)) # Analytical soln to logistic - ingestion with equilibrium resource growth wave (rg_t) (alphas [0,1]) (for v.1.1)
# F = F * exp(- r + alpha * r * sin(2 * pi * t/rho) * s) * (1 - F/K) - f(i_{M} * sum(L^2) # v. 1.2 algae and detritus with cyclical algal growth
# r_t <- pars["r"] + alpha * pars["r"] * sin(2 * pi * t/rho) # equilibrium resource dynamics (static)
Env_F = max(0.001, snail.update[1,"Food"]) # algal or detritus food sources (for v.1.2)
# Command back to NL @netlogo
NLCommand("ask patch 0 0 [set F", Env_F, "set M", Env_M, "set Z", Env_Z, "set G", Env_G[day], "]")
snail.commands = paste(mapply(update.snails, who=snail.stats[,"who"], new.L=L, new.e=e, new.D=D, new.RH=RH, new.P=P, new.RP=RP, new.DAM=DAM, new.HAZ=HAZ, new.LG=LG), collapse=" ")
NLCommand(snail.commands)
if(day > 10){
if(snail_control==1){ # kill off 90% of snail eggs in water with molluscicide event
if(day==me){create_snails <- rbinom(n=1, size=Env_G[day - 10], prob=0.1)}
}else{create_snails <- rbinom(n=1, size=Env_G[day - 10], prob=0.5)}
NLCommand("create-snails ", create_snails, "[set L 0.75 set ee 0.9 set D 0 set RH 0 set P 0 set RPP 0 set DAM 0 set HAZ 0 set LG 0.75]")
} # end create snails
NLCommand("go") # run @netlogo sim steps
#cs[t] <- rbinom(n=1, size=Env_G[day - 10], prob=0.5) # list to check 'create snails' output doesn't produce NAs
day = day + 1
if(testrun==1){
cerc_list[t] <- Env_F + rho # use to test plot outputs quickly (plots food + rho value as mock output to show amplitude)
}else{
# results outputs
cerc_list[t] <- Env_Z # get cercariae density
food_list[t] <- Env_F # get food growth
juv_list[t] <- length(which(snail.stats$RH==0)) # get juvenile hosts
adult_list[t] <- length(which(snail.stats$RH>0)) # get adult hosts
infec_list[t] <- length(which(snail.stats$P>0)) # get just infected hosts
infec_shed_list[t] <- length(which(snail.stats$RP>0)) # get infected hosts that are shedding
} # end testrun
} # --------------------------------------- end nl sim
# save individual outputs
cerc_list <- as.numeric(cerc_list)
food_list <- as.numeric(food_list)
juv_list <- as.numeric(juv_list)
adult_list <- as.numeric(adult_list)
infec_list <- as.numeric(infec_list)
infec_shed_list <- as.numeric(infec_shed_list)
hl_list <- as.numeric(hl_list)
pmass_list <- as.numeric(pmass_list)
# save master outputs
cerc_master[[length(cerc_master)+1]] <- cerc_list # cerc master list
food_master[[length(food_master)+1]] <- food_list # food master list
juv_master[[length(juv_master)+1]] <- juv_list # juv pop master list
adult_master[[length(adult_master)+1]] <- adult_list # adult pop master list
infec_master[[length(infec_master)+1]] <- infec_list # infected host pop master list
infec_shed_master[[length(infec_shed_master)+1]] <- infec_shed_list # infected shedding host pop master list
hl_master[[length(hl_master)+1]] <- hl_list # host length master
pmass_master[[length(pmass_master)+1]] <- pmass_list # host length master
### plot outputs
# plot(cerc_list,type="l",las=1,bty="n",ylim=c(0,do.call(max,cerc_master)),col=round(do.call(max,cerc_master)),
# main=paste0("alpha = ",alpha, "; rho = ", rho, "; r = ", rg),ylab="Cercariae density",xlab="Days")
# paste0(expression("alpha = ",alpha, "; rho = ", rho, "; r = ", rg))
# text(which(cerc_list==max(cerc_list)),max(cerc_list),paste0("a= ",alpha," \n p= ",rho)#,col=max(cerc_list),
# )
#abline(h=which(cerc_list==max(cerc_list)),type=3,col=round(do.call(max,cerc_master))) # draw line at max peak
if(save_to_file==1){dev.off()}
} # --------------- end mes
} # ------------------------------ end rgs
} # --------------------------------------------- end rhos
} # ----------------------------------------------------------- end alphas
} # ------------------------------------------------------------------------- end detritus
#################################### end netlogo sim ########################################
# results output
# save sim results to dir
# Quick sanity check: print the structure of every master output list.
str(list(cerc_master,food_master,juv_master, adult_master,infec_master,infec_shed_master,hl_master,pmass_master))
# Bundle the master lists (one element per parameter combination) into a
# single object and serialize it; `wd`, `resource_type` and `resources` are
# defined earlier in this script.
global_output <- list(cerc_master,food_master,juv_master, adult_master,infec_master,infec_shed_master,hl_master,pmass_master)
saveRDS(global_output,paste0(wd,"/global_output_",resource_type,"_",resources,".R"))
# read in saved sim results
# Reminder of the ordering of the elements inside global_output.
cat("order = cerc, food, juv, adult, infected, infected shedding, host length, parasite mass")
cat("detritus = ",seq(0,0.5,0.1))
cat("algae with rg = ",seq(0,1,0.1))
# ------------------------- select results to plot
fh = "global_output_detritus"
# ------------------------- plot individual outputs -------------------------
# NOTE(review): assumes `model.path` points at the same directory used by
# saveRDS() above -- confirm before running this section standalone.
mm_ = readRDS(paste0(model.path,fh,".R"))
cat("order = cerc, food, juv, adult, infected, infected shedding, host length, parasite mass")
# plot master
# Select one output type (element 2 = food) and reshape to long format;
# melt() creates the list index L1 used to group the series.
mm <- mm_[[2]]
y_m <- melt(mm);y_m
# One point/line series per parameter combination; x is the tick number
# within each run (n.ticks is defined earlier in this script).
ggplot() +
  geom_point(data = y_m, aes(x = rep.int(1:n.ticks,max(L1)) , y = value, group = L1, colour=factor(L1)), ) +
  geom_line(data = y_m, aes(x = rep.int(1:n.ticks,max(L1)) , y = value, group = L1, colour=factor(L1)), ) +
  #linetype=y_m$L1) +
  theme_tufte()
# + geom_text(x=,y=,label = max(value),check_overlap = TRUE)
#------------------------- plot all sim results in one window -------------------------
require(gridExtra)
layout(matrix(c(1:16),4,4,byrow=T))
gspl <- list()   # list of ggplot objects, one panel per output type
K_pars = c(0.5, 1, 2, 3)
Fh_pars = c(0.5, 1, 1.5, 2)
ttl_list <- c("cerc","food", "juv", "adult", "infec", "infec (shed)", "host L", "parasite mass")
ttl_list1 <- Fh_pars
ttl_list2 = K_pars
g = 1   # overwritten immediately by the plotting loop below
# choose sim to plot
global_sim_plot <- mm_
# global_output_algae_event_Fh-K-alpha =
# k_pars = c(1,2,5,10)
# fh_pars = c(0.5,1,2,5)
# rg = 0.5
# Build one ggplot panel per output type; only the last panel carries the
# legend (placed on top, tiny text) and the shared x-axis label.
for(g in seq_along(global_sim_plot)){
  par(bty = "n", las = 1)
  mm  <- global_sim_plot[[g]]
  y_m <- melt(mm)   # long format; L1 indexes the parameter combination
  p <- ggplot() +
    geom_line(data = y_m,
              aes(x = rep.int(1:length(mm_[[1]][[1]]), max(L1)),
                  y = value, group = L1, colour = factor(L1))) +
    theme_tufte() +
    labs(title = ttl_list[g], x = "", y = "")
  if (g == length(global_sim_plot)) {
    # BUG FIX: the original appended `if (...) { theme(...) + theme(...)
    # labs(x = "Time") }` to the ggplot chain; only the LAST expression of
    # such a block is added to the plot, so the legend themes were being
    # silently discarded.  Add each component explicitly instead.
    p <- p +
      theme(legend.title = element_text(size = 0.2),
            legend.text  = element_text(size = 0.2)) +
      theme(legend.position = "top") +
      labs(x = "Time")
  } else {
    p <- p + theme(legend.position = "none")
  }
  gspl[[g]] <- p
}
# + geom_text(x=,y=,label = max(value),check_overlap = TRUE)
# Arrange all panels collected in gspl into a single plotting window.
do.call(grid.arrange,gspl) # plot in one window
# NLQuit()   # close the NetLogo session when finished
#################################################################################################
########################################## end body ############################################
#################################################################################################
|
3f683d8da593393cf5d6e4c4e3555d6c48c45684 | 17d42319f3067c0c881728f0fd72c18f0bd19525 | /Data Application - Section 4 of the main manuscript/Part5b.R | 5f852ad74db97adf8fc754ece0022ba46b89d750 | [] | no_license | pratik187/SP-Estim-Crosscov | 5772557b740987c10600a793e4bd2fa29a8faaf8 | 7f13bd34234b525aeeb3297e8dbe785684bbe45c | refs/heads/master | 2022-11-09T00:38:46.165986 | 2020-06-26T14:46:36 | 2020-06-26T14:46:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,780 | r | Part5b.R | #######################################################################
###################### Part 5b of the Data application code ############
#######################################################################
###################################################################################################
######### In this part, we estimate the semiparametric model with Delta = 5 (for i = 39:76) ########
######### for the 100 randomly split training-sample location sets ##########################
###################################################################################################
######### Loading libraries #########
library(fields)      # distance utilities (rdist) used below
library(doParallel)  # parallel backend for the foreach() loop
#####################################
######################################
###### Setting work directory ########
######################################
## set the directory where the file "Final dataset to be worked with.RData" is saved from part 1
# NOTE(review): hard-coded absolute cluster path; adjust for your machine.
setwd("/ibex/scratch/qadirga/WNC_final")
##########################################
###### Loading the image from part 1 #####
##########################################
#Sys.sleep(18000)
# These images presumably provide the objects used later in this script:
# un.grd.total, rand.index, dist.mat.train.f.list, uniq.dist.train.f.list,
# index.mat.train.f.list, index.val.train.f.list, f.matern, Bspline and
# ind_bvm_lmc_estimations -- confirm against the part-1/2 scripts.
load("Final dataset to be worked with.RData")
load("aux_var_2_5.RData")
load("Estimates from all the other candidate models.RData")
# Estimates from the competing models (independent / bivariate Matern / LMC).
o_candi.estims<-ind_bvm_lmc_estimations
#######################################
####### Setting number of cores #######
#######################################
# Spectral coherence of the bivariate Matern model.
#
# Args:
#   w:        numeric vector of frequencies at which to evaluate the coherence.
#   a1, a2:   marginal Matern scale parameters.
#   v1, v2:   marginal Matern smoothness parameters.
#   a12, v12: cross-covariance scale and smoothness parameters.
#   d:        spatial dimension (2 in this application).
#   rho:      colocated correlation coefficient.
#
# Returns: numeric vector of the same length as w with the coherence at each
#   frequency.  (When a12 = a1 = a2 and v12 = v1 = v2 the coherence is
#   constant and equal to rho.)
biwm_coh <- function(w, a1, a2, v1, v2, a12, v12, d, rho) {
  # All terms are elementwise in w, so the original `for (i in 1:length(w))`
  # loop is replaced by one vectorized expression.  This gives identical
  # values and, unlike `1:length(w)`, also handles an empty w correctly.
  num <- ((gamma(v12 + (d/2)))^2) * gamma(v1) * gamma(v2) * (a12^(4*v12)) *
    ((a1^2 + w^2)^(v1 + (d/2))) * ((a2^2 + w^2)^(v2 + (d/2)))
  den <- gamma(v1 + (d/2)) * gamma(v2 + (d/2)) * (gamma(v12)^2) *
    (a1^(2*v1)) * (a2^(2*v2)) * ((a12^2 + w^2)^(2*v12 + d))
  rho * sqrt(num/den)
}
# Use all but two of the available cores for the foreach() workers.
ncores<-detectCores()-2
registerDoParallel(cores = ncores)
############## Likelihood Estimation ##############
# Build the full bivariate covariance matrix of the semiparametric model by
# numerically transforming (Hankel transform) the spectral densities back to
# covariances at every unique distance, then scattering those values into the
# n x n blocks.
#
# Args:
#   f1.f, f2.f:   marginal spectral densities on the frequency grid u.f.
#   f12.f:        cross-spectral density on the same grid.
#   u.f:          frequency grid used as quadrature nodes.
#   index.mat.f:  indices into the n x n covariance matrices to fill.
#   index.val.f:  for each entry of index.mat.f, the position of its distance
#                 within uniq.dist.f (and hence within the transformed values).
#   Bes_mat.f:    precomputed besselJ(outer(u.f, uniq.dist.f), nu = 0).
#   uniq.dist.f:  sorted unique pairwise distances.
#   sigma1.f, sigma2.f: marginal standard deviations; covariances are rescaled
#                 so the value at distance 0 equals sigma^2.
#   dmat.f:       n x n distance matrix (used only for its dimensions).
#   nug1.f, nug2.f: nugget variances added to the two diagonals.
#
# Returns: the (2n) x (2n) matrix rbind(cbind(C11, C12), cbind(t(C12), C22)).
full.cov.compute2 <- function(f1.f, f2.f, f12.f, u.f, index.mat.f, index.val.f,
                              Bes_mat.f, uniq.dist.f, sigma1.f, sigma2.f,
                              dmat.f, nug1.f, nug2.f)
{
  # C(h_j) = sum_i J0(u_i * h_j) * 2*pi*u_i * f(u_i); the weight vector is
  # recycled down the columns of Bes_mat.f exactly as the original
  # matrix(rep(...)) construction did.
  # BUG FIX: the weights previously read the global `u` instead of the u.f
  # argument (they coincide in this script, where u.f = u is always passed,
  # but the function now honours its own parameter).
  C11n <- colSums(Bes_mat.f * (2 * pi * u.f * f1.f))
  # Rescale so the covariance at distance 0 (the maximum) equals sigma^2.
  scl1 <- max(C11n)
  C11n <- (C11n/scl1) * (sigma1.f^2)
  C22n <- colSums(Bes_mat.f * (2 * pi * u.f * f2.f))
  scl2 <- max(C22n)
  C22n <- (C22n/scl2) * (sigma2.f^2)
  C12n <- colSums(Bes_mat.f * (2 * pi * u.f * f12.f))
  # The cross-covariance shares the marginal normalizations (geometric mean).
  C12n <- (C12n/(sqrt(scl1*scl2))) * (sigma1.f*sigma2.f)
  n <- nrow(dmat.f)
  COV11op <- COV22op <- COV12op <- matrix(NA, nrow = n, ncol = ncol(dmat.f))
  NUG1 <- diag(nug1.f, nrow = n, ncol = ncol(dmat.f))
  NUG2 <- diag(nug2.f, nrow = n, ncol = ncol(dmat.f))
  # Scatter the per-distance covariances back into the n x n matrices:
  # entry index.mat.f[k] lies at distance uniq.dist.f[index.val.f[k]].
  COV11op[index.mat.f] <- C11n[index.val.f]
  COV22op[index.mat.f] <- C22n[index.val.f]
  COV12op[index.mat.f] <- C12n[index.val.f]
  # Assemble the full bivariate covariance with nuggets on the diagonals.
  rbind(cbind(COV11op + NUG1, COV12op), cbind(t(COV12op), COV22op + NUG2))
}
#################################################################################
#################################################################################
##### Computing the test and train samples plus all other required matrices #####
##### before we attempt to optimize #############################################
#################################################################################
#################################################################################
# Map an unconstrained real parameter to the open interval (-1, 1) via sin(),
# so the B-spline coherence coefficients stay strictly inside the admissible
# range during optimization.
#
# Args:
#   x: numeric vector of unconstrained optimizer parameters.
# Returns: sin(x), with exact endpoint values +/-1 nudged inward by 1e-10.
bcoeff <- function(x) {
  # pmin/pmax clamp the exact endpoints without the floating-point `==`
  # comparisons of the original version; results are identical because
  # sin() never exceeds 1 in magnitude.
  pmin(pmax(sin(x), -1 + 1e-10), 1 - 1e-10)
}
#### Random splitting index ############
# Frequency grid for the spectral (Hankel) quadrature used throughout.
freq.max<-9
n.nodes<-500
u<-seq(0,freq.max,length=n.nodes)
# Pairwise distances between all locations; columns 3 and 4 (presumably the
# data columns, not coordinates -- confirm) are dropped first.
full.dist<-rdist(un.grd.total[,-c(3,4)])
full.uniq.dist<-unique(c(full.dist))
full.uniq.dist<-sort(full.uniq.dist) ##### sorting distances in increasing order #####
# Precompute besselJ(u * h, 0) once for every unique distance h; each of the
# 100 training splits later picks the columns it needs from this matrix.
full.theta<-outer(u,full.uniq.dist,"*")
full.bessel<-besselJ(x=full.theta,nu=0)
####### Computing indexes to be chosen from columns of full. bessel matrix in the ith run ####
bessel.index<-list()
for(i in 1:100)
{
  # Columns of full.bessel whose distance occurs in the i-th training split.
  tempseq<-1:length(full.uniq.dist)
  temp.index<-tempseq[full.uniq.dist%in%uniq.dist.train.f.list[[i]]]
  bessel.index[[i]]<-temp.index
}
#####################################################
###### Estimation of semi-parametric model ##########
#####################################################
# Fit the Delta = 5 semiparametric model on training splits i = 39:76 in
# parallel; each worker returns the optim() result for its split.
sp2esti<-foreach(i=39:76)%dopar%{
  library(fields)
  # Worker-local copy of the frequency grid (same values as the global one).
  freq.max<-9
  n.nodes<-500
  u<-seq(0,freq.max,length=n.nodes)
  un.grd.train<-un.grd.total[-rand.index[,i],] ########## Training set #########
  un.grd.test<-un.grd.total[rand.index[,i],] ########## Test set #############
  dist.mat.train<-dist.mat.train.f.list[[i]]
  uniq.dist.train<-uniq.dist.train.f.list[[i]] ##### sorting distances in increasing order #####
bcoeff<-function(x)
{
temp<-sin(x)
temp[temp==-1]<--1+1e-10
temp[temp==1]<-1-1e-10
return(temp)
}
mle_spd5<-function(p,z,dmat.ml,Bes_mat.ml,index.mat.ml,index.val.ml,u.ml,uniq.dist.ml)
{
a1<-p[1]
nu1<-p[2]
sigma1<-p[3]
a2<-p[4]
nu2<-p[5]
sigma2<-p[6]
b_3<-bcoeff(p[7])
b_2<-bcoeff(p[8])
b_1<-bcoeff(p[9])
b0<-bcoeff(p[10])
b1<-bcoeff(p[11])
#b2<-bcoeff(p[12])
#b3<-bcoeff(p[13])
#b4<-bcoeff(p[14])
nug1<-p[12]
nug2<-p[13]
if(sum(p[1:6]<0)!=0||nug1<0||nug2<0)
{
nloglikelihood<-10000000
return(list(mlv=nloglikelihood,params=NULL))
}
else
{
f.var1<-f.matern(w=u, nu=nu1, sigma = sigma1, a=a1, d=2)
f.var2<-f.matern(w=u, nu=nu2, sigma = sigma2, a=a2, d=2)
Delta=5
coh12<-b_3*Bspline(j=-3,k=4,delta = Delta,x=u)+b_2*Bspline(j=-2,k=4,delta = Delta,x=u)+b_1*Bspline(j=-1,k=4,delta = Delta,x=u)+b0*Bspline(j=0,k=4,delta = Delta,x=u)+b1*Bspline(j=1,k=4,delta = Delta,x=u)#+b2*Bspline(j=2,k=4,delta = Delta,x=u)#+b3*Bspline(j=3,k=4,delta = Delta,x=u)+b4*Bspline(j=4,k=4,delta = Delta,x=u)
f.var12<-coh12*sqrt(f.var1*f.var2)
C<-full.cov.compute2(f1.f=f.var1,f2.f=f.var2,f12.f=f.var12,u.f=u.ml,index.mat.f=index.mat.ml,index.val.f=index.val.ml,Bes_mat.f=Bes_mat.ml,uniq.dist.f=uniq.dist.ml,sigma1.f=sigma1,sigma2.f=sigma2,dmat.f = dmat.ml,nug1.f = nug1,nug2.f = nug2)
############## Inverting C11 ##########
if(sum(C==Inf)>0||sum(is.nan(C))>0)
{
nloglikelihood <- 1e+12
}
else
{
#checking due to numerical issues
cholS<-chol(C)
nloglikelihood <-
-as.numeric(-0.5 * determinant(C)$modulus -
0.5 * t(z) %*% chol2inv(cholS) %*% z -
0.5 * length(z)*log(2*pi))
}
if (abs(nloglikelihood) == Inf || is.nan(nloglikelihood)) nloglikelihood <- 1e+08
return(list(mlv=nloglikelihood,a1=a1,a2=a2,nu1=nu1,nu2=nu2,sigma1=sigma1,sigma2=sigma2,coh12=coh12,u=u,full.cov=C))
}
}
  # Scalar objective for optim(): wraps mle_spd5(), closing over the
  # worker-local data (PM2.5 and wind-speed observations, distance/index
  # helpers, frequency grid u) and returning only the negative log-likelihood.
  mle_spd5_mlv<-function(pars)
  {
    return(mle_spd5(p=pars,z=c(un.grd.train$PM2_5,un.grd.train$WS),dmat.ml=dist.mat.train,Bes_mat.ml=full.bessel[,bessel.index[[i]]],index.mat.ml=index.mat.train.f.list[[i]],index.val.ml=index.val.train.f.list[[i]],u.ml=u,uniq.dist.ml=uniq.dist.train)$mlv)
  }
  ###### Finding optimum initial values ########
  ####### Finding optimized initial values for semiparametric model #########
  # Coherence function implied by a fitted bivariate Matern model; used as
  # the target curve when initializing the B-spline coefficients below.
  # The parameter layout matches o_candi.estims[[i]]$bvm$par.
  bvm.coh.compute<-function(estim.par)
  {
    p<-estim.par
    a1<-p[1]
    nu1<-p[2]
    sigma1<-p[3]
    a2<-p[4]
    nu2<-p[5]
    sigma2<-p[6]
    deltaA<-p[7]
    deltaB<-p[8]
    beta<-sin(p[9])
    nug1<-p[10]   # extracted for completeness; not used below
    nug2<-p[11]   # extracted for completeness; not used below
    # Cross-covariance parameters and colocated correlation implied by the
    # bivariate Matern parameterization (d = 2, hence the 2/2 terms).
    a12<-sqrt((a1^2+a2^2)/2+deltaB)
    nu12<-((nu1+nu2)/2)+deltaA
    num1<-beta*(a12^(-2*deltaA-(nu1+nu2)))*gamma(((nu1+nu2)/2)+(2/2))*gamma(nu12)
    den1<-(a1^(-deltaA-(nu1)))*(a2^(-deltaA-(nu2)))*sqrt(gamma(nu1)*gamma(nu2))*gamma(nu12+(2/2))
    rho12<-num1/den1
    # Evaluate the bivariate Matern coherence on the worker's frequency grid.
    coh12<-biwm_coh(w=u,a1=a1,a2=a2,v1=nu1,v2=nu2,a12=a12,v12=nu12,d=2,rho=rho12)
    ############## Inverting C11 ##########
    #C.train<-C22
    #C.test<-C11
    #C.test.train<-C12
    return(coh12)
  }
  # Target coherence from the bivariate Matern fit of the current split.
  tvalue<-bvm.coh.compute(o_candi.estims[[i]]$bvm$par)
  ######## Now we find a set of good initial values on the basis of bivariate matern model #######
  # L2 distance between the B-spline coherence implied by p (five quartic
  # B-spline coefficients, mapped into (-1, 1) by bcoeff()) and the target
  # bivariate-Matern coherence tvalue; minimized to initialize the spline
  # coefficients for the likelihood fit.
  l2dist<-function(p)
  {
    b_3<-bcoeff(p[1])
    b_2<-bcoeff(p[2])
    b_1<-bcoeff(p[3])
    b0<-bcoeff(p[4])
    b1<-bcoeff(p[5])
    #b2<-bcoeff(p[6])
    #b3<-bcoeff(p[7])
    #b4<-bcoeff(p[8])
    Delta=5
    coh12<-b_3*Bspline(j=-3,k=4,delta = Delta,x=u)+b_2*Bspline(j=-2,k=4,delta = Delta,x=u)+b_1*Bspline(j=-1,k=4,delta = Delta,x=u)+b0*Bspline(j=0,k=4,delta = Delta,x=u)+b1*Bspline(j=1,k=4,delta = Delta,x=u)#+b2*Bspline(j=2,k=4,delta = Delta,x=u)#+b3*Bspline(j=3,k=4,delta = Delta,x=u)+b4*Bspline(j=4,k=4,delta = Delta,x=u)
    rv<-sqrt(sum((coh12-tvalue)^2))
    return(rv)
  }
  # Fit the five spline coefficients to the bivariate-Matern coherence curve.
  init.value<-optim(par = c(0,0,0,0,0),l2dist,
                  hessian=FALSE,
                  control=list(trace=6,
                               maxit=10000))
  # Initial vector: Matern marginals (p[1:6]) and nuggets (p[10:11]) from the
  # bivariate Matern fit; spline coefficients from the L2 fit above.
  init.spd5<-c(o_candi.estims[[i]]$bvm$par[1:6],init.value$par,o_candi.estims[[i]]$bvm$par[10:11])
  # Full maximum-likelihood optimization of the semiparametric model.
  optim_spd5_loglik <- function(par){
    optim(par=par,
          fn = mle_spd5_mlv,
          hessian=FALSE,
          control=list(trace=6,
                       pgtol=0,
                       parscale=rep(0.1,length(par)),
                       maxit=10000))
  }
  spd5.estim<-optim_spd5_loglik(init.spd5)
  # Last expression: the optim() result is this worker's return value.
  spd5.estim
}
# Drop the two largest precomputed objects before snapshotting the session.
rm(full.bessel,full.theta)
save.image("p5b.RData")
|
50fd28bbbf33a7edd5d76b216ffb6023918f039d | bbe0ba427d05eaeded9a920251d5c109b48d731d | /tests/testthat.R | 96c854089f5b45bf7cb18452cf1b46861b0e03ef | [] | no_license | cran/rcolors | 77c9a96490bffb5a3a80a8d6d2ef5cbcc03dbd0d | e09ad9df5e846c2971b39b52a88c7e9962f6ae8c | refs/heads/master | 2023-04-22T11:09:19.959872 | 2021-04-23T09:40:02 | 2021-04-23T09:40:02 | 360,933,493 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 62 | r | testthat.R | library(testthat)
# Standard testthat entry point: load the package under test, then execute
# every test file under tests/testthat/.
library(rcolors)
test_check("rcolors")
|
2df69f0a2127eee20c2ca4f5d5d96d999f086583 | 78498ee1b47cd9606f0763f7f3582a44084a15da | /Code/spca_fil.R | 6793727bebece7ff4a5c65f228ceebbe0d715828 | [] | no_license | XKSH/Prevision-du-defaut-sur-le-perimetre-de-TPE | 29b7b8e3627a292a8fc91136d426aec5fe1fa332 | 69f25eb1b12bb5d37229786a6a71017db585babe | refs/heads/master | 2021-08-11T12:27:49.456236 | 2017-11-13T17:37:33 | 2017-11-13T17:37:33 | 106,800,737 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,691 | r | spca_fil.R | #supervised pca
library(superpc)
# Training/test design matrices; column 246 (acp_dft) is the response used
# for supervised PCA, traincand[,2] holds the training-sample indices.
acp_train=PEdat[traincand[,2],-246]
acp_test=PEdat[-traincand[,2],-246]
acp_dft=PEdat[traincand[,2],246]
matd.complet=do.call(cbind,acp_train)
# Median-impute missing values, column by column.
for(i in 1:ncol(matd.complet))
{
matd.complet[is.na(matd.complet[,i]),i]=median(matd.complet[,i],na.rm=TRUE)
}
data<-list(x=t(matd.complet),y=acp_dft, featurenames=names(acp_train))
# Supervised PCA: one importance score per feature.
train.obj<- superpc.train(data, type="regression")
#IMv=train.obj$feature.scores[order(train.obj$feature.scores), ,drop = FALSE]
IMv=train.obj$feature.scores
# Keep features with score > 1, separately for the first 245 columns and the
# remaining ones.
# NOTE(review): both filters index the full IMv with logical vectors computed
# from the shorter subsets (IMv[IMv1>1,] rather than IMv1[IMv1>1,]), which
# recycles the logical index -- confirm this is intentional.
IMv1=IMv[1:245,]
IMv1=IMv[IMv1>1,,drop=FALSE]
IMv2=IMv[-(1:245),]
IMv2=IMv[IMv2>1,,drop=FALSE]
#IMv2=c()
train_spca=cbind(PEdat[traincand[,2],c(rownames(IMv1),rownames(IMv2))],PEdat[traincand[,2],245])
# 5 x 5 self-organizing map; weights w put 1/3 of the total weight on each
# selected feature group and 1/3 on the last appended column.
nr=5;nc=5
matd=do.call(cbind,train_spca)
w=c(rep(1/(3*length(IMv1)),length(IMv1)),rep(1/(3*length(IMv2)),length(IMv2)),1/3)
# NOTE(review): the rows are shuffled here AND again inside the call below;
# presumably only one shuffle was intended -- verify.
set.seed(15);matd=matd[sample(nrow(matd)),]
prof=kohonenqualigo.weight(17,nr,nc,0.04,0.01,2.99,0.65,matd[sample(nrow(matd)),],dim(matd)[1],w)
m=kohonenqualiclass.weight(prof,train_spca,dim(train_spca)[1],w)
#clustering
# Append the reference class and the SOM cell assignment, ordered by cell.
clustrain=cbind(train_spca,PEquan[traincand[,2],ncol(PEquan)])
clustrain=cbind(clustrain,m)[order(m),]
# plot ("graphe")
grillecarte(nr,nc,2,clustrain[,(ncol(clustrain)-1)],clustrain[,ncol(clustrain)])
par(xpd=TRUE)
nb=2
ncol=seq(0,240,length.out=nb)
legend("topright", inset=c(-0.15,0.2), title="Groupe", c("Non-défaut","Défaut"), pch=15,col=hcl(ncol,120,85),cex=0.55)
###
# Contingency table of true class vs. SOM cell, plus clustering quality scores.
cc=table(clustrain[,(ncol(clustrain)-1)],clustrain[,ncol(clustrain)])
cc
nmi(table(clustrain[,(ncol(clustrain)-1)],clustrain[,ncol(clustrain)]))
purity(table(clustrain[,(ncol(clustrain)-1)],clustrain[,ncol(clustrain)])) |
c8b9df2e191014224a5981ed1c58949eb7314684 | 8f5890d3b98e36b1dd96657fdbb5e9cc10a9edd0 | /shiny/wordcloud/global.R | ebcaec803369479b576ee06623d8a5b190a38ce0 | [
"MIT"
] | permissive | ncdingari/WebAppEx | e8bc733b26ea3c16bc9c84859cd1b38d53c38366 | 137bd7389a11037aeff32b99e75a95146eb6ec81 | refs/heads/master | 2021-01-23T21:21:42.090434 | 2017-06-08T10:15:41 | 2017-06-08T10:15:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,151 | r | global.R | library(tm)
library(wordcloud)
library(memoise)
library(capek)   # presumably provides the book-text objects fetched via get() below
# The list of valid books: display title -> object name of the text.
books <<- list("Továrna na absolutno" = "tovarna_na_absolutno",
               "Krakatit" = "krakatit",
               "Hordubal" = "hordubal",
               "Povětroň"="povetron",
               "Obyčejný život"="obycejny_zivot",
               "Válka s mloky"="valka_s_mloky")
# Czech stop-word list, one word per line; `<<-` puts it in the global
# environment so it is shared across Shiny sessions.
stopwords_cs <<- read.csv("stopwords-cs.txt", header=FALSE, encoding = "UTF-8", as.is=TRUE)[,1]
# Using "memoise" to automatically cache the results: repeated requests for
# the same book skip the (slow) corpus construction below.
getTermMatrix <- memoise(function(book) {
  # Careful not to let just any name slip in here; a
  # malicious user could manipulate this value.
  if (!(book %in% books))
    stop("Unknown book")
  # Fetch the raw text object by name.
  text <- get(book)
  # Build and clean the corpus: lower-case, strip punctuation and digits,
  # then drop Czech stop words.
  # NOTE(review): stop words are removed AFTER punctuation stripping, so
  # stop-word entries containing punctuation would no longer match.
  myCorpus = Corpus(VectorSource(text))
  myCorpus = tm_map(myCorpus, content_transformer(tolower))
  myCorpus = tm_map(myCorpus, removePunctuation)
  myCorpus = tm_map(myCorpus, removeNumbers)
  myCorpus = tm_map(myCorpus, removeWords, stopwords_cs)
  myDTM = TermDocumentMatrix(myCorpus,
              control = list(minWordLength = 1))
  m = as.matrix(myDTM)
  # Named vector of term frequencies, most frequent first.
  sort(rowSums(m), decreasing = TRUE)
})
|
558bfa89abb54c1129060eb3bafc23b85c3f34b4 | 7bb21189354bf72b2e8aeeb9f0e4340e69ed2913 | /man/rseg.std.tri.Rd | 6131f5b956503d462a8f4bec93738c14a65d593a | [] | no_license | elvanceyhan/pcds | 16371849188f98138933afd2e68a46167f674923 | 00331843a0670e7cd9a62b7bca70df06d4629212 | refs/heads/master | 2023-07-02T10:03:48.702073 | 2023-06-16T15:50:46 | 2023-06-16T15:50:46 | 218,353,699 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,486 | rd | rseg.std.tri.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PatternGen.R
\name{rseg.std.tri}
\alias{rseg.std.tri}
\title{Generation of points segregated (in a Type I fashion)
from the vertices of \eqn{T_e}}
\usage{
rseg.std.tri(n, eps)
}
\arguments{
\item{n}{A positive integer
representing the number of points to be generated.}
\item{eps}{A positive real number
representing the parameter of type I segregation (which is the
height of the triangular forbidden regions around the vertices).}
}
\value{
A \code{list} with the elements
\item{type}{The type of the point pattern}
\item{mtitle}{The \code{"main"} title for the plot of the point pattern}
\item{parameters}{The exclusion parameter, \code{eps},
of the segregation pattern, which is the height
of the triangular forbidden regions around the vertices }
\item{ref.points}{The input set of points \code{Y};
reference points, i.e., points
from which generated points are segregated
(i.e., vertices of \eqn{T_e}).}
\item{gen.points}{The output set of generated points segregated
from \code{Y} points (i.e., vertices of \eqn{T_e}).}
\item{tri.Y}{A logical value indicating whether triangulation
based on \code{Y} points should be implemented.
If \code{TRUE}, triangulation based on \code{Y} points is
implemented (default is \code{FALSE}).}
\item{desc.pat}{Description of the point pattern}
\item{num.points}{The \code{vector} of two numbers,
which are the number of generated points and
the number of reference (i.e., \code{Y}) points, which is 3 here.}
\item{xlimit,ylimit}{The ranges of the \eqn{x}-
and \eqn{y}-coordinates of the reference points,
which are the vertices of \eqn{T_e} here.}
}
\description{
An object of class \code{"Patterns"}.
Generates \code{n} points uniformly
in the standard equilateral triangle
\eqn{T_e=T((0,0),(1,0),(1/2,\sqrt{3}/2))}
under the type I segregation alternative for \code{eps}
in \eqn{(0,\sqrt{3}/3=0.5773503]}.
In the type I segregation, the triangular forbidden regions
around the vertices are determined by
the parameter \code{eps}
which serves as the height of these triangles
(see examples for a sample plot.)
See also (\insertCite{ceyhan:arc-density-PE,ceyhan:arc-density-CS,ceyhan:dom-num-NPE-Spat2011;textual}{pcds}).
}
\examples{
\dontrun{
A<-c(0,0); B<-c(1,0); C<-c(1/2,sqrt(3)/2);
Te<-rbind(A,B,C);
n<-100
eps<-.3 #try also .15, .5, .75
set.seed(1)
Xdt<-rseg.std.tri(n,eps)
Xdt
summary(Xdt)
plot(Xdt,asp=1)
Xlim<-range(Te[,1])
Ylim<-range(Te[,2])
xd<-Xlim[2]-Xlim[1]
yd<-Ylim[2]-Ylim[1]
Xp<-Xdt$gen.points
plot(Te,asp=1,pch=".",xlab="",ylab="",
main="Type I segregation in the \n standard equilateral triangle",
xlim=Xlim+xd*c(-.01,.01),ylim=Ylim+yd*c(-.01,.01))
polygon(Te)
points(Xp)
#The support for the Type I segregation alternative
sr<-eps/(sqrt(3)/2)
C1<-C+sr*(A-C); C2<-C+sr*(B-C)
A1<-A+sr*(B-A); A2<-A+sr*(C-A)
B1<-B+sr*(A-B); B2<-B+sr*(C-B)
supp<-rbind(A1,B1,B2,C2,C1,A2)
plot(Te,asp=1,pch=".",xlab="",ylab="",
main="Support of the Type I Segregation",
xlim=Xlim+xd*c(-.01,.01),ylim=Ylim+yd*c(-.01,.01))
if (sr<=.5)
{
polygon(Te)
polygon(supp,col=5)
} else
{
polygon(Te,col=5,lwd=2.5)
polygon(rbind(A,A1,A2),col=0,border=NA)
polygon(rbind(B,B1,B2),col=0,border=NA)
polygon(rbind(C,C1,C2),col=0,border=NA)
}
points(Xp)
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{rseg.circular}}, \code{\link{rassoc.circular}},
\code{\link{rsegII.std.tri}}, and \code{\link{rseg.multi.tri}}
}
\author{
Elvan Ceyhan
}
|
27b5df5181ddec84f097026640d220adfdc28304 | 9f610c112fdaaab291fbcc8b80572e52743e4574 | /R/matrix-construct.R | 31dff010afae07b20b3f70d87bfebafccb12c530 | [
"MIT"
] | permissive | teslajoy/hnscc-macOS | c907473d22380d6a5a7bc22d3fbe9312bdb68391 | 93cf238cd56af0452b34c25a5054524673f3fc78 | refs/heads/master | 2022-01-13T17:31:46.622914 | 2021-12-27T16:34:54 | 2021-12-27T16:34:54 | 77,406,143 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,611 | r | matrix-construct.R | #http://statisticalrecipes.blogspot.com/2012/08/biomart-find-gene-name-using-chromosome.html
library(biomaRt) # map exon to gene by chromosome location
# load data --------------------------------------------------
# Each .RData file presumably provides a named list of per-sample tables
# (gene.prog.files, gene.nonprog.files, ... -- names are the TCGA sample
# barcodes used as column names below); confirm against the upstream scripts.
load("RNA_normalized_samples/gene_prog_files.RData")
load("RNA_normalized_samples/gene_nonprog_files.RData")
load("RNA_normalized_samples/gene_files.RData")
load("RNA_normalized_samples/exon_prog_files.RData")
load("RNA_normalized_samples/exon_nonprog_files.RData")
load("RNA_normalized_samples/exon_files.RData")
# 20531 genes and 239322 exons -----------------------------
# Sanity check: every per-sample table should report the same row count.
unique(lapply(exon.prog.files, function(x) nrow(x)))
unique(lapply(gene.prog.files, function(x) nrow(x)))
# patient X gene matrix ------------------------------------
# progressor ------------------------------------------------
# One column of normalized counts per sample, bound into a gene x sample
# matrix, transposed to patient x gene; "?" (unannotated) gene symbols are
# dropped at the end of each section.
gene.prog <- lapply(gene.prog.files ,function(x) cbind(x$normalized_count))
gene.prog <- do.call(cbind, gene.prog)
dim(gene.prog) # 20531 68
# Gene symbol is the part of "symbol|id" before the pipe, taken from the
# first sample's first column.
genenames <- gsub("\\|.*", "", gene.prog.files[[1]][[1]])
rownames(gene.prog) <- genenames
colnames(gene.prog) <- names(gene.prog.files)
gene.prog <- t(gene.prog)
gene.prog <- gene.prog[ ,which(colnames(gene.prog) != "?")]
#write.table(gene.prog, file = "geneprog_matrix.txt", col.names = T, row.names = T, sep = "\t", append = F)
#test <- read.delim(file = "geneprog_matrix.txt")
# nonprogressor --------------------------------------------------------
# Same construction for the non-progressor samples.
gene.nonprog <- lapply(gene.nonprog.files ,function(x) cbind(x$normalized_count))
gene.nonprog <- do.call(cbind, gene.nonprog)
dim(gene.nonprog) #20531 161
genenames <- gsub("\\|.*", "", gene.nonprog.files[[1]][[1]])
rownames(gene.nonprog) <- genenames
colnames(gene.nonprog) <- names(gene.nonprog.files)
gene.nonprog <- t(gene.nonprog)
gene.nonprog <- gene.nonprog[ ,which(colnames(gene.nonprog) != "?")]
#write.table(gene.nonprog, file = "genenonprog_matrix.txt", col.names = T, row.names = T, sep = "\t", append = F)
#test <- read.delim(file = "genenonprog_matrix.txt")
# all ----------------------------------------------------------------
# Same construction for all samples; this matrix is written to disk.
gene.all <- lapply(gene.files ,function(x) cbind(x$normalized_count))
gene.all <- do.call(cbind, gene.all)
dim(gene.all) #20531 229
genenames <- gsub("\\|.*", "", gene.files[[1]][[1]])
rownames(gene.all) <- genenames
colnames(gene.all) <- names(gene.files)
gene.all <- t(gene.all)
gene.all <- gene.all[ ,which(colnames(gene.all) != "?")]
write.table(gene.all, file = "geneall_matrix.txt", col.names = T, row.names = T, sep = "\t", append = F)
# patient X exon matrix ------------------------------------
# progressor -----------------------------------------------
# Exon matrices use RPKM values; row names are the exon identifiers from the
# first sample's first column.
exon.prog <- lapply(exon.prog.files ,function(x) cbind(x$RPKM))
exon.prog <- do.call(cbind, exon.prog)
rownames(exon.prog) <- exon.prog.files[[1]][[1]]
colnames(exon.prog) <- names(exon.prog.files)
exon.prog <- t(exon.prog)
write.table(exon.prog, file = "exonprog_matrix.txt", col.names = T, row.names = T, sep = "\t", append = F)
#test <- read.delim(file = "exonprog_matrix.txt")
#dim(exon.prog) # 239322 68
#test <- do.call(cbind, list(exon.prog.files[[1]][[1]], exon.prog))
#rownames(exon.prog)[which(!rownames(exon.prog) %in% rownames(gene.prog))] # none
# non progressor --------------------------------------------
exon.nonprog <- lapply(exon.nonprog.files ,function(x) cbind(x$RPKM))
exon.nonprog <- do.call(cbind, exon.nonprog)
dim(exon.nonprog) # 239322 162
rownames(exon.nonprog) <- exon.nonprog.files[[1]][[1]]
colnames(exon.nonprog) <- names(exon.nonprog.files)
exon.nonprog <- t(exon.nonprog)
# Drop the sample(s) present in the exon data but absent from the gene data
# ("TCGA-CQ-A4CG" when this was written) so the matrices align.
rownames(exon.nonprog)[which(!rownames(exon.nonprog) %in% rownames(gene.nonprog))] #"TCGA-CQ-A4CG"
exon.nonprog <- exon.nonprog[-which(!rownames(exon.nonprog) %in% rownames(gene.nonprog)),]
write.table(exon.nonprog, file = "exonnonprog_matrix.txt", col.names = T, row.names = T, sep = "\t", append = F)
#test <- read.delim(file = "exonnonprog_matrix.txt")
# all patients --------------------------------------------
exon.all <- lapply(exon.files ,function(x) cbind(x$RPKM))
exon.all <- do.call(cbind, exon.all)
dim(exon.all) # 239322 230
rownames(exon.all) <- exon.files[[1]][[1]]
colnames(exon.all) <- names(exon.files)
exon.all <- t(exon.all)
rownames(exon.all)[which(!rownames(exon.all) %in% rownames(gene.all))] #"TCGA-CQ-A4CG"
# NOTE(review): `x[-which(cond),]` yields ZERO rows when which() is empty;
# safe here only because at least one mismatching sample is known to exist.
exon.all <- exon.all[-which(!rownames(exon.all) %in% rownames(gene.all)),]
write.table(exon.all, file = "exonall_matrix.txt", col.names = T, row.names = T, sep = "\t", append = F)
#test <- read.delim(file = "exonall_matrix.txt")
#gene.prog <- read.delim(file = "/home/users/sanati/thesis/Data/matrix/geneprog_matrix.txt")
|
76a2cbc25da6b580024b6903b792fd22879d0c2b | 77ef73c072c75fc92d313d404fa1b6df50a53e40 | /man/cyto_gate_edit.Rd | 0242ce9bd83f5e71f05b673e1f281b3f078a021d | [] | no_license | DillonHammill/CytoExploreR | 8eccabf1d761c29790c1d5c1921e1bd7089d9e09 | 0efb1cc19fc701ae03905cf1b8484c1dfeb387df | refs/heads/master | 2023-08-17T06:31:48.958379 | 2023-02-28T09:31:08 | 2023-02-28T09:31:08 | 214,059,913 | 60 | 17 | null | 2020-08-12T11:41:37 | 2019-10-10T01:35:16 | R | UTF-8 | R | false | true | 5,300 | rd | cyto_gate_edit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cyto_gate-helpers.R
\name{cyto_gate_edit}
\alias{cyto_gate_edit}
\title{Edit Existing Gate(s).}
\usage{
cyto_gate_edit(
x,
parent = NULL,
alias = NULL,
channels = NULL,
type = NULL,
gatingTemplate = NULL,
overlay = NA,
group_by = "all",
select = NULL,
negate = FALSE,
display = 25000,
axis = "x",
label = TRUE,
plot = TRUE,
popup = TRUE,
axes_limits = "machine",
gate_point_shape = 16,
gate_point_size = 1,
gate_point_col = "red",
gate_point_col_alpha = 1,
gate_line_type = 1,
gate_line_width = 2.5,
gate_line_col = "red",
gate_line_col_alpha = 1,
...
)
}
\arguments{
\item{x}{an object of class \code{GatingSet}.}
\item{parent}{name of the parental population.}
\item{alias}{name(s) of the gate to edit (e.g. "Single Cells").}
\item{channels}{name(s) of the channel(s) used to construct the gate(s). This
argument is not necessary but is included to allow conversion of
\code{cyto_gate_draw} code to \code{cyto_gate_edit} code by simply changing
\code{"draw"} to \code{"edit"}.}
\item{type}{vector of gate type names used to construct the gates. Multiple
\code{types} are supported but should be accompanied with an \code{alias}
argument of the same length (i.e. one \code{type} per \code{alias}).
Supported \code{gate_types} are \code{polygon, rectangle, ellipse,
threshold, boundary, interval, quadrant and web} which can be abbreviated
as upper or lower case first letters as well. Default \code{type} is
\code{"polygon"}.}
\item{gatingTemplate}{name of the \code{gatingTemplate} csv file (e.g.
"gatingTemplate.csv") where the gate is saved.}
\item{overlay}{name(s) of the populations to overlay or a \code{flowFrame},
\code{flowSet}, \code{list of flowFrames} or \code{list of flowSets}
containing populations to be overlaid onto the plot(s). Only overlaid
flowSet objects are subjected to sampling by \code{display}.}
\item{group_by}{vector of pData column names (e.g.
c("Treatment","Concentration") indicating how the samples should be grouped
prior to gating, set to the length of x by default to construct a single
gate for all samples. If group_by is supplied a different gate will be
constructed for each group.}
\item{select}{vector containing the indices of samples within gs to use for
plotting.}
\item{negate}{logical indicating whether a gatingTemplate entry should be
made for the negated population (i.e. all events outside the constructed
gates), set to FALSE by default. If negate is set to TRUE, a name for the
negated population MUST be supplied at the end of the alias argument.}
\item{display}{fraction or number of events to display in the plot during the
gating process, set to 25 000 events by default.}
\item{axis}{indicates whether the \code{"x"} or \code{"y"} axis should be
gated for 2-D interval gates.}
\item{label}{logical indicating whether to include
\code{\link{cyto_plot_label}} for the gated population(s), \code{TRUE} by
default.}
\item{plot}{logical indicating whether a plot should be drawn, set to
\code{TRUE} by default.}
\item{popup}{logical indicating whether the plot should be constructed in a
pop-up window, set to TRUE by default.}
\item{axes_limits}{options include \code{"auto"}, \code{"data"} or
\code{"machine"} to use optimised, data or machine limits respectively. Set
to \code{"machine"} by default to use entire axes ranges. Fine control over
axes limits can be obtained by altering the \code{xlim} and \code{ylim}
arguments.}
\item{gate_point_shape}{shape to use for selected gate points, set to
\code{16} by default to use filled circles. See
\code{\link[graphics:par]{pch}} for alternatives.}
\item{gate_point_size}{numeric to control the size of the selected gate
points, set to 1 by default.}
\item{gate_point_col}{colour to use for the selected gate points, set to
"red" by default.}
\item{gate_point_col_alpha}{numeric [0,1] to control the transparency of the
selected gate points, set to 1 by default to use solid colours.}
\item{gate_line_type}{integer [0,6] to control the line type of gates, set to
\code{1} to draw solid lines by default. See
\code{\link[graphics:par]{lty}} for alternatives.}
\item{gate_line_width}{numeric to control the line width(s) of gates, set to
\code{2.5} by default.}
\item{gate_line_col}{colour to use for gates, set to \code{"red"} by default.}
\item{gate_line_col_alpha}{numeric [0,1] to control the transparency of the
selected gate lines, set to 1 by default to use solid colours.}
\item{...}{additional arguments for \code{\link{cyto_plot.flowFrame}}.}
}
\value{
an object of class \code{GatingSet} with edited gate applied, as well
as gatingTemplate file with edited gate saved.
}
\description{
Edit Existing Gate(s).
}
\examples{
\dontrun{
library(CytoExploreRData)
# Load in samples
fs <- Activation
gs <- GatingSet(fs)
# Apply compensation
gs <- cyto_compensate(gs)
# Transform fluorescent channels
gs <- cyto_transform(gs)
# Gate using cyto_gate_draw
gt <- Activation_gatingTemplate
gt_gating(gt, gs)
# Edit CD4 T Cells Gate - replace gatingTemplate name
cyto_gate_edit(gs,
parent = "T Cells",
alias = "CD4 T Cells",
gatingTemplate = "gatingTemplate.csv"
)
}
}
\author{
Dillon Hammill, \email{Dillon.Hammill@anu.edu.au}
}
|
769529306ab33dc197c70f8c9341ccea50a81817 | b4cf1178b97f1f747f1c6cd8469bb24e1fd8c6e5 | /R/utils.R | 709a52af7ee67c03c6565664c5f44b2602630e48 | [
"MIT"
] | permissive | benitezrcamilo/xplorerr | c0ddc8bd984354db6b6bd3c2f4074a25cb9640d4 | 37c2a74b52760cc5d383d9b7b64f9175a04c566f | refs/heads/master | 2023-04-29T06:18:03.612353 | 2021-05-21T08:36:50 | 2021-05-21T08:36:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 659 | r | utils.R | #' @importFrom utils packageVersion menu install.packages
# Ensure a suggested package is installed, offering to install it.
#
# Checks whether `pkg` is installed. If it is missing, an interactive session
# is asked whether to install it; a non-interactive session (or a declined
# prompt) stops with an informative error.
#
# @param pkg Name of the package to check (character scalar).
# @return Invisibly NULL; called for its side effects.
check_suggests <- function(pkg) {
  # packageVersion() errors for a missing package; map that to NA.
  pkg_flag <- tryCatch(utils::packageVersion(pkg), error = function(e) NA)
  if (is.na(pkg_flag)) {
    # Build the message text once. Previously this line called message(),
    # which printed immediately and assigned NULL to `msg`, so the prompt
    # below lost its first line.
    msg <- paste0(pkg, ' must be installed for this functionality.')
    if (interactive()) {
      message(msg, "\nWould you like to install it?")
      if (utils::menu(c("Yes", "No")) == 1) {
        utils::install.packages(pkg)
      } else {
        stop(msg, call. = FALSE)
      }
    } else {
      stop(msg, call. = FALSE)
    }
  }
  invisible(NULL)
}
|
775545c1f4547bbe1805c2437fdedb72e3b4073b | 799f724f939763c26c4c94497b8632bad380e8f3 | /man/groups.Rd | f2671242b19ab4fc2fe226d3fbcb3009c67c92cf | [] | no_license | chmue/quanteda | 89133a7196b1617f599e5bba57fe1f6e59b5c579 | aed5cce6778150be790b66c031ac8a40431ec712 | refs/heads/master | 2020-12-01T02:59:48.346832 | 2017-11-22T18:35:37 | 2017-11-22T18:35:37 | 50,363,453 | 2 | 0 | null | 2016-01-25T16:18:50 | 2016-01-25T16:18:50 | null | UTF-8 | R | false | true | 723 | rd | groups.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quanteda-documentation.R
\name{groups}
\alias{groups}
\title{grouping variable(s) for various functions}
\arguments{
\item{groups}{either: a character vector containing the names of document
variables to be used for grouping; or a factor or object that can be
coerced into a factor equal in length or rows to the number of documents.
See \link{groups} for details.}
}
\description{
Groups for aggregation by various functions that take grouping options.
Groups can be the name(s) of document variables (as a character vector), or
variables whose length or number of rows (if a data.frame) equal the number
of documents.
}
\keyword{internal}
|
bc6add16faa1355d0aa20c326a46af8b4012dbb0 | 4fc8ffe5d2450118ca79041c3acba6e5cd1e88d4 | /R/checkSpeciesIdentification.R | 79a38ce8ebe9511773f205358baa118e05b170a2 | [] | no_license | adithirgis/camtrapR | 5b44ae46305b50434053652be9c3ec26dcec8de2 | be8b0dca9905a56799a5f8983db71a4e2b989709 | refs/heads/master | 2023-08-06T06:05:42.510507 | 2021-09-17T09:45:45 | 2021-09-17T09:45:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,203 | r | checkSpeciesIdentification.R | checkSpeciesIdentification <- function(inDir,
                                       IDfrom,
                                       hasCameraFolders,
                                       metadataSpeciesTag,
                                       metadataSpeciesTagToCompare,
                                       metadataHierarchyDelimitor = "|",
                                       maxDeltaTime,
                                       excludeSpecies,
                                       stationsToCheck,
                                       writecsv = FALSE
)
{
  # Scans station directories of camera-trap images with ExifTool and flags
  # (a) images of different species taken within maxDeltaTime seconds of one
  # another (possible misidentification) and (b) images whose two metadata
  # species tags disagree between observers.
  # Returns a list with elements "temporalIndependenceCheck" and
  # "IDconflictCheck"; optionally writes both tables as csv into inDir.
  #
  # Restore the working directory on exit (it is changed if writecsv = TRUE).
  wd0 <- getwd()
  on.exit(setwd(wd0))
  # ExifTool must be on the system PATH; all image metadata is read with it.
  if(Sys.which("exiftool") == "") stop("cannot find ExifTool")
  # --- argument checks ------------------------------------------------------
  if(hasArg(excludeSpecies)){
    if(!is.character(excludeSpecies)) stop("excludeSpecies must be of class 'character'")
  }
  if(hasArg(stationsToCheck)){
    if(!is.character(stationsToCheck)) stop("stationsToCheck must be of class 'character'")
  }
  stopifnot(is.logical(hasCameraFolders))
  stopifnot(is.numeric(maxDeltaTime))
  file.sep <- .Platform$file.sep
  if(!is.character(IDfrom)){stop("IDfrom must be of class 'character'")}
  if(IDfrom %in% c("metadata", "directory") == FALSE) stop("'IDfrom' must be 'metadata' or 'directory'")
  # Metadata-based identification needs a species tag inside the
  # HierarchicalSubject Exif field.
  if(IDfrom == "metadata"){
    if(metadataHierarchyDelimitor %in% c("|", ":") == FALSE) stop("'metadataHierarchyDelimitor' must be '|' or ':'")
    metadata.tagname <- "HierarchicalSubject"
    if(!hasArg(metadataSpeciesTag)) {stop("'metadataSpeciesTag' must be defined if IDfrom = 'metadata'")}
    if(!is.character(metadataSpeciesTag)){stop("metadataSpeciesTag must be of class 'character'")}
    if(length(metadataSpeciesTag) != 1){stop("metadataSpeciesTag must be of length 1")}
    if(hasArg(metadataSpeciesTagToCompare)) {
      if(!is.character(metadataSpeciesTagToCompare)){stop("metadataSpeciesTagToCompare must be of class 'character'")}
      if(length(metadataSpeciesTagToCompare) != 1){stop("metadataSpeciesTagToCompare must be of length 1")}
    }
  }
  # Separator used when one image carries several tags of the same kind.
  multiple_tag_separator <- "_&_"
  if(!dir.exists(inDir)) stop("Could not find inDir:\n", inDir, call. = FALSE)
  # find station directories
  dirs <- list.dirs(inDir, full.names = TRUE, recursive = FALSE)
  dirs_short <- list.dirs(inDir, full.names = FALSE, recursive = FALSE)
  if(length(dirs) == 0) stop("inDir contains no station directories", call. = FALSE)
  # Accumulators for the two result tables, filled station by station.
  check_table <- conflict_ID_table <- data.frame(stringsAsFactors = FALSE)
  # if only checking certain station, subset dirs/dirs_short
  if(hasArg(stationsToCheck)){
    whichStationToCheck <- which(dirs_short %in% stationsToCheck)
    if(length(whichStationToCheck) == 0) {stop("found no directories of names specified in stationsToCheck")} else {
      dirs <- dirs [whichStationToCheck]
      dirs_short <- dirs_short[whichStationToCheck]
    }
  }
  # --- per-station processing ----------------------------------------------
  for(i in 1:length(dirs)){
    if(IDfrom == "directory"){
      # Species are subdirectories of the station directory ([-1] drops the
      # station directory itself).
      dirs.to.check.sho <- list.dirs(dirs[i], full.names = FALSE)[-1]
      dirs.to.check <- list.dirs(dirs[i], full.names = TRUE)[-1]
      if(hasArg(excludeSpecies)){
        dirs.to.check <- dirs.to.check [!dirs.to.check.sho %in% excludeSpecies]
        dirs.to.check.sho <- dirs.to.check.sho[!dirs.to.check.sho %in% excludeSpecies]
      }
    }
    # remove empty species directories
    # empty_dirs <- sapply(dirs.to.check, FUN = function(X){length(list.files(X)) == 0})
    # if(any(empty_dirs)){
    #   dirs.to.check <- dirs.to.check[-empty_dirs]
    #   dirs.to.check.sho <- dirs.to.check.sho[-empty_dirs]
    # }
    # create command line for exiftool execution
    if(IDfrom == "directory"){
      if(hasArg(excludeSpecies)) { # under some rare circumstances, this caused an error if directories were empty
        command.tmp <- paste('exiftool -t -q -r -f -Directory -FileName -EXIF:DateTimeOriginal -HierarchicalSubject -ext JPG "', paste(dirs.to.check, collapse = '" "'), '"', sep = "")
      } else {
        command.tmp <- paste('exiftool -t -q -r -f -Directory -FileName -EXIF:DateTimeOriginal -HierarchicalSubject -ext JPG "', dirs[i], '"', sep = "")
      }
    } else {
      command.tmp <- paste('exiftool -t -q -r -f -Directory -FileName -EXIF:DateTimeOriginal -HierarchicalSubject -ext JPG "', dirs[i], '"', sep = "")
    }
    colnames.tmp <- c("Directory", "FileName", "DateTimeOriginal", "HierarchicalSubject")
    # run exiftool and make data frame
    # runExiftool is a package-internal helper; it returns a data frame on
    # success (other return values are skipped below).
    metadata.tmp <- runExiftool(command.tmp = command.tmp, colnames.tmp = colnames.tmp)
    if(inherits(metadata.tmp, "data.frame")){
      if(IDfrom == "directory"){
        message(paste(dirs_short[i], ": checking", nrow(metadata.tmp), "images in", length(dirs.to.check.sho), "directories"))
      }
      # write metadata from HierarchicalSubject field to individual columns
      if(IDfrom == "metadata"){
        message(paste(dirs_short[i], ": ", formatC(nrow(metadata.tmp), width = 4), " images",
                      makeProgressbar(current = i, total = length(dirs_short)), sep = ""))
        metadata.tmp <- addMetadataAsColumns (intable = metadata.tmp,
                                              metadata.tagname = metadata.tagname,
                                              metadataHierarchyDelimitor = metadataHierarchyDelimitor,
                                              multiple_tag_separator = multiple_tag_separator)
      }
      # assign species ID
      metadata.tmp <- assignSpeciesID (intable = metadata.tmp,
                                       IDfrom = IDfrom,
                                       metadataSpeciesTag = metadataSpeciesTag,
                                       speciesCol = "species",
                                       dirs_short = dirs_short,
                                       i_tmp = i,
                                       multiple_tag_separator = multiple_tag_separator,
                                       returnFileNamesMissingTags = FALSE
      )
      # if images in station contain no metadata species tags, skip that station
      if(!inherits(metadata.tmp, "data.frame")){
        if(metadata.tmp == "found no species tag") {
          warning(paste(dirs_short[i], ": metadataSpeciesTag '", metadataSpeciesTag, "' not found in image metadata tag 'HierarchicalSubject'. Skipping", sep = ""), call. = FALSE, immediate. = TRUE)
        } else {
          warning(paste(dirs_short[i], ": error in species tag extraction. Skipping. Please report", sep = ""), call. = FALSE, immediate. = TRUE)
        }
        next
      }
      # exclude species if using metadata tags (if using IDfrom = "directory", they were removed above already)
      if(IDfrom == "metadata"){
        if(hasArg(excludeSpecies)){
          metadata.tmp <- metadata.tmp[!metadata.tmp$species %in% excludeSpecies,]
        }
      }
      # assign camera ID
      # With camera subfolders the camera is the last (metadata) or
      # second-to-last (directory ID; last is the species) path component.
      if(IDfrom == "directory" & hasCameraFolders == TRUE){
        metadata.tmp$camera <- sapply(strsplit(metadata.tmp$Directory, split = file.sep, fixed = TRUE), FUN = function(X){X[length(X) - 1]})
      }
      if(IDfrom == "metadata" & hasCameraFolders == TRUE){
        metadata.tmp$camera <- sapply(strsplit(metadata.tmp$Directory, split = file.sep, fixed = TRUE), FUN = function(X){X[length(X)]})
      }
      # make date/time R-readable
      metadata.tmp$DateTimeOriginal <- as.POSIXct(strptime(x = metadata.tmp$DateTimeOriginal, format = "%Y:%m:%d %H:%M:%S"))
      # add station ID and assemble table
      metadata.tmp <- cbind(station = rep(dirs_short[i], times = nrow(metadata.tmp)),
                            metadata.tmp)
      # compare ID between different observers
      # Rows where the two observer tags differ (or the comparison is NA,
      # i.e. one tag is missing) are collected as ID conflicts.
      if(hasArg(metadataSpeciesTagToCompare)){
        metadataSpeciesTag2 <- paste("metadata", metadataSpeciesTag, sep = "_")
        metadataSpeciesTagToCompare2 <- paste("metadata", metadataSpeciesTagToCompare, sep = "_")
        if(metadataSpeciesTagToCompare2 %in% colnames(metadata.tmp)){
          metadata.tmp.conflict <- metadata.tmp[metadata.tmp[,metadataSpeciesTag2] != metadata.tmp[,metadataSpeciesTagToCompare2] |
                                                  is.na(metadata.tmp[,metadataSpeciesTag2] != metadata.tmp[,metadataSpeciesTagToCompare2]) ,]
          metadata.tmp.conflict <- metadata.tmp.conflict[,which(colnames(metadata.tmp.conflict) %in% c("station", "Directory", "FileName", metadataSpeciesTag2, metadataSpeciesTagToCompare2))]
          # if anything to report, append to main table
          if(nrow(metadata.tmp.conflict) >= 1){
            conflict_ID_table <- rbind(conflict_ID_table, metadata.tmp.conflict)
          }
        } else {warning(paste("metadata tag '", metadataSpeciesTagToCompare, "' was not found in image metadata in Station ", dirs_short[i], sep = ""), call. = FALSE, immediate. = TRUE)}
        suppressWarnings(rm(metadataSpeciesTag2, metadataSpeciesTagToCompare2, metadata.tmp.conflict))
      }
      # calculate minimum delta time between image and all images in other species folders at station i
      # Only meaningful when at least two species were recorded at the station.
      if(length(unique(metadata.tmp$species)) >= 2){
        for(rowindex in 1:nrow(metadata.tmp)){
          if(hasCameraFolders == TRUE){
            # only compare within a camera folder if there was >1 camera per station
            which.tmp1 <- which(metadata.tmp$species != metadata.tmp$species[rowindex] &
                                  metadata.tmp$camera == metadata.tmp$camera[rowindex])
            if(length(which.tmp1) >= 1){
              metadata.tmp$min.delta.time[rowindex] <- round(min(abs(difftime(time1 = metadata.tmp$DateTimeOriginal[rowindex],
                                                                              time2 = metadata.tmp$DateTimeOriginal[which.tmp1],
                                                                              units = "secs"))))
            } else {
              metadata.tmp$min.delta.time[rowindex] <- NA
            }
            rm(which.tmp1)
          } else { # if no camera subfolders
            # compare to other species
            which.tmp2 <- which(metadata.tmp$species != metadata.tmp$species[rowindex])
            if(length(which.tmp2) >= 1){
              metadata.tmp$min.delta.time[rowindex] <- round(min(abs(difftime(time1 = metadata.tmp$DateTimeOriginal[rowindex],
                                                                              time2 = metadata.tmp$DateTimeOriginal[which.tmp2],
                                                                              units = "secs"))))
            } else {
              metadata.tmp$min.delta.time[rowindex] <- NA
            }
            rm(which.tmp2)
          } # end ifelse hasCameraFolders
        } # end for
        # Keep only images closer than maxDeltaTime (seconds) to another
        # species' image -- these are the candidates for misidentification.
        if(hasCameraFolders == TRUE){
          check_table_tmp <- metadata.tmp[metadata.tmp$min.delta.time <= maxDeltaTime & !is.na(metadata.tmp$min.delta.time), c("station", "Directory", "FileName", "species", "DateTimeOriginal", "camera")]
        } else {
          check_table_tmp <- metadata.tmp[metadata.tmp$min.delta.time <= maxDeltaTime & !is.na(metadata.tmp$min.delta.time), c("station", "Directory", "FileName", "species", "DateTimeOriginal")]
        }
        # order output
        check_table_tmp <- check_table_tmp[order(check_table_tmp$DateTimeOriginal),]
        # if anything to report, append to main table
        if(nrow(check_table_tmp) >= 1){
          check_table <- rbind(check_table, check_table_tmp)
        }
        suppressWarnings(rm(metadata.tmp, check_table_tmp))
      } # end if(length(unique(metadata.tmp$species)) >= 2){
    } # end if(class(metadata.tmp) == "data.frame"){
  } # end for (i ...)
  # Optionally write both tables as date-stamped csv files into inDir.
  if(writecsv == TRUE){
    check_table_filename <- paste("species_ID_check_", Sys.Date(), ".csv", sep = "")
    conflict_table_filename <- paste("species_ID_conflicts_", Sys.Date(), ".csv", sep = "")
    setwd(inDir)
    write.csv(check_table, file = check_table_filename)
    write.csv(conflict_ID_table, file = conflict_table_filename)
  }
  # make output list
  outlist <- list(check_table, conflict_ID_table)
  names(outlist) <- c("temporalIndependenceCheck", "IDconflictCheck")
  return(outlist)
} |
b53a3313dededa35207fdb17fc576709d0c6a7e2 | a226f4b4cf54dd0e8164a727d24dca99e79e1354 | /tests/testthat/test_equalGADS.R | edc9aee6e1316c5d309702827256b7404b7f5bc4 | [] | no_license | beckerbenj/eatGADS | 5ef0bdc3ce52b1895aaaf40349cbac4adcaa293a | e16b423bd085f703f5a548c5252da61703bfc9bb | refs/heads/master | 2023-09-04T07:06:12.720324 | 2023-08-25T11:08:48 | 2023-08-25T11:08:48 | 150,725,511 | 0 | 1 | null | 2023-09-12T06:44:54 | 2018-09-28T10:41:21 | R | UTF-8 | R | false | false | 3,119 | r | test_equalGADS.R |
# load test data (df1, df2, pkList, fkList)
# load(file = "tests/testthat/helper_data.rda")
load(file = "helper_data.rda")
# dfSAV <- import_spss(file = "tests/testthat/helper_spss_missings.sav")
dfSAV <- import_spss(file = "helper_spss_missings.sav")
test_that("Compare two different GADSdat objects",{
out <- equalGADS(df1, df2)
expect_equal(out$names_not_in_1, "V2")
expect_equal(out$names_not_in_2, "V1")
expect_equal(out$data_differences, "ID1")
expect_equal(out$meta_data_differences, character())
expect_equal(out$data_nrow, "all.equal")
df1_2 <- df1
df1_2$dat[1, ] <- c(0, 0)
out2 <- equalGADS(df1, df1_2)
expect_equal(out2$data_differences, c("ID1", "V1"))
})
test_that("Compare two identical GADSdat objects",{
out <- equalGADS(df1, df1)
expect_equal(out$names_not_in_1, character())
expect_equal(out$names_not_in_2, character())
expect_equal(out$data_differences, character())
expect_equal(out$meta_data_differences, character())
expect_equal(out$data_nrow, "all.equal")
})
test_that("Compare while ignoring order differences for meta data",{
dfSAV2 <- dfSAV
dfSAV2$labels <- dfSAV2$labels[c(2:1, 3:7), ]
out <- equalGADS(dfSAV, dfSAV2)
expect_equal(out$meta_data_differences, character())
})
test_that("Compare while ignoring order differences for data",{
dfSAV2 <- dfSAV
dfSAV2$dat <- dfSAV2$dat[c(2, 4, 3, 1), ]
out <- equalGADS(dfSAV, dfSAV2)
expect_equal(out$data_differences, c("VAR1", "VAR3"))
out2 <- equalGADS(dfSAV, dfSAV2, id = "VAR1")
expect_equal(out2$data_differences, character())
})
test_that("Compare while ignoring irrelevant format differences",{
dfSAV2 <- changeSPSSformat(dfSAV, "VAR1", format = "F8")
out <- equalGADS(dfSAV, dfSAV2)
expect_equal(out$meta_data_differences, character())
})
test_that("Compare two different GADSdat objects, large ID numbers",{
df1_2 <- df1
df1_2$dat$ID1 <- c(5140010110, 5140010111)
df1$dat$ID1 <- c(5140010109, 5140010110)
out <- equalGADS(df1, df1_2)
expect_equal(out$data_differences, c("ID1"))
})
test_that("Compare two different GADSdat objects with varying tolerance",{
df1_2 <- df1
df1_2$dat$V1 <- c(3 + 1e-07, 5 + 1e-09)
out <- equalGADS(df1, df1_2)
expect_equal(out$data_differences, c("V1"))
df1_2 <- df1
df1_2$dat$V1 <- c(3 + 1e-07, 5 + 1e-09)
out <- equalGADS(df1, df1_2, tolerance = 0.00001)
expect_equal(out$data_differences, character())
})
test_that("Compare two GADSdat objects with metaExceptions",{
df1_3 <- df1_2 <- df1
df1_2$labels[1, "format"] <- "F8"
df1_2$labels[2, "varLabel"] <- "F8"
out <- equalGADS(df1, df1_2, metaExceptions = c("format", "varLabel"))
expect_equal(out$names_not_in_1, character())
expect_equal(out$names_not_in_2, character())
expect_equal(out$data_differences, character())
expect_equal(out$meta_data_differences, character())
expect_equal(out$data_nrow, "all.equal")
out2 <- equalGADS(df1, df1_2, metaExceptions = c("display_width"))
expect_equal(out2$meta_data_differences, c("ID1", "V1"))
})
|
6273cf0e7943a5dcd13a637822420811d9a400c4 | 0e59d41c7718e9a4b0fbb4f6728a6046075e8149 | /R/data.R | cc560ef86f634492746c71ec87ba71ffdfaac37f | [] | no_license | Ngendahimana/epbi500 | 61a1fa989a152eb1da946a688c8640a9c812d1ae | 57f4a6a560debd4de7bb2b6183c991242ec953dc | refs/heads/master | 2021-01-17T15:50:49.837566 | 2017-03-09T13:12:45 | 2017-03-09T13:12:45 | 84,102,642 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 811 | r | data.R | #' Right Heart Catheterization Dataset
#'
#' This dataset was used in Connors et al. (1996): The effectiveness of RHC
#' in the initial care of critically ill patients. J American Medical
#' Association 276:889-897. The dataset pertains to day 1 of hospitalization,
#' i.e., the 'treatment' variable swang1 is whether or not a patient received
#' a RHC (also called the Swan-Ganz catheter) on the first day in which the
#' patient qualified for the SUPPORT study (see above).
#'
#' @format A data frame with 5735 rows and 63 variables:
#' \describe{
#'   \item{Age}{Age, in years}
#'   \item{Sex}{Sex at birth}
#'   \item{Income}{Yearly Income, in dollars}
#'   \item{meta}{Metabolic Diagnostic}
#' .
#' .
#' .
#' }
#' @source \url{http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/rhc.html}
"rhc"
|
c4fd68a92d49c0876d8f1e7043a4002b927c9f4e | e9c08d14fe6aef1589dbde525d5f933944f9c37f | /R-EDA_PP_FE/FE/FE2.R | b22951ee170e6a211693a8526b5aa16030ad2b5c | [] | no_license | vamsiry/Data-Science | beee7654b5085fd494bf7349a0846fdfa6b784ea | a2a9e3237fb4e727393d2a5fe5d7e0a9b9c6de8c | refs/heads/master | 2021-01-11T22:48:15.474254 | 2018-05-03T02:35:13 | 2018-05-03T02:35:13 | 79,034,903 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,668 | r | FE2.R |
#feature engg-basics.R
#======================
library(caret)

# Working directory containing the example data sets.
setwd("C:\\Users\\Vamsi\\Desktop\\R.Alg\\practice\\data sets\\datasets\\")

# --- Wine data: drop uninformative columns ---------------------------------
winedata <- read.csv("wine.txt", header = TRUE)
dim(winedata)
str(winedata)

# Remove the variables which have more than 95% NAs.
threshold_val <- 0.95 * dim(winedata)[1]
include_cols <- !apply(winedata, 2, function(y) sum(is.na(y)) > threshold_val)
winedata <- winedata[, include_cols]

# Drop the variables which have near-zero variance.
nearZvar <- nearZeroVar(winedata, saveMetrics = TRUE)
winedata <- winedata[!nearZvar$nzv]
cor(winedata)

# --- Digit data: variance-based filtering ----------------------------------
setwd("D:\\digit_recognizer")
digit_train <- read.csv("train.csv", header = TRUE, na.strings = c("NA", ""))
# Use TRUE/FALSE rather than T/F: T and F are ordinary variables that can be
# reassigned, so the abbreviations are unsafe in scripts.
nzv_obj <- nearZeroVar(digit_train, saveMetrics = TRUE)
digit_train1 <- digit_train[, !nzv_obj$zeroVar]
dim(digit_train1)
digit_train2 <- digit_train[, !nzv_obj$nzv]
dim(digit_train2)

# --- Wine data: drop highly correlated variables ---------------------------
corr_matrix <- abs(cor(winedata))
diag(corr_matrix) <- 0  # ignore self-correlation
correlated_col <- findCorrelation(corr_matrix, verbose = FALSE, cutoff = .60)
winedata <- winedata[-correlated_col]
cor(winedata)
dim(winedata)
###############################################################
#correlation-based-filteration.R
#==================================
library(caret)
library(corrplot)

setwd("C:\\Users\\Vamsi\\Desktop\\R.Alg\\practice\\data sets\\datasets\\restaurent-rp")
restaurant_train <- read.csv("train.csv", header = TRUE, na.strings = c("NA", ""))
dim(restaurant_train)
str(restaurant_train)

# Drop the first (ID) column before analysing the features.
restaurant_train1 <- restaurant_train[, -1]
str(restaurant_train1)

# picking only numerical attributes for correlation matrix
numeric_attr <- sapply(restaurant_train1, is.numeric)
correlations <- cor(restaurant_train1[, numeric_attr])

# plotting correlation matrix (X11() opens a new graphics device)
X11()
corrplot(correlations)
corrplot(correlations, order = "hclust")
corrplot(correlations, order = "hclust", addrect = 3)

# finding highly correlated features using the correlation matrix
# NOTE(review): findCorrelation() returns indices into the *numeric subset*,
# but they are applied to the full restaurant_train below -- confirm this is
# intended before relying on the filtered result.
filtered_features_correlation <- findCorrelation(abs(correlations), cutoff = 0.95)
restaurant_train1 <- restaurant_train[, -filtered_features_correlation]
#covariance-correlation.R
#=========================
library(ggplot2)
# Scatter-plot two price series against each other (in a new X11 device) and
# print their covariance and correlation.
stock_plot <- function(s1, s2) {
  pair <- data.frame(a = s1, b = s2)
  X11()
  scatter <- ggplot(pair) + geom_point(aes(x = a, y = b))
  print(scatter)
  print(cov(pair$a, pair$b))
  print(cor(pair$a, pair$b))
}
# Four pairs of "stock" series illustrating how covariance and correlation
# behave for positively, negatively, and identically-shaped relationships.
s1 <- c(100, 200, 300, 400)
s2 <- c(10, 20, 30, 50)
stock_plot(s1, s2)

s3 <- c(100, 200, 300, 400)
s4 <- c(50, 40, 35, 32)
stock_plot(s3, s4)

s5 <- c(100, 200, 300, 400)
s6 <- c(1, 2, 3, 5)
stock_plot(s5, s6)

s7 <- c(100, 200, 300, 400)
s8 <- c(500, 600, 700, 800)
stock_plot(s7, s8)

# why mean of z-scores is 0?  Standardization subtracts the mean first, so
# the centered values must sum (and hence average) to zero.
x <- c(10, 20, 30, 40, 50, 60, 70)
x_z <- (x - mean(x)) / sd(x)
df <- data.frame(x, x_z)
mean(x)
mean(x_z)
######################################################################
#eigenvectors.R
#===============
# Markov-chain migration demo: repeatedly apply a 2x2 column-stochastic
# transition matrix to a population vector and watch it converge to the
# steady state given by the dominant eigenvector.
migration <- matrix(c(.9, .05, .1, .95), 2, 2, byrow = TRUE)
#initial_population = c(300,100)
#initial_population = c(200,100)
initial_population <- c(100, 100)
initial_population_mat <- as.matrix(initial_population)
# Preallocate the trajectory: growing a data frame row-by-row copies it on
# every iteration. Column names V1/V2 match what incremental assignment to a
# zero-column data frame produced before.
after_population_frame <- data.frame(V1 = numeric(100), V2 = numeric(100))
for (i in seq_len(100)) {
  after_population <- migration %*% initial_population_mat
  # Store rounded values for display, but iterate on the exact vector.
  after_population_frame[i, 1] <- round(after_population[1, 1])
  after_population_frame[i, 2] <- round(after_population[2, 1])
  initial_population_mat <- after_population
}
# A column-stochastic matrix has dominant eigenvalue 1; its eigenvector gives
# the steady-state population split.
e <- eigen(migration)
|
35777b600d675eafce25d9eebb010f22a8b425c7 | 338f458b6a99e2cca33e7319d374a94ab48ca3ad | /code.r | 643e7f456a1930a1e6338713d878af15cad579de | [] | no_license | ahmedali20001/Optimizing-a-Healthcare-Network- | 428658dc2481bb1e4c70cea7c2dd5d35e2723687 | e051422d99b28172ac3883e0eb43a6594e257348 | refs/heads/master | 2020-04-28T07:15:31.267590 | 2019-03-11T21:22:40 | 2019-03-11T21:22:40 | 175,085,383 | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 4,036 | r | code.r | # import data set
data <- read.csv("C:/Users/HP/Desktop/Dojo Task/Data.csv",header = TRUE,sep = ",")
# setting up data, also getting latitude and longitude to pin point data
# for getting accurate results
# NOTE(review): `data` shadows base::data(), which is still called as a
# function below (that works, but a more distinctive name would be safer).
library(zipcode)
data(zipcode)
nrow(zipcode)
head(zipcode)
library(zipcode)
data(zipcode)
# Build a small lookup table of example ZIP codes.
somedata = data.frame(postal = c(98007, 98290, 98065, 98801, 98104))
somedata
somedata$zip = clean.zipcodes(somedata$postal)
somedata
# getting lattitude and longitude
data(zipcode)
# Join the cleaned ZIPs against the zipcode table to pick up lat/long/city.
somedata = merge(somedata, zipcode, by.x='zip', by.y='zip')
somedata
# ggplot to get a show to a map ;)
# Read the Seattle police-beat shapefile and draw the base map.
town <- readOGR(dsn = "C:/Users/HP/Desktop/Dojo Task/Dataset/geo_map", layer = "SPD_BEATS_WGS84")
plot(town)
library(zipcode)
library(tidyverse)
library(maps)
library(viridis)
library(ggthemes)
data(zipcode)
#Seattle map
library("rgdal")
town <- readOGR(dsn = "C:/Users/HP/Desktop/Dojo Task/Dataset/geo_map", layer = "SPD_BEATS_WGS84")
plot(town)
#Plotting Distance
fm <- read.csv("C:/Users/HP/Desktop/Dojo Task/Data.csv",header = TRUE,sep = ",")
data(zipcode)
fm$Facility.Area.Zipcode<- clean.zipcodes(fm$Facility.Area.Zipcode)
#size by zip
# NOTE(review): `ï..Facility.ID` looks like a UTF-8 BOM artifact from
# read.csv; reading with fileEncoding = "UTF-8-BOM" would give a clean name.
# Also the grouping variables come from `somedata` while the counts come from
# `fm` -- confirm the intended aggregation.
fm.zip<-aggregate(data.frame(count=fm$ï..Facility.ID),list(zip=somedata$zip,county=somedata$city),length)
# Overwrites the raw `fm` with per-ZIP counts joined to coordinates.
fm<- merge(fm.zip, zipcode, by='zip')
# joined connections
ggplot(fm,aes(longitude,latitude)) +
  geom_polygon(data=somedata,aes(x=somedata$longitude,y=somedata$latitude),color='red',fill=NA,alpha=1)+
  geom_point(aes(color = count),size=.2,alpha=.25) +
  xlim(-123,-119)+ylim(46,48)
# dotted plot
ggplot(fm,aes(longitude,latitude)) +
  geom_point(data = somedata, aes(x=somedata$longitude,y=somedata$latitude))
# Re-read the beat shapefile (same call as above; kept for script re-runs).
town <- readOGR(dsn = "C:/Users/HP/Desktop/Dojo Task/Dataset/geo_map", layer = "SPD_BEATS_WGS84")
#Data Preparation/Analysis
library(VIM)
# Visualize missingness patterns in the ZIP lookup table.
aggr(somedata)
#clustering
library(tidyverse) # data manipulation
library(cluster) # clustering algorithms
library(factoextra) # clustering algorithms & visualization
# Quick univariate looks at the coordinates before clustering.
par(mfrow = c(1, 3))
hist(somedata$latitude, col = 'gray')
hist(somedata$longitude, ylim = c(0, 10), col = 'gray')
plot(somedata$latitude, somedata$longitude, asp = 1)
# Fix the RNG seed so the random k-means starts are reproducible.
set.seed(123)
# NOTE(review): kmeans() is applied to all columns of `somedata`, not just
# latitude/longitude -- confirm that is intended.
two <- kmeans(somedata, 2)
three <- kmeans(somedata, 3)
three
two$centers
#cluster results
# Attach both cluster assignments to the coordinate table.
clus <- cbind(somedata, clus2 = two$cluster,
              clus3 = three$cluster)
head(clus)
#clustering visualization
# Side-by-side maps of the two- and three-cluster solutions; cluster centers
# are drawn as filled diamonds and labelled with their row names.
par(mfrow = c(1, 2))
plot(clus$longitude, clus$latitude, col = two$cluster, asp = 1,
     pch = two$cluster, main = "Sites for two kiosks",
     xlab = "Longitude", ylab = "Latitude")
# NOTE(review): centers[, 2]/centers[, 1] assume column 1 is latitude and
# column 2 longitude -- confirm against the column order of `somedata`.
points(two$centers[ ,2], two$centers[ ,1], pch = 23,
       col = 'maroon', bg = 'lightblue', cex = 3)
text(two$centers[ ,2], two$centers[ ,1], cex = 1.1,
     col = 'black', attributes(two$centers)$dimnames[[1]])
plot(clus$longitude, clus$latitude, col = three$cluster, asp = 1,
     pch = three$cluster, main = "Sites for three kiosks",
     xlab = "Longitude", ylab = "Latitude")
points(three$centers[ ,2], three$centers[ ,1],
       pch = 23, col = 'maroon', bg = 'lightblue', cex = 3)
text(three$centers[ ,2], three$centers[ ,1], cex = 1.1,
     col = 'black', attributes(three$centers)$dimnames[[1]])
#clus continue
# Flag points whose two- and three-cluster assignments disagree by giving
# them a shifted plotting symbol (cluster id + 15).
hybrid <- cbind(clus, hybrid_shape = rep(0, dim(clus)[1]))
# NOTE(review): dim(hybrid[1])[1] equals nrow(hybrid); columns 3 and 4 are
# assumed to be clus2/clus3 and column 5 the new hybrid_shape -- verify the
# positional indices against the actual column layout of `clus`.
for (e in 1:dim(hybrid[1])[1]) {
  if (hybrid[e, 3] == hybrid[e, 4]) {
    hybrid[e, 5] <- hybrid[e, 3]
  }
  if (hybrid[e, 3] != hybrid[e, 4]) {
    hybrid[e, 5] <- hybrid[e ,3] + 15
  }
}
plot(hybrid$longitude, hybrid$latitude, col = two$cluster,
     main = "Hybrid: Two-cluster kiosks in three-cluster locations", pch = hybrid$hybrid_shape, cex = 1.1,
     xlab = "Longitude", ylab = "Latitude", asp = 1)
points(three$centers[1:2, 2], three$centers[1:2, 1],
       pch = 23, col = 'maroon', bg = 'lightblue', cex = 3)
text(three$centers[1:2, 2], three$centers[1:2, 1], cex = 1.1,
     col = 'black', attributes(two$centers)$dimnames[[1]])
|
6f940cd2794fb2317a954018a9f58f39a777b9b4 | dba7646b74a68ef18375cd5c0ca3233ba1684ea1 | /targetscore/man/get_fs_vals.Rd | 63790ea46721e610ae43f4e8adbb9e2dd60468a4 | [
"Apache-2.0"
] | permissive | jmig5776/targetscore | 130dfe6aec7e6437e2f9ae6e3d075e5a871097e8 | 83e6550446abea571f78bfe39aeb7e4fde894c98 | refs/heads/master | 2023-02-06T14:17:23.311838 | 2020-12-18T13:36:08 | 2020-12-18T13:36:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,045 | rd | get_fs_vals.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_fs_vals.R
\name{get_fs_vals}
\alias{get_fs_vals}
\title{Extract functional score values from the COSMIC/ONCODRIVE databases. Can be overridden with a manually set functional score.}
\usage{
get_fs_vals(
n_prot,
proteomic_responses,
mab_to_genes,
fs_override = NULL,
cancer_role_file = system.file("extdata", "Cosmic.txt", package = "targetscore"),
verbose = FALSE
)
}
\arguments{
\item{n_prot}{Antibody number of input data.}
\item{proteomic_responses}{Input drug perturbation data. With columns as antibody, rows as samples.}
\item{mab_to_genes}{A list of antibodies, their associated genes, modification sites and effect.}
\item{fs_override}{a listing of manually set functional scores for each gene,
used to override the values given by the COSMIC database. (.txt)}
\item{cancer_role_file}{a file specifying the role of cancer genes; 2-column table "gene" and "fs";
fs 1 is oncogene, 0 is dual or unknown, -1 is tumor suppressor}
\item{verbose}{Default as FALSE. If given TRUE, will print out the gene seq mapped with antibody map file.}
}
\value{
* "fs_final" data frame with two columns: prot as antibody label; fs as functional score
}
\description{
Extract functional score values from the COSMIC/ONCODRIVE databases. Can be overridden with a manually set functional score.
}
\examples{
# Read fs_manually set file
fs_override_org <- readRDS(system.file("test_data_files", "fs_value_file.rds",
package = "targetscore"
))
# Read proteomic response file
file <- system.file("test_data", "BT474.csv", package = "targetscore")
proteomic_responses <- read.csv(file, row.names = 1)
# Read antibody file
file <- system.file("target_score_data", "antibody_map.csv", package = "targetscore")
mab_to_genes <- read.csv(file,
header = TRUE,
stringsAsFactors = FALSE
)
fs <- get_fs_vals(
n_prot = ncol(proteomic_responses), proteomic_responses = proteomic_responses,
mab_to_genes = mab_to_genes, fs_override = fs_override_org
)
}
\concept{targetscore}
|
644059de8ac449a3bb2d51754ba1901ea264b08f | b98573fb1a642b0c3fedfc11d7f625c9a4aef31b | /EDA.R | c2669555fe133dad6b83009e5a0da8bc0746ce80 | [] | no_license | Rajat-Gaur/Time-series-forecasting-Power-demand | 989160bc54bfe667f3dafe8e6e0822543a938e88 | 28fd3b39ddedc6eabce6b5be60d33fcbb3911e88 | refs/heads/main | 2023-02-24T23:24:12.668168 | 2021-01-27T03:11:05 | 2021-01-27T03:11:05 | 333,287,848 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,346 | r | EDA.R | library(gbm)
library(lubridate)
library(MLmetrics)
library(dplyr)
library(xts)
library(forecast)
library(ggplot2)
library(xts)
getwd()
final_data = read.csv("final_data.csv")
tail(final_data)
final_data$Date = ymd_hms(final_data$Date)
new = final_data[,c('Load','Date')]
ts1 = xts(new$Load, order.by = new$Date)
autoplot(ts1) + labs(titles = "Hourly Electricity Demand",
x = "year")
#Making histograms
ts1 %>% summary()
ggplot(new,aes(Load))+ geom_histogram(bins = 50, col = "red",alpha=0.5) +
labs(y ="No. of values")
start_date = ymd_hms("2014-01-01 08:00:00")
end_date = ymd_hms("2019-08-31 23:00:00")
#newVal = as.numeric((end_date - start_date) + 1)
training_data = new[(new$Date>=start_date) & (new$Date<=end_date),]
ts1 = xts(training_data$Load,order.by = training_data$Date)
#
autoplot(ts1) + labs(titles = 'Hourly Demand',
x = 'Month-Year',
y = 'Load')
autoplot(diff(ts1)) +
labs(titles = "Differenced Hourly Electricity Demand",
x = "year")
ts2 = ts(data = training_data$Load,frequency = 8760)
#ggseasonplot(ts2)
y = decompose(ts2, type = "mult")
plot(y)
z = decompose(ts2, type = "additive")
plot(z)
vb <- training_data$Load %>% msts(seasonal.periods = c(24*7,8760))
vb %>% mstl() %>% autoplot()
acf(ts1)
|
4c17e1c98944852498358d030477a3f1072bc964 | 9ee587651e82c3efdf58036364c197829ffa57e1 | /Chapter3_EcosystemComparison/15.05.2022_NMDS.R | 11174b963f54c475724518bbea0d51a2e9f996c7 | [
"Apache-2.0"
] | permissive | QutEcoacoustics/spatial-acoustics | 7f0fd2af6663200ab529a2f8979eec56a0bf2e40 | 5e8eaba29576a59f85220c8013d0b083ddb70592 | refs/heads/master | 2023-04-15T09:50:44.063038 | 2023-03-14T23:36:36 | 2023-03-14T23:36:36 | 222,621,976 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 30,282 | r | 15.05.2022_NMDS.R | library(tidyverse)
library(ggplot2)
library(lubridate)
library(vegan)
rm(list = ls())
set.seed(123)
# set.group <- "bird"
#Splitting the data using a function from dplyr package
# Build an absolute path inside the project data directory.
# Any path components passed in `...` are appended to the fixed project root.
getDataPath <- function(...) {
  project_root <- "C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Chapter3_SoundscapeEcosystemComparation"
  file.path(project_root, ...)
}
# Load the cleaned detection table. Each row is one classified acoustic
# event (RFclass) at one recorder (ID.x) with its covariates; a numeric
# `location` code is attached per recorder and bookkeeping columns dropped.
data_og <- read.csv(getDataPath("13.05.2022_fixingdata5.csv")) %>%
  mutate(location = case_when(
    ID.x == "BonBon_WetA" ~ "7",
    ID.x == "BonBon_DryA" ~ "6",
    ID.x == "Booroopki_DryA" ~ "305",
    ID.x == "Booroopki_WetA" ~ "306",
    ID.x == "Bowra_DryA" ~ "259",
    ID.x == "Bowra_WetA" ~ "258",
    ID.x == "Eungella_DryA" ~ "110",
    ID.x == "Eungella_WetA" ~ "111",
    ID.x == "SERF_DryA" ~ "253",
    ID.x == "SERF_WetA" ~ "254"
  )) %>%
  dplyr::select(everything(), -c(Recording_time, day, week, id, id_path, fid_what, ca_class_6_325)) %>%
  ungroup() %>%
  distinct()
#General multinomial - between sites ----
# All-sites ("landscape") table: one row per unique combination of class,
# site and the landscape covariates. `n` counts how many raw detections
# collapse into that row; combinations with n <= 3 are dropped as too rare.
dataframe_landscape <- data_og %>%
  dplyr::select(
    ID.x, RFclass, site, bvg_char, temp_max,
    natural_cover_325, contag_landscape_325, tca_landscape_325
  ) %>%
  group_by(
    RFclass, site, bvg_char, temp_max,
    natural_cover_325, contag_landscape_325, tca_landscape_325
  ) %>%
  mutate(
    n = n(),                                          # detections per combination
    natural_cover_325 = round(natural_cover_325, 2),
    tca_landscape_325 = round(tca_landscape_325, 2),
    contag_landscape_325 = round(contag_landscape_325, 2),
    temp_total = round(mean(temp_max), 2)             # mean daily max temperature
  ) %>%
  ungroup() %>%
  dplyr::select(RFclass, site, ID.x, bvg_char, everything(), -temp_max) %>%
  filter(n > 3) %>%
  distinct() %>%
  droplevels()
# dataframe$id_number <- as.factor(dataframe$id_number)
#
# rownames(dataframe) <- dataframe$id_number
# dataframe <- dplyr::select(dataframe, everything()) %>%
# distinct()
# dataframe$np_landscape_3k <- as.numeric(dataframe$np_landscape_3k)
# dataframe$contag_landscape_325 <- as.numeric(dataframe$contag_landscape_325)
# Reshape to a wide table with one abundance column per class; classes
# never recorded for a combination come out as NA and are set to 0.
dataframe_land_wide <- dataframe_landscape %>%
  pivot_wider(names_from = "RFclass", values_from = "n")
dataframe_land_wide <- dataframe_land_wide %>%
  mutate_at(c("bird", "frog", "insect"), ~ replace(., is.na(.), 0))
# Range-standardise the continuous covariates (columns 4:7:
# natural_cover_325, contag_landscape_325, tca_landscape_325, temp_total).
dataframe_norm_landscape <- dataframe_land_wide %>%
  mutate_at(c(4:7), ~ decostand(., method = "range") %>% as.vector(.)) %>%
  droplevels()
# NMDS ordination (Jaccard distance) of the all-sites class-abundance table
# (columns 8:10 = bird, frog, insect), with environmental vectors/factors
# fitted on top, followed by the publication figure.
nmds <- metaMDS(dataframe_norm_landscape[, c(8:10)], k = 2, trymax = 100, distance = "jaccard")

# Fit covariates (columns 3:7: bvg_char, natural_cover_325,
# contag_landscape_325, tca_landscape_325, temp_total) onto the ordination.
# (The original also passed `distance = "jaccard"`; envfit() has no such
# argument, so the inert argument was dropped.)
en <- envfit(nmds, dataframe_norm_landscape[, 3:7], permutations = 999, na.rm = TRUE)
en

plot(nmds$species)
plot(en)

# Site scores for plotting, annotated with site and vegetation group.
data.scores <- as.data.frame(scores(nmds)$sites)
data.scores$site <- dataframe_land_wide$site
data.scores$bvg <- dataframe_land_wide$bvg_char
data.scores$id <- rownames(data.scores)

# Class ("species") scores.
species.scores <- as.data.frame(scores(nmds, "species"))
species.scores$var <- rownames(species.scores)

# Continuous-vector and factor-centroid coordinates, rescaled for plotting.
en_coord_cont <- as.data.frame(scores(en, "vectors")) * ordiArrowMul(en)
en_coord_cont$variables <- rownames(en_coord_cont)
en_coord_cat <- as.data.frame(scores(en, "factors")) * ordiArrowMul(en)

ggplot(data = data.scores, aes(x = NMDS1, y = NMDS2)) +
  geom_point(data = data.scores, aes(colour = site), size = 3, alpha = 0.5) +
  scale_colour_manual(values = c("#fdbb84", "#8c6bb1", "#8c510a", "#1b7837", "#4393c3")) +
  # Environmental vectors (arrows) and factor centroids (diamonds).
  geom_segment(aes(x = 0, y = 0, xend = NMDS1, yend = NMDS2),
               data = en_coord_cont, size = 1, alpha = 0.5, colour = "grey30") +
  geom_point(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
             shape = "diamond", size = 4, alpha = 0.6, colour = "navy") +
  geom_point(data = species.scores, aes(x = NMDS1, y = NMDS2, shape = var), size = 3) +
  geom_text(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
            label = c("dry rainforest", "EOF - shruby understory", "EW - grassy understory", "EW - shruby understory", "Mulga", "Saltbush shrub", "Tropical rainforest"), colour = "navy", fontface = "bold") +
  # BUGFIX: the vector labels were stale ("natural cover (3k)", "number of
  # patches (3k)") and did not match the fitted columns; relabelled to the
  # covariates actually present in en_coord_cont (see envfit() call above).
  geom_text(data = en_coord_cont, aes(x = NMDS1, y = NMDS2), colour = "grey30",
            fontface = "bold",
            label = c("natural cover (325m)", "contagion (325m)", "total core area (325m)", "mean temperature")) +
  theme(axis.title = element_text(size = 10, face = "bold", colour = "grey30"),
        panel.background = element_blank(), panel.border = element_rect(fill = NA, colour = "grey30"),
        axis.ticks = element_blank(), axis.text = element_blank(), legend.key = element_blank(),
        legend.title = element_text(size = 10, face = "bold", colour = "grey30"),
        legend.text = element_text(size = 9, colour = "grey30")) +
  labs(colour = "Site", shape = "Group")
ggsave(getDataPath("Figures", "landscape_nmds.jpg"))
# PERMANOVA on the all-sites class-abundance table.
# Fixed two bugs in the original call:
#  * the response used columns [, 9:11]; the class columns (bird, frog,
#    insect) sit at 8:10, matching the metaMDS() call above;
#  * the predictors referenced columns that do not exist in
#    dataframe_norm_landscape (natural_cover_3k, np_landscape_3k, moon);
#    the model now uses the covariates actually carried through the
#    pipeline (the same set supplied to envfit() above).
PERMANOVA <- adonis2(dataframe_norm_landscape[, 8:10] ~
                       dataframe_norm_landscape$natural_cover_325 +
                       dataframe_norm_landscape$contag_landscape_325 +
                       dataframe_norm_landscape$tca_landscape_325 +
                       dataframe_norm_landscape$bvg_char +
                       dataframe_norm_landscape$temp_total)
PERMANOVA
#Eungella ----
# Eungella: per-recorder counts of each class (bat records excluded), with
# the diel period and mean daily maximum temperature as covariates.
filtered <- filter(data_og, site == "Eungella" & RFclass != "bat")
dataframe_landscape <- filtered %>%
  dplyr::select(ID.x, RFclass, temp_max, period, bvg_char) %>%
  group_by(RFclass, ID.x, temp_max, period) %>%
  mutate(
    n = n(),                               # detections per combination
    temp_total = round(mean(temp_max), 2)  # mean daily max temperature
  ) %>%
  ungroup() %>%
  dplyr::select(RFclass, ID.x, bvg_char, everything(), -temp_max) %>%
  distinct() %>%
  droplevels()
# dataframe$id_number <- as.factor(dataframe$id_number)
#
# rownames(dataframe) <- dataframe$id_number
# dataframe <- dplyr::select(dataframe, everything()) %>%
# distinct()
# dataframe$np_landscape_3k <- as.numeric(dataframe$np_landscape_3k)
# dataframe$contag_landscape_325 <- as.numeric(dataframe$contag_landscape_325)
# Wide class-abundance table for Eungella; absent classes (NA) -> 0, then
# range-standardise column 4 (temp_total).
dataframe_land_wide <- dataframe_landscape %>%
  pivot_wider(names_from = "RFclass", values_from = "n")
dataframe_land_wide <- dataframe_land_wide %>%
  mutate_at(c("bird", "frog", "insect"), ~ replace(., is.na(.), 0))
dataframe_norm_landscape <- dataframe_land_wide %>%
  mutate_at(c(4), ~ decostand(., method = "range") %>% as.vector(.)) %>%
  droplevels()
# nmds <- metaMDS(dataframe_norm_landscape[,c(6:8)], k = 2, trymax = 100)
#
# en <- envfit(nmds, dataframe_norm_landscape[,3:4], permutations = 999, na.rm = T)
# en
#
# plot(nmds$species)
# plot(en)
#
# data.scores = as.data.frame(scores(nmds)$sites)
#
# #add 'season' column as before
# data.scores$site = dataframe_land_wide$ID.x
# data.scores$bvg = dataframe_land_wide$bvg_char
# data.scores$id <- rownames(data.scores)
#
#
# species.scores <- as.data.frame(scores(nmds, "species"))
# species.scores$var <- rownames(species.scores)
#
# en_coord_cont = as.data.frame(scores(en, "vectors")) * ordiArrowMul(en)
# en_coord_cont$variables <- rownames(en_coord_cont)
# en_coord_cat = as.data.frame(scores(en, "factors")) * ordiArrowMul(en)
#
#
#
# ggplot(data = data.scores, aes(x = NMDS1, y = NMDS2)) +
# geom_point(data = data.scores, aes(colour = bvg), size = 3, alpha = 0.5) +
# # scale_colour_manual(values = c("orange", "steelblue")) +
# geom_segment(aes(x = 0, y = 0, xend = NMDS1, yend = NMDS2),
# data = en_coord_cont, size =1, alpha = 0.5, colour = "grey30") +
# geom_point(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# shape = "diamond", size = 4, alpha = 0.6, colour = "navy") +
# geom_point(data = species.scores, aes(x = NMDS1, y = NMDS2, shape = var), size = 3) +
# geom_text(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# label = row.names(en_coord_cat), colour = "navy", fontface = "bold") +
# geom_text(data = en_coord_cont, aes(x = NMDS1, y = NMDS2), colour = "grey30",
# fontface = "bold", label = row.names(en_coord_cont)) +
# theme(axis.title = element_text(size = 10, face = "bold", colour = "grey30"),
# panel.background = element_blank(), panel.border = element_rect(fill = NA, colour = "grey30"),
# axis.ticks = element_blank(), axis.text = element_blank(), legend.key = element_blank(),
# legend.title = element_text(size = 10, face = "bold", colour = "grey30"),
# legend.text = element_text(size = 9, colour = "grey30")) +
# labs(colour = "Site", shape = "Group")
# ggsave(getDataPath("Figures", "landscape_nmds_colouredbvg.jpg"))
# temp_total, moon, natural_cover_3k, np_landscape_3k, tca_landscape_325
# PERMANOVA: partition variation in the Eungella class abundances (bird,
# frog, insect; columns 5:7) by diel period and mean daily max temperature.
PERMANOVA <- adonis2(dataframe_norm_landscape[,5:7]~ dataframe_norm_landscape$period + dataframe_norm_landscape$temp_total)
PERMANOVA
# NOTE: removed the block that filled `result$conv`, `result$stress` and
# `result$permanova_*` here. It was broken: `result` is never initialised,
# `nmds` is only created in the commented-out ordination above (so it would
# silently reuse the object from the previous section), and the accessors
# `$F.Model` / `$aov.tab` belong to adonis(), not to the adonis2() result
# used here. The same block is commented out in every other site section.
#SERF ----
# SERF: per-recorder counts of each class (bat and mammal records
# excluded), with diel period and mean daily maximum temperature.
filtered <- filter(data_og, site == "SERF" & RFclass != "bat" & RFclass != "mammal")
dataframe_landscape <- filtered %>%
  dplyr::select(ID.x, RFclass, temp_max, period, bvg_char) %>%
  group_by(RFclass, ID.x, temp_max, period) %>%
  mutate(
    n = n(),                               # detections per combination
    temp_total = round(mean(temp_max), 2)  # mean daily max temperature
  ) %>%
  ungroup() %>%
  dplyr::select(RFclass, ID.x, bvg_char, everything(), -temp_max) %>%
  distinct() %>%
  droplevels()
# dataframe$id_number <- as.factor(dataframe$id_number)
#
# rownames(dataframe) <- dataframe$id_number
# dataframe <- dplyr::select(dataframe, everything()) %>%
# distinct()
# dataframe$np_landscape_3k <- as.numeric(dataframe$np_landscape_3k)
# dataframe$contag_landscape_325 <- as.numeric(dataframe$contag_landscape_325)
# Wide class-abundance table for SERF; absent classes (NA) -> 0, then
# range-standardise column 4 (temp_total).
dataframe_land_wide <- dataframe_landscape %>%
  pivot_wider(names_from = "RFclass", values_from = "n")
dataframe_land_wide <- dataframe_land_wide %>%
  mutate_at(c("bird", "frog", "insect"), ~ replace(., is.na(.), 0))
dataframe_norm_landscape <- dataframe_land_wide %>%
  mutate_at(c(4), ~ decostand(., method = "range") %>% as.vector(.)) %>%
  droplevels()
# nmds <- metaMDS(dataframe_norm_landscape[,c(6:8)], k = 2, trymax = 100)
#
# en <- envfit(nmds, dataframe_norm_landscape[,3:4], permutations = 999, na.rm = T)
# en
#
# plot(nmds$species)
# plot(en)
#
# data.scores = as.data.frame(scores(nmds)$sites)
#
# #add 'season' column as before
# data.scores$site = dataframe_land_wide$ID.x
# data.scores$bvg = dataframe_land_wide$bvg_char
# data.scores$id <- rownames(data.scores)
#
#
# species.scores <- as.data.frame(scores(nmds, "species"))
# species.scores$var <- rownames(species.scores)
#
# en_coord_cont = as.data.frame(scores(en, "vectors")) * ordiArrowMul(en)
# en_coord_cont$variables <- rownames(en_coord_cont)
# en_coord_cat = as.data.frame(scores(en, "factors")) * ordiArrowMul(en)
#
#
#
# ggplot(data = data.scores, aes(x = NMDS1, y = NMDS2)) +
# geom_point(data = data.scores, aes(colour = bvg), size = 3, alpha = 0.5) +
# # scale_colour_manual(values = c("orange", "steelblue")) +
# geom_segment(aes(x = 0, y = 0, xend = NMDS1, yend = NMDS2),
# data = en_coord_cont, size =1, alpha = 0.5, colour = "grey30") +
# geom_point(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# shape = "diamond", size = 4, alpha = 0.6, colour = "navy") +
# geom_point(data = species.scores, aes(x = NMDS1, y = NMDS2, shape = var), size = 3) +
# geom_text(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# label = row.names(en_coord_cat), colour = "navy", fontface = "bold") +
# geom_text(data = en_coord_cont, aes(x = NMDS1, y = NMDS2), colour = "grey30",
# fontface = "bold", label = row.names(en_coord_cont)) +
# theme(axis.title = element_text(size = 10, face = "bold", colour = "grey30"),
# panel.background = element_blank(), panel.border = element_rect(fill = NA, colour = "grey30"),
# axis.ticks = element_blank(), axis.text = element_blank(), legend.key = element_blank(),
# legend.title = element_text(size = 10, face = "bold", colour = "grey30"),
# legend.text = element_text(size = 9, colour = "grey30")) +
# labs(colour = "Site", shape = "Group")
# ggsave(getDataPath("Figures", "landscape_nmds_colouredbvg.jpg"))
# temp_total, moon, natural_cover_3k, np_landscape_3k, tca_landscape_325
# PERMANOVA: partition variation in the SERF class abundances (bird, frog,
# insect; columns 5:7) by diel period and mean daily maximum temperature
# (adonis2 default of 999 permutations).
PERMANOVA <- adonis2(dataframe_norm_landscape[,5:7]~ dataframe_norm_landscape$period + dataframe_norm_landscape$temp_total)
PERMANOVA
# result$conv <- as.character(nmds$converged)
# result$stress <- as.numeric(nmds$stress)
# result$permanova_F <- as.numeric(PERMANOVA$F.Model[1])
# result$permanova_R2 <- as.numeric(PERMANOVA$aov.tab$R2[1])
# result$permanova_p <- as.numeric(PERMANOVA$aov.tab$`Pr(>F)`[1])
# result <- as.data.frame(result)
#Bowra ----
# Bowra: per-recorder counts of each class (bat and mammal excluded), with
# diel period, temperature, NDVI, natural cover (325 m) and rainfall.
filtered <- filter(data_og, site == "Bowra" & RFclass != "bat" & RFclass != "mammal")
dataframe_landscape <- filtered %>%
  dplyr::select(ID.x, RFclass, bvg_char, period, temp_max, ndvi_mean, natural_cover_325, rain_value) %>%
  group_by(RFclass, ID.x, period, temp_max, ndvi_mean, natural_cover_325, rain_value) %>%
  mutate(
    n = n(),                                          # detections per combination
    natural_cover_325 = round(natural_cover_325, 2),
    rain_value = round(mean(rain_value), 2),
    ndvi_mean = round(mean(ndvi_mean), 2),
    temp_total = round(mean(temp_max), 2)             # mean daily max temperature
  ) %>%
  ungroup() %>%
  dplyr::select(RFclass, ID.x, bvg_char, everything(), -temp_max) %>%
  distinct() %>%
  droplevels()
# dataframe$id_number <- as.factor(dataframe$id_number)
#
# rownames(dataframe) <- dataframe$id_number
# dataframe <- dplyr::select(dataframe, everything()) %>%
# distinct()
# dataframe$np_landscape_3k <- as.numeric(dataframe$np_landscape_3k)
# dataframe$contag_landscape_325 <- as.numeric(dataframe$contag_landscape_325)
# Wide class-abundance table for Bowra; absent classes (NA) -> 0, then
# range-standardise columns 4:7 (ndvi_mean, natural_cover_325, rain_value,
# temp_total).
dataframe_land_wide <- dataframe_landscape %>%
  pivot_wider(names_from = "RFclass", values_from = "n")
dataframe_land_wide <- dataframe_land_wide %>%
  mutate_at(c("bird", "frog", "insect"), ~ replace(., is.na(.), 0))
dataframe_norm_landscape <- dataframe_land_wide %>%
  mutate_at(c(4:7), ~ decostand(., method = "range") %>% as.vector(.)) %>%
  droplevels()
# nmds <- metaMDS(dataframe_norm_landscape[,c(6:8)], k = 2, trymax = 100)
#
# en <- envfit(nmds, dataframe_norm_landscape[,3:4], permutations = 999, na.rm = T)
# en
#
# plot(nmds$species)
# plot(en)
#
# data.scores = as.data.frame(scores(nmds)$sites)
#
# #add 'season' column as before
# data.scores$site = dataframe_land_wide$ID.x
# data.scores$bvg = dataframe_land_wide$bvg_char
# data.scores$id <- rownames(data.scores)
#
#
# species.scores <- as.data.frame(scores(nmds, "species"))
# species.scores$var <- rownames(species.scores)
#
# en_coord_cont = as.data.frame(scores(en, "vectors")) * ordiArrowMul(en)
# en_coord_cont$variables <- rownames(en_coord_cont)
# en_coord_cat = as.data.frame(scores(en, "factors")) * ordiArrowMul(en)
#
#
#
# ggplot(data = data.scores, aes(x = NMDS1, y = NMDS2)) +
# geom_point(data = data.scores, aes(colour = bvg), size = 3, alpha = 0.5) +
# # scale_colour_manual(values = c("orange", "steelblue")) +
# geom_segment(aes(x = 0, y = 0, xend = NMDS1, yend = NMDS2),
# data = en_coord_cont, size =1, alpha = 0.5, colour = "grey30") +
# geom_point(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# shape = "diamond", size = 4, alpha = 0.6, colour = "navy") +
# geom_point(data = species.scores, aes(x = NMDS1, y = NMDS2, shape = var), size = 3) +
# geom_text(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# label = row.names(en_coord_cat), colour = "navy", fontface = "bold") +
# geom_text(data = en_coord_cont, aes(x = NMDS1, y = NMDS2), colour = "grey30",
# fontface = "bold", label = row.names(en_coord_cont)) +
# theme(axis.title = element_text(size = 10, face = "bold", colour = "grey30"),
# panel.background = element_blank(), panel.border = element_rect(fill = NA, colour = "grey30"),
# axis.ticks = element_blank(), axis.text = element_blank(), legend.key = element_blank(),
# legend.title = element_text(size = 10, face = "bold", colour = "grey30"),
# legend.text = element_text(size = 9, colour = "grey30")) +
# labs(colour = "Site", shape = "Group")
# ggsave(getDataPath("Figures", "landscape_nmds_colouredbvg.jpg"))
# temp_total, moon, natural_cover_3k, np_landscape_3k, tca_landscape_325
# Full PERMANOVA for Bowra: class abundances (columns 8:10) vs diel period,
# temperature, NDVI, natural cover (325 m) and rainfall.
PERMANOVA <- adonis2(dataframe_norm_landscape[,8:10]~ dataframe_norm_landscape$period + dataframe_norm_landscape$temp_total + dataframe_norm_landscape$ndvi_mean + dataframe_norm_landscape$natural_cover_325 + dataframe_norm_landscape$rain_value)
PERMANOVA
# Reduced model keeping only period, temperature and natural cover.
PERMANOVA2 <- adonis2(dataframe_norm_landscape[,8:10]~ dataframe_norm_landscape$period + dataframe_norm_landscape$temp_total + dataframe_norm_landscape$natural_cover_325)
PERMANOVA2
# result$conv <- as.character(nmds$converged)
# result$stress <- as.numeric(nmds$stress)
# result$permanova_F <- as.numeric(PERMANOVA$F.Model[1])
# result$permanova_R2 <- as.numeric(PERMANOVA$aov.tab$R2[1])
# result$permanova_p <- as.numeric(PERMANOVA$aov.tab$`Pr(>F)`[1])
# result <- as.data.frame(result)
#BonBon ----
# BonBon: per-recorder counts of each class (bat and mammal excluded), with
# moon illumination, NDVI, rainfall and 325 m landscape metrics
# (no diel period / temperature model for this site).
filtered <- filter(data_og, site == "BonBon" & RFclass != "bat" & RFclass != "mammal")
dataframe_landscape <- filtered %>%
  dplyr::select(ID.x, RFclass, bvg_char, contag_landscape_325, ndvi_mean, moon_illu, rain_value, np_landscape_325) %>%
  group_by(RFclass, ID.x, contag_landscape_325, ndvi_mean, moon_illu, rain_value, np_landscape_325) %>%
  mutate(
    n = n(),                                              # detections per combination
    contag_landscape_325 = round(contag_landscape_325, 2),
    rain_value = round(mean(rain_value), 3),
    ndvi_mean = round(mean(ndvi_mean), 3),
    np_landscape_325 = round(np_landscape_325, 2),
    moon_illu = round(mean(moon_illu), 3)
  ) %>%
  ungroup() %>%
  dplyr::select(RFclass, ID.x, bvg_char, everything()) %>%
  distinct() %>%
  droplevels()
# dataframe$id_number <- as.factor(dataframe$id_number)
#
# rownames(dataframe) <- dataframe$id_number
# dataframe <- dplyr::select(dataframe, everything()) %>%
# distinct()
# dataframe$np_landscape_3k <- as.numeric(dataframe$np_landscape_3k)
# dataframe$contag_landscape_325 <- as.numeric(dataframe$contag_landscape_325)
# Wide class-abundance table for BonBon; duplicated key combinations are
# summed (values_fn = sum), absent classes (NA) -> 0, then range-standardise
# columns 3:7 (contag_landscape_325, ndvi_mean, moon_illu, rain_value,
# np_landscape_325).
dataframe_land_wide <- dataframe_landscape %>%
  pivot_wider(names_from = "RFclass", values_from = "n", values_fn = sum)
dataframe_land_wide <- dataframe_land_wide %>%
  mutate_at(c("bird", "frog", "insect"), ~ replace(., is.na(.), 0))
dataframe_norm_landscape <- dataframe_land_wide %>%
  mutate_at(c(3:7), ~ decostand(., method = "range") %>% as.vector(.)) %>%
  droplevels()
# nmds <- metaMDS(dataframe_norm_landscape[,c(6:8)], k = 2, trymax = 100)
#
# en <- envfit(nmds, dataframe_norm_landscape[,3:4], permutations = 999, na.rm = T)
# en
#
# plot(nmds$species)
# plot(en)
#
# data.scores = as.data.frame(scores(nmds)$sites)
#
# #add 'season' column as before
# data.scores$site = dataframe_land_wide$ID.x
# data.scores$bvg = dataframe_land_wide$bvg_char
# data.scores$id <- rownames(data.scores)
#
#
# species.scores <- as.data.frame(scores(nmds, "species"))
# species.scores$var <- rownames(species.scores)
#
# en_coord_cont = as.data.frame(scores(en, "vectors")) * ordiArrowMul(en)
# en_coord_cont$variables <- rownames(en_coord_cont)
# en_coord_cat = as.data.frame(scores(en, "factors")) * ordiArrowMul(en)
#
#
#
# ggplot(data = data.scores, aes(x = NMDS1, y = NMDS2)) +
# geom_point(data = data.scores, aes(colour = bvg), size = 3, alpha = 0.5) +
# # scale_colour_manual(values = c("orange", "steelblue")) +
# geom_segment(aes(x = 0, y = 0, xend = NMDS1, yend = NMDS2),
# data = en_coord_cont, size =1, alpha = 0.5, colour = "grey30") +
# geom_point(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# shape = "diamond", size = 4, alpha = 0.6, colour = "navy") +
# geom_point(data = species.scores, aes(x = NMDS1, y = NMDS2, shape = var), size = 3) +
# geom_text(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# label = row.names(en_coord_cat), colour = "navy", fontface = "bold") +
# geom_text(data = en_coord_cont, aes(x = NMDS1, y = NMDS2), colour = "grey30",
# fontface = "bold", label = row.names(en_coord_cont)) +
# theme(axis.title = element_text(size = 10, face = "bold", colour = "grey30"),
# panel.background = element_blank(), panel.border = element_rect(fill = NA, colour = "grey30"),
# axis.ticks = element_blank(), axis.text = element_blank(), legend.key = element_blank(),
# legend.title = element_text(size = 10, face = "bold", colour = "grey30"),
# legend.text = element_text(size = 9, colour = "grey30")) +
# labs(colour = "Site", shape = "Group")
# ggsave(getDataPath("Figures", "landscape_nmds_colouredbvg.jpg"))
# temp_total, moon, natural_cover_3k, np_landscape_3k, tca_landscape_325
# Full PERMANOVA for BonBon: class abundances (columns 8:10) vs contagion,
# NDVI, moon illumination, rainfall and number of patches (325 m scale).
PERMANOVA <- adonis2(dataframe_norm_landscape[,8:10]~ dataframe_norm_landscape$contag_landscape_325 + dataframe_norm_landscape$ndvi_mean + dataframe_norm_landscape$moon_illu + dataframe_norm_landscape$rain_value + dataframe_norm_landscape$np_landscape_325)
PERMANOVA
# Reduced model: number of patches only.
PERMANOVA2 <- adonis2(dataframe_norm_landscape[,8:10]~ dataframe_norm_landscape$np_landscape_325)
PERMANOVA2
# result$conv <- as.character(nmds$converged)
# result$stress <- as.numeric(nmds$stress)
# result$permanova_F <- as.numeric(PERMANOVA$F.Model[1])
# result$permanova_R2 <- as.numeric(PERMANOVA$aov.tab$R2[1])
# result$permanova_p <- as.numeric(PERMANOVA$aov.tab$`Pr(>F)`[1])
# result <- as.data.frame(result)
#Booroopki ----
# Booroopki: per-recorder counts of each class (bat and mammal excluded),
# with diel period, rainfall and number of patches (325 m).
filtered <- filter(data_og, site == "Booroopki" & RFclass != "bat" & RFclass != "mammal")
dataframe_landscape <- filtered %>%
  dplyr::select(ID.x, RFclass, bvg_char, period, rain_value, np_landscape_325) %>%
  group_by(RFclass, ID.x, period, rain_value, np_landscape_325) %>%
  mutate(
    n = n(),                                      # detections per combination
    rain_value = round(mean(rain_value), 3),
    np_landscape_325 = round(np_landscape_325, 2)
  ) %>%
  ungroup() %>%
  dplyr::select(RFclass, ID.x, bvg_char, everything()) %>%
  distinct() %>%
  droplevels()
# dataframe$id_number <- as.factor(dataframe$id_number)
#
# rownames(dataframe) <- dataframe$id_number
# dataframe <- dplyr::select(dataframe, everything()) %>%
# distinct()
# dataframe$np_landscape_3k <- as.numeric(dataframe$np_landscape_3k)
# dataframe$contag_landscape_325 <- as.numeric(dataframe$contag_landscape_325)
# Wide class-abundance table for Booroopki; duplicated key combinations are
# summed (values_fn = sum) and absent classes (NA) -> 0.
dataframe_land_wide <- dataframe_landscape %>%
  pivot_wider(names_from = "RFclass", values_from = "n", values_fn = sum)
dataframe_land_wide <- dataframe_land_wide %>%
  mutate_at(c("bird", "frog", "insect"), ~ replace(., is.na(.), 0))
# NOTE(review): unlike the other site sections, columns 6:8 here are the
# abundance columns themselves (bird, frog, insect), so it is the responses
# -- not the predictors -- that get range-standardised. Confirm intentional.
dataframe_norm_landscape <- dataframe_land_wide %>%
  mutate_at(c(6:8), ~ decostand(., method = "range") %>% as.vector(.)) %>%
  droplevels()
# nmds <- metaMDS(dataframe_norm_landscape[,c(6:8)], k = 2, trymax = 100)
#
# en <- envfit(nmds, dataframe_norm_landscape[,3:4], permutations = 999, na.rm = T)
# en
#
# plot(nmds$species)
# plot(en)
#
# data.scores = as.data.frame(scores(nmds)$sites)
#
# #add 'season' column as before
# data.scores$site = dataframe_land_wide$ID.x
# data.scores$bvg = dataframe_land_wide$bvg_char
# data.scores$id <- rownames(data.scores)
#
#
# species.scores <- as.data.frame(scores(nmds, "species"))
# species.scores$var <- rownames(species.scores)
#
# en_coord_cont = as.data.frame(scores(en, "vectors")) * ordiArrowMul(en)
# en_coord_cont$variables <- rownames(en_coord_cont)
# en_coord_cat = as.data.frame(scores(en, "factors")) * ordiArrowMul(en)
#
#
#
# ggplot(data = data.scores, aes(x = NMDS1, y = NMDS2)) +
# geom_point(data = data.scores, aes(colour = bvg), size = 3, alpha = 0.5) +
# # scale_colour_manual(values = c("orange", "steelblue")) +
# geom_segment(aes(x = 0, y = 0, xend = NMDS1, yend = NMDS2),
# data = en_coord_cont, size =1, alpha = 0.5, colour = "grey30") +
# geom_point(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# shape = "diamond", size = 4, alpha = 0.6, colour = "navy") +
# geom_point(data = species.scores, aes(x = NMDS1, y = NMDS2, shape = var), size = 3) +
# geom_text(data = en_coord_cat, aes(x = NMDS1, y = NMDS2),
# label = row.names(en_coord_cat), colour = "navy", fontface = "bold") +
# geom_text(data = en_coord_cont, aes(x = NMDS1, y = NMDS2), colour = "grey30",
# fontface = "bold", label = row.names(en_coord_cont)) +
# theme(axis.title = element_text(size = 10, face = "bold", colour = "grey30"),
# panel.background = element_blank(), panel.border = element_rect(fill = NA, colour = "grey30"),
# axis.ticks = element_blank(), axis.text = element_blank(), legend.key = element_blank(),
# legend.title = element_text(size = 10, face = "bold", colour = "grey30"),
# legend.text = element_text(size = 9, colour = "grey30")) +
# labs(colour = "Site", shape = "Group")
# ggsave(getDataPath("Figures", "landscape_nmds_colouredbvg.jpg"))
# temp_total, moon, natural_cover_3k, np_landscape_3k, tca_landscape_325
# PERMANOVA for Booroopki: columns 6:8 (bird, frog, insect abundances,
# range-standardised above) modelled on diel period, rainfall and number of
# patches (325 m).
# NOTE(review): unlike the other sections the response, not the predictors,
# was normalised upstream -- confirm this is intentional.
PERMANOVA <- adonis2(dataframe_norm_landscape[,6:8]~ dataframe_norm_landscape$period + dataframe_norm_landscape$rain_value + dataframe_norm_landscape$np_landscape_325)
PERMANOVA
# result$conv <- as.character(nmds$converged)
# result$stress <- as.numeric(nmds$stress)
# result$permanova_F <- as.numeric(PERMANOVA$F.Model[1])
# result$permanova_R2 <- as.numeric(PERMANOVA$aov.tab$R2[1])
# result$permanova_p <- as.numeric(PERMANOVA$aov.tab$`Pr(>F)`[1])
# result <- as.data.frame(result)
|
98bedca915663edd10fa1725c50b23934870eef4 | ae4a257cc62e8ee4b90fda7e877750e59c0d8ba3 | /R/fun_plotSingleTS.R | 9113531523bf4548c66a5d7e09a4a41d14d53537 | [] | no_license | davidlamcm/Rtoolbox | 824a5f189c5e3ec89f841fe4e73f85605d894d6a | 238a6881f4df4a47365aeb0a2786fed62899e537 | refs/heads/master | 2021-01-19T14:09:35.775475 | 2018-08-22T15:16:44 | 2018-08-22T15:16:44 | 88,128,106 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 801 | r | fun_plotSingleTS.R | #' this function will be deprecated
#' wrapper for ggplot
#' @export
plotSingleTS <-function(ts, title){
if(class(ts) == "numeric"){
df = data.frame(date = as.Date(names(ts)), fx = ts)
}else if(class(ts) == "matrix"){
df = data.frame(date = as.Date(rownames(ts)), fx = ts)
}else if(class(ts) == "data.frame"){
df = data.frame(date = as.Date(rownames(ts)), fx = ts)
colnames(df) = c("date", "fx")
}else if(class(ts) == "zoo"){
df = data.frame(date = as.Date(index(ts)), fx = ts)
}else{stop(paste("method for type", class(ts), "not defined"))}
if(!missing(title)){
return(ggplot(data = df, aes(x=date, y= fx, group =1)) + geom_line()+ ggtitle(title))
}else{
return(ggplot(data = df, aes(x=date, y= fx, group =1)) + geom_line() )
}
}
|
48de56a8bfbd15cc0cd2e0fb5c7aca3843d7bbaa | b868dffc1ee10e55c092fef9f8d48b1131d137a9 | /rcourse_shiny/00_hello_world/ui.R | 7571fa1e988a691ad0a69144681a41b08d641299 | [] | no_license | simecek/small_shiny_projects | 5c7c0d4a42f4fa897309f964b5a8458362eacfe6 | 0ed3272674dd327587ea9d5ace35f0853c436f20 | refs/heads/master | 2016-09-05T14:51:08.282230 | 2015-06-03T20:21:53 | 2015-06-03T20:21:53 | 15,556,534 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 308 | r | ui.R | # User Interface
shinyUI(pageWithSidebar(
# Application title
headerPanel("Minimal Shiny App"),
# Sidebar with inputs
sidebarPanel(
textInput(inputId = "name", label = "Your name:", value = "World")
),
# Main panel with outputs
mainPanel(
textOutput("greeting")
)
))
|
84375cffe6cf5751c1318f43589192557934f1b5 | f1eed5032325b7b0c1c973ef8901855f3c9b2b0f | /Adamo_et_al_Rscript.R | dba800a088735179cde6d4d82ae5a7855aee2a09 | [] | no_license | josucham/Statistical-analyses-optimal-sampling-size-in-Mediterranean-forest-ecosystems | b6d02e8725b1a6bcfb074ed4edd50bd9d6ab50bb | 86c1cfee3ece3d792fdeca0fd21603c97b301945 | refs/heads/main | 2023-02-27T01:46:31.823803 | 2021-02-08T15:53:41 | 2021-02-08T15:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,911 | r | Adamo_et_al_Rscript.R | ###############################################################
## ##
## Sampling forest soils to describe fungal diversity ##
## and composition. Which is the optimal sampling size in ##
## Mediterranean pure and mixed pine-oak forests? ##
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
## ##
## Adamo et al. 2020 Journal Name ##
## ##
###############################################################
#Last update: 2020
#Citation: XXcode citing X doi.
#This is the code used in Adamo et al. to perform the statistical analyses
#that can be used in similar works.
###############################################################
# TABLE OF CONTENTS #
# #
# Line 31: Paper briefing and aims #
# Line 51: Required libraries #
# Line 61: Simulating and Importing the data #
# Line 88: Formatting the data # #
# Line 116: iNEXT extrapolated curves (aim 1) # #
# Line 171: Variance of Bray-Curtis matrix (aim 2) #
# Line 248: NMDS of differences between sample pools (aim 2) # #
# Line 376: Using the beta-indices (tbi, aim 3) #
# # #
###############################################################
#Briefing:####
#By using high throughput-DNA sequencing data (Illumina MiSeq), we identified the minimum number of pooled samples needed to obtain a reliable description of fungal communities
#in terms of diversity and composition in three different Mediterranean forests' stands..
#Twenty soil samples were randomly taken and kept separated in each of the three plots per forest type. After keeping one random sample which was not pooled, we obtained 5 composite samples
#consisting of pools of 3, 6, 10, 15 and 20 samples. We then sequenced the ITS2 using Illumina MiSeq.
#We further tested:
#1. The effect of sample pooling on fungal diversity, the iNEXT function was used to build
#rarefactions curves pooling together the individual samples.
#2. The variance of Bray-Curtis matrix between the number of sample pools for each forest type was compared
#using the betadisper function which is analogue to a Levene's test.
#3. Beta-diversity patterns and whether the core of most abundant fungal species
#is maintained between sites, we evaluated for each pool the species (or abundances-per-species)
#losses (B) and species gains (C) using the beta-indices (tbi function, Legendre, 2019, Ecology and Evolution).
#We used one-sample pool per each forest (sample 1) as a reference, and we compared pools with
#increasing number of samples (sample 3, 6, 10, 15 and 20) to identify species losses and gains.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#Required libraries####
library(vegan)
library(lattice)
library(ggplot2)
library(ggpubr)
library(adespatial)
library(iNEXT)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#Data simulation####
#The original data is still used in different unpublished works, thus we simulate a similar dataset here.
spp_data<-read.table("OTU_table.txt", header=T)# similar dataset to the original and you can perform your own simulation
m_env<-read.table("attribute_matrix.txt", header=T)
sps <- 2800# we can tell the number of species we want in our dataset
datos <- spp_data[,1:sps]#we take the number of species from the dataset we have previously imported, which corresponds here to 2800 species
vec <- c(as.matrix(datos))
vec2 <- round(runif(length(vec[vec>0]), min=0, max=100),0)#here we simulate the data changing the abundance of the species but keeping the same richness
vec[vec>0] <- vec2
df <- data.frame(matrix(vec, nrow(datos), sps))
names(df) <- paste0("sp", c(1:sps))
df
row.names(df) <- m_env$SAMPLE# this is the final simulated data
#read data
m_env<-read.table("attribute_matrix.txt", header=T)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#Formatting the data####
#we transform the data per forest type for downstream analyses
mixed1 <- df[1:18,]
mixed1.hell <- decostand(mixed1, 'hell')
Mixed_att <- factor(m_env[1:18, -c(1:3)])
Mixed_att
Pinus_att <- factor(m_env[36:53,-c(1:3)])
Pinus_att
Quercus_att <- factor(m_env[19:35, -c(1:3)])
Quercus_att
Pinus <-df[36:53,]
P_env <- factor(m_env[36:53,-c(1:3)])
Pinus.hell <- decostand(Pinus, 'hell')
Quercus <- df[19:35,]
Q_env <- factor(m_env[19:35, -c(1:3)])
Q_env
Quercus.hell <- decostand(Quercus, 'hell')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# iNEXT extrapolated curves between forest types (aim 1) ####
# We use the function iNEXT to assess the differences in richness between the number of soil sample pools per each forest type
# therefore, we will set the order q in the iNEXT function to zero
# sub corresponds to the composite samples, and the number indicate the numbers of subsamples that represent the given composite sample.
#Mixed stands, we create one matrix for each composite sample number and we will do so for each forest type
Sub_1<- as.matrix(t(df[c(1,7,13), ]))
Sub_3<- as.matrix(t(df[c(2,8,14), ]))
Sub_6 <- as.matrix(t(df[c(3,9,15),]))
Sub_10 <- as.matrix(t(df[c(4,10,16), ]))
Sub_15 <- as.matrix(t(df[ c(5,11,17), ]))
Sub_20 <- as.matrix(t(df[c(6,12,18), ]))
mixed1_hill <- list(SU.1 = Sub_1, SU.3 = Sub_3, SU.6 =Sub_6 , SU_10 = Sub_10 , SU_15 = Sub_15, SU_20 =Sub_20 )
typemixed1 <- lapply(mixed1_hill, as.abucount)
curve_mixed1 <- iNEXT((typemixed1 ), q=c(0), datatype = "abundance", endpoint = 100000 )#we want to test the differences in richness between composite samples
# richness so we will only add in the function q=c(0), which is the hill number 0 that stands for richness
curve_mixed1$DataInfo
mixed1_plot <- ggiNEXT(curve_mixed1 , type=1, color="site") ;mixed1_plot
#rarefaction curves in Pinus stands
Sub_1<- as.matrix(t(df[c(36,42,48), ] ))
Sub_3<- as.matrix(t(df[c(37,43,49), ]))
Sub_6 <- as.matrix(t(df[c(38,44,50),]))
Sub_10 <- as.matrix(t(df[c(39,45,51), ]))
Sub_15 <- as.matrix(t(df[c(40,46,52),]))
Sub_20 <- as.matrix(t(df[c(41,47,53),]))
Pinus_hill <- list(s.1 = Sub_1, s.3 = Sub_3, s.6 = Sub_6 , s_10 = Sub_10, s_15 = Sub_15 , s_20 =Sub_20 )
typePinus <- lapply(Pinus_hill, as.abucount)
curve_Pinus <- iNEXT((typePinus), q=c(0), datatype = "abundance", endpoint = 140000)
curve_Pinus$DataInfo
Pinus_plot <- ggiNEXT(curve_Pinus , type=1, color="site");Pinus_plot
#rarefaction curves in Quercus stands
Sub_1<- as.matrix(t(df[c(19,25,31), ]) )
Sub_3<- as.matrix(t(df[c(20,26,32), ]))
Sub_6 <- as.matrix(t(df[c(21,27), ]))
Sub_10 <- as.matrix(t(df[c(22,28), ]))
Sub_15 <- as.matrix(t(df[c(23,29,34), ]))
Sub_20 <- as.matrix(t(df[c(24,30,35), ]))
Quercus_hill<- list(ss.1 = Sub_1, ss.3 = Sub_3, ss.6 = Sub_6 , ss_10 = Sub_10, ss_15 = Sub_15, ss_20 =Sub_20 )
typeQuercus = lapply(Quercus_hill, as.abucount)
curve_Quercus<- iNEXT((typeQuercus), q=0, datatype = "abundance", endpoint = 150000)
curve_Quercus$DataInfo
Quercus_plot <- ggiNEXT(curve_Quercus , type=1, color="site");Quercus_plot
ggarrange(Pinus_plot, Quercus_plot,mixed1_plot, labels = c("a)", "b)", "c)"), nrow = 1, ncol = 3, common.legend= TRUE)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#Betadisper to test the variance of Bray-Curtis matrix between the number of sample pools (Aim 2)####
par(mfrow=c(1,1))
#Mixed forest
Mixed <- df[1:18,]
Mixed.hell <- decostand(Mixed, 'hell')
Mixed.m_beta <- vegdist(Mixed.hell, method = "bray")
mod_mix <- betadisper(Mixed.m_beta, Mixed_att , type = "centroid")
plot(mod_mix)
anova(mod_mix)
dfm <- data.frame(Distance_to_centroid=mod_mix$distances,Group=mod_mix$group)
groups <- mod_mix$group
m<- ggplot(data=dfm,aes(x=groups,y=Distance_to_centroid, fill= groups))+
geom_boxplot(alpha=0.5)+
scale_fill_brewer(palette = "Set1", name = "N. of sample pools", labels = c("1", "3", "6", "10", "15", "20")) +
ggtitle("Mixed" ) +
xlab("N. of sample pools") +
scale_x_discrete(labels=c("1", "3", "6", "10", "15", "20"))+
ylab("Distance to centroid") +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5))+
theme(legend.position="top")+
theme(panel.grid = element_blank());m
#now we do the same for Pinus.
Pinus_bd <- df[36:53,]
Pinus_bd.hell <- decostand(Pinus_bd , 'hell')
dist.Pinus_bd <- vegdist(Pinus_bd.hell, method = "bray")
modt_P <- betadisper(dist.Pinus_bd , P_env, type = "centroid")
spp_Pinus_bd <- data.frame(Distance_to_centroid=modt_P$distances,Group=modt_P$group)
hsd = TukeyHSD(modt_P)
groups <- modt_P$group
betadisp_Pinus<- ggplot(data=spp_Pinus_bd,aes(x=Group,y=Distance_to_centroid, fill= Group))+
geom_boxplot(alpha=0.5)+
scale_fill_brewer(palette = "Set1", name = "N. of sample pools", labels = c("1", "3", "6", "10", "15", "20")) +
xlab("N. of sample pools") +
scale_x_discrete(labels=c("1", "3", "6", "10", "15", "20"))+
ggtitle("Pinus_s")+
ylab("Distance to centroid") +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5, face= "italic"))+
theme(legend.position="top")+
theme(panel.grid = element_blank());betadisp_Pinus
anova(modt_P)
TukeyHSD(modt_P)
# and now we perform the betadisper for Quercus
Quercus_bd <- df[19:35,]
Quercus_bd_hell <- decostand(Quercus_bd , 'hell')
dist.Quercus_bd <- vegdist(Quercus_bd_hell, method = "bray")
modt_Q <- betadisper(dist.Quercus_bd, Q_env, type = "centroid")
anova(modt_Q)
TukeyHSD(modt_Q)
spp_Quercus_bd <- data.frame(Distance_to_centroid=modt_Q$distances,Group=modt_Q$group)
groups <- modt_Q$group
Quercus_betadisp<- ggplot(data=spp_Quercus_bd,aes(x=Group,y=Distance_to_centroid, fill= Group))+
geom_boxplot(alpha=0.5)+
scale_fill_brewer(palette = "Set1", name = "N. of sample pools", labels = c("1", "3", "6", "10", "15", "20")) +
xlab("N. of sample pools") +
ggtitle("Quercus_r") +
scale_x_discrete(labels=c("1", "3", "6", "10", "15", "20"))+
ylab("Distance to centroid") +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5, face= "italic"))+
theme(legend.position="top")+
theme(panel.grid = element_blank());Quercus_betadisp# we plotted the bestadisper results using ggplot2
# NMDS to diplay the lack of compositional differences between number of soil sample pools (aim 2) ####
# We use nmds to assess that there no difference in species composition between sample pools (between the composite sample)
par(mfrow=c(1,1))
spnumfrec<- specnumber(df ,MARGIN=2)
spnumfrec
m_spp<-df[,spnumfrec>6]#we look at the species present in more than 10% of the sites
m_spp2_h <- decostand(m_spp, "hell")
nmds <- metaMDS(m_spp2_h, distance = "bray")
plot(nmds, type = "n")
points(nmds, pch=20, col=as.numeric(m_env$Subsample))
ordiellipse(nmds,m_env$Subsample ,show.groups="1",kind="sd",conf=0.95, col=6,lwd=2,lty=1,font=2,label = T)
ordiellipse(nmds, m_env$Subsample,show.groups="3",kind="sd",conf=0.95,col=2,lwd=2,lty=2,font=2,label = T)
ordiellipse(nmds, m_env$Subsample,show.groups="6",kind="sd",conf=0.95,col=3,lwd=2,font=2,label = T)
ordiellipse(nmds, m_env$Subsample,show.groups="10",kind="sd",conf=0.95, col=5,lwd=2,lty=2,font=2,label = T)
ordiellipse(nmds, m_env$Subsample,show.groups="15",kind="sd",conf=0.95, col=4,lwd=2,lty=3,font=2,label = T)
ordiellipse(nmds, m_env$Subsample,show.groups="20",kind="sd",conf=0.95, col=9,lwd=2,lty=3,font=2,label = T)
adonis(m_spp2_h~m_env$Subsample)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#Assessing species losses (B) and species gains (C) using the beta-indices (tbi). Composite sample 1 used as reference and comparing the values with the other composite samples (aim 3)####
#Mixed forest
T1 <- as.data.frame((df [c(1,7,13),]))
M3_T2 <-as.data.frame((df [c(2,8,14),]))
M6_T2 <- as.data.frame(df [c(3,9,15),])
M10_T2 <-as.data.frame(df [c(4,10,16), ])
M15_T2 <- as.data.frame(df[c(5,11,17), ])
M20_T2 <- as.data.frame(df [c(6,12,18), ])
#to get the permutation to work the function TBI must be changed to randomise it with n >2
#comparing composite sample 1 with composite sample 3
M3.TBI <- TBI(T1, M3_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
M3.TBI$t.test_B.C #non significant p-value because of the low number of samples used in the permutations
#comparing composite sample 1 with composite sample 6
M6.TBI <- TBI(T1,M6_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
M6prova.TBI$t.test_B.C
#comparing composite sample 1 with composite sample 10
M10.TBI <- TBI(T1,M10_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
M10.TBI$t.test_B.C
#comparing composite sample 1 with composite sample 15
M15.TBI <- TBI(T1,M15_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
M15.TBI$t.test_B.C
#comparing composite sample 1 with composite sample 20
M20.TBI <- TBI(T1,M20_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
M20.TBI$t.test_B.C
PT1 <- as.data.frame(df[c(36,42,48),])
P3_T2 <- as.data.frame(df[c(37,43,49),])
P6_T2 <- as.data.frame(df[c(39,44,50),])
P10_T2 <- as.data.frame(df[c(40,45,51),])
P15_T2 <- as.data.frame(df[c(41,46,52),])
P20_T2 <- as.data.frame(df[c(42,47,53),])
#Pinus comparing composite sample 1 with composite sample 3
P_3.TBI <- TBI(ST1, S3_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
P_3.tbi <- as.data.frame(sp1_3.TBI$BCD.mat)
#Pinus comparing composite sample 1 with composite sample 6
P_6.TBI <- TBI(ST1,S6_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
P_6.tbi <- as.data.frame(sp1_6.TBI$BCD.mat)
#Pinus comparing composite sample 1 with composite sample 10
P_10.TBI <- TBI(ST1,S10_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
P_10.TBI$t.test_B.C
#Pinus comparing composite sample 1 with composite sample 15
P_15.TBI <- TBI(ST1,S15_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
P_15.TBI$t.test_B.C
P_20.TBI <- TBI(ST1, S20_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
P_20.TBI$t.test_B.C
#Quercus
QT1 <- as.data.frame(df[c(19,25,31),])
Q3_T2 <- as.data.frame(df[c(20,26,32), ])
Q6_T2 <- as.data.frame(df[c(21,33), ])
QT1.1 <-df[c(1,7), ]#otherwise is not possible to compare it with Q10
Q10_T2 <- as.data.frame(df[c(22,28), ])
Q15_T2 <- as.data.frame(df[c(23,29,34), ])
Q20_T2 <- as.data.frame(df[c(24,30,35),])
#tree sp2 comparing composite sample 1 with composite sample 3
Q_3.TBI <- TBI(Q=RT1,R3_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
Q_3.tbi <- as.data.frame(sp2_3.TBI$BCD.mat)
#comparing composite sample 1 with composite sample 6
Q_6.TBI <- TBI(RT1,R6_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
#comparing composite sample 1 with composite sample 10
Q_10.TBI <- TBI(RT1,R10_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
#comparing composite sample 1 with composite sample 15
Q_15.TBI <- TBI(RT1,R15_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
Q_15.tbi <- sp2_15.TBI$BCD.mat
#comparing composite sample 1 with composite sample 20
Q_20.TBI <- TBI(TT1,R20_T2, method = "%diff", nperm = 999, test.t.perm = TRUE)
|
6c921edbdba2d457e1555732f18865710a8c7083 | 547d9593d2808563e9a3abb970cf39d40d0cf024 | /man/concept_list.Rd | 7612e7ad0ec2d17f0f0a7242406153893bfce7d7 | [] | no_license | dncnbrn/EmsiR | c5176d65ea2661c69c50b755f4e5c530638b25b1 | 2d643be0af6b9b8838af65cb57a763dd209f46f8 | refs/heads/master | 2021-03-24T12:54:31.031726 | 2018-02-23T13:30:42 | 2018-02-23T13:30:42 | 75,728,416 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 402 | rd | concept_list.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listing.R
\name{concept_list}
\alias{concept_list}
\title{Query concepts used on the Episteme API}
\usage{
concept_list()
}
\value{
A full listing of concepts with description by Country and Concept.
}
\description{
Queries the Episteme API regarding concepts used to structure data and returns them in an organised list.
}
|
82a9a39a46efae5e92969de23b0c55fc1493f3f6 | 04b076ef7e022127fae4431d99683eb1ae3651f4 | /sequence.writer-1.r | 9726ceb05321f20b00102298d6b8c462d333f286 | [] | no_license | jyhwang13/Malaria | ee0054dd081b88aa571670a669f0ba0481f20558 | 897400df5506e6de2c392af525a3d3834cf89a13 | refs/heads/master | 2022-08-25T16:24:06.228440 | 2020-05-22T03:35:12 | 2020-05-22T03:35:12 | 263,752,095 | 1 | 0 | null | 2020-05-22T03:35:13 | 2020-05-13T21:57:44 | R | UTF-8 | R | false | false | 2,387 | r | sequence.writer-1.r | ## Read in the data and references ##
## Set your working directory to Malaria if you're using GitHub Desktop ##
#library("seqinr")
#reference.genome <- read.fasta("Data/LR605956.fasta", as.string=TRUE, seqonly=TRUE)
#ref <- read.table("Data/ref.tab", header=TRUE, sep="\t")
#non <- read.table("Data/non.tab", header=TRUE, sep="\t")
#var <- read.table("Data/var.tab", header=TRUE, sep="\t")
#g.c <- unlist(strsplit(unlist(reference.genome),"")) # converts to vector
## Sequence Writer Script ##
sequences <- matrix(g.c, nrow = length(g.c), ncol = length(ref[1,])) # make as many copies of the reference genome as we have samples
test.places <- c(1,87,166,363)
#,562,694,1685,2035,3041,4256,4348, 4413, 4862,5002, 5030, 5123,
#5474, 5738, 5755, 5769, 6019,6150, 6516, 6558, 6583, 6822, 6892, 6926, 7028)
# Mauritania, Gambia, Guinea, Gambia, Kenya, Thailand, Tanzania, Ghana, Cambodia, Indonesia,
# Burkina Faso, Mali, Papua New Guinea, Peru, Bangladesh, Malawi, Vietnam, Colombia, Uganda,
# Myanmar, Laos, Congo DR, Nigeria, Madagascar, Camaroon, Ivory Coast, Ethiopia, Benin, Senegal
# 3039-3041 has weird entries?? Errors??
sequences.test <- matrix(g.c, nrow = length(g.c), ncol = length(test.places))
## This Code ONLY does SNPs ##
for(i in 1:length(test.places)) # cycle thru our test places
{
for (j in 1:length(ref[,1])) # cycle through all positions on genome
{
if((nchar(as.character(var[j,3])) == 1) && (nchar(as.character(var[j,4])) == 1)) # restricts us to just SNPs
{
pos <- ref[j,2] # which nucleotide does the position start on?
if(ref[j,test.places[i]+2] <= non[j,test.places[i]+2]) # does the alternative outweigh the reference?
{
sequences.test[pos,i] <- as.character(var[j,4]) # change the "pos" position of the ith genome to the alt nucleotide (truncated)
} # else keep the reference
}
}
}
sample.names <- names(ref)
print(sample.names[test.places[2]+2])
i <- 1
this.sample <- sample.names[test.places[i]+2]
out.string <- paste('>',this.sample,sep="")
#out.seq <- sequences.test[,i]
file.name <- paste(this.sample,".fasta",sep="")
out.seq <- paste(sequences.test[,i], sep="", collapse="")
write.table(file=file.name,x=out.string,col.names=FALSE,row.names=FALSE,sep="",quote=FALSE)
write.table(file=file.name,x=out.seq,col.names=FALSE,row.names=FALSE,sep="",quote=FALSE,append=TRUE)
|
22354ec36d650f17d68627afb48a3e0214c8eba8 | 18d34c00f43e3fe1478c8fee96fcb1fddba0159c | /easyRasch/man/likelihood.Rd | bb162010973e6bb442b4fa1a5c43f3f54df53274 | [] | no_license | noahb4626/midterm | 21392e4be11d0bb34c8115a0d79582d54bde02ce | 48b5d7a4ac0f383a416cae495b231493c16af2ca | refs/heads/master | 2021-04-09T13:23:46.854034 | 2018-03-17T23:08:03 | 2018-03-17T23:08:03 | 125,652,611 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 581 | rd | likelihood.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/likelihood.R
\name{likelihood}
\alias{likelihood}
\alias{likelihood,}
\alias{ANY-method}
\title{likelihood function}
\usage{
likelihood(raschObj = "Rasch", theta = "numeric")
}
\arguments{
\item{raschObj}{An object of class Rasch}
\item{theta}{A proposed value of theta_j}
}
\value{
The following
\item{likelihood}{The calculated likelihood from Equation 2 on assignment sheet}
}
\description{
likelihood function
}
\note{
Likelihood function
}
\author{
Noah Bardash: \email{noah.bardash@wustl.edu}
}
|
17caef72dd128275098fec0eb46d4836e2195e24 | 9199adb97f6dfa16fd72a30e938c576f95e0177a | /plot4.R | ff2a5e1025b7851125b426219ad29413f98017a8 | [] | no_license | Smally12345/ExData_Plotting1 | d512d5a1b4e43faba0d4b9d01667221bc1c07159 | 2237364a1c5b743a663a9d6a0e71c46d76191f38 | refs/heads/master | 2022-10-09T00:35:06.701944 | 2020-06-06T13:30:27 | 2020-06-06T13:30:27 | 269,905,419 | 0 | 0 | null | 2020-06-06T07:28:30 | 2020-06-06T07:28:30 | null | UTF-8 | R | false | false | 1,147 | r | plot4.R | #Import Libraries
library(sqldf)
# Load Data
df <- read.csv.sql("./exdata_data_household_power_consumption/household_power_consumption.txt","select * from file where Date = '1/2/2007' or Date = '2/2/2007' ",sep=";")
# Combining Date and Time variables
df$DateTime <- paste(df$Date, df$Time)
df$DateTime <- strptime(df$DateTime, format = "%d/%m/%Y %H:%M:%S")
#plotting
par(mfrow = c(2,2))
# Top Left
plot(df$DateTime, df$Global_active_power, xlab="datetime", ylab="Global Active Power", type = "l")
# Top Right
plot(df$DateTime, df$Voltage, xlab="datetime", ylab="Voltage", type = "l")
# Bottom Left
plot(df$DateTime, df$Sub_metering_1, xlab = "datetime", ylab = "Energy sub metering", type ="l")
lines(df$DateTime, df$Sub_metering_2, col = "red")
lines(df$DateTime, df$Sub_metering_3, col = "blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), col=c("black","red", "blue"), cex=0.5, y.intersp = 0.5, box.lty = 0, bty = "n")
#Bottom Right
plot(df$DateTime, df$Global_reactive_power, xlab="datetime", ylab="Global Active Power", type = "l")
dev.copy(png, file = "plot4.png")
dev.off()
|
22046dd0b41d6997bcab1f5a5a365fae2b7c0379 | cacdb82f37df0974a142c1d6689eb70a815e46f2 | /man/containsJunctions-function.Rd | 8a823002a4c13d05572558682cc3cda60051eccc | [] | no_license | xiahui625649/ASpli | 63b9f76d6cf2cd48893213d93d0906e0d54c6811 | 489d61db50d6a43cfaabbeb31f0f83fedde1d1ea | refs/heads/main | 2023-03-15T00:51:53.602547 | 2021-03-10T17:28:28 | 2021-03-10T17:28:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 952 | rd | containsJunctions-function.Rd | \name{ Examine ASpliDU objects }
\alias{containsJunctions}
\alias{containsGenesAndBins}
\title{ Examine ASpliDU objects }
\description{
AspliDU object may contain results of differential expression of genes,
differential usage of bins and junctions, however not everything is
calculated at the same or even present. Calculations for genes and bins can
be done independently from junctions. Functions \code{containsJunctions} and
\code{containsGenesAndBins} allow to interrogate an ASpliDU object about the
kind of results it contain.
}
\usage{
containsJunctions( du )
containsGenesAndBins( du )
}
\arguments{
\item{ du }{
An ASpliDU object.
}
}
\value{
A logical value that indicates that results for genes and bins, or results for
junctions are available in the object. }
\author{ Estefania Mancini, Andres Rabinovich, Javier Iserte, Marcelo Yanovsky, Ariel Chernomoretz }
\examples{
# see ASpli package
}
|
726c1ae8088aba921fd245e975af812102ae89bb | f8fefd6e05f938595ca789108d4d10b958a3d0e1 | /Assignment 7_2.R | a6953e63767b8339a05df211fcaa52f071f4bb88 | [] | no_license | vadde18srinu/Assignment-7.2 | 937a849109c9941e631b858c2f8b5b0656e7f9c6 | d6c9529bf307d245d64773b777fdc6bfc3be9090 | refs/heads/master | 2020-03-27T00:02:48.317548 | 2018-08-21T16:16:58 | 2018-08-21T16:16:58 | 145,589,293 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,244 | r | Assignment 7_2.R | 1. Write a program to create barplots for all the categorical columns in mtcars.
library(tidyr)
library(ggplot2)
library(purrr)
# or `library(tidyverse)
df<-data.frame(mtcars,row.names = NULL, stringsAsFactors = default.stringsAsFactors())
df
# all categorical columns
mtcars %>%
keep(is.numeric) %>%
gather() %>%
ggplot(aes(value)) +
facet_wrap(~ key, scales = "free") +
geom_bar()
# barplots for categorical one column "carb" barplot.
barplot (table(mtcars$carb),
main = "Car Distribution",
xlab = "Numbers of carb",
col = c("darkblue", "green", "red","yellow","lightblue","darkgreen"),
names.arg = c("1carb","2carb","3carb","4carb", "6carb", "8carb"))
2. Create a scatterplot matrix by gear types in mtcars dataset.
#pairs(~mpg+am+cyl+wt+qsec+vs, data=mtcars,
#pairs(~mpg+disp+hp+drat+gear+carb, data=mtcars,
pairs(~mpg+., data=mtcars,
main="mtcars Scatterplot Matrix")
3. Write a program to create a plot density by class variable.
names(mtcars)
rownames(mtcars)
sapply(mtcars,class)
# Filled Density Plot
d <- density(mtcars$class)
plot(d, main="class variables")
polygon(d, col="lightblue", border="red")
|
b7c9ea3c4ee784ce16d007c2a978111e1d84c7e0 | a375669877a8bf6cbd6e1359a8737bbd0620ad5b | /analyzeResults.R | 39a18464c30b23174a679d827c95a4cc9317ae9f | [
"MIT"
] | permissive | joongchan1/abmodelr | 138fc640a9057af5bccdef0903d0e3430e226b1c | 0c36e82c805f6d2bbb7d903cc1bcc7bec5e38e2e | refs/heads/master | 2023-03-19T00:40:12.321329 | 2020-08-27T09:35:09 | 2020-08-27T09:35:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 943 | r | analyzeResults.R | library(tidyverse)
library(DT)
library(data.table)
library(yaml)
library(rstudioapi)
file <- selectFile(caption = "Select File", label = "Select",
path = getActiveProject(), filter = "All YAML Files (*.yml)",
existing = TRUE)
config <- read_yaml(file)
# read results
rds_filename <- config$outputfilename
results_data <- read_rds(rds_filename)
user <- results_data$user
exposure <- results_data$exposure
# analyze results
user %>% ggplot() +
aes(topic_1) + geom_histogram()
names(exposure) <- paste0("V", 1:dim(exposure)[2])
df <- as_tibble(exposure) %>%
rownames_to_column() %>%
rename(news_post = rowname) %>%
gather(step, value, -news_post) %>%
mutate(step = str_remove(step, "V")) %>%
mutate(step = as.numeric(step)) %>%
mutate(news_post = factor(news_post))
df %>% ggplot() +
aes(x = step, y = value, color = news_post) +
geom_line() +
guides(color = FALSE)
|
137fc8282f323e807ba439d49d26185a7a387e4e | b2530aca6ec1073942c76a6cf5fff058dd3de7f4 | /scripts/MortonArb_Phenology_AnnualReport_2022-12_YearEnd.R | b99408df5e1942f7b4d8e59a4163465be40d784b | [] | no_license | MortonArb-ForestEcology/Phenology_LivingCollections | 27f8565ad37a979cfb247148065883a9368b7c87 | 0e650f99acf6d0393ac545c4cd7afe465a644cc5 | refs/heads/master | 2023-09-01T01:25:55.313477 | 2023-08-22T13:47:35 | 2023-08-22T13:47:35 | 87,115,785 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 33,875 | r | MortonArb_Phenology_AnnualReport_2022-12_YearEnd.R | # A new script with some combined collections analyses/graphs for the end of the year report
library(ggplot2)
###setting the file path to mac or windows##
# Root of the shared Google Drive; on Windows change this one line to "G:/My Drive/".
path.google <- "/Volumes/GoogleDrive/My Drive/" # Mac
# BUG FIX: path.out previously pasted the Windows root ("G://My Drive/...") onto the
# Mac root via file.path(), and path.figs hard-coded the Windows root outright, so
# neither path resolved on this (Mac) machine.  Build both from path.google so only
# the line above changes between platforms.
path.out <- file.path(path.google, "LivingCollections_Phenology/Reports")
path.figs <- file.path(path.out, "2022_02_EndOfYear_Report/figures_2022_end")
if(!dir.exists("../data")) dir.create("../data/")
if(!dir.exists("../figures/")) dir.create("../figures/")
# figures are saved into path.figs below (png calls), so make sure it exists
if(!dir.exists(path.figs)) dir.create(path.figs, recursive=TRUE)
# -----------------------------------
# 1. Arb Data
# -----------------------------------
source("clean_google_form.R")

# Helper: pull one collection-year of observations from the google form,
# tag every row with its collection name (as a factor), and add a calendar
# Year column taken from the observation date.
get.pheno.year <- function(collection, yr) {
  dat <- clean.google(collection=collection, dat.yr=yr)
  dat$Collection <- as.factor(collection)
  dat$Year <- lubridate::year(dat$Date.Observed)
  dat
}

# Acer (maples): 2019-2022
acer22 <- get.pheno.year("Acer", 2022)
summary(acer22)
acer21 <- get.pheno.year("Acer", 2021)
summary(acer21)
acer20 <- get.pheno.year("Acer", 2020)
summary(acer20)
acer19 <- get.pheno.year("Acer", 2019)
summary(acer19)

# Quercus (oaks): 2018-2022
quercus22 <- get.pheno.year("Quercus", 2022)
summary(quercus22)
quercus21 <- get.pheno.year("Quercus", 2021)
summary(quercus21)
quercus20 <- get.pheno.year("Quercus", 2020)
summary(quercus20)
quercus19 <- get.pheno.year("Quercus", 2019)
summary(quercus19)
quercus18 <- get.pheno.year("Quercus", 2018)
summary(quercus18)

# Ulmus (elms): 2020-2022
ulmus22 <- get.pheno.year("Ulmus", 2022)
summary(ulmus22)
ulmus21 <- get.pheno.year("Ulmus", 2021)
summary(ulmus21)
ulmus20 <- get.pheno.year("Ulmus", 2020)
summary(ulmus20)

# Tilia (lindens): only 2022 data exists
tilia22 <- get.pheno.year("Tilia", 2022)
summary(tilia22)
#binding the multi-year collections into one table; Tilia is left out because
# it only has a single year of data (2022), so it cannot contribute to the
# year-over-year comparisons below
dat.all <- rbind(ulmus22, quercus22, acer22, ulmus20, ulmus21, quercus18, quercus19, quercus20, quercus21, acer19, acer20, acer21)
dat.all$yday <- lubridate::yday(dat.all$Date.Observed) # day of year (1-365/366)
summary(dat.all)
#creating a df for spring only obs for quercus and acer since we did not collect data for ulmus in spring 2021 or any trees in spring 2020
dat.spring <- rbind(quercus22, acer22, quercus18, quercus19, quercus21, acer19, acer21)
# NOTE(review): the day-of-year column is named 'spring' here but 'yday'
# everywhere else in this script -- confirm downstream code expects 'spring'
dat.spring$spring <- lubridate::yday(dat.spring$Date.Observed)
summary(dat.spring)
#########2022-only table: all four collections, including Tilia
dat.22 <- rbind(quercus22,acer22, ulmus22, tilia22)
dat.22$yday <- lubridate::yday(dat.22$Date.Observed)
summary(dat.22)
#Getting the date of first observed leaf color for each tree
###########
###########
# keep only observations where leaf color was recorded
dat.lc <- dat.all[dat.all$leaf.color.observed=="Yes", c("Date.Observed", "Species", "PlantNumber", "Year", "leaf.color.observed", "Collection")]
# drop rows with no accession (plant) number
dat.lc <- dat.lc[!is.na(dat.lc$PlantNumber),]
summary(dat.lc)
head(dat.lc)
#checking the minimum, maximum, range and mean of the dates fall color was observed on our trees.
#Note the na.rm=T on mean() which removes NA values before averaging
min(dat.lc$Date.Observed)
max(dat.lc$Date.Observed)
range(dat.lc$Date.Observed)
mean(dat.lc$Date.Observed,na.rm=T)
#convert observation date to day of year
dat.lc$yday <- lubridate::yday(dat.lc$Date.Observed)
summary(dat.lc)
# restrict to observations on/after day 180 (~June 29).
# NOTE(review): the original comment said "from 9/1 on", which would be day ~244,
# not 180 -- confirm which cutoff is intended
dat.llc <- dat.lc [dat.lc$yday>=180,]
summary(dat.llc)
#aggregate to the earliest (first) day leaf color was observed, per tree x species x year x collection
leaf.color <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.llc, FUN=min, na.rm=T)
summary(leaf.color)
head(leaf.color)
# colorblind-friendly palette; defined here but not referenced in the code
# visible below (the plots specify their own per-year colors)
cbp1 <- c("#999999", "#E69F00", "#56B4E9", "#009E73",
          "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
#Graphing --------------------------------------------------------------------
# FIX: the original chained a commented-out png() onto the ggplot with '+' (which
# never opens a device correctly) and then called dev.off() with no device open,
# which errors ("cannot shut down device 1").  Build each plot object, display
# it, then open the device, print, and close it explicitly.

# Density of first-leaf-color day by year, one panel per collection
plot.lc.density <- ggplot(data=leaf.color) +
  facet_grid(Collection~ .,scales="free_y") + # stack the collections vertically
  geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year),color=as.factor(Year))) +
  xlim(150, 365) +
  scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
  scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
  theme_bw() +
  labs(title="Mean Day of First Leaf Color Present", x="Day of Year", fill="Year")
plot.lc.density # display on screen
png(file.path(path.figs,"All_First_Leaf_Color.png"), height=4, width=6, units="in", res=320)
print(plot.lc.density)
dev.off()

# Histogram of first-leaf-color day (10-day bins) by year, one panel per collection
plot.lc.hist <- ggplot(data=leaf.color) +
  facet_grid(Collection~ . ) +
  geom_histogram(alpha=0.5, binwidth =10, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) +
  scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
  scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
  theme_bw() +
  labs(title="Leaf Color Present", x="Day of Year")
plot.lc.hist
# saved under a distinct name: the original commented png() reused
# "All_First_Leaf_Color.png", which would have overwritten the density plot
png(file.path(path.figs,"All_First_Leaf_Color_hist.png"), height=4, width=6, units="in", res=320)
print(plot.lc.hist)
dev.off()
### getting leaf color intensity ---------------------------------------------
# Observations where leaf color was recorded, keeping the intensity class
dat.lci <- dat.all[dat.all$leaf.color.observed=="Yes", c("Date.Observed", "Species", "Year", "PlantNumber", "leaf.color.intensity", "Collection")]
summary(dat.lci)
# drop rows with no accession (plant) number
dat.lci <- dat.lci[!is.na(dat.lci$PlantNumber),]
summary(dat.lci)
#Checking to make sure date ranges are correct
min(dat.lci$Date.Observed)
max(dat.lci$Date.Observed)
mean(dat.lci$Date.Observed)
range(dat.lci$Date.Observed)
#Setting my yday (day of year)
dat.lci$yday <- lubridate::yday(dat.lci$Date.Observed)
summary(dat.lci)
# restrict to later-season observations (day >= 200, ~mid-July onward)
dat.lci <- dat.lci [dat.lci$yday>=200,]
#dat.lci <- dat.lci [dat.lci$yday<=Sys.Date(),] # optional: drop dates after today
summary(dat.lci)
# Collapse to one row per tree x date x intensity class.
# BUG FIX: the original passed 'dat=' and 'NA.rm=T'.  R argument matching is
# case-sensitive, so 'NA.rm' did not match min()'s 'na.rm'; it fell into '...'
# and TRUE (=1) was passed to min() as data, making every group minimum 1.
# (This was masked by recomputing yday from Date.Observed afterward -- that
# recompute is now redundant and has been removed.)
dat.lci <- aggregate(yday ~ PlantNumber + Species + Year + Collection + leaf.color.intensity + Date.Observed , data=dat.lci, FUN=min, na.rm=TRUE)
summary(dat.lci)
head(dat.lci)
# Histogram of leaf-color observations by day of year, filled by intensity class.
# (The original's alpha=1.5 is out of range and is clamped to fully opaque, so it
# is simply omitted here.)
plot.lci <- ggplot(data=dat.lci) +
  geom_histogram(binwidth =10, aes(x=yday, fill=leaf.color.intensity)) +
  facet_grid(Collection~ .) +
  theme_bw() +
  labs(title="Leaf color Intensity", x="Day of Year")
plot.lci # display on screen
# FIX: png() was commented out, leaving dev.off() to error with no open device
png(file.path(path.figs,"Leaf_Present_Intensity.png"), height=4, width=6, units="in", res=320)
print(plot.lci)
dev.off()
###########
###########
#Getting a graph of colored leaf observations
###########
###########
# Frequency-polygon view of the leaf.color (first-color day) distribution
# computed in the section above, one panel per Collection.
#doing freq
ggplot(data=leaf.color) +
# png(file.path(path.figs,"All_First_Leaf_Color_freqpoly.png"), height=4, width=6, units="in", res=320)+
facet_grid(Collection~ .) + # This is the code that will stack everything
geom_freqpoly(alpha=0.5, bins = 45, aes(x=yday,color=as.factor(Year), fill=as.factor(Year))) +
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of First Leaf Color", x="Day of Year")
dev.off()
##########
##########
#Getting a graph of falling leaf observations
###########
###########
# Subset records where falling leaves were observed; keep identifying columns.
dat.fl <- dat.all[dat.all$leaf.falling.observed=="Yes", c("Date.Observed", "Species", "PlantNumber", "Year", "leaf.falling.observed", "Collection")]
dat.fl <- dat.fl[!is.na(dat.fl$PlantNumber),]
summary(dat.fl)
head(dat.fl)
#finding the minimum and maximum range and mean of the dates fall color was observed on our trees.
#Note the na.rm=T which is removing N/A values
min(dat.fl$Date.Observed)
max(dat.fl$Date.Observed)
range(dat.fl$Date.Observed)
mean(dat.fl$Date.Observed,na.rm=T)
#Now make my Yday
dat.fl$yday <- lubridate::yday(dat.fl$Date.Observed)
summary(dat.fl)
#only looking at trees that showed fall color in the last half of the year
dat.ffl <- dat.fl [dat.fl$yday>=180,]
summary(dat.ffl)
# First (minimum) falling-leaf day per plant/species/year/collection.
falling.leaves <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.ffl, FUN=min, na.rm=T)
summary(falling.leaves)
head(falling.leaves)
#Graphing
ggplot(data=falling.leaves) +
#png(file.path(path.figs,"All_First_Falling_Leaf_dens.png"), height=4, width=6, units="in", res=320)+
facet_grid(Collection ~ .) + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) +
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of First Falling Leaves", x="Day of Year")
dev.off()
###########
###########
#Getting a graph of breaking leaf bud observations
###########
###########
# Subset spring records where breaking leaf buds were observed.
dat.lb <- dat.spring[dat.spring$leaf.breaking.buds.observed=="Yes", c("Date.Observed", "Species", "PlantNumber", "Year", "leaf.breaking.buds.observed", "Collection")]
dat.lb <- dat.lb[!is.na(dat.lb$PlantNumber),]
summary(dat.lb)
head(dat.lb)
#finding the minimum and maximum range and mean of the dates breaking leaf buds were observed on our trees.
#Note the na.rm=T which is removing N/A values
min(dat.lb$Date.Observed)
max(dat.lb$Date.Observed)
range(dat.lb$Date.Observed)
mean(dat.lb$Date.Observed,na.rm=T)
#Now make my Yday
dat.lb$yday <- lubridate::yday(dat.lb$Date.Observed)
summary(dat.lb)
#only looking at trees that showed breaking leaf buds in the first half of the year
dat.lb <- dat.lb [dat.lb$yday<=180,]
summary(dat.lb)
#aggregating quercus.lf so it shows me the date of first breaking leaf buds for every plant number and species
breaking.buds <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.lb, FUN=min, na.rm=T)
summary(breaking.buds)
head(breaking.buds)
#Graphing
#png(file.path(path.figs,"Leaf_Breaking_Buds_dens.png"), height=4, width=6, units="in", res=320)
# NOTE(review): unlike the sibling plots this one omits theme_bw() — confirm
# whether that is deliberate.
ggplot(data=breaking.buds) +
facet_grid(Collection~ .) + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) +
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
labs(title="Average Day of First Breaking Leaf Buds", x="Day of Year")
dev.off()
###########
###########
#Getting a graph of leaves present observations
###########
###########
# Subset spring records where leaves were present.
dat.lp <- dat.spring[dat.spring$leaf.present.observed=="Yes", c("Date.Observed", "Species", "PlantNumber", "Year", "leaf.present.observed", "Collection")]
dat.lp <- dat.lp[!is.na(dat.lp$PlantNumber),]
summary(dat.lp)
head(dat.lp)
#finding the minimum and maximum range and mean of the dates leaf present was observed on our trees.
#Note the na.rm=T which is removing N/A values
min(dat.lp$Date.Observed)
max(dat.lp$Date.Observed)
range(dat.lp$Date.Observed)
mean(dat.lp$Date.Observed,na.rm=T)
#Now make my Yday
dat.lp$yday <- lubridate::yday(dat.lp$Date.Observed)
summary(dat.lp)
#only looking at trees that showed leaf present in the first half of the year
# NOTE(review): the cutoff is 250 (~Sept 7), not 180 — the comment above and
# the code disagree; confirm which is intended.
dat.lp <- dat.lp [dat.lp$yday<=250,]
#summary(dat.lp)
#aggregating quercus.lf so it shows me the date of first leaf present for every plant number and species
leaves.present <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.lp, FUN=min, na.rm=T)
summary(leaves.present)
head(leaves.present)
#Graphing
#png(file.path(path.figs,"Leaf_Present_dens.png"), height=4, width=6, units="in", res=320)
ggplot(data=leaves.present) +
facet_grid(Collection~ .) + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) +
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of Leaves Present", x="Day of Year")
dev.off()
###########
###########
#Getting a graph of leaves present intensity
###########
###########
# Subset observations where leaves were present, keeping the intensity class.
dat.lpi <- dat.all[dat.all$leaf.present.observed=="Yes", c("Date.Observed", "Species", "Year", "PlantNumber", "leaf.present.intensity", "Collection")]
summary(dat.lpi)
dat.lpi <- dat.lpi[!is.na(dat.lpi$PlantNumber),]
summary(dat.lpi)
#Checking to make sure date ranges are correct
min(dat.lpi$Date.Observed)
max(dat.lpi$Date.Observed)
mean(dat.lpi$Date.Observed)
range(dat.lpi$Date.Observed)
#Setting my yday
dat.lpi$yday <- lubridate::yday(dat.lpi$Date.Observed)
summary(dat.lpi)
#setting my yday to only show dates later in the season and the current date
#dat.lpi <- dat.lpi [dat.lpi$yday>=180,]
#dat.lpi <- dat.lpi [dat.lpi$yday<=Sys.Date(),]
#summary(dat.lpi)
# Collapse to one row per plant/date/intensity class.
# BUG FIX: the original passed `dat=` and `NA.rm=T`. `NA.rm` is not an argument
# of min() (the formal is `na.rm`), so TRUE was forwarded into min()'s `...`
# and coerced to 1, making every aggregated yday equal 1. Use data=/na.rm=TRUE.
dat.lpi <- aggregate(yday ~ PlantNumber + Species + Year + Collection + leaf.present.intensity + Date.Observed, data = dat.lpi, FUN = min, na.rm = TRUE)
summary(dat.lpi)
head(dat.lpi)
dat.lpi$yday <- lubridate::yday(dat.lpi$Date.Observed)
summary(dat.lpi)
#leaves.present.intensity <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.lpi, FUN=min, na.rm=TRUE)
#summary(leaves.present.intensity)
#head(leaves.present.intensity)
#png(file.path(path.figs,"Leaf_Present_Intensity.png"), height=4, width=6, units="in", res=320)
# Histogram of observation day, filled by intensity class, Year x Collection grid.
# Note: ggplot clamps alpha at 1, so alpha=1.5 renders fully opaque.
ggplot(data=dat.lpi) +
geom_histogram(alpha=1.5, binwidth =10, aes(x=yday, fill=leaf.present.intensity,))+
facet_grid(Year~Collection)+
#scale_fill_manual(name= "leaf.present.intensity", values=c("101-1,000"="red", "1,001-10,000"="orange", "11-100"="yellow", "3-10"="green", ">10,000"="blue", "0"="NA", "NA"="NA")) +
#scale_color_manual(name="leaf.present.intensity", values=c("101-1,000"="red", "1,001-10,000"="orange", "11-100"="yellow", "3-10"="green", ">10,000"="blue", "0"="NA", "NA"="NA")) +
theme_bw()+
labs(title="Leaves Present Intensity", x="Day of Year",)
dev.off()
###########
###########
#Getting a graph of leaves increasing in size observations
###########
###########
# Subset spring records where leaves increasing in size were observed.
dat.li <- dat.spring[dat.spring$leaf.increasing.observed=="Yes", c("Date.Observed", "Species", "PlantNumber", "Year", "leaf.increasing.observed", "Collection")]
dat.li <- dat.li[!is.na(dat.li$PlantNumber),]
summary(dat.li)
head(dat.li)
#finding the minimum and maximum range and mean of the dates leaves increasing in size was observed on our trees.
#Note the na.rm=T which is removing N/A values
min(dat.li$Date.Observed)
max(dat.li$Date.Observed)
range(dat.li$Date.Observed)
mean(dat.li$Date.Observed,na.rm=T)
#Now make my Yday
dat.li$yday <- lubridate::yday(dat.li$Date.Observed)
summary(dat.li)
#only looking at trees that showed leaves increasing in size in the first half of the year
# Window: yday 61 (~Mar 2) through 180 (~Jun 29).
dat.li <- dat.li [dat.li$yday<=180,]
dat.li <- dat.li [dat.li$yday>=61,]
summary(dat.li)
#aggregating quercus.lf so it shows me the date of first leaf increasing in size for every plant number and species
leaves.increasing <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.li, FUN=min, na.rm=T)
summary(leaves.increasing)
head(leaves.increasing)
#Graphing
#png(file.path(path.figs,"Leaf_Increasing_dens.png"), height=4, width=6, units="in", res=320)
ggplot(data=leaves.increasing) +
facet_grid(Collection~ ., scales = "free_x") + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) + xlim(70, 180)+
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of Leaves Increasing in Size Observed", x="Day of Year")
dev.off()
# Histogram version of the same distribution.
ggplot(data=leaves.increasing) +
facet_grid(Collection~ ., scales = "free_x") + # This is the code that will stack everything
geom_histogram(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) +
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of Leaves Increasing in Size Observed", x="Day of Year")
dev.off()
###########
###########
#Getting a graph of flower buds observations
##########
###########
# Subset spring records where flower buds (or flowers) were observed.
dat.fb <- dat.spring[dat.spring$flower.buds.observed=="Yes", c("Date.Observed", "Species", "PlantNumber", "Year", "flower.buds.observed", "Collection")]
dat.fb <- dat.fb[!is.na(dat.fb$PlantNumber),]
summary(dat.fb)
head(dat.fb)
#finding the minimum and maximum range and mean of the dates flower buds were observed on our trees.
#Note the na.rm=T which is removing N/A values
min(dat.fb$Date.Observed)
max(dat.fb$Date.Observed)
range(dat.fb$Date.Observed)
mean(dat.fb$Date.Observed,na.rm=T)
#Now make my Yday
dat.fb$yday <- lubridate::yday(dat.fb$Date.Observed)
summary(dat.fb)
#only looking at trees that showed flower buds in the first half of the year
dat.fb <- dat.fb [dat.fb$yday<=180,]
summary(dat.fb)
#aggregating quercus.lf so it shows me the date of first flower buds for every plant number and species
flower.buds <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.fb, FUN=min, na.rm=T)
summary(flower.buds)
head(flower.buds)
#Graphing
#png(file.path(path.figs,"All_Flowers_or_Flower_Buds.png"), height=4, width=6, units="in", res=320)
ggplot(data=flower.buds) +
facet_grid(Collection~ .) + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) + xlim(65,180)+
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of Flower Buds or Flowers Observed", x="Day of Year")
dev.off()
###########
###########
#Getting a graph of open flowers observations
###########
###########
# Subset spring records where open flowers were observed.
dat.fo <- dat.spring[dat.spring$flower.open.observed=="Yes", c("Date.Observed", "Species", "PlantNumber", "Year", "flower.open.observed", "Collection")]
dat.fo <- dat.fo[!is.na(dat.fo$PlantNumber),]
summary(dat.fo)
head(dat.fo)
#finding the minimum and maximum range and mean of the dates open flowers were observed on our trees.
#Note the na.rm=T which is removing N/A values
min(dat.fo$Date.Observed)
max(dat.fo$Date.Observed)
range(dat.fo$Date.Observed)
mean(dat.fo$Date.Observed,na.rm=T)
#Now make my Yday
dat.fo$yday <- lubridate::yday(dat.fo$Date.Observed)
summary(dat.fo)
#only looking at trees that showed open flowers in the first half of the year
#dat.fo <- dat.fo [dat.fo$yday<=180,]
summary(dat.fo)
#aggregating quercus.lf so it shows me the date of open flowers for every plant number and species
flower.open <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.fo, FUN=min, na.rm=T)
summary(flower.open)
head(flower.open)
#removing 2020 because there were no spring observations
flower.open <- flower.open[!flower.open$Year=="2020",]
summary(flower.open)
#Graphing
#png(file.path(path.figs,"All_Flowers_Open.png"), height=4, width=6, units="in", res=320)
ggplot(data=flower.open) +
facet_grid(Collection~ .) + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) + xlim(65,300)+
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of Open Flower Observed", x="Day of Year")
dev.off()
###########
###########
#Getting a graph of pollen observations
###########
###########
# Subset spring records where pollen release was observed.
# BUG FIX: the original indexed `dat.`, which is not an object (typo for
# dat.spring) and would fail with "object 'dat.' not found".
dat.fp <- dat.spring[dat.spring$flower.pollen.observed=="Yes", c("Date.Observed", "Species", "PlantNumber", "Year", "flower.pollen.observed", "Collection")]
dat.fp <- dat.fp[!is.na(dat.fp$PlantNumber),]
summary(dat.fp)
head(dat.fp)
#finding the minimum and maximum range and mean of the dates pollen was observed on our trees.
#Note the na.rm=TRUE which is removing N/A values
min(dat.fp$Date.Observed)
max(dat.fp$Date.Observed)
range(dat.fp$Date.Observed)
mean(dat.fp$Date.Observed, na.rm = TRUE)
#Now make my Yday
dat.fp$yday <- lubridate::yday(dat.fp$Date.Observed)
summary(dat.fp)
#only looking at trees that showed pollen in the first half of the year
#dat.fp <- dat.fp [dat.fp$yday<=180,]
summary(dat.fp)
# First (minimum) pollen day per plant/species/year/collection.
flower.pollen <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data = dat.fp, FUN = min, na.rm = TRUE)
summary(flower.pollen)
head(flower.pollen)
#removing 2020 because there were no spring observations
flower.pollen <- flower.pollen[!flower.pollen$Year=="2020",]
#Graphing
#png(file.path(path.figs,"All_Flowers_Pollen.png"), height=4, width=6, units="in", res=320)
ggplot(data=flower.pollen) +
facet_grid(Collection~ .) + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) + xlim(60,175)+
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of Flower Pollen Observed", x="Day of Year")
dev.off()
######## Need to add fruit phenophases Now
#########subsetting out for fruit present
# Subset records where fruit was observed on the plant.
dat.fr <- dat.spring[dat.spring$fruit.present.observed=="Yes", c("Date.Observed", "Species", "Year", "PlantNumber", "fruit.present.observed", "Collection")]
summary(dat.fr)
dat.fr <- dat.fr[!is.na(dat.fr$PlantNumber),]
summary(dat.fr)
#Checking to make sure date ranges are correct
min(dat.fr$Date.Observed)
max(dat.fr$Date.Observed)
mean(dat.fr$Date.Observed)
range(dat.fr$Date.Observed)
#Setting my yday
dat.fr$yday <- lubridate::yday(dat.fr$Date.Observed)
summary(dat.fr)
#setting my yday to only show dates later in the season and the current date
#dat.fr <- dat.fr [dat.fr$yday<=09,]
#dat.fr <- dat.fr [dat.fr$yday<=Sys.Date(),]
#summary(dat.fr)
#aggregating to only show me observations that are present
# First (minimum) fruit-present day per plant/species/year/collection.
fruit.present <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.fr, FUN=min, na.rm=T)
summary(fruit.present)
head(fruit.present)
#removing 2020 because there were no spring observations
fruit.present <- fruit.present[!fruit.present$Year=="2020",]
ggplot(data=fruit.present) +
# png(file.path(path.figs,"Fruit_present_Oak_Maple.png"), height=4, width=6, units="in", res=320)+
facet_grid(Collection~ .) + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year)))+ xlim(60, 300)+
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of Fruit Present Observed", x="Day of Year")
dev.off()
########
#subsetting out for ripe fruit
# Subset records where ripe fruit was observed on the plant.
dat.rf <- dat.spring[dat.spring$fruit.ripe.observed=="Yes", c("Date.Observed", "Species", "Year", "PlantNumber", "fruit.ripe.observed", "Collection")]
summary(dat.rf)
dat.rf <- dat.rf[!is.na(dat.rf$PlantNumber),]
summary(dat.rf)
#Checking to make sure date ranges are correct
min(dat.rf$Date.Observed)
max(dat.rf$Date.Observed)
mean(dat.rf$Date.Observed)
range(dat.rf$Date.Observed)
#Setting my yday
dat.rf$yday <- lubridate::yday(dat.rf$Date.Observed)
summary(dat.rf)
#setting my yday to only show dates later in the season and the current date
#dat.rf <- dat.rf [dat.rf$yday>=180,]
#dat.rf <- dat.rf [dat.rf$yday<=Sys.Date(),]
#summary(dat.rf)
#aggregating to only show me observations that are present
# First (minimum) ripe-fruit day per plant/species/year/collection.
ripe.fruit <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.rf, FUN=min, na.rm=T)
summary(ripe.fruit)
head(ripe.fruit)
#removing 2020 because there were no spring observations
ripe.fruit <- ripe.fruit[!ripe.fruit$Year=="2020",]
ggplot(data=ripe.fruit) +
# png(file.path(path.figs,"Ripe_Fruit_Present_All.png"), height=4, width=6, units="in", res=320)+
facet_grid(Collection~., scales = "free_y") + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) +xlim(60, 365)+
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of Ripe Fruit Observed", x="Day of Year")
dev.off()
##############
#subsetting out for fruit drop
# Subset records where fruit drop was observed on the plant.
dat.fd <- dat.spring[dat.spring$fruit.drop.observed=="Yes", c("Date.Observed", "Species", "Year", "PlantNumber", "fruit.drop.observed", "Collection")]
summary(dat.fd)
dat.fd <- dat.fd[!is.na(dat.fd$PlantNumber),]
summary(dat.fd)
#Checking to make sure date ranges are correct
min(dat.fd$Date.Observed)
max(dat.fd$Date.Observed)
mean(dat.fd$Date.Observed)
range(dat.fd$Date.Observed)
#Setting my yday
dat.fd$yday <- lubridate::yday(dat.fd$Date.Observed)
summary(dat.fd)
#setting my yday to only show dates later in the season and the current date
#dat.fd <- dat.fd [dat.fd$yday>=180,]
#dat.fd <- dat.fd [dat.fd$yday<=Sys.Date(),]
#summary(dat.fd)
#aggregating to only show me observations that are present
# First (minimum) fruit-drop day per plant/species/year/collection.
fruit.drop <- aggregate(yday ~ PlantNumber + Species + Year + Collection , data=dat.fd, FUN=min, na.rm=T)
summary(fruit.drop)
head(fruit.drop)
#removing 2020 because there were no spring observations
# BUG FIX: the original re-filtered ripe.fruit (copy-paste from the previous
# section), so 2020 was never dropped from fruit.drop before plotting.
fruit.drop <- fruit.drop[!fruit.drop$Year=="2020",]
ggplot(data=fruit.drop) +
# png(file.path(path.figs,"Fruit__Drop_Present_All.png"), height=4, width=6, units="in", res=320)+
facet_grid(Collection~ ., scales = "free_y") + # This is the code that will stack everything
geom_density(alpha=0.5, aes(x=yday, fill=as.factor(Year), color=as.factor(Year))) + xlim(40, 365)+
scale_fill_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"= "#F0E442")) +
scale_color_manual(name="Year", values=c("2018"="maroon4", "2019"="#009E73", "2020"="gray", "2021"="#0072B2", "2022"="#F0E442")) +
theme_bw()+
labs(title="Average Day of Fruit Drop Observed", x="Day of Year")
dev.off()
############
#getting averages for date of phenophase occurrence in certain years
###########
# Helper: rows of `df` where the given "Yes"/"No" phenophase flag equals "Yes",
# keeping the standard identifying columns plus the flag itself.
# Fixes two defects in the original copy-pasted blocks: the duplicated
# "Date.Observed" column in every selection, and several summary() calls that
# reported a previously created object instead of the one just built
# (dat.of, dat.rfa21, dat.arfa19, dat.arfa21, dat.bbq22, dat.bba22,
# dat.ofq22, dat.ofa22, dat.frq22).
extract_phenophase <- function(df, flag_col) {
  df[df[[flag_col]] == "Yes",
     c("Date.Observed", "Species", "Year", "PlantNumber", flag_col)]
}
####Open flowers quercus
dat.ofa18 <- extract_phenophase(quercus18, "flower.open.observed")
summary(dat.ofa18)  # was summary(dat.of): object does not exist
#####Fruit Present quercus & acer
#2018 quercus
dat.fpa18 <- extract_phenophase(quercus18, "fruit.present.observed")
summary(dat.fpa18)
#2019 quercus
dat.fpa19 <- extract_phenophase(quercus19, "fruit.present.observed")
summary(dat.fpa19)
#2021 quercus
dat.fpa21 <- extract_phenophase(quercus21, "fruit.present.observed")
summary(dat.fpa21)
#2019 acer
dat.afpa19 <- extract_phenophase(acer19, "fruit.present.observed")
summary(dat.afpa19)
#2021 acer
dat.afpa21 <- extract_phenophase(acer21, "fruit.present.observed")
summary(dat.afpa21)
##### Ripe fruit####
#quercus 21
dat.rfa21 <- extract_phenophase(quercus21, "fruit.ripe.observed")
summary(dat.rfa21)
#2019 acer
dat.arfa19 <- extract_phenophase(acer19, "fruit.ripe.observed")
summary(dat.arfa19)
#2021 acer
dat.arfa21 <- extract_phenophase(acer21, "fruit.ripe.observed")
summary(dat.arfa21)
### Fruit Drop
#quercus 21
dat.fda21 <- extract_phenophase(quercus21, "fruit.drop.observed")
summary(dat.fda21)   # was summary(dat.rfa21)
#2019 acer
dat.afda19 <- extract_phenophase(acer19, "fruit.drop.observed")
summary(dat.afda19)  # was summary(dat.arfa19)
#2021 acer
dat.afda21 <- extract_phenophase(acer21, "fruit.drop.observed")
summary(dat.afda21)  # was summary(dat.arfa21)
##### breaking leaf buds 22
dat.bbq22 <- extract_phenophase(quercus22, "leaf.breaking.buds.observed")
summary(dat.bbq22)
dat.bba22 <- extract_phenophase(acer22, "leaf.breaking.buds.observed")
summary(dat.bba22)
### Flower buds 22
dat.fbq22 <- extract_phenophase(quercus22, "flower.buds.observed")
summary(dat.fbq22)   # was summary(dat.bbq22)
dat.fba22 <- extract_phenophase(acer22, "flower.buds.observed")
summary(dat.fba22)   # was summary(dat.bba22)
### open flowers 22
dat.ofq22 <- extract_phenophase(quercus22, "flower.open.observed")
summary(dat.ofq22)
dat.ofa22 <- extract_phenophase(acer22, "flower.open.observed")
summary(dat.ofa22)
### pollen 22
dat.pfq22 <- extract_phenophase(quercus22, "flower.pollen.observed")
summary(dat.pfq22)   # was summary(dat.ofq22)
dat.pfa22 <- extract_phenophase(acer22, "flower.pollen.observed")
summary(dat.pfa22)   # was summary(dat.ofa22)
### fruit present
dat.fpq22 <- extract_phenophase(quercus22, "fruit.present.observed")
summary(dat.fpq22)
dat.fpa22 <- extract_phenophase(acer22, "fruit.present.observed")
summary(dat.fpa22)
### Ripe fruit
dat.frq22 <- extract_phenophase(quercus22, "fruit.ripe.observed")
summary(dat.frq22)
dat.fra22 <- extract_phenophase(acer22, "fruit.ripe.observed")
summary(dat.fra22)
### Fruit Drop
dat.fdq22 <- extract_phenophase(quercus22, "fruit.drop.observed")
summary(dat.fdq22)   # was summary(dat.frq22)
dat.fda22 <- extract_phenophase(acer22, "fruit.drop.observed")
summary(dat.fda22)
|
84f2998a67658b15aeb887ec106b4f71749ddde1 | 231ecba59e56af6fecd46fd8f48138988046e99d | /dependencies.R | 93d5ab09c531e42bfcd59de50625c771cf2c5195 | [] | no_license | Mina-N/LDA_App | 4946e270ba11d2b92c6e1705ae2b9c7df2e9b169 | 5d7b48389b8a867a7d1ba0013e38dc4776fddec2 | refs/heads/master | 2023-02-28T21:09:30.969912 | 2021-01-31T21:20:18 | 2021-01-31T21:20:18 | 281,758,441 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 760 | r | dependencies.R | # Install dependencies necessary for app
# Bootstrap-and-load pattern found at:
# https://stackoverflow.com/questions/4090169/elegant-way-to-check-for-missing-packages-and-install-them
# Install pacman if absent. requireNamespace() is the recommended existence
# check: unlike require(), it does not attach the package or emit a load
# message when the package is missing.
if (!requireNamespace("pacman", quietly = TRUE)) {
  install.packages("pacman")
}
## R Shiny Package
pacman::p_load("shiny")
## Data Frame Packages
pacman::p_load("dplyr","stringr","readr","readxl")
## Data Visualization Packages
pacman::p_load("ggplot2","Rtsne","LDAvis")
## Text Mining Packages
# Note: data.table has non-zero exit status when installing from source
# recommend selecting "no" for "Do you want to install from sources...?"
pacman::p_load("data.table","Matrix","text2vec","tm",
"SnowballC","rARPACK","ggupset")
## packages required for server.R
pacman::p_load("servr", "jsonlite")
|
7c902121b9132daec678093e6201147e0ba7f0eb | be3a64d387d48f193419476a258c829e4100cb67 | /week_02/hw02-2.R | 2d8333b8771bdce475d88b33720f4e629652fb99 | [] | no_license | claireliu14/CSX_RProject | bd327da65a2b66592b30644591bf39b8c678fc25 | 53dc6da8d7ad0da42f2457a26efa1dec2784011d | refs/heads/master | 2020-03-22T06:53:22.902040 | 2019-04-17T03:21:37 | 2019-04-17T03:21:37 | 139,664,400 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,474 | r | hw02-2.R | source('pttTestFunction.R')
# Scrape PTT WomenTalk board index pages 1-10, saving each as <id>.txt.
id = c(1:10)
URL = paste0("https://www.ptt.cc/bbs/WomenTalk/index", id, ".html") # paste() inserts a space separator by default; paste0() does not
filename = paste0(id, ".txt")
# NOTE(review): page 1 is scraped here and then again by the mapply() below —
# confirm whether the single warm-up call is intentional.
pttTestFunction(URL[1], filename[1])
mapply(pttTestFunction, URL = URL, filename = filename)
# Reset the workspace before the text-mining phase.
# NOTE(review): rm(list = ls()) wipes everything defined above; fine for a
# standalone homework script, but avoid when sourcing from a live session.
rm(list=ls(all.names = TRUE))
library(NLP) # install.packages("NLP")
library(tm) # install.packages("tm")
library(jiebaRD) # install.packages("jiebaRD")
library(jiebaR) # install.packages("jiebaR") Chinese word segmentation
library(RColorBrewer)
library(wordcloud) #install.packages("wordcloud")
# Read every scraped .txt file in the working directory into a tm corpus.
filenames <- list.files(getwd(), pattern="*.txt")
files <- lapply(filenames, readLines)
docs <- Corpus(VectorSource(files))
# Transformer that replaces every match of a regex pattern with a space.
toSpace <- content_transformer(function(x, pattern) {
  return (gsub(pattern, " ", x))
}
)
# PTT boilerplate markers, high-frequency Chinese function words, board header
# fields, and all ASCII letters — each pattern is blanked before tokenisation.
# (Replaces 14 copy-pasted tm_map() calls with one loop; same patterns, same order.)
noise_patterns <- c("※", "◆", "‧", "的", "我", "你", "推", "了", "是",
                    "看板", "作者", "發信站", "批踢踢實業坊", "[a-zA-Z]")
for (pattern in noise_patterns) {
  docs <- tm_map(docs, toSpace, pattern)
}
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, stripWhitespace)
docs
# Tokenise each document with jieba and build a descending term-frequency table.
mixseg = worker()
jieba_tokenizer=function(d){
unlist(segment(d[[1]],mixseg))
}
seg = lapply(docs, jieba_tokenizer)
freqFrame = as.data.frame(table(unlist(seg)))
freqFrame = freqFrame[order(freqFrame$Freq,decreasing=TRUE), ]
library(knitr)
# Top-10 terms as a markdown table.
kable(head(freqFrame, 10), format = "markdown")
# Font with CJK glyphs so Chinese terms render in the word cloud (macOS font —
# NOTE(review): not available on other platforms; confirm target OS).
par(family=("Heiti TC Light"))
wordcloud(freqFrame$Var1,freqFrame$Freq,
scale=c(5,0.1),min.freq=50,max.words=150,
random.order=TRUE, random.color=FALSE,
rot.per=.1, colors=brewer.pal(8, "Dark2"),
ordered.colors=FALSE,use.r.layout=FALSE,
fixed.asp=TRUE)
# Open a browsing session on the PTT Gossiping board and pass its
# over-18 confirmation form.
library(stringr)
ptt.url <- "https://www.ptt.cc"
gossiping.url <- str_c(ptt.url, "/bbs/Gossiping")
gossiping.url
# NOTE(review): html_session()/html_form()/submit_form() come from rvest,
# which is never attached in this file — presumably loaded via
# pttTestFunction.R earlier (attached packages survive the rm() above); confirm.
gossiping.session <- html_session(url = gossiping.url)
gossiping.session
gossiping.form <- gossiping.session %>%
html_node("form") %>%
html_form()
gossiping.form
# Submit the "yes, I am over 18" button to reach the board index.
gossiping <- submit_form(
session = gossiping.session,
form = gossiping.form,
submit = "yes"
)
gossiping
dc020a70cb87a9fe69f6f253ab3817080e772669 | 60eb6bc5bed9927212e98fccd29bd2008f23cb2f | /app.R | a097ed94776f9178d7e82dc950733a8f1f989898 | [] | no_license | limnavonglouis/covid-19-projections | 77a1e81878897cc15c4c590a40833ddcec7fbadf | 1e36496c940777b32b89d17d704fa22bbcbb252c | refs/heads/master | 2021-04-14T19:08:44.449594 | 2020-03-25T12:48:59 | 2020-03-25T12:48:59 | 249,258,153 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,095 | r | app.R | library(shiny)
library(shinythemes)
library(dplyr)
library(rvest)
source('plot_curve.R')
source('plot_model.R')
source('plot_model_compared.R')
source('plot_truncated.R')
## SCRIPT ##
# Johns Hopkins CSSE COVID-19 time series (one row per province/state; one
# column per day). Columns 1, 3, 4 are Province.State/Lat/Long — dropped
# before summing the daily counts up to country level.
path <- "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
data_confirmed <- read.csv(paste0(path, "time_series_covid19_confirmed_global.csv"))
# funs() is deprecated since dplyr 0.8; passing the function directly is equivalent.
data_country_confirmed <- data_confirmed[,-c(1,3,4)] %>% group_by(Country.Region) %>% summarise_all(sum)
data_deaths <- read.csv(paste0(path, "time_series_covid19_deaths_global.csv"))
data_country_deaths <- data_deaths[,-c(1,3,4)] %>% group_by(Country.Region) %>% summarise_all(sum)
# data_recovered <- read.csv(paste0(path, "time_series_19-covid-Recovered.csv"))
# data_country_recovered <- data_recovered[,-c(1,3,4)] %>% group_by(Country.Region) %>% summarise_all(sum)
## VARIABLES ##
# Country names used to populate the selectInput choice lists in the UI.
country_choices <- data_country_confirmed[,'Country.Region']
## UI ##
## UI ##
# Page layout: one global data selector (confirmed cases vs deaths) on top of
# three tabs — "Actual" (observed curves + table), "Projection" (model-based
# forecast), "Methodology" (single-country fit with model formula and the
# fitted K / r coefficients). A fourth "Current" tab is disabled below.
ui <- fluidPage(theme = shinytheme("flatly"),
                titlePanel("Covid-19 Daily Cases Evolution"),
                selectInput("count_selection", "Select the data to plot.", choices = c('deaths', 'confirmed cases'),
                            selected = 'confirmed cases', multiple = FALSE, selectize = TRUE),
                tabsetPanel(
                  # Tab 1: observed daily counts for the selected countries.
                  # NOTE(review): "adpated" in the help text below is a typo in a
                  # displayed string; left unchanged in this comment-only pass.
                  tabPanel("Actual",
                           sidebarLayout(
                             sidebarPanel(
                               selectInput("country_list", "Select the list of countries",choices = country_choices,
                                           selected = c('France', 'China'), multiple = TRUE, selectize = TRUE),
                               sliderInput("count_start", "Select the count start",
                                           value = 0, min = 0, max = 600),
                               p("The plot shows the daily evolution of the number of confirmed cases or deaths."),
                               p("The count start can be adpated depending on the compared countries."),
                               strong("count start:"),
                               p("Number of cases at which the plot starts."),
                               p("The data used in the plot can be found in the table below.")
                             ),
                             mainPanel(
                               plotOutput('plot_compared_countries')
                             )
                           ),
                           # Underlying data, rendered below the plot.
                           fluidRow(
                             column(1),
                             column(10,dataTableOutput('table_country'))
                           )
                  ),
                  # Tab 2: projected trajectories for several countries.
                  tabPanel("Projection",
                           sidebarLayout(
                             sidebarPanel(
                               selectInput("model_country_list", "Select the list of countries",choices = country_choices,
                                           selected = c('France', 'China'), multiple = TRUE, selectize = TRUE),
                               sliderInput("model_count_start", "Select the count start",
                                           value = 50, min = 0, max = 600),
                               p("The evolution of the daily cases can be modeled using the data points from the first days of the epidemic."),
                               p("This enables us to project the future trajectory of the number of cases."),
                               p("More details can be found in the Methodology tab.")
                             ),
                             mainPanel(
                               plotOutput('plot_model_compared')
                             )
                           )
                  ),
                  # Tab 3: single-country fit plus the logistic-growth formula
                  # (MathJax) and the fitted K and r coefficients.
                  # NOTE(review): "theoterical" below is a typo in a displayed string.
                  tabPanel("Methodology",
                           sidebarLayout(
                             sidebarPanel(
                               selectInput("country_name", "Select the country", choices = country_choices,
                                           selected = 'France', multiple = FALSE, selectize = TRUE),
                               sliderInput("country_count_start", "Select the count start",
                                           value = 50, min = 0, max = 600),
                               numericInput("country_data", "Select the number of data point", value = 20),
                               p("Changing the number of data points used to make the estimation shows the day-to-day changes of the projection.")
                             ),
                             mainPanel(
                               plotOutput('plot_main_model')
                             )
                           ),
                           fluidRow(
                             br(),
                             column(1),
                             column(7,
                                    strong("Formulation of the model", style = "font-size:205%"),
                                    br(), br(),
                                    p("The evolution of the number of cases is modeled with a population growth model."),
                                    withMathJax(),
                                    helpText('$$\\frac{K}{1+\\left ( \\frac{K-P_{0}}{P_{0}} \\right )e^{-rt}}$$'),
                                    p("\\(K\\) is the carrying capacity and is the theoterical maximum for the number of cases."),
                                    p("\\(r\\) is a parameter proportional to the growth rate."),
                                    p("\\(P_{0}\\) is the initial value which is related to the count start."),
                                    p("\\(t\\) is the number of days after the start of the epidemic.")
                             ),
                             column(3, br(), br(), br(),
                                    p("For this example:", style = "font-size:150%"),
                                    h4("\\(K\\) ="),
                                    h4(textOutput('K'), align = "center"),
                                    h4("\\(r\\) ="),
                                    h4(textOutput('r'), align = "center")
                             )
                           )
                  )
                  # Disabled tab: requires the recovered-cases data, whose
                  # loading is also commented out at the top of the file.
                  # tabPanel("Current",
                  #
                  #          sidebarLayout(
                  #            sidebarPanel(
                  #              selectInput("curve_country_list", "Select the list of countries", choices = country_choices,
                  #                          selected = c('France', 'China'), multiple = TRUE, selectize = TRUE),
                  #              p("The current number of cases is an important indicator to follow for the capacity of health institutions such as hospitals."),
                  #              p("Current cases = "),
                  #              p("Confirmed cases - Recovered cases - Deaths"),
                  #              br(), br(),
                  #              selectInput("curve_country_name", "Select the country", choices = country_choices,
                  #                          selected = 'China', multiple = FALSE, selectize = TRUE),
                  #              p("The breakdown shows the repartition for a given country.")
                  #            ),
                  #            mainPanel(
                  #              plotOutput('plot_curve_compared'),
                  #              plotOutput('plot_breakdown')
                  #
                  #            )
                  #          )
                  # )
                ) # end of tabsetPanel
) # end of fluidPage
## SERVER ##
## SERVER ##
# Shiny server: wires the inputs to the plotting helpers sourced at the top
# of the file. Expensive computations are wrapped in reactive() so each is
# evaluated once per input change and shared between the outputs that use it.
server <- function(input, output) {

  # Comparison of observed curves: element [[1]] is the plot, [[2]] the data
  # table behind it. Shared by two outputs (the original computed it twice).
  compared_countries <- reactive(
    plot_compared_countries(country_list = input$country_list,
                            data_type = input$count_selection,
                            data_country_confirmed = data_country_confirmed,
                            data_country_deaths = data_country_deaths,
                            start = input$count_start)
  )

  output$plot_compared_countries <- renderPlot(compared_countries()[[1]])
  output$table_country <- renderDataTable(compared_countries()[[2]])

  # Single-country model fit: element [[1]] is the fitted model, [[2]] the
  # plot. Reused for the plot and both coefficient read-outs (the original
  # refit the model three times per input change).
  main_model <- reactive(
    plot_main(data_country_confirmed = data_country_confirmed,
              data_country_deaths = data_country_deaths,
              data_type = input$count_selection,
              country_name = input$country_name,
              count_start = input$country_count_start,
              n = input$country_data)
  )

  output$plot_main_model <- renderPlot(main_model()[[2]])
  # K = carrying capacity, r = growth-rate parameter of the fitted model.
  output$K <- renderText(coef(main_model()[[1]])['K'])
  output$r <- renderText(coef(main_model()[[1]])['r'])

  output$plot_model_compared <- renderPlot(
    plot_model_compared(country_list = input$model_country_list,
                        data_country_confirmed = data_country_confirmed,
                        data_country_deaths = data_country_deaths,
                        data_type = input$count_selection,
                        start = input$model_count_start)
  )

  # NOTE(review): these outputs belong to the commented-out "Current" tab and
  # reference data_country_recovered, whose loading is also commented out at
  # the top of the file — they would error if ever rendered. Disabled here to
  # match; restore the recovered-data import together with the tab.
  # output$plot_curve_compared <- renderPlot(
  #   plot_curve(country_names = input$curve_country_list,
  #              data_country_confirmed = data_country_confirmed,
  #              data_country_deaths = data_country_deaths,
  #              data_country_recovered = data_country_recovered)
  # )
  # output$plot_breakdown <- renderPlot(
  #   plot_breakdown(country_name = input$curve_country_name,
  #                  data_country_confirmed = data_country_confirmed,
  #                  data_country_deaths = data_country_deaths,
  #                  data_country_recovered = data_country_recovered)
  # )
}
shinyApp(ui, server) |
6c679253284b5c0622984fbb8420bf992320cf75 | 47aff768b8b66536f6355d57295c9edd9e1ff79a | /inst/demos/xgboost.R | 4f00f8701c3e79a83bbc8fc9c134826632f28dd7 | [] | no_license | grayskripko/rsmac | 559e4eb5e9257f2265c3099a1364183469a61b15 | 8c28cbfb5c2d072face6e6b92f8e088263ecbd4b | refs/heads/master | 2021-01-13T14:55:31.731237 | 2017-03-02T16:54:47 | 2017-03-02T16:54:47 | 76,677,051 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,020 | r | xgboost.R | library(rsmac)
# Demo: tune xgboost hyperparameters with rsmac (SMAC via pysmac).
# Search space: each entry gives the parameter's type, starting point and range.
grid <- list(
  nrounds = list(type='discrete', init=5, min=3, max=30),
  max.depth = list(type='discrete', init=4, min=3, max=10),
  eta = list(type='continuous', init=0.5, min=0.01, max=0.99),
  subsample = list(type='continuous', init=1, min=0.5, max=1))

# Objective to minimize: 2-fold CV error of an xgboost classifier on the
# agaricus data (trained in init_rcode below, so `train` is a global here).
objective <- function(max.depth, eta, subsample, nrounds) {
  params <- list(objective = "binary:logistic",
                 max.depth = max.depth, eta=eta, subsample=subsample)
  cv_history <- xgb.cv(params, xgb.DMatrix(train[['data']], label = train[['label']]),
                       nrounds = nrounds, # nthread = max(1, parallel::detectCores() - 2),
                       nfold = 2, verbose=F, prediction=F)
  # NOTE(review): selecting column 3 by position is fragile — it presumably is
  # the mean test error, but newer xgboost returns a list whose history lives
  # in $evaluation_log; verify against the installed xgboost version.
  cv_score <- min(cv_history[, 3, with=F])
  cv_score
}

# Budget for the SMAC optimizer.
pysmac_args <- list(max_evaluations=50)
# init_rcode runs in the worker before optimization: load xgboost + data there.
res <- rsmac_minimize(grid, objective, pysmac_args, init_rcode = {
  library(xgboost)
  data(agaricus.train, package='xgboost')
  train <- agaricus.train
})
# Sanity check: the optimizer should find a near-zero CV error on this easy set.
stopifnot(res$target_min < 0.001)
cat('\n')
print(res)
|
70028b36b22e5176d96e36a7f2d77b1567386d66 | e93ce01e189bd4d553aff97605ec4776e96d3355 | /tables/create-table.R | 457b61d771d215996ad5220566c4324a40c08a80 | [] | no_license | ybkamaleri/indicator | ae36080f7874fdcf0a7c67927f52c5a85e9a29bf | badf59244016987141c8cf9ef3ca698274a4e33b | refs/heads/main | 2023-02-03T21:40:14.062915 | 2020-12-23T14:45:12 | 2020-12-23T14:45:12 | 323,649,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,549 | r | create-table.R | ## Create tables in SQLite
## ------------------------
library(DBI)
library(RSQLite)
dbc <- DBI::dbConnect(SQLite(), dbname = "indicator.db")
sq_title <- '
CREATE TABLE "tbl_title" (
"id" INTEGER NOT NULL UNIQUE,
"rap" INTEGER,
"title" TEXT NOT NULL,
"intro" TEXT,
"def" TEXT,
"dim_kh" TEXT,
"dim_nh" TEXT,
"file_kh" TEXT,
"file_nh" TEXT,
PRIMARY KEY("id" AUTOINCREMENT)
);
'
DBI::dbSendQuery(
conn = dbc,
statement = sq_title)
sq_section <- '
CREATE TABLE "tbl_section" (
"id" INTEGER,
"title" TEXT,
"sect" TEXT,
FOREIGN KEY(id) REFERENCES tbl_title(id)
);
'
RSQLite::dbSendQuery(
conn = dbc,
statement = sq_section)
RSQLite::dbSendQuery(
conn = dbc,
"CREATE TABLE tbl_title(
id INTEGER,
tkort TEXT,
title TEXT,
PRIMARY KEY (id))",
overwrite = TRUE)
## Create tables R ways with data.frame
## ------------------------------------
## But can't specify PRIMARY or FOREIGN KEY
## Can be used to add data to database
library(RSQLite)
dbc2 <- RSQLite::dbConnect(SQLite(), dbname = "indicator2.db")
df_title <- data.frame(id = integer(),
rap = integer(),
title = character(),
intro = character(),
stringsAsFactors = FALSE
)
DBI::dbWriteTable(conn = dbc2,
name = "tbl_title",
value = df_title,
overwrite = FALSE,
append = TRUE)
|
d8b69e18c04cd3183fd6771d8c3db01babdafdc1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hydroTSM/examples/climograph.Rd.R | 9e6681828d51af382e2749950bab1249adad73ef | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 461 | r | climograph.Rd.R | library(hydroTSM)
### Name: climograph
### Title: Climograph
### Aliases: climograph
### Keywords: manip
### ** Examples
######################
## Ex1: Loading the DAILY precipitation, maximum and minimum air temperature at
## station Maquehue Temuco Ad (Chile)
data(MaquehueTemuco)
pcp <- MaquehueTemuco[, 1]
tmx <- MaquehueTemuco[, 2]
tmn <- MaquehueTemuco[, 3]
## Plotting the climograph
m <- climograph(pcp=pcp, tmx=tmx, tmn=tmn, na.rm=TRUE)
|
85f929244a4df3a9fc2d60a129941488d8ac9ab2 | 0a0a04adad2a286a74017b572e51dc82dc3ef786 | /R/Rd_df.R | 30fdb24925032c08272ff306002ee3546bfd1c87 | [
"MIT"
] | permissive | armcn/covtracer | 7e408e67a8ac1376e397a19cff7dc9dfa23f1ac2 | 9ccfacd171a2698bca7935f6bc88241d04b78d11 | refs/heads/main | 2023-08-18T20:40:23.765988 | 2021-09-30T15:56:29 | 2021-09-30T15:56:29 | 411,432,330 | 0 | 0 | NOASSERTION | 2021-09-28T20:40:59 | 2021-09-28T20:40:58 | null | UTF-8 | R | false | false | 2,095 | r | Rd_df.R | #' Create a tabular representation of man file information
#'
#' Provides Rd index info with a few additional columns of information about
#' each exported object. Returns one record per documented object, even if
#' multiple objects alias to the same documentation file.
#'
#' @inheritParams as.package
#' @return A \code{data.frame} of documented object information with variables:
#' \describe{
#' \item{index}{A \code{numeric} index of documentation files associated with
#' documentation objects}
#' \item{file}{A \code{character} filename of the Rd file in the "man" directory}
#' \item{filepath}{A \code{character} file path of the Rd file in the "man"
#' directory}
#' \item{alias}{\code{character} object names which are aliases for the
#' documentation in \code{filepath}}
#' \item{is_exported}{A \code{logical} indicator of whether the aliased object
#' is exported from the package namespace}
#' \item{doctype}{A \code{character} representing the Rd docType field.}
#' }
#'
#' @examples
#' package_source_dir <- system.file("examplepkg", package = "covtracer")
#' Rd_df(package_source_dir)
#'
#' @export
Rd_df <- function(x) {
  x <- as.package(x)
  db <- tools::Rd_db(dir = x$path)
  exports <- parseNamespaceFile(basename(x$path), dirname(x$path))$exports

  # Sort the Rd db by filename up front so every per-file quantity below is
  # derived in one stable, platform-independent order. (The original sorted
  # only `aliases`, leaving `files` and `doctype` in tools::Rd_db() order, so
  # rows could be misaligned whenever that order was not already sorted.)
  db <- db[sort(names(db))]

  # One character vector of alias names per Rd file,
  # as suggested in ?tools::Rd_db examples. (An unused `keywords` extraction
  # from the original has been removed.)
  aliases <- lapply(db, .tools$.Rd_get_metadata, "alias")

  # docType ("data", "package", ...) from each file's meta attribute;
  # NA when the file does not declare one.
  doctype <- vapply(db, function(rd) {
    dt <- attr(rd, "meta")$docType
    if (length(dt)) dt else NA_character_
  }, character(1L))

  # Expand the per-file values so there is one row per documented alias.
  naliases <- vapply(aliases, length, integer(1L))
  files <- rep(names(db), times = naliases)
  doctype <- rep(doctype, times = naliases)
  filepaths <- file.path(normalizePath(x$path), "man", files)
  aliases <- unlist(aliases, use.names = FALSE)

  data.frame(
    file = files,
    filepath = filepaths,
    alias = aliases,
    is_exported = aliases %in% exports,
    doctype = doctype,
    stringsAsFactors = FALSE
  )
}
|
7030cd03729e2505cccd230b87a4e509780fcd49 | 1b04d38b874db3e4806947c592d2c807836a13c0 | /global.R | 31ea4f0e03eee4692053cd4ef3c8ff046634f2d4 | [] | no_license | jkniz/example | aae2ae9a68cda7e1970c09aa20727a871cc3aed4 | 87720261895e51dc8f7ae7bf67b967afb7dc0346 | refs/heads/master | 2023-06-15T20:22:08.125879 | 2021-07-02T15:51:50 | 2021-07-02T15:51:50 | 382,393,332 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 393 | r | global.R | c_vars_choices <- c(
"Sex (0=w, 1=m)" = "sex", "Age" = "age", "Height" = "height", "Weight" = "weight", "BMI" = "bmi",
"Diet" = "diet", "Cholesterol" = "chol", "Smoker" = "smoker", "Cigarettes per day" = "cigs_per_day", "Packyears" = "packyears",
"Alcohol (g/day)" = "alc", "Tumour size" = "size", "Bilirubin" = "bili", "Hepatitis B" = "hbv", "Hepatitis C" = "hcv", "Diabetes" = "dia"
)
|
857ebbce0beef47c0523dac77c4eb5290678528a | b343820292bb28211a637033e3a4cdbb02921ccd | /ppp2020/R/RcppExports.R | 22405cf75247d233b9909a5e3384e648f29bb816 | [] | no_license | mintbora/test2 | 48187070f46de47e01f7a1543fa7474361460227 | 3f326cd7fac7251068e6153f8842e6c7d1834286 | refs/heads/master | 2021-05-19T17:52:23.376896 | 2020-04-07T05:27:17 | 2020-04-07T05:27:17 | 252,054,510 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 860 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
rcpparma_hello_world <- function() {
.Call(`_ppp2020_rcpparma_hello_world`)
}
rcpparma_outerproduct <- function(x) {
.Call(`_ppp2020_rcpparma_outerproduct`, x)
}
rcpparma_innerproduct <- function(x) {
.Call(`_ppp2020_rcpparma_innerproduct`, x)
}
rcpparma_bothproducts <- function(x) {
.Call(`_ppp2020_rcpparma_bothproducts`, x)
}
#' pmf for zero-inflated poisson
#' @param p proportion of structural zero's
#' @param theta the poisson mean
#' @param y the observed value
#' @param loga Logical. Whether to return the log probability or not.
#' @return the probability mass of the zero-inflated poisson distribution
#' @export
dzip <- function(p, theta, y, loga) {
.Call(`_ppp2020_dzip`, p, theta, y, loga)
}
|
a85caab4b44f4adc5ee85d3c6d4a3ec8671018dc | 181f74e926f422784d90465a9a50492460bc5bd3 | /R/limitedImpute.R | 125c0e157ec6c6f864e5763ec2bb8abbc3773e99 | [] | no_license | tranlm/lrecImpact | dbab81fa5d4e2df4badc6af8280387223ec2f4f0 | 8d66c0918be23eccbc245f4bd3c77c4c28adb8e4 | refs/heads/master | 2021-03-12T19:53:43.024672 | 2015-06-17T16:52:40 | 2015-06-17T16:52:40 | 34,067,274 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,777 | r | limitedImpute.R | ###############################################################################
# Description: Does last observation carried forward, but with a limit in how
# far the imputation will occur.
#
# Author: Linh Tran <tranlm@berkeley.edu>
# Date: Feb 13, 2015
###############################################################################
#' Limited last observation carried forward imputation
#'
#' \code{limitedImpute} will carry forward the last observation up to a specified threshold of days.
#'
#' This function is meant for indicators with missing values known to only occur for a limited amount of time. Examples include pregnancy (45-weeks) and tuberculosis treatment (8 months). Once the specified threshold has passed, the indicator reverts back to 0. If the values are non-missing, the value gets used regardless of whether it surpasses the threshold.
#'
#' @param var Variable to be imputed forward in time.
#' @param date Date variable corresponding to when \code{var} was measured.
#' @param limit Number of days the forward imputation is limited to.
#' @return The function returns a vector of length \code{var}, with the limited imputation carried out.
#'
#' @export
limitedImpute <- function(var, date, limit) {
	# Running state: is an event (value 1) currently active, and on what date
	# did it start? The start date bounds how far the value is carried forward.
	active <- FALSE
	active_date <- NA
	new.var <- numeric(length(var))
	for (i in seq_along(var)) {
		if (is.na(var[i])) {
			# Missing: carry the event forward only while within `limit` days of
			# the observation that started it; beyond that, revert to 0.
			if (active && as.numeric(date[i] - active_date) <= limit) {
				new.var[i] <- 1
			} else {
				new.var[i] <- 0
			}
		} else if (var[i] == 1 && !active) {
			# Event starts: record its date so the imputation window can be measured.
			active <- TRUE
			active_date <- date[i]
			new.var[i] <- var[i]
		} else if (var[i] == 0) {
			# Event explicitly absent: clear the state.
			active <- FALSE
			active_date <- NA
			new.var[i] <- var[i]
		} else {
			# Observed value while the event is already active (or a non-0/1 code):
			# keep the observed value. The start date is deliberately NOT reset,
			# matching the original carry-forward semantics.
			new.var[i] <- var[i]
		}
	}
	new.var
}
|
c6864e2e0fb495d3006e2f8c47f36de1913c3847 | 9509cefb9198144bde21774fdd5b3c7a68a005a7 | /R/misc.R | 6c98f7f1c59f767b0dd8168ccbaf9e9d4c481469 | [] | no_license | jeffreyhanson/marxan | cc5d39f4a0980f8a7c4d1cf041a500721b03c198 | fff42df08ac0a8ad1f762f6402d15698babf1dff | refs/heads/master | 2021-08-07T12:54:25.036732 | 2016-11-03T04:53:28 | 2016-11-03T04:53:28 | 29,377,383 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 5,345 | r | misc.R | #' @include RcppExports.R marxan-internal.R
NULL
#' Test if GDAL is installed on computer
#'
#' This function tests if GDAL is installed on the computer.
#' If not, download it here: \url{http://download.osgeo.org/gdal}.
#'
#' @return "logical" is GDAL installed?
#' @seealso \code{\link[gdalUtils]{gdal_setInstallation}}.
#' @export
#' @examples
#' is.gdalInstalled()
is.gdalInstalled <- function() {
	# Ask gdalUtils to locate a GDAL installation (silencing its warnings),
	# then report whether a path was recorded in the "gdalUtils_gdalPath" option.
	suppressWarnings(findGdalInstallationPaths())
	gdal_path <- getOption("gdalUtils_gdalPath")
	!is.null(gdal_path)
}
#' Rasterize polygon data using GDAL
#'
#' This function converts a "SpatialPolygonsDataFrame" to a "RasterLayer" using GDAL.
#' It is expected to be faster than \code{\link[raster]{rasterize}} for large datasets.
#' However, it will be significantly slower for small datasets because the data will need to be written and read from disk.
#' @param x "SpatialPolygonsDataFrame" object.
#' @param y "RasterLayer" with dimensions, extent, and resolution to be used as a template for new raster.
#' @param field "character" column name with values to burn into the output raster. If not supplied, default behaviour is to burn polygon indices into the "RasterLayer".
#' @param ... not used.
#' @export
#' @return "RasterLayer" object.
#' @seealso \code{\link[raster]{rasterize}}, \code{\link{is.gdalInstalled}}.
#' @examples
#' data(taspu,tasinvis)
#' x<-rasterize.gdal(taspu[1:5,],tasinvis[[1]])
# S4 generic: dispatch on the polygon layer (x) and the template raster (y).
setGeneric('rasterize.gdal', function(x,y, ...) standardGeneric('rasterize.gdal'))
#' @rdname rasterize.gdal
#' @export
setMethod(
	'rasterize.gdal',
	signature(x="SpatialPolygonsDataFrame", y="RasterLayer"),
	function(x, y, field=NULL) {
		# Default burn value: the polygon's row index, written to a temporary
		# "id" column (the copy of x is local, so the caller's data is untouched).
		if (is.null(field)) {
			x@data$id<-seq_len(nrow(x@data))
			field<-'id'
		}
		if (!field %in% names(x@data))
			stop(paste0("x@data does not have a field called ",field, "."))
		# Round-trip through disk: write the polygons as a shapefile and an empty
		# (all-NA) raster matching y's grid, then burn the field in with GDAL.
		# NOTE(review): fixed filenames in tempdir() ('polys', 'rast.tif') can
		# collide if two rasterizations run in the same session — verify.
		writeOGR(x, tempdir(), 'polys', driver='ESRI Shapefile', overwrite=TRUE)
		writeRaster(setValues(y, NA), file.path(tempdir(), 'rast.tif'), NAflag=-9999, overwrite=TRUE)
		# gdal_rasterize returns a RasterBrick-like object; [[1]] is the layer.
		return(gdal_rasterize(file.path(tempdir(), 'polys.shp'), file.path(tempdir(), 'rast.tif'), l="polys", a=field, output_Raster=TRUE)[[1]])
	}
)
#' Test if Marxan is installed on computer
#'
#' This function determines if Marxan is installed on the computer, and will update \code{\link[base]{options}}.
#'
#' @param verbose should messages be printed?
#' @return "logical" Is it installed?
#' @seealso \code{\link[base]{options}}, \code{\link{findMarxanExecutablePath}}.
#' @export
#' @examples
#' options()$marxanExecutablePath
#' is.marxanInstalled()
is.marxanInstalled<-function(verbose=FALSE) {
	# The executable's location is stored in the "marxanExecutablePath" option
	# by findMarxanExecutablePath(); Marxan counts as installed only when the
	# option is set AND the file it points to exists.
	# Using getOption() with && also fixes the original, which returned
	# logical(0) instead of FALSE when the option was unset, because
	# file.exists(NULL) yields logical(0) under the element-wise `&`.
	path <- getOption("marxanExecutablePath")
	installed <- !is.null(path) && file.exists(path)
	if (!verbose)
		return(installed)
	if (installed) {
		cat('marxan R package successfully installed\n')
	} else {
		# Also covers the case where the option is set but the file is missing,
		# for which the original printed nothing at all.
		cat('marxan R package cannot find Marxan executable files.\n')
	}
	return(invisible())
}
#' Find Marxan executable suitable for computer
#'
#' This function checks the computer's specifications and sets options('marxanExecutablePath') accordingly.
#' Marxan executables can be downloaded from \url{http://www.uq.edu.au/marxan/marxan-software}, and installed by unzipping the files contents, and copying them into the /bin folder in this package's installation directory.
#' If a suitable executable cannot be found, this function will fail and provide information.
#'
#' @seealso \code{\link{is.marxanInstalled}}.
#' @return "logical" Is Marxan installed?
#' @export
#' @examples
#' # marxan executable files should be copied to this directory
#' system.file("bin", package="marxan")
#' # look for Marxan
#' \donttest{
#' findMarxanExecutablePath()
#' }
#' # was Marxan found?
#' is.marxanInstalled()
findMarxanExecutablePath<-function() {
	# if path already set then return it
	if(!is.null(options()$marxanExecutablePath))
		return(options()$marxanExecutablePath)
	# if path not set then set it
	# Pick the executable matching the OS and architecture from the package's
	# bin/ directory (the user copies the Marxan binaries there after download).
	if (.Platform$OS.type=="windows") {
		if (.Platform$r_arch=="x64") {
			path=list.files(system.file("bin", package="marxan"), "^Marxan.*x64.exe$",full.names=TRUE)
		} else if (.Platform$r_arch=="i386") {
			path=system.file('bin/Marxan.exe', package="marxan")
		} else {
			stop('Marxan will only run in 64bit or 32bit Windows environments.')
		}
	} else {
		if (.Platform$OS.type=="unix") {
			if (Sys.info()[["sysname"]]=="Darwin") {
				if (Sys.info()[["machine"]]=="x86_64") {
					path=list.files(system.file("bin", package="marxan"), "^MarOpt.*Mac64",full.names=TRUE)
				} else if (Sys.info()[["machine"]]=="i686") {
					path=list.files(system.file("bin", package="marxan"), "^MarOpt.*Mac32",full.names=TRUE)
				}
				# NOTE(review): on a unix machine that is neither x86_64 nor i686,
				# `path` is never assigned and the check below errors with
				# "object 'path' not found" rather than the intended message.
			} else {
				if (Sys.info()[["machine"]]=="x86_64") {
					path=list.files(system.file("bin", package="marxan"), "^MarOpt.*Linux64",full.names=TRUE)
				} else if (Sys.info()[["machine"]]=="i686") {
					path=list.files(system.file("bin", package="marxan"), "^MarOpt.*Linux32",full.names=TRUE)
				}
			}
		} else {
			stop("Only Windows, Mac OSX, and Linux systems are supported.")
		}
	}
	# check that path is valid
	# Requires exactly one matching file; `marxanURL` is defined elsewhere in
	# the package.
	if (length(path)!=1 || !file.exists(path))
		stop(paste0("Marxan executable files not found.\nDownload marxan from ",marxanURL,",\nand copy the files into: ", system.file("bin", package="marxan")))
	# NOTE(review): the roxygen block documents a logical return, but this
	# branch returns the result of options() — presumably only the side effect
	# (setting the option) is relied upon; confirm before changing.
	options(marxanExecutablePath=path)
}
|
72f31788ef8a9223b69d49f4c38162dd64e188be | 0cae9ad507ca79182eb5c880895c03c2665fceed | /tests/testthat/test-lsd-exchange.R | d5375961c23de0575572a6bf0473a708bbcd2286 | [
"MIT"
] | permissive | Maxprofs/debkeepr | 73fd8fd6e241cba3517e3305e12c76f2f4751cc1 | 18243cf1df2419c04479dcaa38804f1c48fe69c2 | refs/heads/master | 2020-04-14T15:23:19.967461 | 2018-09-19T00:16:56 | 2018-09-19T00:16:56 | 163,924,836 | 0 | 1 | NOASSERTION | 2019-01-03T04:54:41 | 2019-01-03T04:54:41 | null | UTF-8 | R | false | false | 9,110 | r | test-lsd-exchange.R | context("test-lsd-exchange.R")
suppressPackageStartupMessages(library(tibble))
suppressPackageStartupMessages(library(dplyr))

# Shared fixtures for the exchange tests.
# x/y/dec: single lsd-style vectors (pounds, shillings, pence), dec with
# decimal components. b1/b2: bases (shillings per pound, pence per shilling);
# b1 = the conventional 20s/12d, b2 = a non-standard 8s/16d.
x <- c(10, 3, 2)
y <- c(20, 5, 8)
dec <- c(5.85, 17.35, 10)
b1 <- c(20, 12)
b2 <- c(8, 16)
# lsd-classed versions of the vectors and lists above.
x_b2 <- to_lsd(x, b2)
y_b2 <- to_lsd(y, b2)
list1 <- list(c(30, 10, 9), c(10.725, 18.65, 11), c(-26, -11, -10))
list2 <- list(x, y, dec)
list1_b1 <- to_lsd(list1, b1)
list2_b2 <- to_lsd(list(x, y, dec), b2)
rate_list <- list(c(0, 33, 4), c(0, 30, 0), c(0, 40, 0))
# Tibbles with an lsd list-column, for the mutate()-based tests.
tbl_b1 <- tibble(lsd = list1_b1)
tbl_b2 <- tibble(lsd = list2_b2)
rates <- to_lsd(list(c(0, 33, 4), c(0, 33, 4), c(0, 33, 4)), b1)
## Error messages from exchange_rate_check ##
# Input validation: a non-vector (e.g. data frame) must be rejected with the
# full multi-line message.
test_that("non-vector is an error", {
  expect_error(deb_invert_rate(data.frame(a = c(1:4), b = c(5:8))),
               paste("exchange_rate must be a list of class lsd, or an object that can be coerced to this",
                     " class, namely a numeric vector of length 3 or a list of such vectors.",
                     sep = "\n"))
})

# Non-numeric vectors and lists get type-specific messages.
test_that("non-numeric is an error", {
  expect_error(deb_invert_rate(c("hello", "goodbye")),
               "exchange_rate must be a numeric vector")
  expect_error(deb_invert_rate(list(c("hello", "goodbye"), c(TRUE, FALSE))),
               "exchange_rate must be a list of numeric vectors")
})

# Each element must have exactly 3 values (pounds, shillings, pence).
test_that("length of exchange_rate is 3", {
  expect_error(deb_invert_rate(c(10, 9, 3, 5)),
               paste("exchange_rate must be a vector of length of 3.",
                     "There must be a value for pounds, shillings, and pence.",
                     sep = "\n"))
  expect_error(deb_invert_rate(list(c(10, 9, 3, 5), c(6, 3), c(4, 6, 8))),
               paste("exchange_rate must be a list of numeric vectors of length 3.",
                     "There must be a value for pounds, shillings, and pence.",
                     sep = "\n"))
})

# shillings_rate must be a single numeric value.
test_that("exchange_rate_check works", {
  expect_error(deb_exchange(x, shillings_rate = "a"),
               "shillings_rate must be numeric")
  expect_error(deb_exchange(x, shillings_rate = c(31, 32)),
               "shillings_rate must be a numeric vector of length 1")
})
# deb_exchange on a single lsd vector: rate in shillings, optional rounding
# and alternative bases.
test_that("deb_exchange works", {
  expect_equal(deb_exchange(x, shillings_rate = 24),
               deb_multiply(x, x = 24/20))
  expect_equal(deb_exchange(x, shillings_rate = 30),
               to_lsd(c(15, 4, 9), b1))
  expect_equal(deb_exchange(x, shillings_rate = 30 + 3/12),
               to_lsd(c(15, 7, 3.475), b1))
  expect_equal(deb_exchange(x, shillings_rate = 30 + 3/12, round = 0),
               to_lsd(c(15, 7, 3), b1))
  expect_equal(deb_exchange(x, shillings_rate = 30, bases = c(8, 16)),
               to_lsd(c(38, 7, 11.5), b2))
})

# Lists of lsd vectors are exchanged element-wise.
test_that("deb_exchange is vectorized", {
  expect_equal(deb_exchange(list1, shillings_rate = 30),
               to_lsd(list(c(45, 16, 1.5),
                           c(17, 11, 1.2),
                           c(-39, -17, -9)), b1))
  expect_equal(deb_exchange(list1, shillings_rate = 16, bases = b2),
               to_lsd(list(c(62, 5, 2),
                           c(26, 2, 4.4),
                           c(-54, -7, -4)), b2))
  expect_equal(deb_exchange(list1, shillings_rate = 30, round = 0),
               to_lsd(list(c(45, 16, 2),
                           c(17, 11, 1),
                           c(-39, -17, -9)), b1))
})

# lsd-classed input carries its own bases, matching an explicit bases argument.
test_that("deb_exchange works with lsd objects", {
  expect_identical(deb_exchange(x_b2, shillings_rate = 12),
                   deb_exchange(x, shillings_rate = 12, bases = b2))
  expect_identical(deb_exchange(list1_b1, shillings_rate = 12),
                   deb_exchange(list1, shillings_rate = 12, bases = b1))
  expect_identical(deb_exchange(list2_b2, shillings_rate = 12, round = 0),
                   deb_exchange(list2, shillings_rate = 12, bases = b2, round = 0))
})

# deb_exchange inside dplyr::mutate preserves the lsd class and bases.
test_that("deb_exchange works with lsd column", {
  # mutated column is lsd
  expect_s3_class(mutate(tbl_b1, ex = deb_exchange(lsd, shillings_rate = 12))$ex, "lsd")
  expect_equal(deb_bases(mutate(tbl_b2, ex = deb_exchange(lsd, shillings_rate = 12))$ex),
               c(s = 8, d = 16))
  # mutated column is same as normal deb_exchange
  expect_identical(mutate(tbl_b1, lsd = deb_exchange(lsd, shillings_rate = 12))$lsd,
                   deb_exchange(list1_b1, shillings_rate = 12))
  expect_identical(mutate(tbl_b2, lsd = deb_exchange(lsd, shillings_rate = 12)),
                   tibble(lsd = deb_exchange(list2_b2, shillings_rate = 12)))
})

# Internal helper: express a normalized value entirely in shillings+pence.
test_that("normalized_to_sd helper works",{
  expect_equal(normalized_to_sd(c(1, 11, 0), b1), c(0, 31, 0))
  expect_equal(normalized_to_sd(to_lsd(c(1, 11, 0), b1), b1), to_lsd(c(0, 31, 0), b1))
  expect_equal(normalized_to_sd(list(x, y), b1),
               to_lsd(list(c(0, 203, 2),
                           c(0, 405, 8)), b1))
})

# Internal helper: express a normalized value entirely in pence.
test_that("normalized_to_d helper works",{
  expect_equal(normalized_to_d(c(1, 11, 6), b1), c(0, 0, 378))
  expect_equal(normalized_to_d(to_lsd(c(1, 11, 6), b1), b1), to_lsd(c(0, 0, 378), b1))
  expect_equal(normalized_to_d(list(x, y), bases = b1),
               to_lsd(list(c(0, 0, 2438),
                           c(0, 0, 4868)), b1))
})
# deb_exchange_rate(from, to): the rate relating two values, expressible as
# shillings (default), pence, or pounds via `output`.
test_that("deb_exchange_rate works", {
  expect_equal(deb_exchange_rate(c(166, 13, 4), c(100, 0, 0)),
               to_lsd(c(0, 12, 0), b1))
  expect_equal(deb_exchange_rate(c(100, 0, 0), c(166, 13, 4)),
               to_lsd(c(0, 33, 4), b1))
  expect_equal(deb_exchange_rate(c(100, 0, 0), c(166, 13, 0), round = 0),
               to_lsd(c(0, 33, 4), b1))
  expect_equal(deb_exchange_rate(c(100, 0, 0), c(166, 2, 10), bases = c(8, 16)),
               to_lsd(c(0, 13, 4.9), b2))
  expect_equal(deb_exchange_rate(c(20, 10, 8), c(10, 5, 4), bases = c(40, 24)),
               to_lsd(c(0, 20, 0), c(40, 24)))
  expect_equal(deb_exchange_rate(c(166, 13, 4), c(100, 0, 0), output = "pence"),
               to_lsd(c(0, 0, 144), b1))
  expect_equal(deb_exchange_rate(c(100, 0, 0), c(166, 13, 4), output = "pence"),
               to_lsd(c(0, 0, 400), b1))
  expect_equal(deb_exchange_rate(c(166, 13, 4), c(100, 0, 0), output = "pounds"),
               to_lsd(c(0, 12, 0), b1))
  expect_equal(deb_exchange_rate(c(100, 0, 0), c(166, 13, 4), output = "pounds"),
               to_lsd(c(1, 13, 4), b1))
})

# Element-wise rates for paired lists.
test_that("deb_exchange_rate is vectorized", {
  expect_equal(length(deb_exchange_rate(list1, list2)), 3)
  expect_equal(deb_exchange_rate(list1, list2, round = 0),
               to_lsd(list(c(0, 6, 8),
                           c(0, 34, 8),
                           c(0, -5, -1)), b1))
  expect_equal(deb_exchange_rate(list1, list2, bases = b2, round = 0),
               to_lsd(list(c(0, 2, 10),
                           c(0, 12, 9),
                           c(0, -2, -6)), b2))
})

# lsd-classed arguments supply the bases implicitly.
test_that("deb_exchange_rate works with lsd objects", {
  expect_identical(deb_exchange_rate(x_b2, y_b2), deb_exchange_rate(x, y_b2))
  expect_identical(deb_exchange_rate(list1_b1, x), deb_exchange_rate(list1, x, bases = b1))
  expect_identical(deb_exchange_rate(x, list2_b2), deb_exchange_rate(x, list2, bases = b2))
})

# Rate between two lsd columns of one tibble.
test_that("deb_exchange_rate works with lsd column", {
  # tbl with two currencies
  tbl_rate <- mutate(tbl_b1, flemish = deb_exchange(lsd, shillings_rate = 33 + 4/12))
  expect_identical(mutate(tbl_rate, rate = deb_exchange_rate(lsd, flemish))$rate,
                   rates)
})

# deb_invert_rate: the reciprocal exchange rate, with the same output and
# bases options as deb_exchange_rate.
test_that("deb_invert_rate works", {
  expect_equal(deb_invert_rate(c(0, 33, 4)), to_lsd(c(0, 12, 0), b1))
  expect_equal(deb_invert_rate(c(0, 12, 0)), to_lsd(c(0, 33, 4), b1))
  expect_equal(deb_invert_rate(c(0, 33, 0), round = 0), to_lsd(c(0, 12, 1), b1))
  expect_equal(deb_invert_rate(c(0, 12, 0), output = "pence"),
               to_lsd(c(0, 0, 400), b1))
  expect_equal(deb_invert_rate(c(0, 12, 0), output = "pounds"),
               to_lsd(c(1, 13, 4), b1))
  expect_equal(deb_invert_rate(c(0, 12, 0), bases = b2), to_lsd(c(0, 5, 5.33333), b2))
})

# Element-wise inversion of a list of rates.
test_that("deb_invert_rate is vectorized", {
  expect_equal(length(deb_invert_rate(rate_list)), 3)
  expect_equal(deb_invert_rate(rate_list),
               to_lsd(list(c(0, 12, 0),
                           c(0, 13, 4),
                           c(0, 10, 0)), b1))
  expect_equal(deb_invert_rate(rate_list, bases = c(40, 12)),
               to_lsd(list(c(0, 48, 0),
                           c(0, 53, 4),
                           c(0, 40, 0)), c(40, 12)))
  expect_equal(deb_invert_rate(rate_list, bases = c(20, 16), round = 0),
               to_lsd(list(c(0, 12, 0),
                           c(0, 13, 5),
                           c(0, 10, 0)), c(20, 16)))
})

# lsd-classed input again matches explicit bases.
test_that("deb_invert_rate works with lsd objects", {
  expect_identical(deb_invert_rate(x_b2),
                   deb_invert_rate(x, bases = b2))
  expect_identical(deb_invert_rate(list1_b1),
                   deb_invert_rate(list1, bases = b1))
  expect_identical(deb_invert_rate(list2_b2, round = 0),
                   deb_invert_rate(list2, bases = b2, round = 0))
})

# And inside a mutate() on an lsd list-column.
test_that("deb_invert_rate works with lsd column", {
  expect_identical(mutate(tibble(lsd = rates), inverse = deb_invert_rate(lsd))$inverse,
                   deb_invert_rate(rates))
})
|
b68020a20e92a8f14b3401bc61b7cfa808e499b5 | f0dfd6c391b6b5a0bbb80be6a5ad03c4010bc8f1 | /R/samonDataCheck.R | e7f0b703c76558f80fb661cd94a5ff3e76179262 | [] | no_license | cran/samon | c8f985d1955f2c9e7743e399d6f4dee2f4e370b0 | 5995a43da42b0b7b46d424603a0c11f8627e59d6 | refs/heads/master | 2022-04-28T16:15:59.910849 | 2020-05-05T12:40:02 | 2020-05-05T12:40:02 | 100,425,023 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,185 | r | samonDataCheck.R | # Samon: Summarizes data in an input data matrix
# particularly summarizes missing data patterns.
# ----------------------------------------------------
# Input:  data -- an N x NT matrix (or data frame); rows are subjects,
#         columns are timepoints, NA marks a missing value.
# Side effect: prints a summary report (counts, missing-baseline and
#         intermittent-missingness flags, and a table of the observed
#         missingness patterns).
# Value:  a list with N, NT, the two missingness flags, the per-subject
#         summary matrix `desc`, the per-subject pattern strings, and
#         the pattern count/proportion tables.
samonDataCheck <- function( data ) {
  NT <- ncol(data)
  N  <- nrow(data)
  min <- min(data, na.rm = TRUE)
  max <- max(data, na.rm = TRUE)
  ## desc[,1] Valid (non-missing) baseline
  ## desc[,2] last available t
  ## desc[,3] last available value
  ## desc[,4] number of observed values
  desc <- matrix(0, N, 4)
  ## start each pattern fully observed ("***...") and punch out "_"
  ## wherever a value is missing
  missPattern <- rep(paste(rep("*", NT), sep = "", collapse = ""), N)
  for ( i in seq_len(N) ) {
    desc[i, ] <- rep(0, 4)
    for ( t in seq_len(NT) ) {
      ## replace exactly character t of the pattern string
      if (is.na(data[i, t])) substr(missPattern[i], t, t) <- "_"
      if (!is.na(data[i, t])) {
        desc[i, 2] <- t
        desc[i, 3] <- data[i, t]
        desc[i, 4] <- desc[i, 4] + 1
      }
    }
  }
  desc[, 1] <- 1 - is.na(data[, 1])
  missingBaseline <- any(desc[, 1] == 0)
  ## a gap exists when the last observed timepoint exceeds the number of
  ## observed values
  NinterMiss <- desc[, 2] - desc[, 4]
  intermittentMissing <- any(NinterMiss != 0)
  completeData <- sum(desc[, 4] == NT)
  cat("\n\n")
  cat("Samon Data Check:\n")
  cat("--------------------------------------------------\n")
  cat(sprintf("Number of timepoints:                  %9.0f\n", NT))
  cat(sprintf("Number of subjects:                    %9.0f\n", N))
  cat(sprintf("Minimum observed value:                %9.0f\n", min))
  cat(sprintf("Maximum observed value:                %9.0f\n", max))
  cat(sprintf("Average number of timepoints on study: %9.2f\n", mean(desc[, 2])))
  cat(sprintf("Total number of observed values:       %9.0f\n", sum(desc[, 4])))
  cat(sprintf("Subjects observed at final timepoint:  %9.0f\n", sum(desc[, 2] == NT)))
  cat(sprintf("Subjects observed at all timepoints:   %9.0f\n", completeData))
  cat("\n")
  if ( missingBaseline == 1 ) {
    cat(sprintf("Missing baseline data found:\n"))
    ## BUG FIX: report the COUNT of subjects with a missing baseline.
    ## The previous code printed N - sum(data[,1]), i.e. N minus the sum
    ## of the baseline VALUES, which is not a count and is NA whenever a
    ## baseline value is missing.
    cat(sprintf("   subjects = %8.0f \n", sum(desc[, 1] == 0)))
    cat("\n")
  }
  if ( intermittentMissing == 1 ) {
    cat(sprintf("Intermittent Missing data found:\n"))
    cat(sprintf("   subjects with IM    = %9.0f \n", N - sum(desc[, 2] == desc[, 4])))
    cat(sprintf("   number of IM values = %9.0f \n", sum(desc[, 2] - desc[, 4])))
    cat("\n")
  }
  ntab <- table(missPattern)
  ptab <- prop.table(ntab)
  ## header aligned with the NT-character pattern strings printed below
  missHead <- paste(paste(rep(" ", NT + 3), sep = "", collapse = ""), paste(rep(" ", 7), sep = "", collapse = ""), "N", paste(rep(" ", 1), sep = "", collapse = ""), "proportion", "\n")
  cat("\n")
  cat("Missing Patterns:\n")
  cat(missHead)
  for ( nm in names(ntab) ) {
    cat(nm, " : ", format(ntab[nm], justify = "right", width = 8), format(round(ptab[nm], 4), nsmall = 4, digits = 4, justify = "right", width = 12), "\n")
  }
  cat("\n\n")
  ## column names as a character vector (was a list, which relied on
  ## implicit coercion inside dimnames<-)
  dimnames(desc)[[2]] <- c("baseline", "lastTime", "lastValue", "NValues")
  ret <- list( N = N, NT = NT, missingBaseline = missingBaseline, intermittentMissing = intermittentMissing, desc = desc, missingPatterns = missPattern, NmissingTable = ntab, PmissingTable = ptab )
  return(ret)
}
|
57bc17d19453f2a5cbc25f2fad13f4b894426bce | 0955238ee6664bd0be407edf02d6ed30c9c76b2f | /dup_to_OTU_table.R | 2d797811a21ec5c31345432317c5ce6401600454 | [] | no_license | reikopm/mbonlive_banzai | e19e8c2c4ec247a2ce21d62895d12769277ad4f8 | 80c7b2c61ea3349cd1b51ba465cf1b829de4f054 | refs/heads/master | 2021-09-06T03:07:45.249023 | 2018-02-01T22:42:17 | 2018-02-01T22:42:17 | 107,599,272 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,849 | r | dup_to_OTU_table.R | # translate duplicates to OTUs: was named collapse_dup_by_OTU
# arguments:
# 1: path to duplicate table (rows = sequence names, cols = sample names, cells = counts (integers))
# 2: dup to otu table (two columns; headers "Query" and "Match"; cells = sequence names of query and corresponding match from OTU clustering process)
# 3: otu table path (to be written)
# 20150813 removed fourth argument: 4: concatenated directory (obsolete?))
# was only used in one place: setwd(arguments[4])
# automated script:
# positional command-line arguments (see header above for their meaning)
arguments <- commandArgs(TRUE)
# load gtools to use function mixedsort
library(gtools)
# Read in duplicates files
# read.csv("dups.csv", row.names = 1)
dups <- read.csv(arguments[1], row.names = 1)
# as of NextSeq run in July 2015, dereplication had to be majorly overhauled, resulting in transposed duplicate table.
dups <- t(dups)
# Read in dups to OTUs files
# NOTE(review): the argument name below is misspelled ("stringsAsFactor",
# missing the final "s"); arguments passed through `...` must match
# exactly, so it is silently ignored and strings may still become factors
# on R < 4.0 -- confirm intended stringsAsFactors = FALSE.
dups_to_OTUs <- read.csv(arguments[2], header=TRUE, stringsAsFactor=FALSE)
# map each duplicate (row of `dups`) to its OTU via the Query -> Match
# lookup; sequences absent from the lookup produce NA
OTUs <- dups_to_OTUs$Match[
match(
rownames(dups),
dups_to_OTUs$Query
)
]
# for each column (sample), sum the rows (duplicates) that belong to the same OTU
OTU_table <- aggregate(dups, list(OTUs), FUN = sum)
# Make the rownames the values stored in the new first column
rownames(OTU_table) <- OTU_table[,1]
# remove that column
OTU_table <- OTU_table[,-1]
# sort rows by OTU name in natural ("mixed") order, e.g. OTU_2 before OTU_10
OTU_table <- OTU_table[mixedsort(rownames(OTU_table)),]
# write output to CSV file
write.csv(x = OTU_table, file = arguments[3], quote = FALSE)
# Most of this was an artifact of the weirdness of the old usearch output (uc format)
# unique(c(dup_to_OTU[,1], dup_to_OTU[,2]))
# dups_to_collapse <- split(dup_to_OTU[,1], dup_to_OTU[,2])
# dups_to_collapse <- lapply(dups_to_collapse, as.character)
# dups_to_collapse <- mapply(c, as.list(names(dups_to_collapse)), dups_to_collapse)
# dups_to_collapse <- sapply(dups_to_collapse, unique)
# dups_to_collapse <- sapply(dups_to_collapse, sort)
#
#
# no_clusters <- dups[!rownames(dups) %in% unique(unlist(dups_to_collapse)),]
#
# consolidated_dups <- list()
# for(i in 1:length(dups_to_collapse)){
# consolidated_dups[[i]] <- colSums(dups[dups_to_collapse[[i]],])
# }
# consolidated_dups <- do.call(rbind, consolidated_dups)
# rownames(consolidated_dups) <- sapply(dups_to_collapse, function(x) {x[[1]]})
#
# sort(as.numeric(gsub("DUP_", "", rownames(ALL_CLUSTERS))))
# ALL_CLUSTERS <- rbind(as.data.frame(consolidated_dups), no_clusters)
# order(rownames(ALL_CLUSTERS))
# write.csv(ALL_CLUSTERS, "all_clusters.csv")
# ALL_CLUSTERS <- read.csv("")
# confirm there are no duplicate otus:
# which(duplicated(rownames(ALL_CLUSTERS)))
# this one is weird, among many others
# dups_to_collapse[2416]
# write.table(dups_to_collapse, "dups_to_collapse.txt")
# dups_to_collapse_vec <- unlist(dups_to_collapse)
#
# dupped <- dups_to_collapse_vec[duplicated(dups_to_collapse_vec)]
# dups_removed <- dup_to_OTU[-which(dup_to_OTU[,2] %in% dupped),]
# dups_removed_tmp <- lapply(split(dups_removed[,1], dups_removed[,2]), as.character)
# dups_removed <- mapply(c, as.list(names(dups_removed_tmp)), dups_removed_tmp)
# which(duplicated(unlist(dups_removed)))
# X <- stack(setNames(dups_to_collapse, seq_along(dups_to_collapse)))
# TAB <- table(X)
# TAB.mat <- as.matrix(TAB)
# dup_rows <- TAB.mat[which(rowSums(TAB.mat) > 1),]
# dup_cols <- dup_rows[,which(colSums(dup_rows) > 0)]
# # dup_cols[,which(colSums(dup_cols) > 1)]
# identical(sort(rownames(dup_cols)),sort(dupped))
# # edit(dup_cols)
# dups_to_collapse[as.numeric(colnames(dup_cols))]
#
# TRASH:
# Read in files of chimaeras vs "not chimaeras"
# chimaeras <- read.table("chimaeras.txt")
# not_chimaeras <- read.table("not_chimaeras.txt")
# tail(chimaeras)
# dups_chimaeras <- dups[as.character(chimaeras[,1]),]
# dups_no_chimaeras <- dups[as.character(not_chimaeras[,1]),]
# WHAT THE FUCK IS GOING ON HERE???
# sapply(consolidated_otus, duplicated)
# consolidated_otus <- list()
# for(i in 1:nrow(dup_cols)){
# consolidated_otus[[i]] <- Reduce(union, dups_to_collapse[as.numeric(names(which(dup_cols[i,] > 0)))])
# }
# consol <- stack(setNames(consolidated_otus, seq_along(consolidated_otus)))
# consol.mat <- as.matrix(table(consol))
# colSums(consol.mat)
# duplicated(unlist(consolidated_otus))
# consolidated_2 <- list()
# for(i in 1:nrow(consol.mat)){
# consolidated_2[[i]] <- Reduce(union, consolidated_otus[as.numeric(names(which(consol.mat[i,] > 0)))])
# }
# consol2 <- stack(setNames(consolidated_2, seq_along(consolidated_2)))
# consol2.mat <- as.matrix(table(consol2))
# colSums(consol2.mat)
# consolidated_3 <- list()
# for(i in 1:nrow(consol.mat)){
# consolidated_3[[i]] <- Reduce(union, consolidated_2[as.numeric(names(which(consol2.mat[i,] > 0)))])
# }
# Reduce(union, dups_to_collapse[as.numeric(names(which(dup_cols[35,] > 0)))])
# Reduce(intersect, dups_to_collapse) |
b25ee39de57152932f294c19965d1cf24caf4f8d | 6c0d3585561e667ba0f6657ebd28bf582ebe75bf | /test/stat.R | 015863bcaa79d964f5ac742f6bc431913eacd2d7 | [] | no_license | Carolwz/stateacher | 22035c29efd32f69b8b6884eb70e7b9844ffd238 | e48db63215862e3e5cf85868de51fcc5381e8b8d | refs/heads/master | 2023-08-10T17:56:20.328406 | 2021-10-10T15:23:28 | 2021-10-10T15:23:28 | 401,298,414 | 0 | 0 | null | 2021-09-27T14:35:29 | 2021-08-30T10:09:27 | HTML | UTF-8 | R | false | false | 1,852 | r | stat.R | library(yaml)
library(formattable)
# args <- commandArgs(T)
# folder <- args[1]
# folder <- 'CMU-DS'
folder <- ''
# setwd("C:/Users/RY/git/stateacher/Data/")
# NOTE(review): hard-coded GitHub Actions runner path; the script only
# works in that CI environment unless `folder`/setwd is adjusted.
setwd(paste0('/home/runner/work/stateacher/stateacher/Data/', folder, '/'))
# Parse the YAML front matter of a markdown file: everything up to the
# second "---" delimiter line is fed to yaml.load().
load_yaml <- function(x){
yaml_end_idx <- which(!is.na(stringr::str_locate(readLines(x, encoding = 'utf-8'), pattern = '^(---)'))[,1])[2]
x <- readLines(x, encoding = 'utf-8')[1:yaml_end_idx]
x <- yaml.load(x)
return(x)
}
# all markdown files below the working directory
f <- list.files(pattern = paste0('.*md$'), recursive = TRUE, full.names = TRUE)
f <- grep('.md', f, value = TRUE)
# number of YAML entries per file (how much of the front matter was filled in)
f_yaml_length <- unlist(lapply(f, function(x) length(unlist(load_yaml(x)))))
# Per top-level section ("# ..."): total characters of body text; empty
# sections score 0.
# NOTE(review): the `section` parameter is never used in the body, and its
# default (`templateNames`) is an undefined global -- harmless only because
# R evaluates defaults lazily; confirm and consider removing it.
md_Stat <- function(x, section = templateNames) {
txt = readLines(x, encoding = 'UTF-8')
txt_N = length(txt)
txt_nchar = nchar(txt)
txtSectionInd = grep('^# ', txt)
ind_N = length(txtSectionInd)
ind1 = txtSectionInd + 1
ind2 = c(txtSectionInd[-1] - 1, txt_N)
# multiplier adjustment: 1 for sections with at least one body line,
# 0 for empty sections, so their character counts are zeroed out
flag = 1 * (sign(ind2 -ind1) > 0.5)
tab = unlist(lapply(1:length(ind1),
function(i) flag[i] * sum(txt_nchar[ind1[i]:ind2[i]])))
# length to subtract: trim 13 characters of boilerplate from the first section
tab[1] = tab[1] - 13
names(tab) = grep('^# ', txt, value = TRUE)
tab[which(is.na(tab))] = 0
return(tab)
}
# number of non-empty sections per file
md_tab = unlist(lapply(f, function(x) sum(md_Stat(x)>0)))
dat = data.frame(id = seq_len(length(f)), name = f, yaml_inut = f_yaml_length, md_input = md_tab)
# render the yaml_inut column as a white-to-orange gradient
tb <- formattable(dat, list(yaml_inut = color_tile("white", "orange")))
# Bootstrap boilerplate wrapped around the rendered table
html_header="
<head>
<meta charset=\"utf-8\">
<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">
<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\">
</head>
<body>
"
write(paste(html_header, tb, sep=""), file = paste0("summary.html"))
print("Your summary.html file has been generated")
|
43d040651da31bd01ea7287d761ae7f57f36f542 | 395fd0b0d5c18cd9aae42a7e0a907ae2f2c30a13 | /man/cv.ramsvm.Rd | 1cb739fa6ae506e26a06a189ccb31b9d7cdb1c8a | [] | no_license | bbeomjin/dbvsmsvm | 7a9565828dae249670616c8da1dba6d06945ea55 | a3712baa96b8a15a5e8eb6c7d134892395dad14e | refs/heads/master | 2023-08-30T01:15:34.732352 | 2021-11-02T14:54:14 | 2021-11-02T14:54:14 | 285,000,083 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,557 | rd | cv.ramsvm.Rd | \name{cv.ramsvm}
\alias{cv.ramsvm}
\title{
Cross-Validation for ramsvm
}
\description{
Perform cross-validation for the optimal lambda of \code{ramsvm}.
}
\usage{
cv.ramsvm(x = NULL, y, gamma = 0.5, valid_x = NULL, valid_y = NULL, nfolds = 10,
lambda_seq = 2^{seq(-10, 15, length.out = 100)},
kernel = c("linear", "gaussian"), kparam = 1,
scale = FALSE, criterion = c("0-1", "loss"), optModel = FALSE, nCores = 1, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A \emph{n} x \emph{p} data matrix, where \emph{n} is the number of observations and \emph{p} is the number of variables.
}
\item{y}{
A response vector with three and more labels.
}
\item{gamma}{
The convex combination parameter of the loss function.
}
\item{valid_x}{
A validation data matrix for selecting \code{lambda} and threshold parameter \emph{v} (optional). If \code{valid_x=NULL}, \code{nfolds}-fold cross-validation is performed.
}
\item{valid_y}{
A validation response vector (optional).
}
\item{nfolds}{
The number of folds for cross-validation.
}
\item{lambda_seq}{
A sequence of regularization parameter to control a level of \emph{l_2}-penalty.
}
\item{kernel}{
A character string representing one of type of kernel.
}
\item{kparam}{
A parameter needed for kernel.
}
\item{scale}{
A logical value indicating whether to scale the variables. If \code{scale=TRUE}, \code{x} is scaled to zero mean and unit variance.
}
\item{criterion}{
A type of criterion evaluating prediction performance of cross-validation.
}
\item{optModel}{
A logical. Whether to obtain the optimal classification model.
}
\item{nCores}{
The number of cores to use for parallel computing.
}
\item{...}{
Other arguments that can be passed to ramsvm function.
}
}
\value{
An S3 object of class "\code{ramsvm}" containing the following slots
\item{opt_param}{The optimal lambda and kernel parameter.}
\item{opt_valid_err}{A minimum value of cross-validation errors.}
\item{opt_ind}{An index of optimal lambda.}
\item{valid_err}{Cross-validation errors.}
\item{nfolds}{The number of folds for cross-validation.}
\item{opt_model}{If \code{optModel=TRUE}, classification model with the optimal lambda is returned.}
\item{call}{The call of \code{cv.ramsvm}.}
}
\references{}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
require(dbvsmsvm)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory. |
784de044dfd74e67e09200a6c352b442a1d473ee | d5bc5d4334969fec87fb2dd85ee03a70f4864caf | /tests/testthat/test-resample.R | 91f670e6ce8d3a1239c2c701b9c21d16c73f0417 | [
"MIT"
] | permissive | bbuchsbaum/neuroim2 | 46e14e10efb2b992e78741b0aa6737bde6971e33 | 2184bfafd75ab51e0c9d67db83076b08bbf3f40b | refs/heads/master | 2023-05-03T11:19:36.844512 | 2023-04-26T00:06:50 | 2023-04-26T00:06:50 | 109,069,233 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,052 | r | test-resample.R | library(testthat)
library(neuroim2)
# Test the resample(NeuroVol, NeuroVol) function
# Resampling one random 64^3 volume onto another (interpolation code 3L;
# presumably cubic -- confirm against the resample() documentation) must
# return a NeuroVol whose dimensions and spatial reference match the target.
test_that("resample(NeuroVol, NeuroVol) works correctly", {
source <- NeuroVol(array(rnorm(64 * 64 * 64), c(64, 64, 64)), NeuroSpace(c(64, 64, 64)))
target <- NeuroVol(array(rnorm(64 * 64 * 64), c(64, 64, 64)), NeuroSpace(c(64, 64, 64)))
interpolation <- 3L
resampled_vol <- resample(source, target, interpolation)
expect_s4_class(resampled_vol, "NeuroVol")
expect_equal(dim(resampled_vol), dim(target))
expect_equal(space(resampled_vol), space(target))
})
# Test the resample(NeuroVol, NeuroSpace) function
# Same contract when the target is supplied as a bare NeuroSpace rather
# than a full volume.
test_that("resample(NeuroVol, NeuroSpace) works correctly", {
source <- NeuroVol(array(rnorm(64 * 64 * 64), c(64, 64, 64)), NeuroSpace(c(64, 64, 64)))
target_space <- NeuroSpace(c(64, 64, 64))
interpolation <- 3L
resampled_vol <- resample(source, target_space, interpolation)
expect_s4_class(resampled_vol, "NeuroVol")
expect_equal(dim(resampled_vol), dim(target_space))
expect_equal(space(resampled_vol), target_space)
})
|
e93018393e29dd15e4d69c6780e6a696e3b1aa0e | 44fd4f5a8a5112b2c1a993bc7c7791de075a2fe6 | /R/utilities-constants.R | bb2fb36d9da5dd8c2d48e0f087b95f6f017a37a9 | [] | no_license | cran/shinyobjects | c0387c748ee8bf0ac174795707ca593a4bd4968d | fae1a0ed20966000e4753d8722eb6ded23d9e385 | refs/heads/master | 2022-11-22T10:23:18.694483 | 2020-07-29T04:50:02 | 2020-07-29T04:50:02 | 263,379,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 193 | r | utilities-constants.R | #' Valid strings for assignments/column names
#' Regular expression matching lines that load a package
#' (`library(`/`require(`) or assign to a name with `<-` or a single `=`;
#' anchored to the start of the line.
#' NOTE(review): the character class contains `0:9`, which matches the
#' literal characters `0`, `:` and `9` rather than the digit range `0-9`.
#' Possibly intentional (digits are already covered by `\\w`) -- confirm.
#' @noRd
strings_to_find <- function() {
  pkg_load  <- "^((library|require)\\(|"
  lhs_name  <- "[\\w\\._\\$0:9]+"
  assign_op <- "(\\s)?(<-|=[^=]))"
  paste0(pkg_load, lhs_name, assign_op)
}
|
323a90cd2d30d9bcfdbdbcfadeb1ed3ae6dddc9f | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googlewebmastersv3.auto/man/UrlSampleDetails.Rd | 4b983fc809b45ecd65fe0297851ac6561b5fdef0 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 618 | rd | UrlSampleDetails.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webmasters_objects.R
\name{UrlSampleDetails}
\alias{UrlSampleDetails}
\title{UrlSampleDetails Object}
\usage{
UrlSampleDetails(containingSitemaps = NULL, linkedFromUrls = NULL)
}
\arguments{
\item{containingSitemaps}{List of sitemaps pointing at this URL}
\item{linkedFromUrls}{A sample set of URLs linking to this URL}
}
\value{
UrlSampleDetails object
}
\description{
UrlSampleDetails Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Additional details about the URL, set only when calling get().
}
|
cb198766b90a1c4e18dea0adde444b104d3fb800 | d50ea3daf164132528c0268ce01446aa38ac7eab | /man/getConds.Rd | 402bac3679f542e7d63dce7cf05ea5e1fc12190b | [
"MIT"
] | permissive | ydhwang/rfinference | d8f33ca498137ccd2a802b5fbb14b00f451ff8ab | 51e2c4aea10f2bf178aa5914218e04dcd27460d7 | refs/heads/master | 2020-10-01T02:11:43.854469 | 2020-04-27T18:39:42 | 2020-04-27T18:39:42 | 227,429,537 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 630 | rd | getConds.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rfinference.R
\name{getConds}
\alias{getConds}
\title{getConds}
\usage{
getConds(tree)
}
\arguments{
\item{tree}{a single tree object obtained from using \code{"randomForest"} and \code{"getTree"}}
}
\value{
a statement defining the leaf (associated with "id"), by collecting the entire "ancestry"
}
\description{
Gives the partition statements for each leaf; a wrapper using cond_root and other functions.
Included for backward compatibility.
}
\details{
using this function recursively, we can find the family ancestry.
}
\examples{
# getConds(tree)
}
|
f0702fe72d6d7920dd01f09ab14678c15ebf788d | aeebd1497c7446e8ec967ba774ca5e016ce062a4 | /fund 2004-2017/13-quant.R | 244d7ef1e240bd8fde5b246ffaacb6d17e16ae8f | [] | no_license | shenfan2018/shenfan2018 | 212e881877df52b8772905b5a3546739cd4b5921 | 0bb70a7b0cdb0dc4d14a9576b02b6f22c7e9dfdb | refs/heads/master | 2020-04-01T18:05:52.266821 | 2019-11-20T12:45:09 | 2019-11-20T12:45:09 | 153,471,302 | 1 | 0 | null | null | null | null | GB18030 | R | false | false | 2,952 | r | 13-quant.R | # 最后一个回归
# First merge in the indicator of whether a fund is quantitative
quant <- fread("C://Users//shenfan//Desktop//基金经理文献//俞老师//我的//Fund_Strategy.csv")
# rename the Chinese columns ("quantitative", "strategy", "model")
setnames(quant, c("量化", "策略", "模型"), c("quant", "strategy", "model"))
# zero-pad the fund code to a 6-character id used as the merge key
quant <- quant[, id := sprintf("%06d", MasterFundCode)
][, .(id, quant, strategy, model)]
load("fund9.RData")
# join onto the fund panel; funds absent from the strategy file get quant = 0
data.quant <- quant[fund.9, on = .(id)
][is.na(quant), quant := 0
][year > 2014, .SD]
# keep one row per fund-semester (drop duplicated alpha_3 observations)
data.quant <- data.quant[, yesno := duplicated(alpha_3), keyby = .(id, year, sem)
][yesno == FALSE, .SD]
# Changed to ascending order here (rank 1 = lowest semester return)
data.quant <- data.quant[order(year, sem, category2, sem_return.1)
][, rank.1 := sequence(.N), keyby = .(year, sem, category2)
][, returnna := ifelse(is.na(sem_return.1), 1, 0)
][returnna == 1, rank.1 := sem_return.1]
# risk taking change driven by net flow; the raw values are too small,
# so rescale by 100
data.quant <- data.quant[, fund.risk.c := fund.risk.c * 100
][, fund.sk.c := fund.sk.c * 100]
# two-way ("twoways") fixed-effects panel regressions: fund (id) and quarter (DateQ)
liner5 <- plm(fund.risk.c ~ netflow.1 * strategy + rank.1 + logfund_size.1 + logfund_age.1, data.quant, model = "within", effect = "twoways", index = c("id", "DateQ"))
liner6 <- plm(fund.sk.c ~ netflow.1 * strategy + rank.1 + logfund_size.1 + logfund_age.1, data.quant, model = "within", effect = "twoways", index = c("id", "DateQ"))
# Two-group splits
load("fund9.RData")
data <- fund.9
data <- data[, yesno := duplicated(alpha_3), keyby = .(id, year, sem)
][yesno == FALSE, .SD]
# rank.1
data <- data[order(year, sem, category2, sem_return.1)
][, rank.1 := sequence(.N), keyby = .(year, sem, category2)
][, returnna := ifelse(is.na(sem_return.1), 1, 0)
][returnna == 1, rank.1 := sem_return.1]
data <- data[, fund.risk.c := fund.risk.c * 100
][, fund.sk.c := fund.sk.c * 100]
# Split logfund_age into 2 groups per quarter; old = upper half (ntile group 2)
data.age <- data[order(DateQ, logfund_age.1)]
data.age <- data.age[, group := ntile(logfund_age.1, 2), keyby = DateQ
][, old := ifelse(group == 2, 1, 0)]
liner1 <- plm(fund.risk.c ~ netflow.1 * old + logfund_size.1 + rank.1, data.age, model = "within", effect = "twoways", index = c("id", "DateQ"))
liner2 <- plm(fund.sk.c ~ netflow.1 * old + logfund_size.1 + rank.1, data.age, model = "within", effect = "twoways", index = c("id", "DateQ"))
# For logfund_size
# Two-group split
# Split logfund_size into 2 groups per quarter; big = upper half (ntile group 2)
data.size <- data[order(DateQ, logfund_size.1)]
data.size <- data.size[, group := ntile(logfund_size.1, 2), keyby = DateQ
][, big := ifelse(group == 2, 1, 0)]
liner3 <- plm(fund.risk.c ~ netflow.1 * big + logfund_age.1 + rank.1, data.size, model = "within", effect = "twoways", index = c("id", "DateQ"))
liner4 <- plm(fund.sk.c ~ netflow.1 * big + logfund_age.1 + rank.1, data.size, model = "within", effect = "twoways", index = c("id", "DateQ"))
# export all six regressions to one HTML table, with fund/time FE indicator rows
stargazer(liner1, liner2, liner3, liner4, liner5, liner6, type = "html", out = "C://Users//shenfan//Desktop//data//2//group.doc", add.lines = list(c("fund", "yes", "yes", "yes", "yes", "yes", "yes"), c("time", "yes", "yes", "yes", "yes", "yes", "yes")))
|
3b68e3ded4ee8cc377b0102ed9a8acba2057804f | a6c93aacfc26bb5c6763fb4bc82f5fb2734c8831 | /man/bcgsc.ca_CHOL.IlluminaHiSeq_DNASeq.1.somatic.maf.Rd | 9b2aae53747792030614a8e369d2ffe6d9c5d152 | [] | no_license | BioinformaticsFMRP/TCGAbiolinks | c07c7a6d5b669b51a34a58a6213072b7c2f15cba | a4c3804dff8fc520436d49a440f9e22eebaa9735 | refs/heads/master | 2023-06-15T03:30:01.304727 | 2023-06-05T20:43:18 | 2023-06-05T20:43:18 | 31,545,433 | 262 | 125 | null | 2023-06-05T20:43:19 | 2015-03-02T14:56:03 | R | UTF-8 | R | false | true | 324 | rd | bcgsc.ca_CHOL.IlluminaHiSeq_DNASeq.1.somatic.maf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TCGAbiolinks.R
\docType{data}
\name{bcgsc.ca_CHOL.IlluminaHiSeq_DNASeq.1.somatic.maf}
\alias{bcgsc.ca_CHOL.IlluminaHiSeq_DNASeq.1.somatic.maf}
\title{TCGA CHOL MAF}
\format{
A tibble: 3,555 x 34
}
\description{
TCGA CHOL MAF
}
\keyword{internal}
|
a5ac37f6cdb0244a3aef2e1a5ae5b78f1bb8cd4f | 477a844ec2797463c5c29e44f62ffbbed63f2241 | /Legacy/R Scripts/Part 1/LARenewables.R | d64e33836c24b05a4334ee91a578a4d5c3ea6851 | [] | no_license | ischerr/ScottishEnergyStatsProcessing | f5e81cefc4d4cdfdcaa3851a86664a37974352f5 | f21d110c4c91d4c91c55409fd577f07819c526a9 | refs/heads/master | 2022-01-20T03:20:48.630704 | 2022-01-17T16:07:06 | 2022-01-17T16:07:06 | 232,823,735 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,724 | r | LARenewables.R | library(readxl)
print("LARenewables")
#Set Starting year for Loop
yearstart <- 2014
#Set Current Year to be loop end
# NOTE: format() returns a character string; yearstart:yearend below
# relies on implicit coercion of that string to numeric.
yearend <- format(Sys.Date(), "%Y")
### Set Working Directory ###
setwd("J:/ENERGY BRANCH/Statistics/Energy Statistics Processing")
### Loop to extract year sheets from source Data ###
# NOTE(review): LARenewables is overwritten on every successful read, so
# only the LAST readable year (preferring its revised "r" sheet) survives
# the loop and is exported below -- confirm whether all years should be
# accumulated instead.
for (year in yearstart:yearend) {
# TryCatch allows the code to continue when there is an error.
# This is used when there is no data for the corresponding year in the loop.
tryCatch({
#Allow Code to continue running even if there are errors ###
### Read Source Data ###
LARenewables <-
read_excel(
"Data Sources/LA Renewables/Current.xlsx",
sheet = paste("LA - Generation, ", year, sep = ""),
skip = 2
)
### Scottish Subset ###
LARenewables <- subset(LARenewables, Country == "Scotland")
### Add Current Loop Year as Column ###
LARenewables$Year <- year
}, error = function(e) {
cat("ERROR :", conditionMessage(e), "\n")
})
# Second attempt: revised sheets carry an "r" suffix (e.g. "... 2016r")
# and take precedence over the plain sheet read above.
tryCatch({
#Allow Code to continue running even if there are errors ###
### Read Source Data ###
LARenewables <-
read_excel(
"Data Sources/LA Renewables/Current.xlsx",
sheet = paste("LA - Generation, ", year, "r", sep = ""),
skip = 2
)
### Scottish Subset ###
LARenewables <- subset(LARenewables, Country == "Scotland")
### Add Current Loop Year as Column ###
LARenewables$Year <- year
}, error = function(e) {
cat("ERROR :", conditionMessage(e), "\n")
})
}
### Export to CSV ###
# (tab-separated in fact; NA values are written as "0")
write.table(
LARenewables,
"R Data Output/LARenewables.txt",
sep = "\t",
na = "0",
row.names = FALSE
)
f6a498c224083e2d6f98faad5be343bff32f7aa8 | 9a2f76d016d2b9f3b803131045bc071df961c48f | /intro.R | aaf644a484022d6841a54972a54c769c09a93843 | [] | no_license | s375301/hopr | 7c3d92ad1bec63257c00a764f37e6b2387ea0e59 | 3097e3308c90ef04bfb67c57210518db3c6b5b4f | refs/heads/main | 2023-02-04T17:07:30.663199 | 2020-12-27T05:09:27 | 2020-12-27T05:09:27 | 324,496,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 615 | r | intro.R | die <- 1:6
# Worked examples ("Hands-On Programming with R" style): vectorized
# arithmetic on a die, sampling, and writing simple functions.
die - 1
die / 2
die * die
# the shorter vector is recycled to the longer one's length
die + 1:2
die + 1:4 # vector recycling
# %*% is matrix multiplication: here the inner product (a 1 x 1 matrix)
die %*% die
# sampling is WITHOUT replacement by default
sample(die, size = 2)
args(round)
# with replacement: the two dice are independent
sample(die, size = 2, replace = TRUE)
dice <- sample(die, size = 2, replace = TRUE) # remain same
# roll two fair six-sided dice and return their sum
roll <- function() {
die <- 1:6
dice <- sample(die, size = 2, replace = TRUE)
sum(dice) # The last line of code means return
}
roll()
dice
### arguments
# same, but the die faces are supplied by the caller
roll2 <- function(bones) {
dice <- sample(bones, size = 2, replace = TRUE)
sum(dice)
}
roll2(1:4)
### The default values
# a default argument makes the faces optional
roll2 <- function(bones = 1:6) {
dice <- sample(bones, size = 2, replace = TRUE)
sum(dice)
}
roll2(1:4)
380dec1c769b57b02d40953b29aeab8048c9d0e6 | 1f6d79658ce351eafa3bf83cf38949d82b58de2f | /data-raw/listing_duplicated_columns.r | e721771509b60534113ea0b78d125f6f0a3fa700 | [
"MIT"
] | permissive | USCCANA/netdiffuseR | 3dd061f8b9951f7bdc5ec69cded73144f6a63cf7 | 7c5c9a7d4a8120491bfd44d6e307bdb5b66c18ae | refs/heads/master | 2023-09-01T08:26:19.951911 | 2023-08-30T15:44:09 | 2023-08-30T15:44:09 | 28,208,077 | 85 | 23 | NOASSERTION | 2020-03-14T00:54:59 | 2014-12-19T00:44:59 | R | UTF-8 | R | false | false | 501 | r | listing_duplicated_columns.r | # This script checks which variables can be repeated...
# Return a two-column character matrix with one row per pair of columns
# of `x` whose values are identical (NA positions ignored), or NULL when
# no duplicated pair exists.
listing_duplicated_columns <- function(x) {
  # Normalize factors to character so `==` compares labels, not codes.
  x <- data.frame(lapply(x, function(y)
    if (is.factor(y)) as.character(y) else y
  ), stringsAsFactors = FALSE)
  vnames <- colnames(x)
  ans <- NULL
  # seq_len() keeps the loops empty for 0- or 1-column input
  # (the previous 1L:ncol(x) errored on a 0-column data frame).
  for (i in seq_len(ncol(x))) {
    for (j in seq_len(ncol(x))) {
      # only compare each unordered pair once
      if (j <= i) next
      # columns count as duplicated when every pair of values agrees;
      # positions where either value is NA are ignored (na.rm = TRUE)
      if (all(x[, i] == x[, j], na.rm = TRUE))
        ans <- c(ans, list(vnames[c(i, j)]))
    }
  }
  # one row per duplicated pair; NULL when none were found
  do.call(rbind, ans)
}
|
ee84ba553a3614fe47bdc149e3e94c9456a81f75 | 4483e0c1924204c74d10b30333cd6529ca529d7e | /tests/testthat/test_curds.R | 47ead2836e5b1ecbabd1e4590d7a8bc897153443 | [] | no_license | BorjaZ/NLMR | e6f35912b9af62644543fe183834b70609c06936 | e0516590cc2adc1a27bb453a9a3dd27dca9037d9 | refs/heads/master | 2021-04-28T18:00:32.276707 | 2018-02-16T13:42:29 | 2018-02-16T13:42:29 | 121,864,946 | 1 | 0 | null | 2018-02-17T15:12:04 | 2018-02-17T15:12:04 | null | UTF-8 | R | false | false | 231 | r | test_curds.R | # nolint start
context("nlm_curds")
# Basic contract of nlm_curds(): with two curdling proportions and two
# partition levels it returns a RasterLayer whose cells take exactly two
# distinct values.
test_that("nlm_curds is a good boy", {
curds <- nlm_curds(c(0.5, 0.3), c(6, 2))
expect_that(curds, is_a("RasterLayer"))
# binary landscape: only two distinct cell values
expect_equal(length(unique(curds@data@values)), 2)
})
# nolint end
|
3f6659829f081b6a540544c02a115e72d15b4202 | 1542b8ef5c6387facf4d49f8fd4f6b5ef5d8e9c0 | /man/xCrosstalk.Rd | ef56a3f39a7c4704c55177048978ef7b857b7b88 | [] | no_license | wuwill/XGR | 7e7486614334b664a05e389cd646678c51d1e557 | c52f9f1388ba8295257f0412c9eee9b7797c2029 | refs/heads/master | 2020-04-12T12:38:04.470630 | 2018-12-19T17:40:30 | 2018-12-19T17:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 12,564 | rd | xCrosstalk.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xCrosstalk.r
\name{xCrosstalk}
\alias{xCrosstalk}
\title{Function to identify a pathway crosstalk}
\usage{
xCrosstalk(data, entity = c("Gene", "GR"),
significance.threshold = NULL, score.cap = NULL,
build.conversion = c(NA, "hg38.to.hg19", "hg18.to.hg19"),
crosslink = c("genehancer", "PCHiC_combined", "GTEx_V6p_combined",
"nearby"), crosslink.customised = NULL, cdf.function = c("original",
"empirical"), scoring.scheme = c("max", "sum", "sequential"),
nearby.distance.max = 50000, nearby.decay.kernel = c("rapid", "slow",
"linear", "constant"), nearby.decay.exponent = 2,
networks = c("KEGG", "KEGG_metabolism", "KEGG_genetic",
"KEGG_environmental", "KEGG_cellular", "KEGG_organismal",
"KEGG_disease",
"REACTOME", "PCommonsDN_Reactome"), seed.genes = T,
subnet.significance = 0.01, subnet.size = NULL,
ontologies = c("KEGGenvironmental", "KEGG", "KEGGmetabolism",
"KEGGgenetic", "KEGGcellular", "KEGGorganismal", "KEGGdisease"),
size.range = c(10, 2000), min.overlap = 10, fdr.cutoff = 0.05,
crosstalk.top = NULL, glayout = layout_with_kk, verbose = T,
RData.location = "http://galahad.well.ox.ac.uk/bigdata")
}
\arguments{
\item{data}{a named input vector containing the significance level for
genes (gene symbols) or genomic regions (GR). For this named vector,
the element names are gene symbols or GR (in the format of
'chrN:start-end', where N is either 1-22 or X, start/end is genomic
positional number; for example, 'chr1:13-20'), the element values for
the significance level (measured as p-value or fdr). Alternatively, it
can be a matrix or data frame with two columns: 1st column for gene
symbols or GR, 2nd column for the significance level. Also supported is
the input with GR only (without the significance level)}
\item{entity}{the entity. It can be either "Gene" or "GR"}
\item{significance.threshold}{the given significance threshold. By
default, it is set to NULL, meaning there is no constraint on the
significance level when transforming the significance level into
scores. If given, those below this are considered significant and thus
scored positively. Instead, those above this are considered
insignificant and thus receive no score}
\item{score.cap}{the maximum score being capped. By default, it is set
to NULL, meaning that no capping is applied}
\item{build.conversion}{the conversion from one genome build to
another. The conversions supported are "hg38.to.hg19" and
"hg18.to.hg19". By default it is NA (no need to do so)}
\item{crosslink}{the built-in crosslink info with a score quantifying
the link of a GR to a gene. See \code{\link{xGR2xGenes}} for details}
\item{crosslink.customised}{the crosslink info with a score quantifying
the link of a GR to a gene. A user-input matrix or data frame with 4
columns: 1st column for genomic regions (formatted as "chr:start-end",
genome build 19), 2nd column for Genes, 3rd for crosslink score
(crosslinking a genomic region to a gene, such as -log10 significance
level), and 4th for contexts (optional; if not provided, it will be
added as 'C'). Alternatively, it can be a file containing these 4
columns. Required, otherwise it will return NULL}
\item{cdf.function}{a character specifying how to transform the input
crosslink score. It can be one of 'original' (no such transformation),
and 'empirical' for looking at empirical Cumulative Distribution
Function (cdf; as such it is converted into pvalue-like values [0,1])}
\item{scoring.scheme}{the method used to calculate seed gene scores
under a set of GR (also over Contexts if many). It can be one of "sum"
for adding up, "max" for the maximum, and "sequential" for the
sequential weighting. The sequential weighting is done via:
\eqn{\sum_{i=1}{\frac{R_{i}}{i}}}, where \eqn{R_{i}} is the
\eqn{i^{th}} rank (in a decreasing order)}
\item{nearby.distance.max}{the maximum distance between genes and GR.
Only those genes no far way from this distance will be considered as
seed genes. This parameter will influence the distance-component
weights calculated for nearby GR per gene}
\item{nearby.decay.kernel}{a character specifying a decay kernel
function. It can be one of 'slow' for slow decay, 'linear' for linear
decay, and 'rapid' for rapid decay. If no distance weight is used,
please select 'constant'}
\item{nearby.decay.exponent}{a numeric specifying a decay exponent. By
default, it sets to 2}
\item{networks}{the built-in network. For direct (pathway-merged)
interactions sourced from KEGG, it can be 'KEGG' for all,
'KEGG_metabolism' for pathways grouped into 'Metabolism',
'KEGG_genetic' for 'Genetic Information Processing' pathways,
'KEGG_environmental' for 'Environmental Information Processing'
pathways, 'KEGG_cellular' for 'Cellular Processes' pathways,
'KEGG_organismal' for 'Organismal Systems' pathways, and 'KEGG_disease'
for 'Human Diseases' pathways. 'REACTOME' for protein-protein
interactions derived from Reactome pathways. Pathways Commons
pathway-merged network from individual sources, that is,
"PCommonsDN_Reactome" for those from Reactome}
\item{seed.genes}{logical to indicate whether the identified network is
restricted to seed genes (ie input genes with the significance level). By
default, it sets to true}
\item{subnet.significance}{the given significance threshold. By
default, it is set to NULL, meaning there is no constraint on
nodes/genes. If given, those nodes/genes with p-values below this are
considered significant and thus scored positively. Instead, those
p-values above this given significance threshold are considered
insignificant and thus scored negatively}
\item{subnet.size}{the desired number of nodes constrained to the
resulting subnet. If it is not null, a wide range of significance
thresholds will be scanned to find the optimal significance threshold
leading to the desired number of nodes in the resulting subnet.
Notably, the given significance threshold will be overwritten by this
option}
\item{ontologies}{the ontologies supported currently. It can be 'AA'
for AA-curated pathways, KEGG pathways (including 'KEGG' for all,
'KEGGmetabolism' for 'Metabolism' pathways, 'KEGGgenetic' for 'Genetic
Information Processing' pathways, 'KEGGenvironmental' for
'Environmental Information Processing' pathways, 'KEGGcellular' for
'Cellular Processes' pathways, 'KEGGorganismal' for 'Organismal
Systems' pathways, and 'KEGGdisease' for 'Human Diseases' pathways),
'REACTOME' for REACTOME pathways or 'REACTOME_x' for its sub-ontologies
(where x can be 'CellCellCommunication', 'CellCycle',
'CellularResponsesToExternalStimuli', 'ChromatinOrganization',
'CircadianClock', 'DevelopmentalBiology', 'DigestionAndAbsorption',
'Disease', 'DNARepair', 'DNAReplication',
'ExtracellularMatrixOrganization', 'GeneExpression(Transcription)',
'Hemostasis', 'ImmuneSystem', 'Metabolism', 'MetabolismOfProteins',
'MetabolismOfRNA', 'Mitophagy', 'MuscleContraction', 'NeuronalSystem',
'OrganelleBiogenesisAndMaintenance', 'ProgrammedCellDeath',
'Reproduction', 'SignalTransduction', 'TransportOfSmallMolecules',
'VesicleMediatedTransport')}
\item{size.range}{the minimum and maximum size of members of each term
in consideration. By default, it sets to a minimum of 10 but no more
than 2000}
\item{min.overlap}{the minimum number of overlaps. Only those terms
with members that overlap with input data at least min.overlap (3 by
default) will be processed}
\item{fdr.cutoff}{fdr cutoff used to declare the significant terms. By
default, it is set to 0.05}
\item{crosstalk.top}{the number of the top paths will be returned. By
default, it is NULL meaning no such restrictions}
\item{glayout}{either a function or a numeric matrix configuring how
the vertices will be placed on the plot. If layout is a function, this
function will be called with the graph as the single parameter to
determine the actual coordinates. This function can be one of
"layout_nicely" (previously "layout.auto"), "layout_randomly"
(previously "layout.random"), "layout_in_circle" (previously
"layout.circle"), "layout_on_sphere" (previously "layout.sphere"),
"layout_with_fr" (previously "layout.fruchterman.reingold"),
"layout_with_kk" (previously "layout.kamada.kawai"), "layout_as_tree"
(previously "layout.reingold.tilford"), "layout_with_lgl" (previously
"layout.lgl"), "layout_with_graphopt" (previously "layout.graphopt"),
"layout_with_sugiyama" (previously "layout.sugiyama"),
"layout_with_dh" (previously "layout.davidson.harel"),
"layout_with_drl" (previously "layout.drl"), "layout_with_gem"
(previously "layout.gem"), "layout_with_mds", and
"layout_as_bipartite". A full explanation of these layouts can be found
in \url{http://igraph.org/r/doc/layout_nicely.html}}
\item{verbose}{logical to indicate whether the messages will be
displayed in the screen. By default, it sets to true for display}
\item{RData.location}{the characters to tell the location of built-in
RData files. See \code{\link{xRDataLoader}} for details}
}
\value{
an object of class "cPath", a list with following components:
\itemize{
\item{\code{ig_paths}: an object of class "igraph". It has graph
attribute (enrichment, and/or evidence, gp_evidence and membership if
entity is 'GR'), node attributes (crosstalk)}
\item{\code{gp_paths}: a 'ggplot' object for pathway crosstalk
visualisation}
\item{\code{gp_heatmap}: a 'ggplot' object for pathway member gene
visualisation}
\item{\code{ig_subg}: an object of class "igraph".}
}
}
\description{
\code{xCrosstalkGenes} is supposed to identify maximum-scoring pathway
crosstalk from an input graph with the node information on the
significance (measured as p-values or fdr). It returns an object of
class "cPath".
}
\examples{
\dontrun{
# Load the XGR package and specify the location of built-in data
library(XGR)
RData.location <- "http://galahad.well.ox.ac.uk/bigdata/"
# 1) at the gene level
data(Haploid_regulators)
## only PD-L1 regulators and their significance info (FDR)
data <- subset(Haploid_regulators, Phenotype=='PDL1')[,c('Gene','FDR')]
## pathway crosstalk
cPath <- xCrosstalk(data, entity="Gene", network="KEGG",
subnet.significance=0.05, subnet.size=NULL,
ontologies="KEGGenvironmental", RData.location=RData.location)
cPath
## visualisation
pdf("xCrosstalk_Gene.pdf", width=7, height=8)
gp_both <-
gridExtra::grid.arrange(grobs=list(cPath$gp_paths,cPath$gp_heatmap),
layout_matrix=cbind(c(1,1,1,1,2)))
dev.off()
# 2) at the genomic region (SNP) level
data(ImmunoBase)
## all ImmunoBase GWAS SNPs and their significance info (p-values)
ls_df <- lapply(ImmunoBase, function(x) as.data.frame(x$variant))
df <- do.call(rbind, ls_df)
data <- unique(cbind(GR=paste0(df$seqnames,':',df$start,'-',df$end),
Sig=df$Pvalue))
## pathway crosstalk
df_xGenes <- xGR2xGenes(data[as.numeric(data[,2])<5e-8,1],
format="chr:start-end", crosslink="PCHiC_combined", scoring=T,
RData.location=RData.location)
mSeed <- xGR2xGeneScores(data, significance.threshold=5e-8,
crosslink="PCHiC_combined", RData.location=RData.location)
subg <- xGR2xNet(data, significance.threshold=5e-8,
crosslink="PCHiC_combined", network="KEGG", subnet.significance=0.1,
RData.location=RData.location)
cPath <- xCrosstalk(data, entity="GR", significance.threshold=5e-8,
crosslink="PCHiC_combined", networks="KEGG", subnet.significance=0.1,
ontologies="KEGGenvironmental", RData.location=RData.location)
cPath
## visualisation
pdf("xCrosstalk_SNP.pdf", width=7, height=8)
gp_both <-
gridExtra::grid.arrange(grobs=list(cPath$gp_paths,cPath$gp_heatmap),
layout_matrix=cbind(c(1,1,1,1,2)))
dev.off()
# 3) at the genomic region (without the significance info) level
Age_CpG <- xRDataLoader(RData.customised='Age_CpG',
RData.location=RData.location)[-1,1]
CgProbes <- xRDataLoader(RData.customised='CgProbes',
RData.location=RData.location)
ind <- match(Age_CpG, names(CgProbes))
gr_CpG <- CgProbes[ind[!is.na(ind)]]
data <- xGRcse(gr_CpG, format='GRanges')
## pathway crosstalk
df_xGenes <- xGR2xGenes(data, format="chr:start-end",
crosslink="PCHiC_combined", scoring=T, RData.location=RData.location)
subg <- xGR2xNet(data, crosslink="PCHiC_combined", network="KEGG",
subnet.significance=0.1, RData.location=RData.location)
cPath <- xCrosstalk(data, entity="GR", crosslink="PCHiC_combined",
networks="KEGG", subnet.significance=0.1,
ontologies="KEGGenvironmental", RData.location=RData.location)
cPath
}
}
\seealso{
\code{\link{xDefineNet}}, \code{\link{xCombineNet}},
\code{\link{xSubneterGenes}}, \code{\link{xGR2xNet}},
\code{\link{xEnricherGenesAdv}}, \code{\link{xGGnetwork}},
\code{\link{xHeatmap}}
}
|
0f072908ce8becb843e93f69dffda63522f66f3c | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.analytics/man/glue_batch_delete_table_version.Rd | 18081ae3190d80546a201a5503ba11772941e0e2 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,111 | rd | glue_batch_delete_table_version.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_batch_delete_table_version}
\alias{glue_batch_delete_table_version}
\title{Deletes a specified batch of versions of a table}
\usage{
glue_batch_delete_table_version(
CatalogId = NULL,
DatabaseName,
TableName,
VersionIds
)
}
\arguments{
\item{CatalogId}{The ID of the Data Catalog where the tables reside. If none is provided,
the Amazon Web Services account ID is used by default.}
\item{DatabaseName}{[required] The database in the catalog in which the table resides. For Hive
compatibility, this name is entirely lowercase.}
\item{TableName}{[required] The name of the table. For Hive compatibility, this name is entirely
lowercase.}
\item{VersionIds}{[required] A list of the IDs of versions to be deleted. A \code{VersionId} is a string
representation of an integer. Each version is incremented by 1.}
}
\description{
Deletes a specified batch of versions of a table.
See \url{https://www.paws-r-sdk.com/docs/glue_batch_delete_table_version/} for full documentation.
}
\keyword{internal}
|
375b0732a51cb993046ea5399084217bf5e727e2 | 530a21a9c80490b10be949f023b17bc56cd2ed87 | /Statistics/modules/stats_test32_anova_for_selection_error_and_bubblesize.R | fc942416e6c2f726afeab523e3039c3577d20de9 | [
"MIT"
] | permissive | ssafty/graph-viz-eye-tracker | 86778ff7f211c1d48a9b46888197f5304bc8405f | f552b09f704e52a9927ca123453b84cfaad673a6 | refs/heads/master | 2023-07-12T23:51:48.023880 | 2017-05-05T11:23:20 | 2017-05-05T11:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,657 | r | stats_test32_anova_for_selection_error_and_bubblesize.R | ######## Statistical tests for SelectionError ##################### for 'BubbleSize'
###################################################################
# Repeated-measures ANOVA of SelectionError across the three
# BubbleSize conditions (Large / Medium / Small), including
# Mauchly's sphericity test and the GG/HF corrections.
#
# Relies on the global `marg_PB_data_frame` (columns include BubbleSize,
# SelectionError, and — per the drops below — SelectedTime and
# CorrectedSelectedTime) and on car::Anova().
# 1. ANOVA with the Sphericity test
df_anova <- marg_PB_data_frame
df_anova[5] <- NULL # remove CorrectedSelectedTime; we only analyze SelectionError
df_anova[3] <- NULL # remove SelectedTime; we only analyze SelectionError
# One column of SelectionError per BubbleSize condition (wide layout, as
# required for a multivariate lm fed to a repeated-measures Anova).
df_anova_matrix <- with(df_anova,
                        cbind(
                          SelectionError[BubbleSize == "Large"],
                          SelectionError[BubbleSize == "Medium"],
                          SelectionError[BubbleSize == "Small"]
                        )
)
df_anova_model <- lm(df_anova_matrix ~ 1)  # intercept-only multivariate model
df_anova_design <- factor(c("Large", "Medium", "Small"))
# Type-III tests require sum-to-zero contrasts.
options(contrasts = c("contr.sum", "contr.poly"))
df_anova_aov <- Anova(df_anova_model, idata = data.frame(df_anova_design),
                      idesign = ~df_anova_design, type = "III")
summary(df_anova_aov, multivariate = FALSE)  # FALSE, not F: F/T are reassignable
# Reference output captured from a previous run, kept for comparison:
xxxx <- "
Univariate Type III Repeated-Measures ANOVA Assuming Sphericity
SS num Df Error SS den Df F Pr(>F)
(Intercept) 0.32246 1 0.35068 11 10.1147 0.008755 **
df_anova_design 0.01853 2 0.31824 22 0.6404 0.536635
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Mauchly Tests for Sphericity
Test statistic p-value
df_anova_design 0.84712 0.43623
Greenhouse-Geisser and Huynh-Feldt Corrections
for Departure from Sphericity
GG eps Pr(>F[GG])
df_anova_design 0.86739 0.5167
HF eps Pr(>F[HF])
df_anova_design 1.015483 0.5366346
"
# Tidy up intermediates.
rm(df_anova, df_anova_aov, df_anova_design, df_anova_matrix, df_anova_model)
rm(xxxx) |
27fd75470e1d1329415f0bda5bafd1d17cca3983 | 752cffe764883b04304591b87c4d76848d60626d | /src/generator.R | c7a437e1e3d5bf1864cac8f5f0acc287ea383c8e | [
"MIT"
] | permissive | samtaylor54321/dog-gans | d7a979f42d07dd0ee6fbe5dc91104c220f2fc427 | e1094a2faaad69305f84bb7e12b1758a77c8cc1c | refs/heads/master | 2022-11-18T06:57:03.046565 | 2020-07-15T20:37:41 | 2020-07-15T20:37:41 | 275,195,757 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,578 | r | generator.R | library(keras)
library(magrittr)
library(R6)
# GAN generator network wrapped in an R6 class.
#
# The model maps a latent noise vector of length `latent_dim` to a
# 64x64x`channels` image: a dense layer produces a 32x32x128 feature map,
# a single transposed convolution (stride 2) upsamples it to 64x64, and a
# final tanh convolution emits `channels` planes with values in (-1, 1).
#
# Usage: gen <- Generator$new(latent_dim = 32, channels = 3)
#        gen$generator   # the assembled keras model
Generator <- R6Class("Generator",
  public = list(
    # The assembled keras model; populated by initialize().
    generator = NA,

    # Build the generator graph.
    #
    # latent_dim: length of the latent input noise vector.
    # channels:   number of image channels to emit (e.g. 3 for RGB).
    initialize = function(latent_dim, channels) {
      private$latent_dim <- latent_dim
      private$channels <- channels
      private$generator_input <- layer_input(shape = private$latent_dim)
      private$generator_output <- private$generator_input %>%
        # Project the latent vector up to a 32x32x128 tensor.
        layer_dense(units = 128 * 32 * 32) %>%
        layer_activation_leaky_relu() %>%
        layer_reshape(target_shape = c(32, 32, 128)) %>%
        layer_conv_2d(filters = 256, kernel_size = 5,
                      padding = "same") %>%
        layer_activation_leaky_relu() %>%
        # Upsample 32x32 -> 64x64.
        layer_conv_2d_transpose(filters = 256, kernel_size = 4,
                                strides = 2, padding = "same") %>%
        layer_activation_leaky_relu() %>%
        layer_conv_2d(filters = 256, kernel_size = 5,
                      padding = "same") %>%
        layer_activation_leaky_relu() %>%
        layer_conv_2d(filters = 256, kernel_size = 5,
                      padding = "same") %>%
        layer_activation_leaky_relu() %>%
        # tanh keeps the output in (-1, 1).
        layer_conv_2d(filters = private$channels, kernel_size = 7,
                      activation = "tanh", padding = "same")
      self$generator <- keras_model(private$generator_input,
                                    private$generator_output)
    }
  ),
  private = list(
    latent_dim = NA,
    channels = NA,
    generator_input = NA,
    generator_output = NA
  )
)
|
390b3489a64078d1fe787a4325acd04015d1004b | 26cb660689bc2e02cacb4a0dc74941fe48465ee8 | /Proteomics/explore_data.R | e6d292eff3ec3334d2621d2d02720a22c24a7574 | [] | no_license | rnabioinfor/METTL1_m7G | 8f73e58b74a0e57622eb81f9090ba6db2822e871 | 70b5ba41032db5b53ad872de3b65c144ca67c058 | refs/heads/master | 2023-05-24T13:17:58.301154 | 2021-06-11T03:58:19 | 2021-06-11T03:58:19 | 375,522,246 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,507 | r | explore_data.R | ###########################################
# fischerlab.org
# Explore SILAC data
# ESF 2020-09-24
# Explore SILAC data
# Tmp script - no repository yet
###########################################
# Load dependencies:
#source("~/R-helper-functions/mypairs.R")
library("gplots")
library("stringr")
library("limma")
library("dplyr")
library("readxl")
library(data.table)
suppressPackageStartupMessages(library("gridExtra"))
library(ggpubr)
# Get and set working directory
# list PD protein file names from "proteins" subfolder
upgenes <- list()
downgenes <- list()
plotlist <- list()
results <- "results_no_less_3"
for(i in c("setA","setB","setC","setD")) {
message(i)
# Load data
D.exp_A <- as.data.frame(read.delim(paste0("./data/", i, ".txt"), sep="\t", na = "NaN"))
#### Explore dataset A ####
#Filter data to include only data with a minimum of 2 peptides (3 would be better)
sel <- D.exp_A$Rep1.Number.of.peptides >= 3 & D.exp_A$Rep2.Number.of.peptides >= 3 & D.exp_A$Rep3.Number.of.peptides >= 3
### Total of 3741 proteins with min of 2 peptides
D.exp_A <- D.exp_A[sel,]
# Explore data quality
# create matrix with heavy to light ratios D - include mean/median at this point
D <- as.matrix(D.exp_A[,c(5,6,8,9,11,12)])
rownames(D) <- D.exp_A$Protein.Id
#Create log2 transformed matrix
A <- log2(D)
#Check cross-correlation
# log2 transformed ratios
#mypairs(A)
#Modest correlation
AA <- as.data.table(A)
rp1 = ggscatter(AA, x = "Rep1.sum.H_L.ratio", y = "Rep2.sum.H_L.ratio", size = 2, alpha= 0.6, add = "reg.line", add.params = list(color = "#00AFBB", fill = "lightgray"), conf.int = TRUE) + labs(x=paste0("Rep1.sum.H_L.ratio"),y=paste0("Rep2.sum.H_L.ratio")) +stat_cor(method = "pearson", label.x.npc = 0.2, label.y.npc = 0.8)
rp2 = ggscatter(AA, x = "Rep2.sum.H_L.ratio", y = "Rep3.sum.H_L.ratio", size = 2, alpha= 0.6, add = "reg.line", add.params = list(color = "#00AFBB", fill = "lightgray"), conf.int = TRUE) + labs(x=paste0("Rep2.sum.H_L.ratio"),y=paste0("Rep3.sum.H_L.ratio")) +stat_cor(method = "pearson", label.x.npc = 0.2, label.y.npc = 0.8)
rp3 = ggscatter(AA, x = "Rep1.sum.H_L.ratio", y = "Rep3.sum.H_L.ratio", size = 2, alpha= 0.6, add = "reg.line", add.params = list(color = "#00AFBB", fill = "lightgray"), conf.int = TRUE) + labs(x=paste0("Rep1.sum.H_L.ratio"),y=paste0("Rep3.sum.H_L.ratio")) +stat_cor(method = "pearson", label.x.npc = 0.2, label.y.npc = 0.8)
plotlist[["a"]] = rp1
plotlist[['b']] = rp2
plotlist[['c']] = rp3
glist <- lapply(plotlist, ggplotGrob)
ggsave(paste0(results,"/scatter_",i,".pdf"), marrangeGrob(grobs = glist, layout_matrix =matrix(1:3, nrow = 1,ncol=3, byrow=TRUE)),width=15,height=6)
## Data does not look terrible, but after applying filter for 2 peptides only left with few proteins.
## Maybe filter later for outliers
## Correlation is modest. With 2 peptides no advantage for median so go with sum
#subset to only sum
A <- A[,c(1,3,5)]
#normalize data
plotDensities(A)
Anorm <- normalizeBetweenArrays(A)
plotDensities(Anorm)
#mypairs(Anorm)
### Apply moderated t test
fit <- lmFit(Anorm)
fit <- eBayes(fit)
tt <- topTable(fit, number=nrow(Anorm))
D.exp_AA <- as.data.table(D.exp_A,keep.rownames=T)
D.exp_AA <- D.exp_AA[,.(ID=Protein.Id,Gene.Symbol,Description)]
ttt <- as.data.table(tt,keep.rownames=T)
setnames(ttt,"rn","ID")
ttt <- merge(D.exp_AA,ttt,by="ID",all.y=T)
write.csv(ttt, file = paste0(results,"/hitlist_heavy_to_light_",i,".csv"), row.names = T)
upgenes[[i]] <- ttt[logFC >= log2(1.2) & adj.P.Val < 0.05,ID]
downgenes[[i]] <- ttt[logFC <= -log2(1.2) & adj.P.Val < 0.05,ID]
# Set thresholds for graphs
pval <- 0.05
lfc <- 1
ttt <- ttt[!is.na(logFC),]
hits <- ttt$logFC >= lfc & ttt$P.Value <= pval | ttt$logFC <= -lfc & ttt$P.Value <= pval
maxFC <- abs(max(ttt$logFC))+2
maxP <- -log10(min(ttt$P.Value))+2
# volcano plot
pdf(file = paste0(results,"/volcano_heavy_to_light_",i,".pdf"), width = 6, height = 6)
plot(x=ttt$logFC,
y=-log10(ttt$P.Value), # data in the limma table is already log2 transformed
ylab="-log10 P value", # x-axis label
xlab=paste("log2FC Heavy / Light"), # y-axis label
cex=0.8, # point size
xlim=c(-maxFC,maxFC),
ylim=c(0,maxP),
pch=20, # point style
col=ifelse(hits, "sienna1", "#22222222"))
abline(h=-log10(pval), lty=2) # add vertical dashed line for p-value cutoff
abline(v=c(-1,1)*(lfc), lty=2) # add two horizontal dashed lines for log fold change cutoffs
if (sum(hits)>0){ # adds gene symbol as a text label to points if they are beyond the cutoffs
text(y = -log10(ttt[hits,P.Value]), # x coordinate of label
x = ttt[hits,logFC], # y coordinate of label
adj = c(-0.25,0.55), # adjustment to offset label from point
labels=ttt[hits,Gene.Symbol], # character strings to be used as labels
col="black", # text color
cex=0.6)} # text size
dev.off()
#### Create a heatmap plot summarizing the data
#create color palette from green to red
hits2 <- rownames(A) %in% ttt[hits, ID]
#Create matrix for heatmap
m <- A[hits2,]
sel <- rownames(m) != ""
m <- m[sel,]
m <- m[!(is.infinite(m[,1]) | is.infinite(m[,2]) | is.infinite(m[,3])),]
my_palette <- colorRampPalette(c("royalblue1", "black", "sienna1"))(n=299)
### Heatmap for % RA
col_breaks = c(seq(-0.75,-0.25, length=100), # for green
seq(-0.24, 0.24, length=100), # for black
seq(0.25, 0.75, length=100)) # for red
pdf(file = paste0(results,"/heatmap_heavy_to_light_",i,".pdf"), width = 6, height = 6)
library(ggplot2)
library(gplots)
heatmap.2(m,
na.rm = T,
#cellnote = format(round(a, 2), nsmall = 2),
main = "Relative abundance",
notecol = "black",
#density.info = "none",
trace = "none",
margins = c(12,9),
col=my_palette,
# breaks = col_breaks,
#hclustfun = function(x) hclust(a, method ="complete"),
dendrogram = c("both"))
dev.off()
rm(my_palette,col_breaks)
}
library(VennDiagram)
library(grid)
library(gridBase)
library(lattice)
# Four-set Venn diagram of the up-/down-regulated overlaps across sets,
# written to <results>/updown.veen.pdf.
# NOTE(review): "veen" in the output filename looks like a typo for
# "venn", but it is kept as-is since downstream steps may expect it.
temp <- venn.diagram(
  list(setA_up = upgenes[["setA"]],
       setB_up = upgenes[["setB"]],
       setC_down = downgenes[["setC"]],
       setD_down = downgenes[["setD"]]),
  fill = c("green", "yellow", "red", "blue"),
  alpha = c(0.5, 0.5, 0.5, 0.5),
  cex = 1,
  cat.fontface = 2,
  lty = 2,
  filename = NULL,
  category.names = c('setA_up', 'setB_up', 'setC_down', 'setD_down')
)
plot.new()
pdf(paste0(results, "/updown.veen.pdf"), width = 6, height = 6)
grid.draw(temp)
dev.off()
|
af6ffca55730ab1ada0feabf7d1d323587fb23fa | 4bc7519c101f9f8ed2f9f49ebf9414836b60f574 | /Introduction to Probability and Statistics/06/figures/pValueCPRStudySmallSampleAnalysisInSmallSampleSection/pValueCPRStudySmallSampleAnalysisInSmallSampleSection.R | d8a9e6e2518dce5b64dc3d3470a09898c49e4b53 | [] | no_license | nishanmudalige/ebooks | dcbf11662c511abd2be8669f098a6751d1f794ab | abfeed951a14415c5967ca21cf73cc794fc6c9a3 | refs/heads/master | 2023-05-30T07:12:15.349608 | 2023-05-20T22:19:26 | 2023-05-20T22:19:26 | 287,881,227 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,519 | r | pValueCPRStudySmallSampleAnalysisInSmallSampleSection.R | library(openintro)
data(COL)
# Permutation null distribution for the CPR study: 50 control / 40
# treatment patients, with 11 and 14 survivors respectively.  Group
# labels are reshuffled n.perm times and the difference in survival
# proportions (treatment - control) recorded each time.
set.seed(1)
group <- rep(1:2, c(50, 40))
outcome <- c(rep(c("s", "d"), c(11, 39)), rep(c("s", "d"), c(14, 26)))
n.perm <- 10^5
null.diffs <- vapply(seq_len(n.perm), function(perm) {
  shuffled <- sample(group)
  surv.ctrl <- sum(outcome[shuffled == 1] == "s") / 50
  surv.trmt <- sum(outcome[shuffled == 2] == "s") / 40
  surv.trmt - surv.ctrl
}, numeric(1))
# One-sided p-value: proportion of permuted differences above 0.13.
sum(null.diffs > 0.1299) / n.perm
myPDF("pValueCPRStudySmallSampleAnalysisInSmallSampleSection.pdf",
      5 * 1.35, 2.3 * 1.35, mar = c(2, 2.5, 0.5, 0.5))
hist(null.diffs, breaks = seq(-0.4, 0.4, 0.02), col = COL[7, 3],
     main = "", xlab = "Differences under the null hypothesis",
     ylab = "", axes = FALSE)
axis(1)
axis(2, at = (0:3) * n.perm / 20, labels = c(0, NA, NA, 3) / 20)
# Shade the upper tail beyond the observed difference.
hist(null.diffs[null.diffs > 0.1299], breaks = seq(-0.4, 0.4, 0.02),
     col = COL[1], add = TRUE)
# (For a two-sided version, also shade null.diffs < -0.1299 and mark -0.13.)
abline(h = 0)
lines(rep(0.13, 2), c(0, 3) * n.perm / 25, lty = 3, lwd = 1.7)
text(0.13, 3 * n.perm / 25, "0.13", pos = 3, cex = 0.7)
dev.off()
#N <- 50000
#d <- rpois(N, 3)
#d <- d - mean(d)
#par(mfrow=2:1)
#dd <- sample(d, N/10)
#pv <- rep(NA, N/10)
#for(i in 1:(N/10)){
# if(dd[i] < 0){
# pv[i] <- 2*sum(d <= dd[i]) / N
# } else {
# pv[i] <- 2*sum(d >= dd[i]) / N
# }
#}
#br <- rep(NA, 200)
#for(j in 1:200){
# br[j] <- sum(pv < 0.01*j)/N*10
#}
#plot(br)
#abline(0, 1/100)
#dd <- sample(d, N/10)
#pv <- rep(NA, N/10)
#for(i in 1:(N/10)){
# pv[i] <- sum(abs(d) >= abs(dd[i])) / N
#}
#br <- rep(NA, 200)
#for(j in 1:200){
# br[j] <- sum(pv < 0.01*j)/N*10
#}
#plot(br)
#abline(0, 1/100)
|
e0a3a83bfdc8ff22ceee0c3219b2dd02824eacfb | a7dd545bd4529bce3364fbd8078e26ad86499aea | /PufferSphere/ICOADS3/old/obs_3.0.R | 6795e0d57058c0bb0f110e13e212d144f0ef2061 | [] | no_license | philip-brohan/weather.case.studies | 335d15e4bc183f0139b56d411cc016f98b7be0b0 | 2139647d51156e1cd7f227b088fdc66ea079920e | refs/heads/master | 2021-04-18T20:21:05.017207 | 2018-06-19T16:36:45 | 2018-06-19T16:36:45 | 42,944,588 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,404 | r | obs_3.0.R | #!/usr/bin/Rscript --no-save
# Global obs coverage - icoads3.0 and ICOADS2.5
# Puffersphere version.
library(GSDF)
library(GSDF.WeatherMap)
library(parallel)
library(lubridate)
library(IMMA)
year<-1800
month<-1
day<-2
hour<-0
n.total<-as.integer(365.25*(150)-4) # Total number of days to be rendered
Imagedir<-sprintf("%s/images/icoads_3.0",Sys.getenv('SCRATCH'))
if(!file.exists(Imagedir)) dir.create(Imagedir,recursive=TRUE)
datadir<-'/data/local/hadpb'
use.cores<-7
c.date<-ymd_hms(sprintf("%04d/%02d/%02d %02d:00:00",year,month,day,hour))
Options<-WeatherMap.set.option(NULL)
Options<-WeatherMap.set.option(Options,'lat.min',-90)
Options<-WeatherMap.set.option(Options,'lat.max',90)
Options<-WeatherMap.set.option(Options,'lon.min',-190)
Options<-WeatherMap.set.option(Options,'lon.max',190)
Options$vp.lon.min<- -180
Options$vp.lon.max<- 180
Options<-WeatherMap.set.option(Options,'wrap.spherical',T)
Options<-WeatherMap.set.option(Options,'show.mslp',F)
Options<-WeatherMap.set.option(Options,'show.ice',T)
Options<-WeatherMap.set.option(Options,'show.obs',T)
Options<-WeatherMap.set.option(Options,'show.fog',F)
Options<-WeatherMap.set.option(Options,'show.wind',F)
Options<-WeatherMap.set.option(Options,'show.temperature',F)
Options<-WeatherMap.set.option(Options,'show.precipitation',F)
Options<-WeatherMap.set.option(Options,'temperature.range',12)
Options<-WeatherMap.set.option(Options,'obs.size',1.5)
Options<-WeatherMap.set.option(Options,'obs.colour',rgb(255,0,0,255,
maxColorValue=255))
Options<-WeatherMap.set.option(Options,'land.colour',rgb(0,0,0,255,
maxColorValue=255))
Options<-WeatherMap.set.option(Options,'sea.colour',rgb(100,100,100,255,
maxColorValue=255))
Options<-WeatherMap.set.option(Options,'pole.lon',160)
Options<-WeatherMap.set.option(Options,'pole.lat',35)
Options<-WeatherMap.set.option(Options,'background.resolution','high')
Options<-WeatherMap.set.option(Options,'ice.colour',Options$land.colour)
obs.cache<-list()
# Read an IMMA observation archive with simple memoisation, then subset
# the records to the half-open window [start, end).
#
# file.name: path of the (gzipped) IMMA archive to read.
# start, end: datetime bounds; rows with start <= date < end are kept.
#
# Parsed files are kept in the global `obs.cache` list; once more than
# three files are cached the whole cache is dropped to bound memory use.
# BUG FIX: the original assigned into `obs.cache` with `<-`, which only
# modified a function-local copy — the global cache was never updated,
# so every call re-parsed the file.  `<<-` writes the enclosing/global
# binding for real.  (Under mclapply() each forked worker maintains its
# own private copy of the cache, which is still correct.)
ReadObs.cache <- function(file.name, start, end) {
  if (is.null(obs.cache[[file.name]])) {
    if (length(names(obs.cache)) > 2) {
      obs.cache <<- list()  # evict everything before caching a new file
      gc(verbose = FALSE)
    }
    obs.cache[[file.name]] <<- ReadObs(file.name)
  }
  result <- obs.cache[[file.name]]
  # Observations with no report hour are treated as mid-day.
  missing.hr <- which(is.na(result$HR))
  if (length(missing.hr) > 0) result$HR[missing.hr] <- 12
  # HR is a fractional hour; split it into whole hours and minutes.
  result.dates <- ymd_hms(sprintf("%04d-%02d-%02d %02d:%02d:00",
                                  as.integer(result$YR),
                                  as.integer(result$MO),
                                  as.integer(result$DY),
                                  as.integer(result$HR),
                                  as.integer((result$HR %% 1) * 60)))
  keep <- which(result.dates >= start & result.dates < end)
  result[keep, ]
}
# Get observations from ICOADS
# Pull ICOADS 3.0 observations for a time window centred on the given
# hour.  `duration` (hours) sets the window width; the window may span a
# month boundary, in which case both monthly archive files are read and
# stacked on their common columns.  Relies on the global `datadir` and
# on ReadObs.cache().
ICOADS.3.0.get.obs <- function(year, month, day, hour, duration) {
  window.start <- ymd_hms(sprintf("%04d-%02d-%02d %02d:30:00",
                                  year, month, day, hour)) -
    hours(duration/2)
  window.end <- window.start + hours(duration)
  # One archive file per calendar month touched by the window.
  monthly.file <- function(when) {
    sprintf("%s/icoads_3.0/ICOADS_R3_Beta3_%04d%02d.dat.gz",
            datadir, as.integer(year(when)), as.integer(month(when)))
  }
  files <- unique(c(monthly.file(window.start), monthly.file(window.end)))
  result <- data.frame()
  for (archive in files) {
    chunk <- ReadObs.cache(archive, window.start, window.end)
    if (length(colnames(result)) == 0) {
      result <- chunk
    } else {
      # Keep only the columns the two months share before stacking.
      shared <- intersect(colnames(result), colnames(chunk))
      result <- rbind(result[, shared], chunk[, shared])
    }
  }
  # Fold longitudes onto (-180, 180].
  west <- which(result$LON > 180)
  if (length(west) > 0) result$LON[west] <- result$LON[west] - 360
  return(result)
}
land<-WeatherMap.get.land(Options)
# Render the observation-coverage map for one day and write it as a PNG
# into the global Imagedir.  `l.count` is the offset in days from the
# global start date `c.date`.  Days whose image file already exists and
# is non-empty are skipped, so an interrupted run can just be restarted.
plot.day <- function(l.count) {
  target.date <- c.date + days(l.count)
  yr <- year(target.date)
  mo <- month(target.date)
  dy <- day(target.date)
  hr <- 12  # always render the mid-day frame
  image.name <- sprintf("%04d-%02d-%02d:%02d.png", yr, mo, dy, hr)
  ifile.name <- sprintf("%s/%s", Imagedir, image.name)
  if (file.exists(ifile.name) && file.info(ifile.name)$size > 0) return()
  print(sprintf("%d %04d-%02d-%02d - %s", l.count, yr, mo, dy,
                Sys.time()))
  # Observations in a 72-hour window around the chosen hour.
  obs.ic <- ICOADS.3.0.get.obs(yr, mo, dy, hr, 72)
  png(ifile.name,
      width = 1080 * WeatherMap.aspect(Options),
      height = 1080,
      bg = Options$sea.colour,
      pointsize = 24,
      type = 'cairo')
  Options$label <- sprintf("%04d-%02d-%02d", yr, mo, dy)  # local copy only
  pushViewport(dataViewport(c(Options$vp.lon.min, Options$vp.lon.max),
                            c(Options$lat.min, Options$lat.max),
                            extension = 0))
  WeatherMap.draw.land(NULL, Options)
  if (length(obs.ic$LAT) > 0) {
    # WeatherMap.draw.obs expects Latitude/Longitude column names.
    obs.ic$Latitude <- obs.ic$LAT
    obs.ic$Longitude <- obs.ic$LON
    WeatherMap.draw.obs(obs.ic, Options)
  }
  upViewport()
  dev.off()
  gc(verbose = FALSE)
}
mclapply(seq(0,n.total),plot.day,mc.cores=use.cores,mc.preschedule=TRUE)
#lapply(seq(0,n.total),plot.day)
|
73ea2935c04e455d03b4fc27261a370fcfb57824 | 705a5ddabe1e92e803cf9db7872058fb96008131 | /old/misc/neuralnet.r | 322970d8d506464b68370f72eee673abbb9fdc4a | [] | no_license | MHOOO/axle | 39f45654191f8cf98aa7eec9a6f4f5d46c708d95 | 84c2f949b7acde3450f118146767cbb19481ea7b | refs/heads/master | 2021-04-09T15:28:03.772427 | 2013-01-14T09:05:33 | 2013-01-14T09:05:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,846 | r | neuralnet.r |
# neuralnet.r
x = c(x0, x1, x2, x3)
z1 = Theta1 %*% x
a1 = g(z1)
z2 = Theta1 %*% a1
a2 = g(z2) # a0 bias unit = 1
z3 = Theta2 %*% a2
a3 = g(z3)
# h(x) = a3
# cost function J(Theta):
cost <- function(Theta) {
- (1/m)
* ( Sum(i=1..m) (
Sum(k=1..K) (
yi_k * log( h(theta, xi)_k ) +
(1-yi_k) * log(1 - h(theta, xi)_k)
)
)
)
+ (lambda / 2 * m)
* ( Sum(l=1..L-1) (
Sum(i=1..s_l) (
Sum(j=1..(s_l+1) ) (
(Thetal_j)^2
)
)
)
)
}
# "error" of coast for al_j (unit j in layer l)
delta <- function(l, j) {
# unit j
# layer l
Sum(n=1..N) ( # N units in layer l+1
Thetal_(n, j) * delta(l+1, n)
)
}
gradApprox <- function(theta) {
for( i <- 1 to n) {
thetaPlus = theta;
thetaPlus(i) = thetaPlus(i) + EPSILON
thetaMinus = theta;
thetaMinus(i) = thetaMinus(i) + EPSILON
gradApprox(i) = (J(thetaPlus) - J(thetaMinus)) / (2 * EPSILON)
}
}
# gradient checking:
# compare gradApprox to DVec to help QA the implementation
# Overflow of implementation plan:
# 1 random initialization:
Theta1 = rand(10,11) * (2 * INIT_EPSILON ) - INIT_EPSILON;
# 2 implement FP to get h_theta(xi) for any xi
# 3 implement cost function J(Theta)
# 4 implement BP to compute partial derivatives
# d/dTheta^(l)_jk J(Theta)
for i = 1: m {
perform fp and bp using example (xi,yi)
get activations al and delta derms deltal for l = 2 .. L
Deltal := Deltal + delta^(l+1) * (a^(l))t
}
compute d/dTheta^(?)_jk * J(Theta)
# 5 using gradient checking to compare
# BP vs numerical estimation
# of gradient J(Theta)
# Then disable gradient checking code
#
# 6 Using gradient descent or advanced optimization method with BP to try to
# minimize J(Theta) as a function of parameters Theta
|
6cd09410d85bf923ada673b46a1ae01f163d9775 | 7899f5f9635b592fe4cdfc13b466db980f66ea5f | /R/download.R | 96619ffb76c33914e21cdf4337c4d9c819afe56b | [] | no_license | bquast/Making-Next-Billion-Demand-Access | b628d4be89e25516e7f097f792f54102c47c6664 | 5aa95bd2c1375d20f727788b28fc8adf264131bb | refs/heads/master | 2023-04-26T10:26:53.444860 | 2023-04-16T12:19:16 | 2023-04-16T12:19:16 | 45,461,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,939 | r | download.R | # download.R
# ------------------------------------------------------------------
# download files from www.datafirst.ac.za/dataportal
# NOTA BENE: must be logged in to portal in order to downlaod files
#
# Bastiaan Quast
# bquast@gmail.com
# skip line and add description of files ignored by git
# Append ignore rules so the downloaded data is never committed to git.
write('\n', file = '.gitignore', append = TRUE)
write('# data files', file = '.gitignore', append = TRUE)
write('data/', file = '.gitignore', append = TRUE)
write('\n', file = '.gitignore', append = TRUE)

# Make sure the download target exists before fetching anything;
# download.file() fails if the destination directory is missing.
if (!dir.exists('data')) dir.create('data')

# Download one archive from the DataFirst portal and unpack it into
# data/.  NB: you must be logged in to the portal or the download fails.
# Returns the destination path invisibly.
download.and.unzip <- function(url, destfile) {
  download.file(url = url, destfile = destfile)
  unzip(zipfile = destfile, exdir = 'data')
  invisible(destfile)
}

# Wave 1
## SAS version
sas1 <- 'data/nids-w1-2008-v5.3-20150619-sas.zip'
download.and.unzip('https://www.datafirst.uct.ac.za/dataportal/index.php/catalog/451/download/6036', sas1)
## SPSS version
spss1 <- 'data/nids-w1-2008-v5.3-20150619-spss.zip'
download.and.unzip('https://www.datafirst.uct.ac.za/dataportal/index.php/catalog/451/download/6037', spss1)
## Stata version (Stata 12)
stata1 <- 'data/nids-w1-2008-v5.3-20150619-stata12.zip'
download.and.unzip('https://www.datafirst.uct.ac.za/dataportal/index.php/catalog/451/download/6038', stata1)

# Wave 2
## SAS version
sas2 <- 'data/nids-w2-2010-2011-v2.3-20150619-sas.zip'
download.and.unzip('https://www.datafirst.uct.ac.za/dataportal/index.php/catalog/452/download/5999', sas2)
## SPSS version
spss2 <- 'data/nids-w2-2010-2011-v2.3-20150619-spss.zip'
download.and.unzip('http://datafirst.uct.ac.za/dataportal/index.php/catalog/452/download/6000', spss2)
## Stata version (Stata 12)
stata2 <- 'data/nids-w2-2010-2011-v2.3-20150619-stata12.zip'
download.and.unzip('http://datafirst.uct.ac.za/dataportal/index.php/catalog/452/download/6001', stata2)

# Wave 3
## SAS version
sas3 <- 'data/nids-w3-2012-v1.3-20150619-sas.zip'
download.and.unzip('http://datafirst.uct.ac.za/dataportal/index.php/catalog/453/download/7275', sas3)
## SPSS version — NOTE(review): this URL points at catalog/451 (the
## Wave 1 catalog) while the SAS link above uses catalog/453; it looks
## like a copy-paste slip — confirm the catalog id before relying on it.
spss3 <- 'data/nids-w3-2012-v1.3-20150619-spss.zip'
download.and.unzip('https://www.datafirst.uct.ac.za/dataportal/index.php/catalog/451/download/7276', spss3)
## Stata version — NOTE(review): same catalog/451 suspicion as above.
stata3 <- 'data/nids-w3-2012-v1.3-20150619-stata.zip'
download.and.unzip('https://www.datafirst.uct.ac.za/dataportal/index.php/catalog/451/download/7277', stata3)
|
356e2afef2ca3f192f486b9186ca4235f1321a31 | 838208376db364c7dd849ae138217e3e9ea419e4 | /SVR - SARIMA - NN Parallel SA.R | 3b4174b34f51616db82b110cdde99423547c7eee | [] | no_license | vcarlsberg/arimar | 87d9a55f22d2414cab3c1ddf2c29917b5183bf7e | 00b60ce21bc2b15cdbe0689661909b4a50878267 | refs/heads/master | 2020-12-27T11:13:36.816629 | 2020-06-22T09:23:06 | 2020-06-22T09:23:06 | 237,881,927 | 0 | 1 | null | 2020-04-10T06:02:42 | 2020-02-03T04:05:29 | R | UTF-8 | R | false | false | 3,074 | r | SVR - SARIMA - NN Parallel SA.R | #SVR - ARIMA - NN Parallel SA
library(forecast)
library(fable)
library(forecastHybrid)
library(nnet)
library(readxl)
library(GA)
library(Metrics)
library(tidyverse)
library(TSrepr)
library(svrpath)
library(e1071)
library(NMOF)
Dataset_Surabaya <- read_excel("C:/Users/asus/OneDrive - Institut Teknologi Sepuluh Nopember/Kuliah/Thesis/Dataset_Surabaya.xlsx")
data_outflow<-data.frame(tahun=Dataset_Surabaya[["Tahun"]],
bulan=Dataset_Surabaya[["Bulan"]],
y=Dataset_Surabaya[["K10000"]])
data_outflow$bulan<-match(data_outflow$bulan,month.abb)
data_outflow<-na.omit(data_outflow)
head<-head(data_outflow)
tail<-tail(data_outflow)
daftar.mape.mae.smape<-data.frame(fh=NULL,mape=NULL,mae=NULL,smape=NULL)
#daftar.mae<-data.frame(fh=NULL,mae=NULL)
#daftar.smape<-data.frame(fh=NULL,smape=NULL)
#daftar.mape<-rbind(daftar.mape,data.frame(fh=21,mape=12))
myts <- ts(data_outflow[["y"]],start=c(head[1,1], head[1,2]), end=c(2017, 12), frequency=12)
#myts <- ts(data_outflow_10000, frequency=12)
myts_2018<-ts(data_outflow[["y"]],start=c(2018, 1), end=c(2018, 12), frequency=12)
components.ts = decompose(myts)
plot(components.ts)
lambda <- BoxCox.lambda(myts,lower = 0)
# Objective for the SVR hyper-parameter grid search.
#
# x: numeric vector of length 2 holding log2-scale candidates, where
#    x[1] is log2(gamma) and x[2] is log2(cost) for a radial-kernel SVM.
# Returns the in-sample MAPE of the fitted SVM against `myts`.
#
# NOTE: relies on the globals `myts` (the training series) and
# `data_outflow` (its source data frame) defined earlier in the script.
testFun <- function(x) {
  # seq_along() instead of c(1:length(myts)): no redundant c(), and it
  # degrades safely (integer(0) rather than c(1, 0)) on an empty series.
  time_index <- seq_along(myts)
  fit <- svm(x = time_index, y = data_outflow$y[time_index],
             kernel = "radial", gamma = 2^x[1], cost = 2^x[2])
  mape(myts, fit$fitted)
}
# For each forecast horizon 1..12 months: fit ARIMA, tuned SVR and NNAR
# models on the training series, average their forecasts with equal 1/3
# weights, and record SMAPE/MAE/MAPE/RMSE against the 2018 hold-out.
for(x in c(1:12))
{
print(x)
forecast_horizon<-x
#arima
arima.model <- auto.arima(myts,trace=FALSE,seasonal = TRUE,
start.p=1,start.q=1,lambda = lambda)
fitted.arima<-arima.model[["fitted"]]
forecast.arima<-forecast(arima.model,h=forecast_horizon)
#svr grid search over (log2 gamma, log2 cost); NOTE: this exhaustive grid
#(101 x 21 SVR fits) is re-run on every loop iteration, which is slow.
levels <- list(a = -50:50, b = -10:10)
res <- gridSearch(testFun, levels)
svm_model <- svm(x=c(1:length(myts)),y=data_outflow$y[1:length(myts)],
kernel="radial",
gamma=2^res$minlevels[1],
cost = 2^res$minlevels[2])
fitted.svm<-ts(svm_model$fitted)
# Predict the next `forecast_horizon` time indices after the training data.
nd <- (length(myts)+1):(length(myts)+forecast_horizon)
forecast.svm<-predict(svm_model,newdata = data.frame(x=nd))
#nnetar (neural network autoregression); seed fixed for reproducibility
set.seed(34)
nnetar.model<-nnetar(myts,size = 30,lambda=lambda)
#CVar(myts,k=10,h=12,nnetar(myts,lambda=lambda))
forecast::accuracy(nnetar.model)
fitted.nnetar<-nnetar.model[["fitted"]]
forecast.nnetar<-forecast(nnetar.model,h=forecast_horizon)
# Equal-weight hybrid of the three point forecasts.
yhat<-1/3*forecast.svm+1/3*forecast.arima[["mean"]]+1/3*forecast.nnetar[["mean"]]
daftar.mape.mae.smape<-rbind(daftar.mape.mae.smape,
data.frame(fh=forecast_horizon,
smape=smape(myts_2018[1:forecast_horizon],yhat),
mae=mae(myts_2018[1:forecast_horizon],yhat),
mape=mape(myts_2018[1:forecast_horizon],yhat),
rmse=rmse(myts_2018[1:forecast_horizon],yhat)
)
)
}
# NOTE(review): Metrics::mase() is called with no arguments; mase() requires
# `actual` and `predicted`, so this line errors if executed.  Looks like a
# leftover from interactive exploration -- confirm and remove.
Metrics::mase()
|
0bbf6da367176b64d9df6b5ee83dff02ce1da3fa | d175703f8d1de8846380ae92af020ae70ed78843 | /ui.R | c9659e7d3840352191c8436907d1e082d45c401a | [] | no_license | jordaoalves/Analisar-Gratificacoes---IPERN | 3cc54846144f571c5e207bbaf800f32f8b4e0f7a | 298f85b85e7df2e73233e4c19ba11c5f2b5fe1ff | refs/heads/master | 2022-04-24T14:22:16.713522 | 2020-04-27T03:33:45 | 2020-04-27T03:33:45 | 259,187,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,116 | r | ui.R | useShinyjs()
# Shiny UI: shinydashboard layout for the IPERN gratification-analysis app.
# Three boxes: (1) file upload + report downloads, (2) report (Despacho)
# metadata inputs, (3) the pay-code (rubrica) checkbox list, rendered in two
# CSS columns and fully pre-selected.
dashboardPage(skin = "red",
              dashboardHeader(title = "Analisar Gratificações - IPERN", titleWidth = 320,
                tags$li(class = "dropdown",
                        tags$a(href="http://www.ipe.rn.gov.br/", target="_blank",
                          tags$img(height = "17px", alt="SNAP Logo", src='logoIpernPNG.png')
                        )
                ),
                tags$li(actionLink("openModal",
                  label = "", icon = icon("info")),
                  class = "dropdown")),
  dashboardSidebar(disable = TRUE),
  # Inline CSS: .multicol lays the checkbox group out in two columns.
  dashboardBody( tags$head(tags$style(HTML(".multicol{font-size:12px;
                 height:auto;
                 -webkit-column-count: 2;
                 -moz-column-count: 2;
                 column-count: 2;
                 }
                 div.checkbox {margin-top: 0px;}"))),
    fluidRow(
      useShinyjs(),
      # Box 1: PDF payslip uploads and generated-report downloads.
      box(
        title = "Entrada e saída dos arquivos"
        ,status = "primary"
        ,solidHeader = TRUE
        ,width = 2
        ,fileInput("file1",h4("Selecione os contracheques em PDF de 01/1995 a 07/2001"),
                   accept = c(".pdf"),multiple = TRUE
        )
        ,fileInput("file2",h4("Selecione os contracheques em PDF a partir de 08/2001"),
                   accept = c(".pdf"),multiple = TRUE
        )
        ,actionButton("start", "Start")
        ,tags$br()
        ,tags$br()
        ,downloadButton('baixarDespacho', 'Baixar Despacho')
        ,tags$br()
        ,radioButtons('format', NULL, c('HTML', 'Word'), selected = 'HTML',inline = TRUE)
        ,tags$br()
        ,downloadButton('baixarFichaFinanceira', 'Baixar Ficha Financeira')
        ,tags$br()
        ,tags$br()
        ,downloadButton('baixarRelatorioAuxiliar', 'Baixar Relatório Auxiliar')
      ),
      # Box 2: metadata fields embedded in the generated report.
      box(
        title = "Informações sobre o Despacho"
        ,status = "primary"
        ,solidHeader = TRUE
        ,width = 3
        ,textInput("periodoPesquisa", h4("Período de pesquisa dos dados:"),
                   value = "Set/1995 a Dez/2001", placeholder = "Digite o período da pesquisa dos dados")
        ,textInput("nProcesso", h4("Número do Processo:"),
                   value = "",
                   placeholder = "Digite o número do Processo")
        ,textInput("nomePessoa", h4("Nome do interessado:"),
                   value = "",
                   placeholder = "Digite o nome do interessado")
        ,selectInput("setorResponsavel", h4("Responsável pelo despacho:"),
                     choices = list("CCONTRI-IPERN" = "CCONTRI", "CFP-IPERN" = "CFP"), selected = "CCONTRI")
        ,selectInput("destinatario", h4("Destinatário do processo:"),
                     choices = list("Gabinete da Presidência do IPERN" = "ao Gabinete da Presidência do IPERN",
                                    "Procuradoria Geral do IPERN" = "à Procuradoria Geral do IPERN"),
                     selected = "à Procuradoria Geral do IPERN")
        ,dateInput("dataDespacho", h4("Data de criação do Despacho:"), value = NULL, min = NULL, max = NULL,
                   format = "dd/mm/yyyy", startview = "month", weekstart = 0,
                   language = "pt-BR")
      ),
      # Box 3: pay codes to analyse.  NOTE(review): the `selected=` vector
      # duplicates the choices list verbatim so every code starts checked;
      # keep the two lists in sync when editing.
      box(
        title = "Rúbricas das gratificações a serem analisadas"
        ,status = "primary"
        ,solidHeader = TRUE
        ,width = 7
        ,actionLink("selectall","Select All")
        ,tags$br()
        ,tags$br()
        ,tags$div(align = "left",
                  class = "multicol",
                  checkboxGroupInput("rubricas",NULL,
                                     c("GRATIFICACAO DE INSALUBRIDADE (cód. 113) < 08/01" = "113",
                                       "COMPL.SALARIAL DEC.6045 (cód. 185) < 08/01" = "185",
                                       "GRATIFICACAO PLANTAO (cód. 188) < 08/01" = "188",
                                       "GRATIFICACAO ADICIONAL NOTURNO (cód. 190) < 08/01" = "190",
                                       "GRATIFICACAO PERMAN.LEI 5.334 (cód. 191) < 08/01" = "191",
                                       "VANTAGEM PESSOAL LEI 6.192 Art. 11 (cód. 200) < 08/01" = "200",
                                       "GRADES - ART 1º LEI 6271 (cód. 208) < 08/01" = "208",
                                       "LIMINAR JUDICIAL (cód. 331) < 08/01" = "331",
                                       "GRAT. ASS. ESP. - GRAE (cód. 336) < 08/01" = "336",
                                       "VENCIMENTOS (cód. 401) < 08/01" = "401",
                                       "GRATIFICACAO DE INSALUBRIDADE (cód. 408) < 08/01" = "408",
                                       "GRATIFICACAO DE PLANTAO (cód. 441) < 08/01" = "441",
                                       "GRATIFICACAO DE ADICIONAL NOTURNO (cód. 442) < 08/01" = "442",
                                       "GRADES ART. 1 L 6271 (cód. 465) < 08/01" = "465",
                                       "GRAT. ASS. ESP. - GRAE (cód. 478) < 08/01" = "478",
                                       "GRAT ARC. (cód. 907) < 08/01" = "907",
                                       "GRATIFICACAO DE PLANTAO EM UNIDADE DE SAUDE (cód. 35) > 07/01" = "35",
                                       "GRATIFICACAO DE ADICIONAL NOTURNO (cód. 37) > 07/01" = "37",
                                       "GRATIFICACAO DE INSALUBRIDADE (cód. 47) > 07/01" = "47",
                                       "GRATIFICACAO DE DESEMPENHO EM SERVICO DE SAUDE - GRADES (cód. 51) > 07/01" = "51",
                                       "VANTAGEM PESSOAL DO ART 15 DA LC Nº 333/2006 (PCCR DA SESAP) (cód. 118) > 07/01" = "118",
                                       "PLANTAO EVENTUAL (cód. 131) > 07/01" = "131",
                                       "GDAAC INCORPORADA - GRAT DESEMP ATIV ALTA COMPLEXIDADE (cód. 162) > 07/01" = "162",
                                       "DIFERENCA DE NIVEL - VENCIMENTO (cód. 224) > 07/01" = "224",
                                       "GRATIFICACAO DE JORNADA ESPECIAL (cód. 291) > 07/01" = "291",
                                       "GRATIFICACAO ESPECIAL DE LOCALIZACAO GEOGRAFICA (cód. 293) > 07/01" = "293",
                                       "GRATIFICACAO ATIVIDADE ESTADUAL - GAEST (cód. 295) > 07/01" = "295",
                                       "GRAT DESEMPENHO DE ATIVIDADE DE ALTA COMPLEXIDADE - GDAAC (cód. 299) > 07/01" = "299",
                                       "INDENIZACAO (cód. 403) > 07/01" = "403"
                                     ),selected=c("GRATIFICACAO DE INSALUBRIDADE (cód. 113) < 08/01" = "113",
                                                  "COMPL.SALARIAL DEC.6045 (cód. 185) < 08/01" = "185",
                                                  "GRATIFICACAO PLANTAO (cód. 188) < 08/01" = "188",
                                                  "GRATIFICACAO ADICIONAL NOTURNO (cód. 190) < 08/01" = "190",
                                                  "GRATIFICACAO PERMAN.LEI 5.334 (cód. 191) < 08/01" = "191",
                                                  "VANTAGEM PESSOAL LEI 6.192 Art. 11 (cód. 200) < 08/01" = "200",
                                                  "GRADES - ART 1º LEI 6271 (cód. 208) < 08/01" = "208",
                                                  "LIMINAR JUDICIAL (cód. 331) < 08/01" = "331",
                                                  "GRAT. ASS. ESP. - GRAE (cód. 336) < 08/01" = "336",
                                                  "VENCIMENTOS (cód. 401) < 08/01" = "401",
                                                  "GRATIFICACAO DE INSALUBRIDADE (cód. 408) < 08/01" = "408",
                                                  "GRATIFICACAO DE PLANTAO (cód. 441) < 08/01" = "441",
                                                  "GRATIFICACAO DE ADICIONAL NOTURNO (cód. 442) < 08/01" = "442",
                                                  "GRADES ART. 1 L 6271 (cód. 465) < 08/01" = "465",
                                                  "GRAT. ASS. ESP. - GRAE (cód. 478) < 08/01" = "478",
                                                  "GRAT ARC. (cód. 907) < 08/01" = "907",
                                                  "GRATIFICACAO DE PLANTAO EM UNIDADE DE SAUDE (cód. 35) > 07/01" = "35",
                                                  "GRATIFICACAO DE ADICIONAL NOTURNO (cód. 37) > 07/01" = "37",
                                                  "GRATIFICACAO DE INSALUBRIDADE (cód. 47) > 07/01" = "47",
                                                  "GRATIFICACAO DE DESEMPENHO EM SERVICO DE SAUDE - GRADES (cód. 51) > 07/01" = "51",
                                                  "VANTAGEM PESSOAL DO ART 15 DA LC Nº 333/2006 (PCCR DA SESAP) (cód. 118) > 07/01" = "118",
                                                  "PLANTAO EVENTUAL (cód. 131) > 07/01" = "131",
                                                  "GDAAC INCORPORADA - GRAT DESEMP ATIV ALTA COMPLEXIDADE (cód. 162) > 07/01" = "162",
                                                  "DIFERENCA DE NIVEL - VENCIMENTO (cód. 224) > 07/01" = "224",
                                                  "GRATIFICACAO DE JORNADA ESPECIAL (cód. 291) > 07/01" = "291",
                                                  "GRATIFICACAO ESPECIAL DE LOCALIZACAO GEOGRAFICA (cód. 293) > 07/01" = "293",
                                                  "GRATIFICACAO ATIVIDADE ESTADUAL - GAEST (cód. 295) > 07/01" = "295",
                                                  "GRAT DESEMPENHO DE ATIVIDADE DE ALTA COMPLEXIDADE - GDAAC (cód. 299) > 07/01" = "299",
                                                  "INDENIZACAO (cód. 403) > 07/01" = "403"
                                                  )))
      )
    )
  )
) |
18654c5d19fc79368efbe101228ebbb1ad8bb3b9 | 7e5b6017b2c9ad669cdcf7ad709560a2ef78e94c | /code/packages_list.R | 01d086294ab425bdcc42093ea02fad6d42a61b49 | [] | no_license | acrossthetidyverse/tidycast | 676b5d9a24cdf8d68b9722019679c411ef232dd6 | 76df6739adea53a936dd36a3f8fac52bbb136baf | refs/heads/master | 2021-01-12T00:35:55.563019 | 2017-02-18T06:39:01 | 2017-02-18T06:39:01 | 78,745,553 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 420 | r | packages_list.R | tidypacks <-
c("tidyverse/broom",
"hadley/dplyr",
"wesm/feather",
"tidyverse/forcats",
"hadley/ggplot2",
"tidyverse/haven",
"hadley/httr",
"rstats-db/hms",
"jeroenooms/jsonlite",
"hadley/lubridate",
"tidyverse/magrittr",
"hadley/modelr",
"hadley/purrr",
"tidyverse/readr",
"hadley/readxl",
"tidyverse/stringr",
"tidyverse/tibble",
"hadley/rvest",
"tidyverse/tidyr",
"hadley/xml2")
|
72f59c3c8e67d78738c5c75d6a3ca09b24c219a3 | 11dd3782354ca82cc5dfc6996d3b707e5f563010 | /man/network_plot.Rd | 8b641ff784ce092e442d3dd509132fb5857b19d2 | [
"MIT",
"BSD-2-Clause"
] | permissive | pmartR/pmartRseq | 49010cceff1a173e924e7b467781cec86c0aaae6 | 75d573e528d14a6563a69be46c9a326c13423738 | refs/heads/master | 2020-12-31T07:33:06.348449 | 2018-02-09T00:39:18 | 2018-02-09T00:39:18 | 86,605,696 | 2 | 1 | BSD-2-Clause | 2018-02-09T00:39:19 | 2017-03-29T16:35:39 | R | UTF-8 | R | false | true | 1,657 | rd | network_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network_plot.R
\name{network_plot}
\alias{network_plot}
\title{Generate Network Plot}
\usage{
network_plot(netGraph, omicsData = NULL, modData = NULL,
colour = "Phylum", vsize = FALSE, legend.show = TRUE,
legend.pos = "bottomleft")
}
\arguments{
\item{netGraph}{an object of class 'networkGraph', created by \code{\link{pmartRseq_igraph}}}
\item{omicsData}{Optional, an object of the class 'seqData' usually created by \code{\link{as.seqData}}, if want to colour by taxonomy and/or scale vertices by abundance}
\item{modData}{Optional, an object of class 'modData', created by \code{\link{detect_modules}}, if want to colour by modules.}
\item{colour}{Optional, if desired, can colour vertices by a taxonomic level or 'Module' for module. Use 'NULL' if no colour is desired.}
\item{vsize}{Logical, should vertices be scaled by median abundance of taxa}
\item{legend.show}{Logical, should a legend be shown. Default is TRUE.}
\item{legend.pos}{Optional, if legend==TRUE, where to position the legend. Default is 'bottomleft'.}
}
\value{
A network graph.
}
\description{
This function generates a network plot for the network data.
}
\details{
A network graph is created for the network(s) that were generated.
}
\examples{
\dontrun{
library(mintJansson)
data(rRNA_data)
mynetwork <- network_calc(omicsData = rRNA_data)
mygraph <- pmartRseq_igraph(netData = mynetwork, coeff=0.6, pval=NULL, qval=0.05)
network_plot(omicsData = rRNA_data, netGraph = mygraph, colour = "Phylum", vsize = TRUE, legend.show = TRUE, legend.pos = "bottomleft")
}
}
\author{
Allison Thompson
}
|
438cedc611a1013996d16cf9d3bc5f13a1ba60a7 | b33f25e07cb53816d0609a9d25df7bcfb0d5a9c4 | /R/prepare_inputs.R | 6f8fb8d33c5b9a24f7f36265fe94dfccf9a04104 | [] | no_license | jaybee84/ceres | cdd53431902016d9253c2f7915ed4404b2294ddf | c57fd3f5ec07f9cdd82e79efe406c5d7acb310dd | refs/heads/master | 2023-03-16T09:58:22.875794 | 2019-03-21T14:20:29 | 2019-03-21T14:20:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,546 | r | prepare_inputs.R | #' Map guide to locus
#' Map guide to locus
#'
#' Writes the unique guide (sgRNA) sequences to a FASTA file, aligns them to
#' the reference genome with bowtie (exact matches, reporting all alignments),
#' converts the SAM output to BAM with samtools, and parses the alignments
#' with guideAlignments().
#'
#' @param guides character vector of guide sequences.
#' @param genome_id bowtie index / genome build identifier.
#' @param bowtie_exe,samtools_exe paths to the external executables.
#' @param temp_dir directory for the intermediate FASTA/SAM/BAM files.
#' @param write_rds_output_path optional path; when given, the alignment
#'   table is also saved there with saveRDS().
#' @param guide_length length of the guide sequences.
#' @return data frame of guide alignments, one row per alignment (guides
#'   with no alignment are included with NA fields).
#' @importFrom Biostrings DNAStringSet
#' @importFrom Biostrings writeXStringSet
#' @export
#'
map_guide_to_locus <- function(guides,
                               genome_id="hg19",
                               bowtie_exe="bowtie",
                               samtools_exe="samtools",
                               temp_dir=tempdir(),
                               write_rds_output_path=NULL, guide_length=20) {
    guides_fa <- file.path(temp_dir, "guides.fa")
    guides_sam <- file.path(temp_dir, "guides.sam")
    guides_bam <- file.path(temp_dir, "guides.bam")
    guides %>% unique %>%
        set_names(.,.) %>%
        Biostrings::DNAStringSet() %>%
        Biostrings::writeXStringSet(guides_fa)
    # shQuote() protects against spaces/shell metacharacters in the paths
    bowtie_cmd <- paste(bowtie_exe, "-t -p 4 -a -v 0 -f -S", genome_id,
                        shQuote(guides_fa), shQuote(guides_sam))
    system(bowtie_cmd)
    samtools_cmd <- paste(samtools_exe, "view -bS -o",
                          shQuote(guides_bam), shQuote(guides_sam))
    system(samtools_cmd)
    alns <- guideAlignments(guides_bam, max.alns=100,
                            include.no.align=TRUE, as.df=TRUE, guide_length=guide_length, genome_id=genome_id)
    if(!is.null(write_rds_output_path)){
        # message used to say 'csv file' although saveRDS() writes an Rds
        cat(paste('Writing the mapping of sgRNAs to the genome in', write_rds_output_path, 'Rds file'))
        saveRDS(alns, write_rds_output_path)
    }
    return(alns)
}
#' Intersect locus with copy number segment
#'
#' For each aligned guide cut site, looks up the copy number of the segment
#' it falls into, per cell line.  Guides without any alignment (NA rname)
#' are appended with copy number 0 in every cell line.
#'
#' @param cn_seg named list (per cell line) of copy-number segment tables.
#' @param guide_alns guide alignment table (see map_guide_to_locus()).
#' @param cell_lines optional subset of cell lines to keep.
#' @param genomeinfo Seqinfo for the genome build.
#' @param chromosomes chromosomes to keep.
#' @param do_parallel forwarded to intersect_guide_with_copy_number().
#' @return matrix of copy number: one row per alignment (AlnID) plus one
#'   all-zero row per non-aligning guide; one column per cell line.
#' @importFrom GenomeInfoDb Seqinfo
#' @export
#'
intersect_locus_with_cn_seg <-
    function(cn_seg, guide_alns,
             cell_lines=NULL,
             genomeinfo=Seqinfo(genome="hg19"),
             chromosomes=paste0("chr", c(as.character(1:22),"X","Y")),
             do_parallel=F) {
        if (!is.null(cell_lines)) {
            cn_seg <- cn_seg[names(cn_seg) %in% cell_lines]
        }
        cn_seg_gr <- plyr::llply(cn_seg,
                                 makeGRangesFromDataFrame,
                                 seqinfo=genomeinfo, keep.extra.columns=T)
        # one range per distinct (guide, cut position, strand); AlnID encodes it
        guide_alns_gr <- guide_alns %>%
            dplyr::filter(!is.na(rname)) %>%
            dplyr::mutate(Chr = rname,
                          Start = Cut.Pos,
                          End = Cut.Pos,
                          AlnID = str_c(Guide, Chr, Start, strand, sep="_")) %>%
            dplyr::distinct(Guide, Chr, Start, End, AlnID) %>%
            dplyr::filter(Chr %in% chromosomes) %>%
            makeGRangesFromDataFrame(seqinfo=genomeinfo, keep.extra.columns=T)
        # guides that failed to align anywhere (rname is NA)
        guide_no_alns <- guide_alns %>%
            dplyr::filter(is.na(rname)) %>%
            dplyr::distinct(Guide, .keep_all=T)
        guide_cn <-
            intersect_guide_with_copy_number(guide_alns_gr, cn_seg_gr,
                                             CN.column="CN",
                                             guide.column="AlnID",
                                             do_parallel=do_parallel) %>%
            # append all-zero CN rows for the non-aligning guides
            rbind(matrix(0, dimnames=list(guide_no_alns$Guide, colnames(.)),
                         nrow=nrow(guide_no_alns), ncol=ncol(.)))
    }
#' @importFrom plyr "."
# Read a segmented copy-number file (tab-separated: cell line, chromosome,
# start, end, number of probes, CN) into a list of per-cell-line data frames.
# Chromosome names are normalised to the "chr..." form.  Cell lines whose CN
# column contains negative values are assumed to carry log2 ratios and are
# converted to absolute copy number via 2 * 2^CN.
load_cn_seg_file <-
    function(cn_seg_file,
             chromosomes=paste0("chr", c(as.character(1:22),"X", "Y"))) {
        read_tsv(cn_seg_file,
                 col_types="ccddid") %>%
            set_colnames(c("CellLine", "Chr", "Start", "End",
                           "Num_Probes", "CN")) %>%
            dplyr::mutate(Chr = ifelse(str_detect(Chr, "^chr"), Chr, str_c("chr", Chr)),
                          Start = as.integer(Start),
                          End = as.integer(End)) %>%
            dplyr::filter(Chr %in% chromosomes) %>%
            dplyr::group_by(CellLine) %>%
            dplyr::mutate(CN = if(any(CN < 0)){2 * 2^CN}else{CN}) %>%
            dplyr::ungroup() %>%
            plyr::dlply(.(CellLine))
    }
# Map guide cut sites onto gene coding regions.
#
# Builds GRanges for the gene annotation (`genes`, e.g. from
# load_ccds_genes()) and for the distinct guide cut positions, overlaps them
# ignoring strand, and returns one row per (guide cut site, overlapping gene)
# pair with the gene's CDS coordinates.
# Change vs original: spell out TRUE instead of the reassignable shortcut T.
get_gene_annotations <- function(genes, guide_alns,
                                 chromosomes=paste0("chr",c(as.character(1:22), "X", "Y")),
                                 genomeinfo=Seqinfo(genome="hg19")) {
    gene_annot_grs <- genes %>%
        makeGRangesFromDataFrame(seqinfo=genomeinfo, keep.extra.columns=TRUE)
    # one range per distinct (guide, chromosome, cut position, strand)
    guide_aln_grs <- guide_alns %>%
        dplyr::select(Guide, Chr=rname, Start=Cut.Pos, strand) %>%
        dplyr::mutate(End = Start) %>%
        dplyr::filter(Chr %in% chromosomes) %>%
        dplyr::distinct() %>%
        makeGRangesFromDataFrame(seqinfo=genomeinfo, keep.extra.columns=TRUE)
    hits <- findOverlaps(guide_aln_grs, gene_annot_grs, ignore.strand=TRUE) %>%
        as.data.frame
    gene_df <- hits %>%
        dplyr::transmute(Guide = guide_aln_grs$Guide[queryHits],
                         Chr = seqnames(guide_aln_grs)[queryHits] %>% as.character(),
                         Cut.Pos = start(guide_aln_grs)[queryHits] %>% as.integer(),
                         Strand = strand(guide_aln_grs)[queryHits] %>% as.character(),
                         Gene = gene_annot_grs$gene[subjectHits],
                         GeneID = gene_annot_grs$gene_id[subjectHits],
                         CDS_Strand = strand(gene_annot_grs)[subjectHits] %>% as.character(),
                         CDS_Start = start(gene_annot_grs)[subjectHits] %>% as.integer(),
                         CDS_End = end(gene_annot_grs)[subjectHits] %>% as.integer()) %>%
        dplyr::distinct()
}
# Read a CCDS gene annotation file, keep public/reviewed entries on the
# requested chromosomes, and expand each CDS into one row per exon, with
# exon_code numbered 5'->3' along the coding strand.
load_ccds_genes <- function(ccds_file,
                            chromosomes=paste0("chr", c(as.character(1:22),"X","Y"))) {
    ccds <- read_tsv(ccds_file,
                     col_types=cols("#chromosome" = col_character(),
                                    "cds_from" = col_integer(),
                                    "cds_to" = col_integer())) %>%
        dplyr::rename(chromosome=`#chromosome`) %>%
        dplyr::mutate(chromosome = str_c("chr", chromosome)) %>%
        dplyr::filter(ccds_status %in% c("Public", "Reviewed, update pending", "Under review, update"),
                      chromosome %in% chromosomes,
                      !is.na(cds_from), !is.na(cds_to))
    # cds_locations holds "[from-to, from-to, ...]"; split into per-exon rows
    ccds_exon <- ccds %>%
        dplyr::mutate(cds_interval = str_replace_all(cds_locations, "[\\[\\]]", "") %>%
                          str_split("\\s*,\\s*")) %>%
        tidyr::unnest(cds_interval) %>%
        dplyr::group_by(gene, gene_id, cds_locations) %>%
        dplyr::mutate(exon_code = ifelse(cds_strand=="+", 1:dplyr::n(), dplyr::n():1)) %>%
        dplyr::ungroup() %>%
        dplyr::mutate(cds_start = str_extract(cds_interval, "^[0-9]+") %>% as.integer,
                      cds_end = str_extract(cds_interval, "[0-9]+$") %>% as.integer) %>%
        dplyr::select(gene, gene_id, chromosome, start=cds_start, end=cds_end, strand=cds_strand,
                      gene_start=cds_from, gene_end=cds_to, exon_code)
}
#' Generation of the guides from a gct dep_file
#'
#' Reads guide-level dependency data from a GCT file, trims each row name to
#' its leading guide sequence ([ACGT]+), keeps the first occurrence of every
#' guide, and drops rows that are entirely NA.
#'
#' @param dep_file file path of guide-level dependency data (GCT format)
#' @param write_rds_output_path Optional: Will write guide_dep into a rds file in top of returning the matrix
#'
#' @return Matrix with a description attribute
#' @export
#'
generate_guides <- function(dep_file, write_rds_output_path=NULL){
    guide_dep <- read.gct(dep_file) %>%
        set_rownames(str_extract(rownames(.), "^[ACGT]+")) %>%
        # indexing by unique row names keeps the first row per guide
        {.[unique(rownames(.)),]} %>%
        remove.rows.all.nas()
    if(!is.null(write_rds_output_path)){
        cat(paste('Writing the generated guides in', write_rds_output_path, 'Rds file'))
        saveRDS(guide_dep, write_rds_output_path)
    }
    return(guide_dep)
}
#' CERES main routine
#'
#' Prepares all CERES input files and writes them as .Rds into `inputs_dir`:
#' the normalized guide-by-replicate dependency matrix, the guide->locus and
#' locus->gene maps, the per-locus copy number matrix, and the replicate map.
#'
#' @param inputs_dir directory path to write CERES inputs
#' @param dep_file file path of guide-level dependency data (GCT); not read
#'   when `pre_generated_guides_file` is supplied
#' @param pre_generated_guides_file Optional: path to an Rds produced by
#'   generate_guides(); skips re-parsing the GCT file
#' @param cn_seg_file file path of segmented copy number data
#' @param gene_annot_file file path of gene annotation (CCDS)
#' @param rep_map_file file path of replicate map (tab-separated, with
#'   Replicate and CellLine columns)
#' @param genome_id genome build / bowtie index identifier
#' @param chromosomes chromosomes to keep
#' @param dep_normalize per-replicate normalization: "zmad", "zscore" or "none"
#' @param bowtie_exe,samtools_exe paths to the external executables
#' @param do_parallel forwarded to intersect_locus_with_cn_seg()
#' @param guide_alns_file Optional: Rds of guide alignments produced by
#'   map_guide_to_locus(); skips running bowtie
#'
#' @return Returns invisibly. Only called for its effects.
#'
#' @importFrom GenomeInfoDb Seqinfo
#' @export
#'
prepare_ceres_inputs <-
    function(inputs_dir,
             dep_file,
             pre_generated_guides_file=NULL,
             cn_seg_file,
             gene_annot_file,
             rep_map_file,
             genome_id="hg19",
             chromosomes=paste0("chr",c(as.character(1:22), "X", "Y")),
             dep_normalize="zmad",
             bowtie_exe="bowtie",
             samtools_exe="samtools",
             do_parallel=F,
             guide_alns_file=NULL) {
        genomeinfo <- Seqinfo(genome=genome_id)[chromosomes]
        dir.create(inputs_dir, recursive=T, showWarnings=F)
        cat("loading dependency data...\n\n")
        guide_dep <- NULL
        if(is.null(pre_generated_guides_file)){
          guide_dep <- generate_guides(dep_file)
        }
        else{
          guide_dep <- readRDS(pre_generated_guides_file)
        }
        # guide length inferred from the second row name's sequence
        guide_length <- nchar(rownames(guide_dep)[2])
        rep_map <- readr::read_tsv(rep_map_file)
        cat("loading copy number data...\n\n")
        cn_seg <- load_cn_seg_file(cn_seg_file, chromosomes=chromosomes)
        guide_alns <- NULL
        if(is.null(guide_alns_file)){
          cat("mapping sgRNAs to the genome...\n\n")
          guide_alns <- map_guide_to_locus(rownames(guide_dep), genome_id=genome_id,
                                         bowtie_exe=bowtie_exe, samtools_exe=samtools_exe,
                                         guide_length=guide_length)
        }
        else{
          cat("reading sgRNAs mapped to the genome...\n\n")
          guide_alns <- readRDS(guide_alns_file)
        }
        # restrict to cell lines that actually have replicates in the data
        cell_lines <- rep_map %>%
            dplyr::filter(Replicate %in% colnames(guide_dep)) %$%
            unique(CellLine)
        cat("getting copy number data per locus...\n\n")
        guide_cn_mat <- intersect_locus_with_cn_seg(cn_seg, guide_alns,
                                                    cell_lines=cell_lines,
                                                    genomeinfo=genomeinfo,
                                                    chromosomes=chromosomes,
                                                    do_parallel=do_parallel)
        # rows named "chr..." are genomic loci; the rest are non-targeting guides
        locus_cn <- guide_cn_mat[str_detect(rownames(guide_cn_mat), "chr"), , drop=F] %>%
            set_rownames(rownames(.) %>% str_extract("chr.+$")) %>%
            {.[rownames(.), , drop=F]} %>%
            remove.rows.all.nas()
        non_targeting_cn <- guide_cn_mat[!str_detect(rownames(guide_cn_mat), "chr"), , drop=F]
        guide_locus_df <- guide_alns %>%
            dplyr::transmute(Guide,
                             Locus = str_c(rname, Cut.Pos, strand, sep="_")) %>%
            dplyr::distinct()
        cat("mapping loci to gene coding regions...\n\n")
        ccds <- load_ccds_genes(ccds_file=gene_annot_file,
                                chromosomes=chromosomes)
        gene_df <- get_gene_annotations(ccds, guide_alns,
                                        genomeinfo=genomeinfo,
                                        chromosomes=chromosomes)
        locus_gene_df <- gene_df %>%
            dplyr::transmute(Locus = str_c(Chr, Cut.Pos, Strand, sep="_"),
                             Gene) %>%
            dplyr::distinct()
        cat("normalizing data...\n\n")
        # NOTE(review): rep_map is re-read here although it was loaded above
        rep_map <- read_tsv(rep_map_file)
        cell_lines_to_use <- intersect(rep_map$CellLine, colnames(locus_cn))
        loci_to_use <- intersect(guide_locus_df$Locus, rownames(locus_cn))
        # keep guides that either hit a usable locus or are non-targeting
        guides_to_use <-
            intersect(rownames(guide_dep),
                      c(guide_locus_df %>% dplyr::filter(Locus %in% loci_to_use) %$% Guide,
                        rownames(non_targeting_cn)))
        guide_dep <- guide_dep[guides_to_use,
                               rep_map %>%
                                   dplyr::filter(CellLine %in% cell_lines_to_use) %$%
                                   Replicate, drop=F]
        # per-replicate (column-wise) normalization of the dependency scores
        if (dep_normalize=="zmad") {
            guide_dep <- plyr::aaply(guide_dep, 2, function(cl) {
                (cl - median(cl, na.rm=T)) / mad(cl, na.rm=T)
            }) %>% t
        } else if (dep_normalize=="zscore") {
            guide_dep <- plyr::aaply(guide_dep, 2, function(cl) {
                (cl - mean(cl, na.rm=T)) / sd(cl, na.rm=T)
            }) %>% t
        } else if (dep_normalize=="none") {
        } else {
            stop("Error: normalization not recognized")
        }
        guide_locus_df <- guide_locus_df %>%
            dplyr::filter(Guide %in% guides_to_use,
                          Locus %in% loci_to_use) %>%
            dplyr::mutate(Value = 1)
        locus_gene_df <- locus_gene_df %>%
            dplyr::filter(Locus %in% loci_to_use) %>%
            dplyr::mutate(Value = 1)
        cat("writing to disk...\n\n")
        # TODO: Is it useful to copy this to guide_sample_dep if already existing as .Rds via generate_guides?
        saveRDS(guide_dep, file.path(inputs_dir, "guide_sample_dep.Rds"))
        saveRDS(guide_locus_df, file.path(inputs_dir, "guide_locus.Rds"))
        saveRDS(locus_gene_df, file.path(inputs_dir, "locus_gene.Rds"))
        saveRDS(locus_cn, file.path(inputs_dir, "locus_sample_cn.Rds"))
        rep_map %>% dplyr::filter(Replicate %in% colnames(guide_dep)) %>%
            saveRDS(file.path(inputs_dir, "replicate_map.Rds"))
        invisible(NULL)
    }
|
4324a8fa8dd724a1f3954bcbd51b9eca462a2e3b | a66023a86ed6cb864361285c8ba37e6a46531abc | /schmidtWorkshop.R | d28f96478717d43ebd81e6a1d6d86a31587cd0ef | [
"MIT"
] | permissive | zhangyd10/schmidtWorkshop | 83ddc6be110fdb103a65ae1f22d656344becf8b9 | 48a441bebfea1e862c36c86b25ce65f1c3102dfa | refs/heads/master | 2020-04-16T08:08:20.765319 | 2018-07-25T16:22:04 | 2018-07-25T16:22:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 28,211 | r | schmidtWorkshop.R |
# Attach a package quietly, hiding both warnings and startup messages.
#
# @param a.package character scalar: name of the package to attach.
sshhh <- function(a.package){
  suppressPackageStartupMessages(
    suppressWarnings(
      library(a.package, character.only = TRUE)
    )
  )
}
# Quietly attach every package used by this workshop script.  `loads` keeps
# the (invisible) return values of library(); it is not used afterwards.
pkgs <- c("data.table", "plotly", "Rgraphviz", "graph", "BiocGenerics", "parallel", "magrittr")
loads <- sapply(pkgs, sshhh)
###################################################################/
# Description: mi.univariate.subset
#
# Returns the univariate MI GRS results for a fixed, hand-picked set
# of ICD-10 codes, ordered by p-value.
###################################################################/
mi.univariate.subset = function( )
{
    codesOfInterest = c( "I251", "E780", "M206", "S9211", "M4792",
                         "I422", "C837", "I461", "B24", "G35" )
    results = mi.univariate()
    return( results[ Code %in% codesOfInterest ][ order( pValue ) ] )
}
###################################################################/
# Description: mi.univariate
#
# Loads the MI GRS univariate results ('MI_GRS_tree.rdata' must be in
# the working directory; it provides the MI_GRS_tree data frame).
#
# @param originalCols logical; if TRUE return the table with its
#        original column names, otherwise rename to the standard
#        Code/Description/N/beta/pValue layout and sort by p-value.
###################################################################/
mi.univariate = function( originalCols = FALSE )
{
    file = 'MI_GRS_tree.rdata'
    load( file )
    data = as.data.table( MI_GRS_tree )
    # isTRUE() is safer than the `== TRUE` comparison (handles NA and
    # non-logical input deterministically)
    if( isTRUE( originalCols ) )
        return( data )
    setnames( data, c( "coding", "meaning", "Naffected", "BETA", "Pval" ), c( "Code", "Description", "N", "beta", "pValue") )
    return( data[, .( Code, Description, N, beta, pValue ) ][ order( pValue ) ] )
}
###################################################################/
# Description: ABO.univariate
#
# Loads the ABO SNP univariate results ('ABO.1df.res.rdata' provides
# a `data` object) and returns them ordered by p-value, with beta
# derived as log(OR).
###################################################################/
ABO.univariate = function()
{
    file = 'ABO.1df.res.rdata'
    load( file )
    results = as.data.table( data )
    results = results[ !is.na( pValue ),
                       .( Code = coding, Description = meaning, N = COUNTS,
                          beta = log( OR ), pValue ) ]
    return( results[ order( pValue ) ] )
}
###################################################################/
# Description: ABO.lk.surfs
#
# Loads 'ABO.lk.surfs.rdata' from the working directory and returns
# its `ABO.data` object: the per-code likelihood surfaces for the
# ABO SNP in the ICD-10 UK Biobank data set.
###################################################################/
ABO.lk.surfs = function( ) {
    load( 'ABO.lk.surfs.rdata' )
    return( ABO.data )
}
# Loads 'ABO.lk.surfs.rdata' from the working directory and returns its
# `ABO.pars` object (parameters accompanying the ABO likelihood surfaces).
ABO.lk.pars = function( ) {
    load( 'ABO.lk.surfs.rdata' )
    return( ABO.pars )
}
###################################################################/
# Description: MI_GRS.lk.surfs
#
# Loads 'MI_GRS_UKBB_data.rdata' and returns its three objects
# (llk.data, tree, prior) bundled in a named list: the likelihood
# surfaces for the MI GRS in the ICD-10 UK Biobank data set.
###################################################################/
MI_GRS.lk.surfs = function( ) {
    load( 'MI_GRS_UKBB_data.rdata' )
    out = list( llk.data = llk.data, tree = tree, prior = prior )
    return( out )
}
###################################################################
## Description: draw the (scaled) likelihood surface for one ICD-10
## code in the GRS analysis.
##
## @param code     ICD-10 code, matched against tree$coding.
## @param tree     data frame with at least `coding` and `meaning`.
## @param prior    list with `b.grid`, the beta grid of the surface.
## @param lk.surfs list (parallel to the tree rows) of lists whose
##                 `op` element holds the scaled likelihood on b.grid.
###################################################################
grs.plot.lk.surface <- function(
    code = NULL,
    tree = NULL,
    prior = NULL,
    lk.surfs = NULL
) {
    # FIX: use short-circuit `||` for this scalar check; the vectorized
    # `|` evaluates every operand and does not short-circuit.
    if ( is.null(code) || is.null(tree) || is.null(prior) || is.null(lk.surfs) )
        stop("missing function argument.\n")
    code.idx <- which(tree$coding %in% code)
    code.meaning <- tree[code.idx,'meaning']
    plot(
        prior$b.grid,
        lk.surfs[[code.idx]]$op,
        xlab = 'beta',
        ylab = 'likelihood (scaled)',
        pch = 19,
        col = 'black',
        bty = 'l',
        main = code.meaning
    )
}
###################################################################/
# Description: draw_tree_univariate
#
# Draws the ICD-10 tree coloured by the univariate results in `data`
# (must contain Code, pValue and beta columns).  Nodes with pValue
# below `pValueThreshold` are coloured by effect direction; the
# colour saturates at `pValueSaturation`.
###################################################################/
draw_tree_univariate <- function(
    data,
    title = "Univariate Analysis",
    pValueThreshold = 1e-5,
    pValueSaturation = 1e-50
)
{
    # make sure data is correct form
    if( !is.data.table( data ) )
        data = as.data.table( data )
    # BUGFIX: the `!= 0` used to sit inside length()'s argument, and
    # throw() does not exist in base R -- signal the error with stop().
    if( length( setdiff( c( "Code", "pValue", "beta" ), names( data ) ) ) != 0 )
        stop( "input data requires pValue, beta and Code columns")
    data = data[ ,.( coding = Code, Pval = pValue, BETA = beta ) ]
    # get whole tree data
    tree = mi.univariate( originalCols = TRUE )
    tree = tree[ ,.( ID, Par, coding, meaning ) ]
    tree = data[ tree, on = "coding" ]
    # convert threshold to log10 scale
    lThresh = log( pValueThreshold ) / log( 10 )
    lSat = log( pValueSaturation) / log( 10 )
    if( lSat > lThresh )
        lWidth = 1e-5
    else
        lWidth = lThresh - lSat
    # convert data to the 3-column (protective, null, risk) matrix of
    # pseudo posterior probabilities expected by draw_tree()
    pp = data.table( tree )[ , .( BETA = ifelse( is.na( BETA ), 0, BETA ), logPval = ifelse( is.na( Pval), 0, ifelse( Pval < 1e-185, -185, log( Pval ) / log( 10 ) ) ) ) ]
    pp[ , effPp := ifelse( logPval > lThresh, 0, pmin( 1, ( lThresh - logPval ) / lWidth ) ) ]
    pp[ , effPp := ifelse( effPp > 0, effPp / 2 + 0.5, 0 ) ]
    pp = as.matrix( pp[ , .( ifelse( BETA < 0, effPp, 0 ), 1 - effPp, ifelse( BETA > 0, effPp, 0 ) ) ] )
    # finally draw the tree
    draw_tree( as.data.frame( tree ), pp, tree_title = title,trim_tree_pp = 0.01, measureName = "pValue", measureValueFunc = function( t, p ) return( as.data.table( t )[ , format( Pval, digits = 3 ) ] ) )$plot
}
###################################################################/
# Description: draw_tree
#
# Renders a coded tree (ID/Par table, root last with Par = 0) as an
# interactive plotly scatter plot: edges from a Rgraphviz dot layout,
# one marker per node coloured by its most probable state in `pp`
# (column 1 = protective, 2 = null, 3 = risk).
#
# @param tree data frame with ID, Par, meaning columns.
# @param pp matrix of per-node state probabilities (3 columns).
# @param tree_title plot title.
# @param only.get.stats NOTE(review): currently unused in the body.
# @param trim_tree_pp if non-NULL, trim_tree() is applied first with
#        this threshold.
# @param measureName,measureValueFunc label and value shown in each
#        node's hover text.
# @return list with nodeRI (node render info), xlim, plot (plotly
#        object) and edges (edge spline coordinates).
###################################################################/
draw_tree <- function(
    tree = NULL,
    pp = NULL,
    tree_title = "Tree",
    only.get.stats = FALSE,
    trim_tree_pp = NULL,
    measureName = "PP_active",
    measureValueFunc = function( tree, pp ) return( round( 1 - pp[ ,2 ] , 2) )
)
{
    # remove tree if posterior probability is to low
    if( !is.null( trim_tree_pp ) )
    {
        tmp <- trim_tree( tree = tree, pp = pp, pp.thr = trim_tree_pp )
        tree <- tmp$tree
        pp <- tmp$pp
    }
    # adjacency matrix parent -> child; the last row (root) has no parent
    matrix <- matrix(0, ncol = nrow(tree), nrow = nrow(tree))
    for( i in 1:( nrow( tree ) - 1 ) )
    {
        p <- tree[i,"Par"]
        c <- tree[i,"ID"]
        matrix[p,c] <- 1
    }
    rownames(matrix) <- tree$ID
    colnames(matrix) <- tree$ID
    labels = tree$ID
    graph <- new("graphAM", adjMat = matrix, edgemode = 'directed')
    lGraph <- layoutGraph(graph)
    ninfo <- nodeRenderInfo(lGraph)
    # state per node: -1 protective, 0 null, 1 risk (argmax of pp - 2)
    node_state <- apply(pp,1,function(x) return( which.max(x) )) - 2
    # HTML hover label shown by plotly for each node
    node_labels <- paste(
        tree$meaning,
        "<br>",
        "State: ",node_state,
        "<br>",
        measureName, "= ", measureValueFunc( tree, pp ),
        sep = ""
    )
    nodeRI = data.frame(
        NODE = names(ninfo$nodeX),
        PP1 = pp[,1],
        PP2 = pp[,2],
        PP3 = pp[,3],
        NODEX = ninfo$nodeX,
        NODEY = ninfo$nodeY,
        MEANING = tree$meaning,
        LABEL = node_labels
    )
    # colour ramps: white -> dark red for risk, white -> dark blue for protective
    col_pal_risk <- colorRampPalette( c( "white", rgb(112, 28, 28, max=255) ) )( 100 )
    col_pal_prot <- colorRampPalette( c( "white", rgb(8, 37, 103, max=255) ) )(100)
    cols1 <- map2color(pp[,3], col_pal_risk, limits = c(0,1))
    cols2 <- map2color(pp[,1], col_pal_prot, limits = c(0,1))
    bar.cols <- rep("white",nrow(tree))
    state.col <- apply(pp[,c(1,3)],1,which.max)
    bar.cols[state.col == 1] <- cols2[state.col == 1]
    bar.cols[state.col == 2] <- cols1[state.col == 2]
    # node fill: coloured only when the argmax state is non-null
    cols <- rep("white",nrow(tree))
    idx <- which(node_state == 1)
    cols[idx] <- cols1[idx]
    idx <- which(node_state == -1)
    cols[idx] <- cols2[idx]
    nodeRI$COL <- bar.cols
    attrs <- list(node = list(fillcolor = 'white'), edge = list(arrowsize=0.5))
    names(cols) <- labels
    nattrs <- list(fillcolor=cols)
    nodes <- buildNodeList(graph, nodeAttrs=nattrs, defAttrs=attrs$node)
    edges <- buildEdgeList(graph)
    # lay the graph out with graphviz to get node/edge coordinates
    vv <- agopen(name="foo", nodes=nodes, edges=edges, attrs=attrs,
                 edgeMode="directed")
    x <- vv
    y <- x@layoutType
    x <- graphLayout(x, y)
    ur <- upRight(boundBox(x))
    bl <- botLeft(boundBox(x))
    out <- list()
    out$nodeRI <- nodeRI
    ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    ## Initalize plotly
    ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    p <- plotly::plot_ly()
    xlim1 <- getX(ur)*1.02
    xlim0 <- -xlim1*0.02
    xlim <- c(xlim0, xlim1)
    ## Add an axis.
    p = plotly::layout(
        p,
        title = tree_title,
        xaxis = list(
            title = "",
            showgrid = FALSE,
            showticklabels = FALSE,
            showline = FALSE,
            zeroline = FALSE,
            range = xlim
        ),
        yaxis = list(
            title = "",
            showgrid = FALSE,
            showticklabels = FALSE,
            showline = FALSE,
            zeroline = FALSE,
            range = c(getY(bl), getY(ur))
        ),
        showlegend = FALSE
    )
    out$xlim = xlim
    ## Add the edges: each spline of each edge becomes a grey line trace
    edges <- AgEdge(x)
    edges.p <- list()
    for( i in 1:length(edges) ) {
        edge <- edges[[i]]
        node.to <- edge@head
        node.from <- edge@tail
        for ( j in 1:length(splines(edge)) ) {
            z <- splines(edge)[[j]]
            points <- matrix(unlist(pointList(z)),ncol=2,byrow=TRUE)
            p <- add_trace(
                p,
                x = points[,1],
                y = points[,2],
                type = "scatter",
                mode = "lines",
                hoverinfo = "none",
                line = list(color = "gray"),
                showlegend = FALSE
            )
        }
        edges.p[[i]] <- points
        heads = bezierPoints(z)
        head_from = heads[nrow(heads)-1, ]
        head_to = heads[nrow(heads),]
    }
    ## Add the nodes; draw the most-null nodes first so active ones sit on top
    order <- order(pp[,2],decreasing=TRUE)
    p = plotly::add_trace( p,
                           x = nodeRI$NODEX[order],
                           y = nodeRI$NODEY[order],
                           type = "scatter",
                           mode = "markers",
                           text = nodeRI$LABEL[order],
                           hoverinfo = "text",
                           marker = list(
                               size = 20,
                               symbol = "circle",
                               color = cols[order],
                               line = list( color = "black", width = 1)
                           ),
                           showlegend = FALSE
    )
    out$plot <- p
    out$edges <- edges.p
    return(out)
}
###################################################################/
# Descrption: map2color
###################################################################/
map2color <- function( x, pal, limits = range(x) )
{
  ## Map each numeric value in `x` onto an entry of the colour palette `pal`.
  ## `limits` is split into length(pal) equal-width bins; each value gets the
  ## colour of the bin it falls in.  all.inside = TRUE clamps values at or
  ## beyond the limits into the first/last bin.
  breaks <- seq(limits[1], limits[2], length.out = length(pal) + 1)
  bin <- findInterval(x, breaks, all.inside = TRUE)
  pal[bin]
}
###################################################################/
# Descrption: trim_tree
###################################################################/
trim_tree <- function( tree = tree, pp = pp, pp.thr = 0.75 )
{
  ## Trim the tree down to the "interesting" part: nodes whose second
  ## posterior column falls below 1 - pp.thr, plus their siblings and every
  ## ancestor up to the root.  Node IDs are then renumbered to 1..n so the
  ## trimmed tree stays self-consistent.
  selected <- tree[which(pp[, 2] < 1 - pp.thr), ]
  sibs <- unique(unlist(lapply(selected$ID, get_tree_siblings, tree)))
  ancestors <- unique(unlist(lapply(selected$ID, get_path_ids_to_root, tree)))
  keep <- sort(unique(c(selected$ID, sibs, ancestors)), decreasing = FALSE)
  keep_mask <- tree$ID %in% keep
  trimmed <- tree[keep_mask, ]
  pp_trimmed <- pp[keep_mask, ]
  ## Renumber IDs consecutively; parents must be remapped before the old IDs
  ## are overwritten.
  fresh_id <- seq_len(nrow(trimmed))
  trimmed$Par <- fresh_id[match(trimmed$Par, trimmed$ID)]
  trimmed$ID <- fresh_id
  ## The last row is the root: it has no parent.
  trimmed[nrow(trimmed), "Par"] <- 0
  list(tree = trimmed, pp = pp_trimmed)
}
###################################################################/
# Descrption: get_tree_siblings
###################################################################/
get_tree_siblings <- function(id,tree)
{
  ## Return the IDs of every node sharing a parent with any node in `id`.
  ## The queried nodes themselves are included in the result.
  parents <- tree$Par[tree$ID %in% id]
  tree$ID[tree$Par %in% parents]
}
###################################################################/
# Descrption: get_path_ids_to_root
###################################################################/
get_path_ids_to_root <- function( id, tree )
{
  ## Walk parent pointers upward from `id`, accumulating every node ID
  ## visited, until the root (assumed to sit in the last row of `tree`)
  ## appears in the set.  Returns the starting IDs plus all ancestors found.
  root_id <- tree$ID[nrow(tree)]
  path <- id
  repeat {
    if (root_id %in% path) break
    path <- unique(c(path, tree$Par[tree$ID %in% path]))
  }
  path
}
grs.trim_tree <- function( tree = tree, pp = pp, pp.thr = 0.75 )
{
  ## GRS variant of trim_tree(): keep nodes whose posterior probability of
  ## being active (pp$POST_ACTIVE) meets the threshold, together with their
  ## siblings and every ancestor up to the root, then renumber IDs to 1..n.
  active <- tree[which(pp$POST_ACTIVE >= pp.thr), ]
  sibs <- unique(unlist(lapply(active$ID, get_tree_siblings, tree)))
  ancestors <- unique(unlist(lapply(active$ID, get_path_ids_to_root, tree)))
  keep <- sort(unique(c(active$ID, sibs, ancestors)), decreasing = FALSE)
  keep_mask <- tree$ID %in% keep
  trimmed <- tree[keep_mask, ]
  pp_trimmed <- pp[keep_mask, ]
  ## Renumber IDs consecutively; remap parents before IDs are overwritten.
  fresh_id <- seq_len(nrow(trimmed))
  trimmed$Par <- fresh_id[match(trimmed$Par, trimmed$ID)]
  trimmed$ID <- fresh_id
  ## Root (last row) gets parent 0.
  trimmed[nrow(trimmed), "Par"] <- 0
  list(tree = trimmed, pp = pp_trimmed)
}
###################################################################/
# Descrption: GRS draw_tree
###################################################################/
grs.draw_tree <- function(
tree = NULL,
pp = NULL,
tree_title = "GRS Tree",
only.get.stats = FALSE,
trim_tree_pp = NULL
) {
## Lay out a GRS tree with Rgraphviz and render it as an interactive plotly
## figure (edges as grey splines, nodes as coloured markers with hover text).
##
## Args:
##   tree          data frame with columns ID, Par and meaning; the last row
##                 acts as the root.
##   pp            per-node posterior summary with columns POST_ACTIVE and
##                 max_b (see grs.marginal.posterior).
##   tree_title    plot title.
##   only.get.stats  NOTE(review): accepted but never used in this body.
##   trim_tree_pp  if non-NULL, trim the tree first with grs.trim_tree().
##
## Returns: a plotly plot object.
##
## NOTE(review): several locals shadow base functions (`matrix`, `c`,
## `order`, `labels`) — works, but rename with care if refactoring.
if ( ! is.null( trim_tree_pp ) ) {
tmp <- grs.trim_tree( tree = tree, pp = pp, pp.thr = trim_tree_pp )
tree <- tmp$tree
pp <- tmp$pp
}
## Build a parent->child adjacency matrix (root row excluded from the loop).
matrix <- matrix(0, ncol = nrow(tree), nrow = nrow(tree))
for( i in 1:( nrow( tree ) - 1 ) ) {
p <- tree[i,"Par"]
c <- tree[i,"ID"]
matrix[p,c] <- 1
}
rownames(matrix) <- tree$ID
colnames(matrix) <- tree$ID
labels = tree$ID
## Graphviz layout: node coordinates come from nodeRenderInfo().
graph <- new("graphAM", adjMat = matrix, edgemode = 'directed')
lGraph <- layoutGraph(graph)
ninfo <- nodeRenderInfo(lGraph)
## Hover label per node: meaning, posterior-mode beta and posterior active.
node_labels <- paste(
tree$meaning,"<br>",
"beta: ",round(pp$max_b,2),"<br>",
"PP: ",round(pp$POST_ACTIVE,2),"<br>",
sep = ""
)
nodeRI = data.frame(
NODE = names(ninfo$nodeX),
PP = as.numeric(pp$POST_ACTIVE),
NODEX = ninfo$nodeX,
NODEY = ninfo$nodeY,
MEANING = tree$meaning,
LABEL = node_labels
)
## Two white->colour ramps: dark red for risk (max_b > 0), dark blue for
## protective (max_b < 0); intensity scales with POST_ACTIVE.
col_pal_risk <- colorRampPalette( c( "white", rgb(112, 28, 28, max=255) ) )( 100 )
col_pal_prot <- colorRampPalette( c( "white", rgb(8, 37, 103, max=255) ) )(100)
cols1 <- map2color(pp$POST_ACTIVE, col_pal_risk, limits = c(0,1))
cols2 <- map2color(pp$POST_ACTIVE, col_pal_prot, limits = c(0,1))
bar.cols <- rep("white",nrow(tree))
bar.cols[pp$max_b < 0] <- cols2[pp$max_b < 0]
bar.cols[pp$max_b > 0] <- cols1[pp$max_b > 0]
nodeRI$COL <- bar.cols
attrs <- list(node = list(fillcolor = 'white'), edge = list(arrowsize=0.5))
cols <- nodeRI$COL
names(cols) <- labels
nattrs <- list(fillcolor=cols)
## Re-lay the graph with agopen()/graphLayout() so edge splines and the
## bounding box are available for plotting.
nodes <- buildNodeList(graph, nodeAttrs=nattrs, defAttrs=attrs$node)
edges <- buildEdgeList(graph)
vv <- agopen(name="foo", nodes=nodes, edges=edges, attrs=attrs,
edgeMode="directed")
x <- vv
y <- x@layoutType
x <- graphLayout(x, y)
ur <- upRight(boundBox(x))
bl <- botLeft(boundBox(x))
## NOTE(review): `out` is assembled below but never returned — the function
## ends with return(p); out$nodeRI / out$xlim are effectively dead stores.
out <- list()
out$nodeRI <- nodeRI
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Initalize plotly
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
p <- plotly::plot_ly()
## Pad the x range slightly beyond the layout bounding box.
xlim1 <- getX(ur)*1.02
xlim0 <- -xlim1*0.02
xlim <- c(xlim0, xlim1)
## Add an axis.
p = plotly::layout(
p,
title = tree_title,
xaxis = list(
title = "",
showgrid = FALSE,
showticklabels = FALSE,
showline = FALSE,
zeroline = FALSE,
range = xlim
),
yaxis = list(
title = "",
showgrid = FALSE,
showticklabels = FALSE,
showline = FALSE,
zeroline = FALSE,
range = c(getY(bl), getY(ur))
),
showlegend = FALSE
)
out$xlim = xlim
## Add the edges
edges <- AgEdge(x)
edges.p <- list()
for( i in 1:length(edges) ) {
edge <- edges[[i]]
node.to <- edge@head
node.from <- edge@tail
## One trace per Bezier spline segment of the edge.
for ( j in 1:length(splines(edge)) ) {
z <- splines(edge)[[j]]
points <- matrix(unlist(pointList(z)),ncol=2,byrow=TRUE)
p <- add_trace(
p,
x = points[,1],
y = points[,2],
type = "scatter",
mode = "lines",
hoverinfo = "none",
line = list(color = "gray"),
showlegend = FALSE
)
}
## `points`/`z` here are from the LAST spline of the edge.
edges.p[[i]] <- points
heads = bezierPoints(z)
head_from = heads[nrow(heads)-1, ]
head_to = heads[nrow(heads),]
}
## Add the nodes
## Draw in increasing PP so high-posterior nodes are plotted on top.
order <- order(nodeRI$PP,decreasing=F)
tmp <- nodeRI[order,]
p = plotly::add_trace(
p,
x = nodeRI$NODEX[order],
y = nodeRI$NODEY[order],
type = "scatter",
mode = "markers",
text = nodeRI$LABEL[order],
hoverinfo = "text",
marker = list(
size = 20,
symbol = "circle",
color = nodeRI$COL[order],
line = list( color = "black", width = 0.5)
),
showlegend = FALSE
)
return(p)
}
###################################################################/
# Descrption: plot.qq
#
# assume the data is normally distributed, convert to normal and
# plot against theoretical z-score
###################################################################/
## Q-Q plot of p-values against the standard normal.
## `data` must be a data.table (the `:=`/[.data.table syntax below requires
## it) with a numeric pValue column and an annotation column named by
## `annot.col`; the input table is modified by reference (zData/zTheor added).
## Returns a plotly object with the data points and a y = x "null" line.
plot.qq = function( data, annot.col = 'Description' )
{
require(plotly)
## remove missing data
data <- na.omit(data)
## convert the pValues to a standard-normal z-score (one-sided)
data[ , zData :=-qnorm( pValue )]
## now add theoretical z values: expected order statistics of N(0, 1)
data = data[ order( zData ) ]
data[ , zTheor := qnorm( (1:data[,.N ] - 0.5 )/data[,.N ] ) ]
## make plot
plot = plot_ly( data, x = ~zTheor, y = ~zData, type = "scatter", mode = "markers", name = "data", text = data[, get( annot.col )], hoverinfo = "text" )
plot = layout( plot,
xaxis = list( title = "theoretical z-score", range = list( floor( data[,min( zTheor ) ] ), ceiling( data[, max( zTheor ) ] ) ) ),
yaxis = list( title = "data z-score" ),
title = "Q-Q Plot"
)
## Reference line: points on y = x indicate agreement with the null.
plot = add_lines( plot, x = ~zData, y = ~zData, name = "null" )
return( plot )
}
#' calculate BF
#'
#' @export
#'
calc.lBF <- function(
  pars = NULL,
  data.sub = NULL,
  w0 = 2,
  log10 = TRUE
) {
  ## Log Bayes factor comparing the full tree model against the all-null
  ## model for one set of variants.
  ##
  ## Args:
  ##   pars:     model parameter list (p.stay, p.switch, pi1, ...) as used
  ##             by calc.llk.tree().
  ##   data.sub: per-node emission log-likelihood matrix; column w0 holds
  ##             the null-state log-likelihoods.
  ##   w0:       index of the null column (forced to 1 for 2-column input).
  ##   log10:    return the Bayes factor in base 10 rather than base e.
  if (is.null(pars) || is.null(data.sub)) {
    stop("Missing input data.\n")
  }
  ## With only two emission columns the null column is the first one.
  if (ncol(data.sub) == 2) {
    w0 <- 1
  }
  ## Integrated log-likelihood of the full model.
  llk.full <- calc.llk.tree(pars, data.sub)
  ## Prior log-probability of the fully-null configuration: root null times
  ## remaining null across every parent->child transition.
  lq00 <- log(pars$p.stay + pars$p.switch * (1 - pars$pi1))
  l.p.null <- log(1 - pars$pi1) + (nrow(data.sub) - 1) * lq00
  ## Log-likelihood of the data under the all-null state.
  l.lk.null <- sum(data.sub[, w0])
  ## Remove the null contribution from the full model on a max-shifted
  ## scale for numerical stability, then renormalise by the non-null prior.
  both <- c(llk.full, l.p.null + l.lk.null)
  mx <- max(both)
  scaled <- exp(both - mx)
  lBF <- mx + log(scaled[1] - scaled[2]) - l.lk.null - log(1 - exp(l.p.null))
  if (log10) {
    lBF <- lBF / log(10)
  }
  lBF
}
#' Function to get marginal posterior on -/0/+ profile
#'
#' @param pars List of model parameters used by calc.llk.tree().
#' @param data.sub Matrix of per-node emission log-likelihoods.
#' @export
#' @examples
#' marginal.posterior.profile()
#'
marginal.posterior.profile <- function(
pars = NULL,
data.sub = NULL
) {
## Backward pass of the tree HMM: combines the forward (f) and G matrices
## from calc.llk.tree() into per-node marginal posteriors over the
## emission states.  Returns a matrix `pp` with one row per node,
## normalised so each row sums to 1.
## Get forward and G matrices
tmp <- calc.llk.tree(pars, data.sub, returnForwardMatrix=T, returnGMatrix=T);
f <- tmp$f;
g <- tmp$g;
## Build parents list and reverse F traversal order
parents <- rep(0, pars$n.phenos);
for (i in pars$t.path) parents[pars$ontology[[i]]] <- i;
## Root first, then interior nodes in reverse forward order, then leaves.
ord <- c(rev(pars$t.path), pars$terminals);
## Construct backward matrix
b <- array(0, dim(f));
## Root backward message is the stationary distribution (in logs).
b[ord[1],] <- log(pars$stat.dist);
for (i in ord[-1]) {
## Message from the parent with this child's own contribution divided out.
r.i <- b[parents[i],]+f[parents[i],]-g[i,];
## Log-sum-exp of the parent message (max-shifted for stability).
mx <- max(r.i);
tmp <- mx+log(sum(exp(r.i-mx)));
## Switch branch: redraw the state from the stationary distribution.
tmp <- tmp+pars$lp.switch+log(pars$stat.dist);
## Stay branch: inherit the parent state with penalty theta.tree.
tmp2 <- -pars$theta.tree+b[parents[i],]+f[parents[i],]-g[i,];
## Combine stay and switch branches via row-wise log-sum-exp.
tmp3 <- cbind(tmp2, tmp);
mx <- apply(tmp3, 1, max);
b[i,] <- log(rowSums(exp(tmp3-mx)))+mx;
}
## Posteriors
## Posterior ∝ forward × backward; the max shift cancels on normalisation.
tmp <- b+f;
mx <- apply(tmp, 1, max);
pp <- exp(tmp-mx);
pp <- pp/rowSums(pp);
return(pp);
}
#' Function to calculate integrated likelihood for set of variants up tree
#'
#' @export
#'
calc.llk.tree<-function(
pars,
data.sub,
returnForwardMatrix = FALSE,
returnGMatrix = FALSE
) {
## Forward (leaf-to-root) pass of the tree HMM.
##
## Args:
##   pars:     list with t.path (bottom-up traversal of internal nodes),
##             ontology (children of each internal node), stat.dist
##             (stationary state distribution), theta.tree (stay penalty)
##             and lp.switch (log switch probability).
##   data.sub: per-node emission log-likelihood matrix; rows are updated in
##             place (by value — the caller's copy is untouched).
##   returnForwardMatrix / returnGMatrix: return the forward / G messages
##             instead of (or alongside) the integrated likelihood.
## Get integrated likelihood at nodes - will be overwritten for internal nodes
mx <- apply(data.sub, 1, max);
d <- exp(data.sub-mx);
llk.integrated <- log(d %*% pars$stat.dist)+mx;
if (returnGMatrix) g <- array(0, dim(d));
for (i in pars$t.path) {
emiss.node<-data.sub[i,]; # Emissions at node
data.sub[i,]<-0;
for (j in pars$ontology[[i]]) {
## Per-child message: stay (penalised by theta.tree) vs switch
## (integrated likelihood of the child), combined by log-sum-exp.
tmp1<-cbind(data.sub[j,]-pars$theta.tree, llk.integrated[j]+pars$lp.switch);
mx1<-apply(tmp1, 1, max);
tmp2<-mx1+log(rowSums(exp(tmp1-mx1)));
data.sub[i,]<-data.sub[i,]+tmp2;
if (returnGMatrix) g[j,]<-tmp2;
}
## Add the node's own emissions back, then integrate over states.
data.sub[i,]<-data.sub[i,]+emiss.node;
mx<-max(data.sub[i,]);
llk.integrated[i]<-mx+log(sum(exp(data.sub[i,]-mx)*pars$stat.dist));
}
if (returnForwardMatrix) {
if (!returnGMatrix) {
return(data.sub);
} else {
return(list(f=data.sub, g=g));
}
} else {
## NOTE(review): `i` here is the loop variable left over from the last
## iteration, i.e. the final element of pars$t.path (presumably the root
## — confirm). This errors if pars$t.path is empty.
return(llk.integrated[i]);
}
}
grs.marginal.posterior <- function(
tree,
prior,
p.stay,
p.switch,
llk.data
) {
## Forward-backward pass over the GRS tree followed by per-node posterior
## summaries.  Returns `tree` column-bound with one summary row per node
## (max_b, summed_llk, b_ci_lhs, b_ci_rhs, POST_ACTIVE).
null.id <- which.min(abs(prior$b.grid))
## Get forward and G matrices
tmp <- grs.calc.llk.tree(
tree,prior,p.stay,p.switch,
llk.data,
TRUE, TRUE
)
f <- tmp$f;
g <- tmp$g;
b <- list()
## NOTE(review): llk.tree.b is allocated but never filled or used.
llk.tree.b <- array(0, c(nrow(tree), 2))
colnames(llk.tree.b) <- c("LLK.0","LLK.1")
## Backward recursion from the root (last row) down to node 1.
b[[nrow(tree)]] <- list(op=prior$prior, lmx=0)
for( i in (nrow(tree)-1):1 ) {
np <- tree[i,"Par"]
## Parent message with this child's own contribution (g) divided out.
tmp <- log(b[[np]]$op) + log(f[[np]]$op) - log(g[[i]]$op)
mx <- max(tmp)
tmp <- exp(tmp-mx)
## Stay with the parent state (p.stay) or redraw from the prior (p.switch).
val <- p.switch*sum(tmp)
b.tmp <- p.stay*tmp + prior$prior*val
## Max-rescale for numerical stability; the removed log-max goes into lmx.
mx2 <- max(b.tmp)
b[[i]] <- list(
op=b.tmp/mx2,
lmx = mx + log(mx2) + b[[np]]$lmx + f[[np]]$lmx - g[[i]]$lmx
)
}
## Per-node posterior summaries (data.frames); CI columns are flattened to
## "lhs-rhs" strings here and converted back to numeric below.
bs.post.dec <- list()
for (i in 1:nrow(tree)) {
tmp <- grs.get.posterior.node_1d(
f,
b,
prior, id=i, log.plot=T,plot=F, verbose=F);
tmp[[4]] <- paste(tmp[[4]],collapse='-')
tmp[[5]] <- paste(tmp[[5]],collapse='-')
bs.post.dec[[i]] <- tmp
}
## NOTE(review): summed_llk is computed but never used or returned.
summed_llk <- c()
for( i in 1:nrow(tree) ) {
summed_llk[i] <- sum(f[[i]]$op * b[[i]]$op) + f[[i]]$lmx + b[[i]]$lmx
}
out <- do.call(rbind,bs.post.dec)
out$POST_ACTIVE <- as.numeric(out$POST_ACTIVE)
out$max_b <- as.numeric(out$max_b)
out$b_ci_lhs <- as.numeric(out$b_ci_lhs)
out$b_ci_rhs <- as.numeric(out$b_ci_rhs)
out <- cbind(tree,out)
return(out)
}
grs.calc.lBF <- function(
tree,
prior,
p.stay,
p.switch,
llk.data,
eps = 1e-200
) {
## Log10 Bayes factor for the GRS tree model against the all-null model.
##
## NOTE(review): `pi1` is NOT a parameter — it is resolved from the calling
## or global environment.  This will error (or silently use a stale value)
## unless a global `pi1` exists; consider making it an explicit argument.
p00 <- p.stay+p.switch*(1-pi1);
logp00 <- log(p00);
## Terminal nodes = IDs that never appear as a parent.
i.ter <- tree[which(!(tree[,'ID'] %in% tree[,'Par'])),'ID'];
i.par <- setdiff(tree[,'ID'], i.ter);
i.with.data <- which(tree$selectable %in% "Y")
null.id <- which.min(abs(prior$b.grid))
llk.full <- grs.calc.llk.tree(
tree,prior,p.stay,p.switch,
llk.data
)
## calculate LLK under model with no active states and prior on this
llk.full.null<-rep(0, nrow(tree));
for (i in i.ter) {
## Floor the null-point likelihood at eps to keep log() finite.
tmp <- llk.data[[i]]$op[null.id]
if( tmp < eps ) tmp <- eps
llk.full.null[i] <- log(tmp) + llk.data[[i]]$lmx
}
for (i in i.par) {
w.d <- which(tree[,'Par']==i);
if(! i %in% i.with.data ) {
llk.full.null[i] <- sum(llk.full.null[w.d]) + length(w.d)*logp00;
} else {
## NOTE(review): `j` indexes llk.data by this node's POSITION within
## i.with.data, not by node ID — confirm llk.data is laid out that way.
j <- which(i.with.data == i)
llk.full.null[i] <- sum(llk.full.null[w.d]) + length(w.d)*logp00 +
log(llk.data[[j]]$op[null.id]) + llk.data[[j]]$lmx
}
}
llk.full.null.mrca <- llk.full.null[nrow(tree)]+log(1-pi1);
## Prior mass of the fully-null configuration.
l.p.full.null <- log(1-pi1)+(nrow(tree)-1)*logp00;
mrca <- nrow(tree);
llk.full.mrca <- log(llk.full$llk[mrca,2])+llk.full$llk[mrca,3];
## Get BF
## Subtract the null contribution on a max-shifted scale, then divide by
## the null prior odds.
tmp <- c(llk.full.mrca, llk.full.null.mrca);
mx <- max(tmp);
tmp2 <- mx + log(exp(tmp[1]-mx) - exp(tmp[2]-mx)) - llk.full.null.mrca;
tmp3 <- l.p.full.null - log(1-exp(l.p.full.null));
log10_treeBF <- (tmp2 + tmp3)/log(10)
return(as.numeric(log10_treeBF))
}
grs.calc.llk.tree <- function(
tree,
prior,
p.stay,
p.switch,
llk.data,
returnF = FALSE,
returnG = FALSE
) {
## Forward (leaf-to-root) pass computing integrated likelihoods over the
## effect-size grid for every node of the GRS tree.
##
## Args:
##   tree:     data frame with columns ID, Par (parent ID; last row = root)
##             and selectable ("Y" marks nodes carrying their own data).
##   prior:    list with b.grid (effect-size grid) and prior (grid weights).
##   p.stay:   probability a child keeps its parent's state.
##             (Bug fix: this argument used to be declared `p.stat` while
##             the body referenced `p.stay`, silently resolving it from the
##             calling environment; in-file callers pass positionally, so
##             the rename is backward compatible.)
##   p.switch: probability a child redraws its state from the prior.
##   llk.data: list of per-node likelihood surfaces, each with op (surface
##             over the grid, max-rescaled) and lmx (log of the removed max).
##   returnF / returnG: also return the forward / G message lists.
##
## Returns: list(llk = per-node LLK matrix, llk.full.mrca = ...) by default;
##   with returnF, list(llk, f) or (plus returnG) list(llk, f, g).
## Terminal nodes = IDs that never appear as a parent.
i.ter <- tree[which(!(tree[,'ID'] %in% tree[,'Par'])),'ID'];
i.par <- setdiff(tree[,'ID'], i.ter);
i.with.data <- which(tree$selectable %in% "Y")
null.id <- which.min(abs(prior$b.grid))
if(returnG) g.surf.tree <- list();
llk.tree <- array(0,c(nrow(tree),3))
colnames(llk.tree) <- c("LLK.0","LLK.1","Max.LLK.alt")
## Terminal nodes: read their likelihood surfaces directly.
for ( i in i.ter ) {
llk.tree[i,1] <- log(llk.data[[i]]$op[null.id]) + llk.data[[i]]$lmx
llk.tree[i,2] <- sum( prior$prior * llk.data[[i]]$op )
llk.tree[i,3] <- llk.data[[i]]$lmx
}
## Internal nodes: combine child messages (children precede their parent in
## the ID ordering, so their entries are already populated).
for ( i in i.par ) {
w.d <- which(tree[,'Par'] == i)
## NOTE(review): the grid shape is taken from llk.data[[2]] literally —
## assumes at least two entries sharing one grid shape.
tmp <- array(1, dim(llk.data[[2]]$op))
s1 <- 0
for( j in w.d ) {
## Child either keeps the parent state (p.stay) or switches to a state
## integrated over the prior (p.switch).
tmp.part <- p.stay * llk.data[[j]]$op + p.switch * llk.tree[j,2]
tmp <- tmp * tmp.part
if(returnG) {
gmx <- max(tmp.part)
g.surf.tree[[j]] <- list(
op = tmp.part/gmx, lmx = log(gmx) + llk.data[[j]]$lmx
)
}
s1 <- s1 + llk.data[[j]]$lmx
}
## Internal nodes with their own data contribute an extra emission term.
if( i %in% i.with.data ) {
j = which( i.with.data == i )
tmp.part <- llk.data[[j]]$op
tmp <- tmp * tmp.part
s1 <- s1 + llk.data[[j]]$lmx
}
## Max-rescale for numerical stability; track the removed log-max in lmx.
mx <- max(tmp)
tmp <- tmp/mx
llk.data[[i]] <- list(op=tmp, lmx = s1 + log(mx))
llk.tree[i,1] <- log(llk.data[[i]]$op[null.id]) + llk.data[[i]]$lmx
llk.tree[i,2] <- sum( prior$prior * llk.data[[i]]$op )
llk.tree[i,3] <- llk.data[[i]]$lmx
}
if( returnF ) {
if( ! returnG ) {
## Bug fix: this branch previously returned the non-existent `data.llk`
## (a transposition of `llk.data`), which errored whenever
## returnF = TRUE and returnG = FALSE.
return(list(llk=llk.tree,f=llk.data))
} else {
return(list(llk=llk.tree,f=llk.data, g = g.surf.tree))
}
} else {
## Integrated log-likelihood at the most recent common ancestor (root).
mrca <- nrow(tree)
llk.full.mrca <- log(llk.tree[mrca,2]) + llk.tree[mrca,3]
return(list(llk=llk.tree,llk.full.mrca=llk.full.mrca))
}
}
grs.get.posterior.node_1d <- function(
forward, backward, prior, id = 1, plot = FALSE,
return.ci = TRUE, verbose = FALSE, ci.level = 0.95, log.plot = TRUE
) {
## Marginal posterior summary for one node from the forward and backward
## message lists.
##
## Args:
##   forward, backward: per-node lists with op (surface over the effect-size
##     grid, max-rescaled) and lmx (log of the removed maximum).
##   prior:     list containing b.grid, the effect-size grid.
##   id:        node index to summarise.
##   plot:      draw the (log-)posterior over the grid.
##   return.ci: compute a highest-posterior credible interval.
##   verbose:   print diagnostics to the console.
##   ci.level:  coverage of the credible interval.
##   log.plot:  plot the log posterior rather than the raw posterior.
##
## Returns: one-row data.frame with max_b, summed_llk, b_ci_lhs, b_ci_rhs
##   and POST_ACTIVE (posterior probability the node is non-null).  The CI
##   bounds are NA when return.ci = FALSE.
null.id <- which.min(abs(prior$b.grid))
## Posterior over the grid ∝ forward × backward.
tmp <- forward[[id]]$op * backward[[id]]$op
tmp <- tmp/sum(tmp)
post.null <- tmp[null.id]
post.active <- 1 - post.null
## Zero out the null grid point and renormalise: the remaining surface is
## the posterior over effect sizes conditional on the node being active.
tmp[null.id] <- 0
tmp <- tmp/sum(tmp)
mx <- arrayInd(which.max(tmp), dim(tmp))
## Bug fix: `rg` used to be left undefined when return.ci = FALSE, which
## made the data.frame construction below error; default to NA bounds.
rg <- c(NA_real_, NA_real_)
if (verbose)
cat("\nNode ", id)
if (verbose)
cat("\nMax at b1 = ", prior$b.grid[mx[1]])
if (verbose)
cat("\nSummed LLK = ", log(sum(forward[[id]]$op * backward[[id]]$op)) +
forward[[id]]$lmx + backward[[id]]$lmx)
if (return.ci) {
## HPD-style interval: accumulate grid points in decreasing posterior
## order until ci.level of the mass is covered.
oo <- order(tmp, decreasing = T)
cs <- cumsum(tmp[arrayInd(oo, dim(tmp))])
w.ci <- oo[c(1, which(cs <= ci.level))]
inds <- arrayInd(w.ci, dim(tmp))
rg <- range(prior$b.grid[inds[, 1]])
if (verbose)
cat("\nCI b1(", ci.level, ") = ", paste(rg, collapse = " - "),
sep = "")
}
if (plot) {
if (log.plot)
tmp <- log(tmp)
plot(x = prior$b.grid, y = tmp,
main = paste("Node", id), xlab = "B1", ylab = "log(post)")
}
if (verbose)
cat("\n\n")
out <- data.frame(
max_b = prior$b.grid[mx[1]],
summed_llk = log(sum(forward[[id]]$op * backward[[id]]$op)) +
forward[[id]]$lmx + backward[[id]]$lmx,
b_ci_lhs = rg[1],
b_ci_rhs = rg[2],
POST_ACTIVE = post.active)
return(out)
}
|
e063fd73e66cd73df1d916d472936b000c74fb15 | 32886d6690229de10c70952678bc085af1395ff6 | /Hackerrank/Bon_Appetit.R | fefd1b52be173564bbe3244f6dd6e415458af88d | [] | no_license | jimmyyih518/ChallengeR | 4334276a24713b49d3af1aad28d73b6842e7096a | 3b9e0ca0cf5a2e4432f279296a6212c6a136f6ca | refs/heads/master | 2022-12-07T22:58:13.744239 | 2020-08-19T03:26:44 | 2020-08-19T03:26:44 | 280,044,701 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 382 | r | Bon_Appetit.R | # Enter your code here. Read input from STDIN. Print output to STDOUT
## HackerRank "Bon Appetit": line 1 of stdin holds n (number of items) and
## k (0-based index of the item Anna skipped); the following input holds the
## n item prices and then b, the amount Anna was actually charged.
f <- file("stdin")
open(f)
l1 = as.numeric(unlist(strsplit(readLines(f, n = 1), split=" ")))
l2 = as.numeric(unlist(strsplit(readLines(f, n = -1), split=" ")))
n=l1[1]
# Shift the skipped-item index to R's 1-based indexing.
k=l1[2]+1
bill=l2[1:n]
b=l2[n+1]
# Anna's fair share: half of the bill excluding the item she skipped.
billa = sum(bill[-k])/2
# Print "Bon Appetit" when the charge is fair, otherwise the overcharge.
# NOTE(review): exact == on a double; fine for integer inputs, fragile
# otherwise.
if(billa == b){
    out = 'Bon Appetit'
}
if(billa != b){
    out = b-billa
}
cat(out)
|
ae4f7e19c6b9b8556674fc121db60fbf6b9d18e3 | 73107b9e1fdb61642c680b31be5c2678a3e12366 | /tests/testthat/test-utils.R | d65cdb1562962b57ac3966825f8db2ff4871e22c | [
"MIT"
] | permissive | poissonconsulting/subfoldr | 555eba231e037156339dc1a8bc7f142b7218107e | 7004693ef8c088b41b0bc4397e95ee4dc437f7ac | refs/heads/master | 2021-07-23T00:49:23.745886 | 2021-02-12T22:22:36 | 2021-02-12T22:22:36 | 79,256,325 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,421 | r | test-utils.R | context("utils")
## testthat suite for the subfoldr utility helpers (path handling, sub/main
## state, heading renaming/ordering).  Each test_that block pins one helper.
test_that("add full stop", {
  expect_identical(add_full_stop("x"), "x.")
  expect_identical(add_full_stop("x."), "x.")
})
test_that("capitalize_first_letter_words", {
  expect_identical(capitalize_first_letter_words("x"), "X")
  expect_identical(capitalize_first_letter_words("oeuoe oo"), "Oeuoe Oo")
  expect_identical(capitalize_first_letter_words("tgI"), "TgI")
})
## file_path should drop empty components and not double the separator.
test_that("file_path", {
  reset_all()
  expect_identical(file_path(), character(0))
  expect_identical(file_path("x", "z"), "x/z")
  expect_identical(file_path("x", "", "z"), "x/z")
  expect_identical(file_path("x/", "z"), "x/z")
  expect_identical(file_path("x/", "", "z"), "x/z")
})
## get/set/reset of the package-level "sub" state.
test_that("sub", {
  expect_identical(get_sub(), "")
  expect_identical(set_sub("x", "z"), "x/z")
  expect_identical(get_sub(), "x/z")
  expect_identical(reset_sub(), "")
  expect_identical(get_sub(), "")
})
## get/set/reset of the package-level "main" state (default "output").
test_that("main", {
  expect_identical(get_main(), "output")
  expect_identical(set_main("x", "z"), "x/z")
  expect_identical(get_main(), "x/z")
  expect_identical(reset_main(), "output")
  expect_identical(get_main(), "output")
  reset_all()
})
test_that("all", {
  expect_true(reset_all())
})
test_that("sub_names", {
  expect_identical(sub_names("1")[[1]], "1")
  expect_identical(sub_names("")[[1]], "")
  expect_identical(sub_names("1/3")[[1]], c("1", "3"))
})
test_that("nsubs", {
  expect_identical(nsubs("1"), 1L)
  expect_identical(nsubs("1/3"), 2L)
  expect_identical(nsubs(c("1/3", ".")), c(2L, 1L))
})
## Uses fixture files shipped inside the installed subfoldr package.
test_that("list_files", {
  files <- list_files(file.path(system.file(package = "subfoldr"), "output", "tables"), report = TRUE)
  names(files) <- NULL # names depend on where run
  expect_identical(files,
                   c("first/2nd/third/TG","first/second/data2",
                     "first/second/mtcars2", "first/second/mtcars3"))
})
test_that("subs_matrix", {
  files <- list_files(file.path(system.file(package = "subfoldr"), "output", "tables"), report = TRUE)
  expect_identical(subs_matrix(files[1]), matrix(c("first", "2nd", "third", "TG"), ncol = 1))
  expect_identical(subs_matrix(files), matrix(c("first", "2nd", "third", "TG", "first", "second", "data2", "", "first", "second", "mtcars2", "", "first", "second", "mtcars3", ""), ncol = 4))
})
test_that("drop_rows", {
  subs_matrix <- matrix(as.character(1:4), ncol = 2)
  expect_identical(drop_rows(subs_matrix, drop = list(character(0))), c(FALSE, FALSE))
  expect_identical(drop_rows(subs_matrix, drop = list("oeu", "11")), c(FALSE, FALSE))
  expect_error(drop_rows(subs_matrix, drop = list("oeu", "11", "eee")))
  expect_identical(drop_rows(subs_matrix, drop = list("1")), c(TRUE, FALSE))
  expect_identical(drop_rows(subs_matrix, drop = list("2", "1")), c(FALSE, FALSE))
  expect_identical(drop_rows(subs_matrix, drop = list("1", "4")), c(TRUE, TRUE))
})
test_that("rename_heading", {
  expect_identical(rename_heading(1:2, c("1" = "x", "3" = "zz")), c("x", "2"))
})
test_that("rename_headings", {
  subs_matrix <- matrix(as.character(1:4), ncol = 2)
  expect_identical(rename_headings(subs_matrix, headings = list(character(0))), subs_matrix)
  expect_identical(rename_headings(subs_matrix, headings = list(c("1" = "x"))), matrix(as.character(c("x", 2:4)), ncol = 2))
  expect_identical(rename_headings(subs_matrix, headings = list(c("1" = "x", "4" = "zz"))), matrix(as.character(c("x", 2:4)), ncol = 2))
  expect_identical(rename_headings(subs_matrix, headings = list(c("1" = "x"), c("4" = "zz"))), matrix(as.character(c("x", 2:3, "zz")), ncol = 2))
})
## NOTE(review): this test contains setup but no expectations — it asserts
## nothing and always passes; either add expectations or remove it.
test_that("set_headers", {
  subs_matrix <- matrix(as.character(1:4), ncol = 2)
})
test_that("order_heading", {
  expect_identical(order_heading(c("1", "2"), character(0), locale = "en"), c("000001", "000002"))
  expect_identical(order_heading(c("2", "1"), character(0), locale = "en"), c("000002", "000001"))
  expect_identical(order_heading(c("2", "2", "1", "1"), character(0), locale = "en"), c("000002", "000002", "000001", "000001"))
  expect_identical(order_heading(c("1", "2", "this"), c("that" = "Blah", "this" = "This Title"), locale = "en"), c("000002", "000003", "000001"))
})
test_that("order_headings", {
  subs_matrix <- matrix(as.character(1:4), ncol = 2)
  expect_identical(order_headings(subs_matrix, list(character(0)), locale = "en"), c(1L, 2L))
  expect_identical(order_headings(subs_matrix, list(c("5" = "not", "3" = "this")), locale = "en"), c(2L, 1L))
})
|
beda7ebea7a2e648e21a0601a2c53a67656c160b | febcbdc28985329bfc0880b9ebf3a8109eefd55b | /Machine-Learning/Lecture/Advanced R Programming/Date and Timestamps.R | 6ddb4fce9f338f47e85feeba64a62af98ecddd95 | [] | no_license | saanghyuk/Data_Analysis_with_R | fd4d3c996e215992db8167f4664e64d151833340 | 8c6654c885b26876e68462e84aec59806e76d865 | refs/heads/master | 2021-09-13T20:47:22.079511 | 2018-05-04T04:23:28 | 2018-05-04T04:23:28 | 120,984,482 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 855 | r | Date and Timestamps.R |
#Dates
## Lecture notes: Date objects vs POSIXct/POSIXlt timestamps in R.
Sys.Date()
today <- Sys.Date()
class(today) #Date Object
#Date as Character
c<- "1990-01-01"
my.date <- as.Date(c)
class(my.date)
as.Date("Nov-03-1990") # this fails without an explicit format string
# Formatting
# %d Day of the month (decimal number)
# %m Month (decimal number)
# %b Month (abbreviated)
# %B Month (full name)
# %y Year (2 digit)
# %Y Year (4 digit)
my.date <- as.Date("Nov-03-90", format="%b-%d-%y")
my.date
as.Date("June,01,2002", format="%B,%d,%Y")
#POSIXct # portable operating system interface
as.POSIXct("11:02:03", format="%H:%M:%S")
# from here on, just use this one
?strptime # everything is documented here; this is the basic reference
time<-strptime("11:02:03", format="%H:%M:%S")
help(strptime)
#strptime("date", format="")
class(time)
Sys.Date()
hi<-strptime(Sys.Date(), "%Y-%m-%d")
# printing class() on these shows POSIXlt
|
487705618be13753e8dbd663d6182c59c8f09c09 | c33101fd9e8dc2992d63147c7ae450508e33ad63 | /TESTSVM/R/4Movie Analysis.R | d331a7f33e4382598f568f3e9b6adb9eb8662400 | [] | no_license | bgg11117/USBoxOfficeModel | 2bdfccd40ab5cefb2438c6a379fef29d63201f07 | d934f018f7a8f1de022cc20a55d8b616e5f9a2c3 | refs/heads/master | 2020-07-25T17:47:12.980782 | 2019-11-04T06:34:45 | 2019-11-04T06:34:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,777 | r | 4Movie Analysis.R | #rm(list=ls(all=TRUE))
## US box-office classification: load packages, read the YouTube trailer
## dataset and engineer distributor/MPAA/genre features for modelling.
library(e1071)
library(scales)
library(plyr)
library(ggplot2)
library(tidyr)
library(lattice)
library(stringi)
library(quantreg)
library(SparseM)
library(caret)
library(partykit)
library(grid)
library(C50)
library(rpart)
library(ipred)
library(MASS)
library(kernlab)
library(randomForest)
library(vcd)
library(class)
library(gmodels)
library(xgboost)
library(Matrix)
library(dplyr)
library(DT)
## English month abbreviations so Release.Date parsing/format() is stable.
Sys.setlocale("LC_TIME", "English")
AAPL = read.csv('./input/Youtubelist.csv')
## Drop extreme trailer view counts (>= 100M views).
AAPL <- AAPL %>% filter(Youtube.Views<100000000)
AAPX <- AAPL
AAPB <- AAPL
#--------------feature engineering-------------------
## Collapse distributor labels into their present-day parent studios.
AAPL$Distrubutor[AAPL$Distrubutor=='DreamWorks'] <- 'Paramount'
AAPL$Distrubutor[AAPL$Distrubutor=='Miramax'] <- 'Buena Vista'
AAPL$Distrubutor[AAPL$Distrubutor %in% c('Columbia','Samuel Goldwyn',
                                         'MGM','TriStar')] <- 'Sony / Columbia'
## Column 8 is Distrubutor here; any "Lions*" label becomes Lionsgate.
AAPL[grepl('Lions',AAPL$Distrubutor),][8] <- 'Lionsgate'
levels(AAPL$Distrubutor)[levels(AAPL$Distrubutor)=='Unknown'] <- 'NA'
AAPL$Distrubutor[AAPL$Distrubutor=='Unknown'] <- 'NA'
#trick(one class)
## Rename one level so "Independent Studio" exists, then bucket everything
## outside the majors into it.
levels(AAPL$Distrubutor)[levels(AAPL$Distrubutor)=="IFC"] <- "Independent Studio"
AAPL$Distrubutor[!AAPL$Distrubutor%in% c('Sony / Columbia','Warner Bros.','Fox',
            'Universal','Buena Vista','Paramount','Lionsgate',"NA")] <- "Independent Studio"
#fox searchlight , sony
#AAPL[grepl('Sony',AAPL$Distrubutor),][8] <- 'Sony / Columbia'
#AAPL[grepl('Fox',AAPL$Distrubutor),][8] <- 'Fox'
## Normalise MPAA ratings (GP is the historical name for PG).
AAPL$MPAA[AAPL$MPAA=='GP'] <- 'PG'
levels(AAPL$MPAA)[levels(AAPL$MPAA)=="Unknown"] <- "NA"
AAPL$MPAA[AAPL$MPAA %in% c('Not Yet Rated','Unrated')] <- 'NA'
levels(AAPL$Genre)[levels(AAPL$Genre)=="Unknown"] <- "NA"
#data processing
AAPL.svm = AAPL[,-c(1:5,7,12)]#Range to process
## Drop rows with NA in any of the first 7 remaining feature columns.
for(i in 1:7)
{AAPL.svm = AAPL.svm[!is.na(AAPL.svm[,i]),]}
## Bin Box into ordinal classes: <=10M, 10-50M, 50-100M, >100M (Y = 1..4).
AAPL.svm$Y = c(rep(0,length(AAPL.svm$Box)))
ranklist = c(-1,1e7L,5e7L,1e8L)
ranklist = as.numeric(ranklist)
for(i in 2:length(ranklist))
{
  AAPL.svm$Y[AAPL.svm$Box<=ranklist[i] & AAPL.svm$Box>ranklist[i-1]] = i-1
}
AAPL.svm$Y[AAPL.svm$Box>ranklist[length(ranklist)]] = length(ranklist)
## Drop the raw Box column; keep Y as a factor target.
AAPL.svm = AAPL.svm[,-6]
AAPL.svm= as.data.frame(AAPL.svm)
AAPL.svm$Y = as.factor(AAPL.svm$Y)
## Standardise the numeric predictors.
AAPL.svm$Youtube.Views = scale(AAPL.svm$Youtube.Views)
AAPL.svm$Runtime = scale(AAPL.svm$Runtime)
## Turn the "NA" placeholder level into real NA, then drop those rows.
AAPL.svm[AAPL.svm=="NA"]<-NA
AAPL.svm <- na.omit(AAPL.svm)
AAPL.svm$Genre <- droplevels(AAPL.svm$Genre)
AAPL.svm$MPAA <- droplevels(AAPL.svm$MPAA)
AAPL.svm$Distrubutor <- droplevels(AAPL.svm$Distrubutor)
#movie predict - svm + xgboost
#sparse_matrix for xgboost
## 80/20 train/test split; one-hot encode the factors via sparse.model.matrix.
set.seed(123)
train_sample <- sample(nrow(AAPL.svm),nrow(AAPL.svm)*0.8)
AAPL.svm_train <- AAPL.svm[train_sample,]
AAPL.svm_test <- AAPL.svm[-train_sample,]
outputvector <- as.numeric((AAPL.svm_train$Y))-1 #xgboost(startfrom 0)
train_sparse_matrix <- sparse.model.matrix(Y~.-1, data = AAPL.svm_train)
test_sparse_matrix <- sparse.model.matrix(Y~.-1, data = AAPL.svm_test )
head(train_sparse_matrix)
#cross-validation to choose the parameters
m=nlevels(AAPL.svm$Y)
param = list("objective" = "multi:softprob",
             "eval_metric" = "mlogloss",
             "num_class" = m)
cv.nround <- 200
cv.nfold <- 10
bst.cv = xgb.cv(param=param, data=train_sparse_matrix, label=outputvector, 
                nfold = cv.nfold, nrounds = cv.nround)
nround <- which(bst.cv$test.mlogloss.mean==min(bst.cv$test.mlogloss.mean)) 
#select the number of trees with the smallest test mlogloss for model building
bst <- xgboost(data = train_sparse_matrix, label = outputvector, 
               param=param, nrounds = nround )
## predict() returns class probabilities flattened row-major; reshape to an
## n x m matrix and take the most probable class per row.
pred <- predict(bst,test_sparse_matrix)
pred = t(matrix(pred,m,length(pred)/m))
pred = levels(AAPL.svm_test$Y)[max.col(pred)]
# confusion matrix
xg_accuracy = table(AAPL.svm_test$Y,pred)
xg_accuracy
xg_ac <- sum(diag(xg_accuracy))/sum(xg_accuracy)
#accuracy = 66%
#xgboost importance(train set or entity?)
#AAPL.sparse.matrix <- sparse.model.matrix(Y~.-1, data = AAPL.svm)(if all)
importance <- xgb.importance(train_sparse_matrix @Dimnames[[2]], model = bst)
head(importance)
xgb.plot.importance(head(importance))
#SVM- evaluating model performance
#tuned = tune.svm(Y ~ ., data = AAPL.svm_train, gamma = 2^(-7:-5), cost = 2^(2:4))
#summary(tuned)
## NOTE(review): 'kernal' is a typo for 'kernel' — e1071::svm() will absorb
## it via ... and silently fall back to the default kernel; verify intent.
svm.model = svm(x=train_sparse_matrix , y = AAPL.svm_train$Y, kernal='radial', type = 'C-classification'
                , cost = 16, gamma = 0.03125)
#data = AAPL.svm or train_sparse_matrix(with Y)
AAPL.svm_pred = predict(svm.model, test_sparse_matrix)
table(AAPL.svm_pred, AAPL.svm_test$Y)
correction <- AAPL.svm_pred == AAPL.svm_test$Y
## prop.table(...)[[2]] is the TRUE share, i.e. test-set accuracy.
prop.table(table(correction))
svm_ac <- round(prop.table(table(correction))[[2]],2)
#accuracy = 65%
#----- sparse matrix for no split(use occasionally)
#sparse_matrix <- sparse.model.matrix(Y~.-1, data = AAPL.svm)
#---------if classify only 2 classes
#method 1 : xgb.cv( just change the objective)
#"objective" = "binary:logistic"
#method 2 : no xgb.cv
#outputvector <- AAPL.svm[,c(7)] == '1'
#bst <- xgboost(data = sparse_matrix, label = outputvector, max.depth = 4,
#eta = 1, nthread = 2, nround = 10,objective = "binary:logistic")
#---------------------------- knn
## k-NN on the same one-hot encoded features, k = 10.
knn_train_sparse <-sparse.model.matrix(Y~.-1, data = AAPL.svm_train)
trainoutputvector <- AAPL.svm_train$Y
knn_test_sparse <- sparse.model.matrix(Y~.-1, data = AAPL.svm_test)
testoutputvector <- AAPL.svm_test$Y
knn.fit <- knn(train=knn_train_sparse, test=knn_test_sparse , cl= trainoutputvector,
               k=10)
CrossTable(x=testoutputvector , y=knn.fit , prop.chisq = FALSE)
prop.table(table(testoutputvector==knn.fit))
knn_ac <- round(prop.table(table(testoutputvector==knn.fit))[[2]],2)
#accuracy 63%
#----------------------------
#bonus
control <- trainControl(method = 'repeatedcv',number=10,repeats = 3)
#Cart
set.seed(300)
fit.cart <- train(Y~., data = AAPL.svm_train , method = 'rpart',trControl = control)
AAPL.cart_pred<- predict(fit.cart,AAPL.svm_test)
table(AAPL.cart_pred, AAPL.svm_test$Y)
correction <- AAPL.cart_pred == AAPL.svm_test$Y
prop.table(table(correction))
cart_ac <- round(prop.table(table(correction))[[2]],2)
#accuracy 60%
#LDA
set.seed(300)
fit.lda <- train(Y~., data = AAPL.svm_train , method = 'lda',trControl = control)
AAPL.lda_pred<- predict(fit.lda,AAPL.svm_test)
table(AAPL.lda_pred, AAPL.svm_test$Y)
correction <- AAPL.lda_pred == AAPL.svm_test$Y
prop.table(table(correction))
lda_ac <- round(prop.table(table(correction))[[2]],2)
#accuracy 63%
#RandomForest
set.seed(300)
fit.rf <- train(Y~., data = AAPL.svm_train , method = 'rf',trControl = control)
AAPL.rf_pred<- predict(fit.rf,AAPL.svm_test)
table(AAPL.rf_pred, AAPL.svm_test$Y)
correction <- AAPL.rf_pred == AAPL.svm_test$Y
prop.table(table(correction))
rf_ac <- round(prop.table(table(correction))[[2]],2)
#accuracy 64%
#collect resamples
resampleresults <- resamples(list(CART = fit.cart , LDA = fit.lda , RF = fit.rf))
summary(resampleresults)
#plot accuracy
results <- t(as.data.frame(list(XGboost = xg_ac, SVM= svm_ac, knn= knn_ac,
CART = cart_ac , RF = rf_ac)))
results <- as.data.frame(results)
colnames(results) <- 'Accuracy'
ggplot(results,aes(reorder(rownames(results),-Accuracy),Accuracy))+
geom_bar(stat='identity',fill="#009E73",width=0.25)+
coord_cartesian(ylim = c(0,1))+
geom_text(data = results,aes(x= rownames(results),y=Accuracy,label=Accuracy,vjust=-1,size=3))+
ggtitle('ML method accuracy')+xlab('method')+
theme(panel.background = element_blank(),
axis.line.x = element_line(color='black',size=0.25),
axis.line.y = element_line(color='black',size=0.25),
axis.ticks.x=element_blank(),
legend.position = 'None')
ggsave('method.png')
#Accuracy : Xgboost is the best
#----------------ggplot data analysis---------------------
AAPX <- AAPX[,-c(1,3,12)]
AAPX$Distrubutor[AAPX$Distrubutor=='DreamWorks'] <- 'Paramount'
AAPX$Distrubutor[AAPX$Distrubutor=='Miramax'] <- 'Buena Vista'
AAPX$Distrubutor[AAPX$Distrubutor %in% c('Columbia','Samuel Goldwyn',
'MGM','TriStar')] <- 'Sony / Columbia'
AAPX[grepl('Lions',AAPX$Distrubutor),][6] <- 'Lionsgate'
levels(AAPX$Distrubutor)[levels(AAPX$Distrubutor)=='Unknown'] <- 'NA'
AAPX$Distrubutor[AAPX$Distrubutor=='Unknown'] <- 'NA'
#trick(one class)
AAPX$MPAA[AAPX$MPAA=='GP'] <- 'PG'
levels(AAPX$MPAA)[levels(AAPX$MPAA)=="Unknown"] <- "NA"
AAPX$MPAA[AAPX$MPAA %in% c('Not Yet Rated','Unrated')] <- 'NA'
levels(AAPX$Genre)[levels(AAPX$Genre)=="Unknown"] <- "NA"
AAPX[AAPX=="NA"]<-NA
AAPX<- na.omit(AAPX)
AAPX$Genre <- droplevels(AAPX$Genre)
AAPX$MPAA <- droplevels(AAPX$MPAA)
AAPX$Distrubutor <- droplevels(AAPX$Distrubutor)
any(is.na(AAPX))
AAPX$Release.Date<- as.Date(AAPX$Release.Date, format = "%Y/%m/%d")
#AAPX$Month <- format(AAPX$Release.Date,"%b")
AAPX$Month <- factor(AAPX$Month, levels = c("Jan", "Feb","Mar","Apr",'May','Jun',
'Jul','Aug','Sep','Oct','Nov','Dec'))
#Box per MPAA
MPAA <- AAPX %>% group_by(MPAA)%>% summarise(Avg_Box=round(mean(Box),2)) %>%
arrange(desc(Avg_Box))
datatable(MPAA)
ggplot(MPAA,aes(reorder(MPAA,Avg_Box),Avg_Box,group=1,fill=MPAA))+geom_bar(stat='identity')+
xlab("MPAA")+ylab("Avg_Box")+ggtitle("Avg_Box of MPAA from 1980-2016")+
theme(axis.text.x = element_text())+ coord_flip()
#Box per Genre
# Average box office by genre: full ranking, then the top-10 genres only
# (saved to Genre10.png).
Genre <- AAPX %>% group_by(Genre)%>%summarise(Avg_Box=round(mean(Box),2))%>%
  arrange(desc(Avg_Box))
datatable(Genre)
ggplot(Genre ,aes(reorder(Genre,Avg_Box),Avg_Box,group=1,fill=Genre))+geom_bar(stat='identity')+
  xlab("Genre")+ylab("Avg_box") +theme(legend.position='None')+coord_flip()
# top_n(10) keeps the ten genres with the highest Avg_Box.
Genre10 <- AAPX %>% group_by(Genre)%>%summarise(Avg_Box=round(mean(Box),2))%>%
  arrange(desc(Avg_Box))%>%top_n(10)
datatable(Genre10)
ggplot(Genre10 ,aes(reorder(Genre,Avg_Box),Avg_Box,group=1,fill=Genre))+geom_bar(stat='identity')+
  xlab("Genre")+ylab("Avg_box") +theme(legend.position='None')+coord_flip()+
  theme(axis.ticks = element_blank(),panel.background = element_blank(),
        axis.text.x= element_text(),
        axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5))
ggsave('Genre10.png', width = 20, height = 10, units = "cm")
#Box per Genre,MPAA
# Average box office by MPAA within each genre; faceted bars on a square-root
# y scale with a red reference line at $100M.
MG <- AAPX %>% group_by(MPAA,Genre) %>%summarise(Avg_Box=round(mean(Box),2))%>%ungroup()%>%
  arrange(desc(Avg_Box))
datatable(MG)
MG1 <- AAPX %>% group_by(MPAA,Genre) %>%summarise(Avg_Box=round(mean(Box),2))%>%
  arrange(desc(Avg_Box))
ggplot(MG1,aes(MPAA,Avg_Box,color=MPAA,fill=MPAA))+geom_bar(stat='identity')+
  scale_y_sqrt(limits=c(0,5e+08))+facet_wrap(~Genre)+
  geom_hline(aes(yintercept = 1e+08),color='red',size=0.5)
ggsave('MG1.png', width = 35, height = 35, units = "cm")
# AAPX$Title[which.max(AAPX$Youtube.Views)]
#Box per month
# Average box office per release month: interactive table plus a flipped bar
# chart saved to bm1.png.
bm <- AAPX %>% group_by(Month) %>%summarise(Avg_Box=round(mean(Box),2))%>%ungroup()%>%
  arrange(desc(Avg_Box))
datatable(bm)
bm1 <- AAPX %>% group_by(Month) %>%summarise(Avg_Box=round(mean(Box),2))%>%
  arrange(desc(Avg_Box))
# FIX: removed the trailing comma after the last theme() argument; R passes
# it as an empty argument into the dots, which errors when the call is
# evaluated.
ggplot(bm1, aes(Month, Avg_Box,fill=Month)) +
  geom_bar(stat='identity') + theme(legend.position = 'top')+
  ggtitle("Avg Box by Month")+ coord_flip()+
  theme(panel.background = element_blank(),
        axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5))
ggsave('bm1.png', width = 20, height = 10, units = "cm")
#Box per month before 2011 vs after 2011
# Monthly average box office of $100M+ films, split at 2011: pre-2011 (blue)
# vs 2011-onward (red), overlaid as lines and saved to Year2011.png.
bmb <- AAPX %>% filter(Year<2011 & Box>1e+08)%>%group_by(Month) %>%summarise(Avg_Box=round(mean(Box),2))%>%
  arrange(desc(Avg_Box))
ggplot(bmb, aes(Month, Avg_Box,group=1)) +
  geom_line() + theme(legend.position = 'none')+
  ggtitle("Avg Box by Month")
bma <- AAPX %>% filter(Year>=2011& Box>1e+08)%>%group_by(Month) %>%summarise(Avg_Box=round(mean(Box),2))%>%
  arrange(desc(Avg_Box))
ggplot() +
  geom_line(data=bmb, aes(x=Month, y=Avg_Box,group=1), color='blue') +
  geom_line(data=bma, aes(x=Month, y=Avg_Box,group=1), color='red')+
  theme(panel.background=element_blank(),
        axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5))
ggsave('Year2011.png')
#Box per Genre, Month
# Average box office by month within each genre; faceted bars on a sqrt scale
# with a $100M reference line, saved to GenRe.png.
GenRe <- AAPX %>% group_by(Month,Genre) %>%summarize(Avg_box=round(mean(Box),2))%>%ungroup()%>%
  arrange(desc(Avg_box))
datatable(GenRe)
ggplot(GenRe,aes(Month,Avg_box,color=Month,fill=Month))+geom_bar(stat='identity')+
  scale_y_sqrt(limits=c(0,5e+08))+facet_wrap(~Genre)+geom_hline(aes(yintercept = 1e+08),color='red',size=0.5)+
  theme(axis.text.x=element_text(angle = 90, hjust = 1))
ggsave('GenRe.png', width = 35, height = 35, units = "cm")
#Box per MPAA,Month
# Average box office by month and MPAA rating, first as a heat map, then as
# one line per rating (saved to Mom.png).
Mom <- AAPX %>% group_by(Month,MPAA) %>%summarize(Avg_box=round(mean(Box),2))%>%ungroup()%>%
  arrange(desc(Avg_box))
datatable(Mom)
ggplot(Mom, aes(Month, MPAA, fill = Avg_box)) +
  geom_tile(color = "white") +
  ggtitle("Avg Box by Month and MPAA")
#or: same data as lines, one per MPAA rating
ggplot(Mom, aes(Month, Avg_box, group = MPAA,color=MPAA)) +
  geom_line() + ggtitle("Avg Box by Month and MPAA")+
  theme(panel.background = element_blank(),
        axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5))
ggsave('Mom.png', width = 20, height = 10, units = "cm")
#Box per distributor
# Film count and average box office for the six major distributors ("big 6"),
# ranked by average box office.
Dis6 <- AAPX %>% group_by(Distrubutor)%>% filter(Distrubutor %in% c('Sony / Columbia',
                                                                   'Warner Bros.','Fox','Universal','Buena Vista','Paramount'))%>%
  summarise(count = n(),avg_box=round(mean(Box),2))%>%ungroup() %>%arrange(desc(avg_box))
datatable(Dis6)
ggplot(Dis6,aes(reorder(Distrubutor,avg_box),avg_box,fill=Distrubutor))+geom_bar(stat='identity')+
  theme(legend.position='None')+coord_flip()+ xlab("Big 6 Dis")
#Box per distrubutor in terms of month
# Big-6 distributor averages broken down by release month, faceted by month
# and saved to Mis6.png.
Mis6 <- AAPX %>% group_by(Distrubutor,Month)%>% filter(Distrubutor %in% c('Sony / Columbia',
                                                                         'Warner Bros.','Fox','Universal','Buena Vista','Paramount'))%>%
  summarise(count = n(),avg_box=round(mean(Box),2))%>%ungroup() %>%arrange(desc(avg_box))
datatable(Mis6)
ggplot(Mis6,aes(reorder(Distrubutor,avg_box),avg_box,fill=Distrubutor))+
  geom_bar(stat='identity')+ facet_wrap(~Month)+
  theme(legend.position='None',axis.text.x= element_text(angle = -90, hjust = 0.5))+
  coord_flip()+ xlab("Big 6 Dis")
ggsave('Mis6.png', width = 20, height = 10, units = "cm")
#Box per distrubutor in terms of Year
# Yearly average box office per big-6 distributor, as one line per studio,
# saved to Yis6.png.
Yis6 <- AAPX %>% group_by(Distrubutor,Year)%>% filter(Distrubutor %in% c('Sony / Columbia',
                                                                        'Warner Bros.','Fox','Universal','Buena Vista','Paramount'))%>%
  summarise(count = n(),avg_box=round(mean(Box),2))%>%ungroup() %>%arrange(desc(avg_box))
datatable(Yis6)
ggplot(Yis6,aes(Year,avg_box,color=Distrubutor))+
  geom_line(size=0.5)+theme(axis.text.x= element_text(),
                            panel.background = element_blank(),
                            axis.line.x = element_line(color="black", size = 0.5),
                            axis.line.y = element_line(color="black", size = 0.5))+
  xlab("Big 6 Dis")
ggsave('Yis6.png', width = 20, height = 10, units = "cm")
#Youtube vs Box
# Scatter of YouTube trailer views against box office, with a smoother and
# non-overlapping title labels: first for all films, then for releases after
# 2012 only.
ggplot(AAPX,aes(Youtube.Views,Box))+geom_point()
#(Year > 2013)
YouB <- AAPX %>%select(Title,Box,Youtube.Views)
ggplot(YouB,aes(Youtube.Views,Box,label=Title))+geom_point()+geom_smooth()+
  geom_text(check_overlap = TRUE)+
  theme(axis.text.x= element_text(),
        panel.background = element_blank(),
        axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5))
ggsave('YouB.png')
# Same plot restricted to films released after 2012.
YouB1 <- AAPX %>%filter(Year >2012)%>% select(Title,Box,Youtube.Views)
ggplot(YouB1,aes(Youtube.Views,Box,label=Title))+geom_point()+geom_smooth()+
  geom_text(check_overlap = TRUE)+
  theme(axis.text.x= element_text(),
        panel.background = element_blank(),
        axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5))
ggsave('YouB1.png')
#correlation between box & youtube from 2005-2016
# For each starting year, the correlation between columns 13 and 14 (box
# office and YouTube views per the original indexing -- confirm against the
# AAPL layout) over all films released in that year or later.
# Rewritten to preallocate via vapply() instead of growing a data frame with
# rbind() inside the loop. NOTE(review): this uses the full AAPL data rather
# than the cleaned AAPX, as the original did -- confirm that is intended.
years <- 2005:2016
r_values <- vapply(years, function(start_year) {
  AAPA <- AAPL %>% filter(Year >= start_year)
  round(cor(AAPA[[13]], AAPA[[14]]), 2)
}, numeric(1))
# Same shape as before: one column 'r', row names "2005".."2016".
correlation <- data.frame(r = r_values, row.names = years)
# Plot the year-vs-correlation table, once as a line chart and once as bars
# (the bar version is saved to corr.png).
# NOTE(review): vjust/size inside aes() are mapped as aesthetics rather than
# set as fixed parameters -- the text size is not literally 3; confirm intent.
ggplot(correlation,aes(rownames(correlation),r,group=1)) +
  geom_line(color='blue')+
  geom_text(data =correlation,aes(x= rownames(correlation),y=r,label=r,vjust=-1,size=3))+
  ggtitle("Corr between Box & Youtube Views")+xlab('Year')+
  theme(panel.background = element_blank(),
        axis.line.x = element_line(color='black',size=0.25),
        axis.line.y = element_line(color='black',size=0.25),
        axis.ticks.x=element_blank(),
        legend.position = 'None')
#or: the same correlations as a bar chart on a fixed 0-1 scale
ggplot(correlation,aes(rownames(correlation),r)) +
  geom_bar(stat='identity',fill="#009E73",width=0.25)+
  coord_cartesian(ylim = c(0,1))+
  ggtitle("Corr between Box & Youtube Views")+xlab('Year')+
  geom_text(data =correlation,aes(x= rownames(correlation),y=r,label=r,vjust=-1,size=3))+
  theme(panel.background = element_blank(),
        axis.line.x = element_line(color='black',size=0.25),
        axis.line.y = element_line(color='black',size=0.25),
        axis.ticks.x=element_blank(),
        legend.position = 'None')
ggsave('corr.png')
# MPAA,Box,Year
# Yearly average box office per MPAA rating (line chart), then per genre
# (faceted bars, saved to GBY1.png).
MBY <- AAPX %>% group_by(MPAA,Year) %>%summarise(avg_box = round(mean(Box),2)) %>% ungroup()%>%
  arrange(desc(avg_box))
datatable(MBY)
MBY1 <- AAPX %>% group_by(MPAA,Year) %>%summarise(avg_box = round(mean(Box),2)) %>%
  arrange(desc(avg_box))
ggplot(MBY1,aes(Year,avg_box,group=MPAA,color=MPAA))+geom_line()
# Genre ,Box,Year
GBY <- AAPX %>% group_by(Genre,Year) %>%summarise(avg_box = round(mean(Box),2)) %>% ungroup()%>%
  arrange(desc(avg_box))
datatable(GBY)
GBY1 <- AAPX %>% group_by(Genre,Year) %>%summarise(avg_box = round(mean(Box),2)) %>%
  arrange(desc(avg_box))
ggplot(GBY1,aes(Year,avg_box,color=Year,fill=Year))+geom_bar(stat='identity')+
  scale_y_sqrt(limits=c(0,5e+08))+ facet_wrap(~Genre)+geom_hline(aes(yintercept = 1e+08),color='red',size=0.5)
ggsave('GBY1.png', width = 20, height = 20, units = "cm")
#MPAA max box office pk
# Highest-grossing film per (MPAA, Year); merging back on Box recovers the
# film's attributes.
# NOTE(review): the merge assumes Box values are unique -- duplicated gross
# figures would fan the join out; confirm.
MPAAmax <- AAPX %>% group_by(MPAA,Year)%>%summarise(Box = max(Box))
MPAAmaxT <- merge(MPAAmax, AAPX, by = "Box")
MPAAmaxT <- MPAAmaxT[,c(1:4)]
#rename(MPAAmaxT, c("MPAA.x"="MPAA", "Year.x"="Year"))
colnames(MPAAmaxT)[2] <- 'MPAA'
colnames(MPAAmaxT)[3] <- 'Year'
colnames(MPAAmaxT)[1] <- 'maxbox'
# One line per rating; only R/PG-13 films over $200M get text labels.
ggplot(MPAAmaxT,aes(Year,maxbox,group=MPAA,color=MPAA,label=Title))+geom_line()+
  geom_text(check_overlap = TRUE,data=subset(MPAAmaxT, (MPAA=='R'| MPAA=='PG-13')&
                                               maxbox >= 2e8L))+
  theme(axis.text.x= element_text(),
        panel.background = element_blank(),
        axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5))
ggsave('MPAAmaxT.png', width = 20, height = 10, units = "cm")
#Year max box office in terms of MPAA pk
# Highest-grossing film per year, coloured by its MPAA rating; merging back
# on Box recovers the film's attributes (assumes Box values are unique).
MPmax <- AAPX %>% group_by(Year)%>%summarise(Box = max(Box))
MPmaxT <- merge(MPmax, AAPX, by = "Box")
MPmaxT <- MPmaxT[,c(1:3,11)]
#rename(MPAAmaxT, c("MPAA.x"="MPAA", "Year.x"="Year"))
colnames(MPmaxT)[2] <- 'Year'
colnames(MPmaxT)[1] <- 'maxbox'
# FIX: the label layer subsetted MPAAmaxT (the per-MPAA-and-year table from
# the previous section) instead of MPmaxT, so the text labels did not match
# the plotted bars.
ggplot(MPmaxT,aes(Year,maxbox,group=MPAA,fill=MPAA,label=Title))+geom_bar(stat='identity')+
  geom_text(check_overlap = TRUE,data=subset(MPmaxT, maxbox >= 3e8L))+
  theme(axis.text.x= element_text(),
        panel.background = element_blank(),
        axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5))
ggsave('MPmaxT.png', width = 20, height = 10, units = "cm")
#Year max box office in terms of Genre pk
# Highest-grossing film per year, coloured by genre; films over $400M get
# title labels. Saved to GenremaxT.png.
Genremax <- AAPX %>% group_by(Year)%>%summarise(Box = max(Box))
GenremaxT <- merge(Genremax, AAPX, by = "Box")
GenremaxT <- GenremaxT[,c(1:3,9)]
#rename(MPAAmaxT, c("Genre.x"="Genre", "Year.x"="Year"))
colnames(GenremaxT)[2] <- 'Year'
colnames(GenremaxT)[1] <- 'maxbox'
ggplot(GenremaxT,aes(Year,maxbox,group=Genre,fill=Genre,label=Title))+
  geom_bar(stat='identity')+
  geom_text(check_overlap = TRUE,data=subset(GenremaxT, maxbox >= 4e8L))+
  theme(axis.text.x= element_text(),
        panel.background = element_blank(),
        axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5),
        legend.position = 'top')
ggsave('GenremaxT.png', width = 20, height = 10, units = "cm")
# detailed Box per MPAA
library(xts)
library(dygraphs)
# Parse "YYYY-MM-DD" strings (scalar or vector) into POSIXct date-times in
# the local time zone.
backDate <- function(x) {
  as.POSIXct(x, format = "%Y-%m-%d")
}
# Daily average box office per release date, one table per MPAA rating.
# NOTE(review): only xt1 (PG) is actually plotted below; xt, xt2, xt3 and xt4
# are built but unused -- confirm whether the other ratings should be shown.
xt <- AAPX%>%filter(MPAA=='G')%>%group_by(MPAA,Release.Date)%>%summarise(avg_box=mean(Box))%>%
  arrange(desc(avg_box))
xt$Release.Date <- backDate(xt$Release.Date)
xt1 <- AAPX%>%filter(MPAA=='PG')%>%group_by(MPAA,Release.Date)%>%summarise(avg_box=mean(Box))%>%
  arrange(desc(avg_box))
xt1$Release.Date <- backDate(xt1$Release.Date)
xt2 <- AAPX%>%filter(MPAA=='PG-13')%>%group_by(MPAA,Release.Date)%>%summarise(avg_box=mean(Box))%>%
  arrange(desc(avg_box))
xt2$Release.Date <- backDate(xt2$Release.Date)
xt3 <- AAPX%>%filter(MPAA=='R')%>%group_by(MPAA,Release.Date)%>%summarise(avg_box=mean(Box))%>%
  arrange(desc(avg_box))
xt3$Release.Date <- backDate(xt3$Release.Date)
xt4 <- AAPX%>%filter(MPAA=='NC-17')%>%group_by(MPAA,Release.Date)%>%summarise(avg_box=mean(Box))%>%
  arrange(desc(avg_box))
xt4$Release.Date <- backDate(xt4$Release.Date)
# Interactive time-series chart of the PG table with a range selector.
# NOTE(review): xts() of a table that still contains the MPAA column coerces
# everything to a common type -- confirm the intended series selection.
dxts <- xts(xt1, order.by=xt1$Release.Date)
dygraph(dxts, main="Box Office per time") %>%
  dySeries("Release.Date", label = "MPAA") %>%
  dyRangeSelector(height = 10)
|
2b403b40300e1517dbb8ff2235df3fba57ac4851 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/Rfast/R/skew.R | 22f0be5c0eebdf6935b48092a5d014fadb71be74 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 460 | r | skew.R | #[export]
# Sample skewness of a numeric vector (adjusted Fisher-Pearson estimator,
# n * m3 / ((n - 1) * (n - 2) * s^3)). When pvalue = TRUE, also returns the
# p-value of a chi-square test of zero skewness as a named two-element vector
# c(skewness, p-value).
skew <- function(x, pvalue = FALSE) {
  n <- length(x)
  dev <- x - sum(x) / n
  denom <- (sum(dev^2) / (n - 1))^1.5
  g1 <- n * sum(dev^3) / ((n - 1) * (n - 2) * denom)
  if (!pvalue) {
    return(g1)
  }
  # Variance of the skewness estimator under normality.
  g1_var <- 6 * n * (n - 1) / ((n - 2) * (n + 1) * (n + 3))
  pval <- pchisq(g1^2 / g1_var, 1, lower.tail = FALSE)
  setNames(c(g1, pval), c("skewness", "p-value"))
}
|
29c91b85f745e687c8e59f1fd5fa3d1a9bb288c2 | bfd0b1430559f6843ee800c12d45c7bcaad448b2 | /etc/split_large_datasets.R | 48e1adcb26f3dca2242fdb070d7ba72bb9ebcc6a | [
"CC-BY-SA-4.0",
"Apache-2.0"
] | permissive | NCEAS/arcticdatautils | 0b77a4a64c25faf714622507ab9ea9ac5f6eaea7 | 6aadd37d08903fa44ef8b8235332da7556415d28 | refs/heads/main | 2023-06-24T23:50:32.488127 | 2022-11-04T23:08:37 | 2022-11-04T23:08:37 | 59,154,705 | 8 | 21 | Apache-2.0 | 2023-08-10T16:58:37 | 2016-05-18T22:06:41 | R | UTF-8 | R | false | false | 1,844 | r | split_large_datasets.R | # Here I split the largest datasets (>1000 files) into nested data packages
# Load the package under development (provides the inv_* helpers used below).
devtools::load_all(".")
# Load the entire inventory: file list, sizes, checksums, then derived columns.
inventory <- inv_init()
inventory <- inv_load_files(inventory, "../planning/files.txt")
inventory <- inv_load_sizes(inventory, "../planning/sizes.txt")
inventory <- inv_load_checksums(inventory, "../planning/checksums.txt")
inventory <- inv_add_extra_columns(inventory)
inventory <- inv_add_parent_package_column(inventory)
inventory <- theme_packages(inventory)
# Keep only files belonging to packages with more than 1000 files, then the
# data (non-metadata) files within them. The bare expressions are interactive
# sanity checks and print when run in a session.
large <- inventory[inventory$package_nfiles > 1000,]
length(unique(large$package))
nrow(large)
large_data <- large[large$is_metadata == FALSE,]
nrow(large_data)
# Partition the row indices into 4 roughly equal consecutive groups.
splits <- split(1:nrow(large_data), cut(1:nrow(large_data), breaks = 4))
# Give all data files PIDs first (a fresh urn:uuid per row).
large_data$created <- FALSE
large_data$ready <- TRUE
large_data$pid <- sapply(1:nrow(large_data), function(x) paste0("urn:uuid:", uuid::UUIDgenerate()))
large_data_group1 <- large_data[splits[[1]],]
large_data_group2 <- large_data[splits[[2]],]
large_data_group3 <- large_data[splits[[3]],]
large_data_group4 <- large_data[splits[[4]],]
# Checks: the four groups cover every row exactly once (expect TRUE / FALSE).
sum(nrow(large_data_group1), nrow(large_data_group2), nrow(large_data_group3), nrow(large_data_group4)) == nrow(large_data)
any(duplicated(c(large_data_group1$file, large_data_group2$file, large_data_group3$file, large_data_group4$file)))
# Check that none of these files are in the 'data.rda'
# NOTE(review): load() below overwrites `inventory`, and duplicated()'s
# second argument is `incomparables`, not a second vector -- `medium$file`
# (which is not defined in this script) is effectively ignored. This check
# likely meant something like any(inventory$file %in% medium$file); confirm.
load('inventory/data.rda')
any(duplicated(inventory$file, medium$file))
# Save them out, each group under the name `inventory` in its own .rda file.
inventory <- large_data_group1
save(inventory, file = "inventory/large_data_group1.rda")
inventory <- large_data_group2
save(inventory, file = "inventory/large_data_group2.rda")
inventory <- large_data_group3
save(inventory, file = "inventory/large_data_group3.rda")
inventory <- large_data_group4
save(inventory, file = "inventory/large_data_group4.rda")
|
c5b4bf41dea989288c82888d547d5bb4140d3829 | 27a87d4a1e2f62f1a58c961601286b396ce0e1f5 | /R/glm.vc.R | 1b2f83b311f34ccf8463072c6870c302aaf63f47 | [] | no_license | cran/GLMpack | 4ef52e7cdd69a06eb5ee3c781a343dfd0a3adb7e | 7b8f74100ddf1458ec4e629385b2ac5614b7eb49 | refs/heads/master | 2020-06-22T17:34:37.568876 | 2019-07-19T08:40:05 | 2019-07-19T08:40:05 | 197,758,021 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 590 | r | glm.vc.R | #' Compute variance-covariance matrix.
#'
#' Calculate the variance-covariance matrix of the coefficient estimates from
#' a generalized linear model fit: the residual dispersion multiplied by the
#' unscaled covariance matrix reported by summary(). Used in `GLMpack` within
#' the function `glm.summary()`.
#'
#' @param obj A fitted model object returned by glm().
#' @return A matrix: the estimated variance-covariance matrix of the model
#'   coefficients.
#' @examples
#' data(campaign)
#' attach(campaign)
#' cmpgn.out <- glm(TOTCONTR ~ CANDGENDER + PARTY + INCUMCHALL + HISPPCT,
#' family=Gamma(link = 'log'), data=campaign)
#' glm.vc(cmpgn.out)
#'
#' @export
glm.vc <- function(obj){
  # Compute the summary once and reuse it for both components.
  fit_summary <- summary(obj)
  fit_summary$dispersion * fit_summary$cov.unscaled
}
|
e8e98f2aa797b6585f6a2485a58805cf94c5a1a0 | d987da8f69a9b8554e21c8c4a56ab6621ec36c95 | /depurar data.R | e291ad6818f05f7a17b917b3c8b4f41086e486bb | [] | no_license | KarenCalva/DGIP | eb2a5e96f0193975363ef75d56e9f2f2b23264d3 | 1c45d607f92e9ef44eb2063cfa70acadd0e36b8b | refs/heads/master | 2020-03-18T12:42:33.649017 | 2018-05-30T13:27:50 | 2018-05-30T13:27:50 | 134,740,006 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,420 | r | depurar data.R | library(data.table)
library(dplyr)
# Load the student enrolment data.table `dt` (keyed as shown below).
load("dt.RData")
key(dt) # "codigoEstudiante" "periodo"
## codigoEstudiante (student code)
# there are student codes of 9, 7, 6 and 5 characters
unique(nchar(dt$codigoEstudiante))
## periodo (academic period)
# treat period as a factor variable
dt$periodo <- as.factor(dt$periodo)
length(levels(dt$periodo)) # 20 periods, from 2009-1 through 2018-A
levels(dt$periodo) # the period labels
sum(is.na(dt$periodo)) # complete (no NAs)
grep("NULL",dt$periodo) # 0 NULL's
## numMatricula (enrolment number)
sum(is.na(dt$numMatricula)) # complete (no NAs)
grep("NULL",dt$numMatricula) # 0 NULL's
dt$numMatricula <- as.factor(dt$numMatricula) # originally an integer variable
levels(dt$numMatricula) # "1" "2" "3"
## paralelo (class section)
sum(is.na(dt$paralelo)) # complete (no NAs)
grep("NULL",dt$paralelo) # 0 NULL's
dt$paralelo <- as.factor(dt$paralelo)
levels(dt$paralelo) # alphanumeric and alphabetic labels
# each period uses one kind of section label
dt <- dt %>% group_by(codigoEstudiante,periodo,numMatricula)
# NOTE(review): `d0` and `d1` below are not defined anywhere in this script;
# they presumably come from an interactive session -- confirm before running.
d0
d1$periodo <- as.factor(d1$periodo)
# some periods are labelled 1 and 2, others A and B
# ask about the overlap between 2012-1 and 2012-A
length(d1[d1$periodo=="2012-1","codest"]) # 5225 students
length(d1[d1$periodo=="2012-A","codest"]) # 4757 students
d1[d1$periodo=="2012-1","codest"]%in% d1[d1$periodo=="2012-A","codest"]
sum(d1[d1$periodo=="2012-1","codest"]%in% d1[d1$periodo=="2012-A","codest"])
# 2568 students from period 2012-1 also appear in period 2012-A
|
adc4ef0d4b54fae243c50aab58001e17dfe1d316 | 3b36fa3694da1e76451139985235b4bad33e95b1 | /individual_G49944026/ui.R | ac6208dc387537518efc19704f5f351c3834b1ab | [] | no_license | tianweima/shiny | 6f73c12977f54cbd7b7ce6b92cfd16faa3be485b | 24995028394c18768fb974b885399ac965246259 | refs/heads/master | 2020-08-15T22:42:58.909728 | 2019-10-16T00:02:21 | 2019-10-16T00:02:21 | 215,418,268 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,222 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define style of application and title
# Top-level UI: a navbar application comparing DC and Marvel comic characters.
# NOTE(review): wordcloud2Output() below requires the wordcloud2 package,
# which is not attached in this file -- presumably loaded in global.R or
# server.R; confirm.
ui <- navbarPage("Comic Books Character: DC VS. MARVEL",
                 # Description tab: headline, server-rendered text ("text") and a static image.
                 tabPanel("Description", h2('Comic Books Are Still Made By Men, For Men And About Men'),
                          h5(textOutput("text")),
                          hr(),
                          tags$img(src = 'dcvsmarvel.png',width="900px")
                 ),
                 # Word-cloud tab: pick a publisher, render its character word cloud.
                 # NOTE(review): "Apperance" is a typo for "Appearance" in this
                 # user-facing tab title -- fix once confirmed nothing selects
                 # the tab by its title string.
                 tabPanel("Character Apperance",
                          sidebarPanel(
                            selectInput("selection", label = "DC or MARVEL",
                                        choices = c("DC"= 'DC' ,"MARVEL"= 'MARVEL'))
                          ),
                          # Show Word Cloud (rendered server-side into output$word)
                          mainPanel(
                            wordcloud2Output("word",width = "120%", height = "500px")
                          )
                 ),
                 # Plots tab: one selector ("DORM") drives two stacked plots.
                 tabPanel("Plots",
                          sidebarPanel(selectInput("DORM",label = "DC VS MARVEL",choices = c("Character Added","Character of Gender Added","Character of LGBT Added"),selected = "Character Added")
                          ),
                          mainPanel(plotOutput("plot1"),hr(),plotOutput("plot2"))
                 ),
                 # Dropdown menu with one data table per publisher.
                 navbarMenu("Table",
                            tabPanel("DC",tableOutput("table1")),
                            tabPanel("MARVEL",tableOutput("table2"))
                 )
)
15c1dfe6ca4838ac02cea80fcbbf814cbb3c27e6 | f7c38a9793a87214d41e0f743b1b94606c89952a | /man/get_my_followed_artists.Rd | 8f9287bb8b7ce8874e09b31c515c8586c1f4862b | [] | no_license | TroyHernandez/tinyspotifyr | 34c44b2b7d219161aee565d505bc757beabfa09f | 3f10c2972a0004291f74bcbae4e4eb2cf6899f6c | refs/heads/master | 2023-03-07T03:42:52.948314 | 2021-02-21T13:49:16 | 2021-02-21T13:49:16 | 326,067,413 | 10 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,472 | rd | get_my_followed_artists.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/follow.R
\name{get_my_followed_artists}
\alias{get_my_followed_artists}
\title{Get the current user’s followed artists.}
\usage{
get_my_followed_artists(
limit = 20,
after = NULL,
authorization = get_spotify_authorization_code(),
include_meta_info = FALSE
)
}
\arguments{
\item{limit}{Optional. The maximum number of items to return. Default: 20. Minimum: 1. Maximum: 50.}
\item{after}{Optional. The last artist ID retrieved from the previous request.}
\item{authorization}{Required. A valid access token from the Spotify Accounts service. See the \href{https://developer.spotify.com/documentation/general/guides/authorization-guide/}{Web API authorization Guide} for more details. Defaults to \code{spotifyr::get_spotify_authorization_code()}. The access token must have been issued on behalf of the current user. Getting details of the artists or users the current user follows requires authorization of the \code{user-follow-read} scope. See \href{https://developer.spotify.com/documentation/general/guides/authorization-guide/#list-of-scopes}{Using Scopes}.}
\item{include_meta_info}{Optional. Boolean indicating whether to include full result, with meta information such as \code{"total"}, and \code{"limit"}. Defaults to \code{FALSE}.}
}
\value{
Returns a data frame of results containing user's followed artists.
}
\description{
Get the current user’s followed artists.
}
|
597455b9b6a51b7341b54439bae528e7cb2ec62b | bb42f8600b0d574209b2081a1c57fab2b5e25132 | /tesla_to_MA.R | 985e46ef1b8dbb9ea7be98cdc9c91d5d9323b48e | [] | no_license | delta-gm/TimeSeries | e4454d8cd0f839c554e1f6cfbfd0a327ad2b4e90 | 4a0f8142a65ef0192fe1e4a946265435d58c867a | refs/heads/main | 2023-03-22T23:10:11.779220 | 2021-03-18T03:58:58 | 2021-03-18T03:58:58 | 335,074,519 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 695 | r | tesla_to_MA.R | # tesla ts to MA
# Read two years of daily TSLA prices; column 2 holds the price series.
tesla<-read.csv("TSLA_2years.csv",header=TRUE)
tesla.ts<-ts(tesla[,2])
plot(tesla.ts,ylab='price',main='Tesla Price Data')
# 31-day centered moving average. stats::filter is called explicitly so this
# still works if dplyr (which masks filter()) happens to be attached.
# FIX: the original assigned the moving average to `z` but then referenced an
# undefined `tesla.MA`; the series is now stored under both names (matching
# the author's own commented-out `z<-tesla.MA` hint).
tesla.MA<-stats::filter(tesla.ts,rep(1/31,31),sides=2)
z<-tesla.MA
lines(tesla.MA,col='red')
par(mfrow=c(3,1))
# Detrend by dividing the raw series by its moving average.
y<-tesla.ts/tesla.MA
plot(y,ylab='scaled price',main='Transformed Tesla Price Data')
acf(na.omit(y),main='Autocorrelation Function of Transformed Tesla Data')
acf(na.omit(y), type='partial',main='Partial ACF of Transformed Tesla Data')
# FIX: four panels follow, so use a 4x1 layout (the original 3x1 pushed the
# final PACF panel onto a second page).
par(mfrow=c(4,1))
plot(z,ylab='tesla ma price',main='tesla MA data')
plot(tesla.ts,ylab='price',main='Tesla Price Data')
acf(na.omit(z),main='ACF of Tesla MA')
acf(na.omit(z),type='partial',main='PACF of Tesla MA')
|
ec06fa2eeb906d6d5bee61fb20456cd5e45530fc | 07f837d8c5236fe5e75ef510cd296814452370ce | /h2o-perf/bench/tests/singlenode/kmeans_one-billion-rows/model.R | 8e07caa67c706441aff5a59ffb8cf1786238e018 | [
"Apache-2.0"
] | permissive | vkuznet/h2o | 6f9006a5186b964bac266981d9082aec7bc1067c | e08f7014f228cbaecfb21f57379970e6a3ac0756 | refs/heads/master | 2021-08-28T11:37:52.099953 | 2021-08-10T22:43:34 | 2021-08-10T22:43:34 | 20,032,996 | 0 | 0 | Apache-2.0 | 2021-08-10T22:43:35 | 2014-05-21T18:46:27 | Java | UTF-8 | R | false | false | 286 | r | model.R | source("../../../R/h2oPerf/prologue.R")
# Run K-means (VA backend) with 6 clusters on the listed feature columns.
runKMeans.VA(centers = 6, cols = c('C1','C2','C3','C4','C5','C6','C8','C11'), iter.max = 100, normalize = FALSE)
# Benchmark pass/fail flag: the total within-cluster sum of squares must be
# within 5 log-units of the reference value. `model` and the reporting
# machinery come from the sourced prologue/epilogue scripts.
correct_pass <<- as.numeric(abs(log(model@model$tot.withinss) - log(185462248582411)) < 5.0)
source("../../../R/h2oPerf/epilogue.R")
|
32973b1a928970f802a42faca4089178e2612680 | 65ea916d10603c8115266748f8282a2031067e87 | /distance calculator R.R | e2e8f30e18ec39901923c5462bc702ff676dbff4 | [] | no_license | shivamkumar319/compute-distance_km | 9d3d5d071a404b833138ce6e1075381311421eb7 | a0907da8c6801543d7963f44db3b526413eff82f | refs/heads/master | 2022-06-04T06:45:14.908095 | 2020-04-30T20:09:57 | 2020-04-30T20:09:57 | 260,307,743 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 252 | r | distance calculator R.R | computedistance <- function(lat1,long1,lat2,long2){
lat_1=lat1*pi/180
long_1=long1*pi/180
lat_2=lat2*pi/180
long_2=long2*pi/180
d1=3963*acos((sin(lat_1)*sin(lat2))+cos(lat_1)*cos(lat_2)*cos(long_2-long_1))
d=1.609344*d1
print(d)
} |
d3ed3a91fe39d5db18c5d0f9cd2a3cef80a16fb5 | c3fced9fa3881b8d07000adfb5bebe4213eaa4a4 | /ANALYSIS/DATA/control_IDFixedEffects.R | 46a9934231ad4742af6f5fa6f4d3fd1ecc69108b | [] | no_license | rafael-schuetz/Pareto | ea9c06cb588113bbdf6a3b5da27a2d2a22f37dc8 | 74c414268d429373b83ccfb27bf222ae25b97c32 | refs/heads/master | 2022-04-13T11:36:56.587595 | 2020-04-08T18:31:48 | 2020-04-08T18:31:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,262 | r | control_IDFixedEffects.R | ###Erstellen von Einrichtungsdummies, um fixed effects in die Regression einbauen zu können
##Names: dummy103, dummy104, ..., dummy687 -- facility (id) dummy variables so
##that fixed effects can be included in the regression.
library(dplyr)
library(tidyverse)
library(magrittr)
library(tidyselect)
# New data set that only contains the id variable.
dfcEF_id <- dfcEF[ ,c("id")]
# The 73 distinct facility ids present in the data. None is omitted because
# the regression is estimated without an intercept (beta0), so there is no
# perfect multicollinearity.
id_values <- c(103, 104, 105, 106, 108, 109, 111, 112, 113, 114, 118, 122,
               123, 124, 125, 130, 131, 132, 133, 136, 137, 139, 141, 142,
               165, 186, 187, 188, 189, 190, 191, 192, 193, 194, 209, 213,
               214, 215, 216, 217, 218, 219, 220, 221, 226, 233, 249, 255,
               269, 270, 281, 282, 403, 404, 417, 418, 437, 482, 483, 599,
               600, 601, 602, 623, 663, 664, 665, 666, 667, 684, 685, 686,
               687)
# Create the id dummies: one '1'/'0' character column per id (character first,
# exactly as the original hand-written version did), computed from dfc$id.
# This loop replaces 73 copy-pasted ifelse() lines.
for (id_value in id_values) {
  dfcEF_id[[paste0("dummy", id_value)]] <- ifelse(dfc$id == id_value, '1', '0')
}
# Problem: the id dummies are stored as character.
# Therefore every column is converted to numeric.
dfcEF_id <- data.frame(lapply(dfcEF_id, function(x) as.numeric(as.character(x))))
# Add the id dummies to the original data set dfcEF (same column order as the
# original 73 copy-pasted assignments).
for (id_value in id_values) {
  dummy_name <- paste0("dummy", id_value)
  dfcEF[[dummy_name]] <- dfcEF_id[[dummy_name]]
}
|
a839325b0c32256f4a11637024231c1324475da5 | 4739dc8381233fb37b55493eae5552c4edbb092f | /swc-R-module1.R | 49bc3d491c6709658fb9e2bd9c5bc0579e829cd8 | [] | no_license | hcraig2014/planets | a263b1f65eeab03143bf46b4679ed29e14a5d120 | 8cb2c344fdd2f2b4befbebee39c1b806010e3fe8 | refs/heads/master | 2016-09-10T19:47:20.566380 | 2015-06-18T23:47:33 | 2015-06-18T23:47:33 | 37,688,597 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,258 | r | swc-R-module1.R | setwd("C:/Users/hcraig/Documents/swc-R/r-novice-inflammation")
# read in file (rows = patients, columns = days, no header)
dat<- read.csv(file="data/inflammation-01.csv", header=FALSE)
#first row, all columns
patient_1<- dat[1,]
# max inflammation for patient 1
max(patient_1)
# max inflammation for patient 2
max(dat[2,])
# minimum inflammation for var 7
min(dat[,7])
#mean inflammation for each day (column-wise)
average_day_inflammation<- apply(dat, 2, mean)
plot(average_day_inflammation)
#min inflammation per day
min_day_inflammation<- apply(dat, 2, min)
plot(min_day_inflammation)
#max inflammation per day
max_day_inflammation<- apply(dat, 2, max)
plot(max_day_inflammation)
#standard deviation of inflammation per day
sd_day_inflammation<- apply(dat, 2, sd)
plot(sd_day_inflammation)
#create a function
fahr_to_kelvin <- function(temp) {
  # Convert a temperature from degrees Fahrenheit to Kelvin.
  ((temp - 32) * (5 / 9)) + 273.15
}
#freezing point of water
fahr_to_kelvin(32)
#boiling point of water
fahr_to_kelvin(212)
#function Mik
mik <- function(x) {
  # Demo function from the lesson: add 2 to the input.
  x + 2
}
# another function
kelvin_to_celcius <- function(temp) {
  # Convert Kelvin to Celsius.
  # (Name keeps the original spelling so existing callers still work.)
  temp - 273.15
}
kelvin_to_celcius(0)
best_practice <- c("Write", "programs", "for", "people", "not", "computers")
asterisk <- "***" # R interprets a variable with a single value as a vector
# with one element.
fence <- function(a, b) {
  # Wrap vector `a` with `b` on both ends and return the result.
  c(b, a, b)
}
fence(best_practice, asterisk)
##
analyze <- function(filename) {
  # Plots the average, max, and min inflammation over time.
  # filename: character path to a header-less CSV (rows = patients,
  #           columns = days).
  # Side effect: draws three plots on the active graphics device.
  dat <- read.csv(file = filename, header = FALSE)
  avg_day_inflammation <- apply(dat, 2, mean)
  plot(avg_day_inflammation)
  max_day_inflammation <- apply(dat, 2, max)
  plot(max_day_inflammation)
  min_day_inflammation <- apply(dat, 2, min)
  plot(min_day_inflammation)
}
pdf(file="inflammation-01.pdf")
analyze("data//inflammation-01.csv")
dev.off()
best_practice
print_words <- function(sentence) {
  # Print each element of `sentence` on its own line.
  # Generalized from six hard-coded indices so it works for any length;
  # the original printed NA entries for vectors shorter than six elements
  # and silently dropped elements beyond the sixth.
  for (word in sentence) {
    print(word)
  }
}
print_words(best_practice)
print_words <- function(sentence) {
  # Print each word followed by its index.
  # seq_along() (rather than 1:length(sentence)) avoids the 1:0 trap:
  # on a zero-length input the original iterated over c(1, 0) and
  # printed NA twice.
  for (i in seq_along(sentence)) {
    print(sentence[i])
    print(i)
  }
}
print_words(best_practice)
##for the analyse function for multiple files: write one PDF per input.
# NOTE(review): changing the working directory mid-script is fragile; the
# bare filenames below only resolve because of this setwd("data/").
filenames<- list.files(path="data", pattern="inflammation")
setwd("data/")
for (file in filenames){
# open a PDF device named <input>.pdf, draw the three plots, close it
pdf(file=paste0(file,".pdf"))
analyze(file)
dev.off()
print(paste0(file,".pdf"))
}
print_n <- function(N) {
  # Print the integers 1..N, one per line.
  # seq_len() handles N = 0 correctly (prints nothing); the original's
  # 1:N would have printed 1 and then 0.
  for (i in seq_len(N)) {
    print(i)
  }
}
print_n(20)
print_n(4)
vec<- c(4,8,15,16,23,42)
num<- 0
# Build a Fibonacci-style sequence in num_vec: each new entry is the sum
# of the two preceding entries. The loop prints only the index i.
# NOTE(review): `vec` is not used below in this chunk.
num_vec <- c(1,1)
for (i in 1:10) {
num<- num_vec [i]+num_vec[i+1]
num_vec[i+2]<-num
print(i)
}
analyze_all <- function(pattern) {
  # Runs the function analyze for each file under "data/" whose name
  # matches `pattern` (passed straight to list.files()).
  # Relies on analyze() being defined earlier in this script.
  filenames <- list.files(path = "data", pattern = pattern, full.names = TRUE)
  for (f in filenames) {
    analyze(f)
  }
}
# if/else demo: a scalar condition chooses exactly one branch.
num<- 37
if(num > 100){
print("Greater") #if true runs this code
} else{
print("Not greater") #if false then runs this
}
print("done") #runs for all conditions
sign <- function(num) {
  # Return the sign of `num`: 1 for positive, 0 for zero, -1 for negative.
  # (Masks base::sign, as in the lesson.)
  if (num > 0) {
    1
  } else if (num == 0) {
    0
  } else {
    -1
  }
}
sign(50)
# & and
# | or
(1 > 0 && -1 > 0)
(1 > 0 || -1 > 0)
(2 > 0 && -1 < 2)
|
4e3028c1f093dce64fbac6364b7022d0a475dd8d | d2a526079e3e2bf94acf633fd8709c383eea3755 | /WeibullPlot.R | 33f3952a62543655acd16eae30de7d72437b8a1d | [] | no_license | ValeriVoev/Reliability | 5ef12f0429608756e9955774e37b7980080df72c | 29b262711eb539737cbfb1812883ad661af32161 | refs/heads/master | 2020-12-30T16:14:49.510805 | 2019-12-06T12:52:39 | 2019-12-06T12:52:39 | 90,967,329 | 4 | 1 | null | 2019-12-06T12:52:41 | 2017-05-11T10:28:54 | R | UTF-8 | R | false | false | 4,563 | r | WeibullPlot.R | library(ggplot2)
library(dplyr)
library(ggthemes)
# Linearize the y-axis for Weibull probability paper: maps a failure
# probability p to log(log(1 / (1 - p))), so Weibull data plots as a line.
WeibulLinTrans <- function(x) {
  log(log(1 / (1 - x)))
}
# Generate some random Weibull data (simulated bearing lifetimes).
# NOTE(review): no set.seed() precedes this, so the simulated data -- and
# every number derived from it -- changes on each run; confirm intended.
x <- rweibull(100, shape = 1.8, scale = 60)
# Convert to data frame for convenient plotting
x.df <- data.frame(Lifetime = x)
x.df$rounded <- round(x.df$Lifetime, 2)
# Histogram of data
ggplot(x.df, aes(x = Lifetime)) +
geom_histogram(fill = "dodgerblue3", color = "black") +
ggtitle("Histogram of bearing lifetimes") +
xlab("Time (months)") + theme_tufte() +
theme( axis.text.x = element_text(size = 12), axis.text.y = element_text(size = 12),
axis.title=element_text(size=14,face="plain"), plot.title = element_text(size=18))
# Add x and y plot positions for Weibull plot: rank the lifetimes, compute
# median ranks (Rank - 0.3) / (n + 0.4), and transform both axes.
x.df <- x.df %>% arrange(Lifetime) %>% mutate(Rank = 1:nrow(x.df)) %>% mutate(MedRank = (Rank-0.3)/(nrow(x.df)+0.4)) %>%
mutate(Logx = log(Lifetime), LogFTrans = WeibulLinTrans(MedRank) )
# Run median rank regression: a straight-line fit on Weibull paper;
# the fitted slope is the shape parameter (beta).
Regression <- lm(data = x.df, formula = LogFTrans ~ Logx )
# Recover the scale parameter eta from the fitted intercept and slope.
eta <- exp( - (Regression$coefficients[1]/Regression$coefficients[2] ))
# Make beta and eta labels for annotating the plot (parsed as plotmath).
# Fixes: the Betalab line was duplicated, and Parlab used the literal
# "/n" instead of a newline "\n".
Betalab <- paste(expression(beta) ,"==", round(Regression$coefficients[2] , 2))
Etalab <- paste(expression(eta) ,"==", round(eta , 2))
Parlab <- paste(Betalab, "\n", Etalab)
# Find L10 life (time by which 10% of units fail) by inverting the fit.
L10y <- WeibulLinTrans(0.1)
L10x <- (L10y - Regression$coefficients[1]) / Regression$coefficients[2]
# Fitted y-positions at 60 and 120 months.
# NOTE(review): y5yr/y10yr are not referenced again in this chunk (the
# plot uses WeibulLinTrans(y5yrprob) instead) -- candidates for removal.
y5yr <- Regression$coefficients[1] + Regression$coefficients[2] * log(60)
y10yr <- Regression$coefficients[1] + Regression$coefficients[2] * log(120)
# Define the X tick positions (on the log scale, including the L10 life)
XMarks <- as.vector(log( sort( c(min(2, min(x)), min(2, min(x))+1, 10, seq(from = 20, to = 100, by = 20) , 120, 150, exp(L10x)) ) ))
# Find failure rate at 60 and 120 months from the fitted distribution
y5yrprob <-pweibull(60, shape = Regression$coefficients[2], scale = eta)
y10yrprob <-pweibull(120, shape = Regression$coefficients[2], scale = eta)
# Define the Y tick positions (probabilities, de-duplicated after rounding)
Yprobs <- round( c(0.01, 0.02, 0.05, seq(from = 0.1, to = 0.6, by = 0.1), 0.8, 0.99 , y5yrprob, y10yrprob), 2)
Yprobs <- Yprobs[!duplicated(Yprobs)]
YMarks <- WeibulLinTrans(Yprobs)
# Find position of "special" ticks corresponding to L10 life, 60 months and 120 months
XMarkSpec <- which(round(exp(XMarks),2) %in% round(c(exp(L10x), 60, 120 ),2))
YMarkSpec <- which(round(Yprobs,2) %in% round(c(0.1, y5yrprob, y10yrprob ),2))
# Set font and color for tick marks: special ticks are bold and blue.
facex <- rep("plain", length(XMarks))
facex[XMarkSpec] <- "bold"
colx <- rep("black", length(XMarks))
colx[XMarkSpec] <- c("dodgerblue4", "dodgerblue3", "dodgerblue2")
facey <- rep("plain", length(YMarks))
facey[YMarkSpec] <- "bold"
# Bug fix: coly styles the y-axis labels, so it needs one entry per Y
# mark; the original sized it with length(XMarks), copied from colx.
coly <- rep("black", length(YMarks))
coly[YMarkSpec] <- c("dodgerblue4", "dodgerblue3", "dodgerblue2")
# Make the Weibull plot on Weibull paper: points are the median-rank
# estimates, the red line is the median-rank regression, and the dashed
# guides mark the L10 life and the 60/120-month failure probabilities.
ggplot(x.df, aes(x = Logx, y = LogFTrans)) +
geom_point(fill = "darkorchid4", alpha = 0.75, shape = 21, col = "black", size = 2) +
xlab("Lifetime (months)") + ylab("Failure Rate") +
# Axis breaks computed above; labels show untransformed lifetimes/probs.
scale_x_continuous(breaks = XMarks, minor_breaks = NULL, labels = round(exp(XMarks)), limits = c(0, log(max(x)+ 5) )) +
scale_y_continuous(breaks = YMarks, minor_breaks = NULL, labels = Yprobs) +
geom_smooth(method = "lm", col = "red", size = 0.5) +
# Parameter annotations; parse = T renders beta/eta as Greek symbols.
annotate("text", x=log(4), y=WeibulLinTrans(0.3) , parse = T,
label= Betalab , size = 4, hjust = 0) +
annotate("text", x=log(4), y=WeibulLinTrans(0.2) , parse = T,
label= Etalab , size = 4, hjust = 0) +
# Dashed guides: L10 life (dodgerblue4), 60 mo (dodgerblue3), 120 mo (dodgerblue2).
geom_segment(aes(x = L10x, y = -Inf, xend = L10x, yend = L10y), col = "dodgerblue4", linetype = "dashed") +
geom_segment(aes(x = -Inf, y = L10y, xend = L10x, yend = L10y), col = "dodgerblue4", linetype = "dashed") +
geom_segment(aes(x = log(60), y = -Inf, xend = log(60), yend = WeibulLinTrans(y5yrprob)), col = "dodgerblue3", linetype = "dashed") +
geom_segment(aes(x = -Inf, y = WeibulLinTrans(y5yrprob), xend = log(60), yend = WeibulLinTrans(y5yrprob)), col = "dodgerblue3", linetype = "dashed") +
geom_segment(aes(x = log(120), y = -Inf, xend = log(120), yend = WeibulLinTrans(y10yrprob)), col = "dodgerblue2", linetype = "dashed") +
geom_segment(aes(x = -Inf, y = WeibulLinTrans(y10yrprob), xend = log(120), yend = WeibulLinTrans(y10yrprob)), col = "dodgerblue2", linetype = "dashed") +
theme_bw()+
theme( axis.text.x = element_text(face=facex, color = colx, size = 10), axis.text.y = element_text(face=facey, color = coly, size = 10) )
|
20d21bbba9c9eccbfffdfa84486cadebf792c985 | a0507e8fb57a389477341a81d26fb87994508438 | /Recipes/data_munging.R | d2a2f83ae8d133875b9da1492e94f748064c5136 | [] | no_license | dlimjy/R-general | 051872f503663fa57892979098dd4cc3f36598f5 | 1eb799e0169f620d718e8541b20ebac3efecb572 | refs/heads/master | 2021-09-15T08:15:47.167503 | 2018-05-29T05:51:14 | 2018-05-29T05:51:14 | 125,837,708 | 0 | 0 | null | 2018-05-29T05:51:15 | 2018-03-19T10:02:15 | R | UTF-8 | R | false | false | 2,030 | r | data_munging.R | ##### Some recipes for data munging
# Using the titanic dataset as an example.
# NOTE(review): the absolute setwd() path is machine-specific, and fread()
# needs data.table attached -- presumably loaded earlier; confirm.
setwd("C:\\Analytics\\Kaggle\\Titanic")
titan <- fread("titan.csv")
# Null treatment ----------------------------------------------------------
titan %>% select(Age) %>% is.na %>% table # Check distribution of nulls
# Imputing nulls, stratified: missing Age gets the sex-specific median.
age_med_M <- (titan %>% filter(Sex == "male"))$Age %>% median(na.rm = TRUE) # filter first, select column with $, then apply a function
age_med_F <- (titan %>% filter(Sex == "female"))$Age %>% median(na.rm = TRUE)
# Bug fix: the medians above were stored as age_med_M/age_med_F, but the
# original imputation read the undefined names median_age_M/median_age_F.
# Impute the NA positions directly; no -100 sentinel round-trip is needed.
titan$Age[is.na(titan$Age) & titan$Sex == "male"] <- age_med_M
titan$Age[is.na(titan$Age) & titan$Sex == "female"] <- age_med_F
# Replacing blank strings with a placeholder value (vectorized ifelse).
titan$TextColumn <- ifelse(titan$TextColumn == "", "Replacement", titan$TextColumn)
# Binning -----------------------------------------------------------------
# Bin age as an example: 5-year brackets, open-ended below 15 and above 65.
# cut() replaces the 12-deep nested ifelse chain; right = FALSE makes each
# interval [lower, upper), matching the original "< upper" comparisons, and
# as.character() keeps the column a character vector, as ifelse produced.
age_breaks <- c(-Inf, seq(15, 65, by = 5), Inf)
age_labels <- c("< 15", "15 - 19", "20 - 24", "25 - 29", "30 - 34",
                "35 - 39", "40 - 44", "45 - 49", "50 - 54", "55 - 59",
                "60 - 64", "65+")
titan$Age_bin <- as.character(cut(titan$Age, breaks = age_breaks,
                                  labels = age_labels, right = FALSE))
# # Train test split ------------------------------------------------------
split_ratio <- 0.8 # Specify split ratio
# Bug fix: use the declared split_ratio (the original hard-coded 0.8 here,
# so editing split_ratio silently had no effect on the split).
# NOTE(review): no set.seed() precedes the draw, so the split is not
# reproducible across runs -- confirm whether that is intended.
split_size <- floor(split_ratio * nrow(titan)) # Floor of number of rows based on split ratio
split_ind <- base::sample(seq_len(nrow(titan)), size = split_size) # Generate index for splitting
trainset <- titan[split_ind,]
testset <- titan[-split_ind,]
|
1fafbcb8443f6f1f7562cb33f322203c759fcd89 | 492b07e7900b1c4f058fc3c680b1fb709c707bc3 | /inputs/palermo/extract_crime_data.R | 9fbc4a24b36a2f33de8ed513a12e5f6fb736cfec | [
"MIT"
] | permissive | LABSS/PROTON-OC | 4133ba712fa0c69bd7849e94a628ec556e451eeb | a116103aed3f2286db40b092c2d797a59e8b1a39 | refs/heads/master | 2021-09-29T17:12:51.761952 | 2021-09-13T09:50:56 | 2021-09-13T09:50:56 | 148,494,620 | 3 | 1 | MIT | 2021-09-13T09:50:57 | 2018-09-12T14:37:00 | NetLogo | UTF-8 | R | false | false | 1,358 | r | extract_crime_data.R | library(tidyverse)
library(readxl)
# NOTE(review): relies on RStudio (rstudioapi) to set the working directory
# to this script's folder -- fails when run outside RStudio.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# df: long-format conditional crime probabilities by gender and age band.
# Rows 11-12 of the sheet hold the male/female records of interest.
# gather() is superseded by pivot_longer() -- doc note only, kept as-is.
df <-
  file.path("raw", "authors_sex_age_conditional_prob_ corretto.xlsx") %>%
  read_excel(sheet = "conditional_probability") %>%
  filter(between(row_number(), 11, 12)) %>%
  select(-Year) %>%
  gather(key=age, value="p", -Gender) %>%
  rename(`male?`=Gender) %>%
  mutate(
    `male?` = if_else(`male?`=="Females", FALSE, TRUE),
    p = as.numeric(p)
  )
# df2: expand each age band to one row per single year of age.
# str_extract_all pulls the two bound digits, and lift(seq) turns that
# pair into the full integer sequence before unnest() explodes the rows.
# NOTE(review): purrr::lift() is deprecated in recent purrr versions --
# verify against the pinned package version.
# write_csv() returns its input, so df2 also holds the written tibble.
df2<-df %>%
  mutate(
    age = case_when(
      age == "up to 13" ~ "0-13",
      age == "65+" ~ "65-200",
      TRUE ~ age
    ),
    age =
      age %>%
      str_extract_all("\\d+") %>%
      map(as.numeric) %>%
      map((lift(seq)))
  ) %>%
  unnest(age) %>%
  select(`male?`,age,p) %>%
  write_csv(file.path("data", "crime_rate_by_gender_and_age.csv"))
# df3: same probabilities, but keeping age ranges (age_from / age_to),
# with "up to 13" mapped to 0-13 and "65+" mapped to 65-200.
df3 <- df %>%
  mutate(age = str_replace_all(age,"\\<|\\+|up to ", "")) %>%
  separate(age, into = c("age_from", "age_to"), sep = "-") %>%
  mutate(
    age_from = as.numeric(age_from),
    age_to = as.numeric(age_to),
    age_to = case_when(
      age_from == 13 ~ 13,
      age_from == 65 ~ 200,
      TRUE ~ age_to
    ),
    age_from = case_when(
      age_from == 13 ~ 0,
      TRUE ~ age_from
    )
  ) %>%
  select(`male?`,age_from,age_to,p) %>%
  write_csv(file.path("data", "crime_rate_by_gender_and_age_range.csv"))
538712456026093a5448e9645dfd9751923c454c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ph2bye/examples/PostP.Rd.R | 4a7d2b6152e1e41660d17452f19a871a729f44eb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 187 | r | PostP.Rd.R | library(ph2bye)
### Name: PostP
### Title: The posterior probability criterion function for Phase II
### single-arm design
### Aliases: PostP
### ** Examples
# Auto-extracted from the package's Rd examples. Arguments are positional --
# presumably (responses, sample size, prior a, prior b, threshold); see ?PostP.
PostP(8,15,1,1,0.8)
|
d7ae41a3d8a66ec8cdc6b6cac4e83d5dd6fb8b81 | 8d16561b2642ff65cee39870545767c6d2ba6eb4 | /snp/Birdseed2geno.r | 32d8851e3bea5a5d6f5dbba5c8d8078d5af70f4a | [] | no_license | ZhenyuZ/eqtl | bc626cc778512692519f2a5c892f24b10ca22496 | 93a018ff5c05b2dca6bc0c0dcc01d04d49e9d222 | refs/heads/master | 2021-01-15T12:19:13.323323 | 2020-06-30T18:49:07 | 2020-06-30T18:49:07 | 35,136,342 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,416 | r | Birdseed2geno.r | # Birdseed2geno.r is a long script to do a lot of related things
# 1. Read the latest Affy annotation file to filter non dbSNP
# probeset, and generate plink map file.
# 2. Collect clinical data to combine and write a patient - gender
# - vitalstatus file
# 3. Modify SNP annotation file to include two base genotyp
# representation.
# 4. Read pre-collected birdseed genotyping matrix
# (row as probeset, and column as file name), convert column names
# to be aliquot barcode, filter out those without clinical data (gender)
# and not in [disease].matched.aliquots file, and then filter out
# non dbSNP SNPs. The outputs are [disease].geno.rda files and summary
# statistics geno.rda.stats file
options(stringsAsFactors=F)
# NOTE(review): require() returns FALSE instead of erroring when the
# package is missing; library(RCurl) would fail fast.
require(RCurl)
source("~/github/eqtl/module.access.r")
source("~/github/eqtl/module.annotation.r")
# read sdrf list (one disease + SDRF URL per row)
sdrf.list <- read.delim("./snp/sdrf.list", h=F, stringsAsFactors=F)
# read diseases
diseases = unlist(read.delim("diseases.txt", h=F))
# read Affy SNP6 annotation data which is generated by
# ExtractAffyAnnot.r on GenomeWideSNP_6.na35.annot.csv
annot = read.delim("./meta/na35.CEU.txt", h=T)
# read TCGA SNP6 birdseed probeset in order
probeset = unlist(read.table("./meta/birdseed.probeset.txt", h=F))
# filter annotation by probeset, keeping birdseed's row order
m = match(probeset, annot$probeset)
annot = annot[m, ]
# filter annotation by dbSNP id (drop probesets without an rs identifier).
# Bug fix: when w is empty, annot[-w, ] selects ZERO rows (x[-integer(0), ]
# drops everything), so guard the removal on length(w) > 0.
w = which(substr(annot$dbSNP, 1,2) != "rs")
if (length(w) > 0) {
  annot = annot[-w,]
}
# Generate and output plink map file (chr, rsID, genetic distance 0, pos)
map = with(annot, cbind(chr, dbSNP, "0", pos))
colnames(map) = c("chr", "dbSNP", "dist", "pos")
map = data.frame(map)
write.table(map, "./plink/tcga.map", sep="\t", quote =FALSE, col.names=FALSE, row.names=FALSE)
# Extract gender information from clinical data, one disease at a time,
# accumulating a patient / gender / vital-status table in clin.summary.
# NOTE(review): rbind inside the loop grows the data frame each iteration;
# fine for a few dozen diseases, but a list + do.call(rbind, ...) scales better.
clin.summary = NULL
for(disease in diseases) {
# read clinical information from firehose downloaded files
clin.file = paste0("./clin/", disease, ".clin.merged.picked.txt")
clin = read.delim(clin.file, quote="\"", row.names=1, as.is=T)
# normalize barcodes: dots back to dashes, upper case; transpose so that
# rows are patients and columns are clinical variables
colnames(clin) = gsub("\\.", "-", toupper(colnames(clin)))
clin = data.frame(t(clin))
# Extract gender information. male:1, female:2, other:-9
# (matches PLINK's sex coding convention -- consistent with the .map output above)
gender = tolower(clin$gender)
gender[which(gender=="male")] = "1"
gender[which(gender=="female")] = "2"
gender[which(! gender %in% c(1,2))] = "-9"
# make output: one row per patient
patient = rownames(clin)
output = data.frame(patient)
output$gender = gender
output$dead = clin$vital_status
clin.summary = rbind(clin.summary, output)
}
# write the result to file (disabled; uncomment to persist clin.summary)
# write.table(clin.summary, "./aliquot/clin.summary.txt", col.names=T, row.names=F, sep="\t", quote=F)
# Prepare annotation for ped format output: map birdseed genotype calls
# 0/1/2 to two-letter genotypes AA / AB / BB built from the allele columns.
annot$code0 = paste(annot$Allele.A, annot$Allele.A, sep=" ")
annot$code1 = paste(annot$Allele.A, annot$Allele.B, sep=" ")
annot$code2 = paste(annot$Allele.B, annot$Allele.B, sep=" ")
# write.table(annot, "./meta/tcga.snp.annotation.txt", col.names=T, row.names=F, sep="\t", quote=F)
# initialize summary stats: one row per disease (sample and SNP counts)
output = data.frame(matrix(nrow=nrow(sdrf.list), ncol=3))
colnames(output) = c("disease", "numSample", "numSNP")
for(i in 1:nrow(sdrf.list)) {
# Extract disease and sdrf info
# (GetTCGATable / ProcessSNP6Sdrf presumably come from the sourced
# module files above -- confirm.)
disease = sdrf.list$V1[i]
sdrf.link = sdrf.list$V2[i]
sdrf <- GetTCGATable(sdrf.link)
file.info <- ProcessSNP6Sdrf(sdrf, disease)
# keep aliquots whose barcode sample-type code (chars 14-15) is "10"
# -- blood-derived normal in the TCGA barcode scheme; confirm.
file.info <- file.info[substr(file.info$aliquot, 14, 15) == "10", ]
aliquot = file.info$aliquot
# Get Genotype data -- load() is expected to create `geno`
# (probeset x file matrix), per the header comment of this script.
birdseed.file = paste0("./snp/", disease, ".birdseed.rda")
load(birdseed.file)
# change file name to aliquot barcode
colnames(geno) = aliquot
# read matched aliquots and patients data, and filter genotype.
# NOTE(review): `match` here shadows base::match with a data frame; the
# later match(...) calls still resolve to the function, but renaming the
# data frame would be clearer.
match.file = paste0("./aliquot/", disease, ".matched.aliquots")
match = read.delim(match.file, h=T)
geno = geno[, which(colnames(geno) %in% match$SNP)]
# filter by clinical info: keep samples with known gender (1 or 2)
clin = merge(x=match, y=clin.summary, by.x="patients", by.y="patient")
clin = clin[which(clin$gender %in% c(1,2)), ]
m = match(clin$SNP, colnames(geno))
geno = geno[,m]
# filter out non dbSNP probeset and relabel rows by rsID
m = match(annot$probeset, rownames(geno))
geno = geno[m, ]
rownames(geno) = annot$dbSNP
# output geno.rda
geno.file = paste0("./snp/", disease, ".geno.rda")
save(geno, file=geno.file)
# Collect stats
output[i, ] = c(disease, ncol(geno), nrow(geno))
}
write.table(output, "./snp/geno.rda.stats", col.names=T, row.names=F, quote=F, sep="\t")
|
700b58bcc65cf3a9739bff4b4a00b824b829d2e7 | 7d2d098142c43d20382a9c4d44396a99b62c2be2 | /plot3.R | cbfed524c34441209ae7e70ca22d00096b095ba4 | [] | no_license | kcobs/ExData_Plotting1 | ea3c43a48cb62c03b6496b2ee169fb610971525c | cf688418ec26a9379640af1b5afecf093c76dd46 | refs/heads/master | 2021-01-18T05:34:45.084905 | 2016-06-14T23:02:24 | 2016-06-14T23:02:24 | 60,739,268 | 0 | 0 | null | 2016-06-09T00:10:47 | 2016-06-09T00:10:47 | null | UTF-8 | R | false | false | 1,341 | r | plot3.R | ##Get data
# Read the raw power-consumption file and prepare the two study days.
# NOTE(review): `file` is not defined in this chunk -- it must be set to
# the dataset path before this script is sourced; confirm upstream.
power <- read.table(file, header=T, sep=";")
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
# Keep only the two days of interest.
study_data <- power[(power$Date=="2007-02-01") | (power$Date=="2007-02-02"),]
# Coerce measurement columns to numeric (presumably read as text because
# the raw file encodes missing values non-numerically -- confirm).
study_data$Global_active_power <- as.numeric(as.character(study_data$Global_active_power))
study_data$Global_reactive_power <- as.numeric(as.character(study_data$Global_reactive_power))
study_data$Voltage <- as.numeric(as.character(study_data$Voltage))
# NOTE(review): the trailing format string is an unnamed transform()
# argument, NOT a format passed to as.POSIXct -- likely a leftover; verify.
study_data <- transform(study_data, timestamp=as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S")
study_data$Sub_metering_1 <- as.numeric(as.character(study_data$Sub_metering_1))
study_data$Sub_metering_2 <- as.numeric(as.character(study_data$Sub_metering_2))
study_data$Sub_metering_3 <- as.numeric(as.character(study_data$Sub_metering_3))
##CREATE PLOT
# Draw the three sub-metering series for the two study days and save the
# current plot as plot3.png via dev.copy().
# Bug fix: the original indexed a non-existent `df`; the prepared data
# lives in `study_data`.
plot3 <- function() {
  plot(study_data$timestamp, study_data$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
  lines(study_data$timestamp, study_data$Sub_metering_2, col="red")
  lines(study_data$timestamp, study_data$Sub_metering_3, col="blue")
  legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), lwd=c(1,1))
  dev.copy(png, file="plot3.png", width=480, height=480)
  dev.off()
  cat("plot3.png has been saved in", getwd())
}
# Run the plotting function. (The stray interactive prompt "> " that
# prefixed this line made the script unparsable when sourced.)
plot3()
231f1efbce5556133c5c8f72f604d18d3c6ea69e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/biogram/examples/calc_cs.Rd.R | 7ca9333519ff3de6dee0cbdc359fa94a1119dd73 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 293 | r | calc_cs.Rd.R | library(biogram)
### Name: calc_cs
### Title: Calculate Chi-squared-based measure
### Aliases: calc_cs
### ** Examples
tar <- sample(0L:1, 100, replace = TRUE)
feat <- sample(0L:1, 100, replace = TRUE)
library(bit) # used to code vector as bit
calc_cs(feat, as.bit(tar), 100, sum(tar))
|
d4968aeaef97b33cd869b0d379c3e85e1ca0beae | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/r/generated/R/ComDayCqDamCoreImplGfxCommonsGfxRendererInfo.r | 60b5a75bf311364b58aa598eafca8e2c207e131a | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | R | false | false | 4,248 | r | ComDayCqDamCoreImplGfxCommonsGfxRendererInfo.r | # Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqDamCoreImplGfxCommonsGfxRendererInfo Class
#'
#' @field pid
#' @field title
#' @field description
#' @field properties
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model class for the AEM renderer-info OSGI config.
# Generated by openapi-generator (per the file header) -- prefer
# regenerating from the OpenAPI spec over hand-editing.
ComDayCqDamCoreImplGfxCommonsGfxRendererInfo <- R6::R6Class(
  'ComDayCqDamCoreImplGfxCommonsGfxRendererInfo',
  public = list(
    # Model fields; all optional, NULL until set.
    `pid` = NULL,
    `title` = NULL,
    `description` = NULL,
    `properties` = NULL,
    # Set only the supplied fields, validating each: pid/title/description
    # must be length-1 character vectors, properties an R6 object.
    initialize = function(`pid`, `title`, `description`, `properties`){
      if (!missing(`pid`)) {
        stopifnot(is.character(`pid`), length(`pid`) == 1)
        self$`pid` <- `pid`
      }
      if (!missing(`title`)) {
        stopifnot(is.character(`title`), length(`title`) == 1)
        self$`title` <- `title`
      }
      if (!missing(`description`)) {
        stopifnot(is.character(`description`), length(`description`) == 1)
        self$`description` <- `description`
      }
      if (!missing(`properties`)) {
        stopifnot(R6::is.R6(`properties`))
        self$`properties` <- `properties`
      }
    },
    # Build a named list representation, omitting unset (NULL) fields;
    # nested `properties` is serialized via its own toJSON().
    toJSON = function() {
      ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject <- list()
      if (!is.null(self$`pid`)) {
        ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject[['pid']] <- self$`pid`
      }
      if (!is.null(self$`title`)) {
        ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject[['title']] <- self$`title`
      }
      if (!is.null(self$`description`)) {
        ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject[['description']] <- self$`description`
      }
      if (!is.null(self$`properties`)) {
        ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject[['properties']] <- self$`properties`$toJSON()
      }
      ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject
    },
    # Populate this instance from a JSON string, recursing into `properties`.
    fromJSON = function(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoJson) {
      ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject <- jsonlite::fromJSON(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoJson)
      if (!is.null(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`pid`)) {
        self$`pid` <- ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`pid`
      }
      if (!is.null(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`title`)) {
        self$`title` <- ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`title`
      }
      if (!is.null(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`description`)) {
        self$`description` <- ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`description`
      }
      if (!is.null(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`properties`)) {
        propertiesObject <- ComDayCqDamCoreImplGfxCommonsGfxRendererProperties$new()
        propertiesObject$fromJSON(jsonlite::toJSON(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$properties, auto_unbox = TRUE))
        self$`properties` <- propertiesObject
      }
    },
    # NOTE(review): the %s placeholders interpolate field values without
    # JSON quoting or escaping, so the output is only valid JSON when the
    # values are already quoted -- a known trait of this generator version;
    # fix belongs in the generator template, not here.
    toJSONString = function() {
      sprintf(
        '{
          "pid": %s,
          "title": %s,
          "description": %s,
          "properties": %s
        }',
        self$`pid`,
        self$`title`,
        self$`description`,
        self$`properties`$toJSON()
      )
    },
    # Populate this instance from a JSON string without per-field NULL checks.
    fromJSONString = function(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoJson) {
      ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject <- jsonlite::fromJSON(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoJson)
      self$`pid` <- ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`pid`
      self$`title` <- ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`title`
      self$`description` <- ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$`description`
      ComDayCqDamCoreImplGfxCommonsGfxRendererPropertiesObject <- ComDayCqDamCoreImplGfxCommonsGfxRendererProperties$new()
      self$`properties` <- ComDayCqDamCoreImplGfxCommonsGfxRendererPropertiesObject$fromJSON(jsonlite::toJSON(ComDayCqDamCoreImplGfxCommonsGfxRendererInfoObject$properties, auto_unbox = TRUE))
    }
  )
)
82d64618380c3c783b6b6627e56637d6d13f9163 | fd0622e97276bba2c04d3c2fcba902cdfb65e214 | /packages/nimble/inst/classic-bugs/vol1/seeds/seeds-init.R | 9cee6a5c272f05beb40dfebd5740cd423f01f106 | [
"GPL-2.0-only",
"BSD-3-Clause",
"CC-BY-4.0",
"GPL-1.0-or-later",
"MPL-2.0",
"GPL-2.0-or-later"
] | permissive | nimble-dev/nimble | 7942cccd73815611e348d4c674a73b2bc113967d | 29f46eb3e7c7091f49b104277502d5c40ce98bf1 | refs/heads/devel | 2023-09-01T06:54:39.252714 | 2023-08-21T00:51:40 | 2023-08-21T00:51:40 | 20,771,527 | 147 | 31 | BSD-3-Clause | 2023-08-12T13:04:54 | 2014-06-12T14:58:42 | C++ | UTF-8 | R | false | false | 68 | r | seeds-init.R | "tau" <-
1
# Initial values for the seeds example (classic BUGS vol. 1): tau starts
# at 1 and all regression coefficients at 0. The bare "1" above completes
# the `"tau" <-` assignment begun on the preceding line of the file.
"alpha0" <-
0
"alpha1" <-
0
"alpha2" <-
0
"alpha12" <-
0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.