blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a9be545b22313089fe2f0e40599a552a9bbe72b | c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab | /man/gdkPixmapColormapCreateFromXpmD.Rd | d34d5ed04b8dd922af0c8f373d9b8b98227b9a31 | [] | no_license | cran/RGtk2.10 | 3eb71086e637163c34e372c7c742922b079209e3 | 75aacd92d4b2db7d0942a3a6bc62105163b35c5e | refs/heads/master | 2021-01-22T23:26:26.975959 | 2007-05-05T00:00:00 | 2007-05-05T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,326 | rd | gdkPixmapColormapCreateFromXpmD.Rd | \alias{gdkPixmapColormapCreateFromXpmD}
\name{gdkPixmapColormapCreateFromXpmD}
\title{gdkPixmapColormapCreateFromXpmD}
\description{Create a pixmap from data in XPM format using a particular
colormap.}
\usage{gdkPixmapColormapCreateFromXpmD(drawable, colormap, transparent.color, data)}
\arguments{
\item{\code{drawable}}{[\code{\link{GdkDrawable}}] a \code{\link{GdkDrawable}}, used to determine default values
for the new pixmap. Can be \code{NULL} if \code{colormap} is given.}
\item{\code{colormap}}{[\code{\link{GdkColormap}}] the \code{\link{GdkColormap}} that the new pixmap will use.
If omitted, the colormap for \code{window} will be used.}
\item{\code{transparent.color}}{[\code{\link{GdkColor}}] the color to be used for the pixels
that are transparent in the input file. Can be \code{NULL},
in which case a default color will be used.}
\item{\code{data}}{[character] Pointer to a string containing the XPM data.}
}
\value{
A list containing the following elements:
\item{retval}{[\code{\link{GdkPixmap}}] the \code{\link{GdkPixmap}}.}
\item{\code{mask}}{[\code{\link{GdkBitmap}}] a pointer to a place to store a bitmap representing
the transparency mask of the XPM file. Can be \code{NULL},
in which case transparency will be ignored.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
e0bdf5a395f07046ab989ec1ddaf02d930d5ff10 | 7c756f0eaa3779587672508437e5eef5db9b9236 | /R/simulate-nplcm.R | b262d24df406044d12afaa5987b1a7c8e8fbdfbe | [
"MIT"
] | permissive | vjcitn/baker | 6da8e43e717eeb68f00b80eb27cdd60dfc9336f8 | 140718dfb9e87d23543570f4a402a39ea024f095 | refs/heads/master | 2021-01-19T10:32:40.575869 | 2016-12-21T16:55:12 | 2016-12-21T16:55:12 | 82,190,742 | 0 | 0 | null | 2017-02-16T14:42:24 | 2017-02-16T14:42:24 | null | UTF-8 | R | false | false | 10,427 | r | simulate-nplcm.R | #' Simulation data from nested partially-latent class model (npLCM) family
#'
#' @details Use different case and control subclass mixing weights. Eta is of
#' dimension J times K. NB: document the elements in \code{set_parameter}. Also, current
#' function is written in a way to facilitate adding more measurement components.
#'
#' @param set_parameter True model parameters in the npLCM specification
#'
#'
#' @return A list of measurements, true latent statues:
#' \itemize{
#' \item{\code{data_nplcm}} a list of structured data (see \code{\link{nplcm}} for
#' description) for use in visualization
#' e.g., \code{\link{plot_logORmat}} or model fitting, e.g., \code{\link{nplcm}}.
#' The pathogen taxonomy is set to default "B".
#' \item{\code{template}} a matrix: rows for causes, columns for measurements;
#' generated as a lookup table to match mixture component parameters for every type
#' (a particular cause) of individuals.
#' \item{\code{latent_cat}} integer values to indicate the latent category. The integer
#' code corresponds to the order specified in \code{set_parameter$etiology}.
#' Controls are coded as \code{length(set_parameter$etiology)+1}.)
#' }
#'
#' @seealso \link{simulate_latent} for simulating discrete latent status, given
#' which \link{simulate_brs} simulates bronze-standard data.
#'
#' @examples
#' K.true <- 2 # no. of latent subclasses in actual simulation.
#' # If eta = c(1,0), effectively, it is K.true=1.
#' J <- 21 # no. of pathogens.
#' N <- 600 # no. of cases/controls.
#'
#'
#' eta <- c(1,0)
#' # if it is c(1,0),then it is conditional independence model, and
#' # only the first column of parameters in PsiBS, ThetaBS matter!
#'
#' seed_start <- 20150202
#' print(eta)
#'
#' # set fixed simulation sequence:
#' set.seed(seed_start)
#'
#' ThetaBS_withNA <- c(.75,rep(c(.75,.75,.75,NA),5))
#' PsiBS_withNA <- c(.15,rep(c(.05,.05,.05,NA),5))
#'
#' ThetaSS_withNA <- c(NA,rep(c(0.15,NA,0.15,0.15),5))
#' PsiSS_withNA <- c(NA,rep(c(0,NA,0,0),5))
#'
#' # the following paramter names are set using names in the 'baker' package:
#' set_parameter <- list(
#' cause_list = c(LETTERS[1:J]),
#' etiology = c(c(0.36,0.1,0.1,0.1,0.1,0.05,0.05,0.05,
#' 0.05,0.01,0.01,0.01,0.01),rep(0.00,8)),
#' #same length as cause_list.
#' pathogen_BrS = LETTERS[1:J][!is.na(ThetaBS_withNA)],
#' pathogen_SS = LETTERS[1:J][!is.na(ThetaSS_withNA)],
#' meas_nm = list(MBS = c("MBS1"),MSS="MSS1"),
#' Lambda = eta, #ctrl mix
#' Eta = t(replicate(J,eta)), #case mix, row number equal to Jcause.
#' PsiBS = cbind(PsiBS_withNA[!is.na(PsiBS_withNA)],
#' rep(0,sum(!is.na(PsiBS_withNA)))),
#' ThetaBS = cbind(ThetaBS_withNA[!is.na(ThetaBS_withNA)],
#' rep(0,sum(!is.na(ThetaBS_withNA)))),
#' PsiSS = PsiSS_withNA[!is.na(PsiSS_withNA)],
#' ThetaSS = ThetaSS_withNA[!is.na(ThetaSS_withNA)],
#' Nu = N, # control size.
#' Nd = N, # case size.
#' SS = TRUE
#' )
#' simu_out <- simulate_nplcm(set_parameter)
#' data_nplcm <- simu_out$data_nplcm
#'
#' pathogen_display <- rev(set_parameter$pathogen_BrS)
#' plot_logORmat(data_nplcm,pathogen_display)
#'
#' @family simulation functions
#' @export
simulate_nplcm <- function(set_parameter) {
  # Draw the latent cause category for each case (controls carry no cause):
  latent <- simulate_latent(set_parameter)
  # Bronze-standard (BrS) measurements are always simulated:
  out_brs <- simulate_brs(set_parameter,latent)
  # Organize bronze-standard data: drop the case/control column, keeping
  # only the measurement columns, and name the slice after the BrS source.
  MBS_list <-
    list(out_brs$datres[,-grep("case",colnames(out_brs$datres)),drop = FALSE])
  names(MBS_list) <- set_parameter$meas_nm$MBS
  Mobs <- list(MBS = MBS_list, MSS = NULL, MGS = NULL)
  # Silver-standard (SS) measurements are optional (set_parameter$SS flag):
  if (!is.null(set_parameter$SS) && set_parameter$SS){
    out_ss <- simulate_ss(set_parameter,latent)
    # silver-standard data, organized the same way as BrS:
    MSS_list <-
      list(out_ss$datres[,-grep("case",colnames(out_ss$datres)),drop = FALSE])
    names(MSS_list) <- set_parameter$meas_nm$MSS
    Mobs <- list(MBS = MBS_list, MSS = MSS_list, MGS = NULL)
  }
  # Case/control indicator (cases first, then controls); no covariates:
  Y <- out_brs$datres$case
  X <- NULL
  data_nplcm <- make_list(Mobs, Y, X)
  # BUG FIX: simulate_latent() returns 'iLcat' (cases only), not
  # 'iLcatAllnumeric', so 'latent$iLcatAllnumeric' was always NULL here.
  # Rebuild the documented coding: cases keep the integer index of their
  # cause (order of set_parameter$etiology) and controls are coded as
  # length(cause_list) + 1.
  latent_cat <- c(latent$iLcat,
                  rep(length(set_parameter$cause_list) + 1, set_parameter$Nu))
  make_list(data_nplcm, latent_cat)
}
#' Simulate Latent Status:
#' @param set_parameter parameters for measurements
#'
#' @return a list of latent status samples for use in sampling measurements. It
#' also includes a template to look up measurement parameters for each type of causes.
#' @family simulation functions
#' @export
#'
simulate_latent <- function(set_parameter) {
  # Etiology (cause fractions) common to all measurement types:
  cause_list <- set_parameter$cause_list
  etiology <- set_parameter$etiology
  Jcause <- length(cause_list)
  # Number of cases; controls carry no latent cause, so Nu is not needed:
  Nd <- set_parameter$Nd
  # Draw one latent cause index per case.  The etiology vector is the same
  # for every case, so the old Nd x Jcause 'etiologyMat' (one copy of
  # 'etiology' per row) was pure overhead and has been removed.  One
  # sample() call per case is kept so the random-number stream -- and
  # therefore any seeded simulation -- is unchanged.
  iLcat <- rep(NA_integer_, Nd)
  for (i in seq_len(Nd)) {
    iLcat[i] <- sample(seq_len(Jcause), 1, prob = etiology)
  }
  # Character label of the sampled cause for each case:
  iLnm <- cause_list[iLcat]
  # Return the integer codes (order of set_parameter$etiology) and the
  # matching cause names.
  make_list(iLcat, iLnm)
}
#' Simulate Bronze-Standard Data
#'
#'
#' simulate BrS measurements:
#' @param set_parameter parameters for BrS measurements
#' @param latent_samples sampled latent status for all the subjects, for use in simulate
#' BrS measurements.
#'
#' @return a data frame with first column being case-control status (case at top) and
#' columns of bronze-standard measurements
#' @family simulation functions
#' @export
simulate_brs <- function(set_parameter,latent_samples) {
  # Lookup table: rows = causes, columns = BrS measurements.
  pathogen_BrS <- set_parameter$pathogen_BrS
  cause_list <- set_parameter$cause_list
  template <- make_template(pathogen_BrS, cause_list)
  J_BrS <- length(pathogen_BrS)
  # Measurement-response parameters (J_BrS x K matrices) and subclass
  # mixing weights:
  PsiBS <- set_parameter$PsiBS     # false-positive rates per subclass
  ThetaBS <- set_parameter$ThetaBS # true-positive rates per subclass
  Lambda <- set_parameter$Lambda   # control subclass weights
  Eta <- set_parameter$Eta         # case subclass weights, one row per cause
  iLcat <- latent_samples$iLcat
  # sample sizes:
  Nd <- set_parameter$Nd
  Nu <- set_parameter$Nu
  # --- cases ---
  Zd <- rep(NA, Nd)
  MdP <- matrix(NA, nrow = Nd, ncol = J_BrS)
  for (i in seq_len(Nd)) {
    # Subclass for case i, drawn from the Eta row of its cause:
    Zd[i] <- sample(seq_len(ncol(Eta)), 1, prob = Eta[iLcat[i], ])
    # Vectorized over measurements (replaces the original scalar j-loop;
    # values and RNG stream are unchanged): positive probability is
    # ThetaBS where the cause hits the measurement (template == 1) and
    # PsiBS otherwise.
    tmp <- template[iLcat[i], ]
    MdP[i, ] <- PsiBS[, Zd[i]] * (1 - tmp) + tmp * ThetaBS[, Zd[i]]
  }
  Md <- rvbern(MdP)
  # --- controls ---
  Zu <- rep(NA, Nu)
  MuP <- matrix(NA, nrow = Nu, ncol = J_BrS)
  for (i in seq_len(Nu)) {
    Zu[i] <- sample(seq_along(Lambda), 1, prob = Lambda)
    MuP[i, ] <- PsiBS[, Zu[i]]  # controls only see false-positive rates
  }
  Mu <- rvbern(MuP)
  ## Organize case/control status plus BrS measurements into a data frame
  ## (cases on top, controls below).
  datacolnames <- c("case", pathogen_BrS)
  datres <-
    data.frame(case = c(rep(1,Nd),rep(0,Nu)), MBS = rbind(Md,Mu))
  colnames(datres) <- datacolnames
  make_list(datres,template)
}
#' Simulate Silver-Standard Data
#'
#'
#' simulate SS measurements:
#' @param set_parameter parameters for SS measurements
#' @param latent_samples sampled latent status for all the subjects, for use in simulate
#' BrS measurements.
#'
#' @return a data frame with first column being case-control status (case at top) and
#' columns of silver-standard measurements
#' @family simulation functions
#' @export
simulate_ss <- function(set_parameter,latent_samples) {
  # Lookup table: rows = causes, columns = SS measurements.
  pathogen_SS <- set_parameter$pathogen_SS
  cause_list <- set_parameter$cause_list
  template <- make_template(pathogen_SS, cause_list)
  J_SS <- length(pathogen_SS)
  # SS rates are plain vectors (no subclasses): false-positive (PsiSS)
  # and true-positive (ThetaSS) rates per measurement.
  PsiSS <- set_parameter$PsiSS
  ThetaSS <- set_parameter$ThetaSS
  iLcat <- latent_samples$iLcat
  # sample sizes:
  Nd <- set_parameter$Nd
  Nu <- set_parameter$Nu
  # Cases: vectorized over measurements (replaces the original scalar
  # j-loop; values and RNG stream are unchanged).  Positive probability
  # is ThetaSS where the case's cause hits the measurement
  # (template == 1) and PsiSS otherwise.
  MdP <- matrix(NA, nrow = Nd, ncol = J_SS)
  for (i in seq_len(Nd)) {
    tmp <- template[iLcat[i], ]
    MdP[i, ] <- PsiSS * (1 - tmp) + tmp * ThetaSS
  }
  Md <- rvbern(MdP)
  # Controls: every row uses the false-positive rates PsiSS.
  MuP <- matrix(PsiSS, nrow = Nu, ncol = J_SS, byrow = TRUE)
  Mu <- rvbern(MuP)
  ## Organize case/control status plus SS measurements into a data frame
  ## (cases on top, controls below).
  datacolnames <- c("case", pathogen_SS)
  datres <-
    data.frame(case = c(rep(1,Nd),rep(0,Nu)), MSS = rbind(Md,Mu))
  colnames(datres) <- datacolnames
  make_list(datres,template)
}
|
linereg <- function(x, y) {
  # Simple linear regression of y on x by ordinary least squares.
  # Side effect: scatter plot of y vs x with the fitted line drawn on it.
  # Returns (invisibly) a list with elements 'intercept' and 'slope'.
  stopifnot(length(x) == length(y), length(x) >= 2)
  xbar <- mean(x)
  ybar <- mean(y)
  xc <- x - xbar                      # centered x
  yc <- y - ybar                      # centered y
  b1 <- sum(xc * yc) / sum(xc^2)      # slope = Sxy / Sxx
  b0 <- ybar - b1 * xbar              # intercept
  plot(y ~ x)
  abline(a = b0, b = b1)
  # BUG FIX: the old body ended with print(linereg), which printed the
  # function's own source code (and the bare 'b0'/'b1' lines did nothing
  # inside a function).  Return the fitted coefficients instead.
  invisible(list(intercept = b0, slope = b1))
}
# Example data: number of shopping centers per state (x) against retail
# sales in billions per state (y).
x <- c(630,370,616,700,430,568,1200,2976) #number of shopping centers in each state
y <- c(15.5,7.5,13.9,18.7,8.2,13.2,23,87.3) #retail sales in billions per state
linereg(x,y)
# NOTE(review): print(linereg) prints the function's own source code,
# not the fitted model -- probably unintended.
print(linereg)
|
820a9f4399944f4ad379d505cbc5d49d2bd3b509 | db01e20713fde3d6341dbb2f3e74ad64ae6c72c1 | /plot3.R | ca7ef6f77de8581ded77b7f1b84862b7e80db88b | [] | no_license | geisbsuj/ExData_Plotting1 | 323f7f824f6cb7fa6deca26510bd1ea9fbd13d73 | fd3ee68b1db55f83c6758368e5dbc163fcf75276 | refs/heads/master | 2021-01-06T04:06:36.745166 | 2018-03-01T16:41:58 | 2018-03-01T16:41:58 | 123,417,776 | 0 | 0 | null | 2018-03-01T10:10:36 | 2018-03-01T10:10:35 | null | UTF-8 | R | false | false | 991 | r | plot3.R | #read data
# Plot 3: energy sub-metering over 1-2 Feb 2007.
# Read the raw data; '?' marks missing values in this data set.
gap <- read.table("household_power_consumption.txt", header = TRUE, sep=";", stringsAsFactors = FALSE, dec = ".", na.strings ="?")
# Keep only the two target days.  (The original condition also OR-ed in
# 'is.na(FALSE)', which is constantly FALSE and therefore a dead term;
# it has been removed.)
sub_gap <- subset(gap, gap$Date =="1/2/2007" | gap$Date == "2/2/2007")
# Drop rows with NA in columns 7:8 (the sub-metering columns);
# Date and Time columns never contain NA.
sub_gap <- sub_gap[!rowSums(is.na(sub_gap[7:8])),]
# x-axis: combine Date and Time into POSIXlt timestamps.
plot_varx <- strptime(paste(sub_gap$Date,sub_gap$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
# y-axis: the three sub-metering series, coerced to numeric.
sub_met_1<- as.numeric(sub_gap$Sub_metering_1)
sub_met_2<- as.numeric(sub_gap$Sub_metering_2)
sub_met_3<- as.numeric(sub_gap$Sub_metering_3)
# Line plot of the three series (y-label typo "Enegry" fixed).
plot(plot_varx,sub_met_1,type = "l",xlab=" ", ylab="Energy sub metering")
lines(plot_varx,sub_met_2, col="red")
lines(plot_varx, sub_met_3, col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col = c("black","red", "blue"),lty = 1)
# Copy the screen device to a 480x480 PNG and close the PNG device.
dev.copy(png,"plot3.png", width = 480, height =480)
dev.off()
|
2fc751d1c51fc729f72878cde9630a9ae22a055f | 41624700d7fe9ecd40ed18d9bc12524cd4861aae | /src/3-do/API/cnvXmrnas/API_cnv_X_mrnas.R | c0749608b0e2777c72ce1797a997cc4c70cf3c2b | [] | no_license | mabba777/multiomics | b7dfd1668d29642f823d3113e7048ac0c9033b95 | caba3bf76c585e75febad3303b4d92dcd4f86058 | refs/heads/master | 2021-01-19T20:43:22.698926 | 2017-04-17T17:08:38 | 2017-04-17T17:08:38 | 88,539,031 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,095 | r | API_cnv_X_mrnas.R | #If the CNV has got repeated genes, it will use the first row in the order.
CnvXMrnas <- function(mrna, meth, meth.to.gene, output.path="~/",
                      output.file.name="methXMrna.csv",
                      r.minimium=0.7,
                      pearsons.method = "pearson",
                      inc.progress = F){
  # Correlates each mRNA row against the matching CNV row (matched by the
  # identifier in column 1) and keeps the pairs whose absolute correlation
  # exceeds r.minimium.  Writes the kept rows (gene, location, correlation,
  # p-value) to output.path/output.file.name and returns them through
  # convertVectorToMatrix().
  #
  # NOTE(review): the parameters 'meth' and 'meth.to.gene' are never used,
  # and the CNV data is read from a 'cnv' object taken from the calling
  # environment rather than from an argument -- TODO confirm this is
  # intentional.
  ptm <- proc.time()
  total.rows=nrow(mrna)
  print(paste("Running pipeline CNv_X_mrnas with", r.minimium,
              "threshold and pearson's method:", pearsons.method, sep=" "))
  # The result matix is created
  res <- matrix(nrow=total.rows,ncol=4)
  colnames(res)<-(c("Gene","Location", "CNV-mRNA correlation", "p-value"))
  # 'actual' counts processed mRNA rows (starts at 1 and is incremented
  # before use, so progress messages run one ahead of the row index);
  # 'actual.n.correlated' is the next free row in 'res'.
  actual<-1
  actual.n.correlated<-1
  ids.in.cnv.dataset<-cnv[,1]
  print("Start process!")
  for (i in 1:nrow(mrna)) {
    actual<-actual+1
    actual.gen<-as.character(mrna[i,1])
    position.in.cnv.dataset<-which(ids.in.cnv.dataset == actual.gen)
    # If the gene appears several times in the CNV data, keep the first row
    if (length(position.in.cnv.dataset)>0){
      position.in.cnv.dataset<-position.in.cnv.dataset[1]
      # Expression/copy-number profiles: all columns except the gene id.
      actual.mrna<-mrna[i,2:ncol(mrna)]
      actual.cnv<-cnv[position.in.cnv.dataset,2:ncol(cnv)]
      # Periodic progress / elapsed-time reporting.
      if ((actual)%%500==0)print(paste("analised ", actual, " from ", total.rows))
      if ((actual)%%1000==0) {
        elapsedTime <- (proc.time() - ptm)[3]
        print(paste(
          "elapsed time: (seconds)", format2Print(elapsedTime),
          " - (minutes)", format2Print(elapsedTime/60),
          " - (hours)", format2Print(elapsedTime/60/60)
        ))
        remainingTime <- ((total.rows*elapsedTime)/actual) - elapsedTime
        print(paste("estimated remaining time (seconds)", format2Print(remainingTime),
                    " - (minutes)", format2Print(remainingTime/60),
                    " - (hours)", format2Print(remainingTime/60/60)
        ))
      }
      # Correlation test between the mRNA and CNV profiles.
      resultado.pearson<-cor.test(as.numeric(actual.mrna),
                                  as.numeric(actual.cnv),
                                  method = pearsons.method)
      # Keep the pair only if the correlation is defined and strong enough.
      if (!is.na(abs(resultado.pearson$estimate))) {
        if (abs(resultado.pearson$estimate) > r.minimium) {
          location<-getGeneLocation(actual.gen);
          newValue<-c(as.character(actual.gen), location,
                      resultado.pearson$estimate, resultado.pearson$p.value)
          res[actual.n.correlated,1:4] <- newValue
          actual.n.correlated<-actual.n.correlated+1
        }
      }
    }
    # Presumably Shiny's incProgress() -- assumes a progress context is
    # active when inc.progress = TRUE; TODO confirm.
    if(inc.progress) {
      incProgress(1/nrow(cnv));
    }
  }
  # deleting useless and unused rows
  # NOTE(review): '1:actual.n.correlated-1' parses as (1:n) - 1, i.e. it
  # includes index 0; R silently drops a 0 index, so this keeps rows
  # 1..(n-1) as intended -- but only by accident of that behavior.
  res <- res[c(1:actual.n.correlated-1),c(1:4)]
  #if (!(folder.exists(output.path))) {dir.create(output.path)}
  file.path<-paste(output.path, output.file.name, sep="")
  write.table(res, file.path, sep="\t", row.names=FALSE,
              col.names=TRUE, quote=FALSE)
  print(proc.time() - ptm)
  return (convertVectorToMatrix(res))
}
|
CheckInput <- function(vector.population, integer.housesize) {
  # Validates the input for population vectors and the house size.
  # Author: Dominik Schröder
  #
  # Args:
  #   vector.population: a vector containing the population of each state per column.
  #   integer.housesize: the house size as variable.
  #
  # Stops with a descriptive error on invalid input; returns NULL
  # (invisibly) when everything is valid.
  #
  # The stray debug print(integer.housesize) has been removed.
  # BUG FIX: the old test typeof(x) != "double" wrongly rejected genuine
  # integer input such as 5L; is.numeric() accepts both storage modes
  # while the %% 1 test still requires a whole number.
  if (!is.numeric(integer.housesize) ||
      length(integer.housesize) != 1 ||
      is.na(integer.housesize) ||
      integer.housesize %% 1 != 0) {
    stop("House size must be an integer value!")
  }
  if (integer.housesize < 1) {
    stop("House size cannot be less than 1!")
  }
  if (integer.housesize < length(vector.population)) {
    stop("House size cannot be smaller than number of states!")
  }
  # seq_along() is safe for empty input (1:length(x) would yield c(1, 0)).
  for (i in seq_along(vector.population)) {
    tmp <- vector.population[i]
    # BUG FIX: an NA element previously reached the %% test and crashed
    # with "missing value where TRUE/FALSE needed"; reject it cleanly.
    # (The old is.null() check could never trigger for an atomic vector.)
    if (is.na(tmp)) {
      stop("Population size must be an integer value!")
    }
    if (!is.numeric(tmp) || tmp %% 1 != 0) {
      stop("Population size must be an integer value!")
    }
    if (tmp < 0) {
      stop("Population size cannot be a negative value!")
    }
  }
  invisible(NULL)
}
36b5ca3e89fd1fc60e234d8beeac8ec2b08907c4 | fa31be675e9c3fd68779b932c44a00ffa55d8904 | /Clase 4.R | 5567d4b4ef67f446b94703c54405461b84c2c0cb | [] | no_license | senboada/Curso-R | b79e53f03c598ef2c532f23e1c91dd3958abfce6 | 14fb62970e8386fcddb73b48a5c7b2f5d01454fd | refs/heads/main | 2023-08-12T10:32:22.313149 | 2021-10-12T23:25:36 | 2021-10-12T23:25:36 | null | 0 | 0 | null | null | null | null | ISO-8859-2 | R | false | false | 7,443 | r | Clase 4.R |
### One data set, built from two CSV halves
url1="https://raw.githubusercontent.com/Cruzalirio/Ucentral/master/Bases/ICFES/PruebaSaber1.csv"
url2="https://raw.githubusercontent.com/Cruzalirio/Ucentral/master/Bases/ICFES/PruebaSaber2.csv"
Icfes1 <- read.csv(url1, sep=";")
Icfes2 <- read.csv(url2, sep=";")
names(Icfes1)==names(Icfes2) ### Same column names in both halves
Comparacion = Icfes1==Icfes2 ### The rows are not the same
summary(Comparacion)
Icfes=rbind(Icfes1, Icfes2) ### Binding them by rows
### row bind requires both data frames to have the same columns
library(tidyverse)
table(Icfes$ESTU_GENERO) ### Frequency table of gender
fig <- ggplot(Icfes, aes(x=ESTU_GENERO))
fig
fig + geom_bar()
fig+ xlab("Genero") + ylab("Conteo")
fig+ geom_bar()+xlab("Genero") + ylab("Conteo")
fig <- fig+ geom_bar()+xlab("Genero") + ylab("Conteo")
## fig now stores the base plot plus the bars and the x/y labels
fig+ ggtitle("Frecuencias por Genero") ### Plus the title
fig <- ggplot(Icfes, aes(x=ESTU_GENERO))+geom_bar(width=0.7,
                                                  colour="red", fill="blue")
fig
Icfes$ESTU_GENERO_Mo = factor(Icfes$ESTU_GENERO, levels=c("M", "F"),
                              ordered = TRUE) ### Ordered factor
levels(Icfes$ESTU_GENERO_Mo) = c("Masculino", "Femenino")### Relabeling the levels
## Plot again with the relabeled factor
fig <- ggplot(Icfes, aes(x=ESTU_GENERO_Mo))+geom_bar(width=0.7,
                                                     colour="red", fill="blue")
fig
colors()
### An error: a histogram needs a continuous variable, not a categorical one
fig <- ggplot(Icfes, aes(x=ESTU_GENERO))+geom_histogram(width=0.8, colour="red", fill="blue")
fig
### A histogram (continuous global score)
fig <- ggplot(Icfes, aes(x=PUNT_GLOBAL))+
  geom_histogram( colour="red", fill="blue")
fig
#### A boxplot (score on the x axis)
fig <- ggplot(Icfes, aes(x=PUNT_GLOBAL))+
  geom_boxplot( colour="red", fill="blue")
fig
##### Same boxplot, vertical (score on the y axis)
fig <- ggplot(Icfes, aes(y=PUNT_GLOBAL))+
  geom_boxplot( colour="red", fill="blue")
fig
### Example taken from the ggplot2 help/cheat sheet at:
### chrome-extension://efaidnbmnnnibpcajpcglclefindmkaj/viewer.html?pdfurl=https%3A%2F%2Fraw.githubusercontent.com%2Frstudio%2Fcheatsheets%2Fmaster%2Fdata-visualization-2.1.pdf&clen=1949514&chunk=true
e <- ggplot(mpg, aes(cty, hwy))
e + geom_label(aes(label = cty), nudge_x = 1, nudge_y = 1)
c <- ggplot(mpg, aes(hwy)); c2 <- ggplot(mpg)
c + geom_density(kernel = "gaussian")
# Density of the quantitative-reasoning module score:
fig = ggplot(Icfes, aes(x=MOD_RAZONA_CUANTITAT_PUNT))
fig+geom_density()+ xlab("Puntaje en RC")+
  ylab("Densidad")+ ggtitle("Distribución de los puntajes")
#### Boxplots of the global score by gender
fig <- ggplot(Icfes, aes(x=PUNT_GLOBAL, y=ESTU_GENERO_Mo))+
  geom_boxplot( colour="red", fill="blue")
fig
fig <- ggplot(Icfes, aes(x=PUNT_GLOBAL, y=ESTU_GENERO))+geom_boxplot()
fig
#### Stacked vs. dodged bar charts
fig <- ggplot(Icfes,aes(x=ESTU_GENERO, fill=ESTU_ESTADOCIVIL))
fig+geom_bar()
fig+geom_bar(position = "dodge")
fig <- ggplot(Icfes,aes(x=ESTU_ESTADOCIVIL, fill=ESTU_GENERO))
fig+geom_bar()
fig+geom_bar(position = "dodge")
Icfes %>% group_by(ESTU_ESTADOCIVIL) %>% summarise(n())
### string functions (gsub)
gsub("˘","ó","Uni˘n libre") ### replaces the mis-encoded symbol with ó
gsub("˘","ó",c("Uni˘n libre", "Uni˘n", "Alirio", "Algodón")) ### applied to 4 strings
gsub("˘","NA",c("Uni˘n libre", "Uni˘n", "Alirio", "Algodón"))
### Now fix the mis-encoded symbols in the data itself
Icfes <- Icfes %>% mutate(ESTU_ESTADOCIVIL= gsub("˘","ó",ESTU_ESTADOCIVIL))
Icfes %>% group_by(ESTU_ESTADOCIVIL) %>% summarise(n())
fig <- ggplot(Icfes,aes(x=ESTU_GENERO, fill=ESTU_ESTADOCIVIL))
fig+geom_bar(position = "dodge")
### order the variable (ordered factor for marital status)
Icfes %>% group_by(ESTU_ESTADOCIVIL) %>% summarise(n())
Icfes$ESTADO_CIVIL_ORDE = factor(Icfes$ESTU_ESTADOCIVIL,levels=c("Soltero","Unión libre","Casado",
                                                                 "Separado y/o Divorciado", "Viudo",
                                                                 ""),
                                 ordered = TRUE)
Icfes %>% group_by(ESTADO_CIVIL_ORDE) %>% summarise(n())
fig <- ggplot(Icfes,aes(x=ESTU_GENERO, fill=ESTADO_CIVIL_ORDE))
fig+geom_bar(position = "dodge")
fig <- ggplot(Icfes,aes(x=ESTU_GENERO_Mo, fill=ESTADO_CIVIL_ORDE))
fig+geom_bar(position = "dodge")+ scale_fill_discrete(name = "Estado Civil")+
  geom_text()
### Merge "Viudo" into "Separado y/o Divorciado"
Icfes <- Icfes %>%
  mutate(ESTADO_CIVIL=replace(ESTU_ESTADOCIVIL,
                              ESTU_ESTADOCIVIL=="Viudo","Separado y/o Divorciado"))
# Mean of the per-gender average score by department (only groups with n > 20):
Base1 <-Icfes %>% group_by(ESTU_DEPTO_PRESENTACION, ESTU_GENERO) %>%
  summarise(Conteo=n(), punta_prom=mean(PUNT_GLOBAL))%>%
  filter(Conteo>20) %>% group_by(ESTU_DEPTO_PRESENTACION) %>%
  summarise(mean(punta_prom))
fig <- ggplot(Icfes,aes(x=ESTU_GENERO, fill=ESTADO_CIVIL))
fig+geom_bar()
fig+geom_bar(position = "dodge")
fig+geom_bar(position = "dodge")+facet_grid(FAMI_TIENEINTERNET ~.)
Icfes %>% group_by(FAMI_TIENEINTERNET) %>% count()
fig+geom_bar(position = "dodge")+facet_grid(.~FAMI_TIENEINTERNET)
## What do we do with the empty category?
Icfes %>% group_by(FAMI_TIENEINTERNET) %>% summarise(n())
# Recode empty strings as NA:
Icfes<- Icfes %>% mutate(FAMI_TIENEINTERNET1 =replace(FAMI_TIENEINTERNET,
                                                      FAMI_TIENEINTERNET =="", NA))
Icfes %>% group_by(FAMI_TIENEINTERNET1) %>% summarise(n())
# This variant NAs out the valid "No"/"Si" values instead:
Icfes<- Icfes %>% mutate(FAMI_TIENEINTERNET1 =replace(FAMI_TIENEINTERNET,
                                                      FAMI_TIENEINTERNET %in% c("No", "Si"), NA))
Icfes %>% group_by(FAMI_TIENEINTERNET1) %>% summarise(n())
# Corrected: NA out everything that is NOT "No"/"Si":
Icfes<- Icfes %>% mutate(FAMI_TIENEINTERNET1 =replace(FAMI_TIENEINTERNET,
                                                      !FAMI_TIENEINTERNET %in% c("No", "Si"), NA))
# NOTE(review): this loop has an empty body and does nothing.
for(i in names(Icfes)){
}
Icfes %>% group_by(FAMI_TIENEINTERNET1) %>% summarise(n())
### Histograms and densities of the global score by marital status
fig <- ggplot(Icfes,aes(x=PUNT_GLOBAL, fill=ESTU_ESTADOCIVIL))
fig+geom_histogram()
fig <- ggplot(Icfes,aes(x=PUNT_GLOBAL))
fig+geom_histogram(binwidth = 81)+facet_grid(.~ESTU_ESTADOCIVIL)
fig <- ggplot(Icfes,aes(x=PUNT_GLOBAL, fill=ESTU_ESTADOCIVIL))
fig+geom_density()+facet_grid(.~ESTU_ESTADOCIVIL)
fig+geom_density()+facet_grid(ESTU_ESTADOCIVIL~.)
fig+geom_density(alpha=0.1)
fig <- ggplot(Icfes,aes(x=PUNT_GLOBAL))
fig+geom_density()+facet_grid(.~ESTU_ESTADOCIVIL)
# Overlaid densities for two marital-status groups:
fig <- ggplot(Icfes%>% filter(ESTU_ESTADOCIVIL %in% c("Soltero", "Unión libre")),
              aes(x=PUNT_GLOBAL, fill=ESTU_ESTADOCIVIL))
fig+geom_density()
fig+geom_density(alpha=0.5)
fig <- ggplot(Icfes%>% filter(ESTU_ESTADOCIVIL %in% c("Soltero", "Casado")),
              aes(x=PUNT_GLOBAL, fill=ESTU_ESTADOCIVIL))
fig+geom_density()
fig+geom_density(alpha=0.5)
Icfes %>% group_by(ESTU_METODO_PRGM)%>% count()
# Two-sample t-test of the global score: Soltero vs. Casado.
t.test(Icfes%>%filter(ESTU_ESTADOCIVIL=="Soltero")%>%select(PUNT_GLOBAL),
       Icfes%>%filter(ESTU_ESTADOCIVIL=="Casado")%>%select(PUNT_GLOBAL))
fig <- ggplot(Icfes%>% filter(ESTU_METODO_PRGM %in% c("DISTANCIA", "PRESENCIAL")),
              aes(x=PUNT_GLOBAL, fill=ESTU_METODO_PRGM))
fig+geom_density()
fig+geom_density(alpha=0.5)
# Two-sample t-test: distance vs. on-campus programs.
t.test(Icfes%>%filter(ESTU_METODO_PRGM=="DISTANCIA")%>%select(PUNT_GLOBAL),
       Icfes%>%filter(ESTU_METODO_PRGM=="PRESENCIAL")%>%select(PUNT_GLOBAL))
fig+geom_boxplot()
fig <- ggplot(Icfes,aes(x=PUNT_GLOBAL, fill=ESTU_ESTADOCIVIL))
fig+geom_boxplot()
fig <- ggplot(Icfes,aes(y=PUNT_GLOBAL, fill=ESTU_ESTADOCIVIL))
fig+geom_boxplot()
|
surveyPoints=function(soilmap,scorpan,conditionclass,mapproportion){
  # Samples random survey points over the raster cells that satisfy the
  # condition class, with a sample size driven by the scorpan weight and
  # the raster resolution.  Returns the sampled SpatialPointsDataFrame
  # restricted to cells meeting the condition.
  #
  # The original duplicated the whole body in both branches of the
  # if/else; the only difference was coercing non-RasterLayer input with
  # raster(), so the branches are merged here.
  if (is(soilmap, "RasterLayer")) {
    map <- soilmap
  } else {
    map <- raster(soilmap)
  }
  w=scorpan;
  b=conditionclass;
  j=mapproportion
  # Cells meeting the condition; mark a random j% of them with value 1.
  tagt=which(map[]<=b)
  emerg=sort(sample(tagt,length(tagt)*(j/100)))
  map[emerg]=1
  # Area covered by the marked cells (res(map) returns the x/y cell
  # sizes, hence the [1] subsets further below).
  area=length(subset(getValues(map), getValues(map) == 1)) * res(map)^2
  # Clamp the scorpan weight to [1, 5].
  w=ifelse(w<1,1,ifelse(w>5,5,w))
  area=area[1]
  # Number of survey points: denser sampling for smaller weights.
  samplnum=area*(1/(4*w*res(map)))^2
  samplnum=round(samplnum[1],0)
  # Random points over the condition mask; keep only those inside it.
  map$new=values(map)<=b
  ndat=sampleRandom(map$new,samplnum, xy=TRUE, sp=TRUE)
  ndat=subset(ndat,ndat$new>0)
  return(ndat)
}
|
da769b1fdc97360fa85e9cc18eb0bfa193c8fda8 | 43e760ddd28bdb0f5ce1251766ae6bef56c1efd9 | /R Projects/Practice/2. Inferential Statistics.R | cd10a41b78934bea68511807c436ef57dcf9ca66 | [] | no_license | giteshpoudel/DataScience_Repo | 878d889ee5787c7ffca1af22ee8a4a95960aacd9 | fafa9039ae8965b1ae2dcda2f0a48fc1e3958e8a | refs/heads/main | 2023-01-18T23:41:53.211599 | 2020-11-14T14:51:51 | 2020-11-14T14:51:51 | 299,972,670 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,881 | r | 2. Inferential Statistics.R | setwd("C:/Users/Gitesh/Desktop/Term 2/ALY6015/Week 2 Project")
library(tidyverse)
install.packages("dslabs")
library(dslabs)
data(murders)
summary(murders)
#calculating murder rate in per hundred thousand people
murder_rate <- murders$total/murders$population * 100000
murders <- mutate(murders, murder_rate)
head(murders)
average_murder_rate <- mean(murder_rate)
average_murder_rate #2.779125
# We will test if the murder rate of Northeast states is less than country average
#one-sample t-test
#H0: Murder rate of Northeast is greater than average murder rate of country
#H1: Murder rate of Northeast is lower than average murder rate of country
#confidence level of 95%
northeast <- filter(murders, murders$region == "Northeast" )
northeast
plot(northeast$murder_rate, main = "Northeast states murder rate compared to Country wide average", col="blue")
abline(a=average_murder_rate, b=0, col="red")
t.test(northeast$murder_rate, mu=average_murder_rate, alternative = "less", conf.level = 0.95)
#For two sample T-test and F-test you will create a new sample vector
#we will create a data.frame with only Western states
west <- filter(murders, murders$region == "West" )
west
plot(west$murder_rate, main = "Western states murder rate compared to Country wide average", col="blue")
abline(a=average_murder_rate, b=0, col="red")
#Before t-test conduct f-test to identify if they have different variance
boxplot(northeast$murder_rate , west$murder_rate, main="Murder Rate of Northeast and West")
var.test(northeast$murder_rate, west$murder_rate, mu=0, conf.level = 0.95, alt="two.sided", var.equal = F, paired = F)
#t.test(murders$murder_rate ~ murders$region %in% c("Northeast", "West"), var.equal = F) #not sure why this didn't work
t.test(northeast$murder_rate, west$murder_rate, mu=0, conf.level = 0.95, alt="two.sided", var.equal = T, paired = F)
|
28c05a435ffdce0d5507ad273e123aa8d5435c62 | 711d4bc7205b2049fa56fa274d110792ce27f12b | /ties.r | 7886593b760b466e68785151b714fa810eac3213 | [] | no_license | slavojstar/general | e2593851dc85bbb9617343801cd6feb417f7fb22 | fd796759488f1b6dbc952dfa217ad91bb4ae65e1 | refs/heads/master | 2020-09-08T21:43:44.165356 | 2020-01-08T10:23:28 | 2020-01-08T10:23:28 | 221,250,328 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,388 | r | ties.r | tie_checker <- function(vector_in) {
# Returns a list of the positions of ties, in clusters.
# (i.e. a cluster is a list of positions in which all the elements
# at those positions are equal)
tie_positions <- list()
len <- length(vector_in)
# For each position in the vector, search along the rest of the vector
# and count the ties, and record where they are
for (i in seq(len - 1)) {
# Create a vector which records all the positions in a particular
# cluster:
tie_cluster <- c(i)
# If we've already recorded a tie in this position we can move on:
if (i %in% unlist(tie_positions)) {
next
}
else {
for (j in seq(i + 1, len)) {
# If we find a tie, add it to the cluster
if (vector_in[i] == vector_in[j]) {
tie_cluster <- c(tie_cluster, j)
}
}
# If the only element in the cluster is the one we added
# at the start of the iteration, there are no ties
if (length(tie_cluster) == 1) {
next
} else {
# Otherwise we can add the cluster to the end of tie_positions
tie_positions[[length(tie_positions) + 1]] <- tie_cluster
}
}
}
if (length(tie_positions) == 0) {
print("There are no ties.")
} else {
print("There are ties in the following positions:")
print(tie_positions)
}
invisible(tie_positions)
}
|
1488cff45a2e0783d3169234263f92d28826d45a | 77e1f3fff1cfdf8f76c5a0a1f7b3694d0c9ced89 | /refbias/Scripts/mult_testing4.R | 6f4046589d9d64d55e05ecfefaca3c7b2579d29a | [] | no_license | evigorito/geu_pipeline | ed0065eaf67cfe79519e4015b7087be16e8b0842 | 646b76f21cc9313e16aa3c7a21d8494736ffdb09 | refs/heads/master | 2021-04-04T16:53:50.221458 | 2020-03-23T11:32:34 | 2020-03-23T11:32:34 | 248,472,913 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 41,345 | r | mult_testing4.R | #' ---
#' title: Choose prior and set threshold for making significant calls with Bayesian model
#' author: Elena
#' output:
#' pdf_document:
#' toc: true
#'
#' ---
## Report built by rule Btrecase_analysis from
## /home/ev250/Bayesian_inf/trecase/Scripts/stan_eff/real_data/refbias/Snakefile
## Load R packages and functions:
library(data.table)
library(ggplot2)
library(gridExtra)
library(RColorBrewer)
library(cowplot)
library(grid)
## library(biomaRt)
library(mixtools)
source("/home/ev250/Bayesian_inf/trecase/Functions/out.trecase.many.genes.R")
source("/home/ev250/Bayesian_inf/trecase/Functions/various.R")
source('/home/ev250/Cincinatti/Functions/various.R')
#################################################################################
##' # Compare associations using Trec and Btrecase with old and new priors #####
#################################################################################
##########################
## Open trec and format ##
##########################
## trec <- comb.files(path='/mrc-bsu/scratch/ev250/EGEUV1/quant/Btrecase/output/chr22/GT',pattern="trec.stan.summary.txt")
trec <- comb.files(path=snakemake@params[['trec']], pattern="trec.stan.summary.txt")
## Add EAF
## le.file <- "/home/ev250/rds/rds-cew54-wallace-share/Data/reference/1000GP_Phase3/1000GP_Phase3_chr22.legend.gz"
le.file <- snakemake@input[['lefile']]
EAF <- snp.eaf(le.file, unique(trec$tag))
setnames(EAF, "eaf", "tag.EAF")
trec <- merge(trec, EAF, by.x="tag", by.y="snp")
## new priors
trec.m <- lapply(paste0("/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrec/" , c("SpikeMixV3_2", "SpikeMixV3_3")),
function(i) comb.files(path=i,pattern=".stan.summary.txt"))
names(trec.m) <- c("SpikeMixV3_2", "SpikeMixV3_3")
trec.m <- lapply(snakemake@params[['trec_other']] , function(i) comb.files(i, pattern=".stan.summary.txt"))
trec.m <- lapply(trec.m, add.null)
names(trec.m) <- basename(snakemake@params[['trec_other']])
## add gene distance to trec
## gene.coord <- fread("/mrc-bsu/scratch/ev250/psoriasis/Btrecase/inputs/gene_inputs/gene_info.txt")
gene.coord <- fread(snakemake@input[['geneStEnd']])
## select genes in chrom 22
gt22 <- gene.coord[chrom==22,]
## add tag distance to gene (closest to start or end)
trec.m <- lapply(trec.m, function(i) gene.d(i, gt22[, .(gene_id, start,end,chrom)]))
## remove bad runs
trec.m <- lapply(trec.m , function(i) i[Rhat <= 1.01,])
## merge after removing bad runs
trec.comp <- lapply(trec.m, function(i) merge(trec, i, by=c("Gene_id", "tag"), suffixes=c(".norm", ".mix")))
trec.comp <- lapply(trec.comp, function(i) add.signif(i, "log2_aFC_null", "null.95", c("Normal", "Mix")))
cols <- c("log2_aFC_mean", "log2_aFC_2.5%", "log2_aFC_97.5%")
#############################
## Open Btrecase and format ##
##############################
## Hard-coded paths kept (commented) for interactive use:
## btrecase <- mapply(function(i,j) {
##     dt <- comb.files(i, paste0("^", j,"\\.ENSG[0-9]+.*stan.summary.txt"))
##     dt <- add.null(dt)
##     dt <- gene.d(dt, gt22[, .(gene_id, start,end,chrom)])
##     return(dt)
## },
## i=c('/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SingleCounting/GT',
##     '/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SpikeMixV3_2/GT',
##     '/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SpikeMixV3_3/GT'
##     ),
## j=c("refbias", rep("rbias",2)),
## SIMPLIFY=F)
## names(btrecase) <- basename(dirname(c('/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SingleCounting/GT',
##     '/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SpikeMixV3_2/GT',
##     '/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SpikeMixV3_3/GT'
##     )))
## Load Btrecase summaries for the normal-prior run plus the two mixture-prior
## runs; file-name prefix differs ("refbias" vs "rbias") per run.
btrecase <- mapply(function(i,j) {
    dt <- comb.files(i, paste0("^", j,"\\.ENSG[0-9]+.*stan.summary.txt"))
    dt <- add.null(dt)
    dt <- gene.d(dt, gt22[, .(gene_id, start,end,chrom)])
    return(dt)
},
i=c(snakemake@params[['gt_btrecase_normal']], snakemake@params[['gt_btrecase_mix']]),
j=c("refbias", rep("rbias",2)),
SIMPLIFY=F)
names(btrecase) <- basename(dirname(c(snakemake@params[['gt_btrecase_normal']], snakemake@params[['gt_btrecase_mix']])))
## Compare each mixture-prior run (elements 2..n) against the normal-prior run
## (element 1) on shared associations.
btrecase.comp <- lapply(btrecase[2:length(btrecase)],function(i) {
    dt <- merge(btrecase[[1]], i, by=c("Gene_id", "tag","tag.EAF", "gene.dist"), suffixes=c(".norm", ".mix"))
    dt <- add.signif(dt, "null.95.norm", "null.95.mix", c("Normal", "Mix"))
    return(dt)})
## Same effect-size columns as `cols`, plus the 95% null call.
cols.b <- c(cols, "null.95")
##########################################################
##' Trec
## One scatter plot per mixture prior: effect sizes under the normal prior vs
## the Gaussian-mixture prior, restricted to associations significant in at
## least one run.
l1 <- lapply(seq_along(trec.comp), function (i) btrecase.plot(dt=trec.comp[[i]][Signif != "None" ,] ,
                                                              x1=c(paste0(cols, ".norm") ,"log2_aFC_null" ),
                                                              x2=c(paste0(cols, ".mix"), "null.95" ),
                                                              #s=nrow(trec.comp[[i]][Signif != "None" ,]),
                                                              xl="eQTL effect normal prior",
                                                              yl="eQTL effect mix prior",
                                                              col=c("Normal", "Mix"),
                                                              title=paste0("Trec with normal vs\n Gaussian ",names(trec.m)[i] ," prior"),
                                                              title.size=16,
                                                              axis.title = 14,
                                                              axis.text=12,
                                                              legend.title=14,
                                                              legend.text=12,
                                                              legend.symbol=5,
                                                              point.size=3
                                                              ))
##+ fig.width= 8.3, fig.height=4.78
plot_grid(plotlist=l1, ncol=length(trec.m))
## Counts of associations per significance category, per prior.
lapply(trec.comp, function(i) i[,.N,Signif])
#################################################################
##' Trecase
## For each mixture prior: scatter of normal- vs mixture-prior effects with an
## embedded table of significance counts; returns the count tables.
btrecase.tab <- lapply(names(btrecase.comp), function(i) {
    tab <- tab2bplot(btrecase.comp[[i]], colors=c("None"="#999999","Normal"="yellow3", "Both"="#D55E00", "Mix"="#0072B2"))
    p <- btrecase.plot(dt=btrecase.comp[[i]][Signif != "None" ,] ,
                       x1=paste0(cols.b, ".norm"),
                       x2=paste0(cols.b, ".mix"),
                       ##s=nrow(trec.comp[[i]][Signif != "None" ,]),
                       xl="eQTL effect Btrecase normal prior",
                       yl="eQTL effect Btrecase mix normal prior",
                       col=c("Normal", "Mix"),
                       title=paste0("Btrecase with normal or Gaussian mix prior ", i),
                       title.size=16,
                       axis.title = 14,
                       axis.text=12,
                       legend.title=14,
                       legend.text=12,
                       legend.symbol=5,
                       point.size=3
                       ) +
        annotation_custom(tableGrob(tab[,.(Signif,SNPs)], rows=NULL, theme=ttheme_minimal(base_size = 10,padding = unit(c(2, 1.5), "mm"), core=list(fg_params=list(col=c(tab$color, rep("black", 4)))))), xmin=-3, xmax=-1.5, ymin=1.5, ymax=3)
    print(p)
    return(tab)
})
## ################################################################
##' Compare effect size of associations run with btrecase with or without ASE info
## Reshape to long format (one row per prior) and plot effect-size densities
## split by model (with/without ASE) and by prior.
btrecase.com.l <- lapply(names(btrecase.comp), function(i) {
    dt <- reshape(btrecase.comp[[i]][,.(Gene_id, tag, log2_aFC_mean.norm, log2_aFC_mean.mix, model.norm, model.mix)],
                  direction="long",
                  varying=list(c( "log2_aFC_mean.norm", "log2_aFC_mean.mix"), c("model.norm" ,"model.mix" )),
                  v.names=c("log2_aFC_mean", "model"),
                  times=c("normal", "mix"),
                  timevar="prior"
                  )
    model <- ggplot(dt, aes(x=log2_aFC_mean, color=model)) +
        geom_density() +
        facet_grid(prior ~., scales="free") +
        ggtitle(paste0("Effect size for associations with or without ASE info\n by model and prior ", i))
    prior <- ggplot(dt, aes(x=log2_aFC_mean, color=prior)) +
        geom_density() +
        facet_grid(model ~., scale="free") +
        ggtitle(paste0("Effect size for associations with or without ASE info\n by model and prior ", i))
    print(plot_grid(model, prior, ncol=1))
})
###########################################################################################################################################
##' Calculate PIP by rejection zone in trec.m and add relevant columns
## Bayesian trec : using normal approximation calculate proportion of
## posterior out of rejection zone. If the mean of the posterior is
## within the rejection zone (-r,r) I set the posterior out of the
## rejection zone as 0% as I dont want to call any of those
## associations significant. If the rejection zone is narrow I could
## have a high % of the posterior out of the zone. Then I coded a
## variable, null.rej: "yes" if the % of the posterior out of the
## rejection zone is below a threshold I define (post.level) and "no"
## otherwise.
rej<-c(0,0.06, 0.08) ## this is for "SpikeMixV3_2", "SpikeMixV3_3", based on 95% and 99% of prior within +/- rej
## Trec: for each prior, apply rejection zones (0, 0.06) at post.level 0.95 and
## (0, 0.08) at post.level 0.99, then stack the two recodings per prior.
post.dt <- lapply(trec.m, function(i){
    rbindlist(mapply(function(x,y,z) {
        dt <- rej.recode(a=x,b=y,c=z)
        dt[, post.level:=z]
        setkey(dt, Gene_id, tag)
    }, x=list(c(0,0.06), c(0, 0.08)), z=list(0.95, 0.99), MoreArgs=list(y=i), SIMPLIFY=F))
})
## Trecase: same rejection-zone recoding for the mixture-prior Btrecase runs
## (elements 2 and 3 of `btrecase`).
post.btrecase <-lapply(btrecase[2:3], function(i){
    rbindlist(mapply(function(x,y,z) {
        dt <- rej.recode(a=x,b=y,c=z)
        dt[, post.level:=z]
        setkey(dt, Gene_id, tag)
    }, x=list(c(0,0.06), c(0, 0.08)), z=list(0.95, 0.99), MoreArgs=list(y=i), SIMPLIFY=F))
})
######################################################################################################################################
##' # Use Gtex-ebv as gold standard and compare to Trec,
##' # DEseq2, Rasqual and Trecase. Choose FDR in Dseq that gives a similar number of
##' # associations as Gtex-ebv
########################################################################################################################################
##' Open and format datasets: Gtex-ebv, DEseq and Rasqual
########################################
## Look at Gtex ebv for chromosome 22 ##
########################################
## ebv <- fread(cmd="zcat /mrc-bsu/scratch/ev250/GTEx_Analysis_v7_eQTL/Cells_EBV-transformed_lymphocytes.allpairs.txt.gz | awk -F'\t' 'NR == 1 || $2~/^22_/' ", header=T, sep="\t")
ebv <- fread(cmd=paste("zcat", snakemake@input[['gtex']], "| awk -F'\t' 'NR==1 || $2~/^22_/' "))
## ebv.sig <- fread(cmd="zcat /mrc-bsu/scratch/ev250/GTEx_Analysis_v7_eQTL/Cells_EBV-transformed_lymphocytes.v7.signif_variant_gene_pairs.txt.gz")
ebv.sig <- fread(cmd=paste("zcat", snakemake@input[['sigGtex']]))
## Strip the Ensembl version suffix and keep only genes also run in trec.
ebv[, Gene_id:=gsub("\\..*","",gene_id)]
ebv <- ebv[Gene_id %in% unique(trec$Gene_id),]
## Recode variant ids to the "pos:ref:alt" style used by the tag column.
ebv[, SNP:=gsub("^22_|_b37$", "", variant_id)][, SNP:=gsub("_", ":",SNP)]
## Flag GTEx-significant pairs (null="no"); everything else becomes null="yes".
ebv.sig <- ebv.sig[variant_id %in% ebv$variant_id,][, null:="no"]
ebv <- merge(ebv,ebv.sig[,.(gene_id, variant_id, null)], by=c("gene_id", "variant_id"), all.x=T)
ebv[is.na(null), null:="yes"]
###########
## DEseq ##
##########
## Open and format DEseq2 output (run by Wei-Yu)
## dseq <- rbindlist(lapply(list.files("/mrc-bsu/scratch/wyl37/ElenaData/RunNBmodel", pattern="RunNBmodelbatch[0-9]+_chr22.nbmodelRes.csv", full.names=T), fread))
dseq <- rbindlist(lapply(snakemake@input[['dseq']], fread))
dseq[, SNP:=NULL]
## remove pval NA
dseq <- dseq[!is.na(pvalue),]
## add log2_aFC to dseq
dseq[, log2_aFC:=log2FoldChange*2]
## add BH correction to dseq
setkey(dseq, pvalue)
dseq[,p.adj:= p.adjust(pvalue,method = "BH")]
setkey(dseq, p.adj)
## Add null column for dseq based on 5% FDR and exclude "ENSG00000211664"
## NOTE(review): the exclusion of ENSG00000211664 mentioned above is not
## implemented anywhere below -- confirm whether it is still intended.
dseq[,null.fdr5:="yes"][p.adj<=0.05, null.fdr5:="no"]
## Add gene distance
dseq <- gene.d(dseq, gt22[, .(gene_id, start,end,chrom)], snp="tag")
## Add EAF.tag
dseq <- merge(dseq, EAF, by.x="tag", by.y="snp")
## DEseq at various FDR to Gtex EBV cell lines at 5% FDR
## NOTE(review): `dseq[, null.fdr:=...]` modifies dseq by reference on every
## iteration; the copy only happens at the final column selection -- presumably
## intentional, but dseq retains the last iteration's null.fdr/Fdr columns.
dseq.l <- rbindlist(lapply(c(0.1, 0.05, 0.01, 0.001), function(i) {
    tmp <- dseq[, null.fdr:="yes"][p.adj<=i, null.fdr:="no"]
    tmp[, Fdr:= i]
    tmp <- tmp[, .(Gene_id, tag, tag.EAF, log2FoldChange,log2_aFC, p.adj, null.fdr, Fdr, gene.dist)]
    return(tmp)
}))
#############
## Rasqual ##
#############
## rasq <- list.files("/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/rasqual/output", "ENSG[0-9]+.*txt", full.names=T)
## rasq.header <- list.files("/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/rasqual/output", "rasqual.header.txt", full.names=T)
rasq <- list.files(snakemake@params[['rasqual']], "ENSG[0-9]+.*txt", full.names=T)
rasq.header <- paste0(snakemake@params[['rasqual']], "/rasqual.header.txt")
rasqual <- rbindlist(lapply(rasq, format_rasqual, top.hits="no", header=rasq.header))
## rasqual didnt run some genes "Estimated computational time is too long...", remove from output
rasqual <- rasqual[rs_id != "SKIPPED",]
## select associations run in Trec, no tagging was done in rasqual
rasqual <- merge(trec[,.(Gene_id, tag, tag.EAF)], rasqual, by.x=c("Gene_id", "tag"), by.y=c("gene_id", "rs_id"))
rasqual[, p_adjust:= p.adjust(p, method="BH")]
## transform Fold change to log2aFC
rasqual[, log2_aFC:=log2(Fold_change)]
## add various fdr cut-offs to rasqual: creates columns
## null.fdr0.1, null.fdr1, null.fdr5 and null.fdr10 (names are FDR in percent)
for (i in c(10^(seq(-3,-1,1)), 0.05)){
    rasqual[,eval(paste0("null.fdr",i*100)):= "yes"][p_adjust<=i, paste0("null.fdr",i*100):="no"]
}
###################################################
##' Compare Bayesian trec to Gtex EBV cell lines
###################################################
## merge post.dt with ebv but on the same associations as rasqual, so that all
## method comparisons are made on an identical set of gene-SNP pairs.
trec.ebv <- lapply(post.dt, function(i) {
    dt <- merge(i, ebv, by.x=c("Gene_id", "tag"), by.y=c("Gene_id", "SNP"))
    dt <- merge(dt, rasqual[, .(Gene_id, tag, Chrom)], by=c("Gene_id", "tag"))
    dt <- add.signif(dt, x1="null.rej", x2="null", col=c("trec", "Gtex-ebv"))
})
##+ fig.width= 9.54, fig.height=7.15
tables.trec <- plot_tab(a=trec.ebv, fac=list(rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2))), colors=c(None="#999999", trec="yellow3", `Gtex-ebv`="#0072B2", Both= "#D55E00"),
                        title = "Gtex-ebv at 5%FDR vs Trec by PIP and rejection level\n using prior ",
                        xpos=0.25, ypos=0.7,
                        x1=c(rep("log2_aFC_mean",3),"null.rej") ,
                        x2=c(rep("slope",3), "null"),
                        #s=50000,
                        xl="eQTL effect Bayesian trec",
                        yl="eQTL effect Gtex",
                        col=c("trec", "Gtex-ebv"),
                        title.size=16,
                        axis.title = 14,
                        axis.text=12,
                        legend.title=14,
                        legend.text=12,
                        legend.symbol=5,
                        point.size=3
                        )
## Significance counts per prior/zone.
lapply(tables.trec, function(i) rbindlist(lapply(i, format.tab , "Gtex-ebv")))
#########################################
##' Compare DEseq to Gtex EBV
#########################################
dseq.ebv <- merge(dseq.l, ebv, by.x=c("Gene_id", "tag"), by.y=c("Gene_id", "SNP"))
## make it comparable with rasqual
dseq.ebv <- merge(dseq.ebv, rasqual[, .(Gene_id, tag, Chrom)], by=c("Gene_id", "tag"))
dseq.ebv <- add.signif(dseq.ebv, x1="null.fdr", x2="null", col=c("DEseq","Gtex-ebv") )
tab <- tab2bplot(dseq.ebv, var= c("Signif", "Fdr"), colors=c("None"="#999999","DEseq"="yellow3", "Both"="#D55E00", "Gtex-ebv"="#0072B2"))
## One count table per DEseq FDR level, rendered as table grobs for the facets.
tables.dseq <- lapply(c(0.1, 0.05, 0.01, 0.001), function(i) {
    tab[Fdr==i,]
})
gl <- lapply(tables.dseq, function(i) tableGrob(i[,.(Signif,SNPs)], rows=NULL, theme=ttheme_minimal(base_size = 14,padding = unit(c(2, 1.5), "mm"), core=list(fg_params=list(col=c(i$color, rep("black", 4)))), xmin=-2.5, xmax=-1.5, ymin=1.5, ymax=2.5)))
dt.tab <- data.table(Fdr=c(0.1, 0.05, 0.01, 0.001), grob=gl)
lab <- paste('FDR(%) =', c(0.1, 0.05, 0.01, 0.001)*100)
names(lab) <- c(0.1, 0.05, 0.01, 0.001)
##' DEseq with different FDR relative to Gtex-EBV
#+ fig.width= 12, fig.height=21
btrecase.plot(dt=dseq.ebv[Signif != "None" ,] , x1=c(rep("log2FoldChange",3),"null.fdr") ,
              x2=c(rep("slope",3), "null"),
              xl="eQTL effect DEseq",
              yl="eQTL effect Gtex",
              col=c("DEseq", "Gtex-ebv"),
              title="DEseq2 vs Gtex-EBV (5% FDR)",
              title.size=16,
              axis.title = 14,
              axis.text=12,
              legend.title=14,
              legend.text=12,
              legend.symbol=5,
              point.size=3
              ) + facet_grid(Fdr~., labeller=labeller(Fdr=lab))+
    theme(strip.background=element_rect(fill="white") ,strip.text.y = element_text(size = 14)) +
    geom_custom(data=dt.tab, aes(grob=grob), x = 0.15, y = 0.75)
## DESeq: Significant and total associations by FDR
rbindlist(lapply(tables.dseq, format.tab, "Gtex-ebv" ))
## I carry on with DEseq2 model using fdr 0.01 and 0.001 which are the best matches to Gtex-ebv
###################
##' Rasqual vs Gtex
####################
## Reshape rasqual to long format: one row per FDR level (Fdr_per in percent),
## with the corresponding null call in null.Fdr.
rasq.l <- reshape(rasqual[,.(Gene_id, tag, log2_aFC, null.fdr0.1, null.fdr1, null.fdr5, null.fdr10)],
                  direction="long",
                  varying=list( c("null.fdr0.1", "null.fdr1", "null.fdr5", "null.fdr10")),
                  v.names="null.Fdr",
                  times=as.character(c(0.1, 1, 5, 10)),
                  timevar="Fdr_per")
rasq.ebv <- merge(rasq.l, ebv, by.x=c("Gene_id", "tag"), by.y=c("Gene_id", "SNP"))
rasq.ebv <- add.signif(rasq.ebv, x1="null.Fdr", x2="null", col=c("Rasqual", "Gtex-ebv"))
tab <- tab2bplot(rasq.ebv, var= c("Signif", "Fdr_per"), colors=c("None"="#999999","Rasqual"="yellow3", "Both"="#D55E00", "Gtex-ebv"="#0072B2"))
## One count table per rasqual FDR level, rendered as table grobs for the facets.
tables.ras <- lapply(unique(tab$Fdr_per), function(i) {
    tab[Fdr_per==i,]
})
gl <- lapply(tables.ras, function(i) tableGrob(i[,.(Signif,SNPs)], rows=NULL, theme=ttheme_minimal(base_size = 14,padding = unit(c(2, 1.5), "mm"), core=list(fg_params=list(col=c(i$color, rep("black", 4)))), xmin=-2.5, xmax=-1.5, ymin=1.5, ymax=2.5)))
dt.tab <- data.table(Fdr_per=unique(tab$Fdr_per), grob=gl)
lab <- paste('FDR(%) =', unique(tab$Fdr_per))
names(lab) <- unique(tab$Fdr_per)
##' # Rasqual with different FDR relative to Gtex-EBV
#+ fig.width= 12, fig.height=21
btrecase.plot(dt=rasq.ebv[Signif != "None" ,] , x1=c(rep("log2_aFC",3),"null.Fdr") ,
              x2=c(rep("slope",3), "null"),
              xl="eQTL effect Rasqual",
              yl="eQTL effect Gtex",
              col=c("Rasqual", "Gtex-ebv"),
              title="Rasqual vs Gtex-EBV (5% FDR)",
              title.size=16,
              axis.title = 14,
              axis.text=12,
              legend.title=14,
              legend.text=12,
              legend.symbol=5,
              point.size=3
              ) + facet_grid(Fdr_per~., labeller=labeller(Fdr_per=lab))+
    theme(strip.background=element_rect(fill="white") ,strip.text.y = element_text(size = 14)) +
    geom_custom(data=dt.tab, aes(grob=grob), x = 0.15, y = 0.75)
rbindlist(lapply(tables.ras, format.tab, "Gtex-ebv" ))
##' Carry on with 0.01 and 0.001 FDR
###################
##' Trecase vs Gtex
###################
## Same comparison as trec.ebv above, but for the full Btrecase (ASE-aware)
## model; restricted to associations also run in rasqual.
trecase.ebv <- lapply(post.btrecase, function(i){
    dt <- merge(i, ebv, by.x=c("Gene_id", "tag"), by.y=c("Gene_id", "SNP"))
    dt <- merge(dt, rasqual[, .(Gene_id, tag, Chrom)], by=c("Gene_id", "tag"))
    dt <- add.signif(dt, x1="null.rej", x2="null", col=c("trecase", "Gtex-ebv"))
})
##+ fig.width= 9.78, fig.height=7.97
tables.trecase <- plot_tab(a=trecase.ebv, fac=list(rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2))), colors=c(None="#999999", trecase="yellow3", `Gtex-ebv`="#0072B2", Both= "#D55E00"),
                           title = "Gtex-ebv at 5%FDR vs Trecase by PIP and rejection level\n using prior ",
                           xpos=0.25, ypos=0.75,
                           x1=c(rep("log2_aFC_mean",3),"null.rej") ,
                           x2=c(rep("slope",3), "null"),
                           #s=50000,
                           xl="eQTL effect trecase",
                           yl="eQTL effect Gtex",
                           col=c("trecase", "Gtex-ebv"),
                           title.size=16,
                           axis.title = 14,
                           axis.text=12,
                           legend.title=14,
                           legend.text=12,
                           legend.symbol=5,
                           point.size=3
                           )
## Significance counts per prior/zone.
lapply(tables.trecase, function(i) rbindlist(lapply(i, format.tab , "Gtex-ebv")))
##########################################################################
##' # Compare trec, trecase and rasqual with Dseq2 model ###############
#########################################################################
## merge post.dt and dseq
## FDR levels used for the DEseq comparisons below.
fdr <- c(0.001, 0.01)
btrec.dsq <- lapply(post.dt, function(i) {
    rbindlist(lapply(fdr, function(j) {
        dt <- merge(i,dseq.l[Fdr %in% j,], by=c("Gene_id", "tag", "tag.EAF", "gene.dist"))
        setkey(dt, p.adj)
        dt <- add.signif(dt, x1="null.rej", x2="null.fdr", col=c("trec","dseq") )
    }))
})
###################################
##' Trec vs Dseq2 model at 0.1% FDR
###################################
lab <- paste('r = \u00B1',rej)
names(lab) <- rej
#+ fig.width= 7.4, fig.height=6.7
l2 <- lapply(1:2, function(i) plot_tab(a=btrec.dsq[i], fac=list(Fdr=rep(0.001,4), rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2))),
                                       colors=c(None="#999999", trec="yellow3", dseq="#0072B2", Both= "#D55E00"),
                                       title = "DEseq at 0.1 %FDR vs Trec by PIP\n and rejection level\n using prior ",
                                       var=setNames(0.001, "Fdr"),
                                       xpos=0.25, ypos=0.75,
                                       facet.fac=c("rej.level", "post.level"),
                                       x1=c(rep(cols[1],3),"null.rej"),
                                       x2=c(rep("log2_aFC",3), "null.fdr"),
                                       xl="eQTL effect Trec",
                                       yl="eQTL effect Dseq2",
                                       col=c("trec", "dseq"),
                                       title.size=12,
                                       axis.title = 10,
                                       axis.text=10,
                                       legend.title=12,
                                       legend.text=10,
                                       legend.symbol=3,
                                       tab.size=8,
                                       point.size=1.5))
##' Comparing Trec with DEseq by prior and Fdr
lapply(fdr, function(j) lapply(only_tab(a=btrec.dsq, fac=list(Fdr=rep(j, 4), rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2)))),
                               function(i) rbindlist(lapply(i, format.tab,"dseq"))))
## merge post.btrecase and dseq (full ASE-aware model vs DEseq)
btrecase.dsq <- lapply(post.btrecase, function(i) {
    rbindlist(lapply(fdr, function(j) {
        dt <- merge(i,dseq.l[Fdr %in% j,], by=c("Gene_id", "tag", "tag.EAF", "gene.dist"))
        setkey(dt, p.adj)
        dt <- add.signif(dt, x1="null.rej", x2="null.fdr", col=c("trecase","dseq") )
    }))
})
#######################################
##' Trecase vs Dseq2 model at 0.1% FDR
#######################################
#+ fig.width= 7.4, fig.height=6.7
## NOTE(review): the axis labels below read "Trec"/"Dseq2" although this plot
## compares Trecase against DEseq -- looks like a copy-paste label; confirm.
btrecase.dsq.p <- lapply(1:2, function(i) plot_tab(a=btrecase.dsq[i], fac=list(Fdr=rep(0.001,4), rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2))),
                                                   colors=c(None="#999999", trecase="yellow3", dseq="#0072B2", Both= "#D55E00"),
                                                   var=setNames(0.001, "Fdr"),
                                                   title = "DEseq at 0.1 %FDR vs Trecase by PIP\n and rejection level\n using prior ",
                                                   xpos=0.25, ypos=0.75,
                                                   facet.fac=c("rej.level", "post.level"),
                                                   x1=c(rep(cols[1],3),"null.rej"),
                                                   x2=c(rep("log2_aFC",3), "null.fdr"),
                                                   xl="eQTL effect Trec",
                                                   yl="eQTL effect Dseq2",
                                                   col=c("trecase", "dseq"),
                                                   title.size=16,
                                                   axis.title = 14,
                                                   axis.text=12,
                                                   legend.title=14,
                                                   legend.text=12,
                                                   legend.symbol=5,
                                                   tab.size=12,
                                                   point.size=3))
##' Comparing Trecase with DEseq by prior and Fdr
lapply(fdr, function(j) lapply(only_tab(a=btrecase.dsq, fac=list(Fdr=rep(j, 4), rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2)))),
                               function(i) rbindlist(lapply(i, format.tab,"dseq"))))
#################################################
##' Compare rasqual with deseq2
#################################################
## merge with deseq2
## Dead code commented out: this single-FDR merge was immediately overwritten
## by the rbindlist() below, which performs the same merge for every FDR level.
## rasq.dseq <- merge(dseq.l[Fdr==fdr[1],], rasq.l, by=c("Gene_id", "tag"), suffixes=c(".dseq", ".rasqual"))
## Pairwise DEseq vs rasqual comparison at each FDR level (matching DEseq's
## proportion-scale Fdr to rasqual's percent-scale Fdr_per).
rasq.dseq <- rbindlist(lapply(fdr, function(i) {
    dt <- merge(dseq.l[Fdr == i,], rasq.l[Fdr_per == i*100,], by=c("Gene_id", "tag"),
                suffixes=c(".dseq", ".rasqual"))
    dt <- add.signif(dt, "null.fdr", "null.Fdr", col=c("deseq", "rasqual"))
    return(dt)
}
))
## Significance-count tables (one per FDR) embedded as grobs in the facets.
rasq.dseq.tab <- tab2bplot(rasq.dseq, var=c("Signif", "Fdr"), colors=c(None="#999999", deseq="yellow3", rasqual="#0072B2", Both= "#D55E00"))
tables.rasq.dseq <- lapply(fdr, function(i) {
    rasq.dseq.tab[Fdr==i,]
})
gl <- lapply(tables.rasq.dseq, function(i) tableGrob(i[,.(Signif,SNPs)], rows=NULL, theme=ttheme_minimal(base_size = 14,padding = unit(c(2, 1.5), "mm"), core=list(fg_params=list(col=c(i$color, rep("black", 4)))), xmin=-2.5, xmax=-1.5, ymin=1.5, ymax=2.5)))
dt.tab <- data.table(Fdr=fdr, grob=gl)
lab <- paste('FDR =', fdr)
names(lab) <- fdr
#############################
##' DEseq vs rasqual by FDR
#############################
#+ fig.width= 6, fig.height=8
btrecase.plot(dt=rasq.dseq[Signif != "None" ,] , x1=c(rep("log2_aFC.dseq",3),"null.fdr") ,
              x2=c(rep("log2_aFC.rasqual",3), "null.Fdr"),
              xl="eQTL effect DEseq",
              yl="eQTL effect Rasqual",
              col=c("deseq", "rasqual"),
              title="DEseq2 vs Rasqual by FDR",
              title.size=16,
              axis.title = 14,
              axis.text=12,
              legend.title=14,
              legend.text=12,
              legend.symbol=5,
              point.size=3
              ) + facet_grid(Fdr~., labeller=labeller(Fdr=lab))+
    theme(strip.background=element_rect(fill="white") ,strip.text.y = element_text(size = 14)) +
    geom_custom(data=dt.tab, aes(grob=grob), x = 0.15, y = 0.75)
## Counts per significance category and FDR.
rbindlist(lapply(tables.rasq.dseq, format.tab,"deseq"))
##############################################################################################
##' Compare trec and btrecase using same prior distribution but only association with ASE info
##############################################################################################
##' Compare trecase with trec using rejection regions and post.level
##+ fig.width= 8.8, fig.height=8.3
## Keep only Trecase associations fitted with ASE information (model ==
## "trec-ase") so both methods are compared on the same data.
trec.trecase.ase <- mapply(function(a,b) {
    dt <- merge(a,b[model=="trec-ase",], by=c("Gene_id", "tag", "tag.EAF", "gene.dist", "post.level", "rej.level"), suffixes=c(".trec", ".trecase"))
    dt <- add.signif(dt, "null.rej.trec" , "null.rej.trecase" , col=c("trec","trecase") )
}, a=post.dt, b=post.btrecase, SIMPLIFY=F)
tables.trec.trecase <- plot_tab(trec.trecase.ase,
                                fac=list(rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2))),
                                colors=c(None="#999999", trec="yellow3", trecase="#0072B2", Both= "#D55E00"),
                                title = "Trec vs trecase by PIP and rejection level\n using prior ",
                                xpos=0.25, ypos=0.75,
                                x1=c(rep("log2_aFC_mean.trec",3),"null.rej.trec") ,
                                x2=c(rep("log2_aFC_mean.trecase",3), "null.rej.trecase"),
                                #s=50000,
                                xl="eQTL effect trec",
                                yl="eQTL effect trecase",
                                col=c("trec", "trecase"),
                                title.size=16,
                                axis.title = 14,
                                axis.text=12,
                                legend.title=14,
                                legend.text=12,
                                legend.symbol=5,
                                point.size=3
                                )
##' Number of associations by model
lapply(tables.trec.trecase, function(i) rbindlist(lapply(i, format.tab, "trec" )))
################################
##' Rasqual vs Btrec
################################
#+ fig.width= 7.4, fig.height=6.7
## Note: `fdr` here is still the two-level vector c(0.001, 0.01) defined in the
## DEseq section above.
rasq.trec <- lapply(post.dt, function(i) {
    rbindlist(lapply(fdr, function(j) {
        dt <- merge(i,rasq.l[Fdr_per %in% as.character(j*100),], by=c("Gene_id", "tag"))
        dt <- add.signif(dt, x1="null.rej", x2="null.Fdr", col=c("trec","rasqual") )
    }))
})
rasq.trec.p <- lapply(1:2, function(i) plot_tab(a=rasq.trec[i], fac=list(Fdr_per=rep(0.1,4), rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2))),
                                                colors=c(None="#999999", trec="yellow3", rasqual="#0072B2", Both= "#D55E00"),
                                                var=setNames(0.1, "Fdr_per"),
                                                title = "Rasqual at 0.1 %FDR vs Trec by PIP\n and rejection level\n using prior ",
                                                xpos=0.25, ypos=0.75,
                                                facet.fac=c("rej.level", "post.level"),
                                                x1=c(rep(cols[1],3),"null.rej"),
                                                x2=c(rep("log2_aFC",3), "null.Fdr"),
                                                xl="eQTL effect Trec",
                                                yl="eQTL effect Rasqual",
                                                col=c("trec", "rasqual"),
                                                title.size=16,
                                                axis.title = 14,
                                                axis.text=12,
                                                legend.title=14,
                                                legend.text=12,
                                                legend.symbol=5,
                                                tab.size=10,
                                                point.size=3))
##' Comparing Rasqual to Trec by prior and Fdr
lapply(fdr*100, function(j) lapply(only_tab(a=rasq.trec, fac=list(Fdr_per=rep(j, 4), rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2)))),
                                   function(i) rbindlist(lapply(i, format.tab,"rasqual"))))
#####################################
##' Rasqual vs Btrecase ASE only
#####################################
#+ fig.width= 7.4, fig.height=6.7
## Redefines `fdr` (previously c(0.001, 0.01)) with two extra FDR levels; all
## subsequent comparisons use this four-level vector. `<-` used for assignment
## to match the rest of the script.
fdr <- c(0.001, 0.01, 0.05, 0.1)
## Trecase associations with ASE info only, merged with rasqual at each FDR.
rasq.trecase <- lapply(post.btrecase, function(i) {
    rbindlist(lapply(fdr, function(j) {
        dt <- merge(i[model=="trec-ase",] ,rasq.l[Fdr_per %in% as.character(j*100),], by=c("Gene_id", "tag"))
        dt <- add.signif(dt, x1="null.rej", x2="null.Fdr", col=c("trecase","rasqual") )
    }))
})
## Faceted scatter plots of Trecase vs rasqual effects at 0.1% FDR, one per prior.
rasq.trecase.p <-lapply(1:2, function(i) plot_tab(a=rasq.trecase[i], fac=list(Fdr_per=rep(0.1,4), rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2))),
                                                  colors=c(None="#999999", trecase="yellow3", rasqual="#0072B2", Both= "#D55E00"),
                                                  var=setNames(0.1, "Fdr_per"),
                                                  title = "Rasqual at 0.1 %FDR vs Trecase by PIP\n and rejection level\n using prior ",
                                                  xpos=0.25, ypos=0.75,
                                                  facet.fac=c("rej.level", "post.level"),
                                                  x1=c(rep(cols[1],3),"null.rej"),
                                                  x2=c(rep("log2_aFC",3), "null.Fdr"),
                                                  xl="eQTL effect Trecase",
                                                  yl="eQTL effect Rasqual",
                                                  col=c("trecase", "rasqual"),
                                                  title.size=16,
                                                  axis.title = 14,
                                                  axis.text=12,
                                                  legend.title=14,
                                                  legend.text=12,
                                                  legend.symbol=5,
                                                  point.size=3))
##' Comparing Rasqual to Trecase by prior and Fdr
lapply(fdr*100, function(j) lapply(only_tab(a=rasq.trecase, fac=list(Fdr_per=rep(j, 4), rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2)))),
                                   function(i) rbindlist(lapply(i, format.tab,"rasqual"))))
#######################################################################################
##' Hidden-GT vs Trec and Btrecase
#######################################################################################
## read file with old output with tags matching GT and hidden-GT
## match.tags <- fread("/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SingleCounting/results/refbias.gt.rna.p054.txt")
match.tags <- fread(snakemake@input[['gt_rna_sum']])
match.tags <- match.tags[,.(Gene_id, tag.rna,tag.gt, op.dir)]
## Hard-coded version kept (commented) for interactive use:
## nogt <- mapply(function(i,j) {
##     dt <- comb.files(i, paste0("^", j,"\\.ENSG[0-9]+.*stan.summary.txt"))
##     dt <- add.null(dt)
##     dt <- gene.d(dt, gt22[, .(gene_id, start,end,chrom)])
##     dt <- merge(match.tags, dt, by.x=c("Gene_id","tag.rna"), by.y=c("Gene_id", "tag"))
##     ##dt <- dt[!Gene_id %in% c( "ENSG00000093072", "ENSG00000100364", "ENSG00000241973"),]
##     dt[op.dir=="yes", log2_aFC_mean:= -log2_aFC_mean]
##     return(dt)
## },
## i=c('/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SpikeMixV3_2/RNA',
##     '/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SpikeMixV3_3/RNA'
##     ),
## j=c( rep("rbias",2)),
## SIMPLIFY=F)
## names(nogt) <- basename(dirname(c('/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SpikeMixV3_2/RNA',
##     '/mrc-bsu/scratch/ev250/EGEUV1/quant/refbias/Btrecase/SpikeMixV3_3/RNA'
##     )))
## Load hidden-GT (RNA-inferred genotype) runs and flip the sign of the effect
## where the RNA tag and the GT tag are in opposite phase (op.dir == "yes").
nogt <- mapply(function(i,j) {
    dt <- comb.files(i, paste0("^", j,"\\.ENSG[0-9]+.*stan.summary.txt"))
    dt <- add.null(dt)
    dt <- gene.d(dt, gt22[, .(gene_id, start,end,chrom)])
    dt <- merge(match.tags, dt, by.x=c("Gene_id","tag.rna"), by.y=c("Gene_id", "tag"))
    dt[op.dir=="yes", log2_aFC_mean:= -log2_aFC_mean]
    return(dt)
},
i=snakemake@params[['nogt_mix']],
j=c(rep("rbias",2)),
SIMPLIFY=F)
names(nogt) <- basename(dirname(snakemake@params[['nogt_mix']]))
## Same rejection-zone/PIP recoding as for the observed-GT runs, keyed by the
## GT tag so it can be merged with them.
post.nogt <-lapply(nogt, function(i){
    rbindlist(mapply(function(x,y,z) {
        dt <- rej.recode(a=x,b=y,c=z)
        dt[, post.level:=z]
        setkey(dt, Gene_id, tag.gt)
    }, x=list(c(0,0.06), c(0, 0.08)), z=list(0.95, 0.99), MoreArgs=list(y=i), SIMPLIFY=F))
})
###################################
##' Compare observed vs hidden-GT
##################################
## Merge the observed-GT Trecase runs with the hidden-GT runs on the GT tag.
trecase.nogt <- mapply(function(a,b) {
    dt <- merge(a ,b, by.x=c("Gene_id", "tag", "post.level", "rej.level"),
                by.y=c("Gene_id", "tag.gt", "post.level", "rej.level"),
                suffixes=c(".trecase", ".noGT"))
    dt <- add.signif(dt, "null.rej.trecase" , "null.rej.noGT" , col=c("obs_GT","hidden_GT") )
}, a=post.btrecase, b=post.nogt, SIMPLIFY=F)
#+ fig.width= 8.8, fig.height=8.3
tables.trecase.nogt <- plot_tab(trecase.nogt,
                                fac=list(rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2))),
                                colors=c(None="#999999", obs_GT="yellow3", hidden_GT="#0072B2", Both= "#D55E00"),
                                title = "Observed vs hidden GT by PIP and rejection level\n using prior ",
                                xpos=0.25, ypos=0.75,
                                x1=c(paste0(cols, ".trecase"),"null.rej.trecase") ,
                                x2=c(paste0(cols,".noGT"), "null.rej.noGT"),
                                #s=50000,
                                xl="eQTL effect obs-GT",
                                yl="eQTL effect hidden-GT",
                                col=c("obs_GT","hidden_GT"),
                                title.size=16,
                                axis.title = 14,
                                axis.text=12,
                                legend.title=14,
                                legend.text=12,
                                legend.symbol=5,
                                point.size=1.5
                                )
##' Number of associations by model
lapply(tables.trecase.nogt, function(i) rbindlist(lapply(i, format.tab, "obs_GT" )))
######################
##' Trec vs hidden-GT#
######################
## Same comparison as above, but for the Trec (total-read-count only)
## observed-GT model against the hidden-GT model.
trec.nogt <- mapply(function(a,b) {
    dt <- merge(a,b, by.x=c("Gene_id", "tag", "post.level", "rej.level"),
                by.y=c("Gene_id", "tag.gt", "post.level", "rej.level"),
                suffixes=c(".trec", ".noGT"))
    dt <- add.signif(dt, "null.rej.trec" , "null.rej.noGT" , col=c("obs_GT","hidden_GT") )
}, a=post.dt, b=post.nogt, SIMPLIFY=F)
#+ fig.width= 8.8, fig.height=8.3
## Scatter Trec vs hidden-GT effects per PIP/rejection setting; note the
## larger point.size (3) than the trecase plot above (1.5).
tables.trec.nogt <- plot_tab(trec.nogt,
                             fac=list(rej.level=c(0, 0.06, 0, 0.08), post.level=c(rep(0.95,2), rep(0.99,2))),
                             colors=c(None="#999999", obs_GT="yellow3", hidden_GT="#0072B2", Both= "#D55E00"),
                             title = "Trec vs hidden GT by PIP and rejection level\n using prior ",
                             xpos=0.25, ypos=0.75,
                             x1=c(paste0(cols, ".trec"),"null.rej.trec") ,
                             x2=c(paste0(cols,".noGT"), "null.rej.noGT"),
                             #s=50000,
                             xl="eQTL effect obs-GT",
                             yl="eQTL effect hidden-GT",
                             col=c("obs_GT","hidden_GT"),
                             title.size=16,
                             axis.title = 14,
                             axis.text=12,
                             legend.title=14,
                             legend.text=12,
                             legend.symbol=5,
                             point.size=3
                             )
##' Number of associations by model
lapply(tables.trec.nogt, function(i) rbindlist(lapply(i, format.tab, "obs_GT" )))
###################################################################################################################
##' Repeating analysis with trec and trecase with and without genotypes using posterior instead of normal approx.
###################################################################################################################
######################
##' Obs vs hidden-GT #
######################
## select spikeSlab priors
btrecase.m <- btrecase[2:3]
## One comparison plot per null threshold (PIP 95% and 99%), merging each
## spike-slab-prior observed-GT run against the matching hidden-GT run.
p <- lapply(c("null.95", "null.99"), function(i) {
    mapply(merge.plot,
           a=btrecase.m,
           prior=names(btrecase.m),
           b=nogt,
           MoreArgs=list(null=i,byx=c("Gene_id", "tag") , byy=c("Gene_id", "tag.gt"), suffixes=c(".gt", ".nogt"),
                         colsig=c("obs_GT", "hidden_GT"), siglevel=gsub("null.", "",i), xl="eQTL effect observed-GT",
                         yl="eQTL effect hidden-GT", title=paste0("PIP = ", gsub("null.","", i), "%"),
                         size.tab=8, xmin=-1, xmax=-.5, ymin=.3, ymax=.5),
           SIMPLIFY=F )})
#+ fig.width= 7, fig.height=5.5
## 2x2 grid: both priors at PIP 95% on top, both at PIP 99% below.
plot_grid(plotlist=c(p[[1]], p[[2]]), nrow=2)
|
857fbb148add42627189eaab73aff083d31d9fc6 | d8e775429487eefa8b4e5731b4d50d4a95d20cd8 | /man/TaskDens.Rd | 8a683a41e1727b0b8db70671769a80343197597f | [
"MIT"
] | permissive | sands58/mlr3proba | 766dd7d977badcd81f6efdeab1e51a2d661a0859 | 901e3bc04d4eaacc2ce28126c97df16df736b5b6 | refs/heads/master | 2021-05-17T00:48:58.863868 | 2020-03-25T22:09:24 | 2020-03-25T22:09:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,624 | rd | TaskDens.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TaskDens.R
\name{TaskDens}
\alias{TaskDens}
\title{Density Task}
\description{
This task specializes \link{Task} for density estimation problems.
The target column is assumed to be numeric.
The \code{task_type} is set to \code{"density"}.
Predefined tasks are stored in the \link[mlr3misc:Dictionary]{dictionary} \link{mlr_tasks}.
}
\examples{
task = TaskDens$new("precip", backend = data.frame(target = precip), target = "target")
task$task_type
task$truth()
}
\seealso{
Other Task:
\code{\link{TaskSurv}}
}
\concept{Task}
\section{Super class}{
\code{\link[mlr3:Task]{mlr3::Task}} -> \code{TaskDens}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{TaskDens$new()}}
\item \href{#method-truth}{\code{TaskDens$truth()}}
\item \href{#method-clone}{\code{TaskDens$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="cbind">}\href{../../mlr3/html/Task.html#method-cbind}{\code{mlr3::Task$cbind()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="data">}\href{../../mlr3/html/Task.html#method-data}{\code{mlr3::Task$data()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="droplevels">}\href{../../mlr3/html/Task.html#method-droplevels}{\code{mlr3::Task$droplevels()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="filter">}\href{../../mlr3/html/Task.html#method-filter}{\code{mlr3::Task$filter()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="format">}\href{../../mlr3/html/Task.html#method-format}{\code{mlr3::Task$format()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="formula">}\href{../../mlr3/html/Task.html#method-formula}{\code{mlr3::Task$formula()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="head">}\href{../../mlr3/html/Task.html#method-head}{\code{mlr3::Task$head()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="help">}\href{../../mlr3/html/Task.html#method-help}{\code{mlr3::Task$help()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="levels">}\href{../../mlr3/html/Task.html#method-levels}{\code{mlr3::Task$levels()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="missings">}\href{../../mlr3/html/Task.html#method-missings}{\code{mlr3::Task$missings()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="print">}\href{../../mlr3/html/Task.html#method-print}{\code{mlr3::Task$print()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="rbind">}\href{../../mlr3/html/Task.html#method-rbind}{\code{mlr3::Task$rbind()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="rename">}\href{../../mlr3/html/Task.html#method-rename}{\code{mlr3::Task$rename()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="select">}\href{../../mlr3/html/Task.html#method-select}{\code{mlr3::Task$select()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="set_col_role">}\href{../../mlr3/html/Task.html#method-set_col_role}{\code{mlr3::Task$set_col_role()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="mlr3" data-topic="Task" data-id="set_row_role">}\href{../../mlr3/html/Task.html#method-set_row_role}{\code{mlr3::Task$set_row_role()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\subsection{Method \code{new()}}{
Creates a new instance of this \link[R6:R6Class]{R6} class.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TaskDens$new(id, backend, target)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{id}}{(\code{character(1)})\cr
Identifier for the new instance.}
\item{\code{backend}}{(\link{DataBackend})\cr
Either a \link{DataBackend}, or any object which is convertible to a \link{DataBackend} with \code{as_data_backend()}.
E.g., a \code{data.frame()} will be converted to a \link{DataBackendDataTable}.}
\item{\code{target}}{(\code{character(1)})\cr
Name of the target column.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-truth"></a>}}
\subsection{Method \code{truth()}}{
Returns the target column for the specified \code{rows}; because density estimation is
unsupervised, this value should not be interpreted as a 'true' prediction.
Defaults to all rows with role "use".
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TaskDens$truth(rows = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{rows}}{\code{integer()}\cr
Row indices.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
\code{numeric()}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{TaskDens$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
e5b5ffbc72f6c2e2ff088a8bfc74ee0a16db8e35 | 241e5ec1146e8f606eed1ddb1f578592af7fbc03 | /R/Hilmo_import_csv.R | 256252c97b515839b077a8391928a6cc03f9cd89 | [] | no_license | vilponk/hilmotools | a5cece4af47a491c98bc2a44ce0f9092abea5b0d | f04a244cd35cae8c3b7ed2e9ec2b9cf8a9b8ccde | refs/heads/master | 2021-03-21T03:09:40.120896 | 2020-03-14T10:54:54 | 2020-03-14T10:54:54 | 247,259,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 881 | r | Hilmo_import_csv.R |
#' Import a Hilmo register .csv file
#'
#' Reads a semicolon-delimited Hilmo export and returns a tidied subset of
#' columns. A visit identifier `ID` is built by concatenating the personal
#' number (`tnro`) and the discharge date (`lahtopvm`) with "/" characters
#' removed; the admission date (`tulopvm`, "%d/%m/%Y") is parsed into a Date
#' column `pvm` and its year kept as `year`.
#'
#' @param filename Path to the semicolon-delimited file to import.
#' @return A tibble with columns ID, tnro, SUKUP, IKA, PALTU, KOKU,
#'   TMPKOODI, KOODI1, pvm and year.
#' @keywords csv hilmo import
#' @export
#' @examples
#' \dontrun{
#' Hilmo_import_csv("hilmo_export.csv")
#' }
Hilmo_import_csv <- function(filename) {
  # NOTE: attaching tidyverse inside the function is a side effect, kept for
  # backwards compatibility with existing scripts that rely on it.
  library(tidyverse)
  data <- read_delim(filename, ";", escape_double = FALSE, trim_ws = TRUE)
  data %>%
    select(SUKUP, IKA, TMPKOODI, KOODI1, lahtopvm, tulopvm, tnro, PALTU, KOKU) %>%
    # Build the visit ID from person number + discharge date. (The previous
    # `mutate(ID = rep(NA))` step was dead code -- unite() creates the column
    # itself -- and has been removed; the final select() fixes column order.)
    unite(ID, tnro, lahtopvm, sep = "", remove = FALSE) %>%
    mutate(ID = str_replace_all(ID, "/", "")) %>%
    # Parse the admission date before splitting the raw string into parts.
    mutate(pvm = as.Date(tulopvm, format = "%d/%m/%Y")) %>%
    separate(tulopvm, into = c("day", "month", "year"), sep = "/") %>%
    select(-day, -month) %>%
    select(ID, tnro, SUKUP, IKA, PALTU, KOKU, TMPKOODI, KOODI1, pvm, year)
}
|
d900f11382618c2d3ed72926075a05076f88bfde | 198ff2dd375b7bcf822662bc2dbf31ec87cfdc46 | /Packages/diff/R/Hessian.R | 76c2882f495839ab19f29f335091962429530811 | [] | no_license | AleMorales/DynamicPhotosynthesis | 7905f7bbf83e347b6b1bea0d39627565b6abcd6e | 82486e89635ed116c814eb8b939338d36734c616 | refs/heads/master | 2021-06-14T14:57:48.797872 | 2021-05-26T15:47:13 | 2021-05-26T15:47:13 | 30,246,331 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,076 | r | Hessian.R | # For multivariate- functions
# Finite difference -----------------------------------------------------------------------------------------------
#' @export
finite_hess <- function(ofun, h = 1e-4, d = 0.1, zero.tol = sqrt(.Machine$double.eps/7e-7), r = 4, v = 2, ...) {
  # Factory: returns a closure computing the Hessian of `ofun` at `pars`
  # by Richardson-extrapolated finite differences (numDeriv). The tuning
  # parameters (h, d, zero.tol, r, v) are forwarded via method.args.
  function(pars, ...) {
    numDeriv::hessian(
      func = ofun,
      x = pars,
      method = "Richardson",
      method.args = list(eps = h, d = d, zero.tol = zero.tol, r = r, v = v),
      ...
    )
  }
}
# Complex step ----------------------------------------------------------------------------------------------------
#' @export
complex_hess <- function(ofun) {
  # Factory: returns a closure computing the Hessian of `ofun` via
  # complex-step differentiation (numDeriv method "complex"); `ofun` must
  # accept complex-valued inputs for this to work.
  function(pars, ...) numDeriv::hessian(func = ofun, x = pars, method = "complex", ...)
}
# Symbolic --------------------------------------------------------------------------------------------------------
#' @export
symbolic_hess <- function(ofun, parnames, ...) {
  # Differentiate the objective symbolically once, ahead of time.
  second_deriv <- Deriv::Deriv(ofun, parnames, nderiv = 2, ...)
  # Returned closure: evaluate the symbolic second derivatives, then reshape
  # the flat (named) result row-by-row into a square Hessian matrix.
  function(pars, ...) {
    raw <- second_deriv(pars, ...)
    matrix(unname(raw), ncol = length(pars), byrow = TRUE)
  }
}
|
8f8c8fd9c1fc072d1a25a331041902fa519dde63 | 90f0578e52e91b50b67f7f762d9cfc14e93928e3 | /R/Scripts/unsupervised_learning/k_means_clustering1.R | 099b77a91342aeca9298ece5250be0abd0b34544 | [] | no_license | tdworowy/Data_science_notes | 0d755f1de1a1e55398413d45f26055185285a0a5 | 3f08e0a1d87d78c663dd91d413f15be8cf813e84 | refs/heads/master | 2021-06-23T02:05:18.234198 | 2021-01-09T09:36:21 | 2021-01-09T09:36:21 | 173,426,255 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 477 | r | k_means_clustering1.R | set.seed(9999)
x <- rnorm(48, mean = rep(1:3,each=8),sd=0.2)
y <- rnorm(48, mean = rep(c(1,2,1),each=8),sd=0.2)
df <- data.frame(x,y)
k_means <- kmeans(df,centroids=3) # need to guess number of clusters in advance
library(cluster)
clusplot(df, k_means$cluster, color=TRUE, shade=TRUE,
labels=2, lines=0) # plot clusters
plot(x,y, col=k_means$cluster, pch=19) # color clusters on plot
points(k_means$centers, col = 1:3, pch=3, cex = 3, lwd = 3) # plot centroids |
b039e78b7862f23deb1f1c218a41b5a15a419520 | 0c7a06b9b2e1d9df4d085ead823e54861c8575fc | /server-side/code/LINCS.data.statistics.R | f5e6ddc1c512cfdb96a466851a12093d29cfe53c | [] | no_license | bioklab/CancerImmunoTherapy | b59fc5a794b8f8bb16ea5c4fa63a727222c9d48e | bb874859dc01933704313145dd5f53628bb6a156 | refs/heads/main | 2023-06-16T16:04:35.293470 | 2021-07-11T22:33:37 | 2021-07-11T22:33:37 | 385,063,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 485 | r | LINCS.data.statistics.R | require(plyr)
require(dplyr)
df1 <- fread(input= 'PhaseI/GSE92742_Broad_LINCS_inst_info.txt',header=TRUE) %>% as.data.frame
df2 <- fread(input= 'PhaseII/GSE70138_Broad_LINCS_inst_info_2017-03-06.txt',header=TRUE) %>% as.data.frame
col.name <- c('cell_id','pert_type')
df <- rbind(df1[,col.name],df2[,col.name])
df <- table(df) %>% as.data.frame %>% print
df <- dcast(df,cell_id ~ pert_type,value.var='Freq')
write.csv(x=df,file='LINCS.data.statistics.csv',quote=FALSE,row.names=FALSE) |
d50d1a98e0831368d425157108099a9b43c23b4c | 77d9df18e27e81c6cd7fa91c7808433b7d0a128b | /plot6.R | c44b1e0a3b9ec8212a0960ffa21ac845236e7324 | [] | no_license | krsn4/DataAnalysisCourseProject2 | 7a72a37fe52549b7f3722906f8020abcc3cfa962 | fc02f895a1a469eaa6433208b1e8df90f24f5fa0 | refs/heads/master | 2020-12-01T16:00:08.307049 | 2019-12-29T22:19:48 | 2019-12-29T22:19:48 | 230,691,627 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 832 | r | plot6.R | #fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
#download.file(fileUrl, destfile = "dataset.zip")
#unzip("dataset.zip")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#6 (Emissions from MC, Baltimore vs Los Angeles)
NEI_LA <- NEI[NEI$fips == "06037",]
NEI_LA_mc <- (NEI_LA$SCC %in% SCC[motor_vehicles,]$SCC)
#g5 <- ggplot(NEI_LA[NEI_LA_mc , ], aes(x=year, y=Emissions))
#g5 + stat_summary(geom="line", fun.y = "sum") + ylab("total emissions of PM2.5 (tons) due to Motor vehicles in Los Angeles")
Bal_LA <- rbind(NEI_bal[NEI_bal_mc,], NEI_LA[NEI_LA_mc,])
png(file = "plot6.png")
g6 <- ggplot(Bal_LA, aes(x=year, y=Emissions, col = fips))
g6 + stat_summary(geom="line", fun.y = "sum") + ylab("total emissions of PM2.5 (tons) due to Motor vehicles")
dev.off()
|
acdf01b2b6b044e8588c639ab46e8589ba8ef893 | 34534a12548e0ed95c1ae9e9fb435946eff92838 | /man/R0_recurrence_seroprev_whole.Rd | b6324332cb910ed1889dd2083f670f48b3a3790d | [] | no_license | mrc-ide/YFestimation | d9b52bd9d9eda1634de6bf4d9e1cd72f29c69d2d | a352fe9369c4e1ec5915d63f0b36c8cfcc18894b | refs/heads/master | 2021-03-08T16:29:37.221197 | 2021-03-03T18:07:05 | 2021-03-03T18:07:05 | 176,555,403 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,029 | rd | R0_recurrence_seroprev_whole.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R0_model_functions.R
\name{R0_recurrence_seroprev_whole}
\alias{R0_recurrence_seroprev_whole}
\title{function to calculate seroprevalence everywhere by recurrence relation in R0 model}
\usage{
R0_recurrence_seroprev_whole(adm, dat, R0, t0_vac_adm, dim_year, dim_age,
p_prop_3d, P_tot_2d, inc_v3d, pop_moments_whole, vac_eff_arg)
}
\arguments{
\item{adm}{admin 1 unit of interest}
\item{dat}{environmental and occurrence data}
\item{R0}{value of R0 for survey}
\item{t0_vac_adm}{time of vaccination for admin, scalar}
\item{dim_year}{years of interest}
\item{dim_age}{ages of interest}
\item{p_prop_3d}{population proportion}
\item{P_tot_2d}{population total}
\item{inc_v3d}{incidence of vaccination}
\item{pop_moments_whole}{aggregated population moments}
\item{vac_eff_arg}{vaccine efficacy}
}
\value{
all statuses in R0 model for admin
}
\description{
function to calculate seroprevalence everywhere by recurrence relation in R0 model
}
|
7ce0d4e5b26ff1014a8c624c9e62f91f6465dcee | b419072cba7e59d581a5f0a5df63b3525e6de214 | /R/create_summary_table.R | 8704ebd3f9c55e88fc79adbaf27c428de3d9af42 | [
"MIT"
] | permissive | awconway/ptds | 797ffe9edc4ca8eb561d93c890cb23f11a2189dd | d2573059f4a92d5ac9a740463a3310572e658cca | refs/heads/master | 2022-12-28T16:02:55.930052 | 2020-10-18T19:04:08 | 2020-10-18T19:04:08 | 249,006,473 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 587 | r | create_summary_table.R | ##' @title
##' @return
##' @author Aaron Conway
##' @export
#' @importFrom dplyr select
#' @importFrom gtsummary tbl_summary italicize_labels as_flextable
create_summary_table <- function(data_ptds) {
data_ptds %>%
select(age, sex_factor, procedure_factor, fluids_duration, food_duration) %>%
droplevels() %>%
tbl_summary(
missing = "no",
label = list(
sex_factor ~ "Gender",
procedure_factor ~ "Procedure",
fluids_duration ~ "Time since last clear fluids (hours)",
food_duration ~ "Time since last food (hours)"
)
)
}
|
7bb4a33f05f858f3486b013b2e9b0b5df576e889 | cfb022d19e331e397707619aea2d5142cf76b491 | /src/linear-modeling.R | ac74ee4474a3a8c97d4f5ae376141e298ffc8427 | [] | no_license | rajaldebnath/F1000_workflow | 6d356d88bf12cf48d9a809ed61e0bac825c0e1d7 | b29977a21d7f46ec0cedfc8052ff77a80763580e | refs/heads/master | 2020-06-20T00:47:30.501132 | 2017-07-31T13:18:02 | 2017-07-31T13:18:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,453 | r | linear-modeling.R | #! /usr/bin/env Rscript
# File description -------------------------------------------------------------
# Linear modeling to relate sample characteristics to community
# summaries.
## ---- lm-get-alpha-diversity ----
setup_example(c("phyloseq", "ggplot2", "nlme", "dplyr", "vegan", "reshape2"))
ps_alpha_div <- estimate_richness(ps, split = TRUE, measure = "Shannon")
ps_alpha_div$SampleID <- rownames(ps_alpha_div) %>%
as.factor()
ps_samp <- sample_data(ps) %>%
unclass() %>%
data.frame() %>%
left_join(ps_alpha_div, by = "SampleID") %>%
melt(measure.vars = "Shannon",
variable.name = "diversity_measure",
value.name = "alpha_diversity")
# reorder's facet from lowest to highest diversity
diversity_means <- ps_samp %>%
group_by(host_subject_id) %>%
summarise(mean_div = mean(alpha_diversity)) %>%
arrange(mean_div)
ps_samp$host_subject_id <- factor(ps_samp$host_subject_id,
diversity_means$host_subject_id)
## ---- lm-age ----
alpha_div_model <- lme(fixed = alpha_diversity ~ age_binned, data = ps_samp,
random = ~ 1 | host_subject_id)
## ---- lm-prediction-intervals ----
new_data <- expand.grid(host_subject_id = levels(ps_samp$host_subject_id),
age_binned = levels(ps_samp$age_binned))
new_data$pred <- predict(alpha_div_model, newdata = new_data)
X <- model.matrix(eval(eval(alpha_div_model$call$fixed)[-2]),
new_data[-ncol(new_data)])
pred_var_fixed <- diag(X %*% alpha_div_model$varFix %*% t(X))
new_data$pred_var <- pred_var_fixed + alpha_div_model$sigma ^ 2
## ---- lm-fitted-plot ----
# fitted values, with error bars
ggplot(ps_samp %>% left_join(new_data)) +
geom_errorbar(aes(x = age_binned, ymin = pred - 2 * sqrt(pred_var),
ymax = pred + 2 * sqrt(pred_var)),
col = "#858585", size = .1) +
geom_point(aes(x = age_binned, y = alpha_diversity,
col = family_relationship), size = 0.8) +
facet_wrap(~host_subject_id) +
scale_y_continuous(limits = c(2.4, 4.6), breaks = seq(0, 5, .5)) +
scale_color_brewer(palette = "Set2") +
labs(x = "Binned Age", y = "Shannon Diversity", color = "Litter") +
guides(col = guide_legend(override.aes = list(size = 4))) +
theme(panel.border = element_rect(color = "#787878", fill = alpha("white", 0)),
axis.text.x = element_text(angle = -90, size = 6),
axis.text.y = element_text(size = 6))
|
a6268ebfa427935d3c9032d4a151d505a78a6abf | 10d24e42cda4c05ee04a47a0e292a2f2b5dc86a3 | /plot6.R | f67ac7ee6eae44386507738a0d2c8ee21e848848 | [] | no_license | AviaPeron/Exploratory-Data-Analysis | 00840fcff7adb5db236f2ca0da37d46a66304215 | 83fdd9e9572e9cfcd5b52a89733ed89d6af6d6b1 | refs/heads/master | 2021-01-19T14:13:55.712387 | 2017-08-20T21:08:50 | 2017-08-20T21:08:50 | 100,888,721 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 903 | r | plot6.R | library("ggplot2")
if(!exists("NEI")){
NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("./data/Source_Classification_Code.rds")
}
## Subset the dataset to Emission In Baltimore City + LA and the type = ON-ROAD
MotDatLABal <- subset(NEI, NEI$fips %in% c("06037", "24510") & NEI$type == "ON-ROAD")
## Summrize the Emission per year and type
AggSum6 <- aggregate(Emissions ~ year + fips, MotDatLABal, sum)
##replace to country names instead nubers
AggSum6$fips <- gsub("06037", "Los Angeles County",AggSum6$fips)
AggSum6$fips <- gsub("24510", "Baltimore City",AggSum6$fips )
## Create a plot
g <- ggplot(AggSum6, aes(year, Emissions, color = fips))
g + geom_line() +
labs( x= "Year", y = expression("Total PM" [2.5]*""), title = "Total Emission From Motor Vehicle Per Year In Baltimore and LA")
dev.copy(png, file = "plot6.png")
dev.off() |
c688a2aaed7cd83870f21d77ac1724236139bb82 | 5e16efbfd051bb517527df48815622b9b227e216 | /man/coo_rotatecenter.Rd | b43317267157c39f55be4bf837725206e9a586d8 | [] | no_license | yuting27/Momocs | ac41fb817a689c90be97788c4cf4dbdc769b7ff5 | d4cb2a504e7f78a0d39e3e741620a9d4364b9437 | refs/heads/master | 2020-12-29T03:18:50.753927 | 2016-02-10T19:44:48 | 2016-02-10T19:44:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 817 | rd | coo_rotatecenter.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coo-utilities.R
\name{coo_rotatecenter}
\alias{coo_rotatecenter}
\title{Rotates shapes with a custom center}
\usage{
coo_rotatecenter(coo, theta, center = c(0, 0))
}
\arguments{
\item{coo}{a \code{matrix} of (x; y) coordinates or a \code{list}, or any \link{Coo} object.}
\item{theta}{\code{numeric} the angle (in radians) to rotate shapes.}
\item{center}{\code{numeric} the (x; y) position of the center}
}
\value{
a \code{matrix} of (x; y) coordinates, or a \link{Coo} object.
}
\description{
rotates a shape of 'theta' angles (in radians) and with a (x; y) 'center'.
}
\examples{
b <- bot[1]
coo_plot(b)
coo_draw(coo_rotatecenter(b, -pi/2, c(200, 200)), border='red')
}
\seealso{
Other rotation functions: \code{\link{coo_rotate}}
}
|
2c042975ac86f9080e8ada09842087f5ac4669e7 | 2b76e72f3e46d2fa85721b1a6ff4bdbb71c40f04 | /man/list_sheets.Rd | 6d86bfda273afa7282e0567eaf323dc38303b7ab | [
"MIT"
] | permissive | elias-jhsph/rsmartsheet | ae7f1a8531ce2225417d3444d3de213152c169d5 | 18bff1da9dce5acdaff07a3da49c5fe4827c4d98 | refs/heads/master | 2021-07-07T09:57:20.373524 | 2021-05-10T17:57:50 | 2021-05-10T17:57:50 | 236,876,103 | 8 | 6 | null | null | null | null | UTF-8 | R | false | true | 432 | rd | list_sheets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Smartsheet-R-sdk.R
\name{list_sheets}
\alias{list_sheets}
\title{List the sheets available to the authenticated Smartsheet account}
\usage{
list_sheets()
}
\value{
returns table of smartsheets
}
\description{
Retrieves a table listing the Smartsheets accessible with the configured API token
}
\examples{
\dontrun{
list_sheets()
}
}
|
fc497ca1ac66ffde0f6f14949180907f5abec4f2 | a6d14a208a21ce11d36f9f1ada90fb3cfd11081a | /development/result_sco_prem/scripts/05_build_ensemble_and_evaluate_performance.R | 7df92fa48b965c7f860978bc6ab1c6760e74ea87 | [] | no_license | Sol-Won-1100/panenkar | b04e70be375b7da9a907742d67bc09569e210d33 | b50a29f62445b3be35c5949bf082abb659be2ab5 | refs/heads/master | 2023-05-06T18:20:39.853010 | 2021-06-04T23:37:15 | 2021-06-04T23:37:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,987 | r | 05_build_ensemble_and_evaluate_performance.R |
# TITLE: Build Ensemble and Evaluate Performance
# DESCRIPTION: Build an ensemble model and evaluate its performance on the result market of the Scottish Premiership
# NOTE(review): load_wd, calc_ensemble_weights, build_ensemble, remove_margin,
# simulate_bets and `cols` are project helpers/objects defined elsewhere in
# this repository -- this script is not stand-alone.
# Setup ----------------------------------------------------------------------------------------------------------------
.competition_id <- "sco_prem"
wd <- load_wd()
# Load model predictions -----------------------------------------------------------------------------------------------
# Two data splits: "model_fitting" (used to learn ensemble weights) and
# "test_main" (held out for evaluation), each with poisson, probit and
# bookmaker-implied ("juice") probability predictions.
set_model_fitting <- glue("{wd$dev_result_sco_prem_processed_data}set_model_fitting.rds") %>% read_rds()
set_test_main <- glue("{wd$dev_result_sco_prem_processed_data}set_test_main.rds") %>% read_rds()
predicted_poisson_model_fitting <- glue("{wd$dev_result_sco_prem_processed_data}predicted_poisson_model_fitting.rds") %>%
  read_rds()
predicted_poisson_test_main <- glue("{wd$dev_result_sco_prem_processed_data}predicted_poisson_test_main.rds") %>%
  read_rds()
predicted_probit_model_fitting <- glue("{wd$dev_result_sco_prem_processed_data}predicted_probit_model_fitting.rds") %>%
  read_rds()
predicted_probit_test_main <- glue("{wd$dev_result_sco_prem_processed_data}predicted_probit_test_main.rds") %>%
  read_rds()
juice_model_fitting <- glue("{wd$dev_result_sco_prem_processed_data}juice_model_fitting.rds") %>% read_rds()
juice_test_main <- glue("{wd$dev_result_sco_prem_processed_data}juice_test_main.rds") %>% read_rds()
# Matches early in a season have unreliable team ratings; exclude them.
min_matches_season <- 10
# Calculate the best ensemble from the model_fitting set ---------------------------------------------------------------
# Matrices better for doing numerical operations
predictions <- list(poisson = predicted_poisson_model_fitting,
                    probit = predicted_probit_model_fitting,
                    juice = juice_model_fitting) %>%
  map(~filter(., home_matches_played_season > min_matches_season, away_matches_played_season > min_matches_season)) %>%
  map(~select(., all_of(c("home_prob", "draw_prob", "away_prob")))) %>%
  map(as.matrix)
num_matches <- nrow(predictions$poisson)
# One-hot encode the observed results (home/draw/away) as an n x 3 matrix
# aligned with the prediction matrices.
observed_model_fitting <- set_model_fitting %>%
  filter(home_matches_played_season > min_matches_season, away_matches_played_season > min_matches_season) %>%
  mutate(home = if_else(result == "home", 1, 0),
         draw = if_else(result == "draw", 1, 0),
         away = if_else(result == "away", 1, 0)) %>%
  select(home, draw, away) %>%
  unlist() %>%
  matrix(nrow = num_matches, ncol = 3)
ensemble_weights <- calc_ensemble_weights(predictions, observed_model_fitting)
# Test ensemble --------------------------------------------------------------------------------------------------------
predictions <- list(poisson = predicted_poisson_test_main,
                    probit = predicted_probit_test_main,
                    juice = juice_test_main) %>%
  map(~filter(., home_matches_played_season > min_matches_season, away_matches_played_season > min_matches_season)) %>%
  map(~select(., all_of(c("home_prob", "draw_prob", "away_prob"))))
ensemble_probs <- build_ensemble(predictions, ensemble_weights)
set_test_main_valid_matches <- set_test_main %>%
  filter(., home_matches_played_season > min_matches_season, away_matches_played_season > min_matches_season)
# Best available (max) odds for placing bets; sharp closing odds for
# benchmarking the simulated bets.
odds <- select(set_test_main_valid_matches, home_odds_max, draw_odds_max, away_odds_max)
outcomes <- factor(set_test_main_valid_matches$result, c("home", "draw", "away"))#
closing_odds <- select(set_test_main_valid_matches, home_odds_sharp_closing, draw_odds_sharp_closing,
                       away_odds_sharp_closing)
# Bet whenever the model edge over the market exceeds 2.5%.
test_ensemble_0_025 <- simulate_bets(ensemble_probs, odds, outcomes, closing_odds, min_advantage = 0.025)
# Test poisson ---------------------------------------------------------------------------------------------------------
# NOTE(review): 1/probability converts the poisson probabilities to implied
# decimal odds before remove_margin() -- presumably remove_margin expects
# odds, not probabilities; confirm against its definition.
poisson_probs <- select(predictions$poisson, home_prob:away_prob) %>%
  divide_by(1, .) %>%
  remove_margin()
test_poisson_0_025 <- simulate_bets(poisson_probs, odds, outcomes, closing_odds, min_advantage = 0.025)
|
3fd1adde42d75007eb32407d0b3dfff79671ec34 | fef76dd9866c037d7120d04843bcc5403f5b1abd | /diveRsity.R | 8293092273e40440ccd5aef8acf45e2f62ba9619 | [] | no_license | popgenomics/quantiSex | cffec93511f3a9e3f98156d7b7d8609de3202a19 | c2c0622bd50ed1342578b2fc168fccfeffd5fb00 | refs/heads/master | 2021-06-24T15:52:50.696400 | 2020-12-28T17:46:04 | 2020-12-28T17:46:04 | 58,804,295 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 438 | r | diveRsity.R | #!/home/roux/R/bin/Rscript --vanilla
# # !/usr/bin/env Rscript
#./diveRsity.R input=nameOfGenePopFile output=nameOfROutputFile
.libPaths("/home/roux/softwares/R-3.4.2/library")
library(diveRsity)
#require(diveRsity)
options(warn=-1)
for(i in commandArgs()){
tmp = strsplit(i, "=")
if(tmp[[1]][1] == "input"){input = tmp[[1]][2]}
if(tmp[[1]][1] == "output"){output = tmp[[1]][2]}
}
a=diffCalc(input, fst=T, pairwise=F, outfile=output)
|
cf90d2616aba0b6e85282d10fee54d360802ba07 | d2a85f1faf32de2203f1a3e3083762b481c420c2 | /regresi-.R | 10fb2ec14c5a4d3a1f25d1bd6f4d9e4cef86f932 | [] | no_license | dwiabdulrahman/R-studio | e2ebe54fc6b73f129602b4cc95269a56f5ed245c | 0a2f4ff5df1dc4d0a75f8082623cec0619b8a7ba | refs/heads/master | 2020-03-30T07:57:03.856949 | 2019-01-11T09:08:36 | 2019-01-11T09:08:36 | 150,976,525 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 807 | r | regresi-.R | # Nama
# Dwi Abdul Rahman ( 17523151 )
# Girendra Egie Zuhrival (17523154)
#-----------------------------------------------------
#Data
data1<-read.csv(file="dataproduksi.csv", header=TRUE)
data1
#Linear Regresion
model <-lm(jamKerja ~ JumlahProduksi, data=data1)
summary(model)
plot(jamKerja ~ JumlahProduksi, data=data1)
abline(model, col = "red", lwd = 1)
# Predicting New Value based on our model
predict(model, data.frame(JumlahProduksi = 70))
#Polinomial Regresion
poly_model <- lm(jamKerja ~ poly(JumlahProduksi,degree=2), data = data1)
poly_model
x <- with(data1, seq(min(JumlahProduksi), max(JumlahProduksi), length.out=2000))
y <- predict(poly_model, newdata = data.frame(JumlahProduksi = x))
plot(jamKerja ~ JumlahProduksi, data = data1)
lines(x, y, col = "red")
|
b9b7e7e73fba489a09a4f3ecfd641e5f01dd1ccd | ec60cf027d03789e22f9ff00257b3148cc93ed46 | /PCAplots2.R | 73aee0fa1f4472c04d7fc369e37280414bcb0691 | [] | no_license | ReubenBuck/99Lives_scripts | 85f0000e6e442671dc65350036eac4ad12f2b560 | a45b82008e4ca3a1b3af46a92c1b5965bf69fc13 | refs/heads/master | 2022-07-20T14:26:56.676655 | 2020-05-17T22:54:04 | 2020-05-17T22:54:04 | 256,058,911 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,068 | r | PCAplots2.R | rm(list = ls())
# 99Lives cat genotype clustering: open the thinned GDS file, LD-prune SNPs,
# compute identity-by-state (IBS), and cut the hierarchical tree into clusters.
options(stringsAsFactors = FALSE)
library(gdsfmt)
library(SNPRelate)
library(RColorBrewer)
library(dendextend)
# One-off GDS conversion from VCF, kept for reference:
#system("rm ~/Desktop/TMP/data.gds")
#vcf.fn <- "/mnt/raid/projects/99Lives_analysis/vcf_200117/thin_snps/combined.thin_1kb.1maf.vcf.gz"
#snpgdsVCF2GDS(vcf.fn,"~/Desktop/TMP/data.1maf.gds",method ="biallelic.only")
genofile <-snpgdsOpen("~/Desktop/TMP/data.1maf.gds")
set.seed(100)
fam_info <- read.table("/mnt/raid/projects/99Lives_analysis/result/cat_families/cat_family_id.tsv",
sep= "\t", header = TRUE, comment.char = "")
# LD pruning at threshold 0.5; autosome.only = FALSE so cat chromosomes are kept.
snpset <- snpgdsLDpruning(genofile, ld.threshold=0.5, autosome.only = FALSE, num.thread = 10)
str(snpset)
# Drop unplaced scaffolds (names containing "_") and chrX before clustering.
snpset <- snpset[!grepl("_", names(snpset))]
snpset <- snpset[!grepl("chrX", names(snpset))]
str(snpset)
snpset.id <- unlist(unname(snpset))
length(snpset.id)
# Pairwise IBS matrix, hierarchical clustering, and tree cut (outlier groups
# are labelled "Out..." by snpgdsCutTree).
ibs <- snpgdsIBS(genofile,snp.id = snpset.id, autosome.only=FALSE)
ibs.hc<-snpgdsHCluster(ibs)
rv <- snpgdsCutTree(ibs.hc, outlier.n = 2, label.H = FALSE, label.Z = FALSE, col.list = NA)
# Colour palette: one Set1-derived colour per non-outlier cluster, grey for outliers.
cols2 <- rep("grey", sum(grepl("Out", levels(rv$samp.group))))
nb.cols <- sum(!grepl("Out", levels(rv$samp.group)))
mycolors <- rev(colorRampPalette(brewer.pal(8, "Set1"))(nb.cols))
cols <- c(mycolors, cols2)
names(cols) <- levels(rv$samp.group)
# Sample metadata, reordered to match the clustering's sample order.
meta_data <- read.table("/mnt/raid/projects/99Lives_analysis/accessory_data/99Lives_Pheno_LAL.csv", sep= "\t", header = TRUE)
rownames(meta_data) <- meta_data$LabID
meta_data <- meta_data[rv$sample.id,]
df.meta <- data.frame(sample_ID = meta_data$LabID,
breed = as.character(meta_data$breed_simplified),
breed_specifc = meta_data$breed,
Institution = meta_data$Institution,
Institution_symbol = meta_data$Institution_symbol,
Country = meta_data$Country,
Continent = meta_data$Continent,
cluster = rv$samp.group)
# PDF 1: breed composition of each cluster drawn as a shaded membership matrix.
pdf(file = "/mnt/raid/projects/99Lives_analysis/result/PCA_result/breed_member.pdf",
height = 8,width = 8)
# Panel titles: cluster numbers 1..k, with the last (grey) column labelled "Out".
main.names <- 1:sum(!duplicated(cols))
main.names[length(main.names)] <- "Out"
# breeds
df <- data.frame(breed = as.character(meta_data$breed_simplified),
cluster = rv$samp.group)
# Collapse all outlier groups into a single "Outlier001" column.
df$cluster[grep("Out", df$cluster)] <- "Outlier001"
df.tab <- table(df)
# Zero counts become NA so empty cells render blank in image().
df.tab[df.tab == 0] <- NA
cat.no <- table(df$cluster)
cat.no <- c(cat.no, Out = sum(cat.no[grepl("Out",names(cat.no))]))
cat.no <- cat.no[!grepl("Outlier",names(cat.no))]
# One wide label panel followed by one narrow panel per cluster.
layout(matrix(1:(sum(!duplicated(cols)) + 1), nrow = 1),
widths = c(3,rep(1,(sum(!duplicated(cols)) + 1))))
par(mar = c(5,0,5,0), oma = c(0,5,0,2))
# Invisible first panel: only carries row labels and header text.
mat.na <- t(df.tab[,1])
mat.na[1,] <- 0
image(mat.na , y = 1:nrow(df.tab),
yaxt = "n", xaxt = "n", axes = FALSE,
col = "white",
main = "")
mtext("Cluster:",side = 3, font = 2, line = 3, adj = 1)
mtext("Samples:",side = 3, line = 1.5, adj = 1, cex = .7)
mtext("Breeds:",side = 3, line = 0.2, adj = 1, cex = .7)
mtext(text = paste(rev(rownames(df.tab)), ":", sep = ""),
side = 4, at = 1:nrow(df.tab), las = 2,adj = 1,
cex = .7, line = -1.4)
mtext(text = rev(rowSums(df.tab, na.rm = TRUE)),
side = 4, at = 1:nrow(df.tab), las = 2,adj = 1,
cex = .7, line = -.3)
# One shaded column per cluster: darker cells mean more cats of that breed.
for(i in 1:(sum(!duplicated(cols)))){
image(t(rev(df.tab[,i])), y = 1:nrow(df.tab),
yaxt = "n", xaxt = "n",
main = "",
col = scales::alpha(cols[i], seq(.3,1,.1))
)
grid(ny = nrow(df.tab), nx = 0)
mtext(main.names[i],side = 3, col = cols[i], font = 2, line = 3)
mtext(cat.no[i],side = 3, line = 1.5, cex = .7)
mtext(sum(!is.na(df.tab[,i])),side = 3, line = 0.2, cex = .7)
}
dev.off()
# PDF 2: the same membership matrix, but by submitting institution.
pdf(file = "/mnt/raid/projects/99Lives_analysis/result/PCA_result/institute_member.pdf",
height = 8,width = 8)
df <- data.frame(inst = as.character(meta_data$Institution_symbol),
cluster = rv$samp.group)
df$cluster[grep("Out", df$cluster)] <- "Outlier001"
df.tab <- table(df)
df.tab[df.tab == 0] <- NA
layout(matrix(1:(sum(!duplicated(cols)) + 1), nrow = 1),
widths = c(3,rep(1,(sum(!duplicated(cols)) + 1))))
par(mar = c(5,0,5,0), oma = c(0,5,0,2))
mat.na <- t(df.tab[,1])
mat.na[1,] <- 0
image(mat.na , y = 1:nrow(df.tab),
yaxt = "n", xaxt = "n", axes = FALSE,
col = "white",
main = "")
mtext("Cluster:",side = 3, font = 2, line = 3, adj = 1)
mtext("Samples:",side = 3, line = 1.5, adj = 1, cex = .7)
mtext("Institutions:",side = 3, line = 0.2, adj = 1, cex = .7)
mtext(text = paste(rev(rownames(df.tab)), ":", sep = ""),
side = 4, at = 1:nrow(df.tab), las = 2,adj = 1,
cex = .7, line = -1.4)
mtext(text = rev(rowSums(df.tab, na.rm = TRUE)),
side = 4, at = 1:nrow(df.tab), las = 2,adj = 1,
cex = .7, line = -.3)
for(i in 1:(sum(!duplicated(cols)))){
image(t(rev(df.tab[,i])), y = 1:nrow(df.tab),
yaxt = "n", xaxt = "n",
main = "",
col = scales::alpha(cols[i], seq(.3,1,.1))
)
grid(ny = nrow(df.tab), nx = 0)
mtext(main.names[i],side = 3, col = cols[i], font = 2, line = 3)
# cat.no still holds the per-cluster sample counts from the breed section.
mtext(cat.no[i],side = 3, line = 1.5, cex = .7)
mtext(sum(!is.na(df.tab[,i])),side = 3, line = 0.2, cex = .7)
}
dev.off()
# For each cluster, find the most common breed, country and (for random-bred
# cats) continent, then label clusters accordingly.
df.tab.breed <- table(df.meta[,c("breed","cluster")])
df.tab.breed[df.tab.breed == 0] <- NA
most_common_breed = NULL
# NOTE(review): these loops iterate over ncol(df.tab) — the institution table
# from the previous section — rather than the table being summarised. This
# only works while the two tables have the same number of columns; confirm.
for(i in 1:ncol(df.tab)){
most_common_breed <- c(most_common_breed, names(which.max(df.tab.breed[,i])))
}
df.tab.country <- table(df.meta[,c("Country","cluster")])
df.tab.country[df.tab.country == 0] <- NA
most_common_country = NULL
for(i in 1:ncol(df.tab)){
most_common_country <- c(most_common_country, names(which.max(df.tab.country[,i])))
}
# Continent of origin, computed over random-bred cats only.
df.tab.continent <- table(df.meta[df.meta$breed == "Random bred",c("Continent","cluster")])
df.tab.continent[df.tab.continent == 0] <- NA
most_common_continent = rep(NA,ncol(df.tab.continent))
names(most_common_continent) <- colnames(df.tab.continent)
for(i in 1:ncol(df.tab)){
if(all(is.na(df.tab.continent[,i]))){
next()
}
most_common_continent[i] <- names(which.max(df.tab.continent[,i]))
}
# Cluster labels: breed name, or "Random Bred/<continent>" for random-bred clusters.
most_common <- most_common_breed
most_common[most_common_breed == "Random bred"] <- paste("Random Bred",
most_common_continent[most_common_breed == "Random bred"],
sep = "/")
names(most_common) <- colnames(df.tab.breed)
# Per-sample summary table written out at the end of the script.
df.stats <- data.frame(df.meta,
most_common_breed = most_common[df$cluster],
cluster_col = cols[df.meta$cluster])
# PDF 3: horizontal dendrogram with cluster labels, shaded cluster spans and
# known cat families overplotted as coloured points.
pdf(file = "/mnt/raid/projects/99Lives_analysis/result/PCA_result/dendrogram.pdf",
height = 9, width = 8)
layout(1)
par(mar = c(5,5,5,10))
# 269 leaves; ylim is reversed so leaf 1 sits at the top.
plot(rv$dendrogram, horiz = TRUE, edge.root = TRUE, leaflab = "none",
type = "triangle", xlim = c(.35,0),yaxt= "n", xlab = "Height", ylim = c(269,1))
axis(side = 1, at = seq(0,.3, by = .05))
# Place each cluster label at the median leaf position of its members
# (non-outlier clusters only: group names containing "G").
df.lab <- data.frame(cluster = rv$samp.group[rv$samp.order], order = 1:269)
agg.lab <- aggregate(df.lab$order, by = list(df.lab$cluster), FUN = median)
agg.lab <- agg.lab[grep("G", x = agg.lab$Group.1),]
mtext(text = substr(agg.lab$Group.1, 3,4), side = 4, at = agg.lab$x, las = 2,
col = cols[agg.lab$Group.1], cex = 0.7)
mtext(text = most_common[agg.lab$Group.1], side = 4, at = agg.lab$x, las = 2,
col = cols[agg.lab$Group.1], line = 2, cex = 0.7)
mtext(text = "Cluster", side = 4, at = -4, las = 2, cex = 0.7,padj = 0, line = -.5)
mtext(text = "Most common breed", side = 4, at = -4, las = 2, cex = 0.7,padj = 0, line = 2)
# Translucent rectangle spanning each cluster's range of leaves.
agg.box <- aggregate(df.lab$order, by = list(df.lab$cluster), FUN = range)
agg.box <- agg.box[grep("G", x = agg.box$Group.1),]
rect(xleft = .3, xright = 0,
ybottom = agg.box$x[,1], ytop = agg.box$x[,2], border = FALSE,
col = scales::alpha(cols[agg.box$Group.1],.2))
# Mark the members of each known family (fam.id > 0) on the dendrogram leaves.
ins_ID <- unique(df.meta$Institution_symbol)
fam_ids <- unique(fam_info$fam.id)
fam_ids <- fam_ids[fam_ids > 0]
for(i in fam_ids){
fam_info0 <- fam_info[fam_info$fam.id == i,]
df.ord <- data.frame(samp = rv$sample.id[rv$samp.order], order = 1:269)
b_heights <- get_leaves_attr(rv$dendrogram, attribute = "height")
y = df.ord$order[df.ord$samp %in% fam_info0$sample.ID]
x = b_heights[df.ord$samp %in% fam_info0$sample.ID]
points(x,y, cex = 1, pch = 16, col = fam_info0$col[1])
}
# Legend of family IDs (black/white placeholder colours are excluded).
fam_col <- unique(fam_info[,c("fam.id","col")])
fam_col <- fam_col[!(fam_col$col == "black" | fam_col$col == "white"),]
legend("topleft",pch = 16 ,col = fam_col$col,
legend = fam_col$fam.id, cex = .7,
title = "Family ID", bty = "n",pt.cex = 1)
dev.off()
# PDF 4: PCA biplot (EV1 vs EV2), points coloured by cluster, with a legend
# mapping cluster number to its most common breed; finally write df.stats out.
pca <- snpgdsPCA(genofile, snp.id=snpset.id, num.thread=2, autosome.only = FALSE)
pc.percent <- pca$varprop*100
head(round(pc.percent, 2))
tab <- data.frame(sample.id = pca$sample.id,
EV1 = pca$eigenvect[,1], # the first eigenvector
EV2 = pca$eigenvect[,2], # the second eigenvector
EV3 = pca$eigenvect[,3],
stringsAsFactors = FALSE)
head(tab)
pdf(file = "/mnt/raid/projects/99Lives_analysis/result/PCA_result/biplot.pdf", width = 10.5, height = 7)
layout(matrix(1:2, nrow = 1), widths = c(10,7))
par(mar = c(5,4,3,0))
# Axis labels include the % variance explained by each eigenvector.
plot(tab$EV1, tab$EV2,
xlab=paste("Eigenvector 1", " (",signif(pc.percent[1],3)," %)", sep = ""),
ylab=paste("Eigenvector 2", " (",signif(pc.percent[2],3)," %)", sep = ""),
col = scales::alpha(cols[rv$samp.group], .7), pch = 16)
cluster_names <- c(1:sum(grepl("G", unique(rv$samp.group))), "Out")
cluster_breed <- c(most_common[grepl("G", names(table(rv$samp.group)))], "Outliers")
cluster_col <- cols[!duplicated(cols)]
# Legend drawn in its own (otherwise empty) panel.
par(mar = c(5,0,3,2))
plot.new()
legend("topleft", legend = paste(cluster_names,cluster_breed,sep= ", "),
pch = 16, col = cluster_col, cex = 1, bty = "n",
title = "Cluster, Most common breed")
dev.off()
# Per-sample cluster assignments and labels for downstream use.
write.table(df.stats, "/mnt/raid/projects/99Lives_analysis/result/PCA_result/meta_data.tsv",
sep = "\t", quote = FALSE, row.names = FALSE)
|
5f8a9d576eedfde15d523f3f385f797d42199699 | b6c500373edf55d6c32030eda2a9d524641fa420 | /server/server-renderTable.R | b46cb0a926e02542adb034c8ac782a09b0d4293c | [] | no_license | hyeyeankkim/GEOprogram | 45137d6c87a43c98574b9ecbb18e20e0cfb7f270 | b0669109645c095fbafa5afd1a1fc5c75827a75e | refs/heads/master | 2020-05-29T16:09:10.162329 | 2016-12-07T08:41:47 | 2016-12-07T08:41:47 | 58,996,096 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 182 | r | server-renderTable.R | observe({
# Render the uploaded file (input$file1) as a table, honouring the
# header/separator controls; shows nothing until a file has been chosen.
output$content <- renderTable({
infile <- input$file1
# No upload yet: return NULL so the table area stays empty.
if(is.null(infile)){
return(NULL)
}
read.table(infile$datapath,header=input$header, sep=input$sep)})
})
|
dadf3eacff2106d762ab64556e11041ff1fbda13 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/doBy/examples/is-estimable.Rd.R | dd13cee848f244a3c3c5df046a03c15b39b9c947 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 192 | r | is-estimable.Rd.R | library(doBy)
### Name: is-estimable
### Title: Determines if contrasts are estimable.
### Aliases: is-estimable is_estimable
### Keywords: utilities
### ** Examples
## TO BE WRITTEN
|
47a3e6bc3bbddea1ad96acfd33655d4f230bb193 | 5e94cd423bfcb82cb06789be954fdccc183b3e09 | /code_FILLIAT_ELMELLOUKI.R | 3df14c88aff5cf38a13ec17793fb007ea10615a7 | [] | no_license | omarelmellouki/electric_appliances | 160012e1c3ce657799d04fc83e9c64dc427994f3 | d937198afff2a201c48ca30d16756395644a4ed2 | refs/heads/main | 2022-12-25T11:27:15.494183 | 2020-10-07T09:29:58 | 2020-10-07T09:29:58 | 301,985,117 | 1 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 21,003 | r | code_FILLIAT_ELMELLOUKI.R | ####################################################################################################################################
####################################################################################################################################
###################### Load the datasets and first transformations to start working                            #####################
####################################################################################################################################
rm(list = objects())
# Set the working directory to this script's folder (requires RStudio / rstudioapi).
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
Data0 <- read.csv(file="train.csv")
Data1 <- read.csv(file="test.csv")
# Convert the date columns to POSIXct so they are easier to manipulate and
# support comparison operators.
library(lubridate)
Data0$date <- strptime(as.character(Data0$date),format="%Y-%m-%d %H:%M:%S")
Data0$date <- as.POSIXct(Data0$date,tz = "UTC")
Data1$date <- strptime(as.character(Data1$date),format="%Y-%m-%d %H:%M:%S")
Data1$date <- as.POSIXct(Data1$date,tz = "UTC")
# Fill in the missing values. Column 2 of Data0 and the last column of Data1
# (presumably the response / Id columns — confirm) are dropped so the two
# frames can be stacked.
Data <-rbind(Data0[,-2], Data1[,-ncol(Data1)])
dim(Data)
# First for RH_6: impute from the other humidity readings with a random forest.
Data_WTHNA <- Data[-which(is.na(Data$RH_6)), ]
library(ranger)
RF_NA <- ranger(RH_6 ~ RH_1 + RH_2 + RH_3 + RH_4 + RH_5 + RH_7 + RH_8, data=Data_WTHNA)
Data$RH_6[which(is.na(Data$RH_6))] <- predict(RF_NA,data=Data[which(is.na(Data$RH_6)),])$predictions
# Then for Visibility.
Data_WTHNA <- Data[-which(is.na(Data$Visibility)), ]
RF_NA <- ranger(Visibility ~ Tdewpoint + Windspeed + T_out + Instant, data=Data_WTHNA)
Data$Visibility[which(is.na(Data$Visibility))] <- predict(RF_NA, data=Data[which(is.na(Data$Visibility)),])$predictions
# Write the imputed values back into the corresponding columns.
Data0[,-2] <- Data[1:nrow(Data0),]
Data1[,-ncol(Data1)] <- Data[(nrow(Data0)+1):nrow(Data),]
####################################################################################################################################
###################### Descriptive analysis of the dataset and preliminary checks                              #####################
####################################################################################################################################
# Total electricity consumption over the whole training period.
plot(Data0$date,Data0$Appliances,type='l',ylab='Consommation électrique',xlab='Date')
# Over one week. BUGFIX: ':' binds tighter than '*', so 1:144*7 selected every
# 7th row of the first 1008 instead of the first week; parenthesise 144*7.
plot(Data0$date[1:(144*7)],Data0$Appliances[1:(144*7)],type='l',ylab='Consommation électrique',xlab='Date')
# Over one day (144 ten-minute observations).
plot(Data0$date[1:144],Data0$Appliances[1:144],type='l',ylab='Consommation électrique',xlab='Date')
# Box plots of consumption by hour of day and month of year.
col.pal<-colorRampPalette(c("lightblue", "red"))( 12 )
par(mfrow = c(2,1))
######### possible error here (figure margins too large)
boxplot(Data0$Appliances~Data0$Heure,col=col.pal,outline=FALSE,xlab = "Heure de la journée",ylab="Consommation en Wh")
boxplot(Data0$Appliances~Data0$Month,col=col.pal,outline=FALSE,xlab = "Mois de l'année",ylab="Consommation en Wh")
####################################################################################################################################
###################### Correlation matrices between Appliances and several other variables
####################################################################################################################################
# NOTE(review): chart.Correlation comes from PerformanceAnalytics, which is
# never attached in this script — confirm it is loaded elsewhere.
# Temperature variables — all temperatures.
temp<-cbind(Data0$Appliances, Data0$T1, Data0$T2, Data0$T3, Data0$T4, Data0$T5, Data0$T6, Data0$T7, Data0$T8, Data0$T9, Data0$T_out, Data0$Tdewpoint)
cov(temp)
cor(temp)
chart.Correlation(temp, histogram = TRUE) # takes a few minutes to run
# A subset of the most significant / least redundant temperatures (in the report).
temp_reduit<-cbind(Data0$Appliances, Data0$T2, Data0$T3, Data0$T6)
cor(temp_reduit)
chart.Correlation(temp_reduit, histogram = TRUE) # takes a few minutes to run
# Relative-humidity variables (same approach).
RH<-cbind(Data0$Appliances, Data0$RH_1, Data0$RH_2, Data0$RH_3, Data0$RH_4, Data0$RH_5, Data0$RH_6, Data0$RH_7, Data0$RH_8, Data0$RH_9, Data0$RH_out)
cov(RH)
cor(RH)
chart.Correlation(RH, histogram = TRUE) # takes a few minutes to run
RH_reduit<-cbind(Data0$Appliances, Data0$RH_1, Data0$RH_8, Data0$RH_out)
cor(RH_reduit)
chart.Correlation(RH_reduit,histogram = TRUE) # appears in the report
# Other explanatory variables.
Autres<-cbind(Data0$Appliances, Data0$NSM, Data0$lights, Data0$Windspeed)
cor(Autres)
chart.Correlation(Autres, histogram = TRUE) # appears in the report
####################################################################################################################################
###################### Variable-importance analysis and a first idea of the relevant predictors                #####################
####################################################################################################################################
library(mlbench)
library(Boruta)
# Drop the two random control variables rv1 and rv2.
Data0$rv1 = NULL
Data0$rv2 = NULL
# Convert factors / non-numeric variables to numeric so the principal
# component analysis can run (FactoMineR's PCA does not accept factors).
Data0$WeekStatus = as.numeric(Data0$WeekStatus)
Data0$Day_of_week = as.numeric(Data0$Day_of_week)
Data0$DayType = as.numeric(Data0$DayType)
Data0$InstantF = as.numeric(Data0$InstantF)
# NOTE(review): WeekStatus is converted twice and rv1/rv2 are re-nulled below;
# both repetitions are harmless but redundant.
Data0$WeekStatus = as.numeric(Data0$WeekStatus)
Data0$date = as.numeric(Data0$date)
Data0$rv1 = Data0$rv2 = NULL
library(FactoMineR)
PCA(Data0)
# Run the Boruta variable-importance algorithm.
# Warning: takes about 2 hours to complete; the output .RData was too large
# to share.
library(Boruta)
Boruta.Short <- Boruta(Appliances ~ ., data = Data0, doTrace = 2, ntree = 500)
# Plot the variable importances.
plot(Boruta.Short)
# Retrieve the variables ranked by increasing importance. Row 99 of the
# importance history is used — presumably the final iteration; confirm.
Imp = Boruta.Short$ImpHistory
final_imp = Imp[99,]
names(final_imp[order(Imp[99,])])
####################################################################################################################################
###################### Linear and non-linear models ################################################################################
####################################################################################################################################
######################## First model: linear regression
# Stepwise (both-direction) variable selection minimising the AIC criterion.
# Build the full formula over the first 32 predictors (date/Appliances excluded).
cov <- head(names(Data0)[-c(1,2)], 32)
eq <- paste0("Appliances ~", paste0(cov, collapse='+'))
eq
# Hold-out split: 85% of the training data for fitting, the rest for validation.
training = Data0
# For reproducibility.
set.seed(150)
library(caret)
Train_on <- createDataPartition(y = training$Appliances, p = 0.85, list = FALSE)
# BUGFIX: the test set must be the complement of Train_on taken from the
# ORIGINAL data. The previous code subset `training` first and then indexed
# the already-reduced frame with -Train_on, which made `testing` a subset of
# the training rows (data leakage).
testing <- training[-Train_on, ]
training <- training[Train_on, ]
# Keep the full validation rows (with the response) for later scoring.
testing_verif = testing
# Remove the response column from the prediction inputs.
testing$Appliances=NULL
# Full model to be refined by stepwise selection.
full.model <- lm(eq, data = training)
# stepAIC adjusts the model on the training split.
library(MASS)
step.model <- stepAIC(full.model, direction = "both", trace = TRUE,
data=training)
# Predict on the hold-out set.
prediction_lm <- predict(step.model, newdata=testing)
# Plot predictions against the observed values (first day: 144 ten-minute steps).
par(mfrow=c(1,1))
plot(testing_verif$Appliances[1:144],type ='l',ylab="Appliances")
lines(prediction_lm[1:144],col='green',lwd='3')
library(Metrics)
rmse(prediction_lm,testing_verif$Appliances)
######################## Second model: non-linear (GAM) regression
# Variables and smooth-term groupings chosen by hand from the preliminary
# relevance analysis; the same split is reused so the models are comparable.
library(mgcv)
g0 = gam(Appliances ~ InstantF + lights + s(T3,T9,T8,T2 , k=-1)+ Day_of_week + s(
RH_3,RH_2,RH_1,RH_8,RH_5,RH_4,k=-1)+ T_out + Press_mm_hg +
Windspeed + Tdewpoint + Day_of_week+ InstantF +NSM, data = training)
# Predict, then overlay both models on the same plot to compare them visually.
prediction_gam <- predict(g0, newdata=testing)
plot(testing_verif$Appliances[1:144],type ='l',ylab="Appliances")
lines(prediction_lm[1:144],col='red',lwd='3')
lines(prediction_gam[1:144],col='green',lwd='3')
# The prediction error is lower, so the GAM is the better of the two models.
rmse(prediction_gam,testing_verif$Appliances)
####################################################################################################################################
###################### Random-forest regression model with interpolation ###########################################################
####################################################################################################################################
##################### Part one: completing the train set by naive interpolation
# Linearly interpolate the missing Appliances values of the training set using
# the surrounding observations. read_csv is used here for its cleaner printing.
# NOTE(review): read_csv needs readr and arrange needs dplyr; neither is
# attached in this script — confirm they are loaded elsewhere (e.g. tidyverse).
Data0 <- read_csv(file="train.csv")
Data1 <- read_csv(file="test.csv")
# Fill the missing predictor values (same imputation as above).
Data <-rbind(Data0[,-2], Data1[,-ncol(Data1)])
dim(Data)
# First for RH_6.
Data_WTHNA <- Data[-which(is.na(Data$RH_6)), ]
library(ranger)
RF_NA <- ranger(RH_6 ~ RH_1 + RH_2 + RH_3 + RH_4 + RH_5 + RH_7 + RH_8, data=Data_WTHNA)
Data$RH_6[which(is.na(Data$RH_6))] <- predict(RF_NA,data=Data[which(is.na(Data$RH_6)),])$predictions
# Then for Visibility.
Data_WTHNA <- Data[-which(is.na(Data$Visibility)), ]
RF_NA <- ranger(Visibility ~ Tdewpoint + Windspeed + T_out + Instant, data=Data_WTHNA)
Data$Visibility[which(is.na(Data$Visibility))] <- predict(RF_NA, data=Data[which(is.na(Data$Visibility)),])$predictions
# Write the imputed values back into the corresponding columns.
Data0[,-2] <- Data[1:nrow(Data0),]
Data1[,-ncol(Data1)] <- Data[(nrow(Data0)+1):nrow(Data),]
# Indices of the test rows that precede the pure-forecast week.
n_l<-which(Data1$date<=as.Date("2016-05-19 23:50:00"))
# Inspect those rows.
Data1[n_l,]
fill = Data1[n_l,]
# Add an empty Appliances column and drop the useless Id column.
fill$Appliances = NA
fill$Id = NULL
# Build a data frame covering the whole period, with no time gaps.
Data = rbind(Data0,fill)
Data = arrange(Data, Data$date)
# Sanity check: the inserted rows appear where they were missing, with NA Appliances.
Data[1:15,]
# Remember which rows need interpolation, so the interpolated values can be
# written back to the same positions afterwards.
k = which(is.na(Data$Appliances))
k
# Only the rows with a missing Appliances value are selected.
Data[k,]
# Linear interpolation of the missing Appliances values.
library(imputeTS)
Data$Appliances = na_interpolation(Data$Appliances)
# Visual check of the result.
points(Data[1:50,]$date,Data[1:50,]$Appliances,col='blue',lwd='3')
# Store the interpolated values in the submission file.
submit <- read.csv(file="sample_submission.csv", sep=",", dec=".")
submit[n_l,]$Appliances = Data[k,]$Appliances
# Length check: one interpolated value per submission row.
length(k) == length(n_l)
# Plot the first part of the prediction (the interpolated values in red).
plot(Data0$date, Data0$Appliances, type='l', xlab="Date",ylab="Appliance")
lines(Data1$date,submit$Appliances,col='red')
##################### Part two: completing the train set by linear regression and random forest
# Fill the missing Appliances values with richer models that use the
# surrounding observations as well as the explanatory variables.
setwd("C:/Users/harold/Desktop/STA/SIM202")
Data0 <- read.csv(file="train.csv")
Data1 <- read.csv(file="test.csv")
Data0$date <- strptime(as.character(Data0$date),format="%Y-%m-%d %H:%M:%S")
Data0$date <- as.POSIXct(Data0$date,tz = "UTC")
Data1$date <- strptime(as.character(Data1$date),format="%Y-%m-%d %H:%M:%S")
Data1$date <- as.POSIXct(Data1$date,tz = "UTC")
# Fill the missing predictor values (same imputation as above).
Data <-rbind(Data0[,-2], Data1[,-ncol(Data1)])
dim(Data)
# First for RH_6.
Data_WTHNA <- Data[-which(is.na(Data$RH_6)), ]
library(ranger)
RF_NA <- ranger(RH_6 ~ RH_1 + RH_2 + RH_3 + RH_4 + RH_5 + RH_7 + RH_8, data=Data_WTHNA)
Data$RH_6[which(is.na(Data$RH_6))] <- predict(RF_NA,data=Data[which(is.na(Data$RH_6)),])$predictions
# Then for Visibility.
Data_WTHNA <- Data[-which(is.na(Data$Visibility)), ]
RF_NA <- ranger(Visibility ~ Tdewpoint + Windspeed + T_out + Instant, data=Data_WTHNA)
Data$Visibility[which(is.na(Data$Visibility))] <- predict(RF_NA, data=Data[which(is.na(Data$Visibility)),])$predictions
# Write the imputed values back into the corresponding columns.
Data0[,-2] <- Data[1:nrow(Data0),]
Data1[,-ncol(Data1)] <- Data[(nrow(Data0)+1):nrow(Data),]
#################### Extrapolation with a random forest
# Indices of the rows to predict.
n_l<-which(Data1$date<=as.Date("2016-05-19 23:50:00"))
# Build the "holed" data frame and reorder it by date.
fill = Data1[n_l,]
fill$Appliances = NA
fill$Id = NULL
tail(fill)
Data = rbind(Data0,fill)
Data = arrange(Data, Data$date)
Data[1:15,]
# Rows to fill, remembered so the predictions can be stored back later.
k = which(is.na(Data$Appliances))
Data[k,]
# shift() moves a data frame by k time steps (rows wrap around); it is used to
# attach to each row the Appliances value just before and just after it.
shift <- function(d, k) rbind( tail(d,k), head(d,-k), deparse.level = 0 )
# Quick check that shift works as intended.
df = shift(Data0,1)
df
# Columns holding the shifted Appliances values:
# the next observation...
Appl_fwd = shift(Data0,-1)
Appl_fwd = Appl_fwd$Appliances
# ...and the previous one.
Appl_back = shift(Data0,1)
Appl_back = Appl_back$Appliances
# Another sanity check.
head(Data0)
Appl_back
Appl_fwd
# Training frame augmented with the past and future Appliances values.
compl = Data0
compl$Appl_fwd = Appl_fwd
compl$Appl_back = Appl_back
# One last check that this matches what we want.
compl
# Reorder the columns so the future/past/present Appliances sit side by side;
# this is the frame the models are trained on.
compl[,c(1,2,44,45,3:43)]
# NOTE(review): as.tibble is deprecated in recent tibble versions; as_tibble
# is the replacement.
compl = as.tibble(cbind(compl[,c(1,2,44,45,3:43)]))
compl
# Rows to predict.
k = which(is.na(Data$Appliances))
# Same construction for the frame containing the rows to be predicted.
Appl_fwd = shift(Data,-1)
Appl_fwd = Appl_fwd$Appliances
Appl_back = shift(Data,1)
Appl_back = Appl_back$Appliances
compl3 = Data
compl3$Appl_fwd = Appl_fwd
compl3$Appl_back = Appl_back
compl3
compl3[,c(1,2,44,45,3:43)]
compl3 = as.tibble(cbind(compl3[,c(1,2,44,45,3:43)]))
compl3
# The values to extrapolate are the NA rows; grab them.
k = which(is.na(compl3$Appliances))
to_predict = compl3[k,]
# Drop the (empty) response column.
to_predict$Appliances = NULL
to_predict
# Each row now carries the past and future Appliances values. Some Appl_fwd /
# Appl_back entries are still NA because consecutive values are missing; those
# are linearly interpolated (a small, acceptable approximation — always the
# mean of two neighbouring values).
to_predict$Appl_fwd = na_interpolation(to_predict$Appl_fwd)
to_predict$Appl_back = na_interpolation(to_predict$Appl_back)
# Fit models that extrapolate the missing Appliances values less naively.
# First with a random forest...
rdf = ranger(formula = Appliances ~ Appl_fwd+Appl_back+lights+T1+RH_1+T2+RH_2+T3+RH_3+
T4+RH_4+T5+RH_5+T6+RH_6+T7+
RH_7+T8+RH_8+T9+RH_9+T_out+
Press_mm_hg+RH_out+Windspeed+
Tdewpoint+NSM+ WeekStatus+ Day_of_week,
data = compl, num.trees = 5000)
prediction_rdf = predict(rdf,data = to_predict)
# ...then with a linear model.
# NOTE(review): this assignment masks stats::lm for the rest of the session.
lm = lm(formula = Appliances ~ Appl_fwd+Appl_back+lights+T1+RH_1+T2+RH_2+T3+RH_3+
T4+RH_4+T5+RH_5+T6+RH_6+T7+
RH_7+T8+RH_8+T9+RH_9+T_out+
Press_mm_hg+RH_out+Windspeed+
Tdewpoint+NSM+ WeekStatus+ Day_of_week,
data = compl)
prediction_lm = predict(lm,newdata = to_predict)
# The two extrapolations are visually very close.
plot(prediction_lm,type='l')
lines(prediction_rdf$predictions,type="l",col='red')
############## Part three: predicting the future week with the random-forest model
# Hold-out split: 85% of the training data for fitting, the rest for validation.
training = Data0
# For reproducibility.
set.seed(150)
library(caret)
Train_on <- createDataPartition(y = training$Appliances, p = 0.85, list = FALSE)
# BUGFIX: take the test rows from the ORIGINAL data before overwriting
# `training`; the previous order made `testing` a subset of the training rows
# (data leakage).
testing <- training[-Train_on, ]
training <- training[Train_on, ]
# Keep the full validation rows (with the response) for later scoring.
testing_verif = testing
# Remove the response column from the prediction inputs.
testing$Appliances=NULL
# Cross-validated check of the random forest.
rdf2 = ranger(formula = Appliances ~ lights+T1+RH_1+T2+RH_2+T3+RH_3+
T4+RH_4+T5+RH_5+T6+RH_6+T7+
RH_7+T8+RH_8+T9+RH_9+T_out+
Press_mm_hg+RH_out+Windspeed+
Tdewpoint+NSM+ WeekStatus+ Day_of_week,
data = training, num.trees = 5000)
# Evaluate on the hold-out rows.
prediction = predict(rdf2, data = testing)
plot(testing_verif$date[1:500], testing_verif$Appliances[1:500],type='l',xlab = "Date",ylab = "Appliance")
lines(testing_verif$date[1:500],prediction$predictions[1:500],col='red')
library(Metrics)
rmse(prediction$predictions,testing_verif$Appliances)
# Refit on the complete training data before predicting the unseen week.
rdf2 = ranger(formula = Appliances ~ lights+T1+RH_1+T2+RH_2+T3+RH_3+
T4+RH_4+T5+RH_5+T6+RH_6+T7+
RH_7+T8+RH_8+T9+RH_9+T_out+
Press_mm_hg+RH_out+Windspeed+
Tdewpoint+NSM+ WeekStatus+ Day_of_week,
data = Data0, num.trees = 5000)
# Rows of the test file that belong to the pure-forecast week.
n_l_2 <-which(Data1$date>as.Date("2016-05-19 23:50:00"))
futur = Data1[n_l_2,]
# Predict on the future week.
prediction = predict(rdf2, data=futur)
# Store the predictions in the submission file.
submit[n_l_2,]$Appliances <- prediction$predictions
# Plot the forecast for the following week.
plot(Data1$date[n_l_2],submit[n_l_2,]$Appliances, col='red', type='l',xlab='Date',ylab="Prédiction de l'appliance sur la semaine suivante")
# Finally write the submission file.
write.table(submit, file="submission_lm.csv", quote=FALSE, sep=",", dec='.',row.names = FALSE)
|
a54f3c0b405cc1fef9fdf868734b07f00f4b8b9f | c5de5d072f5099e7f13b94bf2c81975582788459 | /R Extension/RMG/Utilities/Interfaces/FTR/R/FTR.performance.R | c75ada847264b4b68dad1e6fd947c5f25b2b46d3 | [] | no_license | uhasan1/QLExtension-backup | e125ad6e3f20451dfa593284507c493a6fd66bb8 | 2bea9262841b07c2fb3c3495395e66e66a092035 | refs/heads/master | 2020-05-31T06:08:40.523979 | 2015-03-16T03:09:28 | 2015-03-16T03:09:28 | 190,136,053 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,292 | r | FTR.performance.R |
FTR.performance <- function(paths, ISO.env)
{
# Evaluate FTR path performance: expand each path's auction term into daily
# settlement prices pulled from ISO.env.
# NOTE: this function looks unfinished — see the review notes near the end.
#
# paths:   data.frame of FTR paths; must contain the columns checked below,
#          and optionally a path.no column (a default is added if absent).
# ISO.env: environment with ISO data (settlement prices, HRS table, ...).
#
# NOTE(review): the order() on path.no happens BEFORE the default path.no is
# assigned below; if the column is missing, order(NULL) yields a zero-row
# frame and everything downstream operates on empty data. Consider moving
# this sort after the default assignment.
paths <- paths[order(paths$path.no), ]
must.have <- c("auction", "class.type", "sink.ptid", "source.ptid", "mw", "CP")
if (any(!(must.have %in% colnames(paths))))
stop(paste("Missing column", must.have[!(must.have %in% colnames(paths))],
"in variable paths."))
# Default path numbering when the caller did not provide one.
if (length(paths$path.no)==0){paths$path.no <- 1:nrow(paths)}
# get the start.dt/end.dt by path
aux <- data.frame(path.no=paths$path.no, FTR.AuctionTerm(auction=paths$auction))
paths <- cbind(paths, aux[,-1])
# Expand each path's [start.dt, end.dt] interval into a daily sequence.
res <- vector("list", length=nrow(aux))
names(res) <- aux$path.no
for (r in 1:nrow(aux)){
res[[r]] <- seq(aux[r,2], aux[r,3], "day")
}
res <- data.frame(melt(res))
rownames(res) <- NULL
colnames(res) <- c("day", "path.no")
# Historical settlement prices for the paths (melted to long form).
hSP <- FTR.get.hSP.for.paths(paths, ISO.env, melt=T)
colnames(hSP)[3] <- "SP"
# pick only the dates you're interested in
hSP <- merge(hSP, res)
# add the number of hours per path
aux <- ISO.env$HRS
colnames(aux)[which(colnames(aux)=="contract.dt")] <- "start.dt"
paths <- merge(paths, aux)
# NOTE(review): the remainder is clearly work-in-progress — the comment below
# is truncated, merge() is called with a single argument, split() is missing
# its grouping factor, and the column name "va" looks like a truncated "value".
# add the
res <- merge(hSP)
hSP <- split(hSP[,c("day", "va")], )
}
# Expand a pair of dates into the complete daily sequence between them.
# The input is sorted first, so the earlier date always becomes the start;
# note that for inputs longer than 2 only the two smallest values are used.
.fun.expand <- function(x) {
  sorted_x <- sort(x)
  seq(sorted_x[1], sorted_x[2], by = "day")
}
|
53ab80a6ba21acab0e68c6e5f343a4a311e3c55a | 80c9ba35106e5314544610f42cbb54a26d0289b0 | /function_presence only.R | 4070799433e2ec72142153b0c5ce6278fcdb9d95 | [] | no_license | bandara10/research | 91c5d176d3aee771f61801d8ef14dcc735c16fac | e5589c6a2a78a0f6b9135e2bc15aba706f15f1c4 | refs/heads/master | 2021-01-22T18:43:47.087735 | 2021-01-16T14:58:34 | 2021-01-16T14:58:34 | 102,412,442 | 0 | 0 | null | 2017-10-31T01:06:02 | 2017-09-04T23:55:48 | R | UTF-8 | R | false | false | 4,862 | r | function_presence only.R | ###################################################################################################
###################################################################################################
#likelihood functions
# Utility functions
# logit: map a probability in (0, 1) to the log-odds (real) scale.
logit <- function(pp) {
  log(pp) - log(1 - pp)
}
# expit: inverse of logit; map a real number back to a probability.
expit <- function(eta) {
  1 / (1 + exp(-eta))
}
# function that calculates a probability of occupancy for each location in the dataset X
# Predict the IPP intensity (lambda) at every location (row) of X.
# NOTE: this deliberately masks base::predict in this script.
#
# mymodel : fitted model whose $coefs data.frame has a 'Value' column; the
#           first ncol(X) entries are the beta coefficients.
# X       : design matrix of locations.
# Returns the intensity exp(X %*% beta) as a one-column matrix.
predict <- function(mymodel, X) {
  beta <- mymodel$coefs[seq_len(ncol(X)), "Value"]
  # Occupancy probability psi = 1 - exp(-lambda * area) was once computed
  # here but is currently disabled; only the intensity is returned.
  exp(X %*% beta)
}
#Function that fits IPP model
# Fit the presence-background (thinned) inhomogeneous Poisson process model
# by minimising negLL.po with BFGS.
#
# X.po, W.po     : intensity / detection design matrices at the presence-only
#                  points.
# X.back, W.back : the same covariates evaluated at the background points.
#
# Returns a list with:
#   $coefs        data.frame of parameter names, estimates and standard errors
#                 (SEs stay NA when the optimiser does not converge or the
#                 observed information matrix is ill-conditioned)
#   $convergence, $optim_message, $value  passthroughs from optim().
#
# NOTE(review): negLL.po and ObsInfo.po also read the global `area.back`
# (background cell areas); it must exist before calling this function.
pb.ipp=function(X.po, W.po,X.back, W.back){
# Human-readable parameter names: intercepts plus covariate column names.
beta.names=colnames(X.back)
beta.names[1]='beta0'
alpha.names=colnames(W.back)
alpha.names[1]='alpha0'
par.names=c(beta.names, alpha.names)
# Smallest acceptable reciprocal condition number of the Hessian; below this
# the SEs are considered unreliable and left as NA.
minrecipCondNum = 1e-6
paramGuess = c(rep(.1, ncol(X.po)), rep(-.1, ncol(W.po)))
fit.po = optim(par=paramGuess, fn=negLL.po, method='BFGS', hessian=FALSE
, X.po=X.po, W.po=W.po,X.back=X.back,W.back=W.back ) # params for likelyhood function
# calculating se with Hessian matrix
recipCondNum.po = NA
se.po = rep(NA, length(fit.po$par))
if (fit.po$convergence==0) {
# Observed information at the optimum; its inverse is the covariance matrix.
hess = ObsInfo.po(fit.po$par, X.po, W.po,X.back, W.back)
ev = eigen(hess)$values
recipCondNum.po = ev[length(ev)]/ev[1]
if (recipCondNum.po>minrecipCondNum) {
vcv = chol2inv(chol(hess))
se.po = sqrt(diag(vcv))
}
}
#printing PO results
tmp= data.frame(par.names,fit.po$par,se.po)
names(tmp)=c('Parameter name', 'Value', 'Standard error')
p=NULL
p$coefs=tmp
p$convergence=fit.po$convergence
p$optim_message=fit.po$message
p$value=fit.po$value
# print("Estimated parameters beta and alpha", quote=FALSE)
# print(p)
return(p)
}
# negative loglikelihood function for Poisson point process
# Negative log-likelihood of the (un-thinned) Poisson point process.
# param : vector of beta (intensity) coefficients only.
#
# NOTE(review): relies on globals X.pp (presence design matrix), X.back and
# area.back (background design matrix and cell areas) -- they are not
# arguments, so they must exist in the calling environment.
negLL.pp = function(param) {
beta = param[1:dim(X.pp)[2]]
lambda = exp(X.back %*% beta)  # intensity at background points
mu = lambda * area.back        # expected count per background cell
logL.pp = sum(X.pp %*% beta) - sum(mu)
(-1)*sum(logL.pp)  # optim() minimises, so return the negated log-likelihood
}
# negative loglikelihood function for thinned Poisson point process
# Negative log-likelihood of the thinned Poisson point process: presence-only
# data with detection probability p = expit(W %*% alpha).
# param : stacked coefficient vector c(beta, alpha).
#
# NOTE(review): reads the global `area.back` (background cell areas).
negLL.po = function(param, X.po, W.po,X.back, W.back) {
# Split param into intensity (beta) and detection (alpha) coefficients.
beta = param[1:dim(X.po)[2]]
alpha = param[(dim(X.po)[2]+1):(dim(X.po)[2]+dim(W.po)[2])]
# dim(X.back)
# length(beta)
# length(area.back)
lambda = exp(X.back %*% beta)  # intensity at background points
mu = lambda * area.back        # expected count per background cell
p = expit(W.back %*% alpha)    # detection probability at background points
logL.po = sum(X.po %*% beta) + sum(W.po %*% alpha) - sum(log(1 + exp(W.po %*% alpha))) - sum(mu*p)
(-1)*sum(logL.po)  # negated for minimisation by optim()
}
# Observed hessian matrix of negative loglikelihood function for thinned Poisson point process
# Observed information (Hessian of the negative log-likelihood) for the
# thinned Poisson point process, evaluated at `param` = c(beta, alpha).
# Built block-wise: beta-beta, alpha-alpha, and alpha-beta partials; only the
# lower triangle of each block is computed and then mirrored.
#
# NOTE(review): reads the global `area.back` (background cell areas).
ObsInfo.po = function(param, X.po,W.po,X.back, W.back) {
beta = param[1:dim(X.back)[2]]
alpha = param[(dim(X.back)[2]+1):(dim(X.back)[2]+dim(W.back)[2])]
lambda = exp(X.back %*% beta)
mu = lambda * area.back        # expected counts at background points
p = expit(W.back %*% alpha)    # detection probability at background points
p.po = expit(W.po %*% alpha)   # detection probability at presence points
nxcovs = length(beta)
nwcovs = length(alpha)
nparams = nxcovs + nwcovs
Hmat = matrix(nrow=nparams, ncol=nparams)
# beta partials
for (i in 1:nxcovs) {
for (j in 1:i) {
Hmat[i,j] = sum(X.back[,i] * X.back[,j] * mu * p)
Hmat[j,i] = Hmat[i,j]
}
}
# alpha partials
for (i in 1:nwcovs) {
for (j in 1:i) {
# Background contribution plus the presence-point contribution p(1-p).
Hmat[nxcovs+i, nxcovs+j] = sum(W.back[,i] * W.back[,j] * mu * p * ((1-p)^3) * (1 - exp(2 * W.back %*% alpha)) ) + sum(W.po[,i] * W.po[,j] * p.po * (1-p.po))
Hmat[nxcovs+j, nxcovs+i] = Hmat[nxcovs+i, nxcovs+j]
}
}
# alpha-beta partials
for (i in 1:nwcovs) {
for (j in 1:nxcovs) {
Hmat[nxcovs+i, j] = sum(X.back[,j] * W.back[,i] * mu * p * (1-p))
Hmat[j, nxcovs+i] = Hmat[nxcovs+i, j]
}
}
Hmat
}
# Expected hessian matrix of negative loglikelihood function for thinned Poisson point process
# Expected (Fisher) information for the thinned Poisson point process at
# `param` = c(beta, alpha).  Differs from ObsInfo.po only in the alpha-alpha
# block, where the presence-point term is replaced by its expectation.
#
# NOTE(review): unlike ObsInfo.po, this function takes only `param` and reads
# X.back, W.back and area.back from the global environment.
FisherInfo.po = function(param) {
beta = param[1:dim(X.back)[2]]
alpha = param[(dim(X.back)[2]+1):(dim(X.back)[2]+dim(W.back)[2])]
lambda = exp(X.back %*% beta)
mu = lambda * area.back        # expected counts at background points
p = expit(W.back %*% alpha)    # detection probability at background points
nxcovs = length(beta)
nwcovs = length(alpha)
nparams = nxcovs + nwcovs
Hmat = matrix(nrow=nparams, ncol=nparams)
# beta partials
for (i in 1:nxcovs) {
for (j in 1:i) {
Hmat[i,j] = sum(X.back[,i] * X.back[,j] * mu * p)
Hmat[j,i] = Hmat[i,j]
}
}
# alpha partials
for (i in 1:nwcovs) {
for (j in 1:i) {
# Second term is the expectation E[p(1-p)] weighted by mu*p (expected
# number of detected points), replacing the observed-point sum.
Hmat[nxcovs+i, nxcovs+j] = sum(W.back[,i] * W.back[,j] * mu * p * ((1-p)^3) * (1 - exp(2 * W.back %*% alpha)) ) + sum(W.back[,i] * W.back[,j] * p * (1-p) * mu * p)
Hmat[nxcovs+j, nxcovs+i] = Hmat[nxcovs+i, nxcovs+j]
}
}
# alpha-beta partials
for (i in 1:nwcovs) {
for (j in 1:nxcovs) {
Hmat[nxcovs+i, j] = sum(X.back[,j] * W.back[,i] * mu * p * (1-p))
Hmat[j, nxcovs+i] = Hmat[nxcovs+i, j]
}
}
Hmat
}
|
6eaf581bc279cffd177e2e075d3c024ede70c6a3 | 8a716d9d649c90826bc02d003aa252537ff53003 | /TP2/ex2/CAHMutations.R | 3f64958affdc2062290260d5a863bdfbce13a3f5 | [] | no_license | BenFradet/SY09 | 36dd3f4c1c1a2cd03613a317713b4a83e3b9a424 | ed96f21d3538d31341df01e4af1d734e98697641 | refs/heads/master | 2020-12-24T13:44:52.641642 | 2014-06-14T17:31:49 | 2014-06-14T17:31:49 | 18,172,748 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,414 | r | CAHMutations.R | mut <- read.table("mutations2.txt", header = F, row.names = 1)
# Agglomerative hierarchical clustering (CAH) of the species mutation
# distances; one dendrogram per linkage criterion, each saved as a PNG.
distMut <- as.dist(mut)

# Shared helper: render the dendrogram of `cl` to `file` with the common
# layout and report the saved file name (same message format as before).
save.dendrogram <- function(cl, file, titre, ...) {
  png(file, width = 500, height = 500)
  plot(cl,
       hang = -1,
       main = titre,
       xlab = "",
       ylab = "Indice",
       sub = "",
       ...)
  dev.off()
  cat(file, "sauvegardee\n")
}

# Three classical linkage criteria via hclust(); the named vector maps each
# hclust method name to the French label used in the plot title.
methodes <- c(single = "minimum", complete = "maximum", average = "moyen")
for (m in names(methodes)) {
  fichier <- paste0("DendrogrammeMutations",
                    toupper(substring(m, 1, 1)), substring(m, 2), ".png")
  titre <- paste0("Dendrogramme des especes\npar le critere du lien ",
                  methodes[[m]])
  save.dendrogram(hclust(distMut, m), fichier, titre)
}

# Ward's method via agnes() from the cluster package; which.plots = 2
# selects the dendrogram (banner is panel 1) in plot.agnes().
library(cluster)
cluster <- agnes(distMut, method = "ward")
save.dendrogram(cluster, "DendrogrammeMutationsWard.png",
                "Dendrogramme des especes\npar la methode de Ward",
                which.plots = 2)
|
7e58bb1df2e297184af5d03a50203a63d1e52881 | 151326bb8bb8252ae4524b30d31df14cef65d0c0 | /programmingr/complete.R | 4bc1ca655f938a75c464275d43d570d8b9ef981c | [] | no_license | sergiosennder/datasciencecoursera | bb4df5b42e98112656a9a2eb8baf08137a3cda30 | 747c7255073ea6b02cb2fd47764f9e9ec06ef649 | refs/heads/master | 2021-01-10T04:14:57.077573 | 2015-12-27T10:36:02 | 2015-12-27T10:36:02 | 43,112,009 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 724 | r | complete.R | if(!exists("readPollutantFile", mode="function")) {
source(paste(getwd(),"/pollutantUtils.R", sep=""))
}
complete <- function(directory, id = 1:332) {
data_frame <- data.frame(id=1:length(id), nobs=1:length(id))
index <- 1
for (cur_id in id) {
file_data <- readPollutantFile(directory, cur_id)
data_frame[index,"id"] <- cur_id
data_frame[index,"nobs"] <- nrow(file_data[complete.cases(file_data),])
debugPrint(paste("Monitor id = ", cur_id, sep=""), "debug")
debugPrint(paste("Number of complete cases = ",
data_frame[index,"nobs"], sep=""), "debug")
index <- index + 1
}
return (data_frame)
}
|
aa2c4386b365a04cef2680124ea123f229e6c715 | 2bcf4cbdf9bea03e82e930259894f95a58963f4b | /man/produccion.Rd | 13936b7da872e2b5f0f670b84ea2d52bc01d33a5 | [
"MIT"
] | permissive | Nolivera007/openhn | 4bf00653cd71d8402aa719175147ed87ff87eda8 | 24541de5bcebe2aa4f057aecec286300f244f03f | refs/heads/master | 2023-02-26T05:16:45.227380 | 2021-02-02T21:49:07 | 2021-02-02T21:49:07 | 332,574,755 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 749 | rd | produccion.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/produccion.R
\docType{data}
\name{produccion}
\alias{produccion}
\title{Producción por actividad económica.}
\format{
Un objeto de la clase \code{data.frame}
}
\source{
\href{https://www.bch.hn/}{Banco Central de Honduras (BCH)}
}
\usage{
data(produccion)
}
\description{
Datos proporcionados por el Banco Central de Honduras (BCH) que incluye
la producción (millones de lempiras)
de las actividades económicas trimestral en valores constantes.
}
\details{
\itemize{
\item id: número del 1 al 15.
\item actividad_economica: Concepto.
\item trimestre: Trimestre.
\item hnl: Millones de Lempiras.
}
}
\examples{
data(produccion)
head(produccion)
}
\keyword{datasets}
|
2b6fade21ef3437e073f192cc0540d2266c4d3a6 | c8e3fa93ea24cbcc8a447a53142a7526dc34214d | /Scripts/GetImagesForCompound.R | 1e4b91d99e99d6f2715ef365ef40ca0ccc89eb0b | [] | no_license | mas29/ClarkeLab_github | c982e617be4a2c067234af5afd5fdaf349a40618 | 52cb8a851b7e1209865e1cf163076a544250e748 | refs/heads/master | 2016-09-06T19:40:41.104117 | 2015-04-15T22:01:48 | 2015-04-15T22:01:48 | 28,983,461 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,560 | r | GetImagesForCompound.R | # Script to convert images for a compound to jpeg
# Load libraries
library("jpeg")
library("tiff")
library("animation")
# Function to get the folder structure of the archive directory for
# year/month/day levels.  Returns a character vector of "<year-month>/<day>"
# relative paths, one per experiment day found under the global `archive_dir`.
get_experiment_days <- function() {
  # Experiment years/months (top-level folders of the archive)
  expt_yrs_months <- list.files(path = archive_dir)
  # For each year/month, prefix its day folders with the parent folder name.
  # (lapply + unlist replaces the previous grow-in-loop c() accumulation.)
  expt_days <- unlist(lapply(expt_yrs_months, function(yr_month) {
    days <- list.files(path = paste(archive_dir, "/", yr_month, sep = ""))
    paste(yr_month, "/", days, sep = "")
  }))
  return(expt_days)
}
# Function to get the folder structure of the archive directory at the
# hours/mins level.  Returns a list named by experiment day; each element is
# the vector of capture-time folder names under archive_dir/<day>.
# NOTE(review): callers index this list positionally and by name, so the
# names and order must match `expt_days` -- preserved here via setNames().
get_experiment_hours_and_mins <- function(expt_days) {
  # One list element per day (lapply replaces the previous grow-in-loop).
  expt_hrs_mins <- lapply(expt_days, function(day) {
    list.files(path = paste(archive_dir, day, sep = ""))
  })
  as.list(setNames(expt_hrs_mins, expt_days))
}
# Return the all-digit folder names (plate numbers) found in the first
# day/time folder of the archive -- e.g. folders "282".."286" map to
# plates 1..5.  Reads the global `archive_dir`.
get_plate_nums_in_archive_dir <- function(expt_days, expt_hrs_mins) {
  first_capture_dir <- paste(archive_dir, expt_days[1], "/",
                             expt_hrs_mins[[1]][1], sep = "")
  entries <- list.files(path = first_capture_dir)
  entries[grep("^[[:digit:]]*$", entries)]
}
# Function to get the images corresponding to the compound of interest:
# converts every archived TIFF for `compound` to JPEG and stages the results
# in the shiny "explore" app's www/ directory.  Called for its side effects.
#
# Relies on globals: `data_wide` (maps Compound -> Plate/Position),
# `image_types`, `image_type_names`, `time_elapsed`, `archive_dir`, plus the
# archive-walking helpers above, and readTIFF/writeJPEG from tiff/jpeg.
get_images <- function(compound) {
  # Locate the compound within the archive structure.
  expt_days <- get_experiment_days() # Archive structure
  expt_hrs_mins <- get_experiment_hours_and_mins(expt_days) # Archive structure
  plate_nums <- get_plate_nums_in_archive_dir(expt_days, expt_hrs_mins) # Plate folder names
  compound_rows <- data_wide[data_wide$Compound == compound, ]
  position <- compound_rows$Position[1] # Well position of compound in plate
  plate <- compound_rows$Plate[1]       # Plate index (1-based) into plate_nums
  # File-name suffix per image type, named by the human-readable type name.
  suffixes <- as.list(setNames(
    lapply(image_types, function(type) paste("-1-", type, ".tif", sep = "")),
    image_type_names))
  # Remove all previously staged files from the shiny "explore" www directory.
  # Bug fix: full.names must be an argument of list.files(), not file.remove()
  # -- the old do.call() passed it to file.remove, so only bare file names
  # (wrong paths) plus a bogus "TRUE" entry were targeted.
  old_files <- list.files("Scripts/shiny_scripts/explore/www/", full.names = TRUE)
  if (length(old_files) > 0) {
    file.remove(old_files)
  }
  # Convert each timepoint's TIFFs to JPEG, named by type and elapsed time.
  plate_name_in_archive <- plate_nums[plate] # Invariant across the loops
  count <- 1 # Index into time_elapsed, advanced once per capture time
  for (i in seq_along(expt_days)) { # For each experiment day
    for (j in seq_along(expt_hrs_mins[[i]])) { # For each capture time that day
      for (x in seq_along(image_types)) {
        image_dir <- paste(archive_dir, expt_days[i], "/", expt_hrs_mins[[i]][j], "/",
                           plate_name_in_archive, "/", toupper(position), suffixes[[x]],
                           sep = "")
        img <- suppressWarnings(readTIFF(image_dir, native = TRUE))
        writeJPEG(img,
                  target = paste("Scripts/shiny_scripts/explore/www/", image_types[x],
                                 "_t_", as.character(time_elapsed[count]), ".jpeg",
                                 sep = ""),
                  quality = 1)
      }
      count <- count + 1
    }
  }
}
176330425c8f6ed0ddffb9d8f0695372a51828f0 | 4853ece98a8d98418d9b881bef404ac358946eee | /spiral_Rcode.R | 12056cb1f80fc4b1cc58dfd588bff3891017d2bb | [] | no_license | irene1014/benchmarking-study-cluster-analysis | d9c0bdc91a0e5303916d4c38cbc4cfab4680f38d | 414d73d7a57adf9648746587a48029db9da8827c | refs/heads/master | 2022-11-06T14:51:03.620597 | 2020-06-20T20:37:48 | 2020-06-20T20:37:48 | 236,908,422 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,548 | r | spiral_Rcode.R | #####################################################
#####################################################
######## Simulated Dataset 2: Spiral Data ###########
#####################################################
# Benchmarks spectral clustering, k-means and DBSCAN on simulated spiral
# data: run time once, then external validity indices over 20 replicates.
# NOTE(review): ARI() and extCriteria() are not provided by the four packages
# loaded below -- presumably aricode/mclust and clusterCrit are attached
# elsewhere; verify before running standalone.
library(mlbench)
library(dbscan)
library(kernlab)
library(matrixStats)
set.seed(120)
spiralData <- mlbench.spirals(n = 400, cycles = 2.5, sd = 0.025)
# Finding optimal eps value for dbscan
# (4-NN distance plot: the elbow of the sorted 4th-NN distances suggests eps.)
knn_matrix <- kNN(spiralData$x,4)
knn_matrix_new <- sort(knn_matrix$dist[,4])
plot(order(knn_matrix_new),knn_matrix_new,main="Optimized Eps using KNNS", xaxt = 'n')
axis(1, at = seq(0, 400, by = 10))
# There is no obvious elbow point but we can approximate our optimal eps = 0.22
knn_matrix_new[255]
# We are calculating the run time of each algorithm on spiral data.
start_time_spec <- Sys.time()
spec_spirals <- specc(spiralData$x, centers = 2)
end_time_spec <- Sys.time()
start_time_kmeans <- Sys.time()
kmean_spirals <- kmeans(spiralData$x, centers = 2, nstart = 50)
end_time_kmeans <- Sys.time()
start_time_dbscan <- Sys.time()
dbscan_spirals <- dbscan::dbscan(spiralData$x, eps = 0.22, minPts = 4)
end_time_dbscan <- Sys.time()
# Creating a table with run time results
spiral_timed_matrix <- matrix(0,1,3)
spiral_timed_matrix[1,1] <- round(end_time_spec - start_time_spec, digits = 4)
spiral_timed_matrix[1,2] <- round(end_time_kmeans - start_time_kmeans, digits = 4)
spiral_timed_matrix[1,3] <- round(end_time_dbscan - start_time_dbscan, digits = 4)
colnames(spiral_timed_matrix) <- c("Spectral", "K-means", "DBSCAN")
# We can repeat the simulation 20 times to find the means and standard deviations
# of the three indices.
# Rows = replicates, columns = algorithms, for each of ARI / Jaccard / FM.
ARI_matrix <- matrix(0,20,3)
JAC_matrix <- matrix(0,20,3)
FM_matrix <- matrix(0,20,3)
colnames(ARI_matrix) <- c("spectral", "kmeans", "dbscan")
colnames(JAC_matrix) <- c("spectral", "kmeans", "dbscan")
colnames(FM_matrix) <- c("spectral", "kmeans", "dbscan")
set.seed(120)
for(i in 1:20)
{
# Fresh simulated dataset and cluster assignments per replicate.
spiralData <- mlbench.spirals(n = 400, cycles = 2.5, sd = 0.025)
spec_spirals <- specc(spiralData$x, centers = 2)
kmean_spirals <- kmeans(spiralData$x, centers = 2, nstart = 50)
dbscan_spirals <- dbscan::dbscan(spiralData$x, eps = 0.22, minPts = 4)
ARI_matrix[i,1] <- ARI(spiralData$classes, spec_spirals)
ARI_matrix[i,2] <- ARI(spiralData$classes, kmean_spirals$cluster)
ARI_matrix[i,3] <- ARI(spiralData$classes, dbscan_spirals$cluster)
JAC_matrix[i,1] <- extCriteria(as.integer(spiralData$classes), as.integer(spec_spirals), "Jaccard")$jaccard
JAC_matrix[i,2] <- extCriteria(as.integer(spiralData$classes), kmean_spirals$cluster, "Jaccard")$jaccard
JAC_matrix[i,3] <- extCriteria(as.integer(spiralData$classes), dbscan_spirals$cluster, "Jaccard")$jaccard
FM_matrix[i,1] <- extCriteria(as.integer(spiralData$classes), as.integer(spec_spirals), "Folkes")$folkes_mallows
FM_matrix[i,2] <- extCriteria(as.integer(spiralData$classes), kmean_spirals$cluster, "Folkes")$folkes_mallows
FM_matrix[i,3] <- extCriteria(as.integer(spiralData$classes), dbscan_spirals$cluster, "Folkes")$folkes_mallows
print(i)
}
# Mean and std-dev of each index across the 20 replicates, per algorithm.
spiral_results <- round(rbind(colMeans(ARI_matrix), colSds(ARI_matrix),
colMeans(JAC_matrix),colSds(JAC_matrix),
colMeans(FM_matrix),colSds(FM_matrix)), 4)
rownames(spiral_results) <- c('ARI Mean', 'ARI Std Dev',
'JAC Mean', 'JAC Std Dev',
'FM Mean', 'FM Std Dev')
# This is the final table with the mean and std deviations of each index for all
# three clustering algorithms.
spiral_results
fcd938e2563db2d458903bfb92fbab99e5e62d74 | 1fc02d5293e23639d667acc9c228b761478206e2 | /R/ADDIS-spending.R | 8751187dd275ae331fd2b5e8262d533404c774ef | [] | no_license | dsrobertson/onlineFDR | caf7fa9d6f52531170b3d5caa505a15c87d6db11 | 2e5a3eaf9cf85d2c04a587ad3dd8783f66435159 | refs/heads/master | 2023-04-29T11:25:12.532739 | 2023-04-12T10:30:23 | 2023-04-12T10:30:23 | 129,420,795 | 14 | 4 | null | 2023-04-12T10:33:39 | 2018-04-13T15:27:02 | R | UTF-8 | R | false | false | 5,925 | r | ADDIS-spending.R | #' ADDIS-spending: Adaptive discarding algorithm for online FWER control
#'
#' Implements the ADDIS algorithm for online FWER control, where ADDIS stands
#' for an ADaptive algorithm that DIScards conservative nulls, as presented by
#' Tian and Ramdas (2021). The procedure compensates for the power loss of
#' Alpha-spending, by including both adaptivity in the fraction of null
#' hypotheses and the conservativeness of nulls.
#'
#' The function takes as its input either a vector of p-values, or a dataframe
#' with three columns: an identifier (`id'), p-value (`pval'), and lags, if the
#' dependent version is specified (see below). Given an overall significance
#' level \eqn{\alpha}, ADDIS depends on constants \eqn{\lambda} and \eqn{\tau},
#' where \eqn{\lambda < \tau}. Here \eqn{\tau \in (0,1)} represents the
#' threshold for a hypothesis to be selected for testing: p-values greater than
#' \eqn{\tau} are implicitly `discarded' by the procedure, while \eqn{\lambda
#' \in (0,1)} sets the threshold for a p-value to be a candidate for rejection:
#' ADDIS-spending will never reject a p-value larger than \eqn{\lambda}. The
#' algorithms also require a sequence of non-negative non-increasing numbers
#' \eqn{\gamma_i} that sum to 1.
#'
#' The ADDIS-spending procedure provably controls the FWER in the strong sense
#' for independent p-values. Note that the procedure also controls the
#' generalised familywise error rate (k-FWER) for \eqn{k > 1} if \eqn{\alpha} is
#' replaced by min(\eqn{1, k\alpha}).
#'
#' Tian and Ramdas (2021) also presented a version for handling local
#' dependence. More precisely, for any \eqn{t>0} we allow the p-value \eqn{p_t}
#' to have arbitrary dependence on the previous \eqn{L_t} p-values. The fixed
#' sequence \eqn{L_t} is referred to as `lags', and is given as the input
#' \code{lags} for this version of the ADDIS-spending algorithm.
#'
#' Further details of the ADDIS-spending algorithms can be found in Tian and
#' Ramdas (2021).
#'
#' @param d Either a vector of p-values, or a dataframe with three columns: an
#' identifier (`id'), p-value (`pval'), and lags (`lags').
#'
#' @param alpha Overall significance level of the procedure, the default is
#' 0.05.
#'
#' @param gammai Optional vector of \eqn{\gamma_i}. A default is provided with
#' \eqn{\gamma_j} proportional to \eqn{1/j^(1.6)}.
#'
#' @param lambda Optional parameter that sets the threshold for `candidate'
#' hypotheses. Must be between 0 and 1, defaults to 0.25.
#'
#' @param tau Optional threshold for hypotheses to be selected for testing. Must
#' be between 0 and 1, defaults to 0.5.
#'
#' @param dep Logical. If \code{TRUE} runs the version for locally dependent
#' p-values
#'
#' @param display_progress Logical. If \code{TRUE} prints out a progress bar for the algorithm runtime.
#'
#' @return \item{out}{A dataframe with the original p-values \code{pval}, the
#' adjusted testing levels \eqn{\alpha_i} and the indicator function of
#' discoveries \code{R}. Hypothesis \eqn{i} is rejected if the \eqn{i}-th
#' p-value is less than or equal to \eqn{\alpha_i}, in which case \code{R[i] =
#' 1} (otherwise \code{R[i] = 0}).}
#'
#'
#' @references Tian, J. and Ramdas, A. (2021). Online control of the familywise
#' error rate. \emph{Statistical Methods for Medical Research} 30(4):976–993.
#'
#'
#' @seealso
#'
#' \code{\link{ADDIS}} provides online control of the FDR.
#'
#'
#' @examples
#' sample.df <- data.frame(
#' id = c('A15432', 'B90969', 'C18705', 'B49731', 'E99902',
#' 'C38292', 'A30619', 'D46627', 'E29198', 'A41418',
#' 'D51456', 'C88669', 'E03673', 'A63155', 'B66033'),
#' pval = c(2.90e-08, 0.06743, 0.01514, 0.08174, 0.00171,
#' 3.60e-05, 0.79149, 0.27201, 0.28295, 7.59e-08,
#' 0.69274, 0.30443, 0.00136, 0.72342, 0.54757),
#' lags = rep(1,15))
#'
#' ADDIS_spending(sample.df) #independent
#'
#' ADDIS_spending(sample.df, dep = TRUE) #Locally dependent
#'
#' @export
ADDIS_spending <- function(d, alpha = 0.05, gammai, lambda = 0.25, tau = 0.5,
                           dep = FALSE, display_progress = FALSE) {
    # Normalise/validate the input (vector or data.frame of p-values).
    d <- checkPval(d)

    if (is.data.frame(d)) {
        pval <- d$pval
    } else if (is.vector(d)) {
        pval <- d
    } else {
        stop("d must either be a dataframe or a vector of p-values.")
    }

    # Parameter sanity checks; order preserved so the first offending
    # argument is the one reported.
    if (alpha <= 0 || alpha > 1) stop("alpha must be between 0 and 1.")
    if (lambda <= 0 || lambda > 1) stop("lambda must be between 0 and 1.")
    if (tau <= 0 || tau > 1) stop("tau must be between 0 and 1.")
    if (lambda >= tau) stop("lambda must be less than tau.")

    N <- length(pval)

    if (missing(gammai)) {
        # Default spending sequence: gamma_j proportional to 1/j^1.6,
        # scaled so the infinite series sums to 1.
        gammai <- 0.4374901658/(seq_len(N)^(1.6))
    } else if (any(gammai < 0)) {
        stop("All elements of gammai must be non-negative.")
    } else if (sum(gammai) > 1) {
        stop("The sum of the elements of gammai must not be greater than 1.")
    }

    if (dep) {
        # Locally dependent p-values: requires a 'lags' column in d.
        checkStarVersion(d, N, "dep")
        out <- addis_spending_dep_faster(pval,
                                         d$lags,
                                         gammai,
                                         alpha = alpha,
                                         lambda = lambda,
                                         tau = tau,
                                         display_progress = display_progress)
    } else {
        out <- addis_spending_faster(pval,
                                     gammai,
                                     alpha = alpha,
                                     lambda = lambda,
                                     tau = tau,
                                     display_progress = display_progress)
    }

    # Rejection indicator as 0/1 numeric, matching the documented output.
    out$R <- as.numeric(out$R)
    out
}
a45c89db298a8b18c90a5d1aaa6bcb105c2cc846 | 76df3746402d3e8c842a9e8d2bc6bb72fc6791be | /R/hilbert_minmax_function.R | 03a0b6521db1ae09a6f648a67c3c8de46f83d374 | [] | no_license | htsikata/projects | 010062169fc4b5019a1d3fe07b95f6c71572e130 | eda6bdc510249cfdab94e837273944838d12a903 | refs/heads/master | 2021-01-20T07:48:57.878518 | 2014-11-05T05:09:14 | 2014-11-05T05:09:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 899 | r | hilbert_minmax_function.R |
## h phase
library(seewave)
t = seq(0,80,.01)
y = sin(pi * t/40) + sin(2* pi * t/40) + sin(6 * pi * t/40)
# Locate the extrema (minima and maxima) of signal y via jumps in its
# Hilbert-transform instantaneous phase: a phase jump larger than pi between
# consecutive samples marks an extremum.
#
# NOTE(review): `t` (the time axis) is a free variable read from the global
# environment -- length(t) must equal length(y) for the indexing to be valid.
# Returns a list: $minindex and $maxindex (each a two-column matrix of the
# duplicated index vector) and $nextreme (total count of extrema).
hil = function(y) ## t is a free variable
{
y = y-mean(y)      # remove the DC offset before the transform
h = hilbert(y,0)   # analytic signal (seewave::hilbert)
phase = Arg(h)     # instantaneous phase
idx = seq(1,length(t),1)
D = diff(phase)
minindex = idx[abs(D)>pi]   # phase wraps (>pi jumps) mark minima
tmin = t[minindex]
min_pts = y[minindex]
# Repeat on the negated signal, so its minima are the original maxima.
y = -y
h = hilbert(y,0)
phase = Arg(h)
idx = seq(1,length(t),1)
D = diff(phase)
maxindex = idx[abs(D)>pi]
tmax = t[maxindex]
max_pts = -y[maxindex]
nextreme=length(maxindex)+length(minindex)  # total number of extrema found
#****************
# Each index vector is duplicated into a two-column matrix -- presumably for
# a downstream consumer expecting matrix input; verify against callers.
minindex=(cbind(minindex,minindex, deparse.level=0));
maxindex=(cbind(maxindex,maxindex, deparse.level=0));
#*****************
#points(tmax,max_pts,pch=0,lwd=4,col=4)
return(list(minindex=minindex, maxindex=maxindex,nextreme=nextreme));
}
|
64a380086c2e1c76e6252dcc42376be787e53d0d | 46f12da014c71be8d1e10e5b9d3b30a3f64baf63 | /Plot1.R | ea30e812ed44d42250601f8a4aca987aa0546324 | [] | no_license | fedossa/ExData_Plotting1 | 0393502c55e40eccfdaea69d2b4a8d9ecc24f0d7 | 1a624ab553d1ec0e05d380747b4cb19dcea14b96 | refs/heads/master | 2020-12-29T06:22:39.441265 | 2020-02-12T18:16:28 | 2020-02-12T18:16:28 | 238,489,734 | 0 | 0 | null | 2020-02-05T15:57:49 | 2020-02-05T15:57:47 | null | UTF-8 | R | false | false | 228 | r | Plot1.R | # get cleaned dataset
# Load and clean the power-consumption dataset into `Data`.
source("Data Prep.R")
# Plot 1: histogram of global active power, saved to a 480x480 PNG.
png("Plot1.png", width = 480, height = 480)
hist(Data$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
4647c42e70685f30cf885457db2a450efabef37c | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/196_2/rinput.R | fa937aad2eb5b79d3618194d776bfaa84452f710 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 133 | r | rinput.R | library(ape)
# Read the Newick tree, remove its root, and write the unrooted tree back
# out (read.tree/unroot/write.tree are from the ape package loaded above).
testtree <- read.tree("196_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="196_2_unrooted.txt")
57143821b17ca0b2debeecaee544f5624fc3a1b0 | 4b7e1c30467c953a57a672d1cb555626f2cc85d9 | /plot2.R | 3720c6534d347b2f174a5db9571e32b82a9eb980 | [] | no_license | maitry-shah67/ExData_Plotting1 | c9b70bce2035443453c952075a7df70b1a7ab384 | 581c06be2c9f4d412e1a688ecbd7fead962885f0 | refs/heads/master | 2022-11-06T06:17:44.153531 | 2020-06-29T05:19:59 | 2020-06-29T05:19:59 | 275,737,176 | 0 | 0 | null | 2020-06-29T05:17:18 | 2020-06-29T05:17:18 | null | UTF-8 | R | false | false | 725 | r | plot2.R | #reading the dataset
# Read the full power-consumption dataset (semicolon-separated, "." decimals).
plot2 = read.table("household_power_consumption.txt",header = TRUE,sep = ";",stringsAsFactors = FALSE,dec = ".")
# Subset to the two days of interest.
plot2 = subset(plot2,Date == "1/2/2007" | Date == "2/2/2007")
# Combine Date and Time into POSIXct timestamps.
# NOTE(review): "India" is not a valid Olson timezone name ("Asia/Kolkata"
# would be); R treats it as an unknown zone here -- confirm intent.
datetime = as.POSIXct(paste(plot2$Date,plot2$Time,sep = " "),"%d/%m/%Y %H:%M:%S",tz = "India")
# Coerce the readings to numeric for plotting.
global_active_power = as.numeric(plot2$Global_active_power)
# Render the line chart to a 480x480 PNG.
png("plot2.png",width = 480,height = 480)
# Bug fix: removed the trailing comma (it creates an empty argument) and
# corrected the "killowatts" typo so the label matches Plot1.R.
plot(datetime,global_active_power,type = "l",xlab = "",ylab = "Global Active Power (kilowatts)")
# Close the PNG device so the file is flushed to disk.
dev.off()
ed04e488caafe9f4c1f93f8cf53ffee60db0adc6 | 871795af955282150cebba237e9b800ac5060eb4 | /problem5.R | ab34179684963feb714aa8d07fceb212d2d16eb4 | [] | no_license | JeffreyLinWeiYou/EE5324701_HW1 | 3ed156c08a37a784495aabfb7698d3605ea5a857 | 74eb3b2f87c503fea14b97ee397cf93c21ab128c | refs/heads/master | 2021-01-19T22:53:26.064902 | 2017-04-20T16:46:34 | 2017-04-20T16:46:34 | 88,885,534 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,093 | r | problem5.R | library(datasets)
a <- anscombe
# Ordinary least-squares fit of y on x.
# Returns a list with the slope ($b) and intercept ($a).
fitlse <- function(x, y) {
  x_centered <- x - mean(x)
  slope <- sum((y - mean(y)) * x_centered) / sum(x_centered^2)
  list(b = slope, a = mean(y) - slope * mean(x))
}
# Fit and visualise a least-squares line for each of Anscombe's quartets.
library(datasets)
a <- anscombe
par(mfrow=c(2,2))
f1 = fitlse(a$x1,a$y1)
f2 = fitlse(a$x2,a$y2)
f3 = fitlse(a$x3,a$y3)
f4 = fitlse(a$x4,a$y4)
plot(a$x1,a$y1, main=paste("Dataset One"))
abline(a=f1$a,b=f1$b)
plot(a$x2,a$y2, main=paste("Dataset Two"))
abline(a=f2$a,b=f2$b)
plot(a$x3,a$y3, main=paste("Dataset Three"))
abline(a=f3$a,b=f3$b)
plot(a$x4,a$y4, main=paste("Dataset Four"))
abline(a=f4$a,b=f4$b)
par(mfrow=c(1,1))
# Dataset Two is curved: fit a quadratic (y ~ x + x^2) via lm() and
# overlay the fitted curve.  Sorting by x keeps lines() monotone in x.
x = a$x2
y = a$y2
x_sorted = sort.int(x,index.return=T)
x = x_sorted$x
y = y[x_sorted$ix]
z = x^2
fit2 = lm(y ~ x + z)
plot(a$x2,a$y2, main=paste("Dataset Two_FIT"))
lines(x = x,y=fitted(fit2))
# Linear prediction at x = 10 for each dataset (intercept + slope * 10).
pred1 = f1$a + f1$b * 10
pred2 = f2$a + f2$b * 10
pred3 = f3$a + f3$b * 10
pred4 = f4$a + f4$b * 10
cat('value:10 in Dataset 1 =',pred1)
cat('value:10 in Dataset 2 =',pred2)
cat('value:10 in Dataset 3 =',pred3)
cat('value:10 in Dataset 4 =',pred4)
|
b6fd1d8df2949b23090ece17d6c580e6501c6463 | b53235b3eb41adf731df65e1b78b4819576bec98 | /scripts/function_two.R | 1147d31a9bcb44bd8496ef767467a050ce168623 | [
"MIT"
] | permissive | amicha23/University-Data | ccc9ab0ff5bb4dfd728e81d12c9339c3dbe9e865 | 52d8d3a7bfd58e3f63ff9b2c7ecfd4cdc9598a80 | refs/heads/master | 2022-12-09T21:16:04.523979 | 2020-09-15T07:07:40 | 2020-09-15T07:07:40 | 271,671,293 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,130 | r | function_two.R | # Importing necessary libraries.
# Join three university datasets by institution name and summarise academic
# performance, cost and diversity measures.
library("jsonlite")
library("openxlsx")
library("dplyr")
# Loading in the necessary data frames (IPEDS spreadsheet, US News JSON,
# College Scorecard CSV).
df1 <- read.xlsx("data/IPEDS_data.xlsx")
df2 <- data.frame(fromJSON(txt = "data/schoolInfo.json"))
df3 <- read.csv("data/Most-Recent-Cohorts-All-Data-Elements.csv",
stringsAsFactors = FALSE)
# Joining the data based on the name of the university: inner join keeps
# schools present in both Scorecard and IPEDS; the US News columns are then
# attached where available (left join).
join_results1 <- inner_join(df3, df1, by = c("INSTNM" = "Name"))
result_df <- left_join(join_results1, df2, by = c("INSTNM" = "displayName"))
# Creating a summary of the data based on name, academic performance, and
# ethnic diversity.
summary_info <- result_df %>%
select("INSTNM", "SAT_AVG_ALL", "act.avg", "hs.gpa.avg",
"Percent.of.total.enrollment.that.are.American.Indian.or.Alaska.Native",
"Percent.of.total.enrollment.that.are.Asian",
"Percent.of.total.enrollment.that.are.Black.or.African.American",
"Percent.of.total.enrollment.that.are.Hispanic/Latino",
"Percent.of.total.enrollment.that.are.White",
"Percent.of.total.enrollment.that.are.two.or.more.races",
"Percent.of.total.enrollment.that.are.Race/ethnicity.unknown",
"Percent.of.total.enrollment.that.are.Nonresident.Alien",
"Percent.of.total.enrollment.that.are.Asian/Native.Hawaiian/Pacific.Islander",
"Percent.of.total.enrollment.that.are.women",
"overallRank",
"acceptance.rate",
"tuition",
"STABBR")
# Summarizing the data based on the academic ratings and tuition
# (one row of NA-robust averages across all universities).
summarized_info <- summary_info %>%
summarize(
total_universities = nrow(summary_info),
ave_tuition = mean(tuition, na.rm = TRUE),
ave_gpa = mean(hs.gpa.avg, na.rm = TRUE),
ave_SAT_score = mean(as.numeric(SAT_AVG_ALL), na.rm = TRUE),
ave_ACT_score = mean(act.avg, na.rm = TRUE),
ave_acceptance_rate = mean(acceptance.rate, na.rm = TRUE),
ave_ranking = mean(overallRank, na.rm = TRUE),
)
# Getting a table with the school(s) with the highest acceptance rate.
highest_accpetance_school <- summary_info %>%
filter(acceptance.rate == max(acceptance.rate, na.rm = TRUE))
|
308a7a229702b18c6be9dbd4cf44773a76eacdac | d60a4a66919a8c54d29a4677574b418107b4131d | /man/periodogram.Rd | 51129e672b5f24ace014e844581af13e803911fd | [] | no_license | cran/tsapp | 65203e21a255e832f0ad9471f9ee308793eb7983 | f2679a3d5ee0e3956a4ba013b7879324f77cf95f | refs/heads/master | 2021-11-12T21:18:18.835475 | 2021-10-30T10:30:02 | 2021-10-30T10:30:02 | 248,760,597 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 887 | rd | periodogram.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frequdom.r
\name{periodogram}
\alias{periodogram}
\title{\code{periodogram} determines the periodogram of a time series}
\usage{
periodogram(y, nf, ACF = FALSE, type = "cov")
}
\arguments{
\item{y}{(n,1) vector, the time series or an acf at lags 0,1,...,n-1}
\item{nf}{scalar, the number of equally spaced frequencies; not necessay an integer}
\item{ACF}{logical, FALSE, if y is ts, TRUE, if y is acf}
\item{type}{c("cov","cor"), area under spectrum, can be variance or normed to 1.}
}
\value{
out (floor(nf/2)+1,2) matrix, the frequencies and the periodogram
}
\description{
\code{periodogram} determines the periodogram of a time series
}
\examples{
data(WHORMONE)
## periodogram at Fourier frequencies and frequencies 0 and 0.5
out <-periodogram(WHORMONE,length(WHORMONE)/2,ACF=FALSE,type="cov")
}
|
902546f39c17667364452d8c83b98cd766fa9f87 | 6bbe96ebaa3d52d8c96635be1f70b0a8d07a29a3 | /tcgaDataProcessing.R | 4926cb6b51b62e277d1377efebd3cd0b04b42993 | [] | no_license | carlosback/Stat579Project | 8710f618518b6edab6b3f1a87a65da3f0988fabe | e8eea3e8461ccdddb34eb8fc653a521c78a0339f | refs/heads/master | 2020-04-08T10:38:12.915501 | 2018-11-25T07:32:29 | 2018-11-25T07:32:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,500 | r | tcgaDataProcessing.R | library(TCGAbiolinks)
library(dplyr)
library(DT)
library(data.table)
library(plyr)
packageVersion("TCGAbiolinks")
query <- GDCquery(project = "TCGA-CHOL", data.category = "Clinical", file.type = "xml")
GDCdownload(query)
clinical <- GDCprepare_clinic(query, clinical.info = "patient")
queryB <- GDCquery(project = "TCGA-COAD", data.category = "Biospecimen", file.type = "xml")
queryB <- GDCquery(project = "TCGA-BRCA", data.category = "Biospecimen", file.type = "xml")
GDCdownload(queryB,method = "client")
aliquot <- GDCprepare_clinic(queryB, clinical.info = c("aliquot"))
sample <- GDCprepare_clinic(queryB, clinical.info = c("sample"))
bio_patient <- GDCprepare_clinic(queryB, clinical.info = c("bio_patient"))
analyte <- GDCprepare_clinic(queryB, clinical.info = c("analyte"))
portion <- GDCprepare_clinic(queryB, clinical.info = c("portion"))
protocol <- GDCprepare_clinic(queryB, clinical.info = c("protocol"))
slide <- GDCprepare_clinic(queryB, clinical.info = c("slide"))
hbp<-as.data.frame(names(bio_patient))
hanalyte<-as.data.frame(names(analyte))
hportion<-as.data.frame(names(portion))
hprot<-as.data.frame(names(protocol))
hslid<-as.data.frame(names(slide))
join(aliquot,bio_patient)
length(unique(aliquot$bcr_patient_barcode))
length(unique(sample$bcr_patient_barcode))
length(unique(bio_patient$bcr_patient_barcode))
length(unique(analyte$bcr_patient_barcode))
length(unique(portion$bcr_patient_barcode))
length(unique(protocol$bcr_patient_barcode))
length(unique(slide$bcr_patient_barcode))
length(Reduce(intersect,list(aliquot$bcr_patient_barcode,sample$bcr_patient_barcode,bio_patient$bcr_patient_barcode,analyte$bcr_patient_barcode,portion$bcr_patient_barcode,protocol$bcr_patient_barcode,slide$bcr_patient_barcode)))
#remove duplicated rows from all tables
bio_patient_nr<-distinct(bio_patient)
aliquot_nr<-distinct(aliquot)
samp_nr<-distinct(sample)
analyte_nr<-distinct(analyte)
portion_nr<-distinct(portion)
protocol_nr<-distinct(protocol)
slide_nr<-distinct(slide)
#1 join aliquot with analyte, add analyte barcode in aliquot. for aliquot barcode TCGA-3L-AA1B-01A-01D-YYYY-23, analyte barcode is TCGA-3L-AA1B-01A-01D
#more info on barcodes https://docs.gdc.cancer.gov/Encyclopedia/pages/TCGA_Barcode/
#tcga center codes https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/center-codes
aliquot_nr<-aliquot_nr%>% mutate(bcr_analyte_barcode=substr(bcr_aliquot_barcode,1,nchar(as.character(bcr_aliquot_barcode))-8))
j1<-join(analyte_nr,aliquot_nr,by="bcr_analyte_barcode")
#2 add bioportion barcode to j1 and join with bioportion
#portion: TCGA-3L-AA1B-01A-11 ; analyte: TCGA-3L-AA1B-01A-11
j1<-j1 %>% mutate(bcr_portion_barcode=substr(bcr_analyte_barcode,1,nchar(as.character(bcr_analyte_barcode))-1))
j2<-join(j1,portion_nr,by="bcr_portion_barcode")
#3 add biosample barcode to j2
#sample: TCGA-3L-AA1B-01A ; portion: TCGA-3L-AA1B-01A-11
j2<-j2 %>% mutate(bcr_sample_barcode=substr(bcr_portion_barcode,1,nchar(as.character(bcr_portion_barcode))-3))
j3<-join(j2,samp_nr,by="bcr_sample_barcode")
#finally join by biopatient
j4<-join(j3,bio_patient_nr,by="bcr_patient_barcode")
#download clinical data
queryC <- GDCquery(project = "TCGA-COAD", data.category = "Clinical", file.type = "xml")
GDCdownload(queryC)
drug<-GDCprepare_clinic(queryC, clinical.info = "drug")
admin<-GDCprepare_clinic(queryC, clinical.info = "admin")
follow_up<-GDCprepare_clinic(queryC, clinical.info = "follow_up")
radiation<-GDCprepare_clinic(queryC, clinical.info = "radiation")
patient<-GDCprepare_clinic(queryC, clinical.info = "patient")
stage_event<-GDCprepare_clinic(queryC, clinical.info = "stage_event")
new_tumor_event<-GDCprepare_clinic(queryC, clinical.info = "new_tumor_event")
admin_nr<-distinct(admin)
patient_nr<-distinct(patient)
j5<-join(j4,patient_nr,by="bcr_patient_barcode")
#keep only selected columns
#other method
clinicalBRCA <- GDCquery_clinic(project = "TCGA-BRCA", type = "clinical")
biospecimenBRCA <- GDCquery_clinic(project = "TCGA-BRCA", type = "Biospecimen")
biospecimenCOAD <- GDCquery_clinic(project = "TCGA-COAD", type = "Biospecimen")
sampdf<-head(biospecimenCOAD,10)[,c("sample_type_id","tumor_code_id","sample_id","submitter_id","portions")]
sampPor<-as.data.frame(sampdf[2, c("portions")])
#use apply to unlist columns
#Function takes a df and expands it by unlisting elements at a column
# Expand a data frame by unnesting a column of nested data frames.
#
# For every row of `df`, the element in column `colName` is expected to be a
# data frame (possibly with zero rows). Each nested data frame is bound to the
# remaining columns of its parent row; nested column names are prefixed with
# "<colName>." so they stay unique. Parent rows whose nested data frame is
# empty are dropped, as before.
#
# Args:
#   df:      data frame containing a list-column of data frames.
#   colName: name of the column to expand (character scalar).
#
# Returns:
#   A data frame with one row per nested row (empty if nothing expands).
expand <- function(df, colName) {
  # Preallocate one slot per input row; NULL slots (skipped rows) are ignored
  # by bind_rows(). This replaces the O(n^2) grow-by-binding of the previous
  # implementation.
  pieces <- vector("list", nrow(df))
  # seq_len() is safe for zero-row input (1:nrow(df) would yield c(1, 0)).
  for (i in seq_len(nrow(df))) {
    # drop = FALSE keeps thisRow a one-row data frame even if only a single
    # column remains after removing colName.
    thisRow <- df[i, !(colnames(df) %in% colName), drop = FALSE]
    tempdf <- as.data.frame(df[i, c(colName)])
    # Skip parent rows whose nested data frame has no rows.
    if (nrow(tempdf) < 1) {
      next
    }
    # Prefix nested column names so they cannot clash with parent columns.
    colnames(tempdf) <- paste0(colName, ".", colnames(tempdf))
    # cbind() recycles the single parent row across all nested rows.
    pieces[[i]] <- cbind(thisRow, tempdf)
  }
  bind_rows(pieces)
}
res<-NULL
sampdfExbrnew<-expand(biospecimenBRCA,"portions")
sampdfExanalyte<-expand(sampdfExbrnew,"portions.analytes")
sampdfExaliquot<-expand(sampdfExanalyte,"portions.analytes.aliquots")
#add patient barcode to biospecimen data
brcaTabRNA<- brcaTabRNA %>% mutate(bcr_patient_barcode=substr(submitter_id,1,nchar(as.character(submitter_id))-4))
#join clinical and biospecimen
brcaJoinedRNA<-join(clinicalBRCA,brcaTabRNA,by="bcr_patient_barcode")
|
c8d5161c8e6f2bfa580e4f2609418f333c02a598 | 85ea2cd459c7f3fc8f2dbe4880e5734277f8a9c0 | /man/IRA.Rd | 571b65514da223b91b76171af27dd798a013b53e | [] | no_license | alishinski/CRTpower | 3359cd250edc22687e46ad20f031b50ed2cf59d4 | 0609a2ac07b20239ccb619d5989d20e5f660699d | refs/heads/master | 2020-03-25T03:15:54.398373 | 2018-08-03T05:53:30 | 2018-08-03T05:53:30 | 143,331,917 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 699 | rd | IRA.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_formulas.R
\name{IRA}
\alias{IRA}
\title{Model 1.0: Individual Random assignment}
\usage{
IRA(R2, P, k, n, alpha = 0.05, power = 0.8, tails = 2)
}
\arguments{
\item{R2}{Proportion of variance explained in the outcome by the covariates}
\item{P}{Proportion of the sample randomized to the treatment}
\item{k}{Number of covariates used}

\item{n}{Total sample size}
\item{alpha}{Probability of a type I error}
\item{power}{Statistical power (1 - probability of type II error)}
\item{tails}{One or two tailed test}
}
\value{
Minimum Detectable Effect Size
}
\description{
MDES for Individual Random assignment
}
|
3af677061c158d9ef62459b7f1adf43b9704fcbb | d2a3b0d54bd54e488bf0a8a98af7b205e1d912c0 | /man/find_vars.Rd | a0c5dc4dc68d4e2fdb61fce725d7d31aa8c6b40b | [] | no_license | Ajfrick/ajfhelpR | 05feaa2e7625540455af4447271d5b77ddfd32bd | 64d8e60c64d47a9b8bef2ef5f0ecb653ac99d976 | refs/heads/master | 2023-06-24T23:24:15.562243 | 2023-06-15T15:36:12 | 2023-06-15T15:36:12 | 161,224,735 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 738 | rd | find_vars.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_vars.R
\name{find_vars}
\alias{find_vars}
\title{Find columns containing string or regular expression}
\usage{
find_vars(dat, pattern, ignore.case = TRUE)
}
\arguments{
\item{dat}{data frame, tibble, or named vector}
\item{pattern}{character vector or regular expression to search for}
\item{ignore.case}{logical for case sensitivity}
}
\value{
named vector with column or index locations with variable names
}
\description{
Find columns containing string or regular expression
}
\examples{
find_vars(mtcars, "cyl")
find_vars(1:100, "cyl")
mtcars[,find_vars(mtcars,"cyl")]
mtcars[,find_vars(mtcars,"c")]
mtcars[,find_vars(mtcars,"C",ignore.case = F)]
}
|
35579704758aaca1eef9eb6d1e80be1ee697a45b | ec98c3ff6cf5638db5c590ea9390a04b674f8f99 | /release/test/test_allele_probs_format.R | 5edef7825649d5babc1062943e9897e472da2f8e | [
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0"
] | permissive | gavinband/qctool | 37e1122d61555a39e1ae6504e4ca1c4d75f371e9 | 8d8adb45151c91f953fe4a9af00498073b1132ba | refs/heads/master | 2023-06-22T07:16:36.897058 | 2021-11-13T00:12:26 | 2021-11-13T00:12:26 | 351,933,501 | 6 | 2 | BSL-1.0 | 2023-06-12T14:13:57 | 2021-03-26T23:04:52 | C++ | UTF-8 | R | false | false | 2,778 | r | test_allele_probs_format.R | library( argparse )
parser <- ArgumentParser( description = 'Generate and test randomly generated GEN files through bgen' )
parser$add_argument(
"--iterations",
type = "integer",
nargs = 1,
help = "Number of iterations",
default = 100
)
parser$add_argument(
"--variants",
type = "integer",
nargs = 1,
help = "Number of variants",
default = 100
)
parser$add_argument(
"--max_samples",
type = "integer",
nargs = 1,
help = "Maximum number of samples to simulate",
default = 100
)
parser$add_argument(
"--qctool",
type = "character",
nargs = 1,
help = "Path to qctool executable",
default = "qctool_v2.0-dev"
)
parser$add_argument(
"--verbose",
help = "Say what we're doing",
default = FALSE,
action = "store_true"
)
opts = parser$parse_args()
chromosomes=c( sprintf( "%02d", 1:22 ), sprintf( "%d", 1:22 ), "other" )
V = data.frame(
chromosome = sample( chromosomes, opts$variants, replace = T ),
SNPID = sprintf( "SNP%d", 1:opts$variants ),
rsid = sprintf( "rs%d", 1:opts$variants ),
position = as.integer( round( runif( opts$variants, min = 0, max = 1E6 ))),
alleleA = sample( c( 'A', 'C', 'T', 'G' ), opts$variants, replace = T ),
alleleB = sample( c( 'A', 'C', 'T', 'G' ), opts$variants, replace = T )
)
# Round-trip test: simulate a random impute_allele_probs dataset, convert it
# to .bgen with qctool, convert it back, and check that both the variant
# annotation and the allele probabilities survive the round trip.
for (i in seq_len(opts$iterations)) {
  # Random sample count; the format stores two allele-probability columns
  # per sample.
  N <- sample(1:opts$max_samples, 1)
  G <- matrix(NA, nrow = opts$variants, ncol = N * 2)
  G[,] <- runif(nrow(G) * ncol(G))
  # Randomly exercise qctool's -omit-chromosome output path; without the
  # chromosome column the round-tripped file has 5 annotation columns
  # instead of 6.
  omit.chromosome <- (runif(1) > 0.5)
  cols <- 1:6
  if (omit.chromosome) {
    cols <- 2:6
  }
  filename <- tempfile()
  # The impute_allele_probs input never includes the chromosome column,
  # hence V[2:6].
  write.table(
    cbind(V[2:6], G),
    file = filename,
    col.names = FALSE,
    row.names = FALSE,
    quote = FALSE,
    sep = " "
  )
  cat(sprintf("Iteration %d, %dx%d...\n", i, nrow(G), ncol(G)))
  # Convert it to bgen.
  cmd1 <- sprintf(
    paste(
      sep = ' ',
      opts$qctool,
      '-g %s',
      '-og %s.bgen',
      '-filetype impute_allele_probs'
    ),
    filename,
    filename
  )
  if (opts$verbose) {
    cat(sprintf("Converting %s to bgen...\n", filename))
  }
  system(sprintf("%s 2>/dev/null", cmd1))
  # Convert it back to allele probs.
  filename2 <- tempfile()
  cmd2 <- sprintf(
    paste(
      sep = ' ',
      opts$qctool,
      '-g %s.bgen',
      '-og %s',
      '-ofiletype impute_allele_probs'
    ),
    filename,
    filename2
  )
  if (omit.chromosome) {
    cmd2 <- sprintf("%s -omit-chromosome", cmd2)
  }
  if (opts$verbose) {
    cat(sprintf("Converting bgen back to %s...\n", filename2))
  }
  system(sprintf("%s 2>/dev/null", cmd2))
  if (opts$verbose) {
    cat(sprintf("Reading %s...\n", filename2))
  }
  result <- read.table(filename2, header = FALSE, as.is = TRUE, sep = " ")
  # The annotation columns must round-trip exactly.
  stopifnot(length(which(result[, 1:length(cols)] != V[, cols])) == 0)
  G2 <- as.matrix(result[, (length(cols) + 1):ncol(result)])
  mode(G2) <- "numeric"
  # BUG FIX: the original asserted that differences > 1e-6 DID exist
  # ("> 0"), i.e. it could only pass when the round trip was broken. The
  # probabilities should match up to the bgen encoding precision.
  # NOTE(review): if the bgen encoding uses fewer bits than 1e-6 precision
  # requires, loosen this tolerance rather than the comparison direction.
  stopifnot(length(which(abs(G2 - G) > 1E-6)) == 0)
}
|
15aa885be9a0a7511b732b20f6cdd38ae9b3b3b4 | e7829a3d3690aa9c42eea95574ac6796765c2ee1 | /assignments/assignment_3/student_test_suite/runit.merging.R | 9166a88915f5d8f9b273fd22a2c2018d3d330415 | [] | no_license | pjhartout/Computational_Biology | b71d62e9b53ba01c5275b21f2e4c85c9c7a1cf5a | 35443c20521b2338a241d6ef5057df107db9aaa5 | refs/heads/master | 2022-04-07T15:21:53.015194 | 2020-02-09T17:03:40 | 2020-02-09T17:03:40 | 217,732,551 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 570 | r | runit.merging.R | test.get_merge_node_distance = function() {
load("matrices_prov.RData")
df = data.frame(node_sizes = c(1,1,3,2) ,row.names = c("a","b","c","d"))
md = get_merge_node_distance(df,Hdm,c("a","b"),"d")
checkEqualsNumeric(13.5, md, "Wrong merge distance when merging tips")
md = get_merge_node_distance(df,Hdm,c("a","c"),"d")
checkEqualsNumeric(9.75, md, "Wrong merge distance when merging a tip and an internal node")
md = get_merge_node_distance(df,Hdm,c("d","c"),"a")
checkEqualsNumeric(18, md, "Wrong merge distance when merging internal nodes")
} |
ae684bfb2182eb69b6b3943d36a65e6efdaf5940 | 3ba20eb1ab9c2cf32e5f74008c4832f7417303f2 | /data_processing/comorbidities/hes_data.R | 89cbda7972a267897b91a4fe201826f70e41e123 | [] | no_license | S-Dhungana/tds_7 | 89ee06730bce0ede35429023d50413024db77348 | 043e7acd4a4d03759507816c9b795e2d48d86918 | refs/heads/main | 2023-04-20T07:34:02.879030 | 2021-05-06T20:18:43 | 2021-05-06T20:18:43 | 350,891,998 | 1 | 2 | null | 2021-05-06T20:07:01 | 2021-03-23T23:59:49 | HTML | UTF-8 | R | false | false | 1,971 | r | hes_data.R | rm(list=ls())
setwd("/rds/general/project/hda_students_data/live/General/")
library(data.table)
library(ukbtools)
hes=data.frame(fread("hesin.txt", nrows=100))
# hes$epistart # start of episode date
episode_ID=paste0(hes$eid, "_", hes$ins_index)
hes_diag=data.frame(fread("hesin_diag.txt", nrows=100))
episode_ID_diag=paste0(hes_diag$eid, "_", hes_diag$ins_index)
### Create the comorbidity data set ----
## Translate the ICD codes held in hes_diag columns 5 onwards into their
## textual meanings, in place.
for (j in 5:dim(hes_diag)[2]) {
  # Columns before the 7th are looked up as ICD-9, later ones as ICD-10.
  # NOTE(review): confirm this split against the hesin_diag column layout.
  icd <- if (j < 7) 9 else 10
  for (i in seq_len(nrow(hes_diag))) {
    # ukb_icd_code_meaning() returns the code plus its meaning; the split
    # below keeps only the second whitespace-separated token, so multi-word
    # meanings are truncated to a single word -- TODO confirm intended.
    code_meaning <- ukb_icd_code_meaning(icd.code = hes_diag[i, j], icd.version = icd)
    meaning <- unlist(strsplit(as.character(code_meaning[2]), split = " ", fixed = TRUE))[2]
    hes_diag[i, j] <- meaning
  }
}
# Position (1..4) of the non-NA diagnosis within hes_diag[, 5:8].
# NOTE(review): assumes exactly one non-NA diagnosis per row; apply() would
# return a list for rows with zero or several -- confirm upstream.
column <- apply(hes_diag[, 5:8], 1, function(x) which(!is.na(x)))
columns <- column + 4 # shift to the matching column index in hes_diag itself
comor <- rep(0, nrow(hes_diag))
for (i in seq_len(nrow(hes_diag))) {
  # BUG FIX: the original read hes_diag[i, column[i]] (columns 1..4, i.e.
  # eid/ins_index/...), not the shifted diagnosis columns computed above.
  comor[i] <- hes_diag[i, columns[i]]
}
hes_diag$comor <- comor
### Create new dataframe with eid and binary comorbidity variables
#patients id
eid <- unique(hes_diag$eid)
#use unique values as column names
comorbidities <- unique(hes_diag[, 9])
#create dataframe
comorbidity_data <- as.data.frame(matrix(0, ncol = length(comorbidities) + 1, nrow = length(eid)))
colnames(comorbidity_data) <- c("eid", comorbidities)
rownames(comorbidity_data) <- eid
comorbidity_data[, 1] <- eid
# Change to 1 where patient has a comorbidity
for(i in 1:dim(hes_diag)[1]){
row_eid <- as.character(hes_diag[i, 1])
comorbidity <- as.character(hes_diag[i, 9])
comorbidity_data[row_eid, comorbidity] <- 1
}
#save data set
saveRDS(comorbidity_data,"/rds/general/project/hda_students_data/live/Group7/General/Carolina/comorbidity_data.RDS")
|
e81caedc891563eff99fc5e8cb91cc766bf1d8eb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/bfast/examples/bfastmonitor.Rd.R | fc392c762c0f5532129a12b5518a8ccdf839f3cd | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,024 | r | bfastmonitor.Rd.R | library(bfast)
### Name: bfastmonitor
### Title: Near Real-Time Disturbance Detection Based on BFAST-Type Models
### Aliases: bfastmonitor
### Keywords: ts
### ** Examples
## See Fig. 6 a and b in Verbesselt et al. (2011)
## for more information about the data time series and acknowledgements
library(zoo)
NDVIa <- as.ts(zoo(som$NDVI.a, som$Time))
plot(NDVIa)
## apply the bfast monitor function on the data
## start of the monitoring period is c(2010, 13)
## and the ROC method is used as a method to automatically identify a stable history
mona <- bfastmonitor(NDVIa, start = c(2010, 13))
mona
plot(mona)
## fitted season-trend model in history period
summary(mona$model)
## OLS-based MOSUM monitoring process
plot(mona$mefp, functional = NULL)
## the pattern in the running mean of residuals
## this illustrates the empirical fluctuation process
## and the significance of the detected break.
NDVIb <- as.ts(zoo(som$NDVI.b, som$Time))
plot(NDVIb)
monb <- bfastmonitor(NDVIb, start = c(2010, 13))
monb
plot(monb)
summary(monb$model)
plot(monb$mefp, functional = NULL)
## set the stable history period manually and use a 4th order harmonic model
bfastmonitor(NDVIb, start = c(2010, 13),
history = c(2008, 7), order = 4, plot = TRUE)
## just use a 6th order harmonic model without trend
mon <- bfastmonitor(NDVIb, formula = response ~ harmon,
start = c(2010, 13), order = 6, plot = TRUE)
summary(mon$model)
## For more info
?bfastmonitor
## TUTORIAL for processing raster bricks (satellite image time series of 16-day NDVI images)
f <- system.file("extdata/modisraster.grd", package="bfast")
library("raster")
modisbrick <- brick(f)
data <- as.vector(modisbrick[1])
ndvi <- bfastts(data, dates, type = c("16-day"))
plot(ndvi/10000)
## derive median NDVI of a NDVI raster brick
medianNDVI <- calc(modisbrick, fun=function(x) median(x, na.rm = TRUE))
plot(medianNDVI)
## helper function to be used with the calc() function
xbfastmonitor <- function(x,dates) {
ndvi <- bfastts(x, dates, type = c("16-day"))
ndvi <- window(ndvi,end=c(2011,14))/10000
## delete end of the time to obtain a dataset similar to RSE paper (Verbesselt et al.,2012)
bfm <- bfastmonitor(data = ndvi, start=c(2010,12), history = c("ROC"))
return(cbind(bfm$breakpoint, bfm$magnitude))
}
## apply on one pixel for testing
ndvi <- bfastts(as.numeric(modisbrick[1])/10000, dates, type = c("16-day"))
plot(ndvi)
bfm <- bfastmonitor(data = ndvi, start=c(2010,12), history = c("ROC"))
bfm$magnitude
plot(bfm)
xbfastmonitor(modisbrick[1], dates) ## helper function applied on one pixel
## Not run:
##D ## apply the bfastmonitor function onto a raster brick
##D library(raster)
##D timeofbreak <- calc(modisbrick, fun=function(x){
##D res <- t(apply(x, 1, xbfastmonitor, dates))
##D return(res)
##D })
##D
##D plot(timeofbreak) ## time of break and magnitude of change
##D plot(timeofbreak,2) ## magnitude of change
##D
##D ## create a KMZ file and look at the output
##D KML(timeofbreak, "timeofbreak.kmz")
## End(Not run)
|
52e5d681f3e03fdcd16f46238326bae3ee1e1e43 | 2bfd2b59474b38750f91807f42f91453406c2429 | /fticr_test/code/a-fticr_processing.R | 7ab93f2848077e168b61acd799d56826362f1ea5 | [] | no_license | kaizadp/suli2021 | 94ac3da36b35ac4033ca77517b41662d5f03b60e | 3b058ac1743c70921b3a4effe49436ae8471779b | refs/heads/master | 2023-06-25T15:02:41.437285 | 2021-07-28T18:46:46 | 2021-07-28T18:46:46 | 373,258,014 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,997 | r | a-fticr_processing.R | ## SPATIAL ACCESS
## KAIZAD F. PATEL
## 2-JULY-2020
## FTICR-PROCESSING
## 2021-06-07: modify code for CC, SULI-2021
# load packages -----------------------------------------------------------
library(tidyverse)
# load files --------------------------------------------------------------
# the data file contains m/z and intensity data for each peak
fticr_data = read.csv("fticr_test/data/SpatAccess_testdata.csv")
# the meta file contains information about each peak
fticr_meta = read.csv("fticr_test/data/SpatAccess_eMeta.csv")
# the key file contains information about each sample (sample key)
fticr_key = read.csv("fticr_test/data/fticr_key.csv")
# fticr_key -----------------------------------------------------------------
fticr_key_cleaned =
fticr_key %>%
# create a new column that includes the entire name
mutate(SampleAssignment = paste0(Suction, "-", Moisture, "-", Wetting)) %>%
# subset only the columns needed
select(FTICR_ID, Core, Suction, Moisture, Wetting, Amendments, SampleAssignment)
#
# fticr_meta ---------------------------------------------------------
## processing the meta data
meta =
fticr_meta %>%
# filter appropriate mass range
filter(Mass>200 & Mass<900) %>%
# remove isotopes
filter(C13==0) %>%
# remove peaks without C assignment
filter(C>0) %>%
# create columns for indices
dplyr::mutate(AImod = round((1+C-(0.5*O)-S-(0.5*(N+P+H)))/(C-(0.5*O)-S-N-P),4),
NOSC = round(4-(((4*C)+H-(3*N)-(2*O)-(2*S))/C),4),
HC = round(H/C,2),
OC = round(O/C,2)) %>%
# create column/s for formula
# first, create columns for individual elements
# then, combine
dplyr::mutate(formula_c = if_else(C>0,paste0("C",C),as.character(NA)),
formula_h = if_else(H>0,paste0("H",H),as.character(NA)),
formula_o = if_else(O>0,paste0("O",O),as.character(NA)),
formula_n = if_else(N>0,paste0("N",N),as.character(NA)),
formula_s = if_else(S>0,paste0("S",S),as.character(NA)),
formula_p = if_else(P>0,paste0("P",P),as.character(NA)),
formula = paste0(formula_c,formula_h, formula_o, formula_n, formula_s, formula_p),
formula = str_replace_all(formula,"NA","")) %>%
# elemental composition (CHONS, etc)
# create column/s for formula
dplyr::mutate(element_c = if_else(C>0,paste0("C"),as.character(NA)),
element_h = if_else(H>0,paste0("H"),as.character(NA)),
element_o = if_else(O>0,paste0("O"),as.character(NA)),
element_n = if_else(N>0,paste0("N"),as.character(NA)),
element_s = if_else(S>0,paste0("S"),as.character(NA)),
element_p = if_else(P>0,paste0("P"),as.character(NA)),
element_comp = paste0(element_c,element_h, element_o, element_n, element_s, element_p),
element_comp = str_replace_all(element_comp,"NA","")) %>%
# dplyr::select(Mass, formula, El_comp, Class, HC, OC, AImod, NOSC, C:P)
# assign compound classes
mutate(
class = case_when(AImod > 0.66 ~ "condensed_arom",
AImod <=0.66 & AImod >= 0.50 ~ "aromatic",
AImod < 0.50 & HC < 1.5 ~ "unsaturated/lignin",
HC >= 1.5 & N==0 ~ "aliphatic",
HC >= 1.5 & N>0 ~ "aliphatic",
HC >= 2 ~ "aliphatic"),
class = if_else(is.na(class)&!is.na(formula), "other", class)) %>%
filter(!class=="other") %>%
# select only required columns
dplyr::select(Mass, formula, element_comp, class, HC, OC, AImod, NOSC, C:P, -C13)
mass_list =
meta %>% pull(Mass)
#
# fticr_data --------------------------------------------------------------------
## make a new file called data_long, which will take the wide-form data
## and convert to long-form
data_long =
fticr_data %>%
filter(Mass %in% mass_list) %>%
pivot_longer(-Mass,
names_to = "FTICR_ID",
values_to = "intensity") %>%
mutate(presence = if_else(intensity>0, 1, 0)) %>%
filter(presence==1) %>%
# add the molecular formula column
left_join(select(meta, Mass, formula), by = "Mass") %>%
# some formulae have multiple m/z. drop the multiples
distinct(FTICR_ID, formula, presence)
data_long_key =
data_long %>%
left_join(fticr_key_cleaned, by = "FTICR_ID") %>%
group_by(SampleAssignment, formula) %>%
mutate(n = n())
## Now, create a replication filter for the peaks,
## following Sleighter et al. 2012 (10.1021/ac3018026) and Payne et al. 2009 (10.1021/jasms.8b03484)
## keep only peaks seen in 2/3 of replicates within that treatment
# first, create a separate file that gives us the no. of reps per treatment
reps =
data_long_key %>%
ungroup() %>%
distinct(Core, SampleAssignment) %>%
group_by(SampleAssignment) %>%
dplyr::summarise(reps = n())
# second, join the `reps` file to the long_key file
# and then use the replication filter
data_long_key2 =
data_long_key %>%
left_join(reps, by = "SampleAssignment") %>%
ungroup() %>%
mutate(keep = n >= (2/3)*reps) %>%
filter(keep)
data_long_trt =
data_long_key2 %>%
group_by(SampleAssignment, Suction, Moisture, Wetting, Amendments) %>%
distinct(formula, presence)
#
meta_hcoc =
meta %>%
select(formula, HC, OC)
#
# outputs -----------------------------------------------------------------
write.csv(meta, "fticr_test/data/processed/fticr_meta.csv", row.names = F)
write.csv(fticr_key_cleaned, "fticr_test/data/processed/fticr_key.csv", row.names = F)
crunch::write.csv.gz(data_long, "fticr_test/data/processed/fticr_long_core.csv.gz", row.names = F, na="")
crunch::write.csv.gz(data_long_key2, "fticr_test/data/processed/fticr_long_key.csv.gz", row.names = F, na="")
crunch::write.csv.gz(data_long_trt, "fticr_test/data/processed/fticr_long_trt.csv.gz", row.names = F, na="")
# -----------------------------------------------------------------
|
a42ea1cf32d2962850fb0059c14d92ed07f54a63 | dcaf6a9ecc81bbb3bc8dabba825263e93ed856e3 | /cds_shiny_services/RelaunchCarStat/ui.R | c758561e3ce45732264440b4dec69462fffc5b58 | [] | no_license | zir12345/shiny | 79eb6f961007abb579286a38d37f09874e2a5bba | d7d32aaed9238d3466bdbc34cf4a43c485507e2d | refs/heads/master | 2020-09-28T15:40:27.540745 | 2019-12-11T15:49:36 | 2019-12-11T15:49:36 | 226,807,689 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,753 | r | ui.R | library(shiny)
library(rsconnect)
library(ggplot2)
library(plotly)
# Build a ggplot of column `y` against column `x` from `data`.
#
# Args:
#   data: data frame holding the columns to plot.
#   x, y: column names given as character scalars.
#   type: "Line" or "Scatterplot"; any other value returns NULL
#         (switch() semantics), which Shiny renders as an empty plot.
plotType <- function(data, x, y, type){
  # aes_string() is deprecated in ggplot2 >= 3.0; the .data pronoun is the
  # recommended way to map columns whose names arrive as strings.
  switch(type,
         "Line" = ggplot(data, aes(.data[[x]], .data[[y]])) + geom_line(),
         "Scatterplot" = ggplot(data, aes(.data[[x]], .data[[y]])) + geom_point()
  )
}
ui <- fluidPage(
sidebarPanel(
# Input: select a file
fileInput(inputId = "file1", label = "Choose CSV File",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values, text/plain",
".csv")
),
# Horizontal line
tags$hr(),
# Input: Checkbox if file has header
checkboxInput("header", "Header", TRUE),
# Input: Select separator
radioButtons(inputId ="sep", label = "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ";"),
radioButtons(inputId = "quote", label = "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Horizontal line
tags$hr(),
selectInput('xcol', 'X Variable', ""),
selectInput('ycol', 'Y Variable', "", selected = ""),
# Horizontal line
tags$hr(),
# Input: Select the type of graph
radioButtons(inputId ="graph", label = "Type of graph:",
choices = c("Line",
"Scatterplot"),
selected = "Line")
),
mainPanel(
tabsetPanel( type = "tabs",
tabPanel(
# App title
titlePanel("Uploading Files"),
# Output: Data file
tableOutput("contents")
),
tabPanel(
titlePanel("Plot"),
plotOutput('MyPlot')
),
tabPanel(
titlePanel("Summary Statistics"),
verbatimTextOutput("summary")
)
)
)
)
|
992820f1bad7ff86e367e1a309bf5beea341bd78 | 1e7af5ef0fc68fd0df55eb671040057f777767c5 | /vectors_operations.R | 1e4c2498e2276ec94d2393734760c1b8427dd424 | [
"MIT"
] | permissive | Naghipourfar/R-Learning | 96a174abef4d90c671dbbf9713df5959e08d4b8f | d8bc5c3c5895331d53f754c884a126b05b98bd1e | refs/heads/master | 2021-08-14T19:47:54.375006 | 2017-11-16T15:38:01 | 2017-11-16T15:38:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 401 | r | vectors_operations.R | ###### Operations on vectors ######
x <- 1:10
mean(x) # computes the mean of x --> 5.5
sum(x) # sum of elements --> 55
nchar(x) # elementwise operation
##### MEAN ######
arr = c(1, 2, NA, 1:20)
mean(x = arr, na.rm = TRUE) # na.rm is what should be done if NA observed (remove or not)
mean(x = arr, trim = 0.1, na.rm = TRUE) # trim
# if you call mean() on arr without na.rm = TRUE --> returns NA
|
8da560819ca401bdcc00344714a27cbf53caa549 | 4056e89d2e74109f4938051dbe09017ab0c48f43 | /man/seed_production.Rd | bf5f3c1d15e37eac85e7ab15a774872b2c727af0 | [] | no_license | petrelharp/landsim | af4209dd8f5ffdd8a32a367d8e58b1b1f632e885 | c4e7cb8cb637c58bcacc1d92e4a01023eb0b281a | refs/heads/master | 2021-01-19T04:32:44.496828 | 2017-11-22T22:54:10 | 2017-11-22T22:54:10 | 47,908,664 | 6 | 2 | null | null | null | null | UTF-8 | R | false | true | 853 | rd | seed_production.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seed_production.R
\name{seed_production}
\alias{seed_production}
\title{Mean Seed Production}
\usage{
seed_production(seeders, pollen, mating, fecundity = 1)
}
\arguments{
\item{seeders}{A numeric matrix of numbers of seeding individuals, with number of columns equal to the number of genotypes.}
\item{pollen}{A numeric matrix of pollen density, with number of columns equal to the number of genotypes.}
\item{mating}{An array with probabilities of producing each genotype from each parental genotype.}
\item{fecundity}{Scaling factor that multiplies the resulting matrix.}
}
\value{
A matrix of the same form as \code{seeders}.
}
\description{
Find the mean seed production, by genotype,
given the local numbers of seeding individuals and pollen density,
by genotype.
}
|
8e3124178c8ad7d8b8f27e8c9ed5a065e46cc7da | 8c9cbd810c862f0abe589c1ad3424d0d4d7d3af0 | /crawlerR/rvest_in_action.R | e0c1aa7c8193c0e87728d31ac3e8e488bf39999c | [] | no_license | mi2-warsaw/pracuj | 1849fce49a2c30811499df331dea49c0beb662bd | 213e344a35837b8dd75bece934fbd49020493607 | refs/heads/master | 2020-12-15T03:56:36.978926 | 2017-03-26T12:21:14 | 2017-03-26T12:21:14 | 51,036,295 | 5 | 6 | null | 2016-02-10T09:32:18 | 2016-02-03T22:45:48 | Python | UTF-8 | R | false | false | 2,842 | r | rvest_in_action.R | library(rvest)
library(RPostgreSQL)
dbname = "pracuj"
user = "pracuj"
host = "services.mini.pw.edu.pl"
sterownik <- dbDriver("PostgreSQL")
polaczenie <- dbConnect(sterownik, dbname = dbname, user = user, password = password, host = host)
dbGetQuery(polaczenie, "SELECT count(*) FROM offers")
dbGetQuery(polaczenie, "SELECT * FROM offers")
###########################################################
# OLD CODE
# use updateDatabase.R instead
# NOTE(review): the two lines below look like leftovers from interactive
# debugging -- consider removing them.
i = 1
wynikiDF[i,"link"]
# Insert the scraped offers into the `offers` table, one INSERT per row.
# WARNING(review): the SQL below is built by string concatenation; gsub()
# strips quote characters as a crude escape, but this remains fragile
# (injection-prone) and silently alters the stored text. Prefer a
# parameterized insert (e.g. DBI::dbWriteTable) if the driver supports it.
# NOTE(review): the loop starts at i = 2, so the first row of wynikiDF is
# never inserted -- confirm whether that is intentional.
for (i in 2:nrow(wynikiDF)) {
# The id column is derived from the trailing segment of the offer link
# (everything after the last comma); the remaining columns have all quote
# characters removed before being spliced into the statement.
dbGetQuery(polaczenie,
paste0("INSERT INTO offers (id, link, title, data, location, position, company, text) VALUES ('"
,gsub(wynikiDF[i,"link"], pattern=".*,", replacement = ""),"','"
,gsub(wynikiDF[i,"link"], pattern = "['\"]", replacement = ""),"','"
,gsub(wynikiDF[i,"tytul"], pattern = "['\"]", replacement = ""),"','"
,gsub(wynikiDF[i,"data"], pattern = "['\"]", replacement = ""),"','"
,gsub(wynikiDF[i,"lokacja"], pattern = "['\"]", replacement = ""),"','"
,gsub(wynikiDF[i,"stanowisko"], pattern = "['\"]", replacement = ""),"','"
,gsub(wynikiDF[i,"firma"], pattern = "['\"]", replacement = ""),"','"
,gsub(wynikiDF[i,"oferta"], pattern = "['\"]", replacement = ""),"')"))
}
head(wynikiDF,1)
html <- read_html("http://www.pracuj.pl/praca?pn=1")
uchwyty <- html_nodes(html, css = ".offer__list_item_link_name")
linki <- html_attr(uchwyty, name = "href")
linki <- na.omit(linki)
wszystkieLinki <- list()
for (i in 1:474) {
html <- read_html(paste0("http://www.pracuj.pl/praca?pn=",i))
uchwyty <- html_nodes(html, css = ".offer__list_item_link_name")
linki <- html_attr(uchwyty, name = "href")
linki <- na.omit(linki)
wszystkieLinki[[i]] <- linki
}
linki <- unlist(wszystkieLinki)
wyniki <- list()
for (i in 25110:length(linki)) {
try({
ll <- paste0("http://www.pracuj.pl",linki[i])
oferta <- read_html(ll)
tytul <- html_text(html_nodes(oferta, ".offerTop__cnt_main_job"))
data <- html_text(html_nodes(oferta, ".ico-time span"))[2]
lokacja <- html_text(html_nodes(oferta, ".latlng span"))[2]
stanowisko <- html_text(html_nodes(oferta, ".ico-briefcase"))
firma <- html_text(html_nodes(oferta, ".offerTop__cnt_main_emplo-inline span"))
oferta <- html_text(html_node(oferta, "#offCont"))
o2 <- c(ll, tytul, data, lokacja, stanowisko, firma, oferta)
cat(i, "/", length(linki), " ", tytul, "\n")
wyniki[[i]] <- o2
}, silent = TRUE)
}
inds <- which(sapply(wyniki, length) == 7)
wynikiDF <- t(as.data.frame(wyniki[ inds ]))
colnames(wynikiDF) <- c("link", "tytul", "data", "lokacja", "stanowisko", "firma", "oferta")
rownames(wynikiDF) <- NULL
save(wynikiDF, file="wynikiDF.rda")
|
755312b9bd6d9f293298fe9d0fe895695a719948 | 3b57195605cc03aa5bbf21d9de3ee913dbd1f03c | /data_cleaning/raw/fiscal_contracts/combine_fiscal_contracts_data.R | b50983459a986f5092175e7aae7b120721029a2f | [] | no_license | christophergandrud/eurostat_revisions | ddfd399cf9c3860ef82d9eaed9f31d3229188d3f | 297e71a1d5043180cc632c3011ae4ff07d0418a7 | refs/heads/master | 2021-01-10T13:08:10.883746 | 2017-01-22T19:31:30 | 2017-01-22T19:31:30 | 46,604,897 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,874 | r | combine_fiscal_contracts_data.R | # ---------------------------------------------------------------------------- #
# Merge various versions of Hallerberg et al.'s fiscal contracts/delegation variables
# Christopher Gandrud
# MIT LICENSE
# ---------------------------------------------------------------------------- #
library(dplyr)
library(rio)
library(DataCombine)
# NOTE(review): hard-coded absolute working directory; fails on any other
# machine -- consider a project-relative path (e.g. here::here()).
setwd('/git_repositories/eurostat_revisions/')
# Data 1 ----------
contracts1 <- import('data_cleaning/raw/fiscal_contracts/DelegationtempStata11.dta') %>%
    rename(wb = country)
# Recode ad-hoc 3-letter abbreviations to World Bank codes so the
# countrycode 'wb' lookup below resolves them.
contracts1$wb[contracts1$wb == 'BUL'] <- 'BGR'
contracts1$wb[contracts1$wb == 'GER'] <- 'DEU'
contracts1$wb[contracts1$wb == 'LAT'] <- 'LVA'
contracts1$wb[contracts1$wb == 'LIT'] <- 'LTU'
contracts1$wb[contracts1$wb == 'SLK'] <- 'SVK'
contracts1$country <- countrycode::countrycode(contracts1$wb, origin = 'wb',
                                               destination = 'country.name')
# Keep a common column set; drop rows missing either key variable.
contracts1 <- contracts1[, c('country', 'year', 'delegation', 'contracts',
                             'expcontracts')]
contracts1 <- contracts1 %>% DropNA(c('delegation', 'contracts'))
# Data 2 ------------
contracts2 <- import('data_cleaning/raw/fiscal_contracts/EUDeficitsDebt.dta') %>%
    select(Country, Year, Delegation, Contracts) %>%
    rename(wb = Country)
names(contracts2) <- names(contracts2) %>% tolower
# Same recode idea as above; note this source spells Lithuania 'LITH'.
contracts2$wb[contracts2$wb == 'BUL'] <- 'BGR'
contracts2$wb[contracts2$wb == 'GER'] <- 'DEU'
contracts2$wb[contracts2$wb == 'LAT'] <- 'LVA'
contracts2$wb[contracts2$wb == 'LITH'] <- 'LTU'
contracts2$wb[contracts2$wb == 'MAL'] <- 'MLT'
contracts2$wb[contracts2$wb == 'SLK'] <- 'SVK'
contracts2$country <- countrycode::countrycode(contracts2$wb, origin = 'wb',
                                               destination = 'country.name')
contracts2 <- contracts2[, c('country', 'year', 'delegation', 'contracts')]
# This source has no expected-contracts measure; pad with NA for rbind.
contracts2$expcontracts <- NA
contracts2 <- contracts2 %>% DropNA(c('delegation', 'contracts'))
# Data 3 -------------
contracts3 <- import('data_cleaning/raw/fiscal_contracts/CEEC short.dta') %>%
    select(-delegation) %>%
    rename(contracts = contract3) %>%
    rename(delegation = delegate)
# country.name -> country.name pass standardizes spelling variants.
contracts3$country <- countrycode::countrycode(contracts3$country,
                                               origin = 'country.name',
                                               destination = 'country.name')
contracts3 <- contracts3 %>% DropNA(c('delegation', 'contracts'))
contracts3 <- contracts3[, c('country', 'year', 'delegation', 'contracts')]
contracts3$expcontracts <- NA
# Combine ------------
# rbind order sets the priority: when a country-year appears in several
# sources, FindDups keeps the first occurrence (contracts1 wins, then 2, 3).
# NOTE(review): prefer NotDups = TRUE over the reassignable shorthand T.
comb <- rbind(contracts1, contracts2, contracts3)
comb <- FindDups(comb, c('country', 'year'), NotDups = T) %>%
    arrange(country, year)
export(comb, file = 'data_cleaning/raw/fiscal_contracts/combined_fiscal.csv')
|
7654670569dac6886e308eeff7a499419f58642b | eaef4d0bec6c465e25a1e716ecaff1cfb53a2688 | /cachematrix.R | 301c9c54e5a26b38f8dd8dc825e489e558af77be | [] | no_license | pedrosnk/ProgrammingAssignment2 | ae29d44cbaa7290cee5c553be8e88a358942fdf7 | eb33f1aa78cfc0f6bd930f56ac3acd2e459c059c | refs/heads/master | 2020-12-26T03:55:58.539182 | 2015-05-19T23:37:10 | 2015-05-19T23:37:10 | 35,797,936 | 0 | 0 | null | 2015-05-18T05:09:08 | 2015-05-18T05:09:04 | null | UTF-8 | R | false | false | 1,337 | r | cachematrix.R | ## This is the set of functions created to solve and cache the value of the inverse of a matrix
## makeCacheMatrix: wrap a matrix together with a cache slot for its inverse.
##
## Returns a list of four closures that share one enclosing environment:
##   set(m)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setInverse(i) -- store a computed inverse
##   getInverse()  -- return the cached inverse, or NULL when none is stored
##
## e.g.
##   cm <- makeCacheMatrix(matrix(1:4, 2))
makeCacheMatrix <- function(x = matrix()) {
  cachedInverse <- NULL
  list(
    set = function(newMatrix) {
      # Storing a new matrix invalidates whatever inverse was cached before.
      x <<- newMatrix
      cachedInverse <<- NULL
    },
    get = function() x,
    setInverse = function(newInverse) cachedInverse <<- newInverse,
    getInverse = function() cachedInverse
  )
}
## cacheSolve: return the inverse of the matrix held in a makeCacheMatrix
## wrapper `x`, computing it with solve(...) only on the first call and
## reusing the cached result (with a message) afterwards. Extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getInverse()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  # Cache miss: compute, store for next time, and return.
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  inverse
}
|
4e79d0d9337dd210e293253d647150c49c35be66 | 98095937c67ba70cff3cb29356ca0d917959e1eb | /man/getfreqs.Rd | b1683d8d46cb187d7c7236555138be056d0c926f | [] | no_license | danjlawson/pcapred | 88be09966c8a5392a638b01429042f881834028b | d7cbd6327e2673ea72baf35681d85e0cd7eede68 | refs/heads/master | 2023-08-21T19:56:40.805484 | 2021-10-04T19:46:12 | 2021-10-04T19:46:12 | 261,128,161 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 504 | rd | getfreqs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rbed.R
\name{getfreqs}
\alias{getfreqs}
\title{Compute the allele frequencies for bed file}
\usage{
getfreqs(bed, verbose = TRUE)
}
\arguments{
\item{bed}{An "rbed" object as returned from \code{\link{readbed}} or \code{\link{mergeref}}}
\item{verbose}{(default TRUE) whether to report on activities}
}
\value{
The "rbed" object provided, with the \code{$freq} allele-frequency element added
}
\description{
Adds the SNP frequency calculations to an "rbed" object.
}
|
5c77c569ccdadf90f6d7ad5da8ce9052c7d20481 | 0eb0abb22102826cd233d2dbf51c4a1576412769 | /R/QueryEnsembl.R | b9a84b3e56f73b2cf3c1ad4d709c3c811fdd3343 | [] | no_license | barzine/randomUtils | d8a2c05fdfdcd4e6b13403eb049a3b08e8880cc1 | 77909cffbd450e35a6615aef6dd21b107f275e92 | refs/heads/master | 2021-01-17T11:38:27.300874 | 2017-08-07T15:09:20 | 2017-08-07T15:09:20 | 20,893,193 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 711 | r | QueryEnsembl.R |
useMart_HSG<-function(host){
  # Connect to the Homo sapiens gene dataset of an Ensembl BioMart.
  # `host` selects the (archived) Ensembl snapshot to query.
  suppressPackageStartupMessages(library(biomaRt))
  useMart(host=host,
          biomart='ENSEMBL_MART_ENSEMBL',
          dataset='hsapiens_gene_ensembl')
}
# Mart handles for three Ensembl archive releases (76, 78, 79).
# NOTE(review): these open network connections at source time.
ens76<-useMart_HSG(host='aug2014.archive.ensembl.org')
ens78<-useMart_HSG(host='dec2014.archive.ensembl.org')
ens79<-useMart_HSG(host='mar2015.archive.ensembl.org')
# Default attribute list reused across queries.
AttList<-c('ensembl_gene_id',
           'gene_biotype')
# Thin wrapper that presets filters='ensembl_gene_id'.
# NOTE(review): this shadows biomaRt::getBM in the calling environment;
# consider renaming to avoid confusion.
getBM<-function(attributes,filters='ensembl_gene_id',values,mart){
  biomaRt::getBM(attributes=attributes,filters=filters,values=values,mart=mart)
}
#TODO: fetch automatically the name of the attributes and the filters
#attributes[grep('ensembl_gene_id',attributes$name),]
|
05d6d31edd28637033cbd12dabe506318bd6f83f | 0eb1c0b93987d339b6b5e58978b6afc3c03f4e4c | /cachematrix.R | 838dfa3f5db87dba038260ed045e9a77d16576ac | [] | no_license | dudub100/ProgrammingAssignment2 | 49363e500fea3692ef917a19c84850fc79e7ff57 | 945e36e1c8ba216c7c15372103b659e42b7bb404 | refs/heads/master | 2021-01-21T15:43:49.566698 | 2015-05-18T06:58:10 | 2015-05-18T06:58:10 | 35,799,902 | 0 | 0 | null | 2015-05-18T06:08:10 | 2015-05-18T06:08:09 | null | UTF-8 | R | false | false | 866 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## Wrap a matrix `x` together with a cache slot for its inverse.
## The returned list exposes set/get for the matrix itself and
## setinv/getinv for the cached inverse (NULL until one is stored).
makeCacheMatrix <- function(x = matrix()) {
  inv_m <- NULL
  list(
    set = function(y) {
      x <<- y          # store the replacement matrix ...
      inv_m <<- NULL   # ... and invalidate the stale cached inverse
    },
    get = function() x,
    setinv = function(inverse) inv_m <<- inverse,
    getinv = function() inv_m
  )
}
## Return the inverse of the matrix stored in a makeCacheMatrix wrapper.
## The inverse is computed with solve(...) at most once; later calls reuse
## the cached value and announce the cache hit with a message. Additional
## arguments are passed through to solve().
cacheSolve <- function(x, ...) {
  result <- x$getinv()
  if (is.null(result)) {
    # Nothing cached yet: compute and remember the inverse.
    result <- solve(x$get(), ...)
    x$setinv(result)
  } else {
    message("getting cached data")
  }
  result
}
|
dbb94ff0a91d6217b7321fc5ad03f95e83c60484 | d74208b48e1595366435acfe90584036e89dd88e | /man/tilesPolygonIntersectVIIRS.Rd | e21fdca9577266a3a5d6c552e0106380a66fbc90 | [] | no_license | mjdhasan/Rnightlights | 85feaac20d8ed20429d95a41b59fef59e23a4cfa | f34fd986a405b1ca51d9a807849d2274f8e22d22 | refs/heads/master | 2022-11-06T18:50:41.533156 | 2020-06-26T12:11:28 | 2020-06-26T12:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 831 | rd | tilesPolygonIntersectVIIRS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tiles.R
\name{tilesPolygonIntersectVIIRS}
\alias{tilesPolygonIntersectVIIRS}
\title{Get the list of VIIRS tiles that a polygon intersects with}
\usage{
tilesPolygonIntersectVIIRS(shpPolygon)
}
\arguments{
\item{shpPolygon}{a SpatialPolygon or SpatialPolygons}
}
\value{
Character vector of the intersecting tiles as given by \code{getNlTiles}
}
\description{
Get the list a VIIRS tiles that a polygon intersects with
}
\examples{
\dontrun{
#download shapefile if it doesn't exist
ctryShapefile <- Rnightlights:::dnldCtryPoly("KEN")
#read in shapefile top layer
ctryPoly <- readCtryPolyAdmLayer("KEN",
Rnightlights:::getCtryShpLyrNames("KEN",0))
#get list of intersecting tiles
tileList <- Rnightlights:::tilesPolygonIntersectVIIRS(ctryPoly)
}
}
|
791713f71c5747f29b972e711b574798f2b0f1fa | 8c5fff9b393f672dc04fda46c84ebea71571b83b | /code/mdsr1e/learningII.R | c1f8e2aed566021c6411e10b7638580348f25f64 | [] | no_license | mdsr-book/mdsr-book.github.io | 370b29d33639cd344ccb747b3627b96c77c099da | 97554b0a2f639c0cbef0a5094692f42df74f4936 | refs/heads/master | 2023-08-28T14:32:31.476236 | 2023-07-27T19:22:42 | 2023-07-27T19:22:42 | 53,080,248 | 15 | 13 | null | 2016-08-28T19:00:42 | 2016-03-03T20:18:33 | HTML | UTF-8 | R | false | false | 6,423 | r | learningII.R | ## ----cache=FALSE, echo=FALSE,include=FALSE-------------------------------
# NOTE(review): this file is knitr chunk-extraction ("purl") output -- the
# `## ----` lines are the original chunk headers. Prefer editing the source
# .Rnw/.Rmd document and re-extracting rather than hand-editing this file.
source('hooks.R', echo=TRUE)
fig.path='figures/learningII-'
## ----echo=FALSE,eval=TRUE------------------------------------------------
options(continue="  ")
## ----message=FALSE, eval=FALSE-------------------------------------------
## download.file("https://www.fueleconomy.gov/feg/epadata/16data.zip",
##   destfile = "data/fueleconomy.zip")
## unzip("data/fueleconomy.zip", exdir = "data/fueleconomy/")
## ----message=FALSE, warning=FALSE----------------------------------------
library(mdsr)
library(readxl)
# Load and tidy the EPA fuel-economy spreadsheet, keeping Toyota models only.
filename <- list.files("data/fueleconomy", pattern = "public\\.xlsx")[1]
cars <- read_excel(paste0("data/fueleconomy/", filename)) %>% data.frame()
cars <- cars %>%
  rename(make = Mfr.Name, model = Carline, displacement = Eng.Displ,
    cylinders = X..Cyl, city_mpg = City.FE..Guide....Conventional.Fuel,
    hwy_mpg = Hwy.FE..Guide....Conventional.Fuel, gears = X..Gears) %>%
  select(make, model, displacement, cylinders, gears, city_mpg, hwy_mpg) %>%
  distinct(model, .keep_all = TRUE) %>%
  filter(make == "Toyota")
rownames(cars) <- cars$model
glimpse(cars)
## ----warning=FALSE-------------------------------------------------------
# Pairwise Euclidean distances between car models for clustering.
car_diffs <- dist(cars)
str(car_diffs)
car_mat <- car_diffs %>% as.matrix()
car_mat[1:6, 1:6] %>% round(digits = 2)
## ----cars-tree, fig.height=14, fig.cap="A dendrogram constructed by hierarchical clustering from car-to-car distances implied by the Toyota fuel economy data."----
library(ape)
car_diffs %>%
  hclust() %>%
  as.phylo() %>%
  plot(cex = 0.9, label.offset = 1)
## ----message=FALSE-------------------------------------------------------
BigCities <- WorldCities %>%
  arrange(desc(population)) %>%
  head(4000) %>%
  select(longitude, latitude)
glimpse(BigCities)
## ----cluster-cities, fig.cap="The world's 4,000 largest cities, clustered by the 6-means clustering algorithm.", message=FALSE----
set.seed(15)
library(mclust)
city_clusts <- BigCities %>%
  kmeans(centers = 6) %>%
  fitted("classes") %>%
  as.character()
BigCities <- BigCities %>% mutate(cluster = city_clusts)
BigCities %>% ggplot(aes(x = longitude, y = latitude)) +
  geom_point(aes(color = cluster), alpha = 0.5)
## ----echo=FALSE, results='asis'------------------------------------------
library(xtable)
# Scottish Parliament votes: reshape to one row per member, one column per bill.
Votes_wide <- Votes %>%
  tidyr::spread(key = bill, value = vote)
Votes_wide %>%
  select(1:5) %>%
  head(10) %>%
  xtable(caption = "Sample voting records data from the Scottish Parliament.",
    label = "tab:scot-votes-small") %>%
  print(include.rownames = FALSE)
## ----ballot-grid, fig.cap="Visualization of the Scottish Parliament votes."----
Votes %>%
  mutate(Vote = factor(vote, labels = c("Nay","Abstain","Aye"))) %>%
  ggplot(aes(x = bill, y = name, fill = Vote)) +
    geom_tile() + xlab("Ballot") + ylab("Member of Parliament") +
    scale_fill_manual(values = c("darkgray", "white", "goldenrod")) +
    scale_x_discrete(breaks = NULL, labels = NULL) +
    scale_y_discrete(breaks = NULL, labels = NULL)
## ----two-ballots, fig.cap="Scottish Parliament votes for two ballots."----
Votes %>% filter(bill %in% c("S1M-240.2", "S1M-639.1")) %>%
  tidyr::spread(key = bill, value = vote) %>%
  ggplot(aes(x = `S1M-240.2`, y = `S1M-639.1`)) +
    geom_point(alpha = 0.7,
      position = position_jitter(width = 0.1, height = 0.1)) +
    geom_point(alpha = 0.01, size = 10, color = "red" )
## ----many-ballots, fig.cap="Scatterplot showing the correlation between Scottish Parliament votes in two arbitrary collections of ballots."----
Votes %>%
  mutate(set_num = as.numeric(factor(bill)),
    set =
      ifelse(set_num < max(set_num) / 2, "First_Half", "Second_Half")) %>%
  group_by(name, set) %>%
  summarize(Ayes = sum(vote)) %>%
  tidyr::spread(key = set, value = Ayes) %>%
  ggplot(aes(x = First_Half, y = Second_Half)) +
    geom_point(alpha = 0.7, size = 5)
## ----ballot-PCA, fig.cap="Clustering members of Scottish Parliament based on SVD along the members."----
# SVD of the member-by-bill vote matrix: u spans members, v spans ballots.
Votes_wide <- Votes %>%
  tidyr::spread(key = bill, value = vote)
vote_svd <- Votes_wide %>%
  select(-name) %>%
  svd()
voters <- vote_svd$u[ , 1:5] %>% as.data.frame()
clusts <- voters %>% kmeans(centers = 6)
voters <- voters %>% mutate(cluster = as.factor(clusts$cluster))
ggplot(data = voters, aes(x = V1, y = V2)) +
  geom_point(aes(x = 0, y = 0), color = "red", shape = 1, size = 7) +
  geom_point(size = 5, alpha = 0.6, aes(color = cluster)) +
  xlab("Best Vector from SVD") + ylab("Second Best Vector from SVD") +
  ggtitle("Political Positions of Members of Parliament")
## ------------------------------------------------------------------------
voters <- voters %>%
  mutate(name = Votes_wide$name) %>%
  left_join(Parties, by = c("name" = "name"))
tally(party ~ cluster, data = voters)
## ------------------------------------------------------------------------
ballots <- vote_svd$v[ , 1:5] %>% as.data.frame()
clust_ballots <- kmeans(ballots, centers = 16)
ballots <- ballots %>% mutate(cluster = as.factor(clust_ballots$cluster),
  bill = names(Votes_wide)[-1])
## ----issue-clusters, fig.cap="Clustering of Scottish Parliament ballots based on SVD along the ballots."----
ggplot(data = ballots, aes(x = V1, y = V2)) +
  geom_point(aes(x = 0, y = 0), color = "red", shape = 1, size = 7) +
  geom_point(size = 5, alpha = 0.6, aes(color = cluster)) +
  xlab("Best Vector from SVD") + ylab("Second Best Vector from SVD") +
  ggtitle("Influential Ballots")
## ----SVD-ballots, fig.cap="Illustration of the Scottish Parliament votes when ordered by the primary vector of the SVD.", warning=FALSE----
Votes_svd <- Votes %>%
  mutate(Vote = factor(vote, labels = c("Nay", "Abstain", "Aye"))) %>%
  inner_join(ballots, by = "bill") %>%
  inner_join(voters, by = "name")
ggplot(data = Votes_svd,
  aes(x = reorder(bill, V1.x), y = reorder(name, V1.y), fill = Vote)) +
  geom_tile() + xlab("Ballot") + ylab("Member of Parliament") +
  scale_fill_manual(values = c("darkgray", "white", "goldenrod")) +
  scale_x_discrete(breaks = NULL, labels = NULL) +
  scale_y_discrete(breaks = NULL, labels = NULL)
## ------------------------------------------------------------------------
Votes_svd %>%
  arrange(V1.y) %>%
  head(1)
## ------------------------------------------------------------------------
Votes_svd %>%
  arrange(V1.y) %>%
  tail(1)
|
0304a45ca0aac3fc92dfb9a2b876d1291432c2d1 | 4eb8d68d4dc3bf23ccdae7cc0b93082570321434 | /pkg/man/subset.ff.Rd | d8d84dc114628793070c1bb718975a5f62a9491e | [] | no_license | edwindj/ffbase | 5d034d8d1ec65e94e7f4feec3da81e20841aa405 | 98236ab7501fc9765741300879f80baddfe991a3 | refs/heads/master | 2023-07-08T22:39:38.448615 | 2023-06-21T07:59:40 | 2023-06-21T07:59:40 | 8,315,596 | 26 | 8 | null | 2023-06-21T07:59:42 | 2013-02-20T15:15:55 | R | UTF-8 | R | false | true | 580 | rd | subset.ff.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subset.R
\name{subset.ff}
\alias{subset.ff}
\alias{subset.ffdf}
\title{Subsetting an ff vector or ffdf data frame}
\usage{
\method{subset}{ff}(x, subset, ...)
}
\arguments{
\item{x}{\code{ff} vector or \code{ffdf} data.frame to be subset}
\item{subset}{an expression, \code{ri}, \code{bit} or logical \code{ff} vector that can be used to index x}
\item{...}{not used}
}
\value{
a new ff vector containing the subset, data is physically copied
}
\description{
Subsetting an ff vector or ffdf data frame
}
|
c436f65ba2d7822ac282424ffc9d09d5ea8895c4 | d02848aed91696cd42a854f3a1bc17f8a3d22d2b | /dmisc/R/renderLatexTable.R | d96a8eefd1b88510cf4526dc9c03000026adb901 | [] | no_license | antoniofabio/dmisc | 88fc51650a2912761503999d3a35f552783cd168 | 732d8d60d025bb4e48fbac525f909940398662b9 | refs/heads/master | 2016-09-05T19:54:01.088162 | 2014-12-11T14:37:50 | 2014-12-11T14:37:50 | 721,700 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,448 | r | renderLatexTable.R | collapse <- function(x, sep="") paste(x, collapse=sep)
# Build the opening "\begin{tabular}{...}" line for matrix m, emitting one
# copy of `align` per column (e.g. 3 columns with align = "c" -> "{ccc}").
# Calls base paste()/paste0() directly instead of going through the local
# one-line collapse() alias, removing an unnecessary indirection.
matrixTabularHead <- function(m, align="c") {
  return(paste0("\\begin{tabular}{", paste(rep(align, NCOL(m)), collapse=""), "}"))
}
# The fixed closing line of a LaTeX tabular environment. The matrix
# argument is unused; it is kept only so head/tail share a call signature.
matrixTabularTail <- function(m) {
  end_line <- "\\end{tabular}"
  return(end_line)
}
# Render each row of m as a LaTeX tabular row: "v1 & v2 & ... \\".
# Joins each row with base paste(collapse = " & ") directly rather than
# through the local one-line collapse() alias of paste().
matrixTabularBody <- function(m) {
  return(paste(apply(m, 1, paste, collapse=" & "), "\\\\"))
}
# Format matrix m as the lines of a LaTeX table: body rows only when
# head = FALSE, otherwise wrapped in \begin{tabular}.../\end{tabular}.
matrixAsTabular <- function(m, align="c", head=TRUE) {
  out <- matrixTabularBody(m)
  if (!head) {
    return(out)
  }
  c(matrixTabularHead(m, align = align), out, matrixTabularTail(m))
}
# Split matrix m into a list of consecutive row blocks of at most `nrows`
# rows each; nrows = NULL means "no split" and yields list(m) unchanged.
# A zero-row matrix yields an empty list.
matrixSplitRows <- function(m, nrows=NULL) {
  if (is.null(nrows)) {
    return(list(m))
  }
  total <- NROW(m)
  lapply(seq_len(ceiling(total / nrows)), function(block) {
    first <- (block - 1) * nrows + 1
    last <- min(total, block * nrows)
    m[first:last, , drop = FALSE]
  })
}
# Pad m (treated as a flat vector) with `padd` up to a whole multiple of
# nc elements, then reshape with matrix(). `nr` is forwarded unevaluated,
# so when the caller omits it matrix() infers the row count itself.
# Note: because padd is character, the result is coerced to character.
matrixPadd <- function(m, nr, nc, ..., padd="") {
  n <- length(m)
  shortfall <- nc * ceiling(n / nc) - n
  matrix(c(m, rep(padd, shortfall)), nr, nc, ...)
}
##render a vector as a latex table, coloring according to non-NA 'colors' elements
colorLatexTable <- function(x, colors, nc, rowsPerPage=NULL, align="c") {
colorI <- !is.na(colors)
coloredX <- x
coloredX[colorI] <- sprintf("\\cellcolor{%s} %s",
colors[colorI], x[colorI])
m <- matrixPadd(coloredX, nc=nc, byrow=TRUE)
return(unlist(sapply(matrixSplitRows(m, rowsPerPage),
matrixAsTabular, align=align)))
}
|
f97ddd2e7cbdc684927b91730a9502ebd41bdcb7 | c759164f6d93a71095038a3320b4ba3343c94fae | /inst/rstudio/templates/project/inst/tools/debug.R | a88306b7aa8685c6dd88dd18a08e1d1cfe1b0f93 | [] | no_license | dipterix/rave2 | cf0f68e70734105455aad12f3effcbded5a3e4fb | 40a9d4448ad3edeae0a8773c0f7ebf4135932ca7 | refs/heads/master | 2022-12-09T04:19:12.523858 | 2020-08-31T17:48:02 | 2020-08-31T17:48:02 | 291,144,544 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,004 | r | debug.R | ravecore::load_scripts(rlang::quo({
  # debug.R runs before comp.R and main.R,
  # providing some basic tools that will be available during the run-time.
  # NOTE(review): this block is evaluated inside rlang::quo() and loaded via
  # ravecore::load_scripts(), so these bindings live in the module runtime
  # environment, not this file's environment.
  # Session data: run-time based. Should store the reactive user's inputs
  # session_data will be different for each tab.
  session_data = ravecore::getDefaultSessionData()
  # Module data: shared across sessions (multiple web tabs), but stays within
  # a package
  package_data = ravecore::getDefaultPackageData()
  # Global data: an environment that is shared across packages. stores some
  # RAVE specific data (power, phase, etc), rarely used by rave modules to
  # store data (read-only)
  global_data = ravecore::getDefaultDataRepository()
  # Reactives
  input = ravecore::getDefaultReactiveInput()
  output = ravecore::getDefaultReactiveOutput()
  reactive_data = shiny::reactiveValues()
  session = shiny::getDefaultReactiveDomain()
  # Fall back to a fake session when running outside Shiny; %?<-% presumably
  # assigns only when the left-hand side is NULL -- confirm against the
  # operator's definition in ravecore.
  session %?<-% ravecore::fake_session()
  ns <- session$ns
}))
|
44f7f1bb04495e498837b0506617af9f9ec7a3b3 | 9e8615839d1361b9690b32c59085ffeeccc91b07 | /2021/RScripts - Copia/organiza_dados.R | 6090ea0d8d73533fd7081c58a436d568c0042c9f | [] | no_license | rdosreis/MAT02018 | 7dbb658308b75997b14fe010fdc3c029d22819ca | 4e8e0ba81bd19de67a6f68ef8866d5389b6f42b8 | refs/heads/master | 2022-10-04T12:55:53.010897 | 2022-09-22T13:26:15 | 2022-09-22T13:26:15 | 244,178,904 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 900 | r | organiza_dados.R | ## ----carrega-dados0, echo=TRUE, eval=FALSE, warning=FALSE, message=FALSE----------------------------------------
## # install.packages("readxl")
library(readxl)
dados <- read_excel(path = "companhia_mb.xlsx")
## ----carrega-dados2, warning=FALSE, message=FALSE---------------------------------------------------------------
class(dados) # classe do objeto dados
dim(dados) # dimensão do objeto dados
## ----carrega-dados3, warning=FALSE, message=FALSE---------------------------------------------------------------
head(dados) # apresenta as primeiras linhas do objeto dados
## ----apuracao, warning=FALSE, message=FALSE---------------------------------------------------------------------
table(dados$`Estado Civil`) # apura dados nominais
table(dados$`Grau de Instrução`) # apura dados ordinais
table(dados$`N de Filhos`) # apura dados discretos
dados$Idade # apura dados contínuos
|
671f4402fef95541f8aa7a7278ca3f3c2712b827 | dd54e4975bf7d91056c15b491f6dfb448fc0dca1 | /R/parseData.R | dc11f4ce02c28502fb874a6efbce4efdb4ff6aec | [
"CC-BY-4.0"
] | permissive | petedodd/dtree | afa3163323d9653ccacf214d983597e8fefed3e4 | 0641bf317853c91e48570b28f436f44c85d9f05c | refs/heads/master | 2021-01-17T19:56:23.963819 | 2017-10-02T13:31:25 | 2017-10-02T13:31:25 | 60,473,953 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,514 | r | parseData.R |
## Parse a dtree specification string into edge and node data.
##
## Each non-blank line of `x` is either an edge ("A->B['p']") or a node
## ("A['label|cost|qol']"); anything after a backslash on a line is a
## comment and is discarded, as is all whitespace.
## Returns list(edges = list(from, to, prob),
##              nodes = list(names, labels, costs, qols)),
## every field kept as character data (no numeric conversion here).
parseData <- function(x){
  # split into lines, drop blanks, strip "\"-comments, then squeeze spaces
  specLines <- unlist(strsplit(trim(x), split='\n'))
  specLines <- specLines[specLines != ""]
  specLines <- unlist(lapply(as.list(specLines), FUN=function(ln) unlist(strsplit(ln, split="\\\\"))[1]))
  specLines <- trimall(specLines)
  specLines <- specLines[specLines != ""]
  # the "->" token distinguishes edge lines from node lines
  isEdge <- grepl("->", specLines)
  edgeLines <- specLines[isEdge]
  nodeLines <- specLines[!isEdge]
  ## ---edges---
  halves <- strsplit(edgeLines, split='->')
  eFrom <- unlist(lapply(halves, function(h) h[[1]]))
  eTo <- unlist(lapply(halves, function(h) unlist(strsplit(h[2], split='\\['))[1]))
  eProb <- unlist(lapply(halves, function(h) unlist(strsplit(h[2], split="'"))[2]))
  ## ---nodes---
  nNames <- unlist(lapply(nodeLines, function(n) unlist(strsplit(n, split='\\['))[1]))
  quoted <- unlist(lapply(nodeLines, function(n) unlist(strsplit(n, split="'"))[2]))
  fields <- strsplit(quoted, split="\\|")  # label | cost | qol
  nodeLabels <- unlist(lapply(fields, function(f) f[1]))
  nodeCosts <- unlist(lapply(fields, function(f) f[2]))
  nodeQols <- unlist(lapply(fields, function(f) f[3]))
  list(edges=list(from=eFrom, to=eTo, prob=eProb),
       nodes=list(names=nNames, labels=nodeLabels, costs=nodeCosts, qols=nodeQols))
}
## trimming utility functions
# Remove leading and trailing whitespace runs from each element of x.
trim <- function(x) {
  no_lead <- gsub("^\\s+", "", x)
  gsub("\\s+$", "", no_lead)
}
# Delete every whitespace character (internal and surrounding) from x;
# [[:space:]] is the POSIX class equivalent of \s in base R's TRE engine.
trimall <- function(x) {
  gsub("[[:space:]]+", "", x)
}
# Drop the cost/QoL payload from a node spec: everything from the '|' after
# the label up to the closing quote is removed, e.g.
#   "A['label|10|0.8']"  ->  "A['label']"
# Fix: the original pattern wrote [:space:] outside brackets, which base R's
# TRE engine rejects ("character class syntax is [[:space:]], not [:space:]"),
# so the function errored on every call; the correct form is [[:space:]].
gfun <- function(x) gsub("[[:space:]]*\\|.*'[[:space:]]*]", "']", x) #for dropping Q/C
|
6f1e17dc716bbb7daa1f8ed3f7c912b2b6ca7f84 | 1f092c22a5746af10ce574af15002d53881c6ef7 | /tests/testthat/testRankTimePoints.R | d0f1a69a15008211dea649cc1e96fb0baf5b02d6 | [] | no_license | cran/microsamplingDesign | 50ce4ca2e1439049c20733194c5962e0e3b696c8 | 49a02faf102dfc9abef4d34bbdd7041a251f64f8 | refs/heads/master | 2021-10-27T09:12:27.351890 | 2021-10-13T12:52:04 | 2021-10-13T12:52:04 | 131,901,288 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,734 | r | testRankTimePoints.R | # Project: microsamplingDesign
#
# Author: ablommaert
###############################################################################
context( "test Rank timePoints " )
#source( "/home/ablommaert/git/microsamplingDesign/microsamplingDesign/tests/testthat/beforeTesting.R" )
#source( "../testthat/beforeTesting.R" )
## Load in existing data
#seed <- getRdsFile( "seed" )
seedFile <- system.file( "dataForTesting" , "seed.rds" , package = "microsamplingDesign" )
seed <- readRDS( seedFile )
#rankTimePointsOrig <- getRdsFile( "rankedTimePoints" )'
rankTimePointsFile <- system.file( "dataForTesting" , "rankedTimePoints.rds" , package = "microsamplingDesign" )
rankTimePointsOrig <- readRDS( rankTimePointsFile )
### generate new data
suppressWarnings(RNGversion("3.5.0"))
set.seed( seed , kind = "Mersenne-Twister", normal.kind = "Inversion") # change to
fullTimePoints <- 0:10
setOfTimePoints <- getExampleSetOfTimePoints( fullTimePoints)
pkDataExample <- getPkData( getExamplePkModel() , getTimePoints( setOfTimePoints ) , nSubjectsPerScheme = 5 , nSamples = 17 )
suppressWarnings(RNGversion("3.5.0"))
set.seed( seed , kind = "Mersenne-Twister", normal.kind = "Inversion") # change to
rankedTimePointsNew <- rankObject( object = setOfTimePoints , pkData = pkDataExample , nGrid = 75 , nSamplesAvCurve = 13)
suppressWarnings(RNGversion("3.5.0"))
set.seed( seed , kind = "Mersenne-Twister", normal.kind = "Inversion") # change to
rankedTimePointsNew2 <- rankObject( object = setOfTimePoints , pkData = pkDataExample , nGrid = 75 , nSamplesAvCurve = 13)
suppressWarnings(RNGversion("3.5.0"))
set.seed( seed , kind = "Mersenne-Twister", normal.kind = "Inversion") # change to
rankedTimePointsNewDiffGrid <- rankObject( object = setOfTimePoints , pkData = pkDataExample , nGrid = 10 , nSamplesAvCurve = 13)
suppressWarnings(RNGversion("3.5.0"))
set.seed( seed , kind = "Mersenne-Twister", normal.kind = "Inversion") # change to
rankedTimePointsNewDiffCurves <- rankObject( object = setOfTimePoints , pkData = pkDataExample , nGrid = 75 , nSamplesAvCurve = 20)
### execute tests
test_that( "Equal ranking timePoints" , {
expect_equal( rankTimePointsOrig@ranking , rankedTimePointsNew@ranking )
}
)
test_that( "Different ranking timePoints with different number of grid poings" , {
expect_false( identical( rankedTimePointsNew , rankedTimePointsNewDiffGrid ) )
}
)
test_that( "Different ranking timePoints with different number of sample curves" , {
expect_false( identical( rankedTimePointsNew , rankedTimePointsNewDiffCurves) )
}
)
|
c0d7f3c9ac62dfc62f7e639c2a193be199e669d0 | a3454c956328b71bd7f1aabbe4113c20d4f93293 | /R/lseqcurlib1D.R | f7c70a5270c22e472d71e18d2baa954c1f81c18a | [] | no_license | promodel/reldna | 82abcf93f2385b972b47df68db62bb92cb7ca54b | 9f109d12794de55c8985956e3046b13183fc8d41 | refs/heads/master | 2021-01-01T19:07:39.791554 | 2015-05-22T09:44:27 | 2015-05-22T09:44:27 | 6,617,501 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,722 | r | lseqcurlib1D.R | lseqcurlib1D <-function(
    ### DEPRECATED! SHOULD NOT BE USED
    ### function to calculate electrostatic profile of DNA
    s, ##<< DNA sequence
    bound, ##<< define a fragment of interest
    width=1, ##<< smoothing window width
    ref, ##<< reference position
    filename=NA #<< name of the file to save data in. Empty string or NA value means file would not be saved
    #,name ##<< name of the library
  ){
  # half-width of the smoothing window
  width<-floor(width/2)
  # base-pair geometry along the helix axis; risem = center Z coordinates
  geom<-dnaGeom(s)
  risem<-geom$risem-geom$risem[ref]
  risef<-floor(risem)
  # snap values that are numerically "almost integer" up to that integer
  i<-which(abs(risem-risef-1)<1e-10)
  risef[i]<-risef[i]+1
  risec<-ceiling(risem)
  # fractional distance between the floor and ceiling mesh points
  rspar<-risem-risef
  # re-base floor/ceiling indices to start at 1
  risef<-floor(risem-min(risem))+1
  risec<-ceiling(risem-min(risem))+1
#  libname<-paste(name, '.Rdata', sep='')
#  data(libname)
  # NOTE(review): `qqs` is not defined in this function -- presumably a
  # per-base charge-profile array loaded from package data; confirm.
  zlib<-dim(qqs)[2]
  lz<-max(risec)-min(risec)+zlib
  pot<-array(0, dim=c(1,lz))
  # mean charge profile per base type, averaged over the first qqs dimension
  qqm<-apply(qqs, c(2,3), FUN=mean)
  # accumulate each base's profile, linearly interpolated between the two
  # neighboring mesh points according to rspar
  for (i in 1:geom$l){
    q<-qqm[,geom$nseq[i]]
    pot[risef[i]:(risef[i]+zlib-1)]<-pot[risef[i]:(risef[i]+zlib-1)]+(1-rspar[i])*q
    pot[risec[i]:(risec[i]+zlib-1)]<-pot[risec[i]:(risec[i]+zlib-1)]+rspar[i]*q
  }
  rm(qqm, rspar)
  i1<-(zlib/2-width):(max(risef)+zlib/2-width)
  # restrict the profile to the fragment of interest (plus an 8-point margin)
  mpot<-pot
  mpot<-mpot[(risef[bound[1]]-8+zlib/2):(risef[bound[2]]+8+zlib/2)]
  i1<-risef[bound[1]:bound[2]]-risef[ref]+9
  zout<-floor(risem[bound[1]]-9):(risem[bound[2]]+9)
  x<-(risem[bound[1]]-8):(risem[bound[2]]+8)
  risem<-risem[bound[1]:bound[2]]-risem[ref]
  # optionally save intermediate data; blank/NA filename skips saving
  if(!is.na(filename)&!nchar(gsub('^ +','',gsub(' +$','',filename)))>0){
    filename<-gsub(' +','_',gsub('^ +','',gsub(' +$','',filename)))
    save(mpot, risef, i1, file=paste(filename,'.lseqcurlib_data.Rdata',sep=''))
  }
  # per-base map of which part of the profile each base pair influences
  sgz<-data.frame(pos=1:length(s),risem=risem,
                  minZ=(risem-zlib/2),
                  maxZ=(risem+zlib/2-1))
  sgz$minZ[sgz$minZ<min(zout)]<-min(zout)
  sgz$maxZ[sgz$maxZ>max(zout)]<-max(zout)
  sgz$minI<-floor(sgz$minZ)-min(zout)+1
  sgz$maxI<-ceiling(sgz$maxZ)-min(zout)+1
  elstatlist<-list(mpot=mpot, risef=risem, i1=i1,x=x,seq=s,bound=bound,ref=ref,zmap=sgz[sgz$minZ<sgz$maxZ,])
  class(elstatlist)<-'elDNA1d'
  ##value<< list with eight components:
  ##\item{mpot}{1D profile of electrostatic potential along Z axis of DNA;}
  ##\item{risem}{coordinate of the base pair geometrical center on Z axis of DNA;}
  ##\item{i1}{index of the base pair geometrical center nearest mesh point ;}
  ##\item{x}{Z coordinates;}
  ##\item{seq}{DNA sequence used to calculate profile;}
  ##\item{bound}{boundaries of the part of interest within the sequence;}
  ##\item{ref}{index of the base pair that suppose to be placed at the origin;}
  ##\item{zmap}{data frame of geometical properties of base pairs like index, coordinate of the center, part of profile influenced by its charges.}
  return (elstatlist)
}
|
7460361143e981c28bf94fdce7dfc9f012fdd3dc | 5469acfbb49e648582702a6322bf9989044b12b0 | /Code/survival_model.R | a764eefb05f7955c33674be91b4922106db2d760 | [] | no_license | Batuhanipekci/dmc | 3d93e208ada5ebca90fec190a9803a3eaf1e5aa3 | e1abd6b53540747eb26c61bd4686c1127b18f048 | refs/heads/master | 2020-03-19T04:33:39.099663 | 2018-05-17T09:20:19 | 2018-05-17T09:20:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,489 | r | survival_model.R | ### Survival analysis: Cox proportional hazard model
# use all cases from prediction-dataset and test with 1 unit
# NOTE(review): `validation` and `training` are assumed to be created by an
# earlier script in this project -- confirm the expected columns (units,
# time_last, id, date, releaseDate) before running standalone.
validation.surv <- subset(validation, units == 1)
# EVTL: Delete cases with releaseDate 2007-10-01
#validation.surv <- subset(validation.surv, validation.surv$releaseDate != "2017-10-01")
# Event-variable and time variable: every training row is treated as an
# observed event (no censoring), with time = days until last purchase.
training$event <- rep(1, NROW(training))
training$time <- as.integer(training$time_last)
# Model: Cox proportional hazards on product attributes and price features.
coxmodel1 <- coxph(Surv(time, event) ~ size + color + rrp + brand + category + mainCategory + subCategory +
                     avg.price + discount.raise, data = training)
summary(coxmodel1)
# Plotting
#plot(survfit(coxmodel2, type = "aalen"), xlab = "Time", ylab = "Survival Probability")
#plot(survfit(Surv(time, event) ~ color, data = training), xlab = "Time",
#     ylab = "Survival Probability", col = training$color) # exemplary plot depending on color-variable
# Prediction (using package pec)
# Extract predicted survival probabilities 
# at selected time-points
time.points <- c(1:150)
prob.surv <- predictSurvProb(object = coxmodel1, newdata = validation.surv, times = time.points)
head(prob.surv)
# Problem: no prediction over future timepoints (>122) that did not occur in training data.
# Adding the predictions for new data to the plot
#lines(survfit(coxmodel2, newdata=validation.surv))
# In order to get a predicted date, we need to merge the last purchase of each product in
# the train-data with the items-data. Then we can add the predicted days until next purchase on that
for (i in unique(validation.surv$id)) {
  validation.surv$last.purchase[validation.surv$id == i] <- max(as.Date(training$date[training$id == i]))
} # warning can be ignored (just some ids from validation data never occured in training data)
# max(as.Date(...)) yields a numeric day count, so convert back to Date.
validation.surv$last.purchase <- as.Date(validation.surv$last.purchase, origin = "1970-01-01")
# we delete the new cases since we cant predict them here
validation.surv[is.na(validation.surv$time_last), ]$last.purchase <- validation.surv[is.na(validation.surv$time_last), ]$releaseDate
# Crossvalidate cutoff-level tau & number of days to round to prediction month
# For each candidate cutoff tau, the predicted sold-out day is the FIRST
# time-point whose predicted survival probability drops below tau.  The four
# stages below repeat the evaluation while widening the accepted date window
# around January 2018 by 0, 3, 5 and 8 days (out-of-window dates are either
# dropped or clamped to Jan 1 / Jan 31).  Each stage prints the model error
# next to a naive "always predict 2018-01-16" baseline.
# NOTE(review): the metric sqrt(sum(error)) / n is used consistently below;
# it is NOT the usual RMSE (sqrt(mean(error^2))) -- confirm it is intended
# before reusing it elsewhere.
for (tau in c(0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.15)) {
print(c("For Level: ", tau))
# add/subtract 0 days: strict January window, no clamping
for (x in 1:NROW(prob.surv)) {
validation.surv$pred.days[x] <- which(prob.surv[x,] < tau)[1]
}
validation.surv$soldOutDate <- validation.surv$last.purchase + validation.surv$pred.days
#hist(validation.surv$soldOutDate, breaks = 200)
validation.surv$soldOutDate[validation.surv$soldOutDate <= "2017-12-31" | validation.surv$soldOutDate >= "2018-02-01"] <- NA
validation.surv$error <- as.integer(round(abs(difftime(validation.surv$date, validation.surv$soldOutDate, units = "days"))))
avg.error.val <- sqrt(sum(validation.surv$error[!is.na(validation.surv$error)])) / sum(!is.na(validation.surv$error))
#hist(validation.surv$soldOutDate, breaks = 200)
# Naive model in comparison
validation.surv$soldOutDate[!is.na(validation.surv$soldOutDate)] <- "2018-01-16"
validation.surv$error <- as.integer(round(abs(difftime(validation.surv$date, validation.surv$soldOutDate, units = "days"))))
avg.error.naive <- sqrt(sum(validation.surv$error[!is.na(validation.surv$error)])) / sum(!is.na(validation.surv$error))
print(c("error: ", avg.error.val, "naive error: ", avg.error.naive, "difference: ", (avg.error.val - avg.error.naive)))
# add/subtract 3 days: wider window, near-misses clamped into January
for (x in 1:NROW(prob.surv)) {
validation.surv$pred.days[x] <- which(prob.surv[x,] < tau)[1]
}
validation.surv$soldOutDate <- validation.surv$last.purchase + validation.surv$pred.days
validation.surv$soldOutDate[validation.surv$soldOutDate <= "2017-12-28" | validation.surv$soldOutDate >= "2018-02-03"] <- NA
validation.surv$soldOutDate[validation.surv$soldOutDate > "2017-12-28" & validation.surv$soldOutDate < "2018-01-01" & !is.na(validation.surv$soldOutDate)] <- "2018-01-01"
validation.surv$soldOutDate[validation.surv$soldOutDate > "2018-01-31" & validation.surv$soldOutDate < "2018-02-03" & !is.na(validation.surv$soldOutDate)] <- "2018-01-31"
validation.surv$error <- as.integer(round(abs(difftime(validation.surv$date, validation.surv$soldOutDate, units = "days"))))
avg.error.val <- sqrt(sum(validation.surv$error[!is.na(validation.surv$error)])) / sum(!is.na(validation.surv$error))
# Naive model in comparison
validation.surv$soldOutDate[!is.na(validation.surv$soldOutDate)] <- "2018-01-16"
validation.surv$error <-as.integer(round(abs(difftime(validation.surv$date, validation.surv$soldOutDate, units = "days"))))
avg.error.naive <- sqrt(sum(validation.surv$error[!is.na(validation.surv$error)])) / sum(!is.na(validation.surv$error))
print(c("error: ", avg.error.val, "naive error: ", avg.error.naive, "difference: ", (avg.error.val - avg.error.naive)))
# add/subtract 5 days
for (x in 1:NROW(prob.surv)) {
validation.surv$pred.days[x] <- which(prob.surv[x,] < tau)[1]
}
validation.surv$soldOutDate <- validation.surv$last.purchase + validation.surv$pred.days
validation.surv$soldOutDate[validation.surv$soldOutDate <= "2017-12-25" | validation.surv$soldOutDate >= "2018-02-05"] <- NA
validation.surv$soldOutDate[validation.surv$soldOutDate > "2017-12-25" & validation.surv$soldOutDate < "2018-01-01" & !is.na(validation.surv$soldOutDate)] <- "2018-01-01"
validation.surv$soldOutDate[validation.surv$soldOutDate > "2018-01-31" & validation.surv$soldOutDate < "2018-02-05" & !is.na(validation.surv$soldOutDate)] <- "2018-01-31"
validation.surv$error <- as.integer(round(abs(difftime(validation.surv$date, validation.surv$soldOutDate, units = "days"))))
avg.error.val <- sqrt(sum(validation.surv$error[!is.na(validation.surv$error)])) /sum(!is.na(validation.surv$error))
# Naive model in comparison
validation.surv$soldOutDate[!is.na(validation.surv$soldOutDate)] <- "2018-01-16"
validation.surv$error <- as.integer(round(abs(difftime(validation.surv$date, validation.surv$soldOutDate, units = "days"))))
avg.error.naive <- sqrt(sum(validation.surv$error[!is.na(validation.surv$error)])) / sum(!is.na(validation.surv$error))
print(c("error: ", avg.error.val, "naive error: ", avg.error.naive, "difference: ", (avg.error.val - avg.error.naive)))
# add/subtract 8 days
for (x in 1:NROW(prob.surv)) {
validation.surv$pred.days[x] <- which(prob.surv[x,] < tau)[1]
}
validation.surv$soldOutDate <- validation.surv$last.purchase + validation.surv$pred.days
validation.surv$soldOutDate[validation.surv$soldOutDate <= "2017-12-22" | validation.surv$soldOutDate >= "2018-02-08"] <- NA
validation.surv$soldOutDate[validation.surv$soldOutDate > "2017-12-22" & validation.surv$soldOutDate < "2018-01-01" & !is.na(validation.surv$soldOutDate)] <- "2018-01-01"
validation.surv$soldOutDate[validation.surv$soldOutDate > "2018-01-31" & validation.surv$soldOutDate < "2018-02-08" & !is.na(validation.surv$soldOutDate)] <- "2018-01-31"
#hist(validation.surv$soldOutDate, breaks = 200)
# Evaluation
validation.surv$error <- as.integer(round(abs(difftime(validation.surv$date, validation.surv$soldOutDate, units = "days"))))
avg.error.val <- sqrt(sum(validation.surv$error[!is.na(validation.surv$error)])) / sum(!is.na(validation.surv$error)) # 0.03601428 on known releaseDates | 0.01668198 on all releaseDates
# Naive model in comparison
validation.surv$soldOutDate[!is.na(validation.surv$soldOutDate)] <- "2018-01-16"
validation.surv$error <- as.integer(round(abs(difftime(validation.surv$date, validation.surv$soldOutDate, units = "days"))))
avg.error.naive <- sqrt(sum(validation.surv$error[!is.na(validation.surv$error)])) / sum(!is.na(validation.surv$error))
print(c("error: ", avg.error.val, "naive error: ", avg.error.naive, "difference: ", (avg.error.val - avg.error.naive)))
}
# Evaluation only for predicted January-cases
jan.cases <- subset(validation.surv, soldOutDate < "2018-02-01" & soldOutDate > "2017-12-31")
# Fix: compute the error from jan.cases itself -- the original used the full
# validation.surv columns, whose length does not match the subset's rows.
jan.cases$error <- as.integer(round(abs(difftime(jan.cases$date, jan.cases$soldOutDate, units = "days"))))
# Fix: count the non-NA errors with sum() (NROW() of a logical vector is just
# its length), matching the denominator used everywhere else in this script.
avg.error.jan.cases <- sqrt(sum(jan.cases$error[!is.na(jan.cases$error)])) / sum(!is.na(jan.cases$error)); avg.error.jan.cases # 0.0900816 | 0.02614358
# # # # # # # # # # # #
# Run model again on full train.new-dataset and predict on items dataset
# use all cases from prediction-dataset and test with a stock of 1
items.surv <- subset(items, stock == 1)
NROW(items.surv) # 7616 cases
# EVTL: Delete cases with releaseDate 2007-10-01
#items.surv <- subset(items.surv, items.surv$releaseDate != "2017-10-01") # only 1019 cases after that
# Event-variable and time variable
train.new$event <- rep(1, NROW(train.new))
train.new$time <- as.integer(train.new$time_last)
# Model: Cox proportional hazards on the full training data
coxmodel2 <- coxph(Surv(time, event) ~ size + color + rrp + brand + category + mainCategory + subCategory +
avg.price + discount.raise, data = train.new)
summary(coxmodel2)
# Plotting
plot(survfit(coxmodel2, type = "aalen"), xlab = "Time", ylab = "Survival Probability")
plot(survfit(Surv(time, event) ~ color, data = train.new), xlab = "Time",
ylab = "Survival Probability", col = train.new$color) # exemplary plot depending on color-variable
# Prediction
# Extract predicted survival probabilities
# at selected time-points
time.points <- c(1:150)
prob.surv <- predictSurvProb(object = coxmodel2, newdata = items.surv, times = time.points)
head(prob.surv)
# Problem: no prediction over future timepoints (>122) that did not occur in training data.
# Adding the predictions for new data to the plot
# NOTE(review): this uses `items`, while the predictions above were made on
# `items.surv` (the stock == 1 subset) -- confirm which one is intended.
lines(survfit(coxmodel2, newdata=items))
# In order to get a predicted date, we need to merge the last purchase of each product in
# the train-data with the items-data. Then we can add the predicted days until next purchase on that
for (i in unique(items.surv$id)) {
items.surv$last.purchase[items.surv$id == i] <- max(as.Date(train.new[train.new$id == i,]$date))
}
items.surv$last.purchase <- as.Date(items.surv$last.purchase, origin = "1970-01-01")
# Define tuned cut-off level for prediction
tau <- 0.06 # use tuned tau from above
# Predicted days until sold out = first time-point whose survival prob. < tau
for (x in 1:NROW(prob.surv)) {
items.surv$pred.days[x] <- which(prob.surv[x,] < tau)[1]
}
items.surv$soldOutDate <- as.Date(items.surv$last.purchase + items.surv$pred.days, origin = "1970-01-01")
### Predicted days outside february? --> How to treat these??
hist(items.surv$soldOutDate, breaks = 200)
# Use tuned days here!!
#validation.surv$soldOutDate[validation.surv$soldOutDate < "2017-12-25" | validation.surv$soldOutDate > "2018-02-05"] <- "2018-01-16"
# Keep only predictions that fall inside February 2018; everything else is NA
items.surv$soldOutDate[items.surv$soldOutDate <= "2018-01-31" | items.surv$soldOutDate >= "2018-03-01"] <- NA
#items.surv$soldOutDate[items.surv$soldOutDate > "2018-01-25" & items.surv$soldOutDate < "2018-02-01" & !is.na(items.surv$soldOutDate)] <- items.surv$soldOutDate[items.surv$soldOutDate > "2018-01-25" & items.surv$soldOutDate < "2018-02-01" & !is.na(items.surv$soldOutDate)] + 7
#items.surv$soldOutDate[items.surv$soldOutDate > "2018-02-28" & items.surv$soldOutDate < "2018-03-05" & !is.na(items.surv$soldOutDate)] <- items.surv$soldOutDate[items.surv$soldOutDate > "2018-02-28" & items.surv$soldOutDate < "2018-03-05" & !is.na(items.surv$soldOutDate)] - 5
sum(!is.na(items.surv$soldOutDate)) # This is effectively the number of predictions in the end
hist(items.surv$soldOutDate, breaks = 200)
table(items.surv$soldOutDate)
# Writing file
write.table(x = items.surv[,c("pid", "size", "soldOutDate")], file = "Uni_HU_Berlin_2", sep = "|", row.names = FALSE)
# Not predictable stock = 1 cases
not.pred <- items.surv[is.na(items.surv$soldOutDate), c("pid", "size")]
pred <- items.surv[!is.na(items.surv$soldOutDate), c("pid", "size")]
write.csv2(x = not.pred, file = "survival.not.pred")
write.csv2(x = pred, file = "survival.pred")
|
a97565a93a7ef087e78173910fb32e06c520a5f4 | 0d524ddf0b2f832ac03d1b1529765e795e519c49 | /run_analysis.R | 9d5f9e7a083de244d9e6627a42fec3bb4c930292 | [] | no_license | ikersanchez/Getting-and-Cleaning-Data-Project | e3662ec7a93a7f223b99425bbf0e7b4117e21fe8 | f858f57ad319449dc8a1f4dce2416576b06c4e49 | refs/heads/master | 2021-01-13T01:26:16.450461 | 2014-04-27T20:12:20 | 2014-04-27T20:12:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,357 | r | run_analysis.R | #######################################
# Getting and Cleaning Data Project #
#######################################
### Set your working directory (UCI_HAR_Dataset folder already there,
### and we assume that this folder has been downloaded, extracted
### and renamed in the same way).
wd <- getwd()
### Useful folders
### NOTE(review): the "\\" separators are Windows-specific; on other
### platforms file.path() would be needed for these paths to resolve.
data.dir <- "UCI_HAR_Dataset\\"
data.dir.test <- "UCI_HAR_Dataset\\test\\"
data.dir.train <- "UCI_HAR_Dataset\\train\\"
### Join files stored in the train folder
### Read every *.txt file found in `path` and column-bind them into the
### global data frame `Train`.
### `path`: directory (relative to the working directory) holding the files.
### Kept as a side-effect function assigning with <<- because the rest of
### this script consumes the global `Train` directly.
creatingTrain <- function(path){
  # Anchor the pattern so only real ".txt" suffixes match -- the original
  # unanchored grep(".txt", ...) also matched names like "xtxty".
  txt.files <- list.files(path, pattern = "\\.txt$")
  # file.path() builds portable paths; lapply() avoids growing a list inside
  # an index loop and is safe when no file matches (1:length() was not).
  train <- lapply(file.path(getwd(), path, txt.files), read.table)
  Train <<- do.call(cbind, train)
}
creatingTrain(data.dir.train) # side effect: creates the global data frame `Train`
### Join files in the test folder
### Read every *.txt file found in `path` and column-bind them into the
### global data frame `Test` (mirror of creatingTrain; same fixes applied
### for consistency: anchored pattern, portable paths, no index loop).
creatingTest <- function(path){
  txt.files <- list.files(path, pattern = "\\.txt$")
  test <- lapply(file.path(getwd(), path, txt.files), read.table)
  Test <<- do.call(cbind, test)
}
creatingTest(data.dir.test) # side effect: creates the global data frame `Test`
### Join both train dataset and test dataset
Final <- rbind(Train,Test)
dim(Final)
### Add col.names
### NOTE(review): column positions 1, 2:562 and 563 are hard-coded and assume
### the exact UCI HAR layout (subject + 561 features + activity code).
names(Final)[1] <- c("Subject")
dir <- paste(wd,"\\",data.dir,"features.txt",sep = "")
features <- read.table(dir)
names(Final)[2:562] <- as.character(features$V2)
names(Final)[563] <- c("Activity_Code")
### Add an empty "Activity" column (placeholder, overwritten just below)
Final$Activity <- as.character(seq(1:nrow(Final)))
### Create labels and fill Activity column with them
Final[Final$Activity_Code == 1,]$Activity <- "WALKING"
Final[Final$Activity_Code == 2,]$Activity <- "WALKING_UPSTAIRS"
Final[Final$Activity_Code == 3,]$Activity <- "WALKING_DOWNSTAIRS"
Final[Final$Activity_Code == 4,]$Activity <- "SITTING"
Final[Final$Activity_Code == 5,]$Activity <- "STANDING"
Final[Final$Activity_Code == 6,]$Activity <- "LAYING"
### Here we are moving columns to improve our dataframe
### (Subject, Activity_Code and Activity columns first of all)
col_idx <- grep("Activity", names(Final))
Final <- Final[, c(col_idx, (1:ncol(Final))[-col_idx])]
col_idx2 <- grep("Subject", names(Final))
Final <- Final[, c(col_idx2, (1:ncol(Final))[-col_idx2])]
### 1)Extracts only the measurements on the mean and standard deviation for
### each measurement.
mean_index <- grep("mean()",names(Final)) #Not enough,this function also gets meanFreq string
meanFreq_index <- grep("meanFreq()",names(Final[,mean_index]))#Here exclude them
sd_index <- grep("std()",names(Final))
mean.sd.data <- Final[,c((1:3),mean_index[-meanFreq_index],sd_index)]
#required_index <- grep("\\(\\)$",names(mean.sd.data)) #Should exclude -X -Y -Z columns??.
#mean.sd.data <- mean.sd.data[,c((1:3),required_index)] ??
dim(mean.sd.data)
head(mean.sd.data,5)
### 2)Creates a second, independent tidy data set with the average of
### each variable for each activity and each subject.
library(reshape2) # We need this package in order to rearrange our dataset
molten <- melt(mean.sd.data,id = c("Activity","Subject"))
FinalData <- dcast(molten,Subject + Activity ~ variable,mean)
names(FinalData) <- gsub("\\(\\)","",names(FinalData))
head(FinalData,5)
#Write table for submit
write.table(FinalData,file = "Submit.txt")
|
deffe70f16bd356b198e97359ccfdf6fc113dd07 | 1bcbbf7a623d2605e4951096c6e6c78204b02dac | /publishtoblog.R | cfbfd923fd13cbdb0c7a1df04b05ec0ff02f8f0c | [] | no_license | qg0/rltrading | 5153bdf80680df3b32778de0cfb745ef2840be08 | f78045ec021bdc257e856dc926039276d97e47a5 | refs/heads/master | 2021-06-14T08:43:25.942764 | 2017-02-24T17:53:36 | 2017-02-24T17:53:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 325 | r | publishtoblog.R | library(knitr)
library(rmarkdown)
# Publish a knitr post: figures are written into the local Dropbox Public
# folder and referenced via the public Dropbox base URL.
knitr::opts_knit$set(base.url = 'https://dl.dropboxusercontent.com/u/860546/',
base.dir = 'C:/Users/Roman/Dropbox/Public/')
# Figure files for this post get a dated prefix.
knitr::opts_chunk$set(fig.path="figure/futfutarb06032015")
knitr::opts_chunk$get("fig.path") # echo the setting as a sanity check
# NOTE(review): knit2html() is superseded by rmarkdown::render(); kept as-is.
knit2html("futfutarb.Rmd", encoding = 'UTF-8')
|
ff5aaf0d78622e2d6dac9679494b4767dee30119 | fb21cc60eb492ecf70c6a74f63eeaf8be96c6239 | /man/write_csv.Rd | e43501c8d85040f0636481bf269e21e0906873da | [
"MIT"
] | permissive | Lextuga007/monstR | 5740bd78d64074e8807a73b5df4e728e09edae05 | 3444c73711c79be1ae04eb1c03b005d1f444813b | refs/heads/master | 2023-05-26T23:18:39.778873 | 2020-11-16T22:07:13 | 2020-11-16T22:07:13 | 284,761,064 | 1 | 0 | MIT | 2020-08-03T17:15:59 | 2020-08-03T17:15:59 | null | UTF-8 | R | false | true | 536 | rd | write_csv.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{write_csv}
\alias{write_csv}
\title{write the data as a csv.}
\usage{
write_csv(data, monstr, create_directory)
}
\arguments{
\item{data}{The actual data}
\item{monstr}{metadata dataframe created by the pipeline}
\item{create_directory}{boolean indicating whether to
(recursively) create the directory hierarchy.}
}
\value{
boolean indicating success
}
\description{
write the data as a csv.
}
\author{
Neale Swinnerton <neale@mastodonc.com>
}
|
e0bebbdfefe2fe4e1ad3304b481132f91e413dda | 279571b80e226589d2fbf4b53644334565969b11 | /man/arg.max.Rd | 6e1837f1c1cd0bf6852b594ca2b8496dd381fc35 | [] | no_license | 42n4/dmr.util | f3ad4caf28c5c6b7e7800cfc0657483968df677d | 2bb248cff14f7fb6fcf65360694bf80b2e96565b | refs/heads/master | 2021-01-20T15:33:03.219584 | 2017-05-09T19:46:31 | 2017-05-09T19:46:31 | 90,785,516 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,698 | rd | arg.max.Rd | \name{arg.max}
\alias{arg.max}
\title{Find an argument that maximizes a function}
\description{
This function applies a numeric-valued function to a vector or list of arguments and returns a specified number of arguments that yield the greatest function values. It is used in examples presented in the book Cichosz, P. (2015): Data Mining Algorithms: Explained Using R. See Appendix B or http://www.wiley.com/go/data_mining_algorithms for more details.
}
\usage{
arg.max(args, fun, k = 1)
}
\arguments{
\item{args}{a vector or list of function arguments}
\item{fun}{a function that can be called with one argument and returns a numeric value}
\item{k}{an integer specifying the number of arguments corresponding
to the greatest function values to return}
}
\details{
The \code{fun} function is applied to each argument in \code{args} and
then the \code{k} arguments that produced the greatest function values
are returned. Ties are broken according to the original ordering of
arguments (using stable sorting to determine the greatest \code{k} values).
}
\value{
If \code{args} is a list, its sublist of length \code{k} containing the
\code{k} arguments that yield the greatest \code{fun} values. If
\code{args} is a vector, its subvector of length \code{k} containing the
\code{k} arguments that yield the greatest \code{fun} values.
}
\references{
}
\author{
Pawel Cichosz <p.cichosz@elka.pw.edu.pl>
}
\note{
}
\seealso{
\code{\link{arg.min}}
}
\examples{
arg.max(1:10, sin)
arg.max(1:10, sin, 3)
data(weatherr, package="dmr.data")
arg.max(1:nrow(weatherr), function(i) weatherr$playability[i], 3)
arg.max(weatherr$temperature, function(temp) abs(temp-20), 3)
}
\keyword{arith}
|
38c863ca33cbaaa23d6a959eedec2a910a94c051 | a22038311e0a8faf72479b5eb2ed0c5e226f14df | /assignment 2.R | 1fbb24e97d5dc5d8229338fc0262b5d17788c10b | [] | no_license | jessybaker78/Statistics-1 | b8bc23dca0c1c239cff700bf52323d6ac41a230e | 8bc76d3240cda91eec03409fc39317d914718a5f | refs/heads/master | 2021-01-24T22:53:30.605444 | 2012-12-11T02:14:50 | 2012-12-11T02:14:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,830 | r | assignment 2.R | setwd('/Users/art/Movies/Study/Statistics/R')
# Read the DAA.02 data set; header = TRUE rather than the shorthand T,
# which is an ordinary variable and can be silently reassigned.
data <- read.table('DAA.02.txt', header = TRUE)
# Split into one data frame per experimental condition ('aer' / 'des').
data <- split(data, data$cond)
# Drop the identifier columns so cor() only sees the measurement variables.
aer <- subset(data$aer, select = -c(cond, pid))
des <- subset(data$des, select = -c(cond, pid))
cor(aer)
cor(des)
# 1. Which measures displayed the lowest correlation pre-training, suggesting the weakest reliability?
#
# Verbal working memory, des condition = 0.92869647
# Spatial working memory, des condition = 0.5401403
# Spatial working memory, aer condition = 0.44859589
# Verbal working memory, aer condition = 0.924055450
# Question 2
# Which measures displayed the highest correlation pre-training, suggesting the strongest reliability?
#
# Spatial working memory, aer condition = 0.448595886
# Verbal working memory, aer condition = 0.924055450
# Spatial working memory, des condition = 0.5401403
# Verbal working memory, des condition. pre.wm.v1~pre.wm.v2 = 0.92869647
# Question 3
# In the aer condition, which individual measure displayed the highest correlation
# between pre and post training?
# wm.v2 = 0.93091709
# wm.s1 = 0.661405034
# wm.s2 = 0.68329378
# wm.v1 = 0.694754675
# Question 4
# In the des condition, which individual measure displayed the highest c
# orrelation between pre and post training?
# wm.v2 = 0.92462271
# wm.s1 = 0.6277946
# wm.v1 = 0.74164780
# wm.s2 = 0.6336110
# Question 5
# Based on the correlations, the construct to be interpreted with most caution, from a measurement perspective,
# is:
# Verbal working memory, aer condition pre.v1~pre.v2: 0.924055450 / post.v1~v2: 0.54233335
# Spatial working memory, aer condition pre.s1~s2: 0.448595886 / post.s1~s2: 0.29732116
# Verbal working memory, des condition pre.v1~v2: 0.92869647 / post.v1~v2: 0.66629183
# Spatial working memory, des condition pre.s1~s2: 0.5401403 / post.s1~s2: 0.1634238 |
ac0b60ee1fa9a19f86b746d1ab8061a9c3bbec9a | e2b8329825d8ff9804eaea9c6a35f724579391b7 | /man/getMSTLeafRootISOMulroot.Rd | c8287f3655046f49186466d9712ac7be8de2b51d | [
"Apache-2.0"
] | permissive | ouyang-lab/LISA2 | 9cfb049344547ab38933821d27bd4f7619055657 | 92007b5e1310ebe0f60fb0799b0f0fd1db529a84 | refs/heads/main | 2023-07-01T14:07:48.616712 | 2021-07-31T18:55:17 | 2021-07-31T18:55:17 | 366,900,149 | 3 | 4 | null | null | null | null | UTF-8 | R | false | true | 1,137 | rd | getMSTLeafRootISOMulroot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mainMethods.R
\name{getMSTLeafRootISOMulroot}
\alias{getMSTLeafRootISOMulroot}
\title{getMSTLeafRootISOMulroot: create spanning tree with specified root and leaves}
\usage{
getMSTLeafRootISOMulroot(
neigDist,
cenDist,
cluid,
L2 = 2,
rootL = 1,
leaves = list(a = c(9, 12, 13, "linear"), b = 17, c = c(2, 6), d = 3, e = c(4, 5), f
= 14)
)
}
\arguments{
\item{neigDist}{neighbor distance matrix of clusters}
\item{cenDist}{distance matrix of cluster centers}
\item{cluid}{vector, the cluster id}
\item{L2}{integer, default is 2, knn size}
\item{rootL}{integer vector, default is 1; the tree will connect the root clusters in linear mode.
For example, rootL = c(1,2,3) produces the root edges 1->2->3}
\item{leaves}{list specifying the leaf groups; supply each leaf group as a vector.
For example, a=c(9,12,13,"linear"), b=c(2,5, "mst"), c=c(7,8,9,"parallel")}
}
\value{
list
list(g5=g5,neigDist=neigDist,w=sum(E(g5)$weight))
}
\description{
getMSTLeafRootISOMulroot: create spanning tree with specified root and leaves
}
\examples{
}
|
5e649a799b00e13cd93c933a44a08088de726a20 | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /B_analysts_sources_github/pablobarbera/instaR/getLocation.R | 5b0cdab59b423e788a0af10060d43a07f90e5da3 | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,132 | r | getLocation.R | #' @rdname getLocation
#' @export
#'
#' @title
#' Get basic information about a location using a location ID
#'
#' @description
#' \code{getLocation} retrieves location information
#'
#' @author
#' Jonne Guyt \email{j.y.guyt@@uva.nl}
#'
#' @param location_id numeric, location id.
#'
#' @param token An OAuth token created with \code{instaOAuth}.
#'
#' @examples \dontrun{
#' ## See examples for instaOAuth to know how token was created.
#' ## Capturing information about a location
#' load("my_oauth")
#' loc_id_info <- getLocation( location_id=423423, token=my_oauth)
#' }
#'
getLocation <- function(location_id, token){
url <- paste0("https://api.instagram.com/v1/locations/", location_id)
content <- callAPI(url, token)
if (content$meta$code==400){
stop(content$meta$error_message)
}
if (length(content$data)==0){
stop("Location ID not known")
}
data <- content$data
data[sapply(data, is.null)] <- NA
df <- data.frame(latitude=data$latitude, longitude=data$longitude,
location_id=data$id, location_name=data$name,
stringsAsFactors=F)
return(df)
}
|
d9cafc7028585b4fb095e0993cc302f2a69ddad7 | 01ececa7c221357eaedf85a1c2b8414fd45302a2 | /R/processVelocytoPyResults.R | e7667272381797e176c6d3e75330b06fe3529751 | [] | no_license | sonejilab/cellexalvrR | d7e30f147d3d991e6858f50b11e90a31863e759c | c533136f59fa906e1c173a46cc4f2e36608c0204 | refs/heads/master | 2023-03-31T19:05:15.669831 | 2023-03-17T07:24:40 | 2023-03-17T07:24:40 | 133,559,720 | 4 | 2 | null | 2018-05-15T18:52:17 | 2018-05-15T18:52:15 | null | UTF-8 | R | false | false | 5,112 | r | processVelocytoPyResults.R | # #' Velocytopy is becoming more and more important and the analysis (normally)
# #' creates a normalized expression hdf5 file and a more detailed (e.g. variable genes based)
# #' dimensional reduction hdf5 file.
# #'
# #' The processed file likely also has velocity information which can also be added
# #' in the correct way to the cellexalvrR object.
# #'
# #' For the VR process bothe the embedings as well as (all) expression data
# #' is of interest.
# #'
# #' Here both files are processed and the resulting
# #' @name processVelocytoPyResults
# #' @aliases processVelocytoPyResults,cellexalvrR-method
# #' @rdname processVelocytoPyResults
# #' @docType methods
# #' @description Combine two anndata files into one cellexalvrR object
# #' @param total the hdf5 file containing all expression values (normalized)
# #' @param variable the hdf5 file containing the embeddings and velocyto data
# #' @param embeddings the embedding names default= c('umap', 'phate')
# #' @param embeddingDims the dimensionality of the embeddings (default 3)
# #' @param velocyto should velocyto data be added to the embeddings default=TRUE
# #' @param veloScale velocyto scaling factor default=20
# #' @param specie the species the data has been created from default='human'
# #' @param minCell4gene the total file might contain genes not expressed at all - filter them default= 10
# #' @title description of function processVelocytoPyResults
# #' @export
# setGeneric('processVelocytoPyResults', ## Name
# function (total, variable, embeddings= c('umap', 'phate'), embeddingDims=3, velocyto =TRUE, veloScale=20, specie='human', minCell4gene = 10) {
# standardGeneric('processVelocytoPyResults')
# }
# )
# setMethod('processVelocytoPyResults', signature = c ('character'),
# definition = function (total, variable, embeddings= c('umap', 'phate'), embeddingDims=3, velocyto =TRUE, veloScale=20, specie='human', minCell4gene = 10) {
# if (!require("hdf5r", quietly = TRUE ) == T ) {
# stop("package 'hdf5r' needed for this function to work. Please install it.",
# call. = FALSE)
# }
# 		if ( ! hdf5r::is_hdf5(variable)) {
# 			stop( "The variable genes / analyzed VelocytoPy outfile is not an h5ad file")
# 		}
# 		if ( ! hdf5r::is_hdf5(total)) {
# 			stop( "The total genes VelocytoPy outfile is not an h5ad file")
# 		}
# file <- H5File$new(variable, mode='r')
# ## parse the data into a sparse matrix
# toSparse <- function(file){
# message("reading expression data")
# x= file[['X']][['data']][]
# i= file[['X']][['indices']][]
# j= rep(0, length(x))
# indptr = file[['X']][['indptr']][]
# last = 1
# for ( a in 2: length(indptr) ) {
# j[(indptr[a-1]+1):(indptr[a]+1)] = last
# last = last+1
# }
# j = j [-length(j)]
# m = Matrix::sparseMatrix( i = i+1, j=j, x=x)
# meta.data = H5Anno2df( file, 'obs')
# annotation = H5Anno2df( file,'var')
# rownames(m) = annotation[,'_index']
# colnames(m) = meta.data[,'_index']
# m
# }
# m = toSparse( file )
# meta.data = H5Anno2df( file, 'obs')
# annotation = H5Anno2df( file, 'var')
# drcs = lapply(embeddings, function(n) {
# ret = t(file[['obsm']][[paste(sep="_",'X',n)]][1:embeddingDims,])
# if ( embeddingDims == 2 ){
# ret = cbind(ret, rep(0, nrow(ret)) )
# }
# ret
# } )
# names(drcs) = embeddings
# cellexalvrR = new( 'cellexalvrR',
# data=m, meta.cell=as.matrix(meta.data),
# meta.gene=as.matrix(annotation),
# drc = drcs, specie = specie )
# if ( velocyto ) {
# for ( n in names(cellexalvrR@drc)) {
# velo_n = paste( sep="_", 'velocity', n )
# cellexalvrR@drc[[n]] =
# cbind(
# cellexalvrR@drc[[n]],
# cellexalvrR@drc[[n]][,1:embeddingDims] + t(file[['obsm']][[velo_n]][,] * veloScale)
# )
# if ( embeddingDims == 2 ){
# cellexalvrR@drc[[n]] =
# cbind(cellexalvrR@drc[[n]],rep(0, nrow(cellexalvrR@drc[[n]])))
# }
# }
# }
# file2 <- H5File$new(total, mode='r')
# m = toSparse( file2 )
# annotation = H5Anno2df( file2, 'var')
# if ( ncol(m) != ncol(cellexalvrR@data)){
# stop( paste("the variable data has",ncol(cellexalvrR@data),"cells and the total",ncol(m),"mismatch not allowd!" ))
# }
# annotation$varGene = rep(0, nrow(annotation))
# annotation$varGene[match(rownames(cellexalvrR@data), rownames(m))] = 1
# ## possible, that the more analyzed data has dropped some cells
# if ( ! ( all.equal( colnames(cellexalvrR@data), colnames(m)) == TRUE ) ) {
# OK_cells <- match(colnames(cellexalvrR@data), colnames(m))
# if ( length(which(is.na(OK_cells))) > 0 ) {
# cellexalvrR = reduceTo( cellexalvrR, what='col', to = colnames(cellexalvrR@data)[which(! is.na(OK_cells))])
# }
# }
# ## and filter the low expression gene, too
# rsum = Matrix::rowSums( m )
# OK_genes = which(rsum >= minCell4gene)
# mOK = m[OK_genes,]
# #cellexalvrR@meta.gene= matrix()
# cellexalvrR@data = mOK
# cellexalvrR@meta.gene = as.matrix(annotation[OK_genes,])
# cellexalvrR
# } )
|
73c2ef50d0b6f80dd77074c5b5ea1cd4f735392c | a04cefa1c882b70dd3351b85eccc8cb65d3ef302 | /scripts/30_Visualization.R | 38476c997474c4a089d4d3dcf12472971eccd9ad | [] | no_license | PirateGrunt/raw_clrs | bd965dd49212d8695ff6be27f40990cf47c79605 | 01e9fc67b5f30761f28b4346353ae97193ad347e | refs/heads/master | 2020-12-24T06:48:26.277534 | 2017-03-08T04:45:17 | 2017-03-08T04:45:17 | 57,164,304 | 0 | 2 | null | 2016-09-06T04:03:41 | 2016-04-26T21:51:52 | HTML | UTF-8 | R | false | false | 3,953 | r | 30_Visualization.R | ## ----echo=FALSE----------------------------------------------------------
# Purled knitr script: each "## ----" line marks a chunk boundary from the
# original .Rmd. Uses the raw, ggplot2 and dplyr packages.
knitr::opts_knit$set(root.dir = "../")
knitr::opts_chunk$set(
comment = "#>",
collapse = TRUE,
fig.pos="t"
)
## ------------------------------------------------------------------------
# Base-graphics scatter of paid losses against net earned premium
dfUpper <- read.csv("./data/upper.csv", stringsAsFactors = FALSE)
plot(dfUpper$CumulativePaid, dfUpper$NetEP)
## ------------------------------------------------------------------------
# A ggplot object is built incrementally: data first, ...
library(raw)
data(RegionExperience)
library(ggplot2)
basePlot <- ggplot(RegionExperience)
class(basePlot)
## ------------------------------------------------------------------------
# ... then aesthetics, ...
basePlot <- basePlot + aes(x = PolicyYear, y = NumClaims, color=Region)
## ------------------------------------------------------------------------
# ... then one or more geoms.
p <- basePlot + geom_line()
p
## ------------------------------------------------------------------------
p <- basePlot + geom_point()
p
## ------------------------------------------------------------------------
p <- basePlot + geom_point() + geom_line()
p
## ------------------------------------------------------------------------
# Same plot with everything declared in a single call
p <- ggplot(RegionExperience, aes(x = PolicyYear, y = NumClaims, group=Region, color=Region)) + geom_line()
p
## ------------------------------------------------------------------------
p <- basePlot + geom_bar(stat="identity", aes(fill = Region))
p
## ------------------------------------------------------------------------
p <- basePlot + geom_bar(stat="identity", position="dodge", aes(fill=Region))
p
## ------------------------------------------------------------------------
# One facet per region, one point color per state; legend suppressed
data(StateExperience)
p <- ggplot(StateExperience, aes(x = PolicyYear, y = NumClaims, color = State)) + geom_point() + facet_wrap(~ Region)
p <- p + theme(legend.position = "none")
p
## ------------------------------------------------------------------------
# Smoothers: loess by default, linear model when method = lm
p <- ggplot(RegionExperience, aes(x = PolicyYear, y = NumClaims, group=Region, color=Region)) + geom_point()
p + geom_smooth(se = FALSE)
## ------------------------------------------------------------------------
p + geom_smooth(method = lm)
## ------------------------------------------------------------------------
data("wkcomp")
## ----results='hide', messages=FALSE--------------------------------------
# Pick two random companies from the workers' comp data and compute paid
# loss ratios (seed fixed for reproducibility).
suppressMessages(library(dplyr))
data("wkcomp")
dfTwo <- wkcomp %>%
raw::CasColNames(FALSE)
set.seed(1234)
dfTwo <- dfTwo %>%
filter(Company %in% sample(unique(dfTwo$Company), 2)) %>%
mutate(PaidLR = CumulativePaid / NetEP)
## ------------------------------------------------------------------------
plt <- ggplot(dfTwo, aes(NetEP, CumulativePaid, color = factor(Lag))) + geom_point() + facet_wrap(~Company, scales="free")
plt
## ------------------------------------------------------------------------
plt + geom_smooth(method = lm, se=FALSE)
## ------------------------------------------------------------------------
# Density of ultimate (Lag 10) paid loss ratios, overlaid then faceted
pltDensity <- ggplot(filter(dfTwo, Lag == 10), aes(PaidLR, fill = Company)) + geom_density(alpha = 0.7)
pltDensity
## ------------------------------------------------------------------------
pltDensity + facet_wrap(~ Company)
## ------------------------------------------------------------------------
# Base-graphics map of the continental US (maps package)
library(maps)
map('state')
## ------------------------------------------------------------------------
# For each storm, keep only the observations from its most recent year.
# (Assignments switched from "=" to "<-" for consistency with the rest of
# this file; behavior is unchanged.)
data(Hurricane)
dfKatrina <- subset(Hurricane, Name == 'KATRINA')
dfKatrina <- dfKatrina[dfKatrina$Year == max(dfKatrina$Year), ]
dfHugo <- subset(Hurricane, Name == 'HUGO')
dfHugo <- dfHugo[dfHugo$Year == max(dfHugo$Year), ]
dfDonna <- Hurricane[Hurricane$Name == 'DONNA', ]
dfDonna <- dfDonna[dfDonna$Year == max(dfDonna$Year), ]
## ------------------------------------------------------------------------
# Overlay each storm track's coordinates on the state map
map('state')
points(dfKatrina$Longitude, dfKatrina$Latitude, pch=19, col = 'red')
points(dfHugo$Longitude, dfHugo$Latitude, pch = 19, col = 'blue')
points(dfDonna$Longitude, dfDonna$Latitude, pch = 19, col = 'green')
|
bd5dc183d2ffd45e0d6779ab11d3a90945687508 | 10ce1da1350976694f829448dfb611e52f809840 | /src-old/make_train_test.R | 1f687a357e57099aaca216efb06740fa0fb17db5 | [] | no_license | bkelemen56/SwiftKeyProject | 740e818522888b0bd7bd46cde115e76fc36b0c88 | a1fac1faa349048931c21f8dd54675e2d24df518 | refs/heads/master | 2021-01-22T04:09:28.492874 | 2017-04-01T23:40:41 | 2017-04-01T23:40:41 | 81,502,165 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,471 | r | make_train_test.R | # ---------------------------------------------------------------------
# make train, test and validation data sets
#
# the following combinations are created:
# 1) small: only 1000/1500/3000 lines from blogs, news and twitter
# 2) 0.01-0.05: 1%-5% of each file
# ---------------------------------------------------------------------
library(readr) # read_lines() used in the main loop below
# ---------------------------------------------------------------------
# Load documents
# ---------------------------------------------------------------------
file_types <- c('blogs', 'news', 'twitter') # corpus parts to process
make_small_dataset <- FALSE                 # toggle the fixed-size samples
# creates the small dataset
create_small_datasets <- function(raw_text, file_type) {
  # Write fixed-size train/test/validate samples for one file type to
  # data/<file_type>.<dataset>.small.txt. Train gets the full per-type line
  # budget (1000/1500/3000); test and validate each get half of it, taken
  # from consecutive, non-overlapping slices of raw_text.
  cat('writing small sample documents\n')
  sizes <- setNames(c(1000L, 1500L, 3000L), file_types)
  n.train <- sizes[[file_type]]
  counts <- c(train = n.train, test = n.train / 2, validate = n.train / 2)
  start <- 1
  for (dataset in names(counts)) {
    chunk <- raw_text[seq(start, length.out = counts[[dataset]])]
    writeLines(chunk, paste0('data/', file_type, '.', dataset, '.small.txt'))
    start <- start + counts[[dataset]]
  }
}
# creates datasets as random % number of lines.
# % refers to the number of lines in train; test/validate get half each.
# Fix: the original used nlines <- pct_lines / 2 and advanced m by it, so an
# odd pct_lines produced fractional index sequences whose values R silently
# truncates, dropping/shifting lines. Integer arithmetic avoids that.
create_pct_dataset <- function(raw_text, file_type, pct) {
  cat('writing ', pct, '% sample documents\n')
  n <- length(raw_text)
  pct_lines <- as.integer(n * pct)
  half <- pct_lines %/% 2
  # need double for train (100%), test (50%) and validation (50%)
  s <- sample(seq_len(n), min(n, pct_lines * 2))
  if (pct <= 0.5) {
    counts <- c(train = pct_lines, test = half, validate = half)
  } else {
    # above 50% there is not enough material for disjoint test/validate sets
    counts <- c(train = pct_lines)
  }
  start <- 1
  for (dataset in names(counts)) {
    out_file <- paste0('data/', file_type, '.', dataset, '.', format(pct, decimal.mark = '_'), '.txt')
    writeLines(raw_text[s][seq(start, length.out = counts[[dataset]])], out_file)
    start <- start + counts[[dataset]]
  }
}
# main loop: for every corpus part, read the raw file and write the sampled
# data sets into data/ (expects raw-data/final/en_US/ and data/ to exist)
for (file_type in file_types) {
  cat('\nprocessing', file_type, '\n')
  in_file <- paste0('raw-data/final/en_US/en_US.', file_type, '.txt')
  raw_text <- read_lines(in_file, progress = interactive())
  # create small datasets
  if (make_small_dataset) {
    create_small_datasets(raw_text, file_type)
  }
  # create pct datasets
  # options c(.10, .15, .20, .25)
  #for (pct in c(.50)) create_pct_dataset(raw_text, file_type, pct)
  #for (pct in c(.06, .07, .08, .09)) create_pct_dataset(raw_text, file_type, pct)
  for (pct in c(.75, 1.00)) create_pct_dataset(raw_text, file_type, pct)
}
cat('\nend program')
|
221f39a3809e7cb697bb37ad0f72c805d17be098 | e2a5cdf2dcbd788ac7c091897b5a027a809c302a | /R/segmentHighlight.R | c3f864e4312d432694915520cf6671f400551169 | [] | no_license | lindbrook/cholera | 3d20a0b76f9f347d7df3eae158bc8a357639d607 | 71daf0de6bb3fbf7b5383ddd187d67e4916cdc51 | refs/heads/master | 2023-09-01T01:44:16.249497 | 2023-09-01T00:32:33 | 2023-09-01T00:32:33 | 67,840,885 | 138 | 13 | null | 2023-09-14T21:36:08 | 2016-09-10T00:19:31 | R | UTF-8 | R | false | false | 3,047 | r | segmentHighlight.R | #' Highlight segment by ID.
#'
#' @param id Character. Segment ID: a concatenation of a street's numeric ID, a whole number between 1 and 528, and a second number to identify the segment.
#' @param highlight Logical. Color segment.
#' @param col Character. Highlight color.
#' @param rotate.label Logical. Rotate segment ID label to follow the segment's slope.
#' @param latlong Logical. Use estimated longitude and latitude.
#' @return A base R graphics segment(s).
#' @export
#' @examples
#' streetNameLocator("Soho Square", zoom = TRUE, highlight = FALSE)
#' ids <- road.segments[road.segments$name == "Soho Square", "id"]
#' invisible(lapply(ids, function(x) segmentHighlight(x, highlight = FALSE)))
segmentHighlight <- function(id, highlight = TRUE, col = "red",
  rotate.label = FALSE, latlong = FALSE) {
  # Validate the segment ID before touching any graphics state.
  if (is.character(id) == FALSE) stop('id\'s type must be character.',
    call. = FALSE)
  if (id %in% cholera::road.segments$id == FALSE) {
    stop("Invalid segment ID. See cholera::road.segments.", call. = FALSE)
  }
  if (latlong) {
    # Geographic coordinates: look the segment up in the lon/lat table.
    rd.segs <- roadSegments(latlong = latlong)
    seg <- rd.segs[rd.segs$id == id, ]
    if (highlight) {
      segments(seg$lon1, seg$lat1, seg$lon2, seg$lat2, col = col, lwd = 3)
    }
    # Label position: midpoint of the segment's endpoints.
    mid.pt <- data.frame(lon = mean(unlist(seg[, c("lon1", "lon2")])),
      lat = mean(unlist(seg[, c("lat1", "lat2")])))
    if (rotate.label) {
      # Project each endpoint onto Cartesian meters (geodesic distance
      # from the map's lower-left origin along each axis), then fit an
      # OLS line through the two projected points; the slope gives the
      # rotation angle for the label.
      origin <- data.frame(lon = min(cholera::roads$lon),
        lat = min(cholera::roads$lat))
      x1.proj <- c(seg$lon1, origin$lat)
      y1.proj <- c(origin$lon, seg$lat1)
      x2.proj <- c(seg$lon2, origin$lat)
      y2.proj <- c(origin$lon, seg$lat2)
      lon1.meters <- geosphere::distGeo(x1.proj, origin)
      lat1.meters <- geosphere::distGeo(y1.proj, origin)
      lon2.meters <- geosphere::distGeo(x2.proj, origin)
      lat2.meters <- geosphere::distGeo(y2.proj, origin)
      cartesian <- data.frame(x = c(lon1.meters, lon2.meters),
        y = c(lat1.meters, lat2.meters))
      ols <- stats::lm(y ~ x, data = cartesian)
      intercept.slope <- stats::coef(ols)
      # Convert the fitted slope to degrees for text(srt = ...).
      angle <- atan(intercept.slope["x"]) * 180L / pi
      text(mid.pt$lon, mid.pt$lat, labels = id, col = col, srt = angle)
    } else {
      text(mid.pt$lon, mid.pt$lat, labels = id, col = col)
    }
  } else {
    # Native (nominal) coordinate system: use the package's segment table.
    seg <- cholera::road.segments[cholera::road.segments$id == id, ]
    if (highlight) segments(seg$x1, seg$y1, seg$x2, seg$y2, col = col, lwd = 3)
    seg.data <- data.frame(x = unlist(seg[, c("x1", "x2")]),
      y = unlist(seg[, c("y1", "y2")]),
      row.names = NULL)
    # Fit a line through the two endpoints; place the label on that line
    # at the segment's mean x.
    intercept.slope <- stats::coef(stats::lm(y ~ x, data = seg.data))
    x.prime <- mean(seg.data$x)
    y.prime <- x.prime * intercept.slope["x"] + intercept.slope["(Intercept)"]
    if (rotate.label) {
      angle <- atan(intercept.slope["x"]) * 180L / pi
      text(x.prime, y.prime, labels = id, srt = angle, col = col)
    } else {
      text(x.prime, y.prime, labels = id, col = col)
    }
  }
}
|
41091edfb01bceab913916469c9629c3c33960c8 | 08e5449bbb28ce2e4dda96e40a5d38e176fd0179 | /Stats700/HW_solutions/HW0/Yuanzhi/Bivariate-normal-1.R | 19992e1677f2d0bed02e2af1ddedff201c2b9730 | [] | no_license | Pill-GZ/Teaching | 7e63e894ce975170569de754db992290568d8e1d | c60b7c6d478419bb361978f137ac2c17bbcc24de | refs/heads/master | 2021-01-22T03:15:01.534748 | 2020-07-27T15:10:25 | 2020-07-27T15:10:25 | 81,099,878 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,818 | r | Bivariate-normal-1.R | Inv.quadform <- function(X, Sigma){
Y = t(X) %*% solve(Sigma, X)
return(Y)
}
library(MASS)

# Monte Carlo comparison of Fieller's and the delta-method confidence
# intervals for the ratio mu.Y / mu.X of bivariate-normal means.
# For each sample size in N, M datasets are simulated; coverage of the
# true ratio (mu.Y / mu.X = 1 here) and the average CI widths are recorded.
# Requires Inv.quadform() (defined elsewhere in this file).

N <- c(20, 100, 500, 2000)  # sample sizes to compare
M <- 5000                   # Monte Carlo replications per sample size
sigma.X <- 1
sigma.Y <- 1
rho <- 0.3
Sigma <- matrix(c(sigma.X^2, sigma.X*sigma.Y*rho,
                  sigma.X*sigma.Y*rho, sigma.Y^2), 2, 2)
mu.X <- 10
mu.Y <- 10

# Per-sample-size results, filled in below.
Fin.Width.Fieller <- rep(NA, length(N))
Fin.Width.Delta <- Fin.Width.Fieller
Fin.Cover.Delta <- Fin.Width.Fieller
Fin.Cover.Fieller <- Fin.Width.Fieller

for (j in seq_along(N)) {  ## Vary the sample size n
  print(j)
  Cover.Fieller <- 0
  Cover.Delta <- 0
  Width.Fieller <- 0
  Width.Delta <- 0
  n <- N[j]
  for (i in 1:M) {  ## Simulation starts
    #########
    ## Generating Data
    Data <- mvrnorm(n, mu = c(mu.X, mu.Y), Sigma)
    ###########
    ## Estimating Delta method
    mu.hat <- apply(Data, 2, mean)
    Data.centered <- apply(Data, 1, '-', mu.hat)
    # Pooled scale estimate using the known correlation structure.
    sigma2.hat <- sum(apply(Data.centered, 2, Inv.quadform, Sigma)) / (2*n)
    Sigma.hat <- matrix(c(sigma2.hat/n, rho*sigma2.hat/n, rho*sigma2.hat/n,
                          sigma2.hat/n), 2, 2)
    # Gradient of g(x, y) = y/x evaluated at the sample means.
    temp.vec <- c(-mu.hat[2]/mu.hat[1]^2, 1/mu.hat[1])
    Delta.var.est <- t(temp.vec) %*% Sigma.hat %*% temp.vec
    Delta.CI <- mu.hat[2]/mu.hat[1] + c(-qnorm(0.975) * sqrt(Delta.var.est),
                                        qnorm(0.975) * sqrt(Delta.var.est))
    ## Estimating Fieller's method: the CI endpoints are the roots of
    ## the quadratic f2*r^2 - 2*f1*r + f0 = 0.
    a <- mu.hat[2]
    b <- mu.hat[1]
    t.quant <- qt(0.975, n-2)
    f1 <- a*b - t.quant^2 * rho * sigma2.hat / n
    f2 <- b^2 - t.quant^2 * sigma2.hat / n
    f0 <- a^2 - t.quant^2 * sigma2.hat / n
    D <- f1^2 - f0*f2
    Fieller.CI <- c((f1 - sqrt(D)) / f2, (f1 + sqrt(D)) / f2)
    ## Evaluate the width and coverage rate (true ratio = 1)
    Width.Fieller <- Width.Fieller + diff(Fieller.CI)
    Width.Delta <- Width.Delta + diff(Delta.CI)
    if (Fieller.CI[1] <= 1 && Fieller.CI[2] >= 1) Cover.Fieller <- Cover.Fieller + 1
    # BUG FIX: the delta-method coverage must be checked against the
    # delta CI's own upper bound; the original compared Delta.CI[1]
    # with Fieller.CI[2], inflating the delta coverage rate.
    if (Delta.CI[1] <= 1 && Delta.CI[2] >= 1) Cover.Delta <- Cover.Delta + 1
  }
  Fin.Width.Fieller[j] <- Width.Fieller / M
  Fin.Width.Delta[j] <- Width.Delta / M
  Fin.Cover.Fieller[j] <- Cover.Fieller / M
  Fin.Cover.Delta[j] <- Cover.Delta / M
}

# Coverage-rate plot: both methods against the nominal 95% level.
# opar= par()
# par(cex.axis=1.5,cex.lab=1.5)
plot(Fin.Cover.Delta~log(N), type= 'b', ylim=c(0.94,0.96), pch = 4, xlab = 'log sample size', ylab = 'Coverage rates')
points(Fin.Cover.Fieller~log(N), type='b', pch = 5,col='blue')
abline(h=0.95, col = 'red',lty=2)
legend('topright',legend = c('Delta','Fieller','Nominal level'),lwd=2,pt.cex=1.2,cex=0.8, lty = c(1,1,2), pch=c(4,5,-1),col=c('black','blue','red'),y.intersp =0.35)
#
# # plot((Fin.Width.Delta)~log(N), type= 'b', ylim=(c(0.01,0.12)), pch = 4, xlab = 'log sample size', ylab = 'Average width')
# # points((Fin.Width.Fieller)~log(N), type='b', pch = 5, col='blue')
# # legend('topright',legend = c('Delta','Fieller'),lwd=2,pt.cex=1.2,cex=0.9, lty = c(1,1), pch=c(4,5),col=c('black','blue'),y.intersp =0.35)
#
|
f341f28567d433e4059cf49c075800eed2361180 | e83330b72a1a023b4b0f85bb02552484282eacf9 | /R/control_params.R | 74d93c0608dd732c6b873062db20eec02abebf66 | [] | no_license | cran/bootfs | e352d8e0446c32dca97649adf8767680d012c602 | 5266e612d0ad413989bdf36ab71867cde22e8b3e | refs/heads/master | 2019-03-10T15:35:35.058997 | 2012-08-01T00:00:00 | 2012-08-01T00:00:00 | 17,694,859 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,673 | r | control_params.R | #' Create control parameter object for the classifiers
#'
#' This function creates a set of control parameters which is passed to the classifier functions.
#'
#' @param seed A random seed to be set before the classification
#' @param bstr Integer. Number of bootstrap iterations.
#' @param ncv Integer. Number of crossvalidation folds.
#' @param repeats Integer. Number of repeats for cross-validation.
#' @param saveres Boolean. If TRUE, save results.
#' @param jitter Boolean. If TRUE, generate a small amount of noise, if standard deviations for samples are zero. NOTE: Use with care!
#' @param maxiter Integer. Maximum number of iterations in SCAD SVM. Parameter for SCAD SVM from \code{penalizedSVM} package.
#' @param maxevals Integer. Parameter for SCAD SVM from \code{penalizedSVM} package.
#' @param bounds Parameter for SCAD SVM from \code{penalizedSVM} package.
#' @param max_allowed_feat Integer. PAMR parameter, bounding the maximum number of features reported.
#' @param n.threshold Integer. PAMR parameter, number of thresholds to be generated.
#' @param maxRuns Integer. RF_Boruta parameter, number of runs in Boruta selection.
#' @param localImp Boolean. randomForest parameter; save local importances.
#' @param rfimportance String. randomForest parameter; which importance measure should be used in the randomForest (method 'rf') to rank and select features? Either \code{MeanDecreaseGini} or \code{MeanDecreaseAccuracy}. Features are selected with \code{rfimportance} >= 1.
#' @param ntree Integer. randomForest and GBM parameter; Number of trees to be used.
#' @param shrinkage Double. GBM parameter; shrinkage step size.
#' @param interaction.depth Integer. GBM parameter.
#' @param bag.fraction Numeric in 0..1. GBM parameter; Fraction of bagged samples.
#' @param train.fraction Numeric in 0..1. GBM paramter; Fraction of training samples.
#' @param n.minobsinnode Integer. GBM parameter.
#' @param n.cores Integer. GBM parameter.
#' @param verbose Boolean. GBM parameter. Be verbose or not.
#' @details
#' This function is used to define a set of control parameters used in the different methods. For each parameter, consult the respective help pages of the methodologies.
#' @seealso
#' \code{penalizedSVM}
#' \code{randomForest}
#' \code{gbm}
#' \code{Boruta}
#' \code{pamr}
#' @return
#' List with all named control parameters.
#' @examples \dontrun{ control_params() }
control_params <- function(seed=123,
                           bstr=100,
                           ncv=5, repeats=10,
                           saveres=TRUE,
                           jitter=FALSE, ## general parameters
                           maxiter=1000, maxevals=500, bounds=NULL,## scad parameters
                           max_allowed_feat=NULL, n.threshold=50, ## pamr parameters
                           maxRuns=300, ## rf_boruta
                           localImp=TRUE, rfimportance="MeanDecreaseAccuracy", ## RF parameters
                           ntree = 1000, ## GBM parameters, also RF
                           shrinkage = 0.01, interaction.depth = 3, ## GBM parameters
                           bag.fraction = 0.75, train.fraction = 0.75,
                           n.minobsinnode = 3, n.cores = 1,
                           verbose = TRUE)
{
  # Bundle every tuning knob into one named list; the classifier
  # wrappers read their settings from this list by name.
  list(seed = seed,
       jitter = jitter, saveres = saveres,                  ## general parameters
       bstr = bstr,
       ncv = ncv, repeats = repeats,
       maxiter = maxiter, maxevals = maxevals, bounds = bounds,  ## scad parameters
       max_allowed_feat = max_allowed_feat, n.threshold = n.threshold,  ## pamr parameters
       maxRuns = maxRuns,                                   ## RF parameters (Boruta)
       localImp = localImp, rfimportance = rfimportance,    ## RF parameters
       ntree = ntree,                                       ## GBM parameters, also RF
       shrinkage = shrinkage, interaction.depth = interaction.depth,
       bag.fraction = bag.fraction, train.fraction = train.fraction,
       n.minobsinnode = n.minobsinnode, n.cores = n.cores,
       verbose = verbose)
}
|
67dd5c1dfd164a829b7f8a26541d8b5823b794e2 | 04b15e454486b62d7cfe9aed63d9347ecae90a39 | /R/importedFunctions.R | 137092dfe17a40b3d45f3c757d45f8ffc067b126 | [] | no_license | egmg726/crisscrosslinker | 272d39ec99abba40d1a9f057caf6d5d3361b846f | f0d5f80c5ee7966b11586923f8e1a89c74cb9661 | refs/heads/master | 2021-06-18T07:40:20.028850 | 2021-01-15T00:15:57 | 2021-01-15T00:15:57 | 146,306,055 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 490 | r | importedFunctions.R | #' @import ggplot2
NULL
# Package-level roxygen import directives.  Each `NULL` below is a dummy
# statement for roxygen2 to attach the preceding @import/@importFrom tag
# to; on document() these tags generate the package NAMESPACE imports.
#' @import bio3d
NULL
#' @importFrom Biostrings pairwiseAlignment
NULL
#'@importFrom seqinr read.fasta a aaa
NULL
# NOTE(review): `#'@import pkg` (no space after #') is parsed the same
# as `#' @import pkg` by roxygen2 — the notation below is valid.
#'@import RColorBrewer
NULL
#'@importFrom prodlim row.match
NULL
#'@importFrom openxlsx read.xlsx
NULL
#'@importFrom stringr str_locate_all str_count
NULL
#'@import httr
NULL
#'@import jsonlite
NULL
#'@import xml2
NULL
#'@import grDevices
NULL
#'@importFrom svglite svglite
NULL
#'@import viridis
NULL
# NOTE(review): svglite appears twice (importFrom above and @import
# here); harmless but redundant — consider dropping one of the two.
#'@import svglite
NULL
93b83c8ffb1c3c2521ac19532ccd2f67cb848edd | 8f24cc9723b56cd7c684c117e2f567d5b84b1ef1 | /man/plot.EEM.go.Rd | a8cc0c8879b68b49a2ee5b53b7c36c03df19611f | [
"MIT"
] | permissive | RichardLaBrie/paRafac_correction | 1723a77eb15c17a50109815d2690882b67c1e336 | c0d0dda8b7f70fe02290684f7875583b01b1fc20 | refs/heads/master | 2023-04-09T08:06:22.107375 | 2023-03-08T12:41:16 | 2023-03-08T12:41:16 | 89,527,851 | 1 | 1 | MIT | 2022-02-11T11:00:40 | 2017-04-26T21:23:21 | R | UTF-8 | R | false | true | 2,094 | rd | plot.EEM.go.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.eem.go.R
\name{plot.EEM.go}
\alias{plot.EEM.go}
\title{Subtract and plot fluorescence Excitation-Emission Matrix (EEM)}
\usage{
\method{plot}{EEM.go}(
path,
zlim = c(0, 10),
SUBTRACT.BLANK = TRUE,
PLOT.RAMAN = TRUE,
PLOT.BLANK = TRUE,
excitation = c(220, 450, 5),
emission = c(230, 600, 2),
EMCOL = FALSE,
samplepercsv = 1,
RU = T
)
}
\arguments{
\item{path}{Full path of the working directory (can be called using getwd())}
\item{zlim}{is the limits of the fluoresence scale (z axis) in raw units. Default is c(0, 10) for clear oceanic waters.}
\item{SUBTRACT.BLANK}{is a logical parameter to indicate if a blank EEM is to be subtracted from the sample EEM.
Default is TRUE and the user is prompted to first select the blank file.}
\item{PLOT.RAMAN}{is a logical parameter indicating
whether or not the Raman peak is plotted. Default is TRUE.}
\item{PLOT.BLANK}{is a logical parameter indicating whether the blank EEM is plotted or not. Default is TRUE.}
\item{excitation}{is a vector of three variables of the scanning setup (min, max, interval).
Default is c(220, 450, 5)}
\item{emission}{is a vector of three variables of the scanning setup (min, max, interval).
Default is c(230, 600, 2)}
\item{EMCOL}{is a logical parameter indicating whether or not the emission are
stored as column in the csv file. Default is FALSE.}
\item{samplepercsv}{is a parameter which indicates the number of sample in the csv file coming from the fluorometer.}
\item{RU}{is a logical parameter to transform fluorescence intensities into Raman Unit at Ex = 350 nm.
Default is TRUE}
}
\value{
Returns the EEM from the sample
}
\description{
A simple function to visualize EEMs obtained using a Varian Cary Eclipse fluorometer in csv format
(could evolved to include other instrument). The user is prompted to select one or many files
}
\details{
}
\examples{
plot.EEM.go(getwd(), zlim = c(0, 20), SUBTRACT.BLANK = FALSE)
}
\seealso{
\code{\link{read.EEM}}
}
\author{
Simon Bélanger & Richard LaBrie
}
|
26bb87c51ba6861a2136dbf15577d5ef550db370 | 25a47e4e218143016496b4ebb1e767afb3d28bae | /shiny_app/global.r | 9dc8eca2fe5ea025defe8f423ce65e960d582e18 | [] | no_license | greg-slater/london-atmospheric-inequality | 81f175bd4a5b04222c5a5161f82cb3eadeea0779 | f3e36da451aaf9b1871eae7eae28c429d8330159 | refs/heads/master | 2020-05-18T08:55:34.146975 | 2019-04-30T18:29:04 | 2019-04-30T18:29:04 | 184,309,737 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 112 | r | global.r | library(shiny)
library(leaflet)
library(dplyr)
library(tidyr)
library(ggplot2)
library(sf)
library(RColorBrewer) |
9a70583805168448710ee35ac16cbbaec2e3d5c4 | 909b271137b3144466c77f94581b33a8bc368576 | /106/0522.R | 7b590320076a4f83bf716d90893cc2f458588c13 | [] | no_license | CGUIM-BigDataAnalysis/BigDataCGUIM | 8c5f1ece53853cbef7f455be5ad3ee724f5a2e8b | c69c1383dcb21971b9e91a9915d857030fd543f3 | refs/heads/master | 2023-01-22T11:45:49.819933 | 2023-01-20T13:58:03 | 2023-01-20T13:58:03 | 53,340,224 | 54 | 43 | null | null | null | null | UTF-8 | R | false | false | 2,194 | r | 0522.R | library(jsonlite)
# Fetch Taoyuan public-bike station data (open-data JSON endpoint).
BikeData<-fromJSON("https://data.tycg.gov.tw/opendata/datalist/datasetMeta/download?id=5ca2bfc7-9ace-4719-88ae-4034b9a5a55c&rid=a1b4714b-3b75-4ff8-a8f2-cc377e4eaa0f")
# Flatten the nested retVal list into a data frame; columns come out as
# X1, X2, ... (X3 = capacity, X4 = latitude, X5 = longitude — assumed
# from the plotting below; TODO confirm against the API field order).
BikeDataDF <- data.frame(
  matrix(unlist(BikeData$retVal),
         nrow=length(BikeData$retVal), byrow=T),
  stringsAsFactors=FALSE)
library(ggplot2)
library(ggmap)
# Base map centered on Taoyuan (requires a Google Maps API key in ggmap).
t<-get_googlemap(center=c(lon=121.20,lat=25.00),
                 zoom = 11,language="zh-TW")
ggmap(t)
# Coerce coordinate/size columns from character to numeric.
BikeDataDF$X5<-as.numeric(BikeDataDF$X5)
BikeDataDF$X4<-as.numeric(BikeDataDF$X4)
BikeDataDF$X3<-as.numeric(BikeDataDF$X3)
# Station map: point size scaled by X3.
n<-ggmap(t)+
  geom_point(data=BikeDataDF,
             aes(x=X5, y=X4,size=X3),
             color="red")+
  scale_size(range = c(1,10))
n
library(readr)
# Taiwan CDC daily dengue case data.
Dengue <-
  read_csv("https://od.cdc.gov.tw/eic/Dengue_Daily.csv")
Dengue $最小統計區中心點X<-
  as.numeric(Dengue $最小統計區中心點X)
Dengue $最小統計區中心點Y<-
  as.numeric(Dengue $最小統計區中心點Y)
Dengue$確定病例數<-
  as.numeric(Dengue $確定病例數)
WrongData<-data.frame(lon=Dengue $最小統計區中心點X,
                      lat=Dengue $最小統計區中心點Y,
                      stringsAsFactors=F)
WrongData$lon<-as.numeric(as.character(WrongData$lon))
# NOTE(review): the next line only prints WrongData$lat; it assigns
# nothing — presumably leftover from interactive inspection.
WrongData$lat
library(dplyr)
# NOTE(review): rows with case count > 1 are appended once more,
# apparently to up-weight multi-case records in the density estimate —
# TODO confirm this weighting is intended.
df<-filter(Dengue,Dengue$確定病例數>1.0)
Dengue<-rbind(Dengue,df)
library(ggmap)
# Base map covering Taiwan for the dengue density overlay.
twmap <- get_googlemap(center = c(lon=120.58,lat=23.58),
                       zoom = 8,
                       language = "zh-TW")
# 2D kernel density of case locations, drawn as contours plus a filled
# green-to-red gradient.
ggmap(twmap, extent = "device")+
  geom_density2d(data = Dengue, aes(x = 最小統計區中心點X,
                                    y =最小統計區中心點Y),
                 size = 1) +
  stat_density2d(data = Dengue,
                 aes(x = 最小統計區中心點X,
                     y = 最小統計區中心點Y,
                     fill = ..level.., alpha = ..level..),
                 size = 0.01, bins = 16, geom = "polygon") +
  scale_fill_gradient(low = "green",
                      high = "red", guide = FALSE) +
  scale_alpha(range = c(0, 0.3), guide = FALSE)
# Case counts per county of residence, descending.
Dengue %>% group_by(居住縣市) %>%
  summarise(Count=n()) %>%
  arrange(desc(Count))
8fbab454ccf8086b302f6704ddee5ae394b20f5b | 5508a3732fcbe38d33d887bdbc5c48d466e5bfac | /hw10/OMDB/plot.R | f856d09346be782bec36b0ff2db5850a4eeec64f | [] | no_license | arthursunbao/STAT545-Homework | dbc5a81c7b9524b35b014c2f3c9aeb35b635cb0f | 2bac65bab3db6b3daf0773a7955a36d7262c0eb6 | refs/heads/master | 2021-08-24T00:20:21.764582 | 2017-12-07T07:27:11 | 2017-12-07T07:27:11 | 103,560,458 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,480 | r | plot.R | library(purrr)
library(ggplot2)
library(reshape)
library(stringr)
library(tidyverse)
library(glue)
library(plyr)
#Get the data from the csv
movie_rating_transformers <- read_csv("./transformers_rating.csv")
movie_rating_startrek <- read_csv("./star_trek_rating.csv")
#Let's get a brief overview of the current dataset we have for transformers
knitr::kable(movie_rating_transformers)
#Let's get a brief overview of the current dataset we have for star trek
knitr::kable(movie_rating_startrek)
#Let's do some plots
#Let's first get the IMDB score for transformers
movie_rating_transformers_temp <- movie_rating_transformers %>% select(movie_title, imdb)%>% filter(imdb > 0) %>% arrange(desc(imdb))
#Let's choose top 5 for IMDB for transformers
movie_rating_transformers_temp <- movie_rating_transformers_temp[0:5,]
knitr::kable(movie_rating_transformers_temp)
#Then let's draw a plot for top 5 scores
plot1 <- ggplot(movie_rating_transformers_temp, aes(x=movie_rating_transformers_temp$movie_title, y = movie_rating_transformers_temp$imdb)) +
geom_bar(stat="identity",position="dodge",fill = "dark blue")+
labs(x = "Movie Name", y = "IMDB Score", title = "IMDB Score of Transformers") +
theme(plot.title = element_text(hjust = 0.5))
ggsave("./imdb_transformer.png",plot1,device = "png", width = 10, height = 7,dpi = 500)
#Let's first get the RT score for transformers
movie_rating_transformers_temp_rt <- movie_rating_transformers %>% select(movie_title, RT)%>% filter(RT > 0) %>% arrange(desc(RT))#Let's choose top 5 for RT for transformers
movie_rating_transformers_temp_rt <- movie_rating_transformers_temp_rt[0:5,]
#Then let's have an overview
knitr::kable(movie_rating_transformers_temp_rt)
#Then let's draw a plot for top 5 scores
plot2 <- ggplot(movie_rating_transformers_temp_rt, aes(x=movie_rating_transformers_temp_rt$movie_title, y = movie_rating_transformers_temp_rt$RT)) +
geom_bar(stat="identity",position="dodge",fill = "dark blue")+
labs(x = "Movie Name", y = "RT Score", title = "RT Score of Transformers") +
theme(plot.title = element_text(hjust = 0.5))
ggsave("./rt_transformer.png",plot2,device = "png", width = 10, height = 7,dpi = 500)
#Let's first get the Met score for transformers
movie_rating_transformers_temp_met <- movie_rating_transformers %>% select(movie_title, Met)%>% filter(Met > 0) %>% arrange(desc(Met))#Let's choose top 5 for RT for transformers
#Let's choose top 5 for RT
movie_rating_transformers_temp_met <- movie_rating_transformers_temp_met[0:5,]
#Then let's have an overview
knitr::kable(movie_rating_transformers_temp_met)
#Then let's draw a plot for top 5 scores
plot3 <- ggplot(movie_rating_transformers_temp_met, aes(x=movie_rating_transformers_temp_met$movie_title, y = movie_rating_transformers_temp_met$Met)) +
geom_bar(stat="identity",position="dodge",fill = "dark blue")+
labs(x = "Movie Name", y = "Met Score", title = "Met Score of Transformers") +
theme(plot.title = element_text(hjust = 0.5))
ggsave("./met_transformer.png",plot3,device = "png", width = 10, height = 7,dpi = 500)
#### Do Some Work with Star Trek
#Let's do some plots
#Let's first get the IMDB score for Star Trek
movie_rating_star_trek_temp <- movie_rating_startrek %>% select(movie_title, imdb)%>% filter(imdb > 0) %>% arrange(desc(imdb))
#Let's choose top 5 for IMDB for Star Trek
movie_rating_star_trek_temp <- movie_rating_star_trek_temp[0:5,]
knitr::kable(movie_rating_star_trek_temp)
#Then let's draw a plot for top 5 scores
plot4 <- ggplot(movie_rating_star_trek_temp, aes(x=movie_rating_star_trek_temp$movie_title, y = movie_rating_star_trek_temp$imdb)) +
geom_bar(stat="identity",position="dodge",fill = "green")+
labs(x = "Movie Name", y = "IMDB Score", title = "IMDB Score of Star Trek") +
theme(plot.title = element_text(hjust = 0.5))
ggsave("./imdb_star_trek.png",plot4,device = "png", width = 10, height = 7,dpi = 500)
#Let's first get the RT score for Star Trek
movie_rating_star_trek_temp_rt <- movie_rating_startrek %>% select(movie_title, RT)%>% filter(RT > 0) %>% arrange(desc(RT))
movie_rating_star_trek_temp_rt <- movie_rating_star_trek_temp_rt[0:5,]
#Then let's have an overview
knitr::kable(movie_rating_star_trek_temp_rt)
#Then let's draw a plot for top 5 scores
plot5 <- ggplot(movie_rating_star_trek_temp_rt, aes(x=movie_rating_star_trek_temp_rt$movie_title, y = movie_rating_star_trek_temp_rt$RT)) +
geom_bar(stat="identity",position="dodge",fill = "red")+
labs(x = "Movie Name", y = "RT Score", title = "RT Score of Star Trek") +
theme(plot.title = element_text(hjust = 0.5))
ggsave("./rt_star_trek.png",plot5,device = "png", width = 10, height = 7,dpi = 500)
#Let's first get the Met score for Star Trek
movie_rating_star_trek_temp_met <- movie_rating_startrek %>% select(movie_title, Met)%>% filter(Met > 0) %>% arrange(desc(Met))
#Let's choose top 5 for Met
movie_rating_star_trek_temp_met <- movie_rating_star_trek_temp_met[0:5,]
#Then let's have an overview
knitr::kable(movie_rating_star_trek_temp_met)
#Then let's draw a plot for top 5 scores
plot6 <- ggplot(movie_rating_star_trek_temp_met, aes(x=movie_rating_star_trek_temp_met$movie_title, y = movie_rating_star_trek_temp_met$Met)) +
geom_bar(stat="identity",position="dodge",fill = "dark blue")+
labs(x = "Movie Name", y = "Met Score", title = "Met Score of Star Trek") +
theme(plot.title = element_text(hjust = 0.5))
ggsave("./met_star_trek.png",plot6,device = "png", width = 10, height = 7,dpi = 500)
|
fe093c32bf99ba2068c7e2cf597dc9007f90e466 | 1855cfeeb88680ebcad978f73ecf11aac5e37bf3 | /vmstools/R/old/distanceVMS.R | 14362a745792d578a7fdd6fa4b03e97790494d58 | [] | no_license | nielshintzen/vmstools | ddb996dd4d5b3b2504216911284d3dd872f81baa | 8bb3666d3778eac5b8be1c9454573a85157f11c3 | refs/heads/master | 2023-06-22T08:48:54.584274 | 2023-06-09T09:10:05 | 2023-06-09T09:10:05 | 37,326,848 | 16 | 13 | null | 2023-06-01T20:23:18 | 2015-06-12T14:35:02 | R | UTF-8 | R | false | false | 525 | r | distanceVMS.R | distanceTacsat <- function(tacsat,index){
res <- unlist(lapply(as.list(1:dim(index)[1]),function(x){
iS <- index[x,1]
iE <- index[x,2]
iL <- iE-iS+1
res <- distance(tacsat[iS:iE,]$SI_LONG[2:iL],tacsat[iS:iE,]$SI_LATI[2:iL],tacsat[iS:iE,]$SI_LONG[1:(iL-1)],tacsat[iS:iE,]$SI_LATI[1:(iL-1)])
return(sum(res,na.rm=T))}))
return(res)} |
442076076f42164518f657db61e5545bba4d6ae7 | 44486d32ef587a23383bd855b30cc33506a1034b | /Pima_analysis.R | 4e76b25f770e29f3fad1967206bd4d2421e1f2c0 | [] | no_license | BlancNicolas/SY09_TP3 | a348ddc407dd040eed98e68a796bf9c1e7da75e2 | 86312d38edb736590375f195c11344df93d94835 | refs/heads/master | 2021-01-22T11:10:42.477058 | 2017-05-30T20:32:52 | 2017-05-30T20:32:52 | 92,672,107 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,853 | r | Pima_analysis.R | #load file
# Load the Pima data: first 7 columns are features, column 8 the class.
dataPima <- read.csv("Pima.csv")
XPim <- dataPima[,1:7]
zPim <- dataPima[,8]
#/!\--------Estimate parameters---------/!\
# Class-conditional means/covariances (first two features only) and
# class priors, written out for later inspection.
data = as.data.frame(dataPima);
mu1 = apply(data[which(data$z == 1), 1:2], 2, mean);
mu2 = apply(data[which(data$z == 2), 1:2], 2, mean);
epsilon1 = cov(data[which(data$z == 1), 1:2]);
epsilon2 = cov(data[which(data$z == 2), 1:2]);
pi1 = dim(data[which(data$z == 1),])[1] / dim(data)[1];
pi2 = dim(data[which(data$z == 2),])[1] / dim(data)[1];
df = data.frame("mu1" = mu1, "mu2" = mu2, "epsilon1" = epsilon1, "epsilon2" = epsilon2, "pi1" = pi1, "pi2" = pi2);
write.table(df, paste("Pima","_analysis.csv", sep=""), sep = ';'); #do not forget the "sep" arg in order to define columns
#/!\-------EUCLIDEAN CLASSIFIER ANALYSIS---------/!\
# Repeated hold-out evaluation of the Euclidean (nearest-mean)
# classifier; separ1/ceuc.app/ceuc.val come from the course scripts.
#Divide data
donn.sep <- separ1(XPim, zPim)
Xapp <- donn.sep$Xapp
zapp <- donn.sep$zapp
Xtst <- donn.sep$Xtst
ztst <- donn.sep$ztst
#error matrix
error_train <- matrix(0, 1, 20);
error_test <- matrix(0, 1, 20);
#start of algorithm
for (j in 1:20 ){
  donn.sep <- separ1(XPim, zPim) #using separ1.R function
  #parting in learning and test sets
  Xapp <- donn.sep$Xapp
  zapp <- donn.sep$zapp
  Xtst <- donn.sep$Xtst
  ztst <- donn.sep$ztst
  #we estimate parameters on Xapp
  mu <- ceuc.app(Xapp, zapp);
  #applying values on individuals of sets
  train_set <- ceuc.val(Xapp, mu);
  test_set <- ceuc.val(Xtst, mu);
  #error rate
  error_train[1,j] <- dim(as.matrix(which(train_set != as.matrix(zapp))))[1] / dim(as.matrix(zapp))[1];
  # NOTE(review): the denominator below is the TRAINING set size
  # (zapp); for a test error rate it should be dim(as.matrix(ztst))[1].
  error_test[1,j] <- dim(as.matrix(which(test_set != as.matrix(ztst))))[1] / dim(as.matrix(zapp))[1];
}
#applying mean on rows which represent each dataset
average_error_rate_Pima <- matrix(0, nrow = 1, ncol = 2);
average_error_rate_Pima[,1] = apply(error_train, 1, mean);
average_error_rate_Pima[,2] = apply(error_test, 1, mean);
#Confidence interval
#we consider with alpha = 0.05
#We use : CI = [p(mean) - fract(0.975*s/sqrt(n)), p(mean) + fract(0.975*s/sqrt(n))]
ptrain <- as.matrix(average_error_rate_Pima[, 1]);
ptest <- as.matrix(average_error_rate_Pima[, 2]);
frac = 1.96;
# NOTE(review): `donn_list` is only defined further down (kppv section),
# so this loop references it before assignment when the file is run top
# to bottom; the CI sample size should presumably be the train/test set
# size instead — TODO confirm and fix.
CI_Pima <- matrix(0, nrow = nrow(average_error_rate_Pima), ncol = 4); #4 because of two values for each interval
for (i in 1:nrow(average_error_rate_Pima)){
  CI_Pima[i, 1] <- ptrain[i] - frac * sqrt((ptrain[i]*(1-ptrain[i]))/nrow(as.data.frame(donn_list[i]))); #lower bound for CI on train set
  CI_Pima[i, 2] <- ptrain[i] + frac * sqrt((ptrain[i]*(1-ptrain[i]))/nrow(as.data.frame(donn_list[i]))); #upper bound for CI on train set
  CI_Pima[i, 3] <- ptest[i] - frac * sqrt((ptest[i]*(1-ptest[i]))/nrow(as.data.frame(donn_list[i]))); #lower bound for CI on test set
  CI_Pima[i, 4] <- ptest[i] + frac * sqrt((ptest[i]*(1-ptest[i]))/nrow(as.data.frame(donn_list[i]))); #upper bound for CI on test set
}
#/!\-----KPPV CLASSIFIER------/!\
#-------------------------------------
#AT FIRST : USE load-data.R file
#list of our datasets we want to estimate
donn_list = list(donn1000_2);
opti_neigh = matrix(0, nrow = length(donn_list), ncol = 1)
# NOTE(review): this loop iterates over j but assigns opti_neigh[i] —
# `i` is a stale index from the previous section, so only one cell is
# ever written.  Also, separ1 appears to return only app/tst sets (see
# the Euclidean section above), so Xval/zval here would be NULL — TODO
# confirm whether separ2 was intended.
for (j in 1:20 ){
  donn.sep <- separ1(XPim, zPim) #using separ1.R function
  #parting in learning and test sets
  Xapp <- donn.sep$Xapp
  zapp <- donn.sep$zapp
  Xval <- donn.sep$Xval
  zval <- donn.sep$zval
  Xtst <- donn.sep$Xtst
  ztst <- donn.sep$ztst
  opti_neigh[i] <- kppv.tune(Xapp, zapp, Xapp, zapp, 2*(1:6)-1)
}
#/!\-----Confidence Intervals and error Rate------/!\
#AT FIRST : USE load-data.R file
#error matrix
error_train_PIMA_kppv <- matrix(0, length(donn_list), 20);
error_test_PIMA_kppv <- matrix(0, length(donn_list), 20);
#start of algorithm
# NOTE(review): same stale-index issue below — error_*[i,j] uses the
# leftover `i` (which happens to equal 1 here, matching the single row).
for (j in 1:20 ){
  donn.sep <- separ2(XPim, zPim) #using separ2.R function (train/validation/test split)
  #parting in learning and test sets
  Xapp <- donn.sep$Xapp
  zapp <- donn.sep$zapp
  Xval <- donn.sep$Xval
  zval <- donn.sep$zval
  Xtst <- donn.sep$Xtst
  ztst <- donn.sep$ztst
  #we estimate parameters on Xapp: tune k over odd values 1..11
  Kopt <- kppv.tune(Xapp, zapp, Xval, zval,2*(1:6)-1);
  print(Kopt);
  #applying values on individuals of sets
  zapp_val <- kppv.val(Xapp, zapp, Kopt, Xapp);
  ztst_val <- kppv.val(Xapp, zapp, Kopt, Xtst);
  #error rate
  error_train_PIMA_kppv[i,j] <- length(zapp_val[which(as.matrix(zapp_val) != as.matrix(zapp))]) / dim(Xapp)[1];
  error_test_PIMA_kppv[i,j] <- length(ztst_val[which(as.matrix(ztst_val) != as.matrix(ztst))]) / dim(Xtst)[1];
}
#applying mean on rows which represent each dataset
average_error_rate_PIMA_kppv <- matrix(0, nrow = length(donn_list), ncol = 2);
average_error_rate_PIMA_kppv[,1] = apply(error_train_PIMA_kppv, 1, mean);
average_error_rate_PIMA_kppv[,2] = apply(error_test_PIMA_kppv, 1, mean);
#Confidence interval
#we consider with alpha = 0.05
#We use : CI = [p(mean) - fract(0.975*s/sqrt(n)), p(mean) + fract(0.975*s/sqrt(n))]
ptrain <- as.matrix(average_error_rate_PIMA_kppv[, 1]);
ptest <- as.matrix(average_error_rate_PIMA_kppv[, 2]);
frac = 1.96;
CI_tab_PIMA_kppv <- matrix(0, nrow = nrow(average_error_rate_PIMA_kppv), ncol = 4); #4 because of two values for each interval
# NOTE(review): `average_error_rate` is not defined in this file
# (likely a copy-paste from another script); the loop bound should be
# nrow(average_error_rate_PIMA_kppv).
for (i in 1:nrow(average_error_rate)){
  CI_tab_PIMA_kppv[i, 1] <- ptrain[i] - frac * sqrt((ptrain[i]*(1-ptrain[i]))/nrow(as.data.frame(donn_list[i]))); #lower bound for CI on train set
  CI_tab_PIMA_kppv[i, 2] <- ptrain[i] + frac * sqrt((ptrain[i]*(1-ptrain[i]))/nrow(as.data.frame(donn_list[i]))); #upper bound for CI on train set
  CI_tab_PIMA_kppv[i, 3] <- ptest[i] - frac * sqrt((ptest[i]*(1-ptest[i]))/nrow(as.data.frame(donn_list[i]))); #lower bound for CI on test set
  CI_tab_PIMA_kppv[i, 4] <- ptest[i] + frac * sqrt((ptest[i]*(1-ptest[i]))/nrow(as.data.frame(donn_list[i]))); #upper bound for CI on test set
}
|
fa323a9e0e96c3df5f4a9b2ed4a7704545404c83 | a989b8e71744214e0b43546273845bae12463f84 | /man/is_jwt_expired.Rd | 49fd67aae7a3827d7db866f531fe8aab34b0634c | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | xenos-code/sealr | cceff9c597a042a86b612b28dbbeb61015f167c4 | 3843e3ee3f2d5a6efc88b964e72353f520060f82 | refs/heads/master | 2023-08-22T09:40:01.453801 | 2021-10-01T10:03:09 | 2021-10-01T10:03:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 450 | rd | is_jwt_expired.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_jwt.R
\name{is_jwt_expired}
\alias{is_jwt_expired}
\title{This function checks whether a JWT is expired.}
\usage{
is_jwt_expired(payload)
}
\arguments{
\item{payload}{list. Payload of JWT.}
}
\value{
TRUE if JWT is expired, FALSE if not
(either current time < expiration time or no exp claim in JWT).
}
\description{
This function checks whether a JWT is expired.
}
|
0b98be57547633c70cdeba5d49e6aaca9a1e95f0 | 0ea643a7898c3319fb850a6bbdddc3f3b6406207 | /R/nodes.R | b42821ffb54b1dae70ec620c3b460366cd503145 | [] | no_license | hjunwoo/slimy | 2109f501863bbce31c7c1803ad8b5d089bb2e0ac | 1343c890725762a33b8f9279216db07af8d44380 | refs/heads/master | 2020-03-27T09:34:32.933265 | 2019-02-11T15:15:33 | 2019-02-11T15:15:33 | 146,353,817 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,003 | r | nodes.R | # returns matrix of columns each representing all possible parent sets
# Builds the repertoire of all candidate parent sets for a node set.
#
# Args:
#   nodes: character vector of node names.
#   kappa: maximum in-degree (parent-set size); must be <= 3.
#
# Returns a 0/1 matrix with one row per node (rownames = nodes) and one
# column per candidate parent set: the empty set first, then all
# singletons, pairs and triples up to size kappa.
parent.sets <- function(nodes, kappa=3){
  # Fail fast: the original only raised this error after already
  # building all size-1..3 parent sets.
  if(kappa > 3) stop('Maximum in-degree is limited to <=3')
  p <- length(nodes)
  ac <- matrix(0, nrow=p, ncol=1)   # first column: empty parent set
  rownames(ac) <- nodes
  a <- diag(p)
  if(kappa > 0) ac <- cbind(ac, a)  # all singleton parent sets
  # Scalar conditions use `&&` (the original used elementwise `&`).
  if(kappa > 1 && p > 1) for(i in seq(1,p-1)) for(j in seq(i+1,p))
    ac <- cbind(ac, a[,i] | a[,j])
  if(kappa > 2 && p > 2)
    for(i in seq(1,p-2)) for(j in seq(i+1,p-1)) for(k in seq(j+1,p))
      ac <- cbind(ac, a[,i] | a[,j] | a[,k])
  # columns of ac = repertoire of all parent sets
  return(ac)
}
# Computes the local score of every node conditional on every candidate
# parent set (one column of object@ac per parent set), serially or via
# Rmpi workers, and stores the max-normalised scores in object@cache.
# NOTE(review): the `kappa` argument is not used in this function body.
local.score <- function(object, kappa, po=NULL, progress.bar, ncores){
  nodes <- object@nodes
  p <- length(nodes)
  ac <- object@ac
  # cache[w, iac] = local score of node w given parent set iac.
  cache <- matrix(0, nrow=p, ncol=ncol(ac))
  rownames(cache) <- nodes
  # For 'counts' data this message is suppressed — presumably because
  # the counts path prints its own progress; TODO confirm.
  if(object@data.type != 'counts')
    cat('Computing local scores ...\n')
  # Package the arguments fill.cache() needs into one object so it can
  # be shipped to MPI workers as a single payload.
  bundle <- list(object=object, po=po)
  nac <- ncol(ac) # no. of parent sets
  if(ncores==1){
    # Serial path with optional text progress bar.
    if(progress.bar) pb <- txtProgressBar(style=3)
    lcache <- list()
    for(iac in seq_len(nac)){
      lcache[[iac]] <- fill.cache(iac, bundle)
      if(progress.bar) setTxtProgressBar(pb, iac/nac)
    }
    if(progress.bar) close(pb)
  }
  else{ # parallel: load-balanced apply over parent sets via Rmpi
    Rmpi::mpi.bcast.Robj2slave(object)
    lcache <- Rmpi::mpi.applyLB(seq_len(nac), FUN=fill.cache,
      bundle)
  }
  # Find the global maximum finite score...
  maxs <- -Inf
  for(iac in seq_len(nac))
    for(x in lcache[[iac]]) if(!is.na(x)) if(x>maxs) maxs <- x
  # ...and shift all scores so the maximum is 0 (NA = invalid
  # node/parent-set combination, preserved as-is).
  for(iac in seq_len(nac)){
    z <- lcache[[iac]]
    for(i in seq_len(length(z))) if(!is.na(z[i])) z[i] <- z[i] - maxs
    cache[,iac] <- z
  }
  object@cache <- cache
  return(object)
}
# Computes the local score of every node given parent set number `iac`
# (a column of object@ac).  Returns a numeric vector of length p with
# NA where the combination is invalid (node is its own parent, or the
# resulting graph fragment is not a DAG).
fill.cache <- function(iac, bundle){
  object <- bundle$object
  po <- bundle$po
  ac <- object@ac
  hyper <- object@hyper
  prior <- object@prior
  nodes <- object@nodes
  p <- length(nodes)
  type <- object@data.type
  # For count-type data the raw counts and the latent variables are
  # scored separately; otherwise the data matrix itself is scored.
  if(type %in% c('counts','mvln')){
    ci <- object@data
    xi <- object@latent.var
  }
  else xi <- object@data
  pa <- nodes[which(ac[,iac]==1)]   # names of the parents in this set
  lcache <- double(p)
  for(i in seq_len(p)){
    w <- nodes[i]
    if(w %in% pa) sc <- NA   # a node cannot be its own parent
    else{
      # Build the adjacency matrix of the (parents -> w) fragment and
      # reject it if it contains a cycle.
      wpa <- nodes[nodes %in% c(w,pa)]
      nw <- length(wpa)
      A <- matrix(0, nrow=nw, ncol=nw)
      rownames(A) <- colnames(A) <- wpa
      A[pa,w] <- 1
      if(!is.DAG(A)) sc <- NA
      else{
        # Dispatch to the scoring function matching the data type /
        # prior (all defined elsewhere in this package).
        if(type=='discrete') sc <-
          multinom.local.score(xi, w, pa, prior=prior,
            hyper=hyper)
        else if(type=='counts')
          sc <- pois.score(ci=ci,xi=xi,node=w, pa=pa, hyper=hyper,
            po=po)
        else if(prior=='g')
          sc <- g.score(xi=xi, node=w, pa=pa, hyper=hyper)
        else if(prior=='diag')
          sc <- diag.score(xi=xi, node=w, pa=pa, hyper=hyper)
        else stop('Unknown prior')
      }
    }
    lcache[i] <- sc
  }
  return(lcache)
}
e8d721962612097e141b768c1eea64d071cb36d0 | 153e17d4c7288c386a8f119d2ad08a336a96b934 | /Naive Bayes/code.R | fbd82ef3f072c625ff41ec8aacfffcb246351c0f | [] | no_license | KevorkSulahian/data-mining | 96d5fdb5b728c2462d9e62695edb89982fc062f8 | 510f5f96c11a3db0ffd7a252aa43f49e99046f7a | refs/heads/master | 2020-03-29T15:57:13.242893 | 2018-12-09T18:19:35 | 2018-12-09T18:19:35 | 150,089,429 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 572 | r | code.R | library(caret)
# Naive Bayes classifier for the (balanced) HR attrition data set:
# train/test split, model fit, confusion matrix, and ROC/AUC evaluation.
# (caret, which provides createDataPartition/confusionMatrix, is loaded
# on the preceding line of the script.)
library(e1071)
library(ROCR)

hr <- read.csv("Naive Bayes/HR_balanced.csv")

# Reproducible stratified 75/25 split on the target column `left`.
set.seed(1)
split_idx <- createDataPartition(hr$left, p = 0.75, list = FALSE)
train_set <- hr[split_idx, ]
test_set <- hr[-split_idx, ]

# Fit Naive Bayes with Laplace smoothing and predict hard class labels.
nb_fit <- naiveBayes(left ~ ., data = train_set, laplace = 1)
class_pred <- predict(nb_fit, newdata = test_set)
confusionMatrix(class_pred, test_set$left, positive = "Yes")

# Posterior class probabilities for the ROC curve.
prob_pred <- predict(nb_fit, newdata = test_set, type = "raw")
head(prob_pred)

roc_input <- prediction(prob_pred[, 2], test_set$left)
roc_curve <- performance(roc_input, "tpr", "fpr")
plot(roc_curve)
performance(roc_input, "auc")@y.values
|
9c9a7fcf251f90a2c5c35886312c0f6f00d6bbb3 | 76ca270b79fdc7e4a573fe22d65e5c6a911bc7fb | /R/inicial.R | cb070544a053cdac6143533586b3106b8542c8f4 | [] | no_license | srgmld/foofactors | ce94f1bd4d42057f2d2b85172782b1ef22cf79bb | bc9ac62cb49d85611875aeb6f26017040a4fa448 | refs/heads/master | 2023-08-12T19:30:24.160397 | 2021-09-28T17:09:08 | 2021-09-28T17:09:08 | 411,371,982 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 128 | r | inicial.R | library(devtools)
# Quick look at the working directory: list its files, then show the path
# and type of the hidden .git entry (if present).  (devtools is loaded on
# the preceding line of the script.)
library(fs)
library(dplyr)

list.files()

git_entry <- dir_info(all = TRUE, regexp = "^[.]git$")
select(git_entry, path, type)
|
bfa079d64937f162060c32bd8f188e67dbbb8fb8 | ad3a6d7e0851cd786b6356b8cf0a12256f2d37bc | /man/ID3.Rd | ccbe3cc603e1f04286f52c63531d48a3dcd47820 | [] | no_license | talgalili/HBP | 53d22b34bcdf283bc2192373ea0be55aeed382f6 | c0ce7a77dcb6ecfb0120706bfa175108e25eb8cc | refs/heads/master | 2020-07-26T06:41:44.539200 | 2014-09-04T20:08:51 | 2014-09-04T20:08:51 | 23,677,324 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,667 | rd | ID3.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\docType{data}
\name{ID3}
\alias{ID3}
\title{ID3 - an R/Weka Classifier Trees}
\format{\preformatted{function (formula, data, subset, na.action, control = Weka_control(), options = NULL)
- attr(*, "class")= chr [1:2] "R_Weka_classifier_interface" "R_Weka_interface"
- attr(*, "meta")=List of 4
..$ name : chr "weka/classifiers/trees/Id3"
..$ kind : chr "R_Weka_classifier_interface"
..$ class: chr "Weka_classifier"
..$ init : NULL
}}
\source{
Heavily inspired by the code of the \link[RWeka]{J48} function,
and by the help of Ista Zahn.
}
\usage{
ID3
}
\arguments{
\item{formula}{a symbolic description of the model to be fit.}
\item{data}{an optional data frame containing the variables in the model.}
\item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.}
\item{na.action}{a function which indicates what should happen when the data contain NAs. See \link{model.frame} for details.}
\item{control}{an object of class \link{Weka_control} giving options to be passed to the Weka learner. Available options can be obtained on-line using the Weka Option Wizard \link{WOW}, or the Weka documentation.}
\item{...}{not used.}
}
\value{
A list inheriting from classes Weka_tree and Weka_classifiers.
See \link[RWeka]{J48} for a list of the components.
}
\description{
ID3 - an R/Weka Classifier Trees
}
\examples{
\dontrun{
library(RWeka)
DF2 <- read.arff(system.file("arff", "contact-lenses.arff", package = "RWeka"))
load_simpleEducationalLearningSchemes()
ID3(`contact-lenses` ~ ., data = DF2)
}
}
\seealso{
\link[RWeka]{J48}
}
\keyword{datasets}
|
914f7e4f5798f573cb294a2a0324fe899a316766 | 6702a35116f3405621d52296e2e97a2b66f9db1d | /plot_trajectory.R | b770c434d7b22af6295489aa158dcc2d9a68b09d | [] | no_license | snoopycindy/rcomgame | 944054c54c3f9e672eef44b2a439d9f582373b1e | f6859e8a5d530e5d50147f57ca1539f12ed79079 | refs/heads/master | 2016-09-06T08:23:25.382221 | 2015-06-24T01:18:55 | 2015-06-24T01:18:55 | 37,356,034 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,839 | r | plot_trajectory.R | adir_id <- "../gestureLog/1parseData/"
# Output directory for the trajectory PNGs.
picdir = "../pic/"
#read std
# Build one long table `std` of per-participant gameStd files, tagging each
# row with the participant id.  `{}` evaluates to NULL, so rbind() starts
# the table from scratch on the first iteration.
# NOTE(review): `len`, `gdir`, `p.list` and `p.grp` are not defined in this
# file -- presumably set up by an earlier script (`len` looks like a
# project alias for length()); confirm before running standalone.
std={}
for(p in 1:len(p.list)){
  fn = paste(gdir, "gameStd/", p.grp[p], "_", p.list[p],".csv",sep="")
  d = read.table(fn)
  d = cbind(p.list[p], d)  # prepend the participant id as the first column
  std = rbind(std, d)      # grow row-wise (fine for a handful of files)
}
names(std)[1]="sub"
# list all subject files =================
# For every game code: read each parsed gesture log matching that game and
# draw the raw touch trajectories on a pad-sized canvas (1920 x 1200,
# y axis reversed because the pad's y runs top to bottom).  PNG output is
# currently commented out, so panels go to the active device.
for(game in gcode$gc){
  pic.name = paste(picdir, "pic_trajectory/", game, ".png",sep="")
  # png(file = pic.name, width = 800, height = 800)
  fn.full = list.files(adir_id, full=T, pattern=game)
  fn.part = list.files(adir_id, pattern=game)
  # Panel layout depends on how many log files matched this game.
  if(len(fn.part)>6)
    par(mfrow=c(3,3))
  else
    par(mfrow=c(2,3))
  for(f in 1:len(fn.full)){
    d2 = read.table(fn.full[f], header=T)
    id.list = unique(d2$id)  # one trace per touch id
    xlim = c(0, 1920)
    ylim = c(0, 1200)
    plot(0, 0, type = "n",
      xlim = xlim, ylim=rev(ylim), #because the pad y is from top to down
      xlab="x", ylab ="y", main = paste(p.list[p], game))
    # NOTE(review): `p` here is left over from a previous loop, so the same
    # participant is used for every file's title and gameStd lookup --
    # looks like a bug; confirm intended behaviour.
    fn = paste(gdir, "gameStd/", p.grp[p], "_", p.list[p],".csv",sep="")
    std = read.table(fn)
    # Overlay the gameStd label for this game in red.
    text(0, 0, std$all[std$code==game], col=2, cex=2)
    for(j in 1:len(id.list))
    {
      isd = which(d2$id == id.list[j])
      mean.prs = mean(d2$prs[isd])  # NOTE(review): computed but unused
      lines(d2$x[isd], d2$y[isd], pch = mypch[j], lwd=2, col=mycol[3])
    }
  }
  # dev.off()
}
# Same idea as the loop above, but laid out per participant (3 x 5 panels
# per game) and written to a PNG file; participants without a log for the
# game get an empty panel marked "None".
for(game in gcode$gc){
  pic.name = paste(picdir, "pic_trajectory/", game, ".png",sep="")
  fn.full = list.files(adir_id, full=T, pattern=game)
  fn.part = list.files(adir_id, pattern=game)
  png(file = pic.name, width = 1500, height = 600)
  par(mfrow=c(3,5))
  for(p in 1:len(p.list)) {
    # find the game log =================
    n = grep(p.list[p], fn.part) # positions in fn.part matching this participant
    # No log for this participant/game: draw an empty placeholder panel.
    if(length(n)==0){
      xlim = c(0, 1920)
      ylim = c(0, 1200)
      plot(0, 0, type = "n",
        xlim = xlim, ylim=rev(ylim), #because the pad y is from top to down
        xlab="x", ylab ="y", main = paste(p.list[p], game))
      text(1000, 1000, "None", cex=2)
      next
    }
    # read file =================
    fn = paste("../gestureLog/1parseData/", p.grp[p], "_",
      p.list[p], "_", game, ".txt", sep="")
    d2 = read.table(fn, header=T)
    # To find the id list in each trace
    id.list = unique(d2$id)
    xlim = c(0, 1920)
    ylim = c(0, 1200)
    plot(0, 0, type = "n",
      xlim = xlim, ylim=rev(ylim), #because the pad y is from top to down
      xlab="x", ylab ="y", main = paste(p.list[p], game))
    fn = paste(gdir, "gameStd/", p.grp[p], "_", p.list[p],".csv",sep="")
    std = read.table(fn)
    # NOTE(review): the label anchor is (2, 2) here but (0, 0) in the loop
    # above -- confirm which is intended.
    text(2, 2, std$all[std$code==game], col=2, cex=2)
    for(j in 1:len(id.list))
    {
      isd = which(d2$id == id.list[j])
      mean.prs = mean(d2$prs[isd])  # NOTE(review): computed but unused
      lines(d2$x[isd], d2$y[isd], pch = mypch[j], lwd=2, col=mycol[3])
    }
  }
  dev.off()
}
|
0f8ed1e0201ccef9797258d839462baf9f852477 | 588281e9a734d8154b6716d0aeca01f4cbfeab3c | /man/CityPopularity.Rd | 0356a6caeae29ce0e7253d7e1a06f59e32feeca2 | [] | no_license | jburos/GoogleVis | a6e3a4546d567b38ac254a86eec433d6702eddbd | 0b27d495443063522867541f704d886327473157 | refs/heads/master | 2021-01-21T06:59:24.517424 | 2013-07-11T17:26:20 | 2013-07-11T17:26:20 | 11,345,957 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,035 | rd | CityPopularity.Rd | \name{CityPopularity}
\alias{CityPopularity}
\docType{data}
\title{
CityPopularity: googleVis example data set
}
\description{
Example data set to illustrate the use of the googleVis package.
}
\usage{data(CityPopularity)}
\format{
A data frame with 6 observations on the following 2 variables.
\describe{
\item{\code{City}}{a factor with levels \code{Boston} \code{Chicago} \code{Houston} \code{Los Angeles} \code{Miami} \code{New York}}
\item{\code{Popularity}}{a numeric vector}
}
}
%%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%%}
\source{
Google Geo Map API: \url{https://google-developers.appspot.com/chart/interactive/docs/gallery/geomap.html}
}
%%\references{
%% ~~ possibly secondary sources and usages ~~
%}
\examples{
data(CityPopularity)
G <- gvisGeoMap(CityPopularity, locationvar='City' ,numvar='Popularity',
options=list(region='US',
dataMode='markers',
colors='[0xFF8747, 0xFFB581, 0xc06000]'))
\dontrun{
plot(G)
}
}
\keyword{datasets}
|
1c96ffbf04a830d16cbbfabd8bf9576fda129044 | e7acb77613505ac3851e4255b800eef2304586a7 | /scripts/3_hsm.R | 61be579276f97babc94fa18ef5580d65385f6fae | [] | no_license | kalab-oto/ruspolia-expansion | 57f940db412fe93703096fd33ee976bbb69949fc | bf76e9f9b1295bd4b818b4bbaf5c2c53f07e0df9 | refs/heads/master | 2023-06-19T02:34:42.715338 | 2021-07-18T10:12:10 | 2021-07-18T10:12:10 | 385,578,808 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,884 | r | 3_hsm.R | #' The script executes the entire HSM, including filtering the finding
#' data (i.e. spatial thinning), selecting environmental variables with
#' VIFm, background data generation, choosing features and beta multiplier
#' for maxent (ENMeval), performing and evaluating maxent model. Final
#' model output is exported as geotiff (.tif), and ENMeval and model
#' evaluation results are exported to .csv.
# GIS / SDM toolchain.
library(raster)
library(rgdal)
library(gdalUtils)
library(spThin)
library(sdm)
library(usdm)
library(dismo)
library(ecospat)
library(ENMeval)
# library() (not require()) so a missing devtools fails loudly here rather
# than via a confusing error at the source_url() call below.
library(devtools)
# Pull in the sedi() implementation used by mean_ev().
# NOTE(review): sourcing code from a remote master branch needs network
# access and is not reproducible; consider pinning a commit hash.
source_url("https://raw.githubusercontent.com/RWunderlich/SEDI/master/R/sedi.R")
#' read boundaries data
# Czech Republic national boundary (GADM level 0), cached locally.
cz_bounds <- getData("GADM",country='CZE',level=0,path=file.path("..","source_data","admin"))
#' # OCC data
# Occurrence records prepared by an earlier script.
occs <- readOGR(file.path("..","processed_data","ocss.gpkg"),layer="occs")
plot(occs,pch=20,col="red")
plot(cz_bounds, add = TRUE)
occs$species <- "ruspolia"
# Stable 1..n row ids, also stored as an `id` column so thinned records
# can be merged back onto their attributes below.
rownames(occs@data) <- 1:nrow(occs)
occs$id <- rownames(occs@data)
#' perform spatial thinning
# Keep only records at least 5 km apart (single repetition).
# NOTE(review): lat.col is mapped to coords.x1 and long.col to coords.x2;
# coords.x1 is usually the x (longitude) coordinate -- verify the mapping.
thinned_sp <- spThin::thin(loc.data = as.data.frame(occs),
                           lat.col = "coords.x1",
                           long.col = "coords.x2",
                           spec.col = "species",
                           thin.par = 5,#km
                           reps = 1,
                           out.dir = "../processed_data",
                           locs.thinned.list.return = TRUE,
                           write.files = FALSE
                           )
thinned_sp <- as.data.frame(thinned_sp)
# NOTE(review): assumes thin() keeps the original row names as ids so the
# merge below re-attaches the right attributes -- confirm.
thinned_sp$id <- rownames(thinned_sp)
thinned_sp <- merge(thinned_sp,occs,by="id")
# Rebuild a spatial object in WGS84 with just source/species attributes.
thinned_sp <- SpatialPointsDataFrame(
  coords=thinned_sp[c("coords.x1","coords.x2")],
  data=data.frame(thinned_sp[c("source","species")]),
  proj4string=CRS("EPSG:4326")
)
# Visual check: thinned (green) over all (red) occurrences.
plot(occs,pch=20,col="red")
plot(thinned_sp,pch=20,col="green",add=T)
plot(cz_bounds, add = TRUE)
#' # ENV data
# Environmental predictor rasters; [-c(6)] drops the 6th file.
# NOTE(review): magic index -- which layer is excluded depends on file
# ordering; confirm it targets the intended raster.
env_source <- list.files(file.path("..","processed_data","env"),full.names=T,pattern=".tif$")[-c(6)]
# Clip predictors to a 0.1-degree buffer around the national boundary.
env_data <- mask(stack(env_source),raster::buffer(cz_bounds,0.1))
# excluding variables based on VIF
# First mask out every cell that is NA in any layer, so all layers share a
# common footprint before collinearity screening.
na_mask <- sum(is.na(env_data))>0
na_mask[na_mask==1] <- NA
env_data <- mask(env_data,na_mask)
# Drop collinear variables (pairwise correlation threshold 0.7).
vif <- vifcor(env_data,.7)
env_data <- exclude(env_data,vif)
#' # Model
# Prepare presence points: name the coordinate columns and add the constant
# response column expected by sdm.
colnames(thinned_sp@coords) <- c("x","y")
thinned_sp$sp_name <- 1
sp_train <- thinned_sp[,"sp_name"]
# generate background data: 10 000 random cells within the predictor extent.
bg_sp <- randomPoints(env_data, 10000)
bg <- extract(env_data, bg_sp)
bg <- as.data.frame(cbind(bg_sp, bg))
# Model calibration: grid search over regularisation multipliers x feature
# classes with ENMeval (5-fold random CV, maxent.jar backend).
rms <- seq(0.5, 10, 0.5)
fcs <- c("LQ", "LQP", "LQT", "LQH", "LQHT", "LQTP", "LQHP", "LQHPT")
enm_eval_results <- data.frame()
for (i in fcs){
  enm_eval <- ENMevaluate(occ = sp_train@coords, env = env_data,
                          bg.coords = bg_sp, method = 'randomkfold',
                          kfolds = 5, fc = i, RMvalues = rms,
                          algorithm = 'maxent.jar', parallel = FALSE)
  enm_eval_results <- rbind(enm_eval@results, enm_eval_results)
  gc()
  print(unique(enm_eval_results$features))
}
# Keep models with a finite AICc and rank them by delta AICc.
enm_eval_results <- enm_eval_results[which(!is.na(enm_eval_results$AICc)),]
enm_eval_results$delta.AICc <- enm_eval_results$AICc - min(enm_eval_results$AICc)
# Best model = delta AICc of 0.  Take the first such row: an exact tie
# previously produced length > 1 vectors here, which broke the scalar
# grepl() conditions below.
best_row <- which(enm_eval_results$delta.AICc == 0)[1]
comb <- as.character(enm_eval_results$features[best_row])
b_beta <- enm_eval_results$rm[best_row]
# Translate the winning feature-class string into maxent.jar arguments:
# with "noautofeature" (set at fit time), linear/quadratic/hinge/product
# default to on and threshold to off, so only deviations are passed.
b_args <- c()
if (!grepl("L", comb)){
  b_args <- c(b_args, "nolinear")
}
if (!grepl("H", comb)){
  b_args <- c(b_args, "nohinge")
}
if (!grepl("Q", comb)){
  b_args <- c(b_args, "noquadratic")
}
if (!grepl("P", comb)){
  b_args <- c(b_args, "noproduct")
}
if (grepl("T", comb)){
  b_args <- c(b_args, "threshold")
}
#' Model
# Assemble presence + background data and fit maxent with 5-fold CV,
# repeated 10 times, using the features (b_args) and regularisation
# multiplier (b_beta) selected by ENMeval above.
d <- sdmData(train=sp_train, predictors=env_data,bg=bg)
m <- sdm(sp_name~.,data=d,
         methods=c('maxent'),
         replication='cv',
         cv.folds=5,
         modelSettings=list(maxent=list(beta=b_beta,args=c(b_args,"noremoveDuplicates", "noautofeature"))),
         n=10
         )
# Diagnostic plots: ROC curves and variable response curves.
roc(m,smooth=T)
rcurve(m)
#' Evaluation
#' Summarise cross-validation evaluation statistics of an sdm model.
#'
#' For each measure returned by sdm::getEvaluation() the mean, sd, max and
#' min over all CV replicates are computed.  The optional "boyce" and
#' "SEDI" measures are computed per replicate from the stored evaluation
#' objects and appended as extra rows.
#'
#' @param mdl fitted sdm model (defaults to the global `m`).
#' @param testing_set which evaluation slot to summarise, e.g. "test.dep".
#' @param measures character vector of statistics; defaults to the global
#'   `measur`, which must exist at call time.
#' @return data.frame with columns mean/sd/max/min and one row per measure.
mean_ev <- function(mdl = m, testing_set = "test.dep", measures = measur) {
  # Fetch the evaluation table once instead of four times; drop the first
  # (model id) column before aggregating over replicates.
  ev <- getEvaluation(mdl, w = getModelId(mdl), stat = measures, opt = 2,
                      wtest = testing_set)[-1]
  df <- data.frame(mean = apply(ev, 2, mean),
                   sd   = apply(ev, 2, sd),
                   max  = apply(ev, 2, max),
                   min  = apply(ev, 2, min))
  # Summary row (mean, sd, max, min) for a per-replicate score vector.
  summary_row <- function(x) c(mean(x), sd(x), max(x), min(x))
  if ("boyce" %in% measures) {
    print("calculating boyce")
    boyce_li <- c()
    for (repl in getModelId(mdl)) {
      eval_obj <- mdl@models$sp_name$maxent[[repl]]@evaluation
      # Background = training absences plus test absences; presences come
      # from the requested test slot only.
      cv_bg <- c(eval_obj$train@predicted[eval_obj$train@observed == 0],
                 eval_obj[[testing_set]]@predicted[eval_obj[[testing_set]]@observed == 0])
      cv_pres <- eval_obj[[testing_set]]@predicted[eval_obj[[testing_set]]@observed == 1]
      b <- ecospat.boyce(cv_bg, cv_pres, PEplot = FALSE)
      boyce_li <- c(boyce_li, b$Spearman.cor)
    }
    df <- rbind(df, summary_row(boyce_li))
    rownames(df)[nrow(df)] <- "boyce"
  }
  if ("SEDI" %in% measures) {
    print("calculating SEDI")
    sedi_li <- c()
    for (repl in getModelId(mdl)) {
      eval_obj <- mdl@models$sp_name$maxent[[repl]]@evaluation[[testing_set]]
      # Second threshold criterion stored by sdm.
      # NOTE(review): confirm which optimisation row 2 corresponds to.
      th_val <- eval_obj@threshold_based[2, 2]
      o <- eval_obj@observed
      p <- eval_obj@predicted
      cmx <- sdm:::.cmx(o > th_val, p > th_val)
      sedi_li <- c(sedi_li, sedi(cmx[1, 1], cmx[1, 2], cmx[2, 1], cmx[2, 2])[[2]])
    }
    df <- rbind(df, summary_row(sedi_li))
    # Historical row name "sedi_li" kept for backward compatibility with
    # downstream CSV consumers.
    rownames(df)[nrow(df)] <- "sedi_li"
  }
  return(df)
}
measur <- c('boyce', 'SEDI')  # extra measures computed by mean_ev()
#' Variable importance
# Mean correlation-test importance across CV replicates, as a rounded
# percentage.
var_imp <- getVarImp(m,id=getModelId(m),method="maxent",wtest="test.dep")@varImportanceMean$corTest[2]*100
var_imp <- round(var_imp,0)
var_imp
eval_dep <- mean_ev(m,"test.dep")
eval_dep
#' Predict
# Project the model over the un-clipped predictors (same VIF exclusion),
# cropped to a 10 000 m buffer around the occurrences; the ensemble is the
# AUC-weighted mean over replicates.
env_pr_data <- exclude(stack(env_source),vif)
env_pr_data <- crop(env_pr_data,raster::buffer(occs,10000))
pr_eu <- ensemble(m,env_pr_data, setting=list(method='weighted',stat='AUC',opt=2))
plot(pr_eu)
plot(cz_bounds, add = TRUE)
plot(thinned_sp,pch=20,col="red",add=T)
#' Export results
writeRaster(pr_eu,file.path("..","processed_data","hsm.tif"),overwrite=T)
write.csv(enm_eval_results, file.path("..","processed_data","ENMeval.csv"))
write.csv(eval_dep,file.path("..","processed_data","eval_dep.csv"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.