blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
560df4e417c64bc79f38bcff0e6ae3552805f953
|
e77d89b8b8bf89baf5043b5f9b5f76b162a5e99b
|
/code/lmBootOptimized.R
|
31420a728f107e8cdef416ab4ddf46b68b70af10
|
[] |
no_license
|
samomidi/Predicting-Oxygen-intake---R-and-SAS-bootstrapping-
|
c795ccbf5e6415287f41f5ae4186f81249accc16
|
5abda194f2b9b03ddd46195d197ea6cc06af4f6a
|
refs/heads/master
| 2020-04-15T13:47:07.689324
| 2019-01-10T21:34:02
| 2019-01-10T21:34:02
| 164,729,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,382
|
r
|
lmBootOptimized.R
|
lmBootOptimized <- function(inputData, nBoot, xIndex, yIndex){
  # Purpose:
  #   Bootstrap the coefficients of a linear model (optimized, no clustering).
  # Inputs:
  #   inputData - data frame holding the response and explanatory columns
  #   nBoot     - integer - number of bootstrap resamples
  #   xIndex    - vector of integers - column indexes of explanatory variables
  #   yIndex    - integer - column index of the response variable
  # Output:
  #   bootResults - nBoot x (length(xIndex) + 1) matrix holding the
  #                 y-intercept and slopes for each bootstrap resample

  numberOfRows <- nrow(inputData)

  # Prepend a column of 1s for the intercept and work on a plain matrix.
  bindedData <- as.matrix(cbind(1, inputData))

  # Preallocate the result matrix: one row per resample.
  bootResults <- matrix(data = NA, nrow = nBoot, ncol = (length(xIndex) + 1))

  # Draw all nBoot * numberOfRows resampled row indexes in a single call.
  resampleVector <- sample(seq_len(numberOfRows), numberOfRows * nBoot,
                           replace = TRUE)

  for(i in seq_len(nBoot)) {
    # Select the i-th resampled data set using the indexes drawn above.
    bootData <- bindedData[resampleVector[(((i - 1) * numberOfRows) + 1):(i * numberOfRows)], ]

    # Split into design matrix (intercept + explanatory) and response.
    Xmat <- bootData[, c(1, xIndex + 1)]
    Ymat <- bootData[, yIndex + 1]

    # Least-squares coefficients: B = (X'X)^-1 * X'Y.
    # crossprod() plus two-argument solve() avoids forming the explicit
    # inverse, which is both faster and numerically more stable.
    beta <- solve(crossprod(Xmat), crossprod(Xmat, Ymat))
    bootResults[i, ] <- beta
  } # end of for loop

  colnames(bootResults) <- c('intercept', colnames(inputData)[xIndex])
  return(bootResults)
}
lmBootOptimizedQuantiles <-
  function(data, yIndex, xIndex, nBoot = 1000, alpha = 0.05) {
    # Purpose:
    #   Run the optimized bootstrap and return percentile confidence intervals.
    # Inputs:
    #   data   - data frame holding the response and explanatory columns
    #   yIndex - integer - column index of the response variable
    #   xIndex - vector of integers - column indexes of explanatory variables
    #   nBoot  - integer - number of resamples, default 1000
    #   alpha  - significance level, default 0.05 (i.e. a 95% interval)
    # Output:
    #   A 2 x (length(xIndex) + 1) matrix containing the lower and upper
    #   confidence limits for the y-intercept and each slope.
    boots <- lmBootOptimized(data, nBoot, xIndex, yIndex)

    cis <- matrix(nrow = 2, ncol = ncol(boots))
    colnames(cis) <- colnames(boots)
    rownames(cis) <- c('Lower CI', 'Upper CI')

    # Percentile interval: the alpha/2 and 1 - alpha/2 empirical quantiles
    # of each bootstrapped coefficient.
    for (i in seq_len(ncol(cis))) {
      cis[, i] <- quantile(boots[, i], probs = c(alpha / 2, (1 - alpha / 2)))
    }
    return(cis)
  }
|
0d5c2f67049262632e5130aa1cd923329baba8e1
|
e78ae3fdd458fc4b8bee318025614d5d6fd2af8f
|
/R/cdtDownloadReanalysis_jra55_apps.R
|
540887e451c8cb11f8f3f31f10be622938de945d
|
[] |
no_license
|
rijaf-iri/CDT
|
b42fd670adfad59c08dcf990a95f9f1ebea9a9e4
|
e1bb6deac6e814656bf5ed13b8d4af4d09475c9d
|
refs/heads/master
| 2023-07-27T09:16:15.407835
| 2023-07-21T02:16:42
| 2023-07-21T02:16:42
| 136,133,394
| 10
| 12
| null | 2018-09-28T03:56:51
| 2018-06-05T06:53:11
|
R
|
UTF-8
|
R
| false
| false
| 6,215
|
r
|
cdtDownloadReanalysis_jra55_apps.R
|
#' Submitting subset requests for JRA55 minimum and maximum temperature at 2 meter above ground.
#'
#' Function to request a subset of JRA55 minimum and maximum temperature at 2 meter above ground.
#'
#' @param variable the variable to be requested, available options are "tmax": maximum temperature and "tmin" minimum temperature.
#' @param email your email address used as login on https://rda.ucar.edu/.
#' @param password your password.
#' @param bbox a named list providing the bounding box of the area to extract. The names of the components must be minlon, maxlon, minlat and maxlat.
#' @param start_time a named list providing the start time of the data to extract. The names of the components must be year, month, day and hour.
#' @param end_time a named list providing the end time of the data to extract. The names of the components must be year, month, day and hour.
#'
#' @return A summary of your request and the link to download the data will be sent via email.
#' In addition a summary of your request will be displayed on the console.
#'
#' @export
jra55.send.request <- function(variable = "tmax", email = NULL, password = NULL,
                               bbox = list(minlon = 42, maxlon = 52,
                                           minlat = -26, maxlat = -11),
                               start_time = list(year = 2020, month = 11,
                                                 day = 1, hour = 0),
                               end_time = list(year = 2020, month = 11,
                                               day = 30, hour = 21))
{
    # Map the user-facing variable name onto the RDA parameter code.
    param <- switch(variable, "tmax" = "T MAX", "tmin" = "T MIN")

    # Both endpoints of the request period, formatted as "YYYYmmddHHMM".
    stamp <- function(tm) format(jra55.start.end.time(tm), "%Y%m%d%H%M")
    period_start <- stamp(start_time)
    period_end <- stamp(end_time)

    # Body of the subset request, serialized as JSON.
    subset_req <- list(
        dataset = "ds628.0",
        date = paste0(period_start, "/to/", period_end),
        param = param,
        oformat = "netCDF",
        nlat = bbox$maxlat,
        slat = bbox$minlat,
        wlon = bbox$minlon,
        elon = bbox$maxlon
    )
    body_json <- jsonlite::toJSON(subset_req, auto_unbox = TRUE)

    # Authenticated POST to the RDA request endpoint with a JSON payload.
    h <- curl::new_handle()
    curl::handle_setopt(h, username = email, password = password)
    curl::handle_setopt(h, copypostfields = body_json)
    curl::handle_setheaders(h,
        "Accept" = "application/json",
        "Content-Type" = "application/json",
        "charset" = "UTF-8"
    )
    reply <- curl::curl_fetch_memory("https://rda.ucar.edu/apps/request",
                                     handle = h)

    # Echo the server's response (request summary) to the console.
    cat(rawToChar(reply$content), "\n")
}
#' Splitting and formatting the JRA55 3 hourly downloaded data.
#'
#' Function to split and format the downloaded JRA55 minimum and maximum temperature at 2 meter above ground.
#'
#' @param dirNCDF full path to the folder containing the downloaded netCDF files.
#' If the data are compressed with "tar" or "zip", make sure to untar or unzip all files
#' and put the uncompressed netCDF files under this folder
#' @param dirOUT full path to the folder you want to save the formatted data.
#'
#' @export
jra55.format.ncdf <- function(dirNCDF, dirOUT){
    # Collect every netCDF file in the input folder; abort if none found.
    ncfiles <- list.files(dirNCDF, ".+\\.nc$")
    if(length(ncfiles) == 0)
        stop("No NetCDF files found\n")
    ncpaths <- file.path(dirNCDF, ncfiles)

    # Read grid metadata (main variable name, lon/lat dimensions) from the
    # first file; the remaining files are assumed to share the same grid.
    nc <- ncdf4::nc_open(ncpaths[1])
    varid <- nc$var[[1]]$name
    dname <- sapply(nc$dim, "[[", "name")
    nlon <- dname[grep("lon", dname)]
    nlat <- dname[grep("lat", dname)]
    lat <- nc$dim[[nlat]]$vals
    lon <- nc$dim[[nlon]]$vals
    ncdf4::nc_close(nc)

    # Remap longitudes from [0, 360) to [-180, 180) and compute the index
    # permutations that put both axes in increasing order.
    lon <- ((lon + 180) %% 360) - 180
    xo <- order(lon)
    yo <- order(lat)

    # Translate the GRIB-style variable id into short/long names and an
    # output sub-folder.
    varname <- switch(varid,
        "TMAX_GDS4_HTGL" = "tmax",
        "TMIN_GDS4_HTGL" = "tmin"
    )
    longname <- switch(varid,
        "TMAX_GDS4_HTGL" = "TMAX JRA55 3 hourly",
        "TMIN_GDS4_HTGL" = "TMIN JRA55 3 hourly"
    )
    dir3hr <- switch(varid,
        "TMAX_GDS4_HTGL" = "JRA55_3Hr_tmax",
        "TMIN_GDS4_HTGL" = "JRA55_3Hr_tmin"
    )
    outdir <- file.path(dirOUT, dir3hr)
    dir.create(outdir, showWarnings = FALSE, recursive = TRUE)

    # Output variable definition shared by every file written below.
    missval <- -99
    dx <- ncdf4::ncdim_def("Lon", "degreeE", lon[xo], longname = "Longitude")
    dy <- ncdf4::ncdim_def("Lat", "degreeN", lat[yo], longname = "Latitude")
    ncgrd <- ncdf4::ncvar_def(varname, "degC", list(dx, dy), missval,
                              longname, "float", compression = 6)

    for(jj in seq_along(ncfiles)){
        nc <- ncdf4::nc_open(ncpaths[jj])
        # init_time0 <- nc$dim[['initial_time0_hours']]$vals
        # t_units <- nc$dim[['initial_time0_hours']]$units
        #
        fcst_time <- nc$dim[["forecast_time1"]]$vals
        initial_time0 <- ncdf4::ncvar_get(nc, varid = "initial_time0_encoded")
        val <- ncdf4::ncvar_get(nc, varid)
        ncdf4::nc_close(nc)

        if(length(dim(val)) == 2){
            # 2-D case: the file holds a single (lon, lat) field. Convert
            # Kelvin to Celsius, fill NAs with the missing value, reorder
            # the axes and write one output file named by its init time.
            ncout <- file.path(outdir, paste0(varname, "_", initial_time0, ".nc"))
            val <- val - 273.15
            val[is.na(val)] <- missval
            nc <- ncdf4::nc_create(ncout, ncgrd)
            ncdf4::ncvar_put(nc, ncgrd, val[xo, yo])
            ncdf4::nc_close(nc)
        }else{
            # units(init_time0) <- units::as_units(t_units)
            # times <- as.POSIXct(init_time0, tz = "UTC")
            times <- strptime(initial_time0, "%Y%m%d%H", tz = "UTC")
            #
            # Interleave the two forecast offsets (in hours) with each init
            # time to build one timestamp per output field.
            timestamp <- c(rbind(times + fcst_time[1] * 3600, times + fcst_time[2] * 3600))
            daty <- as.POSIXct(timestamp, origin = "1970-01-01", tz = "UTC")
            outdaty <- format(daty, "%Y%m%d%H")
            ncout <- file.path(outdir, paste0(varname, "_", outdaty, ".nc"))

            # Collapse (init_time, forecast_time) into a single time axis.
            dim_val <- dim(val)
            dim(val) <- c(dim_val[1:2], dim_val[3] * dim_val[4])
            val <- val - 273.15
            val[is.na(val)] <- missval

            # NOTE(review): this loop skips the first and last timestep of
            # each file (kk runs 2 .. length-1) — presumably to drop
            # boundary forecast steps that overlap adjacent files; confirm
            # this is intentional.
            for(kk in 2:(length(ncout) - 1)){
                nc <- ncdf4::nc_create(ncout[kk], ncgrd)
                ncdf4::ncvar_put(nc, ncgrd, val[xo, yo, kk])
                ncdf4::nc_close(nc)
            }
        }
    }
    cat("Formatting data finished successfully\n")
}
|
4ce2251091fa22d9041373ff074a77836009ef5a
|
1382c19c0c12e5aa5adb46423f30fea565cc4bf5
|
/testing.r
|
dc4173e8bf518f8036c9959998245b7ce262d6f3
|
[] |
no_license
|
alfcrisci/PsychrometricChart
|
3bb61eb1e62bf04f9ab35cef5356600632a3e618
|
3912f67e587df890c402282c579e12b225a0bb6e
|
refs/heads/master
| 2020-04-06T04:36:12.182628
| 2016-01-24T04:03:30
| 2016-01-24T04:03:30
| 66,107,848
| 1
| 0
| null | 2016-08-19T19:39:54
| 2016-08-19T19:39:54
| null |
UTF-8
|
R
| false
| false
| 399
|
r
|
testing.r
|
# Exercise the helpers in psyplot.r: build one constant-h line across the
# chart's temperature grid. (hlines/set_t_d_list/mas/humidityratio are all
# defined in psyplot.r, not here.)
source('psyplot.r')

# Initialise the chart state object and its dry-bulb temperature list.
obj = hlines()
obj = set_t_d_list(obj)

# States at the (w_min, t_d_min) and (w_max, t_d_max) corners; their $h
# components bound the range of x values below.
# NOTE(review): h is presumably moist-air enthalpy — confirm against mas()
# in psyplot.r.
statemin = mas(W = obj$w_min,t_d = obj$t_d_min,p = obj$p)
statemax = mas(W = obj$w_max,t_d = obj$t_d_max,p = obj$p)

# nlines evenly spaced x values spanning [x_min, x_max].
obj$x_min = statemin$h
obj$x_max = statemax$h
obj$x_list = seq(obj$x_min,obj$x_max,(obj$x_max-obj$x_min)/(obj$nlines-1))

# Pick the 4th x value and compute the humidity ratio along the full
# temperature grid for that constant h.
x = obj$x_list[4]
obj$w_list = humidityratio(t_d=obj$t_d_list, h=rep(x,obj$nsteps))
#obj=trim(obj)
|
aa537a69cd49216f4b32e5780ceee50c060aaf44
|
07807de368091227d4474eb572586b3519912537
|
/R/negent3D.R
|
4f1e3c7f6c194f153814e9f8db9046394869995b
|
[] |
no_license
|
cran/msos
|
cbb0ab4bf892d666ced159c9a98beedd7ea60bde
|
7247b75464537542ed9697586c096545bbaf57fd
|
refs/heads/master
| 2021-05-21T11:54:06.859675
| 2020-10-31T05:10:07
| 2020-10-31T05:10:07
| 17,697,697
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,354
|
r
|
negent3D.R
|
#' Maximizing negentropy for \eqn{Q = 3} dimensions
#'
#' Searches for the rotation that maximizes the estimated negentropy of the
#' first column of the rotated data, and of the second variable fixing the
#' first, for \eqn{q = 3} dimensional data. The routine uses a random start for
#' the function optim using the simulated annealing option SANN, hence one may
#' wish to increase the number of attempts by setting nstart to a integer larger
#' than 1.
#'
#' @param y The \eqn{N \times 3}{N x 3} data matrix.
#' @param nstart The number of times to randomly start the search routine.
#' @param m The number of angles (between 0 and \eqn{\pi}) over which to
#' search to find the second variables.
#' @param ... Further optional arguments to pass to the \code{\link{optim}}
#' function to control the simulated annealing algorithm.
#'
#' @return
#' A `list` with the following components:
#' \describe{
#' \item{vectors}{The \eqn{3 x 3} orthogonal matrix G that optimizes
#' the negentropy.}
#' \item{values}{Estimated negentropies for the three rotated variables,
#' from largest to smallest.}
#' }
#'
#' @seealso \code{\link{negent}}, \code{\link{negent2D}}
#'
#' @export
#' @examples
#' \dontrun{
#' # Running this example will take approximately 30s.
#' # Centers and scales the variables.
#' y <- scale(as.matrix(iris[, 1:3]))
#'
#' # Obtains Negent Vectors for 3x3 matrix
#' gstar <- negent3D(y, nstart = 100)$vectors
#' }
negent3D <-
  function(y, nstart = 1, m = 100, ...) {
    # Objective for optim(): the estimated negentropy of the data projected
    # onto the first column of the rotation parameterized by `thetas`.
    f <- function(thetas) {
      cs <- cos(thetas)
      sn <- sin(thetas)
      negent(y %*% c(cs[1], sn[1] * c(cs[2], sn[2])))
    }

    # Preallocate one row of angles and one negentropy value per random
    # start (the original grew these with rbind()/c() inside the loop).
    tt <- matrix(NA_real_, nrow = nstart, ncol = 3)
    nn <- numeric(nstart)
    for (i in seq_len(nstart)) {
      # Random starting angles in (0, pi), refined by simulated annealing;
      # fnscale = -1 turns optim's minimization into maximization.
      thetas <- runif(3) * pi
      o <- optim(thetas, f, method = "SANN", control = list(fnscale = -1), ...)
      tt[i, ] <- o$par
      nn[i] <- o$value
    }

    i <- imax(nn) # The index of best negentropy
    cs <- cos(tt[i, ])
    sn <- sin(tt[i, ])

    # Build an orthogonal 3x3 matrix whose first column is the best
    # direction found above.
    g.opt <- c(cs[1], sn[1] * cs[2], sn[1] * sn[2])
    g.opt <- cbind(g.opt, c(-sn[1], cs[1] * cs[2], sn[2] * cs[1]))
    g.opt <- cbind(g.opt, c(0, -sn[2], cs[2]))

    # Optimize the second variable within the remaining 2-D subspace,
    # keeping the first column fixed.
    x <- y %*% g.opt[, 2:3]
    n2 <- negent2D(x, m = m)
    g.opt[, 2:3] <- g.opt[, 2:3] %*% n2$vectors

    list(vectors = g.opt, values = c(nn[i], n2$values))
  }
|
58081f385f55b16d2e301861cb4e1f0cedd18706
|
d97b180c5fad5b297c782e548fe054aaaa12dd05
|
/Data_sim.R
|
a521428835dee4a4e373e904122c50a103c5f1cc
|
[] |
no_license
|
SunnySunnia/Data-Simulation
|
28e527416ad09a1fccdfb370efda986c6405f033
|
e9abd5035142fc9019b01e14171ef47562bad04d
|
refs/heads/master
| 2021-01-01T05:17:41.113868
| 2016-05-15T08:07:24
| 2016-05-15T08:07:24
| 58,089,694
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,893
|
r
|
Data_sim.R
|
# NOTE(review): setwd() hard-codes a machine-specific path, making the script
# non-portable; consider running from the project root instead.
setwd("C:/HS616/Data-Simulation")
# Uncomment for a reproducible simulation.
#set.seed(10)
generateData = function(N=412){
  # Simulate a PKU maternal/infant outcomes data set with N rows.
  # Each variable is resampled (with replacement) from a fixed-size pool of
  # draws; the pool sizes (412, 376, 378, ...) presumably mirror the
  # per-variable sample sizes of the original study, including its NAs.
  # Returns a data frame with 13 columns: age, mVIQ, mPIQ, mIQ, wt_gain,
  # phe_sd, phe_avg, gestation, birth_len, birth_wt, head_circum, MDI, PDI.
  age = sample(c(runif(412,16,36)),N,replace = TRUE)
  mVIQ = sample(c(abs(105-rweibull(376,2, 45))+22, rep(NA,(412-376))),N,replace = TRUE)
  mPIQ = sample(c(rnorm(378, 88.5,14.4), rep(NA,(412-378))),N,replace = TRUE)
  mIQ = sample(c((mVIQ+mPIQ)/2), N,replace = TRUE)
  gestation = sample(c(45-rweibull(412,1.6,5)),N,replace = TRUE)
  # Gestation length nudges weight gain, birth length and head circumference.
  wt_gain= sample(c(rnorm(366,25.0,14.1),rep(NA,(412-366))),N,replace = TRUE)+0.1*gestation
  phe_sd = sample(c(rweibull(408,2,188),rep(NA,(412-408))),N, replace = TRUE)
  phe_avg = sample(c(rweibull(412,1.8,610)),N,replace = TRUE)
  birth_len = sample(c(rnorm(406,45.0,3.1),rep(NA,(412-406))),N,replace = TRUE)+0.1*gestation
  # Higher average phe slightly reduces head circumference.
  head_circum = sample(c(rnorm(403,32.8,2.0),rep(NA,(412-403))),N, replace = TRUE)+0.1*gestation-0.001*phe_avg-2
  # Deterministic linear combinations (earlier stochastic versions kept as
  # comments for reference).
  #birth_wt = sample(c(rweibull(411,2.7,1700)+1389,rep(NA,(412-411))), N,replace = TRUE)+head_circum
  birth_wt = 38*gestation+7*head_circum+41*wt_gain
  #MDI = sample(c(rnorm(283,100.1, 20.3),rep(NA, (412-283))),N,replace = TRUE)-33+head_circum
  MDI = 1.5*head_circum+3*gestation-0.012*phe_avg-56.5
  #PDI = sample(c(rweibull(263, 3,52)+45,rep(NA,(412-263))),N,replace = TRUE)-33+head_circum
  PDI = 1.5*head_circum+2.5*gestation-0.022*phe_avg-40.5
  result = data.frame(age, mVIQ, mPIQ, mIQ, wt_gain, phe_sd,
                      phe_avg, gestation, birth_len, birth_wt,
                      head_circum, MDI, PDI)
  return(result)
}
# NOTE(review): this whole section references variables (age, mVIQ, wt_gain,
# N, ...) that only exist INSIDE generateData(), so it errors if the script
# is run top-to-bottom. It is presumably leftover scratch code from before
# the logic was wrapped into the function — confirm and either delete it or
# move it inside generateData().
result = data.frame(age, mVIQ, mPIQ, mIQ, wt_gain, phe_sd,
                    phe_avg, gestation, birth_len, birth_wt,
                    head_circum, MDI, PDI)
# Induce a positive rank correlation between wt_gain and birth_wt by sorting
# both (NAs appended), then shuffling rows together.
wt_related = data.frame(c(sort(wt_gain),rep(NA,sum(is.na(wt_gain)))),
                        c(sort(birth_wt),rep(NA,sum(is.na(birth_wt)))))
colnames(wt_related)= c("wt_gain","birth_wt")
wt_related= wt_related[sample(1:N,N,replace = F),]
# Same trick for age vs phe_sd.
age_related=data.frame(sort(age),c(sort(phe_sd),rep(NA,sum(is.na(phe_sd)))))
colnames(age_related) = c("age", "phe_sd")
age_related= age_related[sample(1:N,N,replace = F),]
# phe_avg sorted descending so it correlates negatively with MDI/PDI.
phe_related = data.frame(sort(phe_avg,decreasing = T),
                         c(sort(MDI),rep(NA,sum(is.na(MDI)))),
                         c(sort(PDI),rep(NA,sum(is.na(PDI)))))
colnames(phe_related)= c("phe_avg", "MDI", "PDI")
phe_related= phe_related[sample(1:N,N,replace = F),]
# Reassemble the full frame using the correlated columns.
result = data.frame(age_related$age, mVIQ, mPIQ, mIQ, wt_related$wt_gain,
                    age_related$phe_sd, phe_related$phe_avg,
                    gestation, birth_len, wt_related$birth_wt,
                    head_circum, phe_related$MDI,
                    phe_related$PDI)
colnames(result)=c("age", "mVIQ", "mPIQ","mIQ","wt_gain",
                   "phe_sd", "phe_avg", "gestation","birth_len",
                   "birth_wt", "head_circum", "MDI","PDI")
# Simulate a study-sized data set and inspect the marginal distributions.
data= generateData(412)
summary(data)
library(reshape2)
library(ggplot2)
# Long format: one (variable, value) pair per row, for faceted histograms.
b = melt(data)
head(b)
tail(b)
ggplot(b,aes(x = value)) +
# comparing diverse rather than similar data, so scales should range freely to display data best
facet_wrap(~variable,scales = "free") + # also try scales = "free"
geom_histogram(fill="pink",bins = 50)
# Bin age into an ordered factor.
# NOTE(review): mutate() comes from dplyr, which is not loaded in this
# script (only reshape2/ggplot2 above) — confirm dplyr is attached elsewhere.
data = mutate(data, age.level=factor(ifelse(data$age<20, "teens", ifelse(data$age<25,"early.20s",ifelse(data$age<30,"late.20s",ifelse(data$age<35,"early.30s","late.30s")))),levels = c("teens", "early.20s","late.20s", "early.30s", "late.30s"), ordered = T))
# Simple and full regressions of MDI; plot() with which = 1 shows the
# residuals-vs-fitted diagnostic.
lm1 = lm(MDI ~ phe_avg, data =data)
summary(lm1)
plot(lm1,1)
full1 = lm(MDI ~ . -PDI-age.level , data =data)
summary(full1)
plot(full1,1)
# Repeat on a larger simulated sample for each outcome of interest.
data = generateData(1000)
mdi_all = lm(MDI~. -PDI-age.level, data = data)
summary(mdi_all)
pdi_all = lm(PDI~ . -MDI-age.level, data = data)
summary(pdi_all)
bwt_all = lm(birth_wt ~ . -MDI -PDI -age.level, data = data)
summary(bwt_all)
plot(bwt_all,1)
hcir_all = lm(head_circum ~ . -MDI -PDI -age.level, data = data)
summary(hcir_all)
plot(hcir_all,1)
|
71c0cf318f3936285617f39e2400d657a2e4ab3c
|
2148311b5ee85a79c7db68cc108ebb621958ff7a
|
/Druk_trace_app_inspection.R
|
4336b67dab3575da8d2bc276e0acfe42c8a3cce8
|
[] |
no_license
|
UgyenNorbu/Druk_Trace
|
a1bcd1a042147478f03afea5eed8f272db800cb9
|
05f5779e94249b22f7cec8f661bdaae9dbbf558b
|
refs/heads/master
| 2022-11-05T22:12:58.237586
| 2020-06-16T11:05:02
| 2020-06-16T11:05:02
| 272,255,781
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,919
|
r
|
Druk_trace_app_inspection.R
|
library(tidyverse)
library(ggplot2)
library(googlesheets4)
library(googledrive)
library(WriteXLS)
library(scales)
library(lubridate)
library(gganimate)
library(ggrepel)
# DATA IMPORT --------------------------------------------------------------
# file_list maps each inspection-log Google Sheet (column `id`) to its
# location name (column `Loc_name`); my_col_names standardizes the column
# headers across all of the sheets read below.
file_list <- read_csv("backup_csv/file_list.csv")
my_col_names <- c("Sl.No.", "Date", "Taxi_number", "DL_number", "QR_displayed",
                  "Log_book", "Inspection_location", "Updated_by", "Loc_name")
# The 22 inspection sheets share one layout; only the sheet id, the location
# name and (in a few sheets) the date format differ. One helper replaces the
# 22 near-identical copy-pasted blocks that used to live here. (The stray
# no-op mutates in the originals — e.g. a duplicated DL_number conversion in
# sheet 3, a duplicated Taxi_number filter in sheet 15 — are dropped; they
# changed nothing.)
#
# read_inspection_sheet: download sheet `idx` from file_list, standardize the
# column names, tag the rows with the location name, drop rows with no taxi
# number, parse the Date column with the sheet-specific format, and coerce
# every remaining column to character so the sheets can be row-bound later.
read_inspection_sheet <- function(idx, date_format = "%d/%m/%Y") {
    sheet <- read_sheet(ss = file_list$id[idx])
    colnames(sheet) <- my_col_names
    sheet %>%
        mutate(loc_name = file_list$Loc_name[idx]) %>%
        select(-Sl.No.) %>%
        filter(!is.na(Taxi_number)) %>%
        mutate(Date = as.Date(Date, date_format)) %>%
        mutate(Taxi_number = as.character(Taxi_number)) %>%
        mutate(DL_number = as.character(DL_number)) %>%
        mutate(QR_displayed = as.character(QR_displayed)) %>%
        mutate(Log_book = as.character(Log_book)) %>%
        mutate(Inspection_location = as.character(Inspection_location)) %>%
        mutate(Updated_by = as.character(Updated_by)) %>%
        mutate(loc_name = as.character(loc_name))
}

# Date formats entered by each location's inspectors (defaults to d/m/Y).
sheet_date_formats <- rep("%d/%m/%Y", 22)
sheet_date_formats[4] <- "%d-%m-%y"
sheet_date_formats[7] <- "%d-%B-%Y"
sheet_date_formats[10] <- "%d-%m-%Y"
sheet_date_formats[18] <- "%d/%m/%y"

# Create file_1 ... file_22, preserving the variable names that the
# bind_rows() step further down the script expects.
for (idx in seq_len(22)) {
    assign(paste0("file_", idx),
           read_inspection_sheet(idx, sheet_date_formats[idx]))
}
#.....................................................................
# Combine every per-dzongkhag sheet into one master inspection table.
# FIX: the original bind_rows() call listed file_3 twice, so that
# dzongkhag's inspections were double-counted in every summary below.
druk_trace_master <- bind_rows(file_1, file_2, file_3, file_4,
                               file_5, file_6, file_7, file_8, file_9,
                               file_10, file_11, file_12, file_13, file_14,
                               file_15, file_16, file_17, file_18, file_19,
                               file_20, file_21, file_22)
# Normalise the hand-entered yes/no answers to a consistent YES/NO.
# Any other value (including NA) is left untouched, matching the original
# four ifelse() passes.
druk_trace_master <- druk_trace_master %>%
  mutate(QR_displayed = ifelse(QR_displayed %in% c("yes", "Yes"), "YES", QR_displayed)) %>%
  mutate(QR_displayed = ifelse(QR_displayed %in% c("no", "No"), "NO", QR_displayed))
# Write a date-stamped Excel backup of the full master table.
WriteXLS(druk_trace_master, paste("excel_output/", paste(Sys.Date(), "daily_backup.xlsx", sep = "_"), sep = ""))

# Interactive sanity check: print rows where QR_displayed is still missing
# (result is not stored).
druk_trace_master %>%
  filter(is.na(QR_displayed))

# Count inspections per dzongkhag and QR answer.
druk_trace_grouped <- druk_trace_master %>%
  group_by(loc_name, QR_displayed) %>%
  summarise(number = n())

# Horizontal stacked bar chart of inspections per dzongkhag, filled by
# whether the QR code was displayed.
druk_trace_grouped %>%
  ggplot(aes(x = reorder(loc_name, -number), y = number,
             fill = QR_displayed)) +
  geom_bar(stat = "identity", alpha = 0.5) +
  coord_flip() +
  labs(y = "Number of taxis inspected",
       x = "Dzongkhag",
       fill = "QR code displayed?",
       title = "Inspection for Druk Trace app implementation in taxis",
       subtitle = paste("Total number of taxi inspected = ",
                        sum(druk_trace_grouped$number), sep = " "),
       caption = paste("Generated on ", Sys.Date(), sep = " ")) +
  theme_minimal() +
  theme(axis.text = element_text(size = 11, family = "Times"),
        axis.title = element_text(size = 12, family = "Times"),
        plot.title = element_text(size = 15, family = "Times", face = "bold", hjust = 0.5),
        plot.subtitle = element_text(size = 13, family = "Times", hjust = 0.5),
        plot.caption = element_text(size = 10, family = "Times", hjust = 0.95))

# Save the chart and the grouped counts with date-stamped file names.
ggsave(paste("image_output/",
             paste(Sys.Date(),
                   "Druk trace inspection detail.jpg",
                   sep = "_"),
             sep = ""), width = 25, height = 15, units = "cm")
write_csv(druk_trace_grouped, paste("excel_output/",
                                    paste(Sys.Date(), "master_file.csv",
                                          sep = "_"),
                                    sep = "")
)
# Daily progress ----------------------------------------------------------
# Dzongkhags present in the file list but absent from today's grouped data,
# i.e. locations where no inspection was conducted (auto-printed).
list_1 <- file_list$Loc_name
list_2 <- druk_trace_grouped$loc_name
(insp_not_conducted <- list_1[!(list_1 %in% list_2)])

# Append today's total to the running daily summary and persist it.
daily_summary <- read_csv("backup_csv/daily_summary.csv")
glimpse(daily_summary)
today_summary <- tibble(date = Sys.Date(),
                        total_taxis_insp = sum(druk_trace_grouped$number))
daily_summary <- rbind(daily_summary, today_summary)
# POSIXct is needed for scale_x_datetime() below.
daily_summary <- daily_summary %>%
  mutate(date = as.POSIXct(date))
write_csv(daily_summary, "backup_csv/daily_summary.csv")
WriteXLS::WriteXLS(daily_summary, "excel_output/daily_summary.xlsx")
glimpse(daily_summary)

# Line chart of the daily total of inspected taxis.
daily_summary %>%
  ggplot(aes(x = date, y = total_taxis_insp)) +
  geom_line(color = "#A9CCE3", size = 1.5) +
  geom_point(color = "#2471A3", size = 3) +
  theme_minimal() +
  ylim(0, 2500) +
  labs(x = "Date",
       y = "No. of taxis inspection",
       title = "Daily progress of Druk Trace app") +
  scale_x_datetime(date_breaks = "1 day", date_labels = "%d-%b") +
  theme(axis.title = element_text(family = "Times", size = 12),
        axis.text = element_text(family = "Times", size = 10),
        plot.title = element_text(family = "Times", size = 14,
                                  face = "bold", hjust = 0.5),
        axis.text.x = element_text(angle = 15))
# NOTE(review): unlike the other plots, this one is saved without the
# "image_output/" prefix or a date stamp -- confirm whether that is intended.
ggsave("daily_progress_QR.jpg", width = 25, height = 15, units = "cm")
# Dzongkhag-wise overview --------------------------------------------------------
# Inspections per day and location.
daily_DT <- druk_trace_master %>%
  group_by(Date, loc_name) %>%
  summarise(number = n())

# NOTE(review): interactive debugging aid -- opens a viewer with rows whose
# Date failed to parse; consider removing it from the scripted pipeline.
druk_trace_master %>%
  filter(is.na(Date)) %>%
  View()

# One small-multiple panel per dzongkhag showing its daily inspection counts.
daily_DT %>%
  ggplot(aes(x = Date, y = number)) +
  geom_line(color = "#A9CCE3") +
  geom_point(color = "#2471A3") +
  labs(x = "Date",
       y = "Number of taxis",
       title = "Dzongkhag-wise progress of Druk Trace app implementation in taxis") +
  facet_wrap(loc_name~., ncol = 3) +
  theme(axis.text = element_text(size = 10, family = "Times"),
        axis.title = element_text(size = 12, family = "Times"),
        plot.title = element_text(size = 14, family = "Times", face = "bold", hjust = 0.5))
ggsave(paste("image_output/",
             paste(Sys.Date(), "Druk_Trace_Overview.jpg", sep = "_"),
             sep = ""),
       width = 25, height = 15, units = "cm")
# Animation ---------------------------------------------------------------
# Animated version of the daily-progress line chart, rendered with gganimate
# and saved as a GIF.
daily_summary <- read_csv("backup_csv/daily_summary.csv")
animate_plot <- daily_summary %>%
  ggplot(aes(x = date, y = total_taxis_insp)) +
  geom_line(color = "#D4E6F1", size = 1.5) +
  geom_point(color = "#2980B9", size = 3) +
  labs(x = "Date",
       y = "Number of taxis inspected",
       title = "Daily progress of Druk Trace app in taxis") +
  ylim(0, 2500) +
  theme_light() +
  scale_x_datetime(date_breaks = "1 day", date_labels = "%d-%b") +
  theme(axis.title = element_text(family = "Times", size = 10),
        axis.text = element_text(family = "Times", size = 10),
        plot.title = element_text(family = "Times", size = 12,
                                  face = "bold", hjust = 0.5),
        axis.text.x = element_text(angle = 15)) +
  # FIX: reference the column directly instead of daily_summary$... -- an
  # external vector inside aes() is not subset with the data, so the labels
  # could fall out of sync with the rows gganimate shows in each frame.
  geom_label_repel(aes(label = round(total_taxis_insp, digits = 0)), label.size = 0) +
  ease_aes('linear') +
  geom_point(aes(group = seq_along(date))) +
  # FIX: a plot can only carry one transition; the original added both
  # transition_time() and transition_reveal(), with the latter taking
  # effect, so only transition_reveal() is kept.
  transition_reveal(date)
animate(animate_plot, nframes = 45, width = 900, height = 600, fps = 10)
anim_save("my_gif.gif")
|
5aeccdccea029f2132a9626dad8d26f78cba4285
|
a1cdc43213dcd9b48cd2df7e33effe3e32ced8ff
|
/analysis.02.R
|
f3cd5c726f94a1e7edee52e43f2e12340339bf0a
|
[
"MIT"
] |
permissive
|
yanlinlin82/190926a_How-Long-Will-I-Live
|
299dab01a7d9d880fb5406ae04b276eeee882797
|
677e306f70b5cf794d32dfb8ea2f75e70abf74db
|
refs/heads/master
| 2020-08-01T19:43:43.851592
| 2019-10-19T16:51:19
| 2019-10-19T16:51:19
| 211,095,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,711
|
r
|
analysis.02.R
|
library(tidyverse)

# Life table: per-gender population and death counts by age band.
a <- read_tsv("data/life-time.txt")

# For every gender and every starting age (0, 5, ..., 95) build the
# conditional survival curve up to age 100, then stack the results into `dl`.
# FIX: results are collected in a list and bound once at the end instead of
# growing a data frame with rbind() inside the loop (which copies the whole
# accumulator on every iteration); the dead `gender`/`age` pre-assignments
# were removed (the loop variable overwrote `gender`, and `age` was unused).
dl_parts <- list()
for (gender in c("男", "女")) {
  for (age_level in seq(0, 95, by = 5)) {
    message("process: ", gender, ", ", age_level)
    # Per-band totals and deaths for this gender.
    b <- tibble(age_lower = a$`年龄下限`,
                age_upper = a$`年龄上限`,
                total = a[[paste0("总人口数_", gender)]],
                dead = a[[paste0("死亡人数_", gender)]])
    # Annual death ratio per age, looked up from the band containing it
    # (an NA upper bound marks the open-ended last band).
    d <- tibble(age = age_level:100) %>%
      mutate(death_ratio = sapply(age, function(n) {
        b %>% filter(n >= age_lower & n <= ifelse(is.na(age_upper), Inf, age_upper)) %>% with(dead / total)
      }))
    # First row seeds the recursion; later rows are filled below from the
    # previous row's survival probability.
    d <- d %>%
      mutate(death_prob = ifelse(row_number() == 1, death_ratio, NA),
             survival_prob = ifelse(row_number() == 1, 1 - death_ratio, NA))
    for (i in 2:nrow(d)) {
      d$death_prob[[i]] <- d$survival_prob[[i - 1]] * d$death_ratio[[i]]
      d$survival_prob[[i]] <- d$survival_prob[[i - 1]] * (1 - d$death_ratio[[i]])
    }
    dl_parts[[length(dl_parts) + 1]] <- d %>% mutate(age_level = age_level, gender = gender)
  }
}
dl <- bind_rows(dl_parts)
# Plot the survival curves in five files, each faceting the starting ages
# that fall into one 20-year band of age_level.
for (panel in 0:4) {
  panel_data <- dl %>%
    arrange(age) %>%
    filter(floor(age_level / 20) == panel)
  p <- ggplot(panel_data,
              aes(x = age, y = survival_prob * 100, color = gender, group = gender)) +
    geom_path() +
    facet_wrap(~ paste0("当前年龄 = ", age_level)) +
    scale_x_continuous(breaks = seq(0, 100, by = 10), limits = c(0, 100)) +
    scale_y_continuous(breaks = seq(0, 100, by = 20), limits = c(0, 100)) +
    labs(x = "年龄", y = "存活概率 (%)", color = "性别")
  ggsave(filename = paste0("plot.", (panel + 1), ".png"), plot = p, width = 8, height = 6)
}
|
b5c144a8501161f2d9ae4487d7820c9ac910b085
|
9db1042b59eccaa4729882c619631599426b6f14
|
/cleanSentence.R
|
5c1aa70cc08870acd5a22cd03561e822b64abee1
|
[] |
no_license
|
sreifeis/maRkov-chain-gang
|
4ef242c22922888d302eb4d1f81981f5a8b93479
|
6a09359ddf17fdf92b8d95396b52fa3e19002201
|
refs/heads/master
| 2020-05-01T19:09:04.733684
| 2019-04-26T22:50:24
| 2019-04-26T22:50:24
| 177,640,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
cleanSentence.R
|
#' Clean Sentence
#'
#' Standardizes the sentence so that it is in a proper format to be
#' analyzed in other functions.
#'
#' First, we convert the sentence to character in case it is of
#' variable type factor. Then, we make the sentence lower case,
#' and finally we remove all punctuation from the sentence.
#'
#'
#' @param sentence a single sentence - character or factor
#' @return the cleaned sentence
#'
#' @importFrom tm removePunctuation
#'
#' @export
cleanSentence <- function(sentence)
{
  if (length(sentence) > 1)
  {
    stop("Sentence should just be one character (or factor) variable, not a vector")
  }
  # FIX: use the scalar `&&` (not the vectorized `&`) inside if(); both
  # operands are length one here, and R >= 4.3 errors when `&&` receives
  # longer inputs, so the intent is made explicit.
  if (typeof(sentence) == "list" && lengths(sentence) > 1)
  {
    stop("Length of the (only) element in the list must be 1!")
  }
  # Convert to character (handles factor input).
  char_sent <- as.character(sentence)
  # Lower-case every word.
  low_case <- tolower(char_sent)
  # Strip all punctuation.
  clean_sentence <- tm::removePunctuation(low_case)
  return(clean_sentence)
}
|
1b39701c211b30f430bd13b0147f07c71bff3d29
|
faf69836350c93381d91748de48ffffc2b78ea02
|
/data_mgmt/data_prep/rc_motif_count.r
|
35f83df83dd98bcb246b5ac5c3b0f381d8fa9e5d
|
[
"MIT"
] |
permissive
|
theandyb/smaug-redux
|
0ffc09ea2761fd1ff77aa4435de0994a18913db8
|
6edf394b40d106ffec8f6791e0ee525fb799963b
|
refs/heads/master
| 2022-04-30T20:53:50.436051
| 2022-04-27T21:08:43
| 2022-04-27T21:08:43
| 143,439,173
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 938
|
r
|
rc_motif_count.r
|
library(tidyverse)
library(stringi)
library(here)
library(yaml)

# Project configuration; attach() puts its entries (e.g. analysisdir) on the
# search path.
# NOTE(review): attach() is fragile -- referencing args$analysisdir
# explicitly would be safer.
args <- yaml.load_file(here("_config.yaml"))
attach(args)

k <- "7"
chromosomes <- seq(1,22)

results_dir <- paste0(analysisdir, "/motif_counts/",k,"-mers/full")

# For each chromosome, collapse motif counts with their reverse complements
# so each strand-symmetric pair is reported once as "MOTIF(RC)".
for(chr in chromosomes){
  cntFile <- paste0(results_dir, "/", "chr", chr, "_bins.tsv")
  df <- read_tsv(cntFile, col_names = c('Motif', 'nMotifs', 'BIN'))
  # Reverse complement: complement the bases, then reverse the string.
  df <- df %>% mutate(RC = stri_reverse(chartr("ACGT","TGCA", Motif)))
  # Pair each motif with its reverse complement within the same bin.
  df <- inner_join(select(df, Motif, RC, BIN, nMotifs),
    select(df, RC, BIN, nMotifs),
    by = c("Motif" = "RC", "BIN" = "BIN"))
  df['nMotifs'] <- df$nMotifs.x + df$nMotifs.y
  df <- df %>% mutate(Motif = paste0(Motif, "(", RC ,")")) %>%
    select(Motif, BIN, nMotifs)
  df['CHR'] <- chr
  outFile <- paste0(results_dir, "/", "chr", chr, ".",k,"-mer_motifs_1000kb_full.txt")
  # FIX: spell out `row.names`; the original `row.name =` relied on partial
  # argument matching, which warns/errors under strict settings.
  write.table(df, outFile, sep="\t", quote = FALSE, row.names = FALSE)
}
|
10831edaf380701241757dc992ba147736e00748
|
302386ef65a89f805809f5bf2b6ed20bd3951610
|
/code/R-files/parallelStudy.r
|
afe8fed9f0279cbb1176f1bab5100785218dc14f
|
[
"BSD-3-Clause"
] |
permissive
|
PyQuake/earthquakemodels
|
d7a90b72c452294923e00aead78a7fd3356b0b6b
|
a10b30c00c072a4061dbc3ae02c3cd86b4cda61c
|
refs/heads/master
| 2020-03-27T21:00:47.804765
| 2017-10-27T00:19:26
| 2017-10-27T00:19:26
| 25,963,657
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,883
|
r
|
parallelStudy.r
|
# NOTE(review): hard-coded absolute working directory -- breaks on any other
# machine; consider project-relative paths.
setwd("~/Documents/estudos/unb/earthquakemodels/code/")
# Suppress scientific notation in printed numbers.
options(scipen=999)
library(grid)
library(latticeExtra)
library(png)
library(grDevices)
library(RColorBrewer)
# Read one log-likelihood result file for a model type / region / depth /
# year.  File layout: "loglike/<type><region>_<depth>_<year>.txt" with one
# value per line; returns the one-column data frame produced by read.csv2().
loadData <- function(type, region, year, depth) {
  path <- paste0("loglike/", type, region, "_", depth, "_", year, ".txt")
  read.csv2(path, sep = "\n", header = FALSE)
}
# Convert the V1 column of a result data frame to a numeric vector.
# FIX: the original looped element-by-element; the as.numeric(levels(f))[f]
# idiom works on the whole factor at once.  It also only handled factors,
# which breaks under the stringsAsFactors = FALSE default of R >= 4.0 where
# read.csv2() returns character columns -- both cases are covered now.
convertToNumeric <- function(model) {
  v <- model$V1
  if (is.factor(v)) {
    # Map the factor codes through their (numeric) level labels.
    as.numeric(levels(v))[v]
  } else {
    # Character (or already numeric) column: plain coercion.
    as.numeric(v)
  }
}
# Average the per-cell values over the 10 replicate runs (files 0..9) of one
# model type / year / depth / region.  r*c is the grid size; files may hold
# fewer than r*c values, in which case trailing cells stay 0.
# FIX: the inner element loop is vectorized, and non-factor columns are
# handled (under stringsAsFactors = FALSE the original produced NAs because
# levels() of a character/numeric column is NULL).
calcMedia <- function(type, year, depth, region, r, c) {
  soma <- rep(0, r * c)
  for (i in 1:10) {
    file <- paste(type, '/', region, '_', depth, '_', year, i - 1, ".txt", sep = "")
    raw_data <- read.csv2(file, sep = '\n', header = FALSE)
    v <- raw_data$V1
    vals <- if (is.factor(v)) as.numeric(levels(v))[v] else as.numeric(v)
    idx <- seq_along(vals)
    soma[idx] <- soma[idx] + vals
  }
  soma / 10
}
# Render a heat map of modelData (a flat vector of length r*c, consumed in
# row-major order) over a background map image and write it to fileToSave
# as an 800x800 PNG.  NAs are drawn as 0 and values are capped at 12 so the
# 16-colour palette is not dominated by outliers.
# NOTE(review): depends on the global `imagePath` (set via <<- in
# plotModelsByYears) for the background map -- TODO pass it as an argument.
plotMatrixModel = function(modelData, fileToSave, r, c){
  # TODO -- hardcoded map is BAD
  matrixData = matrix(nrow = r, ncol = c)
  k = 1
  # Fill the r x c matrix row by row from the flat vector.
  for (i in 1:r){
    for (j in 1:c){
      if(is.na(modelData[k])==T){
        value=0
      }
      else{
        value = modelData[k]
        if (value > 12){
          value = 12
        }
      }
      matrixData[i,j] = value
      k = k + 1
    }
  }
  png(fileToSave, width = 800, height = 800)
  # Reversed heat palette: low values light, high values dark red.
  jBrewColors <- rev(heat.colors(16))
  p = levelplot((matrixData), col.regions = jBrewColors, alpha.regions=0.6)
  grid.raster(as.raster(readPNG(imagePath)))
  # Draw the map image underneath the semi-transparent level plot.
  print( p+ layer(grid.raster(as.raster(readPNG(imagePath))), under=T))
  dev.off()
}
# Render the averaged Kanto heat map for each year from 2005 to 2008.
# Sets the global `imagePath` (consumed by plotMatrixModel) to the Kanto
# background map before each plot.
plotModelsByYears <- function(type, depth) {
  for (year in 2005:2008) {
    region <- "Kanto"
    saveFile <- paste("./heatMap/", type, region, "_", depth, '_', year, ".png", sep = "")
    mediaKanto <- calcMedia(type = type, year = year, region = region, depth = depth, 45, 45)
    imagePath <<- "../data/kantomap.png"
    plotMatrixModel(mediaKanto, saveFile, 45, 45)
  }
}
# Load previously saved results (defines `finalData`) from the data folder,
# then return to the code directory.
# NOTE(review): hard-coded absolute paths -- machine-specific.
setwd("~/Documents/estudos/unb/earthquakemodels/Zona2/DataFromR")
load("newdata.Rda")
summary(finalData)
setwd("~/Documents/estudos/unb/earthquakemodels/code/")
# Map a region index to its name: 1 = Kanto, 2 = Kansai, 3 = Tohoku,
# anything else = EastJapan.
chooseRegion <- function(i) {
  switch(as.character(i),
         "1" = "Kanto",
         "2" = "Kansai",
         "3" = "Tohoku",
         "EastJapan")
}
region <- chooseRegion(1)

# Append one block of result rows (one model on one region/year/depth) to
# the accumulated data frame and return it.
# FIX: the original set depths = c(depthsAmodel, depthsAmodel) (length 20)
# against length-10 columns, so data.frame() recycled the shorter columns
# and every log-likelihood row was duplicated.  Each block now contributes
# exactly one row per replicate run.
appendModelResults <- function(acc, file_type, model_name, region, year, depth) {
  values <- convertToNumeric(loadData(file_type, region, year, depth))
  n <- length(values)
  block <- data.frame(loglikeValues = values,
                      model = rep(model_name, n),
                      depths = rep(depth, n),
                      years = rep(toString(year), n),
                      regions = rep(region, n))
  rbind(acc, block)
}

# Result-file prefix -> model label, replacing the four copy-pasted blocks
# of the original loop body.
model_files <- c("parallel-random" = "GAModelPar",
                 "parallelList-random" = "ReducedGAModelPar",
                 "sc-parallel-random" = "GAModelParSC",
                 "sc-parallelList-random" = "ReducedGAModelParSC")

for (year in 2005:2010) {
  for (file_type in names(model_files)) {
    finalData <- appendModelResults(finalData, file_type, model_files[[file_type]],
                                    region, year, "100")
  }
}
summary(finalData)

# Subset: Kanto region, excluding 2009/2010.
subTabela = finalData[finalData$regions=='Kanto',]
subTabela = subTabela[subTabela$years!='2009'&subTabela$years!='2010',]
# NOTE(review): the first two conditions require model to equal BOTH
# 'GAModelSC' AND 'ReducedGAModelSC', which can never be true, so this
# filter selects zero rows.  The surrounding `!=` conditions suggest `!=`
# (or `|`) was intended -- confirm before relying on subTabela.
subTabela = subTabela[subTabela$model=='GAModelSC'&subTabela$model=='ReducedGAModelSC'&
                        subTabela$model!='Emp-GAModelWindow'&subTabela$model!='Emp-ReducedGAModelWindow'&
                        subTabela$model!='Emp-GAModelSLC'&subTabela$model!='Emp-ReducedGAModelSLC'
                      ,]
summary(subTabela)
#
# Mean log-likelihood per model and year (printed to console).
# NOTE(review): `finalData$year` relies on R's partial matching of `$` to
# reach the `years` column; spell out `finalData$years` to be safe.
mean(finalData$loglikeValues[finalData$model=='GAModelPar'&finalData$year=='2005'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelPar'&finalData$year=='2005'])
mean(finalData$loglikeValues[finalData$model=='GAModelParSC'&finalData$year=='2005'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelParSC'&finalData$year=='2005'])
mean(finalData$loglikeValues[finalData$model=='GAModelPar'&finalData$year=='2006'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelPar'&finalData$year=='2006'])
mean(finalData$loglikeValues[finalData$model=='GAModelParSC'&finalData$year=='2006'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelParSC'&finalData$year=='2006'])
mean(finalData$loglikeValues[finalData$model=='GAModelPar'&finalData$year=='2007'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelPar'&finalData$year=='2007'])
mean(finalData$loglikeValues[finalData$model=='GAModelParSC'&finalData$year=='2007'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelParSC'&finalData$year=='2007'])
mean(finalData$loglikeValues[finalData$model=='GAModelPar'&finalData$year=='2008'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelPar'&finalData$year=='2008'])
mean(finalData$loglikeValues[finalData$model=='GAModelParSC'&finalData$year=='2008'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelParSC'&finalData$year=='2008'])
mean(finalData$loglikeValues[finalData$model=='GAModelPar'&finalData$year=='2009'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelPar'&finalData$year=='2009'])
mean(finalData$loglikeValues[finalData$model=='GAModelParSC'&finalData$year=='2009'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelParSC'&finalData$year=='2009'])
mean(finalData$loglikeValues[finalData$model=='GAModelPar'&finalData$year=='2010'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelPar'&finalData$year=='2010'])
mean(finalData$loglikeValues[finalData$model=='GAModelParSC'&finalData$year=='2010'])
mean(finalData$loglikeValues[finalData$model=='ReducedGAModelParSC'&finalData$year=='2010'])
# resultANOVA = aov(loglikeValues~model+depths+years+regions , data = finalData)
# summary(resultANOVA)
# tuk = TukeyHSD(resultANOVA)
# op <- par(mar = c(5,15,4,2) + 0.1)
# plot(tuk,las=1)
# print(tuk)
# plotModelsByYears('parallel-random', depth)
# plotModelsByYears('sc-parallel-random', depth)
# plotModelsByYears('sc-parallelList-random', depth)
# plotModelsByYears('parallelList-random', depth)
|
cc476123df356e4a9e7575174b45f83a36ac8ac5
|
e721d50452830fbc61dde74c89ddf948561d558c
|
/tests/test_classic_examples.r
|
ec3b7bb4e069eb6327f7f956c38879751795782b
|
[] |
no_license
|
ShiruiH/apsimx
|
71fb1994e0e2614e11b00c731ef451338b761b67
|
c970f0c0779fb18319fb1d793fa857e717eb0fb0
|
refs/heads/master
| 2020-12-12T21:46:56.426362
| 2020-01-16T04:51:40
| 2020-01-16T04:51:40
| 234,236,853
| 0
| 0
| null | 2020-01-16T04:42:51
| 2020-01-16T04:42:50
| null |
UTF-8
|
R
| false
| false
| 738
|
r
|
test_classic_examples.r
|
## Smoke test: run a few APSIM Classic examples (APSIM only runs on Windows).
## FIX: load the hard dependency with library() so a missing package errors
## immediately instead of require() silently returning FALSE; the unused
## `ex <- list.files(...)` listing was removed.
library(apsimx)
apsim_options(warn.versions = FALSE)

## Only execute on Windows, where APSIM Classic is available.
run.classic.examples <- grepl("windows",Sys.info()[["sysname"]], ignore.case = TRUE)

if(run.classic.examples){
  ade <- auto_detect_apsim_examples()
  ## Will only run a few of the shipped examples
  ex.to.run <- c("Canopy","Centro","Millet","Potato","Sugar")
  for(i in ex.to.run){
    tmp <- apsim_example(i)
    cat("Ran (apsim_example):",i,"\n")
  }
  ## Test examples individually: copy each .apsim file into the working
  ## directory, run it, then remove the copy.
  for(i in ex.to.run){
    file.copy(paste0(ade,"/",i,".apsim"),".")
    tmp <- apsim(paste0(i,".apsim"), cleanup = TRUE)
    file.remove(paste0("./",i,".apsim"))
    cat("Ran (apsim):",i,"\n")
  }
}
|
3471fb486732ee7ad27fe7612f85450f8d88eec9
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleadexchangebuyerv14.auto/man/UpdatePrivateAuctionProposalRequest.Rd
|
3f7ba5218210bd60ff8394ed681bb0ac3cf3495b
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 991
|
rd
|
UpdatePrivateAuctionProposalRequest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{UpdatePrivateAuctionProposalRequest}
\alias{UpdatePrivateAuctionProposalRequest}
\title{UpdatePrivateAuctionProposalRequest Object}
\usage{
UpdatePrivateAuctionProposalRequest(externalDealId = NULL, note = NULL,
proposalRevisionNumber = NULL, updateAction = NULL)
}
\arguments{
\item{externalDealId}{The externalDealId of the deal to be updated}
\item{note}{Optional note to be added}
\item{proposalRevisionNumber}{The current revision number of the proposal to be updated}
\item{updateAction}{The proposed action on the private auction proposal}
}
\value{
UpdatePrivateAuctionProposalRequest object
}
\description{
UpdatePrivateAuctionProposalRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other UpdatePrivateAuctionProposalRequest functions: \code{\link{marketplaceprivateauction.updateproposal}}
}
|
af88456428692f8c14955c55f76f5f7d5981b512
|
a4e57b6e4bfd13cb326cdacf014c2bd57d583513
|
/Voedingsnelheid aanzet Vf.R
|
7de17492671008833fd5cf52501f0e769a62f1f2
|
[] |
no_license
|
Dennitizer/CNC
|
2da37e1e5083e5e5426229166abcb083db217c6e
|
34e309bdbc7f887a86e0996aed27febeeefe9eb7
|
refs/heads/master
| 2021-06-29T20:18:40.491045
| 2020-09-28T13:23:24
| 2020-09-28T13:23:24
| 158,672,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 145
|
r
|
Voedingsnelheid aanzet Vf.R
|
# Feed rate Vf (CNC milling): Vf = Fz * N * Zc
Fz = 0.075 # feed per tooth (presumably mm/tooth -- confirm units)
N = 16000 # spindle speed (rpm)
Zc = 2 # number of teeth
Vf = Fz * N * Zc
Vf # mm/min
|
f901421346b785e1570825cd8545719ad9eeb32d
|
6390c203df735c874044a8ffa0f3692bf6010a6a
|
/man/TPP.NW.Rd
|
3d5ce79779313f5f67c192261dde2de8ea41b23e
|
[
"MIT"
] |
permissive
|
felixlindemann/HNUORTools
|
c8c61ec550e2c6673c8d3e158bd7bc21208b26ab
|
0cb22cc0da14550b2fb48c996e75dfdad6138904
|
refs/heads/master
| 2020-05-15T18:37:48.423808
| 2018-02-04T11:04:52
| 2018-02-04T11:04:52
| 16,206,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,730
|
rd
|
TPP.NW.Rd
|
\docType{methods}
\name{TPP.NW}
\alias{TPP.NW}
\alias{TPP.NW,GeoSituation-method}
\title{Transportation-Problem -- North-West-Corner-Rule}
\usage{
TPP.NW(object, ...)
\S4method{TPP.NW}{GeoSituation}(object, ...)
}
\arguments{
\item{object}{Object of Type \code{\link{GeoSituation}}}
\item{...}{\emph{Optional Parameters} See Below.}
}
\description{
Calculates the transportation plan.
}
\note{
for citing use: Felix Lindemann (2014). HNUORTools:
Operations Research Tools. R package version 1.1-0.
\url{http://felixlindemann.github.io/HNUORTools/}.
}
\section{Optional Parameters (\code{...})}{
\subsection{used by \code{\link{TPP.NW}}}{ \describe{
\item{log}{\code{"logical"} Optional Parameter.
Indicating, if the calculation should be logged to
console. Default is \code{FALSE}.} } }
\subsection{Forwarded to the following functions}{ You
may want to check these functions for any other optional
parameters. \itemize{
\item{\code{\link{getInitialMatrix}}}
\item{\code{\link{TPP.Prepare}}} } }
}
\examples{
# demo(HNUTPP03)
}
\author{
Dipl. Kfm. Felix Lindemann
\email{felix.lindemann@hs-neu-ulm.de}
Wissenschaftlicher Mitarbeiter Kompetenzzentrum Logistik
Buro ZWEI, 17
Hochschule fur angewandte Wissenschaften Fachhochschule
Neu-Ulm | Neu-Ulm University Wileystr. 1
D-89231 Neu-Ulm
Phone +49(0)731-9762-1437 Web
\url{www.hs-neu-ulm.de/felix-lindemann/}
\url{http://felixlindemann.blogspot.de}
}
\references{
Domschke
}
\seealso{
\code{\link{GeoSituation}}, \code{\link{Node}},
\code{\link{TPP.NW}}, \code{\link{TPP.CMM}},
\code{\link{TPP.MMM}}, \code{\link{TPP.SteppingStone}},
\code{\link{TPP.MODI}}
}
\keyword{North-West-Corner-Rule}
\keyword{OR}
\keyword{TPP}
\keyword{Transportation-Problem}
|
7ec647684423dd1025df1374acbfa94a879edff0
|
c0a843db4d4c1e0a63f8f9e91d32246a34aaa0f6
|
/tests/regtest-weights.R
|
eeca31f634a541c04dbdf63c6cf0eae35a2f3099
|
[] |
no_license
|
cran/partykit
|
6765cf014fb4528894c34be20967d2b61265ff86
|
e43d2286d0d67830cff8ff7a1ce719782b834d06
|
refs/heads/master
| 2023-04-27T04:43:46.983139
| 2023-04-14T08:20:02
| 2023-04-14T08:20:02
| 17,698,361
| 7
| 15
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,950
|
r
|
regtest-weights.R
|
# Reproduce the pre-3.6 RNG sampling behaviour so stored reference output
# stays comparable.
suppressWarnings(RNGversion("3.5.2"))
library("partykit")

## artificial data ---------------------------------------------------------------------------------
set.seed(0)
# x: evenly spaced predictor; z: binary partitioning factor;
# y: linear in x plus Gaussian noise (no real z effect).
d <- data.frame(x = seq(-1, 1, length.out = 1000), z = factor(rep(0:1, 500)))
d$y <- 0 + 1 * d$x + rnorm(nrow(d))
# Case weights 1..4; dd explicitly repeats each row w times, so a weighted
# fit on d should match an unweighted fit on dd.
d$w <- rep(1:4, nrow(d)/4)
dd <- d[rep(1:nrow(d), d$w), ]
## convenience function: likelihood ratio test -----------------------------------------------------
# LR test of y ~ x (null) against y ~ x * z (alternative): 2 extra df for
# the z main effect and the x:z interaction.  Extra arguments (e.g.
# weights) are forwarded to lm().  Returns a 2x1 matrix holding the test
# statistic and its chi-squared p-value, column-named "z".
lrtest <- function(data, ...) {
  null_fit <- lm(y ~ x, data = data, ...)
  full_fit <- lm(y ~ x * z, data = data, ...)
  stat <- -2 * (logLik(null_fit) - logLik(full_fit))
  out <- matrix(c(stat, pchisq(stat, df = 2, lower.tail = FALSE)), ncol = 1)
  dimnames(out) <- list(c("statistic", "p.value"), "z")
  out
}
## lm: case weights --------------------------------------------------------------------------------
## weighted and explicitly expanded data should match exactly
# (all.equal results are printed; the regression harness compares the
# console transcript against the stored reference output)
lm1 <- lmtree(y ~ x | z, data = d, weights = w, maxdepth = 2)
lm2 <- lmtree(y ~ x | z, data = dd, maxdepth = 2)
all.equal(sctest.modelparty(lm1), sctest.modelparty(lm2))

## LR test should be similar (albeit not identical)
all.equal(sctest.modelparty(lm1), lrtest(dd), tol = 0.05)

## lm: proportionality weights ---------------------------------------------------------------------
## LR test should be similar
lm3 <- lmtree(y ~ x | z, data = d, weights = w, maxdepth = 2, caseweights = FALSE)
all.equal(sctest.modelparty(lm3), lrtest(d, weights = d$w), tol = 0.05)

## constant factor should not change results
lm3x <- lmtree(y ~ x | z, data = d, weights = 2 * w, maxdepth = 2, caseweights = FALSE)
all.equal(sctest.modelparty(lm3), sctest.modelparty(lm3x))

## glm: case weights -------------------------------------------------------------------------------
## for glm different vcov are available
glm1o <- glmtree(y ~ x | z, data = d, weights = w, maxdepth = 2, vcov = "opg")
glm2o <- glmtree(y ~ x | z, data = dd, maxdepth = 2, vcov = "opg")
all.equal(sctest.modelparty(glm1o), sctest.modelparty(glm1o))

glm1i <- glmtree(y ~ x | z, data = d, weights = w, maxdepth = 2, vcov = "info")
glm2i <- glmtree(y ~ x | z, data = dd, maxdepth = 2, vcov = "info")
all.equal(sctest.modelparty(glm1i), sctest.modelparty(glm2i))

glm1s <- glmtree(y ~ x | z, data = d, weights = w, maxdepth = 2, vcov = "sandwich")
glm2s <- glmtree(y ~ x | z, data = dd, maxdepth = 2, vcov = "sandwich")
all.equal(sctest.modelparty(glm1s), sctest.modelparty(glm2s))

## different vcov should yield similar (albeit not identical) statistics
all.equal(sctest.modelparty(glm1o), sctest.modelparty(glm1i), tol = 0.05)
all.equal(sctest.modelparty(glm1o), sctest.modelparty(glm1s), tol = 0.05)

## LR test should be similar
all.equal(sctest.modelparty(glm1o), lrtest(dd), tol = 0.05)

## glm: proportionality weights --------------------------------------------------------------------
## different test versions should be similar
glmFo <- glmtree(y ~ x | z, data = d, weights = w, maxdepth = 2, caseweights = FALSE, vcov = "opg")
glmFi <- glmtree(y ~ x | z, data = d, weights = w, maxdepth = 2, caseweights = FALSE, vcov = "info")
glmFs <- glmtree(y ~ x | z, data = d, weights = w, maxdepth = 2, caseweights = FALSE, vcov = "sandwich")
all.equal(sctest.modelparty(glmFo), sctest.modelparty(glmFi), tol = 0.05)
all.equal(sctest.modelparty(glmFo), sctest.modelparty(glmFs), tol = 0.05)
all.equal(sctest.modelparty(glmFo), lrtest(d, weights = d$w), tol = 0.05)

## constant factor should not change results
glmFxo <- glmtree(y ~ x | z, data = d, weights = 2 * w, maxdepth = 2, caseweights = FALSE, vcov = "opg")
glmFxi <- glmtree(y ~ x | z, data = d, weights = 2 * w, maxdepth = 2, caseweights = FALSE, vcov = "info")
glmFxs <- glmtree(y ~ x | z, data = d, weights = 2 * w, maxdepth = 2, caseweights = FALSE, vcov = "sandwich")
all.equal(sctest.modelparty(glmFo), sctest.modelparty(glmFxo))
all.equal(sctest.modelparty(glmFi), sctest.modelparty(glmFxi))
all.equal(sctest.modelparty(glmFs), sctest.modelparty(glmFxs))
|
61d80a73d21a6cd6358ebf13b74e7590d65d3402
|
caf361bdbc2459187fb58fae876bad5497e532a1
|
/man/plot_pseudotime_heatmap.Rd
|
3b4804de974c5c30c5841be0b1c9f0ff22468e70
|
[
"MIT"
] |
permissive
|
ddiez/scmisc
|
35efffabe859ddc6ac9c2c20f00d283a376def44
|
f19819e7e736cfd167fd4b0c29d7290d66ab961a
|
refs/heads/master
| 2023-08-17T04:08:03.971880
| 2023-08-06T13:35:17
| 2023-08-06T13:35:17
| 180,719,852
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 571
|
rd
|
plot_pseudotime_heatmap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trajectory.R
\name{plot_pseudotime_heatmap}
\alias{plot_pseudotime_heatmap}
\alias{plot_pseudotime_heatmap.Seurat}
\title{plot_pseudotime_heatmap}
\usage{
plot_pseudotime_heatmap(x, ...)
\method{plot_pseudotime_heatmap}{Seurat}(
x,
features,
assay = "RNA",
slot = "data",
reduction = "pseudotime",
pseudo.color = NULL,
...
)
}
\arguments{
\item{x}{and object with pseudotime information.}
\item{...}{arguments passed down to methods.}
}
\description{
plot_pseudotime_heatmap
}
|
ddb143fdb224bcf15ba6ede30cf6d4bdb665f887
|
51d951ec46b6e9d1e6449b22e71b56ba19b377fd
|
/ui.R
|
cfcfcf70c80c155f525fa5fca744888614097d24
|
[] |
no_license
|
congiceee/Developing_Data_Products_Assignment
|
8b041f6a2d4792fc2bea4a2d2b706ffb1ef4b255
|
213b5a5fe6caeb21f1e9c9deabf51839230bea37
|
refs/heads/master
| 2021-01-20T14:49:49.158092
| 2017-05-08T23:13:16
| 2017-05-08T23:13:16
| 90,679,966
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,986
|
r
|
ui.R
|
library(shiny)

# Shiny UI for exploring the `abalone` data with simple one-regressor
# linear models.
# NOTE(review): shinyUI() is legacy wrapping (a plain fluidPage() suffices
# in current Shiny), and the "choose one" checkbox groups do not enforce a
# single selection -- radioButtons() would; confirm intent before changing.
shinyUI(fluidPage(
  titlePanel("Analysis of the 'abalone' data"),
  sidebarLayout(
    sidebarPanel(
      # Random subsample size, as a percentage of all observations.
      sliderInput("percentage",
                  "Percentage of considered observations (randomly chosen):",
                  min = 0, max = 100, value = 25),
      # Outcome variable (server side expects exactly one ticked).
      h4('Please choose one outcome:'),
      checkboxInput("OLS2", "LongestShell"),
      checkboxInput("OD3", "Diameter"),
      checkboxInput("OH4", "Height"),
      checkboxInput("OW5", "WholeWeight"),
      checkboxInput("OS6", "ShuckedWeight"),
      checkboxInput("OVW7", "VisceraWeight"),
      checkboxInput("OSW8", "ShellWeight"),
      checkboxInput("OR9", "Rings"),
      # Regressor variable (server side expects exactly one ticked).
      h4('Please choose one regressor:'),
      checkboxInput("RLS2", "LongestShell"),
      checkboxInput("RD3", "Diameter"),
      checkboxInput("RH4", "Height"),
      checkboxInput("RW5", "WholeWeight"),
      checkboxInput("RS6", "ShuckedWeight"),
      checkboxInput("RVW7", "VisceraWeight"),
      checkboxInput("RSW8", "ShellWeight"),
      checkboxInput("RR9", "Rings"),
      # Optional grouping by abalone type, and optional lm() overlay.
      h4('Please choose whether to regard type as the condition:'),
      checkboxInput("Typ", "Type"),
      h4('Do you want to include regression lines:'),
      checkboxInput("lmline", "Yes")
    ),
    mainPanel(
      h2('Note:'),
      h4('1. This is a simple application of "lm" model where only one regressor is considered at one time since we would like to illustrate the function of "lm" in a 2-dimensional graph.'),
      h4('2. First, you need to choose the desired outcome and regressor from the corresponding list to show a scatterplot below.'),
      h4('3. Further, you can add the variable "Type" to investigate the variation in different groups.'),
      h4('4. The regression line also presents potential features in data.'),
      h4('5. You can randomly choose a fraction of the dataset. Otherwise, just slide the bar to 100%.'),
      # Scatterplot rendered by the server as output$plot1.
      plotOutput('plot1')
    )
  )
))
|
82c6a34a3bb007669b8c7459e830e0d77462c3f2
|
cf95b7cb899ecc28629f30fadfc34b74d19712c1
|
/man/RSE.Rd
|
3ec7bf3f368cf2093a590b78e55716b0703a360e
|
[] |
no_license
|
cddesja/REPM
|
02bfd4d34db4f4653ae87ce741d0d8493df4ab29
|
543b2df65dd2b438c99d9e77edb3837bfb96f9f0
|
refs/heads/master
| 2020-09-24T00:57:19.758881
| 2017-02-01T19:45:26
| 2017-02-01T19:45:26
| 66,779,271
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,510
|
rd
|
RSE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{RSE}
\alias{RSE}
\title{The Rosenberg Self-Esteem Scale}
\format{A data frame with 1000 participants who responded to 10 rating scale items in an interactive version of the Rosenberg Self-Esteem Scale (Rosenberg, 1965). There are also additional demographic items about the participants:
\describe{
\item{Q1}{I feel that I am a person of worth, at least on an equal plane with others.}
\item{Q2}{I feel that I have a number of good qualities.}
\item{Q3}{All in all, I am inclined to feel that I am a failure.}
\item{Q4}{I am able to do things as well as most other people.}
\item{Q5}{I feel I do not have much to be proud of.}
\item{Q6}{I take a positive attitude toward myself.}
\item{Q7}{On the whole, I am satisfied with myself.}
\item{Q8}{I wish I could have more respect for myself.}
\item{Q9}{I certainly feel useless at times.}
\item{Q10}{At times, I think I am no good at all.}
\item{Gender}{Chosen from a drop down list (1=male, 2=female, 3=other; 0=none was chosen)}
\item{Age}{Entered as a free response. (0=response that could not be converted to integer)}
\item{Source}{How the user came to the web page of the RSE scale (1=Front page of personality website, 2=Google search, 3=other)}
\item{Country}{Inferred from technical information using MaxMind GeoLite}
}}
\source{
The The Rosenberg Self-Esteem Scale is available at \url{http://personality-testing.info/tests/RSE.php}.
}
\usage{
RSE
}
\description{
The RSE data set was obtained via online with an interactive version of the Rosenberg Self-Esteem Scale (Rosenberg, 1965). Individuals were informed at the start of the test that their data would be saved. When they completed the scale, they were asked to confirm that the responses they had given were accurate and could be used for research, only those who confirmed are included in this dataset.
A random sample of 1000 participants who completed all of the items in the scale were included in the RSE data set. All of the 10 rating scale items were rated on a 4-point scale (i.e., 1=strongly disagree, 2=disagree, 3=agree, and 4=strongly agree). Items 3, 5, 8, 9 and 10 were reversed-coded in order to place all the items in the same direction. That is, higher scores indicate higher self-esteem.
}
\references{
Rosenberg, M. (1965). Society and the adolescent self-image. Princeton, NJ: Princeton University Press.
}
\keyword{datasets}
|
ee41143274b7d25abc39984999dab4889322da70
|
b0b8b208ff6e7b6308a1b08df1d3606bf4e5f98d
|
/07. Regression Models/quiz01.R
|
e20dbe1f017f37347266795a12d255aa57487cd2
|
[] |
no_license
|
changboon/coursera
|
f5028a9a05019affc531268f68d1ae3fa878295e
|
a7eba816047d4352199d26dae3ad1dd16c0b8ba1
|
refs/heads/master
| 2021-01-13T02:23:56.323219
| 2015-01-20T10:01:30
| 2015-01-20T10:01:30
| 29,004,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 938
|
r
|
quiz01.R
|
quiz 01
1.
x <- c(0.18, -1.54, 0.42, 0.95)
w <- c(2, 1, 3, 1)
u <- c(0.1471, 0.0025, 1.077, 0.300)
sapply(u,function(u){
sum( w*(x-u)^2 )
})
2.
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
beta1<-cor(y,x)* sd(y)/sd(x)
beta0<-mean(y)-beta1*mean(x)
rbind(c(beta0,beta1),coef(lm(y~x-1)))
lm(y~x-1)
3.
data(mtcars)
lm(mpg~wt, data=mtcars)
4.
cor(Y,X) * (SD(Y)/SD(X)) = slope
0.5 * (1/.5)
5.
0.4 * 1.5
6.
x <- c(8.58, 10.46, 9.01, 9.64, 8.86)
m <- mean(x)
(x-m)/sd(x)
7.
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
beta1<-cor(y,x)* sd(y)/sd(x)
beta0<-mean(y)-beta1*mean(x)
rbind(c(beta0,beta1),coef(lm(y~x)))
9.
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
u <- c(0.573, 0.8, 0.36, 0.44)
sapply(u,function(u){
sum( (x-u)^2 )
})
|
cc7aa4dace1618ae99a8fc8910edcf2977ceac87
|
3c237a825eb64d547a4788eaaeeda7fb66354c5e
|
/cachematrix.R
|
75cea72c9a8f8517a509dba9ed79a56580bbb62d
|
[] |
no_license
|
guiml/ProgrammingAssignment2
|
af65acbdc85dac271225274c27066f5e9aa56fd2
|
cea7ff87b20615c609668f6f102274c66844aa78
|
refs/heads/master
| 2021-01-15T22:38:49.498172
| 2016-02-28T20:45:40
| 2016-02-28T20:45:40
| 52,684,804
| 0
| 0
| null | 2016-02-27T19:31:24
| 2016-02-27T19:31:24
| null |
UTF-8
|
R
| false
| false
| 1,574
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
#invertedm is where the result (inveted matrix) is stored.
#Everytime makeCacheMatrix is called, this string is set to null
#This function has 4 functions in it:
# - set, to set the matrix to be inverted in this function
# - get, to retrieve the matrix to be inverted in this function
# - setinv, to store inverted matrix values (invertedm)
# - getinv, to retrieve the invertedmatrix (invertedm)
makeCacheMatrix <- function(x = matrix()) {
invertedm <- NULL
set <- function(y) {
x <<- y
invertedm <<- NULL
}
get <- function() x
setinv <- function(solve) invertedm <<- solve
getinv <- function() invertedm
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Write a short comment describing this function
#This function has 2 possible outcomes based on a check:
# CHECK if there is anything on cache (if invertedm is not null)
# CASE YES (not NULL) prints out "getting.." and return the stored value by calling getinv
# CASE NO (it is NULL) process the value given and store in the cache using setinv
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
invertedm <- x$getinv()
if(!is.null(invertedm)) {
message("getting cached data")
return(invertedm)
}
data <- x$get()
invertedm <- solve(data, ...)
x$setinv(invertedm)
invertedm
}
|
3b131c1005f1ed5629cd43667dc71c58efa7818a
|
65592e8ad8df4a48b22440b879d8d6735f533d4c
|
/NEU-IntermediateAnalytics/Dong Quoc Tuong-M3 Project- Lasso Pratice.r
|
981f178803588ddf773cf151813b2d82a96c3e95
|
[] |
no_license
|
saisowmya7095/R-Studio
|
78c8c7ac246ff7a7284a1f50c84a0ef4705500d6
|
22407d01dea94841df8e3f9d7b9c744b655b1b2b
|
refs/heads/master
| 2023-03-10T15:22:19.921633
| 2021-02-23T16:49:14
| 2021-02-23T16:49:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,095
|
r
|
Dong Quoc Tuong-M3 Project- Lasso Pratice.r
|
rm(list = ls())
install.packages('ncvreg')
library('ncvreg')
install.packages('bigmemory')
library('bigmemory')
install.packages('biglasso')
library('biglasso')
#Excerise 1
install.packages('lars') #Install lars,gmlnet packages and introduce the diabetes dataset
library('lars')
data(diabetes)
install.packages('glmnet')
library('glmnet')
#Excerise 2 #Use the dataset to plot the scatter plot for the predictor x with y
summary(diabetes$x)
cbind(summary(diabetes$x))
par(mfrow=c(2,5))
for(i in 1:10)
plot(diabetes$x[,i], diabetes$y)
abline(lm(diabetes$y~diabetes$x[,i]))
layout(1)
#Excerise 3
OLS<- lm(diabetes$y ~ diabetes$x) #Regress the result in x using OLS to use as the benchmark
summary(OLS)
cbind(summary(OLS))
#Excerise 4
OLS <- glmnet(diabetes$x, diabetes$y) #Graph to see when the x's coefficients against the L1 norm of the beta vector shink to 0
plot(OLS, xvar = "norm", label = TRUE)
#Excerise 5 #Cross validation curve and the value of lambda
fitted<- cv.glmnet(x=diabetes$x, y=diabetes$y, alpha = 1, nlambda = 1000)
plot(fitted)
small_lambda<-fitted$lambda.min
#Excerise 6 #Get the estimated beta matrix, indicates which predictors are important in explaining the variation in y
fitted2<- glmnet(x=diabetes$x, y=diabetes$y, alpha = 1, lambda =small_lambda)
fitted2$beta
#Excerise 7
One_se<-fitted$lambda.1se
One_se
fitted3 <- glmnet(x=diabetes$x, y=diabetes$y, alpha = 1, lambda =One_se)
fitted3$beta
#Excerise 8
summary(diabetes$x2) #Using x2 to do the analysis
OLS2<- lm(diabetes$y ~ diabetes$x2)
summary(OLS2)
cbind(summary(OLS2))
#Excerise 9
OLS2 <- glmnet(diabetes$x2, diabetes$y)
par(mfrow=c(1,1))
plot(OLS, xvar = "norm", label = TRUE)
#Excerise 10
fitted_x2<- cv.glmnet(x=diabetes$x2, y=diabetes$y, alpha = 1, nlambda = 1000)
plot(fitted_x2)
small_lambda_x2<-fitted_x2$lambda.min
fitted2_x2<- glmnet(x=diabetes$x2, y=diabetes$y, alpha = 1, lambda =small_lambda_x2)
fitted2_x2$beta
|
d7ca26f35559fc7fb3066e981f1fc9763dd3cfc1
|
b2702e65d031a9e0cb8980154b8f7709fbc0779b
|
/Scripts/0_msigdb_FET.R
|
db91da085032aa426dc29763ca5e828cb0d59acf
|
[] |
no_license
|
integrativenetworkbiology/Tumor_invasion_esLUAD
|
51027e40187d3db6e5fa0710da259c2859a8ace1
|
64637b2adf8b9a9ed814179c5f01f929f0c7773e
|
refs/heads/main
| 2023-07-13T00:46:07.306947
| 2021-08-30T10:36:36
| 2021-08-30T10:36:36
| 401,305,016
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,025
|
r
|
0_msigdb_FET.R
|
msigdb_fish <- function(query, gene_univ, dataset)
{
set<-getGmt(con=paste("./Msigdb_database/",dataset,".all.v6.0.symbols.gmt",sep=""),geneIdType=SymbolIdentifier(),collectionType=BroadCollection(category=dataset))
univ<-length(gene_univ)
gs1<-query
len1<-length(unique(query))
res<-matrix(NA,nr=length(set),nc=7)
colnames(res)<-c("Geneset","Size","Overlap","OddRatio","p-value","q-value","Genelist")
for(i in 1:length(set)){
gs<-set[i]
gsname<-names(gs)
gs2<-unique(unlist(geneIds(gs)))
gs2<-gs2[gs2%in%gene_univ]
len2<-length(gs2)
overlap<-sort(intersect(gs1,gs2))
over<-length(overlap)
tab<-matrix(c(univ-len1-len2+over,len1-over,len2-over,over),nr=2)
fish_test<-fisher.test(tab,alternative="greater")
pva<-fish_test$p.value
odd<-fish_test$estimate
res[i,c(1:5,7)]<-c(gsname,len2,over,odd,pva,paste(overlap,collapse=", "))
}
res[,6]<-p.adjust(as.numeric(res[,5]),method="BH",n=length(set))
return(res)
}
|
0356c6d3894f86d078781c6cfab4aae7e192c02c
|
dfa6f9ae20dd7b548b25d917c016fc3d5780ae5c
|
/R/calc_ess.R
|
af8c4eebb486dd59e98987470f032aa58cb3e18b
|
[
"MIT"
] |
permissive
|
venkataduvvuri/RBeast
|
13de4b2a8a7fed45ed2f35d6046625a3e0603b9c
|
1e9ba811a1ac328dc4ee9209bb0c68c19fa3e43e
|
refs/heads/master
| 2022-04-27T11:04:19.878296
| 2019-04-10T21:50:12
| 2019-04-10T21:50:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,356
|
r
|
calc_ess.R
|
#' Calculates the Effective Sample Size
#' @param trace the values without burn-in
#' @param sample_interval the interval in timesteps between samples
#' @return the effective sample size
#' @examples
#' filename <- system.file(
#' "extdata", "beast2_example_output.log", package = "RBeast"
#' )
#'
#' # Parse the file as-is and conclude the sampling interval
#' df <- RBeast::parse_beast_log(
#' filename = filename
#' )
#' sample_interval <- df$Sample[2] - df$Sample[1]
#'
#' # Only keep the parameter estimates, do not care about the sampling times anymore
#' estimates <- subset(df, select = -Sample)
#'
#' esses <- rep(NA, ncol(estimates))
#' burn_in_fraction <- 0.1
#' for (i in seq_along(estimates)) {
#' # Trace with the burn-in still present
#' trace_raw <- as.numeric(t(estimates[i]))
#'
#' # Trace with the burn-in removed
#' trace <- RBeast::remove_burn_in(trace = trace_raw, burn_in_fraction = 0.1)
#'
#' # Store the effectice sample size
#' esses[i] <- RBeast::calc_ess(trace, sample_interval = sample_interval)
#' }
#'
#' # Use the values that TRACER shows
#' expected_esses <- c(10, 10, 10, 10, 7, 10, 9, 6)
#' testit::assert(all(expected_esses - esses < 0.5))
#' @export
#' @author Richel J.C. Bilderbeek
calc_ess <- function(trace, sample_interval) {
tracerer::calc_ess(trace, sample_interval)
}
|
b78c81fda65d32305e1d6713c4a5283e2146b8a7
|
1ec32c44fa24619daf1a95c608bc845d8bc1da68
|
/Day12/day12.R
|
423e2a82c1ba264e486267a72ac24fe74873a4ba
|
[] |
no_license
|
Darius-Jaraminas/advent_of_code_2019
|
7a363af95f2dde0206768cdd37fa082ca0c215d7
|
bc00c84baed7cb1f282715e03cf54f5617a6c28c
|
refs/heads/master
| 2020-09-22T09:45:55.195125
| 2019-12-20T13:42:28
| 2019-12-20T13:42:28
| 225,144,508
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,176
|
r
|
day12.R
|
library(dplyr)
library(tidyr)
source("fun.R")
# part 1
t1 <- read_moons(fnm = "test1.txt")
r1 <- run_time(pos = t1, t = 1)
e1 <- calculate_energy(x = r1)
ch1_pos10 <- data.frame(
x = c(2, 1, 3, 2),
y = c(1, -8, -6, 0),
z = c(-3, 0, 1, 4)
)
ch1_vel10 <- data.frame(
x = c(-3, -1, 3, 1),
y = c(-2, 1, 2, -1),
z = c(1, 3, -3, -1)
)
print(all(r1$pos == ch1_pos10))
print(all(r1$vel == ch1_vel10))
print(e1 == 179)
t2 <- read_moons(fnm = "test2.txt")
r2 <- run_time(pos = t2, t = 100)
e2 <- calculate_energy(x = r2)
print(e2 == 1940)
inp <- read_moons(fnm = "input.txt")
rp1 <- run_time(pos = inp, t = 1000)
ep1 <- calculate_energy(x = rp1)
# part 2
r1 <- run_time_keep_all(pos = t1, t = 1000)
all_ts <- extract_time_series(x = r1)
ptrn <- find_pattern_length(x = all_ts)
ptrn <- expand.grid(ptrn)
lcm <- apply(ptrn, 1, find_orbit_length)
y_ptrn <- ptrn[paste0("y", 1:4)]
y_ptrn <- expand.grid(y_ptrn)
y_lcm <- apply(y_ptrn, 1, find_orbit_length)
z_ptrn <- ptrn[paste0("z", 1:4)]
z_ptrn <- expand.grid(z_ptrn)
z_lcm <- apply(z_ptrn, 1, find_orbit_length)
eg <- expand.grid(unique(x_lcm), unique(y_lcm), unique(z_lcm))
lcm <- apply(eg, 1, find_orbit_length)
|
ea137aa1d948c59fbd55dfc439270b5ab994cb7c
|
2e731f06724220b65c2357d6ce825cf8648fdd30
|
/BayesMRA/inst/testfiles/rmvn_arma_scalar/libFuzzer_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1612726097-test.R
|
e347ccbc9f522a486c8195f4b89e22f621035229
|
[] |
no_license
|
akhikolla/updatedatatype-list1
|
6bdca217d940327d3ad42144b964d0aa7b7f5d25
|
3c69a987b90f1adb52899c37b23e43ae82f9856a
|
refs/heads/master
| 2023-03-19T11:41:13.361220
| 2021-03-20T15:40:18
| 2021-03-20T15:40:18
| 349,763,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
1612726097-test.R
|
testlist <- list(a = 1.32963809623256e-105, b = 3.67090132574707e-317)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result)
|
d5ddede862ab3178b7db6c4f1e230433c0bfc163
|
a1c59394a2b42d6756c2b9564697db714b27fe49
|
/man/as.s2dv_cube.Rd
|
30f9abd40e6f2d9bd26143a4cc9d12a588d48d11
|
[] |
no_license
|
cran/CSTools
|
e06a58f876e86e6140af5106a6abb9a6afa7282e
|
6c68758da7a0dadc020b48cf99bf211c86498d12
|
refs/heads/master
| 2023-06-26T01:20:08.946781
| 2023-06-06T13:10:05
| 2023-06-06T13:10:05
| 183,258,656
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,872
|
rd
|
as.s2dv_cube.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.s2dv_cube.R
\name{as.s2dv_cube}
\alias{as.s2dv_cube}
\title{Conversion of 'startR_array' or 'list' objects to 's2dv_cube'}
\usage{
as.s2dv_cube(object, remove_attrs_coords = FALSE, remove_null = FALSE)
}
\arguments{
\item{object}{An object of class 'startR_array' generated from function
\code{Start} from startR package or a list output from function \code{Load}
from s2dv package. Any other object class will not be accepted.}
\item{remove_attrs_coords}{A logical value indicating whether to remove the
attributes of the coordinates (TRUE) or not (FALSE). The default value is
FALSE.}
\item{remove_null}{Optional. A logical value indicating whether to remove the
elements that are NULL (TRUE) or not (FALSE) of the output object. It is
only used when the object is an output from function \code{Load}. The
default value is FALSE.}
}
\value{
The function returns an 's2dv_cube' object to be easily used with
functions with the prefix \code{CST} from CSTools and CSIndicators packages.
The object is mainly a list with the following elements:\cr
\itemize{
\item{'data', array with named dimensions;}
\item{'dims', named vector of the data dimensions;}
\item{'coords', list of named vectors with the coordinates corresponding to
the dimensions of the data parameter;}
\item{'attrs', named list with elements:
\itemize{
\item{'Dates', array with named temporal dimensions of class 'POSIXct'
from time values in the data;}
\item{'Variable', has the following components:
\itemize{
\item{'varName', character vector of the short variable name. It is
usually specified in the parameter 'var' from the functions
Start and Load;}
\item{'metadata', named list of elements with variable metadata.
They can be from coordinates variables (e.g. longitude) or
main variables (e.g. 'var');}
}
}
\item{'Datasets', character strings indicating the names of the
datasets;}
\item{'source_files', a vector of character strings with complete paths
to all the found files involved in loading the data;}
\item{'when', a time stamp of the date issued by the Start() or Load()
call to obtain the data;}
\item{'load_parameters', it contains the components used in the
arguments to load the data from Start() or Load() functions.}
}
}
}
}
\description{
This function converts data loaded using Start function from startR package or
Load from s2dv into an 's2dv_cube' object.
}
\examples{
\dontrun{
# Example 1: convert an object from startR::Start function to 's2dv_cube'
library(startR)
repos <- '/esarchive/exp/ecmwf/system5_m1/monthly_mean/$var$_f6h/$var$_$sdate$.nc'
data <- Start(dat = repos,
var = 'tas',
sdate = c('20170101', '20180101'),
ensemble = indices(1:5),
time = 'all',
latitude = indices(1:5),
longitude = indices(1:5),
return_vars = list(latitude = 'dat', longitude = 'dat', time = 'sdate'),
retrieve = TRUE)
data <- as.s2dv_cube(data)
# Example 2: convert an object from s2dv::Load function to 's2dv_cube'
startDates <- c('20001101', '20011101', '20021101',
'20031101', '20041101', '20051101')
data <- Load(var = 'tas', exp = 'system5c3s',
nmember = 2, sdates = startDates,
leadtimemax = 3, latmin = 10, latmax = 30,
lonmin = -10, lonmax = 10, output = 'lonlat')
data <- as.s2dv_cube(data)
}
}
\seealso{
\code{\link{s2dv_cube}}, \code{\link[s2dv]{Load}},
\code{\link[startR]{Start}} and \code{\link{CST_Load}}
}
\author{
Perez-Zanon Nuria, \email{nuria.perez@bsc.es}
Nicolau Manubens, \email{nicolau.manubens@bsc.es}
}
|
f2bbd8c29fc7a2f5ff1d3b7508cedc2bbb0b9b74
|
7046ab8d510b5f2d8719c017555edee3473176cf
|
/develop/example_vMB.R
|
be676a46fe2d72c80fd160660c97296ea295b3c9
|
[] |
no_license
|
kosugitti/bmds
|
046647d233f366260f4d1bf5d1871da9f1e39d17
|
4703a8416fd80066cb352f9f12aee8e068640d41
|
refs/heads/master
| 2021-01-10T07:56:47.034529
| 2016-03-11T03:13:46
| 2016-03-11T03:13:46
| 52,501,082
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 994
|
r
|
example_vMB.R
|
rm(list=ls())
# B-MDS
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
example1 <- matrix(c(0,1,5,5,0,1,1,5,0),nrow=3,byrow=T)
example2 <- matrix(c(0,1,1,7,1,0,1,7,7,7,0,1,1,1,7,0),nrow=4)
example3 <- matrix(c(0,7,7,7,1,0,1,7,1,7,0,1,1,1,7,0),nrow=4)
example4 <- matrix(c(0,5,4,5,3,3,2,4,1,1,
6,0,4,2,1,2,3,3,4,3,
4,4,0,3,3,4,4,5,4,3,
4,1,2,0,1,1,4,2,4,3,
7,1,2,1,0,1,2,2,2,3,
4,3,4,2,3,0,4,4,4,4,
4,3,4,4,5,5,0,2,4,2,
6,4,4,4,3,4,3,0,4,4,
2,3,3,3,3,2,3,3,0,2,
4,4,4,5,4,4,4,4,4,0),nrow=10)
dat <- example1/apply(example1,2,max)
ini <- list(xi=cmdscale(dat))
standata <- list(N=ncol(dat),X=dat)
stanmodel <- stan_model("develop/vonMisesBayes.stan",model_name="vonMises")
fit_vb <- vb(stanmodel,data=standata)
fit_sp <- sampling(stanmodel,data=standata)
|
216103954a0810fbcc1bd7b231498ec69e2b5ad4
|
c92e159e087121d6eb73a5461da32118583d2008
|
/R/estimate_parameters.R
|
def61f963f2288a9ed9497d9f679fe41e3114df5
|
[
"MIT"
] |
permissive
|
StevenGolovkine/funestim
|
8778a0e1f700cca921932238898e38095a4d0f56
|
634f707eda61e38cf3ca0864bf1c284da0e757c0
|
refs/heads/master
| 2023-04-16T22:51:12.249298
| 2022-05-30T15:45:21
| 2022-05-30T15:45:21
| 362,427,640
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,875
|
r
|
estimate_parameters.R
|
################################################################################
# Functions for parameters estimation #
################################################################################
# Functions for the estimation of the different parameters that are developed in
# S. Golovkine, N. Klutchnikoff and V. Patilea (2021) - Adaptive optimal
# estimation of irregular mean and covariance functions.
# Estimate sigma -- the standard deviation of the noise ----
#' Perform an estimation of the standard deviation of the noise.
#'
#' This function performs an estimation of the standard deviation of the noise
#' in the curves. The following formula is used:
#' \deqn{\hat{\sigma^2} = \frac{1}{N}\sum_{n = 1}^{N}
#' \frac{1}{2(M_n - 1)}\sum_{l = 2}^{M_n}(Y_{n, (l)} - Y_{n, (l-1)})^2}
#'
#' @param curves List, where each element represents a curve. Each curve have to
#' be defined as list with two entries:
#' \itemize{
#' \item \strong{$t} Sampling points.
#' \item \strong{$x} Observed points.
#' }
#' @param delta Numeric (default = 0.1), neighborhood for the estimation.
#'
#' @return Numeric, estimation of the std of the noise \eqn{\sigma}
#'
#' @references S. Golovkine, N. Klutchnikoff, V. Patilea (2020) - Learning the
#' smoothness of noisy curves with application to online curve estimation.
#' @export
estimate_sigma <- function(curves, delta = 0.1){
estimateSigma(curves, delta)
}
# ----
# Estimate the different quantities using pre-smoothing ----
# The quantities are:
# * hurst -> the regularity parameter (Hurst coefficient).
# * constant -> the Holder constant.
# * Var(X_t) -> the variance of the process at point t.
#' Perform a pre-smoothing of the data.
#'
#' This function performs a pre-smoothing of the data using a Nadaraya-Watson
#' estimator. We use an Epanechnikov kernel and a naive bandwidth.
#'
#' @param curves List, where each element represents a curve. Each curve have to
#' be defined as a list with two entries:
#' \itemize{
#' \item \strong{$t} Sampling points
#' \item \strong{$x} Observed points
#' }
#' @param point Numeric (default = 0.5), sampling point at which the data is
#' pre-smoothed.
#' @param delta_f Function (default = NULL), function to determine the delta.
#' @param kernel String (default = 'epanechnikov'), the kernel used for the
#' estimation:
#' \itemize{
#' \item epanechnikov
#' \item uniform
#' \item biweight
#' }
#' @param beta Numeric (default = 1), pre-specified regularity of the curves.
#' The default value is 1, which correspond to at least one time
#' differentiable curves.
#' @param bandwidth_naive Numeric (default = 0), bandwidth to use for the
#' presmoothing. If set to 0, the bandwidth will be defined as
#' \deqn{\frac{\delta}{m}^{1 / (2\beta + 1)}}
#' where
#' \itemize{
#' \item \eqn{m} is the mean number of sampling points per curve.
#' \item \eqn{\delta} is the length of the interval where the smoothing is
#' done.
#' \item \eqn{\beta} represents the regularity of the curves.
#' }
#'
#' @return List, with two entries:
#' \itemize{
#' \item \strong{$grid} Grid on which the smoothing has been done.
#' \item \strong{$x_smooth} The smoothed data.
#' }
#'
#' @references S. Golovkine, N. Klutchnikoff and V. Patilea (2021) - Adaptive
#' optimal estimation of irregular mean and covariance functions.
#' @export
presmoothing <- function(curves, point = 0.5, delta_f = NULL,
kernel = 'epanechnikov',
beta = 1, bandwidth_naive = 0){
m <- curves |> sapply(function(curve) length(curve$t)) |> mean()
delta <- delta_f(m)
t_vec <- c(point - delta / 2, point, point + delta / 2)
if (bandwidth_naive == 0)
#bandwidth_naive <- (delta / m)**(1 / (2 * beta + 1))
bandwidth_naive <- log(m) / m
list(
grid = t_vec,
x_smooth = sapply(curves, function(curve) {
estimate_curve(curve, grid = t_vec, bandwidth = bandwidth_naive)
})
)
}
#' Perform an estimation of \eqn{Var(X_{t_0)}}.
#'
#' This function performs an estimation of \eqn{Var(X_{t_0})} used for the
#' estimation of the bandwidth for the mean and the covariance by a univariate
#' kernel regression estimator.
#'
#' @param curves_smooth List, resulting from the `presmoothing` function.
#'
#' @return Numeric, estimation of the variance at \eqn{t_0}.
#'
#' @references S. Golovkine, N. Klutchnikoff and V. Patilea (2021) - Adaptive
#' optimal estimation of irregular mean and covariance functions.
#' @export
estimate_var <- function(curves_smooth){
stats::var(curves_smooth$x_smooth[2,], na.rm = TRUE)
}
#' Perform an estimation of the regularity \eqn{H_0}.
#'
#' This function performs an estimation of \eqn{H_0} used for the estimation of
#' the bandwidth for a univariate kernel regression estimator defined over
#' continuous domains data.
#'
#' @family estimate \eqn{H_0}
#'
#' @param curves_smooth List, resulting from the `presmoothing` function.
#'
#' @return Numeric, an estimation of \eqn{H_0} at \eqn{t_0}.
#'
#' @references Golovkine S., Klutchnikoff N., Patilea V. (2021) - Adaptive
#' estimation of irregular mean and covariance functions.
#' @export
estimate_regularity <- function(curves_smooth){
current_smooth <- curves_smooth$x_smooth
a <- mean((current_smooth[3,] - current_smooth[1,])**2, na.rm = TRUE)
b <- mean((current_smooth[2,] - current_smooth[1,])**2, na.rm = TRUE)
c <- mean((current_smooth[3,] - current_smooth[2,])**2, na.rm = TRUE)
max(min((2 * log(a) - log(b * c)) / log(16), 1), 0.1)
}
#' Perform the estimation of the constant \eqn{L_0}.
#'
#' This function performs an estimation of \eqn{L_0} used for the estimation of
#' the bandwidth for a univariate kernel regression estimator defined over
#' continuous domains data.
#'
#' @family estimate \eqn{L_0}
#'
#' @param curves_smooth List, resulting from the `presmoothing` function.
#' @param regularity Numeric, estimation of the regularity of the curves and
#' resulting from the the `estimate_regularity` function.
#'
#' @return Numeric, an estimation of \eqn{L_0} at \eqn{t_0}.
#'
#' @references Golovkine S., Klutchnikoff N., Patilea V. (2021) - Adaptive
#' estimation of irregular mean and covariance functions.
#' @export
estimate_constant <- function(curves_smooth, regularity) {
current_grid <- curves_smooth$grid
current_smooth <- curves_smooth$x_smooth
a <- mean((current_smooth[3,] - current_smooth[1,])**2, na.rm = TRUE)
b <- abs(current_grid[3] - current_grid[1])**(2 * regularity)
sqrt(a / b)
}
#' Perform the estimation of the moments.
#'
#' This function performs an estimation of the moments \eqn{E(X^{\alpha}_{t_0})}
#' used for the estimation of the bandwidth for a univariate kernel regression
#' estimator defined over continuous domains data.
#'
#' @family estimate moment
#'
#' @param curves_smooth List, resulting from the `presmoothing` function.
#' @param order Numeric (default = 1), the moment to estimate.
#'
#' @return Numeric, an estimation of the moments \eqn{E(X^{\alpha}_{t_0})}
#'
#' @references Golovkine S., Klutchnikoff N., Patilea V. (2021) - Adaptive
#' estimation of irregular mean and covariance functions.
#' @export
estimate_moment <- function(curves_smooth, order = 1) {
mean(curves_smooth$x_smooth[2,]**order, na.rm = TRUE)
}
#' Perform the estimation of the variance \eqn{Var(X_{s}X_{t)}}.
#'
#' This function performs an estimation of the variance \eqn{Var(X_{s}X_{t)}}
#' used for the estimation of the bandwidth for a univariate kernel regression
#' estimator defined over continuous domains data.
#'
#' @family estimate variance
#'
#' @param curves_smooth_s List, smoothing of the curves at point \eqn{s} and
#' resulting from the `presmoothing` function.
#' @param curves_smooth_t List, smoothing of the curves at point \eqn{t} and
#' resulting from the `presmoothing` function.
#'
#' @return Numeric, estimation of \eqn{Var(X_{s}X_{t)}}.
#'
#' @references Golovkine S., Klutchnikoff N., Patilea V. (2021) - Adaptive
#' estimation of irregular mean and covariance functions.
#' @export
estimate_crossvar <- function(curves_smooth_s, curves_smooth_t) {
current_smooth_s <- curves_smooth_s$x_smooth
current_smooth_t <- curves_smooth_t$x_smooth
stats::var(current_smooth_s[2,] * current_smooth_t[2,], na.rm = TRUE)
}
# ----
# Recursive estimation of the parameters ----
#' Perform a recursive estimation of the parameters.
#'
#' This function performs a recursive estimation of the different parameters
#' used for the estimation of the mean and covariance estimation of functional
#' data. The recursion is made by small step onto the estimation of the
#' regularity of the curves. The pre-smoothing of the data is done using a
#' Nadaraya-Watson estimator and the used bandwidth modified using each new
#' estimation of the regularity.
#'
#' @param curves List, where each element represents a curve. Each curve have to
#' be defined as a list with two entries:
#' \itemize{
#' \item \strong{$t} The sampling points
#' \item \strong{$x} The observed points
#' }
#' @param point Numeric (default = 0.5), sampling point at which the data is
#' pre-smoothed.
#' @param delta_f Function (default = NULL), function to determine the delta.
#' @param kernel_name String (default = 'epanechnikov'), the kernel used for the
#' estimation:
#' \itemize{
#' \item epanechnikov
#' \item uniform
#' \item biweight
#' }
#' @param beta Numeric (default = 1), pre-specified regularity of the curves to
#' start the recursion. The default value is 1, which correspond to at least one
#'time differentiable curves.
#'
#' @return List, with six entries:
#' \itemize{
#' \item \strong{$point} Time point where the smoothing has been done.
#' \item \strong{$curves} Smoothed curves
#' \item \strong{$H} Estimated regularity.
#' \item \strong{$L} Estimated constant.
#' \item \strong{$var} Estimated variance.
#' \item \strong{$mom} Estimated \eqn{E(X^{2}_{t_0})}
#' }
#'
#' @references S. Golovkine, N. Klutchnikoff and V. Patilea (2021) - Adaptive
#' optimal estimation of irregular mean and covariance functions.
#' @export
estimate_parameters_recursion <- function(
curves, point = 0.5,
delta_f = NULL, kernel_name = 'epanechnikov', beta = 1) {
n_loop <- 0
H_estim <- 0
H_prev <- beta
while (abs(H_prev - H_estim) > 0.1) {
H_prev <- beta - 0.1 * n_loop
curves_smooth <- presmoothing(
curves, point, delta_f, kernel = kernel_name, beta = H_prev)
H_estim <- estimate_regularity(curves_smooth)
n_loop <- n_loop + 1
}
L_estim <- estimate_constant(curves_smooth, H_estim)
var_estim <- estimate_var(curves_smooth)
mom_estim <- estimate_moment(curves_smooth, 2)
list(
'point' = point,
'curves' = curves_smooth,
'H' = H_estim,
'L' = L_estim,
'var' = var_estim,
'mom' = mom_estim
)
}
#' Perform a recursive estimation of the parameters over a grid of points for
#' the estimation of the mean.
#'
#' This function performs a recursive estimation of the different parameters
#' used for the estimation of the mean estimation of functional data. The
#' recursion is made by small step onto the estimation of the regularity of the
#' curves. The pre-smoothing of the data is done using a Nadaraya-Watson
#' estimator and the used bandwidth modified using each new estimation of the
#' regularity.
#'
#' @param curves List, where each element represents a curve. Each curve have to
#' be defined as a list with two entries:
#' \itemize{
#' \item \strong{$t} The sampling points
#' \item \strong{$x} The observed points
#' }
#' @param grid Vector (default = c(0.25, 0.5, 0.75)), sampling points at which
#' the data is pre-smoothed.
#' @param delta_f Function (default = NULL), function to determine the delta.
#' @param kernel_name String (default = 'epanechnikov'), the kernel used for the
#' estimation:
#' \itemize{
#' \item epanechnikov
#' \item uniform
#' \item biweight
#' }
#' @param beta Numeric (default = 1), pre-specified regularity of the curves to
#' start the recursion. The default value is 1, which correspond to at least one
#' time differentiable curves.
#'
#' @return Dataframe, with columns:
#' \itemize{
#' \item \strong{$point} Time point where the smoothing has been done.
#' \item \strong{$curves} Smoothed curves.
#' \item \strong{$H} Estimated regularity.
#' \item \strong{$L} Estimated constant.
#' \item \strong{$var} Estimated variance.
#' \item \strong{$mom} Estimated \eqn{E(X^{2}_{t_0})}
#' }
#'
#' @references S. Golovkine, N. Klutchnikoff and V. Patilea (2021) - Adaptive
#' optimal estimation of irregular mean and covariance functions.
#' @export
estimate_parameters_mean <- function(
curves, grid = c(0.25, 0.5, 0.75), delta_f = NULL,
kernel_name = 'epanechnikov', beta = 1){
lapply(1:length(grid), function(idx){
estimate_parameters_recursion(
curves, point = grid[idx], delta_f = delta_f,
kernel_name = kernel_name, beta = beta)
}) |>
(\(x) do.call("rbind", x))() |>
as.data.frame()
}
#' Perform a recursive estimation of the parameters over a grid of points for
#' the estimation of the covariance.
#'
#' This function performs a recursive estimation of the different parameters
#' used for the estimation of the covariance estimation of functional data. The
#' recursion is made by small step onto the estimation of the regularity of the #' curves. The pre-smoothing of the data is done using a Nadaraya-Watson
#' estimator and the used bandwidth modified using each new estimation of the
#' regularity.
#'
#' @param curves List, where each element represents a curve. Each curve have to
#' be defined as a list with two entries:
#' \itemize{
#' \item \strong{$t} The sampling points
#' \item \strong{$x} The observed points
#' }
#' @param grid Vector (default = c(0.25, 0.5, 0.75)), sampling points at which
#' the data is pre-smoothed.
#' @param delta_f Function (default = NULL), function to determine the delta.
#' @param kernel_name String (default = 'epanechnikov'), the kernel used for the
#' estimation:
#' \itemize{
#' \item epanechnikov
#' \item uniform
#' \item biweight
#' }
#' @param beta Numeric (default = 1), pre-specified regularity of the curves to
#' start the recursion. The default value is 1, which correspond to at least one
#' time differentiable curves.
#'
#' @return Dataframe, with columns:
#' \itemize{
#' \item \strong{$point} Time point where the smoothing has been done.
#' \item \strong{$curves} Smoothed curves.
#' \item \strong{$H} Estimated regularity.
#' \item \strong{$L} Estimated constant.
#' \item \strong{$var} Estimated variance.
#' \item \strong{$mom} Estimated \eqn{E(X^{2}_{t_0})}
#' \item \strong{$var_st} \eqn{Var(X_{s}X_{t)}}
#' }
#'
#' @references S. Golovkine, N. Klutchnikoff and V. Patilea (2021) - Adaptive
#' optimal estimation of irregular mean and covariance functions.
#' @export
estimate_parameters_covariance <- function(
curves, grid = c(0.25, 0.5, 0.75), delta_f = NULL,
kernel_name = 'epanechnikov', beta = 1){
params_estim <- estimate_parameters_mean(
curves, grid = grid, delta_f = delta_f,
kernel_name = kernel_name, beta = beta)
zz <- expand.grid(point_s = grid, point_t = grid) |>
merge(params_estim,
by.x = "point_s", by.y = "point", all.x = TRUE,
suffixes = c("", "_s"), sort = FALSE) |>
merge(params_estim,
by.x = "point_t", by.y = "point", all.x = TRUE,
suffixes = c("_s", "_t"), sort = FALSE)
zz_upper <- zz[zz$point_t <= zz$point_s, ]
zz_upper$var_st <- zz_upper |> apply(1, function(rows) {
estimate_crossvar(rows$curves_s, rows$curves_t)
})
zz_upper[order(unlist(zz_upper$point_s), unlist(zz_upper$point_t)), ]
}
|
c38467667da91aafb48626adb19f71cdab11ec3a
|
5d8788dc8d0c22dc451b9f9fb7b9e89823b43394
|
/Chapter_01/r-spark-app/r-script-01.R
|
8125933e60aa33ec63cf1a4bb0a4e0d46d25d90c
|
[] |
no_license
|
ml-resources/spark-ml
|
5b5db52339266b3cf3c1b3bf95980b405d604e57
|
95f8470a3a0c2ccf190640c6f562f772121fbd60
|
refs/heads/branch-ed2
| 2021-01-15T08:36:25.088277
| 2017-05-16T10:09:10
| 2017-05-16T10:09:10
| 48,091,878
| 47
| 42
| null | 2017-05-19T01:49:13
| 2015-12-16T06:24:23
|
Scala
|
UTF-8
|
R
| false
| false
| 1,767
|
r
|
r-script-01.R
|
Sys.setenv(SPARK_HOME = "/home/ubuntu/work/spark-2.0.0-bin-hadoop2.7")
.libPaths(c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib"), .libPaths()))
#load the Sparkr library
library(SparkR)
sc <- sparkR.init(master = "local", sparkPackages="com.databricks:spark-csv_2.10:1.3.0")
sqlContext <- sparkRSQL.init(sc)
user.purchase.history <- "/home/ubuntu/work/ml-resources/spark-ml/Chapter_01/r-spark-app/data/UserPurchaseHistory.csv"
data <- read.df(sqlContext, user.purchase.history, "com.databricks.spark.csv", header="false")
head(data)
count(data)
parseFields <- function(record) {
Sys.setlocale("LC_ALL", "C") # necessary for strsplit() to work correctly
parts <- strsplit(as.character(record), ",")
list(name=parts[1], product=parts[2], price=parts[3])
}
parsedRDD <- SparkR:::lapply(data, parseFields)
cache(parsedRDD)
numPurchases <- count(parsedRDD)
sprintf("Number of Purchases : %d", numPurchases)
getName <- function(record){
record[1]
}
getPrice <- function(record){
record[3]
}
nameRDD <- SparkR:::lapply(parsedRDD, getName)
nameRDD = collect(nameRDD)
head(nameRDD)
uniqueUsers <- unique(nameRDD)
head(uniqueUsers)
priceRDD <- SparkR:::lapply(parsedRDD, function(x) { as.numeric(x$price[1])})
take(priceRDD,3)
totalRevenue <- SparkR:::reduce(priceRDD, "+")
sprintf("Total Revenue : %.2f", totalRevenue)
products <- SparkR:::lapply(parsedRDD, function(x) { list( toString(x$product[1]), 1) })
take(products, 5)
productCount <- SparkR:::reduceByKey(products, "+", 2L)
productsCountAsKey <- SparkR:::lapply(productCount, function(x) { list( as.integer(x[2][1]), x[1][1])})
productCount <- count(productsCountAsKey)
mostPopular <- toString(collect(productsCountAsKey)[[productCount]][[2]])
sprintf("Most Popular Product : %s", mostPopular)
|
041bad6ab6349dac53482c546b76d08ef12e6ede
|
845c33e99e5a2475ae334ed00b12f08f7346c8b0
|
/man/smfishHmrf.hmrfem.multi.Rd
|
bd3e2c855aa99fa8a8c4d792be7e19ceafa59552
|
[] |
no_license
|
cran/smfishHmrf
|
0236d9802a718b6477a8732e8a0130bdcdf7fda4
|
41aae463be028aeb53e34eb5e49a044af0472565
|
refs/heads/master
| 2023-01-05T12:54:43.094726
| 2020-11-03T11:20:02
| 2020-11-03T11:20:02
| 310,523,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,316
|
rd
|
smfishHmrf.hmrfem.multi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smfishHmrf.hmrfem.R
\name{smfishHmrf.hmrfem.multi}
\alias{smfishHmrf.hmrfem.multi}
\title{Performs HMRF for multivariate normal distribution. Accepts R data structures as inputs. Accepts a single beta.}
\usage{
smfishHmrf.hmrfem.multi(
y,
neighbors,
numnei,
blocks,
beta = 0.5,
mu,
sigma,
err = 1e-07,
maxit = 50,
verbose,
dampFactor = NULL,
forceDetectDamp = FALSE,
tolerance = 1e-60
)
}
\arguments{
\item{y}{gene expression matrix}
\item{neighbors}{adjacency matrix between cells}
\item{numnei}{a vector containing number of neighbors per cell}
\item{blocks}{a list of cell colors for deciding the order of cell update}
\item{beta}{the beta to try (smoothness parameter)}
\item{mu}{a 2D matrix (i,j) of cluster mean (initialization)}
\item{sigma}{a 3D matrix (i,j,k) where (i,j) is the covariance of cluster k (initialization)}
\item{err}{the error that is allowed between successive iterations}
\item{maxit}{maximum number of iterations}
\item{verbose}{TRUE or FALSE}
\item{dampFactor}{the dampening factor}
\item{forceDetectDamp}{will auto detect a dampening factor instead of using the specified one}
\item{tolerance}{applicable when forceDetectDamp is set to TRUE}
}
\value{
A list of prob, new mu, new sigma, unnormalized prob after iterations finish
}
\description{
This function performs HMRF \insertCite{Zhu2018}{smfishHmrf} on multivariate normal distributions. Different from other variations, this function accepts R data structures directly as inputs, and only accepts a single value of beta.
This function exists for legacy and compatibility reason. User should use \strong{smfishHmrf.hmrfem.multi.it.min} function.
}
\section{More information}{
Arguments mu and sigma refer to the cluster centroids from running kmeans algorithm.
They serve as initialization of HMRF.
Users should refer to \strong{smfishHmrf.hmrfem.multi.it.min} for more information about function parameters and the requirements.
}
\examples{
data(seqfishplus)
s <- seqfishplus
res<-smfishHmrf.hmrfem.multi(s$y, s$nei, s$numnei, s$blocks, beta=28,
mu=s$mu, sigma=s$sigma, err=1e-7, maxit=50, verbose=TRUE, dampFactor=s$damp,
tolerance=1e-5)
}
\references{
\insertRef{Zhu2018}{smfishHmrf}
}
|
987a80b5e84f3971d65bde640ecd7e4b713e75b7
|
cc74bef8892c8bbdf9a4d24da00503eb7ed962e7
|
/man/get_caseargs.Rd
|
367cc32a796cdffcdfb3491d9899bf74ff622869
|
[] |
no_license
|
Kotkot/ss3sim
|
330961734b4809df3d2299f09ef3d3965b9173ab
|
15e61a27b078222a5ac295cb22f65db0bdb10346
|
refs/heads/master
| 2020-05-29T14:10:44.863275
| 2013-06-13T23:45:49
| 2013-06-13T23:45:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,051
|
rd
|
get_caseargs.Rd
|
\name{get_caseargs}
\alias{get_caseargs}
\title{Take a scenario ID and return argument lists to pass to functions}
\usage{
get_caseargs(folder, scenario, delimiter = "-",
ext = ".txt", case_vals = c("M", "F", "D"),
case_files = list(M = "M", F = "F", D = c("index", "lcomp", "agecomp")))
}
\arguments{
\item{folder}{The folder to look for input files in.}
\item{scenario}{A character object that has the cases
separated by some delimiter. The combination of cases is
referred to as a scenario. E.g. \code{"M1-F1-D1-R1-S1"}.}
\item{delimiter}{The delimiter between the cases.
Defaults to a dash.}
\item{ext}{The file extension of the input files.
Defaults to \code{".txt"}.}
\item{case_vals}{The case types that make up the scenario
ID. In the example above the \code{case_vals} would be
\code{c("M", "F", "D", "R")}}
\item{case_files}{A named list that relates the
\code{case_vals} to the files to return. If each
\code{case_val} has only one file then this is simple.
See the default values for a more complicated case.}
}
\value{
A (nested) named list. The first level of the named list
refers to the \code{case_files}. The second level of the
named list refers to the argument names (the first column
in the input text files). The contents of the list are
the argument values themselves (the second column of the
input text files).
}
\description{
This function calls a number of internal functions to go
from a unique scenario identifier like
\code{"M1-F2-D3-R4-cod"} and read the corresponding input
files (like \code{"M1.txt"}) that have two columns: the
first column contains the argument names and the second
column contains the argument values. The two columns
should be separated by a comma. The output is then
returned in a named list.
}
\details{
The input plain text files should have arguments in the
first column that should be passed on to functions. The
names should match exactly. The second column should
contain the values to be passed to those arguments.
Multiple words should be enclosed in quotes. Vectors
(\code{"c(1, 2, 3"}) should also be enclosed in quotes as
shown.
}
\examples{
\dontrun{
# Create some demo input files first:
wt <- function(x, f) write.table(x, f, row.names = FALSE, col.names =
FALSE, sep = "; ", quote = FALSE)
wt(data.frame("a", 1), "M1-cod.txt")
wt(data.frame("b", "Some words"), "F2-cod.txt")
wt(data.frame("d", 1), "index3-cod.txt")
wt(data.frame("d", 1), "agecomp3-cod.txt")
wt(data.frame(c("e", "f"), c(1, 99)), "lcomp3-cod.txt")
wt(data.frame("c", "c(1, 2, 3)"), "R4-cod.txt")
get_caseargs(".", "M1-F2-D3-R4-cod")
# Clean up the files created above:
file.remove(c("M1-cod.txt", "F2-cod.txt", "index3-cod.txt",
"agecomp3-cod.txt", "lcomp3-cod.txt", "R4-cod.txt"))
# The following output is returned:
# $M
# $M$a
# [1] 1
#
# $F
# $F$b
# [1] "Some words"
#
# $index
# $index$d
# [1] 1
#
# $lcomp
# $lcomp$e
# [1] 1
#
# $lcomp$f
# [1] 99
#
# $agecomp
# $agecomp$d
# [1] 1
#
# $R
# $R$c
# [1] c(1, 2, 3)
}
}
|
d86f0de7be104203ffc5757c4aa6fc15c0bdbfa6
|
dc7c1016493af2179bd6834614be0902a0133754
|
/functions6.R
|
2394f352a2952e49cb88dd429c1e8b7979b2526e
|
[] |
no_license
|
ashishjsharda/R
|
5f9dc17fe33e22be9a6031f2688229e436ffc35c
|
fc6f76740a78d85c50eaf6519cec5c0206b2910c
|
refs/heads/master
| 2023-08-08T13:57:05.868593
| 2023-07-30T13:51:56
| 2023-07-30T13:51:56
| 208,248,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 49
|
r
|
functions6.R
|
hello<-function(){
print("Hello R")
}
hello()
|
86045cabdd6e83fabe645a6c1bace733365472a2
|
62f84d7157e0e3bfc57cc6d6942ea9205adc4463
|
/man/acdb.matchSubstitutionsIndices.Rd
|
ba1a076c0b1878ffb56d874b245729c0b3c7a8fa
|
[
"MIT"
] |
permissive
|
SamT123/acutilsLite
|
251da4cf955c05a4e52a6b10e59fa2876759ea4a
|
fb36cd0f0786b9a9822ebda76fe4a44538569c4b
|
refs/heads/master
| 2023-03-02T20:52:23.145170
| 2021-02-15T10:03:21
| 2021-02-15T10:03:21
| 315,282,286
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 379
|
rd
|
acdb.matchSubstitutionsIndices.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acdatabase_utils.R
\name{acdb.matchSubstitutionsIndices}
\alias{acdb.matchSubstitutionsIndices}
\title{Find db entries with same subsititutions as subs}
\usage{
acdb.matchSubstitutionsIndices(ags, subs, gene = "HA", no_extras = TRUE)
}
\description{
Find db entries with same subsititutions as subs
}
|
3b926225906055ffa12208e7b8e27ec61b7c026b
|
bae0af3bec95ee9123dd74a3cd42a3792f65e25d
|
/Chapter06/06__04__robanova.R
|
658f1bef56b9be62f2c7b24b4d8619b906c7de16
|
[
"MIT"
] |
permissive
|
PacktPublishing/R-Statistics-Cookbook
|
f521ead1a05104b68663521374861dfced4c1bab
|
74eb6057e47df5d43a981c44a52148bd3930c7e1
|
refs/heads/master
| 2023-02-04T14:18:10.374693
| 2023-01-30T09:26:43
| 2023-01-30T09:26:43
| 179,272,388
| 9
| 18
| null | null | null | null |
UTF-8
|
R
| false
| false
| 346
|
r
|
06__04__robanova.R
|
library(robust)
r = PlantGrowth
d = aov(weight ~ group,data=r )
summary(d)
plot(d, 2)
robanova = robust::lmRob(weight ~ group,data=r )
robust::anova.lmRob(robanova)
r[1,1] = 50
r[2,1] = 50
d = aov(weight ~ group,data=r )
plot(d, 2)
summary(d)
robanova = robust::lmRob(weight ~ group,data=r )
robust::anova.lmRob(robanova)
|
bc6fc1878ec861980296eb4009fc7c807925817a
|
a5ac9c2534057ec103a0ff9092c8a9bb15cde702
|
/RepoAdd.R
|
1477be186e2b1a786114619ef34dbb8d940a526b
|
[] |
no_license
|
rivei/RepoTest
|
c112cf37e786b55164aa7abd7fcf66b6609246d8
|
e54f140bb853005458e1c68054720be03515b59e
|
refs/heads/master
| 2021-05-16T03:00:13.180882
| 2020-12-07T02:38:08
| 2020-12-07T02:38:08
| 31,114,483
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 63
|
r
|
RepoAdd.R
|
# RepoTest
## This is a test of my repo
## Why nothing happen?
|
3a72e6974b1899b08dac9c10c41bf3a3317d449d
|
c85471f60e9d5c462de6c60c880d05898ec81411
|
/cache/Ryo-N7|tidy_tuesday_april_3|week_5_april_30__tidy_tuesday_april_30.R
|
b8fc628e8920eedda740173e78002a01a22dccd0
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
a-rosenberg/github-content-scraper
|
2416d644ea58403beacba33349ee127e4eb42afe
|
ed3340610a20bb3bd569f5e19db56008365e7ffa
|
refs/heads/master
| 2020-09-06T08:34:58.186945
| 2019-11-15T05:14:37
| 2019-11-15T05:14:37
| 220,376,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,028
|
r
|
Ryo-N7|tidy_tuesday_april_3|week_5_april_30__tidy_tuesday_april_30.R
|
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ------------------------------------------------------------------------
library(dplyr)
library(tidyr)
library(ggplot2)
library(readr)
acs_survey <- read_csv("../april_30_week_5/acs2015_county_data.csv")
glimpse(acs_survey)
# gather on Ethnicity
acs_survey <- acs_survey %>% gather(key = "ethnicity", value = "percentage", Hispanic:Pacific)
## ------------------------------------------------------------------------
library(albersusa)
counties_map_data <- counties_composite()
glimpse(counties_map_data@data)
counties_map_data@data <- left_join(counties_map_data@data, acs_survey, by = c("name" = "County"))
anti_counties_map_data <- anti_join(counties_map_data@data, acs_survey, by = c("name" = "County"))
# ~50 counties from Alaska and Virginia dont have matching name == County between 2 data sets.........
# CensusID == fips >>> fips is chr and has 0 in front of all
glimpse(counties_map_data@data)
plot(counties_map_data, lwd = 0.25)
c_map <- fortify(counties_map_data, region = "fips")
## ------------------------------------------------------------------------
library(dplyr)
library(ggplot2)
library(maps)
counties <- map_data("county")
acs_survey <- acs_survey %>% mutate(County = tolower(County),
State = tolower(State))
all_county <- counties %>% inner_join(acs_survey, by = c("subregion" = "County",
"region" = "State"))
glimpse(all_county)
county_plot <- function(x) {
all_county$x <- all_county[, x]
counties %>%
ggplot(aes(x = long, y = lat, group = group)) +
coord_fixed(1.3) +
geom_polygon(data = all_county, aes(fill = x), color = "grey30", size = 0.05) +
labs(fill = x) +
scale_fill_distiller(palette = "Spectral") +
theme_void()
}
county_plot("Unemployment")
county_plot("Income")
county_plot("Asian")
county_plot("Poverty")
|
a6d6570ac84dc10c5d7f9359d1b2aed1690b2e20
|
4250e06e89ad4b7042d798691d2a17b258a51e21
|
/250m Segments of Climbs.R
|
b8e7e10e29e525f028b81e23f7ad261f2c98b000
|
[] |
no_license
|
jalnichols/p-c
|
b143bd210e222d6f755a7034d688db31b10f1720
|
1ea6587e28a1c20ce8a2f6870eb4ece7aeaa05e3
|
refs/heads/master
| 2022-06-15T22:03:40.823212
| 2022-06-11T20:25:31
| 2022-06-11T20:25:31
| 199,725,985
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,064
|
r
|
250m Segments of Climbs.R
|
all_stage_data <- dbGetQuery(con, "SELECT * FROM stage_data_perf WHERE year > 2019") %>%
mutate(date = as.Date(date))
#
x250m_segments <- dbGetQuery(con, "SELECT * FROM strava_new_segment_creation_climbs WHERE year > 2019") %>%
mutate(date = as.Date(date)) %>%
inner_join(
dbReadTable(con, "segments_from_strava_data") %>%
mutate(date = as.Date(date),
stage = as.character(stage)) %>%
select(start_km_orig = start_km,
end_km_orig = end_km,
seg_length_orig = length,
stage_length,
gradient,
stage, race, year, date) %>%
group_by(stage, race, year) %>%
filter(gradient >= 0.04) %>%
unique() %>%
mutate(start_km_orig = stage_length - start_km_orig,
end_km_orig = stage_length - end_km_orig) %>%
arrange(start_km_orig) %>%
rowid_to_column() %>%
mutate(rowid = rank(rowid, ties.method = "first")) %>%
ungroup() %>%
rename(original_segment = rowid) %>%
mutate(kind = "expand_climbs") %>%
mutate(start_prior_orig = start_km_orig,
end_next_orig = end_km_orig), by = c("original_segment", "stage", "race", "year", "date")) %>%
inner_join(telemetry_available %>%
select(rider = pcs, rnk, activity_id, total_seconds), by = c("activity_id")) %>%
mutate(rider = str_to_title(rider)) %>%
left_join(
dbGetQuery(con, "SELECT rider, weight FROM rider_attributes") %>%
mutate(rider = str_to_title(rider)) %>%
filter(!is.na(weight)) %>%
group_by(rider) %>%
summarize(weight = median(weight)) %>%
ungroup(), by = c("rider"))
#
#
#
specific_race <- x250m_segments %>%
filter(race == "saudi tour" & stage == 4 & year == 2022)
#
#
#
specific_race %>%
filter(original_segment == 20) %>%
mutate(time = should_be_distance / (segment_speed_kmh * 1000 / 3600)) %>%
group_by(rider) %>%
arrange(desc(end_next)) %>%
mutate(cum_time = total_seconds - cumsum(segment_time)) %>%
mutate(segs = n()) %>%
ungroup() %>%
filter(max(segs) == segs) %>%
filter(rnk <= 45) %>%
group_by(rowid, original_segment) %>%
mutate(cum_time = mean(cum_time) - cum_time) %>%
ungroup() %>%
#filter(rider %in% c("Vine Jay", "Cherel Mikael", "Bettiol Alberto",
# "Johannessen Tobias Halland", "Champoussin Clement")) %>%
ggplot()+
geom_path(aes(x = end_next, y = cum_time, color = rider), size = 1)+
ggrepel::geom_label_repel(data = . %>% filter(rowid == min(rowid) | rowid == max(rowid)),
aes(x = end_next, y = cum_time, label = rider, color = rider))+
scale_x_continuous(breaks = seq(141,145,0.25),
labels = scales::number_format(accuracy = 0.25))+
guides(color = FALSE)
#
#
#
specific_race %>%
filter(original_segment == 3) %>%
group_by(rider) %>%
mutate(segs = n()) %>%
ungroup() %>%
filter(max(segs) == segs) %>%
filter(!is.na(weight)) %>%
mutate(Power = ifelse(Power == 0, NA, Power)) %>%
group_by(rowid) %>%
mutate(segment_gradient = median(segment_gradient)) %>%
mutate(Power_to_Speed = mean((Power/weight) / segment_speed_kmh, na.rm = T)) %>%
ungroup() %>%
mutate(ImpliedWattsKG = ifelse(is.na(Power),segment_speed_kmh * Power_to_Speed, Power/weight)) %>%
filter(rnk <= 25) %>%
arrange(rowid, rider, rnk) %>%
ggplot()+
geom_tile(aes(x = (start_prior+end_next)/2, y = reorder(rider, desc(rnk)),
fill = ImpliedWattsKG))+
#geom_text(aes(x = (start_prior+end_next)/2, y = reorder(rider, desc(rnk)),
# label = paste0(round(segment_gradient,3)*100,"%")))+
theme(axis.ticks.y = element_blank(),
panel.grid = element_blank())+
scale_fill_viridis_c(option = "A")+
labs(x = "KM thru race",
y = "",
fill = "Watts/KG",
title = "Top finishers on Mont Bouquet",
subtitle = "Average Power for 250m segments")
|
5f00dbc28ef7ebf38c21450c1ab525b27848890d
|
f0489c47853fc78a49bfbc28ca3cf39798b17431
|
/man/purity-ANY-ANY-method.Rd
|
102818546505138113619511e8b27cce37e6bd78
|
[] |
no_license
|
pooranis/NMF
|
a7de482922ea433a4d4037d817886ac39032018e
|
c9db15c9f54df320635066779ad1fb466bf73217
|
refs/heads/master
| 2021-01-17T17:11:00.727502
| 2019-06-26T07:00:09
| 2019-06-26T07:00:09
| 53,220,016
| 0
| 0
| null | 2016-03-05T19:46:24
| 2016-03-05T19:46:24
| null |
UTF-8
|
R
| false
| true
| 677
|
rd
|
purity-ANY-ANY-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NMF-class.R
\docType{methods}
\name{purity,ANY,ANY-method}
\alias{purity,ANY,ANY-method}
\title{Default method that should work for results of clustering algorithms, that have a
suitable \code{predict} method that returns the cluster membership vector:
the purity is computed between \code{x} and \code{predict{y}}}
\usage{
\S4method{purity}{ANY,ANY}(x, y, ...)
}
\description{
Default method that should work for results of clustering algorithms, that have a
suitable \code{predict} method that returns the cluster membership vector:
the purity is computed between \code{x} and \code{predict{y}}
}
|
ba8cd02cc98b4bb7f8d9b655eb09b91f842ccd0d
|
c3fa626b6076d7e5d6df2b4205f6d33bb966483c
|
/man/defRootCells.Rd
|
ff6af7bb0c81504d0d6c634fbf64be6ddc330515
|
[] |
no_license
|
tinahub10/CytoTree
|
b09433ef08824d136545f2e9c723147bfd6ccad7
|
1c60ff8d9dd9cd5cfed4a44ec6c3e17727e306e2
|
refs/heads/master
| 2023-04-08T06:08:57.228441
| 2020-11-08T12:13:59
| 2020-11-08T12:13:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 937
|
rd
|
defRootCells.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pseudotime.R
\name{defRootCells}
\alias{defRootCells}
\title{definition of root cells}
\usage{
defRootCells(object, root.cells = NULL, verbose = FALSE)
}
\arguments{
\item{object}{a CYT object}
\item{root.cells}{vector. Cell name of the root cells}
\item{verbose}{logical. Whether to print calculation progress.}
}
\value{
A CYT object
}
\description{
definition of root cells
}
\examples{
cyt.file <- system.file("extdata/cyt.rds", package = "CytoTree")
cyt <- readRDS(file = cyt.file)
# Define root cells by cluster
cyt <- defRootCells(cyt, root.cells = 6, verbose = TRUE)
cyt <- defRootCells(cyt, root.cells = c(6,8), verbose = TRUE)
# Define root cells by cell names
meta.data <- fetchPlotMeta(cyt)
cells <- meta.data$cell[which(meta.data$stage == "D0")]
cells <- as.character(cells)
cyt <- defRootCells(cyt, root.cells = cells, verbose = TRUE)
}
|
f2b16c56c888328cadbd8b03e18ac400d87d054c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ibdreg/examples/exact.ibd.var.Rd.R
|
78dd7080f712a6c14bec27fa29f2edfadce36ab2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
r
|
exact.ibd.var.Rd.R
|
library(ibdreg)
### Name: exact.ibd.var
### Title: create an ibd.var object
### Aliases: exact.ibd.var
### ** Examples
## create a temporary file using perl script
# unix% exact.ibd.var.pl chrom1.pre 1 chr1.var.tmp
## make an ibd.var object from chr1.var.tmp file
# RorS> chr1.ibd.var <- exact.ibd.var("chr1.var.tmp")
|
a1c343da089769fb7e5d1f95cf4b0cd585b74121
|
278daae804bf17f50336a39cdb54cf5f35f0e164
|
/R/Load Data from database.R
|
213c1e42bac3bc4a8de4db70886e5f8f218130c6
|
[] |
no_license
|
audhalbritter/Flowering
|
413d3e7ac882f96102095d04dd80850fc445ec78
|
90cb3c5b89234e21bab98f9d0b637e58a1790582
|
refs/heads/master
| 2021-12-15T03:09:24.750001
| 2020-07-09T09:59:37
| 2020-07-09T09:59:37
| 135,484,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,988
|
r
|
Load Data from database.R
|
##############################
### Import Function ###
##############################
# Needs to be uploaded first!!!
DownloadData <- function(){
### DOWNLOAD DATABASE FROM OSF ###
# add function here
### DOWNLOAD CLIMATE DATA ###
# get_file(node = "npfa9",
# file = "GriddedDailyClimateData2009-2019.csv",
# path = "data_cleaned")
}
### MAKE CONNCECTIO TO DATABASE ####
# DatabaseConnection <- function(){
#
# con <- dbConnect(SQLite(), dbname = "~/Dropbox/Bergen/seedclimComm/database/seedclim.sqlite")
# # vies all subtables
# #DBI::dbListTables(conn = con)
# # view column names of a table
# #dbListFields(con, "sites")
#
# return(con)
# }
### LOAD DATA FROM DATABASE ###
ImportFertility <- function(){
con <- dbConnect(SQLite(), dbname = "~/Dropbox/Bergen/seedclimComm/database/seedclim.sqlite")
fertile_raw <- tbl(con, "subTurfCommunity") %>%
select(turfID, subTurf, year, species, fertile, dominant) %>% # could also be interesting to import: seedlings, juvenile, adult, vegetative
left_join(tbl(con, "turfs"), by = "turfID") %>%
# only control plots
filter(TTtreat %in% c("TTC", "TT1")) %>%
select(-RTtreat, -GRtreat, -destinationPlotID) %>%
left_join(tbl(con, "plots"), by = c("originPlotID" = "plotID")) %>%
left_join(tbl(con, "blocks"), by = c("blockID")) %>%
left_join(tbl(con, "sites"), by = c("siteID")) %>%
collect() %>%
# Calculate stuff
group_by(turfID, year, species, siteID, blockID, originPlotID, TTtreat, temperature_level, precipitation_level, annualPrecipitation_gridded, summerTemperature_gridded) %>%
summarize(SumOffertile = sum(fertile), NumberOfOccurrence = n()) %>% # loos colums here, need to add in group_by above if I need to keep more columns
mutate(PropFertile = SumOffertile / NumberOfOccurrence) %>%
mutate(Experiment = ifelse(is.na(TTtreat), "RTC", "SeedClim"))
return(fertile_raw)
}
MakeMeta <- function(fertile_raw){
meta <- fertile_raw %>%
ungroup() %>%
distinct(siteID, summerTemperature_gridded, annualPrecipitation_gridded)
return(meta)
}
ImportTraits <- function(){
con <- dbConnect(SQLite(), dbname = "~/Dropbox/Bergen/seedclimComm/database/seedclim.sqlite")
# Load taxon and trait data
traits <- tbl(con, "character_traits") %>%
left_join(tbl(con, "taxon", by = "species")) %>%
collect() %>%
spread(key = trait, value = value) %>%
select(-`Common-rear`, -Habitat, -`Soil type`) %>%
# Flowering time
# FloweringStart: Var or FSo => early; otherwise late
rename(FloweringFinish = `Flowering finish`, FloweringStart = `Flowering start`) %>%
mutate(FloweringFinish = plyr::mapvalues(FloweringFinish, c("H<f8>st", "MSo", "SSo", "FSo", "V<e5>r"), c("Host", "MSo", "SSo", "FSo", "Var"))) %>%
mutate(FloweringStart = plyr::mapvalues(FloweringStart, c("MSo", "SSo", "FSo", "V<e5>r"), c("MSo", "SSo", "FSo", "Var")))
return(traits)
# Occurrence
# Upper: everything but not HAlp, MAlp or Lalp => lowland
# Lower: LAlp but not Nem, BNem, SBor => alpine
}
ImportSite <- function(){
con <- dbConnect(SQLite(), dbname = "~/Dropbox/Bergen/seedclimComm/database/seedclim.sqlite")
# Load site details
sites.raw <- tbl(con, "sites") %>%
collect()
sites <- sites.raw %>%
select(siteID, latitude, longitude, `altitude(DEM)`, annualPrecipitation_gridded, temperature_level, summerTemperature_gridded, precipitation_level) %>%
rename("elevation_masl" = `altitude(DEM)`) %>%
mutate(temperature_level2 = case_when(temperature_level == "1" ~ "alpine",
temperature_level == "2" ~ "subalpine",
temperature_level == "3" ~ "boreal"),
precipitation_level2 = as.character(case_when(precipitation_level == "1" ~ "600",
precipitation_level == "2" ~ "1200",
precipitation_level == "3" ~ "2000",
precipitation_level == "4" ~ "2700")),
precipitation_level2 = factor(precipitation_level2, levels = c("600", "1200", "2000", "2700")),
temperature_level2 = factor(temperature_level2, levels = c("alpine", "subalpine", "boreal"))
return(sites)
}
### LOAD WEATHER DATA FROM MODEL
ImportClimate <- function(meta){
climate <- read_csv(file = "data_cleaned/GriddedDailyClimateData2009-2019.csv")
# Calculate monthly values (sum for prec, mean for temp)
monthlyClimate <- climate %>%
select(Site, Date, Precipitation, Temperature) %>%
mutate(Date = dmy(paste0("15-",format(Date, "%b.%Y")))) %>%
pivot_longer(cols = c(Temperature, Precipitation), names_to = "Logger", values_to = "value") %>%
group_by(Date, Site, Logger) %>%
summarise(n = n(), mean = mean(value), sum = sum(value)) %>%
mutate(Value = ifelse(Logger == "Precipitation", sum, mean)) %>%
select(-n, -sum, -mean)
# get annual values
summer <- monthlyClimate %>%
filter(Logger == "Temperature" & month(Date) %in% 6:9) %>%
mutate(Year2 = year(Date)) %>%
group_by(Year2, Site, Logger) %>%
summarise(n = n(), Value = mean(Value)) %>%
mutate(Logger = "MeanSummerTemp") %>%
select(-n)
Climate <- monthlyClimate %>%
mutate(Year = year(Date)) %>%
mutate(Year2 = if_else(month(Date) > 7, Year + 1, Year)) %>%
group_by(Year2, Site, Logger) %>%
summarise(n = n(), Value = sum(Value)) %>%
filter(Logger == "Precipitation") %>%
select(-n) %>%
bind_rows(summer) %>%
spread(key = Logger, value = Value) %>%
rename(AnnPrec = Precipitation, Year = Year2) %>%
ungroup() %>%
mutate(Site = recode(Site, Ulv = "Ulvhaugen", Hog = "Hogsete", Vik = "Vikesland", Gud = "Gudmedalen", Ram = "Rambera", Arh = "Arhelleren", Skj = "Skjellingahaugen", Ves = "Veskre", Alr = "Alrust", Ovs = "Ovstedal", Fau = "Fauske", Lav = "Lavisdalen"))
# Previous year climate
ClimatePrev <- Climate %>%
mutate(Year = Year + 1) %>%
rename(MeanSummerTempPrev = MeanSummerTemp, AnnPrecPrev = AnnPrec)
# Annomalies, centre and scale
Climate <- Climate %>%
left_join(ClimatePrev, by = c("Year", "Site")) %>%
left_join(meta, by = c("Site" = "siteID")) %>%
# Calculate annomalies
mutate(AnnPrecAnnomalie = AnnPrec - annualPrecipitation_gridded,
AnnPrecPrevAnnomalie = AnnPrecPrev - annualPrecipitation_gridded,
MeanSummerTempAnnomalie = MeanSummerTemp - summerTemperature_gridded,
MeanSummerTempPrevAnnomalie = MeanSummerTempPrev - summerTemperature_gridded) %>%
# centre and scale data
mutate(AnnPrecGrid.sc = as.vector(scale(annualPrecipitation_gridded)),
MeanSummerTempGrid.sc = as.vector(scale(summerTemperature_gridded)),
AnnPrec.sc = as.vector(scale(AnnPrec)),
MeanSummerTemp.sc = as.vector(scale(MeanSummerTemp)),
AnnPrecPrev.sc = as.vector(scale(AnnPrecPrev)),
MeanSummerTempPrev.sc = as.vector(scale(MeanSummerTempPrev))) %>%
mutate(AnnPrecAnnomalie.sc = as.vector(scale(AnnPrecAnnomalie)),
MeanSummerTempAnnomalie.sc = as.vector(scale(MeanSummerTempAnnomalie)),
AnnPrecPrevAnnomalie.sc = as.vector(scale(AnnPrecPrevAnnomalie)),
MeanSummerTempPrevAnnomalie.sc = as.vector(scale(MeanSummerTempPrevAnnomalie))) %>%
mutate(Temp_level = case_when(Site %in% c("Ulvhaugen", "Lavisdalen", "Gudmedalen", "Skjellingahaugen") ~ "alpine",
Site %in% c("Alrust", "Hogsete", "Rambera", "Veskre") ~ "subalpine",
Site %in% c("Fauske", "Vikesland", "Arhelleren", "Ovstedal") ~ "boreal"),
Prec_level = case_when(Site %in% c("Ulvhaugen", "Alrust", "Fauske") ~ "600mm",
Site %in% c("Lavisdalen", "Hogsete", "Vikesland") ~ "1200mm",
Site %in% c("Gudmedalen", "Arhelleren", "Rambera") ~ "2000mm",
Site %in% c("Skjellingahaugen", "Veskre", "Ovstedal") ~ "2700mm")) %>%
mutate(Prec_level = factor(Prec_level, levels = c("600mm", "1200mm", "2000mm", "2700mm")))
return(Climate)
}
CombineandCurate <- function(fertile_raw, Climate, traits){
# join climate data
fertile_raw <- fertile_raw %>%
left_join(Climate, by = c("siteID" = "Site", "year" = "Year", "summerTemperature_gridded", "annualPrecipitation_gridded"))
# Join fertility and trait data
fertile_raw <- fertile_raw %>%
left_join(traits, by = "species")
### Data curation
fertile <- fertile_raw %>%
filter(year != 2010) %>% # remove 2010, only 1 plot (506)
# remove species that occur in less than 3 years
group_by(turfID, species) %>%
mutate(nYears = n()) %>%
filter(nYears > 3) %>%
# remove shrubs etc.
filter(functionalGroup %in% c("graminoid", "forb")) %>%
# relative fertility (correct for species having different proportion of fertility)
group_by(species) %>%
mutate(mean.fertile = mean(PropFertile)) %>%
mutate(rel.fertile = PropFertile / mean.fertile) %>%
mutate(rel.fertile = ifelse(rel.fertile == "NaN", 0 , rel.fertile)) %>%
# Filter species (at site level) that never flower
group_by(siteID, species) %>%
mutate(sum(SumOffertile)) %>%
filter(`sum(SumOffertile)` != 0) %>%
ungroup()
return(fertile)
}
### SELECT COMMON SPECIES ###
CommonSpecies <- function(fertile){
# Select common species (in time or space) that occur in 8 or more turfs
commonSP <- fertile %>%
filter(NumberOfOccurrence > 20) %>%
ungroup() %>%
distinct(species)
fertileCommon <- fertile %>%
inner_join(commonSP, by = "species") %>%
group_by(year, species) %>%
mutate(n = n()) %>%
filter(n > 7)
}
|
927cd4b5acba595933c123345286fbae61f6ee42
|
b582cee58d5a94ebb8411eaf6a6d1b9ac148fa1b
|
/lessons/3_July18-Reg_LogReg/day3_homework/problems_6_1.R
|
8c33e91350f7d2a3fda5230a442bcdfda2d230a7
|
[] |
no_license
|
anhnguyendepocen/HarvardSummerStudent2018
|
54e593f4f4ad2103106d7ba812d78ebbdff69012
|
0b96567938739012328236e7e801041764b73cb2
|
refs/heads/master
| 2020-11-29T08:16:48.749510
| 2018-08-02T01:33:59
| 2018-08-02T01:33:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,019
|
r
|
problems_6_1.R
|
library(MLmetrics)
options(scipen=999)
set.seed(1)
setwd('~/HarvardSummerStudent2018/book datasets')
df <- read.csv('BostonHousing.csv')
df$CAT..MEDV <- NULL
df.training.n <- round(nrow(df) %*% .8)
df.training.idx <- sample(1:nrow(df), df.training.n)
df.training <- df[df.training.idx,]
df.validation <- df[-df.training.idx,]
lm <- lm(MEDV ~ CRIM + CHAS + RM, df.training)
summary(lm)
lm.all <- lm(MEDV ~ ., df.training)
summary (lm.all)
df.validation.predicted <- predict(lm.all, df.validation)
MLmetrics::MedianAE(df.validation.predicted, df.validation$MEDV)
heatmap(cor(df.training[, !(names(df.training) %in% c('MEDV'))]), symm = TRUE, col = cm.colors(20))
df.training.trimmed <- df.training[, !(names(df.training) %in% c('INDUS', 'NOX', 'DIS'))]
heatmap(cor(df.training.trimmed[, !(names(df.training.trimmed) %in% c('MEDV'))]), symm = TRUE, col = cm.colors(20))
lm.trimmed <- lm(MEDV ~ ., df.training.trimmed)
summary (lm.trimmed)
## Forward
lm.forward <- step(lm.all, direction = 'forward')
summary (lm.forward)
df.validation.forwardPredicted <- predict(lm.forward, df.validation)
MLmetrics::RMSE(df.validation.forwardPredicted, df.validation$MEDV)
MLmetrics::MAPE(df.validation.forwardPredicted, df.validation$MEDV)
MLmetrics::MedianAE(df.validation.forwardPredicted, df.validation$MEDV)
## Backward
lm.backward <- step(lm.all, direction = 'backward')
summary (lm.backward)
df.validation.backwardPredicted <- predict(lm.backward, df.validation)
MLmetrics::RMSE(df.validation.backwardPredicted, df.validation$MEDV)
MLmetrics::MAPE(df.validation.backwardPredicted, df.validation$MEDV)
MLmetrics::MedianAE(df.validation.backwardPredicted, df.validation$MEDV)
## Both
lm.both <- step(lm.all, direction = 'both')
summary (lm.both)
df.validation.bothPredicted <- predict(lm.both, df.validation)
MLmetrics::RMSE(df.validation.bothPredicted, df.validation$MEDV)
MLmetrics::MAPE(df.validation.bothPredicted, df.validation$MEDV)
MLmetrics::MedianAE(df.validation.bothPredicted, df.validation$MEDV)
|
a27160cfc3031b78d9b724896b250bf41f04ab01
|
61ea89b328ccde8cd0b2c32e1b70a7d7b81117fa
|
/man/plr.Rd
|
79306c0443b8755ee046a030c46cbed3396bbfd4
|
[] |
no_license
|
cran/psda
|
251bc6ec05780052efbed3d7e2363fa9a92264cc
|
3681827dec70b30cccc4f42c1d0276f81ad91e19
|
refs/heads/master
| 2021-07-10T17:17:48.759259
| 2020-05-24T15:40:02
| 2020-05-24T15:40:02
| 102,455,988
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,437
|
rd
|
plr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plr.R
\name{plr}
\alias{plr}
\title{Polygonal linear regression}
\usage{
plr(formula, data, model = TRUE, ...)
}
\arguments{
\item{formula}{an object of class "formula": a symbolic description of the model to be fitted.}
\item{data}{a environment that contains the variables of the study.}
\item{model}{logicals. If TRUE the corresponding components of the fit are returned.}
\item{...}{additional arguments to be passed to the low level polygonal linear regression fitting functions.}
}
\value{
residuals is calculated as the response variable minus the fitted values.
rank the numeric rank of the fitted polygonal linear model.
call the matched call.
fitted.values the fitted mean values.
terms the \code{\link[stats]{terms}}.
coefficients a named vector of coefficients.
model the matrix model for center and radius.
}
\description{
plr is used to fit polygonal linear models.
}
\details{
Polygonal linear regression is the first model to explain the behavior of a symbolic polygonal
variable in furnction to other polygonal variables, dependent and regressors, respectively.
\href{https://www.sciencedirect.com/science/article/pii/S0950705118304052}{PLR} is based on the
least squares and uses the center and radius of polygons as representation them. The model is
given by \eqn{y = X\beta + \epsilon}, where \eqn{y, X, \beta}, and \eqn{\epsilon} is the dependent
variable, matrix model, unknown parameters, and non-observed errors. In the model, the vector
\eqn{y = (y_c^T, y_r)^T}, where \eqn{y_c} and \eqn{y_r} is the center and radius of center and radius.
The matrix model \eqn{X = diag(X_c, X_r)} for \eqn{X_c} and \eqn{X_r} describing the center and radius
of regressors variables and finally, \eqn{\beta = (\beta_c^T, \beta_r^T)^T}. A detailed study about the
model can be found in \href{https://www.sciencedirect.com/science/article/pii/S0950705118304052}{Silva et al.(2019)}.
}
\examples{
yp <- psim(10, 10) #simulate 10 polygons of 10 sides
xp1 <- psim(10, 10) #simulate 10 polygons of 10 sides
xp2 <- psim(10, 10) #simulate 10 polygons of 10 sides
e <- new.env()
e$yp <- yp
e$xp1 <- xp1
e$xp2 <- xp2
fit <- plr(yp~xp1+xp2, e)
}
\references{
Silva, W.J.F, Souza, R.M.C.R, Cysneiros, F.J.A. (2019) \url{https://www.sciencedirect.com/science/article/pii/S0950705118304052}.
}
|
00c34f5937b6ac8cbf558d156d0259c5f78e7ec5
|
c4539e51bf7495d84473e11e4a05ad80644c5335
|
/Weekend_Homework.R
|
4365762a7d751facbc9d6f4ef7a484eb4d256095
|
[] |
no_license
|
ahargey/Intro_R_UWC
|
a905b96e2d869def399e1d3976f26e5f57fbc65c
|
33975eb73b968b2d471a2073cf0ca1a87067aa13
|
refs/heads/master
| 2020-04-19T09:20:05.965394
| 2019-05-22T11:45:43
| 2019-05-22T11:45:43
| 168,107,215
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,769
|
r
|
Weekend_Homework.R
|
# Instructions:
# Submit: Tuesday morning (before 10-am)
# Answer each of the sections in an individual script (titled section 1,2,3)
# Answer all sections
# Add comments and notes thoughout the script
# Have details at the top of each script
# Section 1: always make maps for both rast_feb and rest_aug
# Make use of the rast_feb and rast_aug dataset:
# Explore the dataset (Hint* head, tail, !dims, glimpse, !summary etc) - Make use of google for more functions on exploring a dataset
# Create a map by making use of the lat and long variables
# Create a colour pallete using the link in the document and make use this colour pallete on the map
# Add complete labels and titles to the map
# Add the name of the oceans (Atlanic and indian ocean) on the map, increase the size of the labels
# The map should include the north arrow and scale bar
# Bonus marks for insetting (having a smaller map inside another map)
# Get creative, try new things.
# Section 2:
# Make use of the ecklonia.csv dataset:
# Explore the data (Hint* head, tail, glimpse functions)
# Demonstrate the dimensions of the dataset (dim)
# Create three graphs; bargraph, line graph and boxplot: Write hypothesis for each of the graphs and answer these hypotheses
# Make use of the ggarrange function and arrange these three graphs created above into 1 plot
# All graphs must have labels as well as titles !and themes!
# Calculate the mean,max,min,median and variance for the stipe_length, stipe_diameter for each of the sites (Hint* group_by site)
# Calculate standard error !se!
# #lam %>% #standard error
# group_by(site) %>%
# summarise(var_bl = var(blade_length),
# n = n()) %>%
# mutate(se = sqrt(var_bl/n)) #creates a new column
# Determine the min and maximum frond length and stipe length
# Determine the overall summary of the dataset !summary(wholedatasetname)
# Section 3:
# Make use of the SACTN_day1 data:
# Here create a graph showing temperature variation between sites !(group by site!)
# Select all the temperatures recorded at the site Port Nolloth during August or September.
# Select all the monthly temperatures recorded in Port Nolloth during the year 1994
# Calculate the average temperature by depth
# Work through the tidyverse section within the document. Show what you have done by creating comments/ notes throughout the script
# FROM TIDY TO TIDIEST
# Section 4:
# Make use of any two built in datasets:
# Make use of the summarise, select, group_by functions ('in one code or different chunks')
# Create at least two visualisations that were not done in the Intro R workshop eg. density plot,
# !go to geom_ and look from there
## Good luck!!!
geom_
---------------------------------------------------------------------------------------------
|
548109112976442c6b8a16bdb9fe92e2d1a5c589
|
bbe1d2df7586e19a6390d80f88b99847aab39177
|
/code/Steve/eQTL/total_transcript_feature_eqtl.R
|
1f044fdbe8514ea8dae7c1dce6e80db2d4812efe
|
[] |
no_license
|
LieberInstitute/SNX19
|
0d9122855bb3b0d292dd22412193a96a5ed49698
|
eb946058427c9300d16e81908e260f1df854c533
|
refs/heads/master
| 2021-10-14T11:00:08.897850
| 2019-02-04T19:59:16
| 2019-02-04T19:59:16
| 79,872,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,333
|
r
|
total_transcript_feature_eqtl.R
|
## Absolute isoform
library(jaffelab)
library(MatrixEQTL)
library(SummarizedExperiment)
library(matrixStats)
## Load stats
load('/dcl01/lieber/ajaffe/Steve/SNX19/eqtl_runs/Merge/rdas/FinalObject_Annotated_5LIBD-CMC_set_Merged_filtered_noLIBD_PGC_Missing_1e5sigFilt_reOrdered_morePGC_100417.rda')
## Load features of intereset
load('/dcl01/lieber/ajaffe/Steve/SNX19/SNX19_8Features_of_Interest_crossBuild.rda')
## DLPFC
setwd('/dcl01/lieber/ajaffe/Steve/SNX19/eqtl_runs/DLPFC_polyA')
## load counts
load("/users/ajaffe/Lieber/Projects/RNAseq/SNX19/DLPFC_polyA/rawAndRpkmCounts_plusGenotype_SNX19_DLPFC_n495_updateMap_03082017.rda")
## filter samples to postnatal
keepIndex = which(pd$Race %in% c("CAUC", "AA") & pd$age > 13)
pd = pd[keepIndex,]
geneRpkm = geneRpkm[,keepIndex]
exonRpkm = exonRpkm[,keepIndex]
jRpkm = jRpkm[,keepIndex]
### filter to people in common ###
mm = match(pd$BrNum, colnames(snpAll ))
geneRpkm = geneRpkm[,!is.na(mm)]
exonRpkm = exonRpkm[,!is.na(mm)]
jRpkm = jRpkm[,!is.na(mm)]
snpAll = snpAll [,mm[!is.na(mm)]]
### filter features ###
gIndex=which(rowMeans(geneRpkm) > 0.01) #selecting out genes with sufficient expression (genes with less than 0.01 read per kilobase million are not perserved)
geneRpkm = geneRpkm[gIndex,] #keeping only the genes above the rpkm threshold (threshold 0.01 here)
geneMap = geneMap[gIndex,] #keeping only the gene map for genes above the rpkm threshold
eIndex=which(rowMeans(exonRpkm) > 0.01) #selecting out exons with sufficient expression (exons with less than 0.01 read per kilobase million are not perserved)
exonRpkm = exonRpkm[eIndex,] #keeping only the exons above the rpkm threshold (threshold 0.01 here)
exonMap = exonMap[eIndex,] #keeping only the gene map for genes above the rpkm threshold
jIndex = which(rowMeans(jRpkm) > 0.2)#There are an excessive number of junctions. Apply a stricter filter and drop novel junctions to reduce the number.
jRpkm = jRpkm[jIndex,] #keeping only the junctions above the rpkm threshold (threshold = 0.01 rpkm here) and are NOT novel
jMap= jMap[jIndex] #keeping only the junction map for junctions above the rpkm threshold and are NOT novel
## get expression PCs ####
exprs3features = log2(rbind(geneRpkm, exonRpkm, jRpkm)+1) #combine the three types of data into a single matrix
pcaexprs3features = prcomp(t(exprs3features+1)) #principle component decomposition on the transpose of that matrix + 1
all3PCs = pcaexprs3features$x[,1:15] #take the first 15 PCs from that matrix
## set up model matrix
pd$Dx = factor(pd$Dx, levels=c("Control","Schizo") )
mod = model.matrix(~pd$snpPC1 + pd$snpPC2 + pd$snpPC3 + pd$snpPC4 + pd$snpPC5 + all3PCs) #forming the model matrix from the SNP PCs and the expression PCs
colnames(mod)[2:ncol(mod)] = c(paste0("snpPC",1:5), paste0("exprsPC",1:15)) #Renaming the column names of the model matrix
covs = SlicedData$new(t(mod[,-1])) #This part employs the "MatrixEQTL" package
### set up SNPs
snpSlice = SlicedData$new(as.matrix(snpAll) ) #formating the snp data for the MatrixEQTL package
snpSlice$ResliceCombined(sliceSize = 5000)
snpspos = snpMapAll[,c("SNP","CHR","POS")]
snpspos$CHR = paste0("chr",snpspos$CHR) #concatenating the string "chr" with the numbers in "CHR"
colnames(snpspos) = c("SNP","chr","pos")
rownames(snpspos) = NULL
### set up expression data
yExprs = t(as.data.frame( rowSums( scale( t(exprs3features[foi$Feature[!foi$Tag%in%c('junc8.9','junc4.6','SNX19.gene')],] ) ) )) )
rownames(yExprs) <- "totalIsoformExpression"
exprs = SlicedData$new(as.matrix(yExprs))
exprs$ResliceCombined(sliceSize = 5000)
## make position data frame
posGene = geneMap[,c('Chr', 'Start', 'End')] #gene position information taken from the geneMap
posGene$name = rownames(geneMap) #gene position names variable made from rownames of geneMap
posExon = exonMap[,c('Chr', 'Start', 'End')] #exon position information taken from exonMap
posExon$name = rownames(exonMap) #exon position names variable made from rownames of exonMap
posJxn = as.data.frame(jMap)[,c('seqnames','start','end')] #extracting junction position information from the jMap object after converting it to a dataframe
colnames(posJxn) = c("Chr","Start","End")
posJxn$name = names(jMap)
posExprs = rbind(posGene, posExon, posJxn) #combining position information for the three different types of expression data
posExprs = posExprs[,c(4,1:3)]
#### full eqtl run ####
meQtl_all = Matrix_eQTL_main(snps=snpSlice,
gene = exprs,
cvrt = covs,
output_file_name.cis = "cis.txt",
output_file_name = "trans.txt",
pvOutputThreshold.cis = 0,
pvOutputThreshold=1,
snpspos = snpspos,
genepos = posExprs,
useModel = modelLINEAR,
cisDist=1e6,
pvalue.hist = 100,
min.pv.by.genesnp = TRUE)
#### eqtl ####
eqtl = meQtl_all$all$eqtls #extracting cis eqtl information from the eQTL analysis
colnames(eqtl)[1:2] = c("SNP","Feature") #adding a column called feature
eqtl$Feature = as.character(eqtl$Feature) #the feature column contains expression information
eqtl$SNP = as.character(eqtl$SNP) #converting the eqtl snps variable to a character variable
colnames(eqtl)[colnames(eqtl) %in% c('statistic','pvalue','FDR','beta')] = paste0("All_", colnames(eqtl)[colnames(eqtl) %in% c('statistic','pvalue','FDR','beta')]) #Adding "ALL_ to "statistics, p-value, FDR, and beta" to help distinguish from future analyses
eqtl$UniqueID = paste0(eqtl$SNP, ".", eqtl$Feature) #making a variable called Unique ID... Just a combination of SNP and feature
eqtl = eqtl[,c('UniqueID', 'SNP', 'Feature', 'All_statistic', 'All_pvalue', 'All_FDR', 'All_beta')] #just reordering the columns of the dataframe
#
a = rowMeans(exprs3features[foi$Feature,])
names(a) <- foi$Tag
a
##### Boxplots
library(ggplot2)
theme_set(theme_bw(base_size=18) +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.title = element_text(hjust = 0.5),
legend.position="none"))
dat = cbind(pd, t(exprs3features[foi$Feature,]),t(yExprs), t(snpAll) )
colnames(dat)<-plyr::mapvalues(colnames(dat),foi$Feature, foi$Tag)
dat = tidyr::gather_(dat,'transcriptFeature','expression', c(foi$Tag, 'totalIsoformExpression' ) )
plot_dat = dat[!dat$transcriptFeature%in%c("junc4.6","junc8.9","SNX19.gene"),]
colnames(plot_dat) = plyr::mapvalues(colnames(dat),snpMapAll$SNP, snpMapAll$name)
plot_dat[,colnames(plot_dat)%in%snpMapAll$name] <- lapply(plot_dat[,colnames(plot_dat)%in%snpMapAll$name], factor)
pdf('/dcl01/lieber/ajaffe/Steve/SNX19/plots/eqtl_boxplots_isoform_panels.pdf',height=12,width=15)
for (i in eqtl[!duplicated(eqtl$All_beta),'SNP'][1:10]) {
#Change column name for ggplot2 to work
k=snpMapAll$name[match(i, snpMapAll$SNP)]
b = ggplot(data=plot_dat[!is.na(plot_dat[,k]),], aes_string(x =k, y = 'expression')) +
facet_wrap(~transcriptFeature,ncol=3,nrow=2,scales='free') +
geom_boxplot(outlier.colour = NA, alpha = 0.1, col='black') +
geom_jitter(height=0) +
labs(y="Normalized Expression", title = i)
print(b) }
dev.off()
#### Pca of 5 expression features
pcs = prcomp(t(exprs3features[foi$Feature[!foi$Tag%in%c('junc8.9','junc4.6','SNX19.gene')],] ))
getPcaVars(pcs)
pdf('/dcl01/lieber/ajaffe/Steve/SNX19/plots/five_feature_pca_plots.pdf')
plot(pcs$x[,1],pcs$x[,2] )
plot(pcs$x[,2],pcs$x[,3] )
plot(pcs$x[,3],pcs$x[,4] )
plot(pcs$x[,4],pcs$x[,5] )
plot(pcs$x[,1],pcs$x[,2] )
plot(pcs$x[,2],pcs$x[,3] )
plot(pcs$x[,3],pcs$x[,4] )
plot(pcs$x[,4],pcs$x[,5] )
dev.off()
###### matrixEqtl of PC1
#### full eqtl run ####
meQtl_all = Matrix_eQTL_main(snps=snpSlice,
gene = SlicedData$new(as.matrix( t(pcs$x[,1]) )),
cvrt = covs,
output_file_name.cis = "cis.txt",
output_file_name = "trans.txt",
pvOutputThreshold.cis = 0,
pvOutputThreshold=1,
snpspos = snpspos,
genepos = posExprs,
useModel = modelLINEAR,
cisDist=1e6,
pvalue.hist = 100,
min.pv.by.genesnp = TRUE)
eqtl2 = meQtl_all$all$eqtls #extracting cis eqtl information from the eQTL analysis
colnames(eqtl2)[1:2] = c("SNP","Feature") #adding a column called feature
eqtl2$Feature = as.character(eqtl2$Feature) #the feature column contains expression information
eqtl2$SNP = as.character(eqtl2$SNP) #converting the eqtl snps variable to a character variable
colnames(eqtl2)[colnames(eqtl2) %in% c('statistic','pvalue','FDR','beta')] = paste0("All_", colnames(eqtl2)[colnames(eqtl2) %in% c('statistic','pvalue','FDR','beta')]) #Adding "ALL_ to "statistics, p-value, FDR, and beta" to help distinguish from future analyses
eqtl2$UniqueID = paste0(eqtl2$SNP, ".", eqtl2$Feature) #making a variable called Unique ID... Just a combination of SNP and feature
eqtl2 = eqtl2[,c('UniqueID', 'SNP', 'Feature', 'All_statistic', 'All_pvalue', 'All_FDR', 'All_beta')] #just reordering the columns of the dataframe
cor.test(eqtl$All_beta, eqtl2$All_beta[match(eqtl$SNP, eqtl2$SNP )])
cor.test(eqtl$All_pvalue, eqtl2$All_pvalue[match(eqtl$SNP, eqtl2$SNP )])
cor.test(eqtl$All_statistic, eqtl2$All_statistic[match(eqtl$SNP, eqtl2$SNP )])
pdf('/dcl01/lieber/ajaffe/Steve/SNX19/plots/total_isoformQTL_v_PC1_5Feature_QTL.pdf')
plot(eqtl$All_beta, eqtl2$All_beta[match(eqtl$SNP, eqtl2$SNP )], xlab="Total Isoform QTL Beta", ylab="5 Feature PC1 QTL Beta")
plot(eqtl$All_statistic, eqtl2$All_statistic[match(eqtl$SNP, eqtl2$SNP )], xlab="Total Isoform QTL Statistic", ylab="5 Feature PC1 QTL Statistic")
plot(-log10(eqtl$All_pvalue), -log10( eqtl2$All_pvalue[match(eqtl$SNP, eqtl2$SNP )]), xlab="Total Isoform QTL -log10(P)", ylab="5 Feature PC1 QTL -log10(P)")
dev.off()
#######
best_SNP_by_feature = cisMerge[ cisMerge$Feature %in% foi$Feature[!foi$Tag%in%c('junc8.9','junc4.6','SNX19.gene')], ]
library(dplyr)
five_feature_bestSNP = group_by(best_SNP_by_feature, Feature ) %>% summarise( best_SNP = SNP[which.min(DLPFC_PolyA_Linear_All_pvalue)], p=min(DLPFC_PolyA_Linear_All_pvalue),statistic=DLPFC_PolyA_Linear_All_statistic[which.min(DLPFC_PolyA_Linear_All_pvalue)] ) %>% as.data.frame()
five_feature_bestSNP = rbind(five_feature_bestSNP,
setNames(eqtl[1,c('Feature','SNP','All_pvalue','All_statistic')], names(five_feature_bestSNP)),
setNames(eqtl2[1,c('Feature','SNP','All_pvalue','All_statistic')] , names(five_feature_bestSNP)) )
five_feature_bestSNP$Feature[five_feature_bestSNP$Feature=="row1"] <- "Five Feature PC1"
stats_summary = best_SNP_by_feature[best_SNP_by_feature$SNP%in%five_feature_bestSNP$best_SNP,c('Feature','SNP','DLPFC_PolyA_Linear_All_pvalue', "DLPFC_PolyA_Linear_All_statistic")]
stats_summary = rbind(stats_summary,
setNames(eqtl[eqtl$SNP %in% stats_summary$SNP,c('Feature','SNP','All_pvalue','All_statistic')], names(stats_summary)),
setNames(eqtl2[eqtl2$SNP %in% stats_summary$SNP,c('Feature','SNP','All_pvalue','All_statistic')] , names(stats_summary)) )
library(tidyr)
pval_Stats = tidyr::spread(stats_summary[,c('Feature','SNP','DLPFC_PolyA_Linear_All_pvalue')], SNP, DLPFC_PolyA_Linear_All_pvalue)
pval_Stats$Feature[pval_Stats$Feature =="row1"] <- "Five Feature PC1"
t_Stats = tidyr::spread(stats_summary[,c('Feature','SNP','DLPFC_PolyA_Linear_All_statistic')], SNP, DLPFC_PolyA_Linear_All_statistic)
t_Stats$Feature[t_Stats$Feature =="row1"] <- "Five Feature PC1"
## Get R^2
cor(t(snpAll[unique(five_feature_bestSNP$best_SNP),]), use = c("pairwise.complete.obs")) ^2
|
e9255258767f787486ecfba1a565e0f6900fa526
|
bd8fe36257d3069f1eeb939458e9830fdf5c7fee
|
/cachematrix.R
|
97e0ad6bfa1fe0ef4939ee42f7045eacc6307ea8
|
[] |
no_license
|
apurvsibal/ProgrammingAssignment2
|
ce098bc0787cd83339408e4e876fa411a6c335a9
|
3ec7ae9df52ce0fb847c5a6d4cd805eb27f84966
|
refs/heads/master
| 2021-01-01T04:02:18.834860
| 2017-07-14T05:09:49
| 2017-07-14T05:09:49
| 97,106,422
| 0
| 0
| null | 2017-07-13T09:41:43
| 2017-07-13T09:41:42
| null |
UTF-8
|
R
| false
| false
| 921
|
r
|
cachematrix.R
|
## Name: Apurv Sibal
## The function computes the inverse of matrix efficiently by using cache
## The function uses the <<- operator to create a image in a different environment
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(mean) m <<- mean
getinv <- function() m
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## This function solves the problem by taking the inverse of the matrix using solve function
cacheSolve <- function(x, ...) {
m <- x$getinv()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinv(m)
m
## Return a matrix that is the inverse of 'x'
}
|
71f85a70cddc99e054ab94f7cc37f035987706d8
|
705255987191f8df33b8c2a007374f8492634d03
|
/man/CrmPackClass.Rd
|
a611be119d87b8f00e15fc12b004ae497dc2f492
|
[] |
no_license
|
Roche/crmPack
|
be9fcd9d223194f8f0e211616c8b986c79245062
|
3d897fcbfa5c3bb8381da4e94eb5e4fbd7f573a4
|
refs/heads/main
| 2023-09-05T09:59:03.781661
| 2023-08-30T09:47:20
| 2023-08-30T09:47:20
| 140,841,087
| 24
| 9
| null | 2023-09-14T16:04:51
| 2018-07-13T11:51:52
|
HTML
|
UTF-8
|
R
| false
| true
| 533
|
rd
|
CrmPackClass.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CrmPackClass-class.R
\docType{class}
\name{CrmPackClass-class}
\alias{CrmPackClass-class}
\alias{.CrmPackClass}
\alias{CrmPackClass}
\title{\code{CrmPackClass}}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
\code{\link{CrmPackClass}} is a virtual class, from which all other \code{crmPack} classes
inherit.
}
|
6d73403cc3124b98c4c72fb5eb4fc9387a8c4966
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pear/examples/peboxplot.Rd.R
|
8f9e32befcda18f9d7f170cd0c0855dbe1fb3cdb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
peboxplot.Rd.R
|
library(pear)
### Name: peboxplot
### Title: boxplots of a periodic time series
### Aliases: peboxplot
### Keywords: ts
### ** Examples
data(Fraser)
peboxplot(log(Fraser))
|
04b68fd1409cf404de66888f69dfd8e89fef3c67
|
414a28673a7b5ea8d541adebd08c4c20746664fa
|
/data-raw/mlbgm.R
|
18a6a3b35b67cce8458a36a9422dc4484d90a671
|
[] |
no_license
|
beanumber/mlbgm
|
526db62a69fc0e880b256e28ff6a6e3b17ba7363
|
b537473d9c20702fbc828c3233bd96f1c24eabb2
|
refs/heads/master
| 2020-04-05T05:43:41.726409
| 2020-01-20T19:05:21
| 2020-01-20T19:05:21
| 156,608,198
| 7
| 2
| null | 2019-11-21T14:40:49
| 2018-11-07T21:06:44
|
R
|
UTF-8
|
R
| false
| false
| 155
|
r
|
mlbgm.R
|
# build internal databases
comps_hypercube <- mlbgm:::comps_hypercube_build()
save(comps_hypercube, file = "data/comps_hypercube.rda", compress = "xz")
|
60acdcd3aed5af185eb98bb90fe4cec33f6ea83a
|
5f684a2c4d0360faf50fe055c1147af80527c6cb
|
/2021/2021-week29/deprecated/drought-dimetric.R
|
f9b580d1133dac9b652eeead5e13b075e57ef4a4
|
[
"MIT"
] |
permissive
|
gkaramanis/tidytuesday
|
5e553f895e0a038e4ab4d484ee4ea0505eebd6d5
|
dbdada3c6cf022243f2c3058363e0ef3394bd618
|
refs/heads/master
| 2023-08-03T12:16:30.875503
| 2023-08-02T18:18:21
| 2023-08-02T18:18:21
| 174,157,655
| 630
| 117
|
MIT
| 2020-12-27T21:41:00
| 2019-03-06T14:11:15
|
R
|
UTF-8
|
R
| false
| false
| 2,884
|
r
|
drought-dimetric.R
|
library(tidyverse)
library(lubridate)
library(camcorder)
gg_record(dir = "temp", device = "png", width = 6, height = 12, units = "in", dpi = 320)
drought <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-07-20/drought.csv')
west_grid <- us_state_grid1 %>%
filter(code %in% c("AK", "AZ", "CA", "CO", "HI", "ID", "MT", "NV", "NM", "OR", "UT", "WA", "WY"))
d4_lvl <- drought %>%
mutate(
drought_lvl = fct_relevel(drought_lvl, c("None", "D0", "D1", "D2", "D3", "D4")),
year = year(valid_start),
week = week(valid_start),
side = sqrt(area_pct / 100)
) %>%
filter(drought_lvl %in% c("D4")) %>%
filter(week == 28) %>%
filter(state_abb %in% c("AK", "AZ", "CA", "CO", "HI", "ID", "MT", "NV", "NM", "OR", "UT", "WA", "WY")) %>%
left_join(west_grid, by = c("state_abb" = "code"))
d4_lvl_plot <- d4_lvl %>%
mutate(area_pct = pmax(area_pct, 0.5)) %>%
rowwise() %>%
mutate(
x = list(c(0,
area_pct/100 * sqrt(3)/2,
0,
area_pct/100 * -sqrt(3)/2)),
y = list(c(-area_pct/100 * sqrt(0.5)/2,
0,
area_pct/100 * sqrt(0.5)/2,
0))
) %>%
unnest(c(x, y))
ggplot(d4_lvl_plot) +
geom_text(aes(x = col * 1.5 + area_pct/100 * sqrt(3)/2 + 0.1,
y = -row * 1.5 + year/50,
label = if_else(area_pct > 1 | (area_pct < 1 & year %% 2 == 0) | year == 2021, year, NULL)),
stat = "unique", size = 1, family = "Input Mono Compressed") +
geom_segment(aes(x = col * 1.5 + area_pct/100 * sqrt(3)/2 + 0.05,
y = -row * 1.5 + year/50,
xend = col * 1.5 + + area_pct/100 * sqrt(3)/2 + 0.02,
yend = -row * 1.5 + year/50,
size = if_else(area_pct > 1 | (area_pct < 1 & year %% 2 == 0) | year == 2021, 0.07, 0)),
color = "grey60"
) +
geom_polygon(aes(x = col * 1.5 + x,
y = -row * 1.5 + y + year/50,
group = interaction(name, year, week),
fill = if_else(year == 2021, "darkred", "grey50"),
color = if_else(year == 2021, "red", "grey20")),
alpha = 0.7, size = 0.1) +
geom_text(aes(x = col * 1.5,
y = -row * 1.5 + 2045/50,
label = name),
stat = "unique", family = "Fira Sans Condensed") +
scale_color_identity() +
scale_fill_identity() +
scale_size_identity() +
coord_fixed() +
theme_minimal(base_family = "Fira Sans") +
theme(
legend.position = "none",
plot.background = element_rect(fill = "grey97", color = NA)
)
# export gif
# gg_playback(frame_duration = 0.15, image_resize = 1080)
# convert to mp4 in terminal
# ffmpeg -i animated.gif -movflags faststart -pix_fmt yuv420p -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" video.mp4
|
2a8e2a45504cc865bc99bc092cc919a0201c3ab4
|
aea23923040a247e78d082137d05be11227f0c23
|
/proyecto_tfg/preparar_datos_dias_naturales.R
|
223fee329eb9fdc2177c4bb292d934b7baaa894a
|
[] |
no_license
|
alonso-bh/TFG
|
a5f1e83a661024d09058f81c86d169d07ea1242d
|
cdd2923578cc208de269bcab4e07ea1e13b4b089
|
refs/heads/main
| 2023-06-10T23:30:18.746629
| 2021-07-09T21:57:12
| 2021-07-09T21:57:12
| 340,448,091
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,603
|
r
|
preparar_datos_dias_naturales.R
|
# ALONSO BUENO HERRERO - 2021
################################################################################
#' FICHERO PARA TRANSFORMAR EL DATASET CON DATOS DE CADA DÍA DEL AÑO (DÍAS
#' NATURALES)
#'
#' @param path_fichero es un parámetro obligatorio que espefica la ruta del
#' fichero XLS descargado del portal IECA. La ruta debería ser relativa (aunque
#' si es absoluta, evidentemente, no importa) dentro de la carpeta de trabajo
#' "TFG".
preparar_datos_dias_naturales <- function(path_fichero = getwd()){
library('tidyr') # para el fill
library('readr')
library("rio")
# path_fichero <- path_dias_naturales # descomentar para las pruebas
excel <- import(path_fichero)
# delete head non-valid rows
excel <- excel[-c(1,2,3,4),]
# renombrar cabecera temporalmente para más comodidad
colnames(excel) <- c("V1", "V2", "V3", "V4","V5","V6","V7")
# rellenar primera columna con la fecha correspondiente (primera fila del día)
excel <- excel %>% fill(V1, .direction = "down")
# extraer nombre real de las columnas
cabecera <- excel[1,] # fila con los nombres
excel <- excel[-c(1),] # borrar esta fila
# eliminar filas de datos totales (Andalucía), pues se calcularán al hacer
# roll-up por el cubo
source("proyecto_tfg/utils.R")
provincias <- obtener_provincias()
excel <- excel[ (is.element(excel$V2, provincias)), ]
# nombres de las columnas
colnames(excel) <- cabecera
# salvar los datos
write.table(excel, "datos/datos_dias_naturales.csv", row.names=FALSE, col.names=TRUE, sep = ';')
}
|
56eaea78de7b17362c73a0689ce683ad0c8d5224
|
ca1c506a4b05f8468896cb507a944adc62484560
|
/Chapter14/7_keras_mnist_vae_outlier.R
|
309b92f5a7150a5e022d755d31646103bfe8e194
|
[
"MIT"
] |
permissive
|
Rosiethuypham/Deep-Learning-with-R-for-Beginners
|
d216afe2518e7e01ea3456d26ef1e66942bed46d
|
70bc0e1ff074ad31a4eea0396c32c58014dfed1a
|
refs/heads/master
| 2023-05-07T14:25:30.686911
| 2019-05-17T06:29:16
| 2019-05-17T06:29:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,155
|
r
|
7_keras_mnist_vae_outlier.R
|
library(keras)
# Switch to the 1-based indexing from R
options(tensorflow.one_based_extract = FALSE)
K <- keras::backend()
mnist <- dataset_mnist()
X_train <- mnist$train$x
y_train <- mnist$train$y
X_test <- mnist$test$x
y_test <- mnist$test$y
## Exclude "0" from the training set. "0" will be the outlier
outlier_idxs <- which(y_train!=0, arr.ind = T)
X_train <- X_train[outlier_idxs,,]
y_test <- sapply(y_test, function(x){ ifelse(x==0,"outlier","normal")})
# reshape
dim(X_train) <- c(nrow(X_train), 784)
dim(X_test) <- c(nrow(X_test), 784)
# rescale
X_train <- X_train / 255
X_test <- X_test / 255
original_dim <- 784
latent_dim <- 2
intermediate_dim <- 256
# Model definition --------------------------------------------------------
X <- layer_input(shape = c(original_dim))
hidden_state <- layer_dense(X, intermediate_dim, activation = "relu")
z_mean <- layer_dense(hidden_state, latent_dim)
z_log_sigma <- layer_dense(hidden_state, latent_dim)
sample_z<- function(params){
z_mean <- params[,0:1]
z_log_sigma <- params[,2:3]
epsilon <- K$random_normal(
shape = c(K$shape(z_mean)[[1]]),
mean=0.,
stddev=1
)
z_mean + K$exp(z_log_sigma/2)*epsilon
}
z <- layer_concatenate(list(z_mean, z_log_sigma)) %>%
layer_lambda(sample_z)
# we instantiate these layers separately so as to reuse them later
decoder_hidden_state <- layer_dense(units = intermediate_dim, activation = "relu")
decoder_mean <- layer_dense(units = original_dim, activation = "sigmoid")
hidden_state_decoded <- decoder_hidden_state(z)
X_decoded_mean <- decoder_mean(hidden_state_decoded)
# end-to-end autoencoder
variational_autoencoder <- keras_model(X, X_decoded_mean)
# encoder, from inputs to latent space
encoder <- keras_model(X, z_mean)
# generator, from latent space to reconstructed inputs
decoder_input <- layer_input(shape = latent_dim)
decoded_hidden_state_2 <- decoder_hidden_state(decoder_input)
decoded_X_mean_2 <- decoder_mean(decoded_hidden_state_2)
generator <- keras_model(decoder_input, decoded_X_mean_2)
loss_function <- function(X, decoded_X_mean){
cross_entropy_loss <- loss_binary_crossentropy(X, decoded_X_mean)
kl_loss <- -0.5*K$mean(1 + z_log_sigma - K$square(z_mean) - K$exp(z_log_sigma), axis = -1L)
cross_entropy_loss + kl_loss
}
variational_autoencoder %>% compile(optimizer = "rmsprop", loss = loss_function)
history <- variational_autoencoder %>% fit(
X_train, X_train,
shuffle = TRUE,
epochs = 10,
batch_size = 256,
validation_data = list(X_test, X_test)
)
plot(history)
# Reconstruct on the test set
preds <- variational_autoencoder %>% predict(X_test)
error <- rowSums((preds-X_test)**2)
eval <- data.frame(error=error, class=as.factor(y_test))
library(dplyr)
library(ggplot2)
eval %>%
ggplot(aes(x=class,fill=class,y=error))+geom_boxplot()
threshold <- 5
y_preds <- sapply(error, function(x){ifelse(x>threshold,"outlier","normal")})
# Confusion matrix
table(y_preds,y_test)
library(ROCR)
pred <- prediction(error, y_test)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
auc <- unlist(performance(pred, measure = "auc")@y.values)
auc
plot(perf, col=rainbow(10))
|
06dfca60306a717fe7c6f9a346abc80f72dbb0e7
|
4e1e7228f0dfc9123a951d2608d86a6fe4f117ae
|
/2_ml_model/scripts/preprocess.R
|
8b23395682024a816a83f0b6d48257d478ac4398
|
[] |
no_license
|
davidADSP/bratislava
|
851fa5f86b1ab1a6b0a18d8d72c1d8d0fc97ebbe
|
31e3090f535abb356299ab1d34fe45970a3fcaf3
|
refs/heads/master
| 2021-01-19T07:53:04.006247
| 2017-04-10T12:19:09
| 2017-04-10T12:19:09
| 87,582,354
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,723
|
r
|
preprocess.R
|
# Preprocessing pipeline for the property-price dataset: load raw listings,
# filter implausible prices, engineer date / postcode / street features, and
# prepare the table for categorical encoding (continued below).
library(data.table)
library(readr)
library(caret)
library(stringr)
source('./scripts/config.R')   # defines id_col / response_col used below — confirm
source('./scripts/words.R')    # defines doWords() used later in this script
data = fread('./data/data_04_08_2017.csv')
# Drop implausible prices (price is the response variable).
data = data[price<1000000]
data = data[price>0]
data_original = data
# Export descriptions in the column layout expected by the text model.
words_data = data[,.(id=listing_id, query = '', product_title='', product_description = description, median_relevance = price, relevance_variance = 0)]
write_csv(words_data,path='data/words_data.csv')
#data[,(response_col):=log(get(response_col)+1)]
# Rows with a non-missing response are candidates for training.
train_rows = which(!is.na(data[,get(response_col)]))
####DATE
data[,Date:=last_publish_date]
data[,year:=as.integer(substring(Date,1,4))]
data[,month:=as.integer(substring(Date,6,7))]
data[,day:=as.integer(substring(Date,9,10))]
data[,Date:=as.Date(Date)]
data[,weekday:=weekdays(Date)]
#data[,days_elapsed:=as.numeric(Date - as.Date('1995-01-01'))]
data[,Date:=NULL]
#####POSTCODE
#data[, c("Postcode.1", "Postcode.2") := tstrsplit(Postcode, " ", fixed=TRUE)][]
data[,Postcode.1:=outcode]
#data[, Postcode.2.1 := as.integer(substring(Postcode.2,1,1))]
#data[, Postcode.2.2 := substring(Postcode.2,2,3)]
# Split the outcode into its leading non-digit part and trailing digit part.
data[,Postcode.1.1:= sapply(regmatches(Postcode.1,gregexpr("\\D+",Postcode.1)),function(x) x[1])] #str_extract(Postcode.1, "[A-Z]+")]
data[,Postcode.1.2:= sapply(regmatches(Postcode.1,gregexpr("\\d+\\D?",Postcode.1)),function(x) x[1])]
###### STREET
data[,Street:=tolower(street_name)]
data[,Street:=gsub('[[:punct:]]', '', Street) ]
# Repeatedly strip trailing area names; 10 passes handles nested suffixes
# such as "... road london essex".
for (i in 1:10){
  data[,Street:=gsub('(london|middlesex|essex|croydon|surrey|isleworth|walthamstow|england|stratford|kent|harrow|wembley|enfield|e10|putney|bromley|wickham|en1|e6|ruislip|hornchurch|finchley)$', '', Street) ]
  data[,Street:=gsub(' $', '', Street) ]
}
# House number = first run of digits followed by whitespace; 0 when absent.
data[,House_Number:= as.integer(sapply(regmatches(Street,gregexpr("\\d+\\s",Street)),function(x) x[1]))]
data[is.na(House_Number),House_Number:=0]
data[,Road_Type:= sapply(regmatches(Street,gregexpr("\\w+$",Street)),function(x) x[1])]
# NOTE(review): punctuation was stripped from Street above, so this comma
# count is most likely always 0 — verify this feature is intentional.
data[,Commas:= sapply(regmatches(data[,Street],gregexpr("\\,",data[,Street])),function(x) length(x))]
data[,Address_Length:= nchar(Street)]
###############
id = data[,get(id_col)]
response = data[,get(response_col)]
write_csv(data,path='./data/data_enhanced.csv')
################
# Free-text / identifier columns that are not modelled directly.
to_remove = c('Postcode.1', 'agent_name', 'agent_phone', 'description'
              , 'displayable_address', 'first_publish_date', 'last_publish_date', 'image_url'
              #, 'outcode'
              #, 'Street'
              , 'street_name')
data[,(to_remove):=NULL]
# Also drop constant columns (a single unique value carries no signal).
to_remove = which(sapply(data, function(x){length(unique(x))})==1)
data[,(to_remove):=NULL]
#############
# Convert categorical variables to dummy one-hot
data[,c(id_col):=NULL]
data[,c(response_col):=NULL]
cat("\nConverting categorical variables to dummy one-hot encoded variables...")
feature.names <- names(data)
char_feat = feature.names[which(sapply(data,class) %in% c('character','logical'))]
sapply(data[,.SD,.SDcols=char_feat],function(x){length(unique(x))})
# Encode each character/logical feature. High-cardinality features (>200
# levels) are frequency-rank encoded; low-cardinality features are one-hot
# encoded via caret::dummyVars. Each fitted mapping is saved to disk so it
# can be reapplied at scoring time.
for (f in char_feat) {
  cat('\n',f)
  num_levels= length(unique(data[,get(f)]))
  if (num_levels>200){
    # Rank levels by descending frequency (ties broken at random).
    levels = data[,.N,by=f]#data.table(table(data[,get(f)]))
    levels[,rank:=rank(-N,ties.method="random")]
    # Rename: raw level column -> 'V1' (merge key), rank column -> f.
    names(levels) = c('V1','N',f)
    #
    # m <- t(sapply(levels[,rank],function(x){ as.integer(intToBits(x))}))
    # cut = min(which(apply(m,2,sum)==0))
    # m = m[,1:(cut-1)]
    # colnames(m) = paste(f, 1:ncol(m),sep='_')
    # levels = cbind(levels,m)
    #
    # levels[,rank:=NULL]
    #
    levels[,N:=NULL]
    save(levels, file=paste0('./level_',f,'.rdata'))
    # Replace the raw column with its frequency rank (sort=FALSE keeps order).
    data = merge(data,levels,by.y='V1',by.x=f,sort=FALSE)
    data[,c(f):=NULL]
    gc()
  }else{
    # Low cardinality: expand into one-hot columns and persist the encoder.
    col = data.table(f = data[,as.factor(get(f))])
    names(col) = f
    dummy = dummyVars( ~ ., data = col)
    data_out = data.table(predict(dummy, newdata = col))
    save(dummy, file=paste0('./dummy_',f,'.rdata'))
    data = cbind(data,data_out)
    rm(col,dummy,data_out)
    gc()
    data[,c(f):=NULL]
  }
}
##########
# Train/validation split, then export of the structured feature tables and
# the combined structured + text sparse matrices.
valid_size = 0.2
valid_rows = sample(1:nrow(data), valid_size * nrow(data)) #which(data[,year]==2015)
# NOTE(review): this takes positions WITHIN the train_rows vector rather than
# row numbers of `data` (train_rows previously held row indices of rows with a
# non-missing response), yet the result is used below to index `data`
# directly — verify this is intentional.
train_rows = which(!(train_rows %in% valid_rows))
id_train = id[train_rows]
id_valid = id[valid_rows]
struct_X_train = data.table(id = id_train, data[train_rows,])
struct_X_valid = data.table(id = id_valid, data[valid_rows,])
y_train = data.table(id = id_train, response = response[train_rows])
y_valid = data.table(id = id_valid, response = response[valid_rows])
write_csv(struct_X_train,path='data/X_train.csv')
write_csv(struct_X_valid,path='data/X_valid.csv')
write_csv(y_train,path='data/y_train.csv')
write_csv(y_valid,path='data/y_valid.csv')
#write_csv(months,path='data/adjustments.csv')
# Build the document-term matrix (doWords comes from scripts/words.R) and
# record the combined column layout for the model stage.
m = doWords(NULL)
struct_vars = colnames(struct_X_train)[2:ncol(struct_X_train)]
struct_vars_idx = 1:length(struct_vars)
desc_vars_no_txt = m@Dimnames$Terms
desc_vars = paste0('txt_',m@Dimnames$Terms)
desc_vars_idx = length(struct_vars) + (1:length(desc_vars))
col_headings = c(struct_vars, desc_vars)
write.csv(col_headings, file = './col_headings.csv')
write.csv(desc_vars_no_txt, file = './desc_vars_no_txt.csv')
m_train = m[train_rows,]
m_valid = m[valid_rows,]
# Concatenate structured features with the sparse text features. Using
# ncol(struct_X_train) for the valid slice is safe because both tables share
# the same columns.
m_train = cbind(Matrix(as.matrix(struct_X_train)[,2:ncol(struct_X_train)], sparse = TRUE), m_train)
m_valid = cbind(Matrix(as.matrix(struct_X_valid)[,2:ncol(struct_X_train)], sparse = TRUE), m_valid)
#m_train =Matrix(as.matrix(struct_X_train)[,2:ncol(struct_X_train)], sparse = TRUE)
#m_valid = Matrix(as.matrix(struct_X_valid)[,2:ncol(struct_X_train)], sparse = TRUE)
writeMM(m_train, file = './data/X_train_words.RData')
writeMM(m_valid, file = './data/X_valid_words.RData')
|
ec579eab11e3e61214b3834088ba58733545cab8
|
6cbffff7534b28f260777a84b889eb1e521c02b3
|
/man/m_plot_all_micros.Rd
|
86831ac14d6d7baa59a0f8e48872dcc3e0fa1983
|
[] |
no_license
|
sebmader/LizardsAndNiches
|
281291fcafc844f84d2027bfc3b2f4175f305b46
|
cfb3e19a9e60becdc44a2e74292b5e7a2895e120
|
refs/heads/master
| 2021-06-27T15:38:21.453007
| 2021-02-22T11:54:26
| 2021-02-22T11:54:26
| 217,536,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 724
|
rd
|
m_plot_all_micros.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/m_plot_all_micros.R
\name{m_plot_all_micros}
\alias{m_plot_all_micros}
\title{Plot all microclimates}
\usage{
m_plot_all_micros(multi_micro, save_plot = FALSE)
}
\arguments{
\item{multi_micro}{A tidy data frame of summarised output results of the ecotherm function
containing monthly (!) averaged microclimate variables per scenario and location
(for details see ?m_tidy_output).}
\item{save_plot}{Boolean whether the microclimate plot should be saved or not
(default = FALSE)}
}
\value{
Plot
}
\description{
This function plots the microclimate conditions from the output of
multiple micro_global models per location and scenario in one plot
}
|
ba372236dcf2558fc011f651b76ab65b7ec19ac7
|
4d252ade9889ca4860102a3a2e76cdee1acf10f9
|
/R/sim_BD.R
|
1d78825c7c7ab65409b925b0bcdca167f63fff69
|
[] |
no_license
|
josteist/Compadre
|
78a42e1a7c22b3ff094014cf7d93b959020f985f
|
9b59dd2fa7e8f5c307f76dbd2186fc6e1b8cf106
|
refs/heads/master
| 2021-06-10T23:37:11.999007
| 2021-05-29T06:23:21
| 2021-05-29T06:23:21
| 171,682,480
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,152
|
r
|
sim_BD.R
|
#' Function to simulate a birth-death-fossilization process.
#'
#' This function simulates a birth-death process with a fossilization/sampling scheme. Rates of speciation and extinction
#' can be dependent on time \emph{t} and standing diversity/richness, \emph{n}. Sampling rates can be dependent on time.
#' @param spec Speciation rate. Either given as a fixed rate or as a function of time (\emph{t}) \emph{and} richness (\emph{n}). Defaults to 0.1.
#' @param ext Extinction rate. Given as fixed rate or function of time and richness. Defaults to 0.02.
#' @param samp Sampling rate, given as fixed rate or function of time (\emph{t}) only. Defaults to 0.3
#' @param n_init Initial number of lineages. Defaults to 100.
#' @param dt_ints Array of 'intervals' in which to generate the fossil record, as an array of interval durations. All lineages sampled within these intervals are placed in the interval, regardless of precise time it was sampled for the output \emph{FosRec}. Defaults to rep(1,10)
#' @return A named list with \emph{Taxa} (time of origin and extinction of all taxa), \emph{Foss} (list of all fossilization/sampling events for all taxa), \emph{FosRec} (occurrence matrix with dimensions sampled species by intervals). The remaining entries in the list are the inputs given to the function, \emph{dts, Spec, Ext, Samp, n_init}
#'
#' @export
sim_BD <- function(spec=0.1,
                   ext=0.02,
                   samp = 0.3,
                   n_init=100,dt_ints=rep(1,10)){
  if (any(sapply(list(spec,ext),function(ii){class(ii) == "function"}))){
    # If any biological rate depends on time/richness, simulate with small
    # incremental time steps; otherwise the faster waiting-time scheme below
    # is used. Promote any fixed rate to a constant function of (t, n).
    if (class(spec)!='function'){
      spec_tmp = spec;
      spec <- function(t,n){spec_tmp}}
    if (class(ext) !='function'){
      ext_tmp = ext;
      ext <- function(t,n){ext_tmp}}
    Times = array(NA,c(n_init*1000,2));  # [origin, extinction] per taxon
    Times[1:n_init,1] = 0;
    dxt = min(dt_ints)/(1000*n_init);    # temporal resolution
    t_now = 0;
    txmax = n_init;                      # running total number of taxa
    alive_now = c(1:n_init);
    for (tt in 1:length(dt_ints)){
      while (t_now<sum(dt_ints[1:tt])){
        # Probability of at least one speciation/extinction among the living
        # lineages over [t_now, t_now+dxt] (trapezoidal average of the rate).
        sp_now = max(0,rate2prob((spec(t_now, length(alive_now)) +
                                    spec(t_now+dxt,length(alive_now)))/2*length(alive_now),dxt));
        ex_now = max(0,rate2prob((ext(t_now, length(alive_now)) +
                                    ext(t_now+dxt, length(alive_now)))/2*length(alive_now),dxt));
        while (sp_now>0.25 | ex_now>0.25){
          # Probabilities too large for the linearization: shrink the step
          # until both event probabilities are acceptably small.
          sp_now = max(0,rate2prob((spec(t_now, length(alive_now)) +
                                      spec(t_now+dxt,length(alive_now)))/2*length(alive_now),dxt));
          ex_now = max(0,rate2prob((ext(t_now, length(alive_now)) +
                                      ext(t_now+dxt, length(alive_now)))/2*length(alive_now),dxt));
          dxt = dxt/2; # half the stepsize
          # Bug fix: the original also printed `event` here, which is
          # undefined the first time this loop runs and would abort the
          # simulation with an object-not-found error.
          print(c(sp_now,ex_now,t_now/sum(dt_ints),dxt))
        }
        # 1 = nothing happens, 2 = speciation, 3 = extinction.
        event <- sample.int(3,1,prob=c(1-(sp_now+ex_now),sp_now,ex_now));
        if (event==1){ # Nothing happened
          t_now = t_now + dxt
        } else if (event==2){ # Speciation event (budding).
          t_now = t_now + dxt;
          alive_now = c(alive_now,txmax+1); # new species
          if (txmax>(-1+dim(Times)[1])){ # if the array is full, double it
            Times = rbind(Times,array(NA,c(dim(Times)[1],dim(Times)[2])))
          }
          Times[txmax+1,1] = runif(1,min=t_now-dxt,max=t_now); # exact birth time
          txmax = txmax+1;
        } else if (event==3){ # Extinction event
          t_now = t_now + dxt;
          whichdies <- sample(alive_now,1)           # which species dies
          alive_now = alive_now[-which(alive_now %in% whichdies)];
          Times[whichdies,2] = runif(1,min=t_now-dxt,max=t_now) # exact death time
        }
        if (length(alive_now)==0){ # clade extinct: jump to the end of time
          t_now = sum(dt_ints);
        }
        if ((sp_now+ex_now)>1/4){ # probabilities high: refine the stepsize
          dxt = dxt/2; # half the stepsize
          print(c(sp_now,ex_now,event,t_now/sum(dt_ints),dxt))
        }
        else if (max(sp_now,ex_now)<1e-6){ # probabilities tiny: coarsen
          dxt = dxt*2; # double the stepsize
          print(c(sp_now,ex_now,event,t_now/sum(dt_ints),dxt))
        }
      }
    }
    Taxa = Times[1:txmax,]; # scrap the unused remainder of the array
  } else {
    # All rates are fixed: waiting-time simulation is faster.
    l = spec
    m = ext
    tmax = sum(dt_ints)
    taxa = array(NA,dim=c(max(100,n_init)^2,2))
    taxa[1:n_init,1] = 0;
    alive = 1:n_init;
    ntix = n_init+1; # index where the next new taxon is stored
    t = 0;
    while (t<tmax){
      # Draw per-lineage exponential waiting times for birth (col 1) and
      # death (col 2); the overall minimum determines the next event type.
      waitingtimes <- cbind(rexp(length(alive),rate = l),rexp(length(alive),rate = m))
      event <- which.min(c(min(waitingtimes[,1]),min(waitingtimes[,2])))
      t = t+min(waitingtimes)
      if (t>=tmax){break}
      if (event==1){
        # birth
        taxa[ntix,1] = t;
        alive = c(alive,ntix);
        ntix = ntix+1;
        if (ntix==dim(taxa)[1]){ # grow the array when full
          taxa = rbind(taxa,array(NA,dim=c(n_init^2,2)))
        }
      } else if (event==2){
        # death: kill a uniformly chosen living lineage
        if (length(alive)>1){
          dies <- sample(alive,1);
        } else {
          dies = alive; # sample(x, 1) on a length-1 vector samples from 1:x
        }
        taxa[dies,2] = t;
        alive <- setdiff(alive,dies);
        if (length(alive)==0){t = tmax}
      }
    }
    Taxa = taxa[1:(ntix-1),]
  }
  # Durations of all taxa are known; now draw fossil sampling events.
  if (!is.null(dim(Taxa))){ # more than one taxon
    Taxa[is.na(Taxa[,2]),2] = sum(dt_ints) # still-alive taxa end at tmax
    Taxa[Taxa[,2]==0,2] = sum(dt_ints)
    Taxa <- Taxa[!Taxa[,1]>(sum(dt_ints)),] # drop taxa born after tmax
    Foss <- lapply(1:dim(Taxa)[1],function(ii){sampFosRec(Taxa[ii,1],Taxa[ii,2],samp)});
    FosRec <- array(0,c(sum(sapply(Foss,length)>0),length(dt_ints)));
  } else {
    # Single surviving taxon.
    if (is.na(Taxa[2])){
      Taxa[2] = sum(dt_ints)}
    Foss <- sampFosRec(Taxa[1],Taxa[2],samp)
    FosRec <- array(0,c(sum(sapply(Foss,length)>0),length(dt_ints)));
  }
  # Bin each sampled taxon's fossilization times into the dt_ints intervals,
  # counting occurrences per interval.
  tix = 1;
  for (jj in which(sapply(Foss,length)>0)){
    FosRec[tix,
           rle(sort(sapply(Foss[[jj]],function(ii){which(ii<cumsum(dt_ints))[1]})))$values] <-
      rle(sort(sapply(Foss[[jj]],function(ii){which(ii<cumsum(dt_ints))[1]})))$lengths
    tix = tix+1;
  }
  rownames(FosRec) <- which(sapply(Foss,length)>0)
  out <- list(Taxa = Taxa,Foss = Foss,FosRec=FosRec,dts = dt_ints,
              Spec = spec, Ext = ext, Samp=samp,n_init=n_init)
  attr(out,"class") <- "cmr_simulation";
  return(out)
}
|
cf1b965980ae5decdf507b13e07cbdf5d357ce0e
|
4cec58954da2b527c7e70013ca1efad4b0105ceb
|
/plot3.R
|
a17edb584dfbbc107d06248cee628a59da074652
|
[] |
no_license
|
MogensYdeAndersen/ExData_Plotting1
|
ba94d0b35f6d2ad0217dd485c415a828504d3a21
|
6dc4e0fba3fde4fd5d62dbed37eeed4cc680691a
|
refs/heads/master
| 2021-01-15T11:33:22.496252
| 2015-04-12T22:17:03
| 2015-04-12T22:17:03
| 33,772,826
| 0
| 0
| null | 2015-04-11T11:08:46
| 2015-04-11T11:08:45
| null |
UTF-8
|
R
| false
| false
| 645
|
r
|
plot3.R
|
# Plot 3: energy sub-metering for 1-2 Feb 2007, saved to plot3.png (480x480).
data <- read.csv("./household_power_consumption.txt", header=T, sep=";", na.string="?")
subdata <- data[data$Date %in% c("1/2/2007","2/2/2007"),]
# Combine date and time columns into a single POSIXlt timestamp for the x-axis.
datetime <- strptime(paste(subdata$Date, subdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
plot(datetime, subdata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(datetime, subdata$Sub_metering_2, type="l", col="red")
lines(datetime, subdata$Sub_metering_3, type="l", col="blue")
# Fix: the legend was previously padded with NA entries, producing blank
# legend rows and a col/lty length mismatch; one entry per series suffices.
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
487a3aa0c99abb8dc9ffa2159f56c5d7a3e6ec5c
|
6baf50d0608e9af57f35334c7a59831ad567600d
|
/modified_localG.R
|
12ced3903029c2d1765dc3f9bb9daf396e32873b
|
[] |
no_license
|
manishverma09/Racial-Segregation
|
33d0d96b82ab5ad46781c310143780e42fd1e58f
|
28b68104b5d7638e7dbf3eeee64e6547a945d4f2
|
refs/heads/master
| 2022-09-27T02:34:18.810273
| 2020-06-05T03:51:12
| 2020-06-05T03:51:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,618
|
r
|
modified_localG.R
|
modified.localG = function(black_population, total_population, listw, method='kershaw', zero.policy=NULL, spChk=NULL, return_internals=FALSE, GeoDa=FALSE)
  # This function is based on the original localG script from the SPDEP package,
  # modified so it can be used with ratios (proportions) rather than raw values.
  # Several parameters are kept for a planned later expansion; as of July 25
  # `return_internals` and `GeoDa` are accepted but not used.
  # Arguments:
  #   black_population: Population of blacks in a geography. Numeric.
  #   total_population: Total population in a geography. Numeric.
  #   listw: spatial weights object of class "listw" (spdep).
  #   method: 'kershaw', i.e. the method used in Kershaw's paper, or other
  #           (still to be developed as of July 25th).
  # Returns:
  #   gstar: Z-score for the Getis-Ord G statistic.
  #
  # The key challenge:
  # Getis-Ord calculates the deviation of a 'local mean' from the global mean.
  # The global mean in the usual case is the mean of the entire data. However,
  # when we use ratios (e.g. proportion of blacks in the total population) we
  # want the difference between the local proportion and the global mean
  # proportion. The global mean proportion is not the mean of the data
  # distribution; it has a nonlinear relationship with the data. Note that
  # using the plain global mean can also produce a negative value under the
  # square root in the denominator. So we are interested in local(x) - m(X)
  # where m(X) is a nonlinear function of all x. Below, m(X) is the global
  # proportion sum(black)/sum(total).
  ### 1. CHECKS borrowed from the SPDEP package
  # Type-of-object checks
{
  if (!inherits(listw, "listw"))
    stop(paste(deparse(substitute(listw)), "is not a listw object"))
  if (!is.numeric(black_population))
    stop(paste(deparse(substitute(black_population)), "is not a numeric vector"))
  if (!is.numeric(total_population))
    stop(paste(deparse(substitute(total_population)), "is not a numeric vector"))
  stopifnot(is.vector(black_population))
  stopifnot(is.vector(total_population))
  # Size consistency between the different arguments
  n = length(listw$neighbours)
  if (n != length(black_population))stop("Different numbers of observations")
  if (length(total_population) != length(black_population))stop("Different numbers of observations")
  # Check for NAs
  if (any(is.na(black_population))) stop(paste("NA in ", deparse(substitute(black_population))))
  if (any(is.na(total_population))) stop(paste("NA in ", deparse(substitute(total_population))))
  if (is.null(spChk)) spChk = get.spChkOption()
  if (spChk && !chkIDs(black_population, listw))
    stop("Check of data and weights ID integrity failed")
  if (is.null(zero.policy))
    zero.policy = get("zeroPolicy", envir = .spdepOptions)
  stopifnot(is.logical(zero.policy))
  ### 2. PROCESSING OPTIONS (Notations mix the original SPDEP code, the
  # Getis-Ord papers published in 1992 and 1995, and the ArcGIS documentation at
  # https://pro.arcgis.com/en/pro-app/tool-reference/spatial-statistics/h-how-hot-spot-analysis-getis-ord-gi-spatial-stati.htm)
  # gstari records whether self-neighbours are included (G* vs G); currently
  # computed but not used further below.
  gstari = FALSE
  if (!is.null(attr(listw$neighbours, "self.included")) &&
      attr(listw$neighbours, "self.included")) gstari = TRUE
  if (method == 'kershaw'){
    x_bar = sum(black_population, na.rm = T) / sum(total_population, na.rm = T) # global mean proportion
    x = black_population /total_population # black proportion in each tract
    # Tracts with zero total population yield Inf (black/0), which is reset to 0.
    # NOTE(review): 0/0 yields NaN, which is.infinite() does NOT catch — confirm
    # that zero-black, zero-total tracts cannot occur or handle NaN explicitly.
    x[is.infinite(x)] = 0 # we keep zero-population tracts; summing does not create degenerate cases
    # Numerator of G-star: spatial lag minus the expected local sum under x_bar.
    lx = lag.listw(listw, x, zero.policy=zero.policy)
    wij.xbar = lapply(listw$weights, function (x) sum(x)*x_bar)
    numerator = lx - unlist(wij.xbar)
    # Denominator of G-star.
    capitalS = sqrt((sum(x^2)/length(x)) - x_bar^2) # this could be divided by (n-1)
    den.part1 = sapply(listw$weights, function (x) sum(x^2)) # sum(wij^2)
    den.part1 = length(x)*den.part1
    den.part2 = sapply(listw$weights, function (x) (sum(x))^2)
    denominator = (den.part1 - den.part2) / (length(x)-1)
    denominator = capitalS*sqrt(denominator)
    # The G-star z-score per tract.
    gstar = numerator/denominator
  } else {
    gstar = 'The function currently executes Kershaw method. I will later add my method'
  }
  return (gstar)
}
|
1531f2b16e98126a10d199a6f4bc17764bde1ddc
|
62c4e9ee0f891c5c46f6136d460c9dc0075601f5
|
/man/Score.BinReg.Rd
|
d9c4952e510149d2ac1afd71aae944428f55f498
|
[] |
no_license
|
zrmacc/BinReg
|
beaa643afc7ef31efd0008658790a0f9682bb195
|
e00f951144d981a99024158fcb72d274f90bb91f
|
refs/heads/master
| 2020-03-26T19:29:07.052417
| 2018-09-16T16:30:40
| 2018-09-16T16:30:40
| 145,267,814
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,308
|
rd
|
Score.BinReg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Score.R
\name{Score.BinReg}
\alias{Score.BinReg}
\title{Score Test for Binary Regression}
\usage{
Score.BinReg(y, X, L, b10 = NULL, model = "logistic", df = NULL,
sig = 0.05, eps = 1e-08, maxit = 10, report = T)
}
\arguments{
\item{y}{Numeric response vector.}
\item{X}{Numeric model matrix.}
\item{L}{Logical vector, with as many entries as columns in the model matrix,
indicating which columns have fixed coefficients under the null.}
\item{b10}{Value of the regression coefficient for the selected columns under
the null. Defaults to zero.}
\item{model}{Selected from among logistic, probit, and robit.}
\item{df}{Degrees of freedom, if using the robit model.}
\item{sig}{Significance level, for CIs.}
\item{eps}{Tolerance for Newton-Raphson iterations.}
\item{maxit}{Maximum number of NR iterations.}
\item{report}{Report fitting progress?}
}
\value{
A numeric vector containing the score statistic, the degrees of
freedom, and a p-value.
}
\description{
Tests the hypothesis that a subset of the regression coefficients are fixed
at a reference value. Specifically, let \eqn{\beta} denote the regression
coefficient. Partition \eqn{\beta=(\beta_{1},\beta_{2})}. Suppose that
interest lies in testing that \eqn{\beta_{1}} is fixed at \eqn{\beta_{10}}.
\code{Score.BinReg} performs a score test of
\eqn{H_{0}:\beta_{1}=\beta_{10}}. The test is specified using a logical vector
\code{L}, with as many entries as columns in the model matrix \code{X}. The
values of \code{L} set to \code{T} are constrained under the null, while
values of \code{L} set to \code{F} are estimated under the null.
}
\examples{
\dontrun{
set.seed(101);
# Design matrix
X = cbind(1,matrix(rnorm(n=4*1e3),nrow=1e3));
# Regression coefficient
b = c(1,-1,2,-1,0);
# Logistic outcome
y = rBinReg(X,b,model="logistic");
# Test b1=b2=b3=b4=0, which is false.
Score.BinReg(y=y,X=X,L=c(F,T,T,T,T),model="logistic",report=F);
# Test b4=0, which is true.
Score.BinReg(y=y,X=X,L=c(F,F,F,F,T),model="logistic",report=F);
# Test b2=0 and b4=2, which is false.
Score.BinReg(y=y,X=X,L=c(F,F,T,F,T),b10=c(0,2),model="logistic",report=F);
# Test b1=b3=-1, which is true.
Score.BinReg(y=y,X=X,L=c(F,T,F,T,F),b10=c(-1,-1),model="logistic",report=F);
}
}
|
818fe01d09e1be1f4c42ff78d38591bdb538e4d6
|
0a5e7cc6d619f1023af926dcb2858fb2ed1453b1
|
/Problema2.R
|
5f91c867306cdb358e1568bf7c0269f49c2ace0f
|
[] |
no_license
|
Bia103/Projects-in-R
|
72815319264ce4b08a0f3a1fe346c689842d7073
|
2696e7e76693fdc8b2d3563b561e8f2704d9d50d
|
refs/heads/main
| 2023-03-09T12:30:34.437152
| 2021-02-17T12:48:40
| 2021-02-17T12:48:40
| 339,721,649
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,215
|
r
|
Problema2.R
|
#install.packages("plotrix")
library("plotrix")
par(pty="s")
plot(seq(-1,1,length=10),seq(-1,1,length=10),type="n",xlab="",ylab="")
#draw.circle(2,4,c(1,0.66,0.33),border="purple",
# col=c("#ff00ff","#ff77ff","#ffccff"),lty=1,lwd=1)
draw.circle(0,0,radius = 1,border="black",lty=1,lwd=1)
x=runif(1000,-1,1)
y=runif(1000,-1,1)
ma <-0
nra <-0
nrr <-0
for (i in 1:1000) {
if(sqrt(x[i]^2+y[i]^2) <=1){
points(x[i],y[i],col="blue")
nra <- nra+1}
else{
points(x[i],y[i],col="red")
nra <- nrr+1
}
ma <- ma + sqrt(x[i]^2+y[i]^2)
}
mt<-0
ma <- ma/1000
for (i in 1:1000) {
if(sqrt(x[i]^2+y[i]^2) <=1){
mt <- mt+(nra/1000)*sqrt(x[i]^2+y[i]^2)
}
else{
mt <- mt+(nrr/1000)*sqrt(x[i]^2+y[i]^2)
}
}
#points(x[2],y[2],col="darkgreen")
Funct5 <-function(r,t){
return(1/pi*(1/(r*cos(t)*sqrt(1-(r^2)*(cos(t)^2)))+1/(r*sin(t)*sqrt(1-(r^2)*(sin(t)^2)))))
}
R <-function(r1){
return(integrate(Funct5, 0, 2*pi,r=r1,subdivisions=2000)$value)
}
Te <-function(t1){
return(integrate(Funct5, 0, Inf,t=t1)$value)
}
ttttt<-Funct5(1/sqrt(2),-pi/4)
teta<-Te(pi/4)
rrr<-R(1/sqrt(2))
|
782c1b927652162fbd3e3f93f37cb85b45a9b201
|
677a011a3370acfdfb364c0f3aa1d833e602c55f
|
/documents/data science/R Programming/cachematrix.R
|
5c94bbba34c5203c3c8442a1f0607c7a395adf7a
|
[] |
no_license
|
lgerdine/ExData_Plotting1
|
3874b96bba5184d6a1e68b07c9f3ac8ced61bdce
|
f876564c8dc8343d3f6f40d251abba7cae7a135f
|
refs/heads/master
| 2021-01-18T01:06:37.389697
| 2015-04-09T15:31:20
| 2015-04-09T15:31:20
| 33,547,554
| 0
| 0
| null | 2015-04-07T14:18:35
| 2015-04-07T14:18:35
| null |
UTF-8
|
R
| false
| false
| 1,270
|
r
|
cachematrix.R
|
## R Programming Week 3 Homework
## Build a function that makes list of functions like the mean exercise
## Build a function that checks cache to see if the inverse of the matrix
## has been created. If not, create the inverse
## Date: 02-16-15
## Student: Laurel Gerdine
## Make list of functions to calculate inverse of matrix
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a memoised slot for its inverse. The four
  # accessor closures share this function's environment, so `set` can
  # invalidate the cache and `setsolve`/`getsolve` can update/read it.
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates any cached inverse
    },
    get = function() x,
    setsolve = function(solve) cached_inverse <<- solve,
    getsolve = function() cached_inverse
  )
}
## Return the inverse of the matrix held by a makeCacheMatrix() object,
## reusing the cached inverse when one is available and computing (and
## caching) it otherwise.
cacheSolve <- function(x=matrix(), ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix, remember it, and return it.
    cached <- solve(x$get(), ...)
    x$setsolve(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
## Demo: exercise the cached-inverse wrapper defined above.
test_matrix1 <- matrix (data = c(1:4), nrow = 2, ncol = 2)
test_matrix2 <- matrix (data = c(5:8), nrow = 2, ncol = 2)
x <- test_matrix1
y <- test_matrix2
# Bug fix: the original called makeVector2(), which does not exist in this
# file; the constructor defined above is makeCacheMatrix().
LOF <- makeCacheMatrix(x)
cacheSolve(LOF)
|
625195c160999530edf7407d53d5100ef53f3e43
|
b1d1d08674ab0f4c7c93086a71933c950e36369a
|
/r_load_yelp_Json_stream.R
|
07e78a830401f84b00eb625c9bf5d6c8e70af38b
|
[] |
no_license
|
KLENAR/CapstoneProject
|
f6ad5d24ed192f8b41b7544f42c44c0cd40bb697
|
66e1357cda1c4ef8551da233586c2fae14efd344
|
refs/heads/master
| 2021-01-10T16:40:54.500770
| 2015-11-23T00:14:40
| 2015-11-23T00:14:40
| 46,687,110
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
r_load_yelp_Json_stream.R
|
# Reads Yelp data in JSON format and saves the resulting data.frames in RData files.
library(jsonlite)

# Each dataset follows the identical pattern: stream the JSON into a data
# frame named after the dataset, save it to "<name>.RData", then free the
# memory before processing the next one. The loop replaces three copy-pasted
# stanzas; output file names and saved object names are unchanged.
for (name in c("business", "checkin", "tip")) {
  json.file <- sprintf("./yelp_dataset/yelp_academic_dataset_%s.json", name)
  assign(name, stream_in(file(json.file)))
  save(list = name, file = paste0(name, ".RData"))
  rm(list = name)
}
|
df8f24147f9d6a03d378b23b625fc230f4d4210c
|
c7607eb2c7074cc35b9a20a2c1f22c3654b54d6d
|
/man/get_data360.Rd
|
271365dbfe0ad9c11f197bcc0581f92dacd6da6d
|
[
"MIT"
] |
permissive
|
asRodelgo/data360r
|
9ab9654a8131c18c7ff2ab95d3f030cd9670110d
|
1ca85cbf9aa8cf48c0b61c356f99c3fd570c6edf
|
refs/heads/master
| 2021-01-23T17:57:54.879540
| 2017-09-07T19:25:51
| 2017-09-07T19:25:51
| 102,785,930
| 1
| 0
| null | 2017-09-07T21:24:54
| 2017-09-07T21:10:06
|
R
|
UTF-8
|
R
| false
| true
| 2,630
|
rd
|
get_data360.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_data360.R
\name{get_data360}
\alias{get_data360}
\title{Get TC/Govdata360 data from API}
\usage{
get_data360(site = "tc", indicator_id = NULL, dataset_id = NULL,
country_iso3 = NULL, timeframes = NULL, output_type = "wide")
}
\arguments{
\item{site}{string pertaining to the data360 site to download data from.
Possible choices: 'tc' for TCdata360, 'gov' for Govdata360}
\item{indicator_id}{NULL (optional); Vector of indicators codes.
Should be left NULL if \code{dataset_id} is not NULL.}
\item{dataset_id}{NULL (optional); Character vector of Dataset IDs for all indicator data is to be downloaded.
Should be left NULL if \code{indicator_id} is not NULL.}
\item{country_iso3}{defaults to c('USA', 'PNG', 'PHL') (optional); Vector of countries (ISO-3 3-letter character codes,
e.g. 'PNG', 'USA', 'PHL') for which the data is needed.
Leaving this NULL pulls data for every available country.}
\item{timeframes}{NULL (optional); Vector of years to restrict results data to.
Leaving this NULL gets all available timeframes.
Important note: \code{timeframes} parameter cannot be non-NULL while the other parameters are NULL, since data request is too large.}
\item{output_type}{string pertaining to the output type. Possible choices include:
'wide' == one row contains multi-year observations
'long' == one row refers to one year only.}
}
\value{
Data frame containing requested data
}
\description{
Downloads the requested data by using the TCdata360 API at \url{tcdata360.worldbank.org/docs}
or Govdata360 API at \url{govdata360.worldbank.org/docs}.
The function generates a data frame (wide or long, depending on user input).
}
\details{
Hint: Don't know what codes to write as inputs for this function? Helpful functions include:
\itemize{
\item See \code{\link{search_360}} to get search TC/Govdata360 indicators, countries, categories, and dataset lists.
\item See \code{\link{get_metadata360}} to get indicator/dataset/country-level metadata.
\item See \code{\link{get_resources360}} to get additional resource information.
}
}
\examples{
#get data for dataset ID 51 in TCdata360
df <- get_data360(dataset_id = 51)
#get data for countries USA, PHL in Govdata360
df2 <- get_data360(site = 'gov', country_iso3 = c('USA', 'PHL'))
#get data for indicator IDs 944, 972 in TCdata360
df3 <- get_data360(indicator_id = c(944, 972))
#get data for indicator IDs 944, 972 in 2011-2013 in long format in TCdata360
df4 <- get_data360(indicator_id = c(944, 972),
timeframes = c(2011, 2012, 2013), output_type = 'long')
}
|
fc3caf3a18f8e82c49910f86773621678ad2e6c1
|
aeebd1497c7446e8ec967ba774ca5e016ce062a4
|
/propect value2/export data.R
|
23f361d9cc8435206e0b3c7d82b1bf2d60aa684c
|
[] |
no_license
|
shenfan2018/shenfan2018
|
212e881877df52b8772905b5a3546739cd4b5921
|
0bb70a7b0cdb0dc4d14a9576b02b6f22c7e9dfdb
|
refs/heads/master
| 2020-04-01T18:05:52.266821
| 2019-11-20T12:45:09
| 2019-11-20T12:45:09
| 153,471,302
| 1
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 3,319
|
r
|
export data.R
|
## Export prospect-theory value (PV) measures, merged with monthly fund
## returns, to CSV files for downstream analysis. Uses data.table chained
## `[` syntax with by-reference `:=` updates; assumes data.table, readxl
## (read_excel) and stringr (str_pad) are attached elsewhere in the session.

## ---- Monthly PV (standard TK measures) ----
load("fundPV-st1.RData")
# Add monthly return
load("C:/Users/shenfan/source/repos/shenfan2018/fund 2004-2017/fund-NAV.RData")
# Monthly return: compound the daily adjusted NAV growth factors within each
# id/year/semester/month cell, then subtract 1.
ret <- data.NAV[, year := year(date)
][, quarter := quarter(date)
][quarter == 1 | quarter == 2, sem := 1
][quarter == 3 | quarter == 4, sem := 2
][, month := month(date)
][, AdjustedNAVGrowth2 := 1 + AdjustedNAVGrowth
][, .(month_return = prod(AdjustedNAVGrowth2, na.rm = TRUE) - 1), keyby = .(id, year, sem, month)]
# Join the monthly returns onto the monthly PV measures, keep key columns.
data <- ret[fund.PV, on = .(id, year, month)
][, quarter := quarter(date)
][, .(id, date, year, month, month_return, TK, PW, LA, CC)]
write.csv(data, "C://Users//shenfan//Desktop//monthPV.csv")
##############################################################################
## ---- Raw fund-level PV imported from Excel ----
fund.TK <- read_excel("C:/Users/shenfan/Desktop/prospect value/data/fund-returnwithTK20190328.xlsx")
fund.TK <- as.data.table(fund.TK)
# Convert all character columns (positions 7:18) to numeric
fund.TK <- fund.TK[, colnames(fund.TK[, 7:18]) := lapply(.SD[, 7:18], as.numeric)
][, date := as.Date(date)]
# Fix the fund id: keep as character and left-pad with zeros to 6 digits
fund.TK <- fund.TK[, id := as.character(id)
][, id := str_pad(id, 6, side = "left", pad = "0")]
write.csv(fund.TK, "C://Users//shenfan//Desktop//fundPV.csv")
##############################################################################
## ---- Weekly PV over a 52-week window ----
load("week-fund-PV52.RData")
load("C:/Users/shenfan/source/repos/shenfan2018/fund 2004-2017/fund-NAV.RData")
# Monthly returns (same computation as the monthly-PV section above)
ret <- data.NAV[, year := year(date)
][, quarter := quarter(date)
][quarter == 1 | quarter == 2, sem := 1
][quarter == 3 | quarter == 4, sem := 2
][, month := month(date)
][, AdjustedNAVGrowth2 := 1 + AdjustedNAVGrowth
][, .(month_return = prod(AdjustedNAVGrowth2, na.rm = TRUE) - 1), keyby = .(id, year, sem, month)]
data <- ret[PV52, on = .(id, year, month)
][, quarter := quarter(date)
][, .(id, date, year, month, month_return, TK52, PW52, LA52, CC52)]
write.csv(data, "C://Users//shenfan//Desktop//weekPV52.csv")
###########################################################################
## ---- Weekly PV over a 26-week window ----
load("week-fund-PV26.RData")
load("C:/Users/shenfan/source/repos/shenfan2018/fund 2004-2017/fund-NAV.RData")
# Monthly returns (same computation as above)
ret <- data.NAV[, year := year(date)
][, quarter := quarter(date)
][quarter == 1 | quarter == 2, sem := 1
][quarter == 3 | quarter == 4, sem := 2
][, month := month(date)
][, AdjustedNAVGrowth2 := 1 + AdjustedNAVGrowth
][, .(month_return = prod(AdjustedNAVGrowth2, na.rm = TRUE) - 1), keyby = .(id, year, sem, month)]
data <- ret[PV26, on = .(id, year, month)
][, quarter := quarter(date)
][, .(id, date, year, month, month_return, TK26, PW26, LA26, CC26)]
write.csv(data, "C://Users//shenfan//Desktop//weekPV26.csv")
#####################################################################
## ---- Decayed PV (time-decay weighted TK measures) ----
## Note: this section groups by quarter rather than semester, and uses an
## inner join (nomatch = 0) instead of the left joins above.
load("decayfundPV-st1.RData")
load("C:/Users/shenfan/source/repos/shenfan2018/fund 2004-2017/fund-NAV.RData")
# Monthly returns
ret <- data.NAV[, year := year(date)
][, quarter := quarter(date)
][, month := month(date)
][, AdjustedNAVGrowth2 := 1 + AdjustedNAVGrowth
][, .(month_return = prod(AdjustedNAVGrowth2, na.rm = TRUE) - 1), keyby = .(id, year, quarter, month)]
data <- ret[fund.PV, on = .(id, year, month), nomatch = 0
][, .(id, date, year, month, month_return, TKrho0.8, TKrho0.85, TKrho0.9)]
write.csv(data, "C://Users//shenfan//Desktop//decayPV.csv")
|
d904360e2d5a98cbd4817ecd91b5c599327cce7f
|
1dd840b99146dbdd57e3b267d6243d695354bdd0
|
/Predict.R
|
2b45910b5cb72948b2098ea509ea3d308afe6ab7
|
[] |
no_license
|
diardelavega/TotalPrediction
|
654e6ca7d4d6f1df3153dd94ab31c7a0790e738b
|
464cafdc346c419ba58d7c34ac7ff82ebfddb4ff
|
refs/heads/master
| 2020-04-12T08:52:37.906304
| 2017-03-04T17:22:23
| 2017-03-04T17:22:23
| 61,332,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,780
|
r
|
Predict.R
|
# Predict the results for the next set of matches
# load the objects & liraries -> predict & stroe the prediction in a csv file
#think for the reevaluation of the prediction (after we have the actual results of the match)
# Detect the operating system and pick the base directory from which the
# helper scripts below (predAtt_Loader, dataStructLoader) are sourced.
os<-Sys.info()["sysname"]; # find the operating system
base<-"/home/user/Git"; # the base for the files to load
if(grepl("win",tolower(os))){
base <- "C:";
}
# *paths are vectors of string with the paths to the files of every competition to be predicted
# Run weekly predictions for every competition listed in the input vectors.
#
# Args:
#   dtfPaths   - vector of directory paths holding the saved *.dtf.RData
#                objects, one directory per competition.
#   trainPaths - vector of CSV paths with the training datasets (same order).
#   testPaths  - vector of CSV paths with the weekly test matches (same order).
#   dtfKind    - character vector selecting which prediction targets to run:
#                any of "h", "s", "ft", "p2", "p1", "ht".
#
# Returns: "PRED_END_OK" on success, or "PRED_ERR_END" if an error was caught.
#
# Side effects: appends progress to a log file, appends ensemble predictions
# to a per-competition output file (created via dirMker), re-saves the updated
# dtf objects, and assigns tempdtf/tt/dtf/ntt/ndtf into the global environment
# (via <<-) because diffFunc/ttFixer read those names as globals.
# NOTE(review): relies on the global `os` set at the top of this file to pick
# the log path — confirm it is defined before calling.
predictAll <- function(dtfPaths,trainPaths,testPaths,dtfKind){
exit <- "PRED_END_OK";
# dtfPaths is a vector of the DTF dirPath of the competition in hand
library(methods);
predAtt_Loader();
print("predAtt_Loader");
libLoader();
print("libLoader");
dataStructLoader();
print("dataStructLoader");
#DTFLoader();
#print("DTFLoader");
predExit <- tryCatch({
log <- "/home/user/BastData/R_LOG";# initial log file path is for linux o-systems
if(grepl("win",tolower(os))){
log <- "C:/BastData/R_LOG";
}
write(c("PREDICT....; on:",date(),dtfKind,"dtf_len :",length(dtfPaths),"train_len :",length(trainPaths),"test_len :",length(testPaths)), file = log, ncolumns = 13, append = T, sep = " ")
# Iterate over the competitions; paths of the three vectors are parallel.
# NOTE(review): 1:length(x) misbehaves when the vector is empty — assumes
# at least one competition is supplied.
for(i in 1:length( dtfPaths)){
print(dtfPaths[i]);
write(c("\t",testPaths[i]), file = log, ncolumns = 10, append = T, sep = ",")
tempdtf <<- read.csv(trainPaths[i]); # train datasets
print("tempdtf");
tt <<- read.csv(testPaths[i]) #test dataset/weekly matches
print("tt");
dtf <<- tt #to call the diffFunc with the hardcoded "dtf" as dataframe
print("dtf");
ntt <<- diffFunc(); #with ntt for the diff based attributes & datasets
print("ntt");
ttFixer(tempdtf)
print("ttFixer");
dtf <<- tempdtf
print("dtf -2");
ndtf <<- diffFunc();
print("ndtf");
filNam = dirMker(testPaths[i]); # exists the posibility that the file will be empty
print(filNam);
write("\t LOADED & FIXED PARAMETERS", file = log, ncolumns = 10, append = T, sep = ",")
print("write");
# One guarded section per prediction target. Each section loads its saved
# dtf object (hDtf/csDtf/...), scores the weekly matches, appends the
# ensemble prediction to the output file, and re-saves the updated object.
# Failures in one target do not stop the others (bare tryCatch per section).
tryCatch({
if("h" %in% dtfKind){
fnam=paste0(dtfPaths[i],"/head.dtf.RData");
if(file.exists(fnam)){
load(fnam)
print("------------------------------------: HEAD")
#during prediction every instance of the dtfObj is assigned with the prediction vector, to be used for reevaluation
hDtf$predCalcScore();
write("#head", file = filNam, ncolumns = 10, append = T, sep = ",")
write(hDtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
# after the DTH objs have been updated with the vector in each instance save them again
save(hDtf,file=fnam)
rm(hDtf)
write("\t head", file = log, ncolumns = 10, append = T, sep = ",")
} }
})
tryCatch({
if("s" %in% dtfKind){
fnam=paste0(dtfPaths[i],"/score.dtf.RData");
if(file.exists(fnam)){
load(fnam)
print("------------------------------------: SCORE")
csDtf$predCalcScore();
write("#score", file = filNam, ncolumns = 10, append = T, sep = ",")
write(csDtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
save(csDtf,file=fnam)
rm(csDtf)
write("\t score", file = log, ncolumns = 10, append = T, sep = ",")
} }
})
tryCatch({
if("ft" %in% dtfKind){
fnam=paste0(dtfPaths[i],"/ft.dtf.RData");
if(file.exists(fnam)){
load(fnam)
print("------------------------------------: FT")
tftDtf$predCalcScore();
write("#ft", file = filNam, ncolumns = 10, append = T, sep = ",")
write(tftDtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
save(tftDtf,file=fnam)
rm(tftDtf)
write("\t ft", file = log, ncolumns = 10, append = T, sep = ",")
} }
})
tryCatch({
if("p2" %in% dtfKind){
fnam=paste0(dtfPaths[i],"/p2.dtf.RData");
if(file.exists(fnam)){
load(fnam)
print("------------------------------------: P2")
p2Dtf$predCalcScore();
write("#p2", file = filNam, ncolumns = 10, append = T, sep = ",")
write(p2Dtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
save(p2Dtf,file=fnam)
rm(p2Dtf)
write("\t p2", file = log, ncolumns = 10, append = T, sep = ",")
} }
})
tryCatch({
if("p1" %in% dtfKind){
fnam=paste0(dtfPaths[i],"/p1.dtf.RData");
if(file.exists(fnam)){
load(fnam)
print("------------------------------------: P1")
p1Dtf$predCalcScore();
write("#p1", file = filNam, ncolumns = 10, append = T, sep = ",")
write(p1Dtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
save(p1Dtf,file=fnam)
rm(p1Dtf)
write("\t p1", file = log, ncolumns = 10, append = T, sep = ",")
} }
})
tryCatch({
if("ht" %in% dtfKind){
fnam=paste0(dtfPaths[i],"/ht.dtf.RData");
if(file.exists(fnam)){
load(fnam)
print("------------------------------------: HT")
thtDtf$predCalcScore();
write("#ht", file = filNam, ncolumns = 10, append = T, sep = ",")
write(thtDtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
save(thtDtf,file=fnam)
rm(thtDtf)
write("\t ht", file = log, ncolumns = 10, append = T, sep = ",")
} }
})
# print("------------------------------------: HEAD")
# hDtf$predCalcScore();
# print("------------------------------------: SCORE")
# csDtf$predCalcScore();
# print("------------------------------------: P1")
# p1Dtf$predCalcScore();
# print("------------------------------------: P2")
# p2Dtf$predCalcScore();
# print("------------------------------------: HT")
# thtDtf$predCalcScore();
# print("------------------------------------: FT")
# tftDtf$predCalcScore();
#
# # write in the output(prediction file) the points for each prediction we made
# # the order in which the objs are written is importan so that they can be understood when they are read
# filNam = dirMker(testPaths[i]);
# write(hDtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
# write(csDtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
# write(p1Dtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
# write(p2Dtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
# write(thtDtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
# write(tftDtf$getEnsamble(), file = filNam, ncolumns = dim(tt)[1], append = T, sep = ",")
# after the DTH objs have been updated with the vector in each instance save them again
# save(hDtf,csDtf,p1Dtf,p2Dtf,tftDtf,thtDtf,file=dtfPaths[i]);
# dtfobjcleaner();
}# for
return(exit);
},
error = function(err) {
# error handler picks up where error was generated
#print(paste("MY_ERROR: ",err))
write(paste("\t MY_ERROR: ",err), file = log, ncolumns = 10, append = T, sep = ",")
exit <- "PRED_ERR_END";
return(exit);
},
finally = {
write(paste(" \t ENDED.....: i of for:",i), file = log, ncolumns = 10, append = T, sep = ",")
# in case of error save whatever can be saved
# save(hDtf,csDtf,p1Dtf,p2Dtf,tftDtf,thtDtf,file=dtfPaths[i]);
# dtfobjcleaner();
}) # END tryCatch
return(predExit);
}
dirMker <- function(test_path){
  # Derive the prediction-output file path from a test-set path and make sure
  # that its parent directory and the (initially empty) file exist.
  #
  # Mapping: ".../Pred/Test/<League>__Test__<date>" ->
  #          ".../WeekPredPoints/<League>__Pred__<date>"
  #
  # Args:   test_path - path of the weekly test-set CSV.
  # Returns: the derived output file path (the file may be empty).
  fileName <- gsub("Pred/Test", "WeekPredPoints", test_path)
  fileName <- gsub("__Test", "__Pred", fileName)
  dirName <- dirname(fileName)  # safer than manual strsplit/paste indexing
  if (!dir.exists(dirName)) {
    # BUG FIX: `mode` must be an octal *string*; the former numeric 753 was
    # interpreted as a decimal number and yielded nonsense permissions.
    dir.create(dirName, recursive = TRUE, mode = "0753")
  }
  if (dir.exists(dirName)) {
    file.create(fileName)
  }
  return(fileName)
}
dtfObjLoader <- function(path){
  # Load the saved dtf objects from `path` into the global environment.
  #
  # BUG FIX: a bare load(path) restores the objects into this function's
  # local environment, which is discarded as soon as the function returns,
  # so callers never saw the loaded objects. Loading into .GlobalEnv makes
  # them visible, as the function's name and usage intend.
  #
  # Returns (invisibly): character vector of the names of the loaded objects.
  load(path, envir = .GlobalEnv)
}
dtfobjcleaner <- function(){
  # After finishing work with the dtf objects, remove them to free memory
  # for the next set.
  #
  # BUG FIX: rm(hDtf, ...) without an `envir` argument looked for the
  # objects in this function's own (empty) environment, emitting
  # "object not found" warnings and removing nothing. Target the global
  # environment instead, and only drop the objects that actually exist.
  targets <- c("hDtf", "csDtf", "p1Dtf", "p2Dtf", "tftDtf", "thtDtf")
  rm(list = intersect(targets, ls(envir = .GlobalEnv)), envir = .GlobalEnv)
  invisible(NULL)
}
libLoader <- function(){
  # Attach the packages required by the predictive algorithms:
  # svm (e1071), decision trees (C50, rpart, tree), random forests,
  # bagging (ipred), Weka wrappers (RWeka) and plyr utilities.
  pkgs <- c("plyr", "e1071", "C50", "randomForest",
            "ipred", "RWeka", "rpart", "tree")
  for (pkg in pkgs) {
    library(pkg, character.only = TRUE)
  }
}
predAtt_Loader <- function(){
  # Source the scripts that define the pred_attribute datasets,
  # i.e. << pred_att ~ {att1, att2, ... attn} >>, one per prediction target.
  # Relies on the file-level global `base` for the install location.
  scripts <- c("Head_AttPredDataset.R",
               "Score_AttPredDataset.R",
               "P1_AttPredDataset.R",
               "P2_AttPredDataset.R",
               "totFt_AttPredDataset.R",
               "totHt_AttPredDataset.R")
  for (script in scripts) {
    source(paste0(base, "/TotalPrediction/", script))
  }
}
# Build a data frame of difference-based features from the global data frame
# `dtf` (one row per match, team-1 vs team-2 statistics).
#
# Reads:   the global `dtf` (set by the caller via <<- before invoking).
# Mutates: adds mfd1/mfd2/odd/old/owd columns to the global `dtf` via <<-.
# Returns: `ndf`, a data frame of pairwise differences plus the carried-over
#          outcome/betting/identification columns.
diffFunc <- function(){
# Expected-strength features; defence values are stored negative, hence abs().
t1adoe <- dtf$t1AtackIn - abs(dtf$t2DefenseOut) # because defence is negative nr
t2adoe <- abs(dtf$t1DefenseIn) - dtf$t2AtackOut
t1e <- dtf$t1Atack - abs(dtf$t2Defense) # because defence is negative nr
t2e <- dtf$t2Atack - abs(dtf$t1Defense)
#---------- attack/defense differences (overall, home "In", away "Out")
datk <- dtf$t1Atack-dtf$t2Atack
datkin <- dtf$t1AtackIn-dtf$t2AtackIn
datkout <- dtf$t1AtackOut-dtf$t2AtackOut
ddef <- dtf$t1Defense-dtf$t2Defense
ddefin <- dtf$t1DefenseIn-dtf$t2DefenseIn
ddefout <- dtf$t1DefenseOut-dtf$t2DefenseOut
doav_ht <- dtf$t1AvgHtScoreIn-dtf$t2AvgHtScoreOut
doav_ft <- dtf$t1AvgFtScoreIn-dtf$t2AvgFtScoreOut
#---------- average half-time / full-time score differences
dav_htin <- dtf$t1AvgHtScoreIn-dtf$t2AvgHtScoreIn
dav_htout <- dtf$t1AvgHtScoreOut-dtf$t2AvgHtScoreOut
dav_ftin <- dtf$t1AvgFtScoreIn-dtf$t2AvgFtScoreIn
dav_ftout <- dtf$t1AvgFtScoreOut-dtf$t2AvgFtScoreOut
owd <- dtf$t1WinsIn-dtf$t2WinsOut
odd <- dtf$t1DrawsIn- dtf$t2DrawsOut
old <- dtf$t1LosesIn - dtf$t2LosesOut
#---------- win/draw/loss count differences
dwin <- dtf$t1WinsIn-dtf$t2WinsIn
dwout <- dtf$t1WinsOut-dtf$t2WinsOut
ddin <- dtf$t1DrawsIn-dtf$t2DrawsIn
ddout <- dtf$t1DrawsOut-dtf$t2DrawsOut
dlin <- dtf$t1LosesIn-dtf$t2LosesIn
dlout <- dtf$t1LosesOut-dtf$t2LosesOut
pd <- dtf$t1Points-dtf$t2Points
fd <- dtf$t1Form-dtf$t2Form
mfd1<-c()
mfd2<-c()
# NOTE(review): mean(a, b, c, d) only averages its FIRST argument; the extra
# arguments are silently consumed as `trim`/`na.rm`, so mfd1/mfd2 effectively
# equal columns 13 and 39. If a 4-column mean was intended, this should be
# mean(c(...)) — changing it would alter downstream features, so confirm first.
for(i in 1:dim(dtf)[1]){mfd1[i] <- mean(dtf[i,13],dtf[i,14],dtf[i,15],dtf[i,16])}
for(i in 1:dim(dtf)[1]){mfd2[i] <- mean(dtf[i,39],dtf[i,40],dtf[i,41],dtf[i,42])}
#owd <- dtf$t1WinsIn-dtf$t2WinsOut
#odd <- dtf$t1DrawsIn- dtf$t2DrawsOut
#old <- dtf$t1LosesIn - dtf$t2LosesOut
#---------- write derived columns back into the global dtf (side effect)
dtf$mfd1 <<-mfd1
dtf$mfd2 <<-mfd2
dtf$odd <<- odd
dtf$old <<- old
dtf$owd <<-owd
#ttdf$mfd1 <-mfd1
#ttdf$mfd2 <-mfd2
#ttdf$odd <- odd
#tdf$old<- old
#tdf$owd <-owd
#---------------- positional form-column differences (cols 13..16 vs 39..42)
f1d <- dtf[,13]-dtf[,39]
f2d <- dtf[,14]-dtf[,40]
f3d <- dtf[,15]-dtf[,41]
f4d <- dtf[,16]-dtf[,42]
#-------------- assemble the returned feature frame
ndf <- data.frame(
mfd1,mfd2,pd,fd,
# f1d,f2d,f3d,f4d,
t1adoe,t2adoe,t1e,t2e,
owd,odd,old,
dwin,dwout,ddin,ddout,dlin,dlout,
datk,datkin,datkout,ddef,ddefin,ddefout,
doav_ht,doav_ft,
dav_htin,dav_htout,dav_ftin,dav_ftout
)
# Carry over identifiers, outcomes and betting odds unchanged.
ndf$week <- dtf$week
ndf$headOutcome <-dtf$headOutcome
ndf$scoreOutcome<-dtf$scoreOutcome
ndf$ht1pOutcome <-dtf$ht1pOutcome
ndf$ht2pOutcome <-dtf$ht2pOutcome
ndf$ggOutcome <-dtf$ggOutcome
ndf$totHtScore <- dtf$totHtScore
ndf$totFtScore <-dtf$totFtScore
ndf$t1 <-dtf$t1
ndf$t2<-dtf$t2
ndf$bet_1<-dtf$bet_1
ndf$bet_X<-dtf$bet_X
ndf$bet_2<-dtf$bet_2
ndf$bet_O<-dtf$bet_O
ndf$bet_U<-dtf$bet_U
ndf$t1Classification<-dtf$t1Classification
ndf$t2Classification<-dtf$t2Classification
ndf$mfd <- ndf$mfd1-ndf$mfd2
ndf$t1Form <- dtf$t1Form
ndf$t2Form <- dtf$t2Form
ndf$f1d <- f1d
ndf$f2d <- f2d
ndf$f3d <- f3d
ndf$f4d <- f4d
# rm(datk,datkin,datkout,ddef,ddefin,ddefout,doav_ht,doav_ft,dav_htin,dav_htout,
#    dav_ftin,dav_ftout, owd,odd,old,dwin,dwout,ddin,ddout,dlin,dlout,pd,fd,mfd1,mfd2,f1d,f2d,f3d,f4d,
#    t1adoe,t2adoe,t1e,t2e )
return(ndf);
}
# Align the global test-set frames `tt` and `ntt` with the training frame:
# copy the diff-based columns from `ntt` into `tt`, and re-level the factor
# columns of both against the training data `tempdtf` so that models trained
# on `tempdtf` can score the test rows without unseen-level errors.
#
# Reads:   globals `tt` and `ntt` (set by the caller via <<-).
# Mutates: `tt` and `ntt` in the global environment (via <<-).
ttFixer <- function(tempdtf){
tt$mfd1 <<- ntt$mfd1
tt$mfd2 <<- ntt$mfd2
tt$odd <<- ntt$odd
tt$old <<- ntt$old
tt$owd <<- ntt$owd
# Re-level tt's factors to the training data's level sets.
tt$t1 <<- factor(tt$t1, levels = levels(tempdtf$t1))
tt$t2 <<- factor(tt$t2, levels = levels(tempdtf$t2))
tt$t1Classification <<- factor(tt$t1Classification,levels = levels(tempdtf$t1Classification))
tt$t2Classification <<- factor(tt$t2Classification,levels = levels(tempdtf$t2Classification))
# Re-level ntt's factors to match the (now re-leveled) tt.
ntt$t1 <<- factor(ntt$t1, levels = levels(tt$t1))
ntt$t2 <<- factor(ntt$t2, levels = levels(tt$t2))
ntt$t1Classification <<- factor(ntt$t1Classification,levels = levels(tt$t1Classification))
ntt$t2Classification <<- factor(ntt$t2Classification,levels = levels(tt$t2Classification))
print(tt$t1)
}
dataStructLoader <- function(){
  # Source the script describing the structure of the DTF objects.
  # Relies on the file-level global `base` for the install location.
  source(file.path(base, "TotalPrediction", "dataStructure.R"))
}
#-------------Test & try
#dtf <- read.csv("c:/BastData/Pred/Data/Norway/Eliteserien__112__Data")
#dim(dtf)
#ndf <- diffFunc()
#trpath <- "C:/BastData/Pred/Data/Norway/Eliteserien__112__Data"
#dtfpath <- "C:/BastData/DTF/Norway/Eliteserien__112.dtf.RData"
#tspath <- "C:/BastData/Pred/Test/Norway/Eliteserien__112__Test__2016-07-29"
|
6b5fff862b38a918dda958deab9cd83754b8de84
|
ce055040549214f56f9c603eb0dd67d6b7ad196e
|
/tests/cluster,worker-termination.R
|
64e4185abc545192b7759e6d314aa9b7c7742d6a
|
[] |
no_license
|
HenrikBengtsson/future
|
d3e103cf60bd8bcd9e7e1c45d239ea0e9f3dd18e
|
30715de064db5cc0927fc71201f7866e5f45137e
|
refs/heads/develop
| 2023-08-28T20:12:44.689930
| 2023-08-18T11:59:09
| 2023-08-18T11:59:09
| 37,042,109
| 971
| 104
| null | 2023-06-17T07:33:21
| 2015-06-08T02:37:06
|
R
|
UTF-8
|
R
| false
| false
| 2,686
|
r
|
cluster,worker-termination.R
|
## Regression test: the future package must recover cleanly when a cluster
## worker process is killed, for each supported cluster type.
## NOTE(review): plan(), %<-%, isWin32 and supportsMulticore() are presumably
## provided by the future package / incl/start.R test scaffolding — not
## visible in this file.
source("incl/start.R")
library("listenv")
options(future.debug = FALSE)
## IMPORTANT: Since we're killing parallel workers, some of them will not
## get a chance to clean up their R temporary folders. Here we configure
## them to use temporary folders within this R process' temporary folder.
## This way they'll be removed when this R process terminates
Sys.setenv(TMPDIR = tempdir())
message("*** cluster() - terminating worker ...")
message("Library paths: ", paste(sQuote(.libPaths()), collapse = ", "))
message("Package path: ", sQuote(system.file(package = "future")))
message("TMPDIR for parallel workers: ", sQuote(Sys.getenv("TMPDIR")))
types <- "PSOCK"
## Speed up CRAN checks: Skip on CRAN Windows 32-bit
if (isWin32) types <- NULL
if (supportsMulticore()) types <- c(types, "FORK")
## Record the main process PID so we can assert the master never died.
pid <- Sys.getpid()
message("Main PID (original): ", pid)
cl <- NULL
for (type in types) {
  message(sprintf("Cluster type %s ...", sQuote(type)))
  cl <- parallel::makeCluster(1L, type = type, timeout = 60)
  print(cl)
  ## Crashing FORK:ed processes seems too harsh on R (< 3.3.0)
  if (type != "FORK" || getRversion() >= "3.3.0") {
    message("*** cluster() - crashed worker ...")
    plan(cluster, workers = cl, .skip = FALSE)
    x %<-% 42L
    stopifnot(x == 42L)
    ## Force R worker to terminate
    ## It's not safe to use quit() here when using type = "FORK" [1]
    ## [1] https://stat.ethz.ch/pipermail/r-devel/2021-August/080995.html
    x %<-% tools::pskill(pid = Sys.getpid())
    ## Resolving a future whose worker died must signal a FutureError.
    res <- tryCatch(y <- x, error = identity)
    print(res)
    stopifnot(
      inherits(res, "error"),
      inherits(res, "FutureError")
    )
    ## Cleanup
    print(cl)
    ## FIXME: Why doesn't this work here? It causes the below future to stall.
    # parallel::stopCluster(cl)
    ## Verify that the reset worked
    cl <- parallel::makeCluster(1L, type = type, timeout = 60)
    print(cl)
    plan(cluster, workers = cl, .skip = FALSE)
    x %<-% 43L
    stopifnot(x == 43L)
    message("*** cluster() - crashed worker ... DONE")
  } ## if (type != "FORK" || getRversion() >= "3.3.0")
  ## Sanity checks
  pid2 <- Sys.getpid()
  message("Main PID (original): ", pid)
  message("Main PID: ", pid2)
  stopifnot(pid2 == pid)
  ## Cleanup
  print(cl)
  str(cl)
  parallel::stopCluster(cl)
  ## Sanity checks
  pid2 <- Sys.getpid()
  message("Main PID (original): ", pid)
  message("Main PID: ", pid2)
  stopifnot(pid2 == pid)
  message(sprintf("Cluster type %s ... DONE", sQuote(type)))
} ## for (type ...)
message("*** cluster() - terminating worker ... DONE")
## Sanity checks
pid2 <- Sys.getpid()
message("Main PID (original): ", pid)
message("Main PID: ", pid2)
stopifnot(pid2 == pid)
source("incl/end.R")
|
4b728bdaf04eb2f3c37741fd8982463fa685eeb7
|
03aee522019d1b85e54ac199e099001a3fb785e1
|
/DESeq_normalization_of_HTSEQ_counts.R
|
345f0a1fbaa527be582ab4a2832faf05d3e11a90
|
[] |
no_license
|
btmonier/pub_expn_rare_alleles
|
412dc287057e0a0b2608a3d8f8d1a5aabf71beec
|
72b6e87482689df2b4324d1222ffea6ba4f213ad
|
refs/heads/master
| 2020-04-19T19:37:05.446289
| 2019-02-06T19:40:09
| 2019-02-06T19:40:09
| 168,393,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,488
|
r
|
DESeq_normalization_of_HTSEQ_counts.R
|
# Karl Kremling
#Jan 23 2016
#DESeq norm of samtools idxstats counts
#source("http://bioconductor.org/biocLite.R")
#import DESeq and normalize
#biocLite("DESeq2")
# if this doesn't work on linux you amy need to sudo apt-get install libxml2-dev
library("DESeq2")
#install.packages("fastmatch")
library("fastmatch")
library("parallel")
#AGP v3.29 version
basedir="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv3.29_w_gtf_annotation/"
#AGP v4.34 version w unique mappers only (but shouldn't matter because HTSeq only uses top one)
#basedir="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv4.34_w_gtf_annotation/HTSEQ_counts_multimap1/"
#AGP v4.34 version w up to 10 multimapping (but shouldn't matter because HTSeq only uses top one)
#basedir="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv4.34_w_gtf_annotation/HTSEQ_counts_multimap10/"
#AGP v4.34 version 3' end extended 500 bp
#basedir="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv4.34_w_gtf_annotation_genes_ext_500bp/"
#Sorghum Btx623 base and tip
#basedir="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_sorghum_and_maize/all_tissues_08262017sorghum_3p_FWD/HTSEQ_counts/sorghum_only/"
# Add a small uniform random jitter to every cell of `mtx` so zero counts
# become strictly positive (e.g. ahead of a log2 or Box-Cox transform).
# The jitter is drawn from U(1e-8, m/2) where m is the smallest non-zero,
# non-NA value in `mtx`, so the noise can never swamp a real observation.
# Deterministic: the RNG is re-seeded on every call.
addNoise <- function(mtx) {
set.seed(122)
# NOTE(review): the preceding set.seed(122) is immediately overridden by
# set.seed(123) below and has no effect — confirm and remove if unintended.
set.seed(123) # have to do this to reset it to the beginning , also wasnt using set seed for the individual tissues
a=mtx[(!is.na(mtx))]
max_val=min(a[a>0])/2
temp_mtx = mtx
cat(max_val)# determines the min non-zero expression value which will be the upper bound for the random number generation
if (!is.matrix(mtx)) mtx <- matrix(mtx, byrow = TRUE, nrow = 1) # converts the df to a matrix
# NOTE(review): for a data.frame input with more than one row, `mtx` becomes
# a 1-row matrix here, so `random.stuff` below may not conform with the
# original shape kept in `temp_mtx` — verify against actual call sites.
random.stuff <- matrix(runif(prod(dim(mtx)), min = 0.00000001, max = max_val), nrow = dim(mtx)[1]) #creates a matrix of random values
random.stuff + temp_mtx
}
# addNoise <- function(mtx) {
# if (!is.matrix(mtx)) mtx <- matrix(mtx, byrow = TRUE, nrow = 1)
# random.stuff <- matrix(runif(prod(dim(mtx)), min = 0.00000001, max = 0.0001), nrow = dim(mtx)[1])
# cat(length(random.stuff), "\n")
# cat(length(mtx))
# random.stuff + mtx
# }
###
#Function to take in HTSEQ counts and convert them into a normalized matrix
#cts_to_norm_mat <- function(basedir, tissue){
date()
files <- list.files(paste(basedir, "HTSEQ_counts/", sep = ""), full.names=T, pattern="count.txt", recursive=F)
#get rid of the empty files
non0list=NULL
for (i in files){non0list=c(non0list, file.info(i)$size!=0)}
files=files[non0list]
genes <- read.table(files[1], header=F, sep="\t")[,1] # gene/contig names MAKE SURE ALL FILES HAVE SAME GENE ORDER
df <- do.call(cbind,mclapply(files,function(fn)read.table(fn,header=F, sep="\t")[,2])) # takes 30 mins for 2208 files w 150k genes
#colnames(df)= substring(files, first=)
trim_left=sub(".*BGXX_", "", files)
trim_right=sub("_R1_.*", "", trim_left)
colnames(df) = trim_right
rownames(df)= genes
date()
#Remove the complete count columns at the end of the HTSEQ vectors
df=df[!rownames(df) %in% c("__no_feature","__ambiguous", "__too_low_aQual", "__not_aligned", "__alignment_not_unique"),]
###Use the poisitive id list from the genetic distances to keep only the counts which are from positively identified samples
positive_id_file_list=read.table("/media/kak268/B_2TB_Internal/Genotypes/RNA_SNPs_Chr10_Fei_pipeline/Positive_id_list_filenames_only_RNAseq_samples_based_on_dist_from_corresponding_HMP32_line.txt")
trim_left_positive_list=sub(".*BGXX_", "", positive_id_file_list[,1])
trim_right_positive_list=sub("_R1.fastq.gz", "", trim_left_positive_list)
#keep only cols with positive id
df_matched_by_genet_dist=subset(df, select=trim_right_positive_list)
#dim(df_redundant_trinity_summed_and_orig_B73_counts_only_positively_matched_from_genet_dist)
#AGP v3.29 version
#write.table(df_matched_by_genet_dist, "/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv3.29_w_gtf_annotation/count_mat/df_STAR_HTSeq_counts_B73.txt", quote=F)
#AGP v4.34 version multimap1
#write.table(df_matched_by_genet_dist, "/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv4.34_w_gtf_annotation/count_mat/df_STAR_HTSeq_counts_B73.txt", quote=F)
#AGP v4.34 version multimap10
#write.table(df_matched_by_genet_dist, "/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv4.34_w_gtf_annotation/count_mat_multimap10/df_STAR_HTSeq_counts_B73.txt", quote=F)
##AGP v4.34 version 3' end extended 500 bp
#write.table(df_matched_by_genet_dist, paste(basedir, "count_mat/df_STAR_HTSeq_counts_B73.txt", sep = ""), quote=F)
###DESEQ Normalization
#Make design matrix for use in DESeq2
colData=colnames(df_matched_by_genet_dist)
#colnames(colData) = c("Individual")
#Make improved taxanames
#TaxaNames=sub(paste(".*", tissue, "_", sep=""), "", colData[,1]) #remove everything up to and including the tissue name ".
#TaxaNames=substr(TaxaNames, 1, nchar(TaxaNames)-7)
#Deseq normalization
dds = DESeqDataSetFromMatrix(countData = df_matched_by_genet_dist, colData = as.data.frame(colData), design= ~ 1) # this colData and design are irrelevant since all we want are counts normed by total numbers, not by design
dds = estimateSizeFactors(dds)
counts.mat = counts(dds, normalized=T)
#Remove rows (genes/contigs) which have 0 expression values in all the taxa (or > 3/4 of the taxa)
#counts.mat = counts.mat[apply(counts.mat==0,1,sum)<=0.75*dim(counts.mat)[2],]
counts.mat = counts.mat[apply(counts.mat==0,1,sum)<=dim(counts.mat)[2]-1,]
counts.mat = counts.mat[ order(row.names(counts.mat)), ] # sort by the name of the gene so that specific genes can be easily compared between df
#write counts without rounding, or adding small rand and log2 transforming
counts.mat.w.smp.names=rbind(colnames(df_matched_by_genet_dist), counts.mat)
rownames(counts.mat.w.smp.names)[1]="<Trait>" # this allows it to be recognized by TASSEL. This works because write.table puts first col name over col 1, which results in an off by 1 error unless you add a string there, or include the option col.names=NA when you write to file
#AGP v3.29 version
write.table(x=t(counts.mat.w.smp.names), file="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv3.29_w_gtf_annotation/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed.txt", quote=F, col.names=T, row.names=F)
#AGP v4.34 version
#write.table(x=t(counts.mat.w.smp.names), file=paste(basedir, "/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed.txt",sep=""), quote=F, col.names=T, row.names=F)
#DESeq normalization and FPM conversion
fpm.counts.mat=fpm(dds, robust = TRUE)
fpm.counts.mat = fpm.counts.mat[apply(fpm.counts.mat==0,1,sum)<=dim(fpm.counts.mat)[2]-1,]
fpm.counts.mat = fpm.counts.mat[ order(row.names(fpm.counts.mat)), ] # sort by the name of the gene so that specific genes can be easily compared between df
#write counts without rounding, or adding small rand and log2 transforming
fpm.counts.mat.w.smp.names=rbind(colnames(df_matched_by_genet_dist), fpm.counts.mat)
rownames(fpm.counts.mat.w.smp.names)[1]="<Trait>" # this allows it to be recognized by TASSEL. This works because write.table puts first col name over col 1, which results in an off by 1 error unless you add a string there, or include the option col.names=NA when you write to file
#AGP v3.29 version
write.table(x=t(fpm.counts.mat.w.smp.names), file="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv3.29_w_gtf_annotation/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_fpm.txt", quote=F, col.names=T, row.names=F)
#AGP v4.34 version
#write.table(x=t(fpm.counts.mat.w.smp.names), file=paste(basedir, "/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_fpm.txt",sep=""), quote=F, col.names=T, row.names=F)
#write counts without rounding, but add small rand and log2 transform
counts.mat.w.sm.add.log2=log2(addNoise(counts.mat))
counts.mat.w.sm.add.log2.w.smp.names=rbind(colnames(df_matched_by_genet_dist), counts.mat.w.sm.add.log2)
rownames(counts.mat.w.sm.add.log2.w.smp.names)[1]="<Trait>" # this allows it to be recognized by TASSEL. This works because write.table puts first col name over col 1, which results in an off by 1 error unless you add a string there, or include the option col.names=NA when you write to file
#AGP v3.29 version
#write.table(x=t(counts.mat.w.sm.add.log2.w.smp.names), file="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv3.29_w_gtf_annotation/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_sm_rand_add_log2tform.txt", quote=F, col.names=T, row.names=F)
#AGP v4.34 version
#write.table(x=t(counts.mat.w.sm.add.log2.w.smp.names), file=paste(basedir, "/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_sm_rand_add_log2tform.txt", sep=""), quote=F, col.names=T, row.names=F)
#write DESeq2 counts with rounding, but without adding small rand and log2 transforming
counts.mat.round=round(counts.mat,digits=5)
counts.mat.round.w.smp.names=rbind(colnames(df_matched_by_genet_dist), counts.mat.round)
rownames(counts.mat.round.w.smp.names)[1]="<Trait>" # this allows it to be recognized by TASSEL. This works because write.table puts first col name over col 1, which results in an off by 1 error unless you add a string there, or include the option col.names=NA when you write to file
#AGP v3.29 version
write.table(x=t(counts.mat.round.w.smp.names), file="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv3.29_w_gtf_annotation/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_rounded.txt", quote=F, col.names=T, row.names=F)
#AGP v4.34 version w unique mappers
#write.table(x=t(counts.mat.round.w.smp.names), file=paste(basedir, "/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_rounded.txt",sep=""), quote=F, col.names=T, row.names=F)
#write FPM DESeq2 counts with rounding, but without adding small rand and log2 transforming
fpm.counts.mat.round=round(fpm.counts.mat,digits=5)
fpm.counts.mat.round.w.smp.names=rbind(colnames(df_matched_by_genet_dist), fpm.counts.mat.round)
rownames(fpm.counts.mat.round.w.smp.names)[1]="<Trait>" # this allows it to be recognized by TASSEL. This works because write.table puts first col name over col 1, which results in an off by 1 error unless you add a string there, or include the option col.names=NA when you write to file
#AGP v3.29 version
write.table(x=t(fpm.counts.mat.round.w.smp.names), file="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv3.29_w_gtf_annotation/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_fpm_rounded.txt", quote=F, col.names=T, row.names=F)
#AGP v4.34 version w unique mappers
#write.table(x=t(counts.mat.round.w.smp.names), file=paste(basedir, "/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_rounded.txt",sep=""), quote=F, col.names=T, row.names=F)
#write counts with rounding after adding small rand and log2 transform
counts.mat.w.sm.add.log2.rd=round(log2(addNoise(counts.mat)), digits=5)
counts.mat.w.sm.add.log2.rd.w.smp.names=rbind(colnames(df_matched_by_genet_dist), counts.mat.w.sm.add.log2.rd)
rownames(counts.mat.w.sm.add.log2.rd.w.smp.names)[1]="<Trait>" # this allows it to be recognized by TASSEL. This works because write.table puts first col name over col 1, which results in an off by 1 error unless you add a string there, or include the option col.names=NA when you write to file
#AGP v3.29 version
#write.table(x=t(counts.mat.w.sm.add.log2.rd.w.smp.names), file="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv3.29_w_gtf_annotation/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_sm_rand_add_log2tform_rounded.txt", quote=F, col.names=T, row.names=F)
#AGP v4.34 version
write.table(x=t(counts.mat.w.sm.add.log2.rd.w.smp.names), file=paste(basedir, "/count_mat/df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_sm_rand_add_log2tform_rounded.txt",sep=""), quote=F, col.names=T, row.names=F)
#read in DeSEQ counts write counts by tissue after removing genes that are completely zero in a tissue adding small rand and boxcox transforming
filename="df_STAR_HTSeq_counts_B73_match_based_on_genet_dist_DESeq2_normed_rounded_origNames_and_Altnames_noL3Mid" #exclude .txt
#AGP v3.29 version
count_dir="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv3.29_w_gtf_annotation/count_mat/count_mats_matched_to_AltNames/"
#AGP v4.34 version
count_dir="/media/kak268/A_2TB_Internal/RNA/Expressions_quants/HTSEQ_counts_STAR_against_Zea_mays_B73_AGPv4.34_w_gtf_annotation/count_mat/count_mats_matched_to_AltNames/"
DESeq_counts=read.table(file=paste(count_dir, filename, ".txt", sep=""), header=T) # takes ~5 mins for 2000x140000
DESeq_counts[DESeq_counts=="NA"]=NaN
DESeq_counts=DESeq_counts
last_name_col=10
library("MASS")
# For each tissue type: subset the samples, drop genes that are zero in every
# sample, add small random noise (so Box-Cox is defined at zero counts),
# estimate a per-gene Box-Cox lambda via MASS::boxcox, transform each gene with
# its own lambda, and write the result to a per-tissue file.
for (tissue in unique(DESeq_counts$TissueWODate)){
cat(date(), " ")
cat(tissue, "\n")
subset_DESeq_counts=DESeq_counts[DESeq_counts$TissueWODate==tissue,] #subset by tissue
subset_names=subset_DESeq_counts[,1:last_name_col]
subset_no_names=subset_DESeq_counts[,-c(1:last_name_col)]
subset_no_names_no_complete_0 = subset_no_names[,apply(subset_no_names==0,2,sum)<=dim(subset_no_names)[1]-1] # remove cols with only zeroes (for cols make sure to use 2 in apply and put the comma before the expression)
subset_no_names_sm_rand=addNoise(subset_no_names_no_complete_0) #adds a small random value which is < 1/2 the minimum expression value in the df
# Estimate the profile-likelihood-maximizing lambda for every gene (column)
# on a grid of [-2, 2] with step 0.01.
lambdaMASS_vec=NULL
for (i in colnames(subset_no_names_sm_rand)){
bc= boxcox(subset_no_names_sm_rand[[i]]~1, plotit=F, lambda=seq(-2,2,0.01))
lambdaMASS_vec<-c(lambdaMASS_vec, bc$x[which.max(bc$y)]) # lambda at the max log-likelihood
}
#http://www.isixsigma.com/tools-templates/normality/making-data-normal-using-box-cox-power-transformation/
# Simple power transform: y^lambda, or log(y) in the limiting case lambda == 0.
# NOTE(review): this is the plain power form, not the full Box-Cox
# ((y^lambda - 1)/lambda); monotone-equivalent but not numerically identical.
bc_tranformer = function(obs, lambda) {
y=NULL
if (lambda!=0){y=obs^lambda}
else {y=log(obs)}
return(y)
}
df_bc=mapply(bc_tranformer, subset_no_names_sm_rand, lambdaMASS_vec) # column-wise transform, lambdas matched by position
df_bc_w_names=cbind(subset_names, df_bc)
write.table(x=df_bc_w_names, file=paste(count_dir, "count_mats_separated_by_tissue_and_individually_box_coxed/", filename, "_sm_rand_and_box_coxed_", tissue,".txt", sep=""), row.names=F, quote=F)
}
test=cbind(x=c(1,2,3,4,5,0), y=c(3,4,11,6,0.1,7))
addNoise(test)
#Apply test and box cox test with lambda=0
# subset_no_names_sm_rand=subset_no_names_sm_rand[1:10, 1:10]
# lambdaMASS_vec=NULL
# for (i in colnames(subset_no_names_sm_rand)){
# bc= boxcox(subset_no_names_sm_rand[[i]]~1, plotit=F, lambda=seq(-2,2,0.01))
# lambdaMASS_vec<-c(lambdaMASS_vec, bc$x[which.max(bc$y)])
# }
# lambdaMASS_vec[5]=0 # hardcoding one of the lamdas to 0 in order to test the tranformation code
# bc_tranformer = function(obs, lambda) {
# y=NULL
# if (lambda!=0){y=obs^lambda}
# else {y=log(obs)}
# return(y)
# }
# df_bc=mapply(bc_tranformer, subset_no_names_sm_rand, lambdaMASS_vec)
# df_bc[1:2, 1:5]
|
486b8550523771ae3a7a21eeb43a7a2933079a9d
|
3fbee967f2e6a778e738c439be35a8e9c10c46d1
|
/dplyr-mtcars.R
|
24861bc381894bc18367c814a62f44a4a9ba6d02
|
[] |
no_license
|
elissabedamatta/analytics-1
|
28fcad4546b898788a3e7864ea41eedb1643c580
|
e05eb9a131b30296689dc71783c3f53038a7425f
|
refs/heads/master
| 2020-04-02T16:32:47.971958
| 2018-10-28T08:34:12
| 2018-10-28T08:34:12
| 154,617,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,475
|
r
|
dplyr-mtcars.R
|
# Analysis of the mtcars dataset using dplyr
# filename: dplyr-mtcars.R
library(dplyr)
?mtcars
# Structure of the data set
str(mtcars)        # structure
dim(mtcars)        # dimensions
names(mtcars) ;colnames(mtcars)  # column names
rownames(mtcars)   # row names
summary(mtcars)    # summary of dataset
# Summary activities on mtcars
t1 <- table(mtcars$am)
pie(t1)
19/32* 360
pie(t1, labels = c('Auto','Manual'))
t2 <- table(mtcars$gear)
pie(t2)
barplot(t2)
barplot(t2, col = 1:3)
barplot(t2, col = 1:3, horiz = TRUE)
barplot(t2, col = c('green','blue','red'), xlab = 'gear', ylab = 'No of cars', ylim = c(0,20))
title(main = 'Distibution of gears of cars', sub = 'No of gears')
# Using dplyr; %>% is the chaining (pipe) operator
mtcars %>% select(mpg,gear) %>% slice(c(1:5,10))
# select for columns, slice for rows
mtcars %>% arrange(mpg) # ascending order of mileage
mtcars %>% arrange(am, desc(mpg)) %>% select(am, mpg) # ascending order of am, descending order of mpg
# BUG FIX: 'muatate' was a typo for 'mutate' and errored at run time
mtcars %>% mutate(rn = rownames(mtcars)) %>% select(rn, mpg)
# display rownames with mpg
mtcars %>% slice(c(1,5,7))
mtcars %>% sample_n(3)
mtcars %>% sample_frac(.2)
mtcars %>% select(sample(x=c(1:11),size = 2)) %>% head
sample(x=1:11,size = 2)
mtcars %>% mutate( newmpg = mpg * 1.1)
mutate(mtcars, newmpg = mpg * 1.2)
# Mean mpg by transmission type
mtcars %>% group_by(am) %>% summarise(MeanMPG = mean(mpg))
mtcars %>% group_by(am) %>% summarise(MeanMPG = mean(mpg), MaxHP= max(hp), MinWT = min(wt))
mtcars %>% group_by(gear,cyl) %>% summarise(MeanMPG = mean(mpg))
|
be349e106d161ae65d753b0b08ee8cdf9eb8f1ff
|
c224e403165e0461d90a0a1ec22e8e8a050a376e
|
/Week5/signal_analysis.R
|
9494cba3c75df9a6961d18accf6a4b01ea71a534
|
[] |
no_license
|
nasimulhasan/Data_Science
|
14eae70c1983f1a24ec9944d270dd904508fd7e9
|
d2f037ce331d9936256b0eb0bc21f962de0c7f4d
|
refs/heads/master
| 2020-03-18T08:44:02.514118
| 2017-08-27T10:14:52
| 2017-08-27T10:14:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 213
|
r
|
signal_analysis.R
|
# Exploratory inspection of the Kaggle Higgs Boson challenge "signal" data.
# NOTE(review): the absolute Windows path makes this non-portable; consider a
# project-relative path -- kept as-is in this documentation-only pass.
setwd("F:/Introduction to Data Science/Week5/Assessment/Kaggle_Higgs_Boson_Machine_Learning_Challenge")
getwd()
signal <- read.csv("signal.csv")
dim(signal)      # rows x columns
View(signal)     # opens the data viewer (interactive sessions only)
names(signal)
str(signal)
summary(signal)
|
2c5b7523579475037f335aff8a258456f8475f64
|
d7170c2a8234c947432ee1cbffe2eb22db31b221
|
/transmission_model/r/common/uncertain-parameters.R
|
5c2f27e60d620203d1855df2ffcbbf5874da1b95
|
[] |
no_license
|
khanna7/BARS
|
c3ab28354f4bb711c7d8307486194769454c86ff
|
6829f0da638c0c839c40dd6f6b84a645c62f4446
|
refs/heads/master
| 2023-07-06T06:33:57.160645
| 2023-06-28T23:17:24
| 2023-06-28T23:17:24
| 40,013,171
| 6
| 2
| null | 2016-05-17T17:08:43
| 2015-07-31T16:01:07
|
C
|
UTF-8
|
R
| false
| false
| 571
|
r
|
uncertain-parameters.R
|
## List of uncertain model parameters with their plausible ranges.
#####################
## Transmission Parameters
num.sex.acts.base <- 2.4 ##vary between 0.5 and 3
## acute, chronic, late multipliers: might also be varied, but i am not sure about the ranges
#####################
## Testing and diagnosis
non.testers.prop <- 0.25 ## (vary between 1% and 35%)
#####################
## Socioeconomic status
## BUG FIX: the assignment was left dangling ("insurance.prop <-"); since R
## skips comments, it parsed the NEXT assignment as its right-hand side, so
## insurance.prop silently became equal to prep.use.rate (0.06).
## NA is used as an explicit placeholder until a mean value is chosen.
insurance.prop <- NA ## searching for a good mean value, will decide range later.
#####################
## PrEP
prep.use.rate <- 6/100 ## (vary between 0 and 30%)
|
d1a9448fec01f9c6b77772e526a30fdea3f74ac5
|
14d2380e1ed07afd73795c3f302a05aca3c28b43
|
/simulateSVM.R
|
8e74671f715217dee819a14c022c01ee45bb3737
|
[] |
no_license
|
jt86/r-project
|
e5b4f971a8f95ce1c057cddfad6c2edec5f82d92
|
5403befe8523555c58fc1f19e001b91abaa53ceb
|
refs/heads/master
| 2021-01-12T11:12:05.947828
| 2016-11-24T16:52:09
| 2016-11-24T16:52:09
| 72,865,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,725
|
r
|
simulateSVM.R
|
library(e1071)
set.seed(0)
# i <- commandArgs()[1]
i <- 1
# Tune an RBF-kernel SVM over a (gamma, cost) grid using repeated random
# validation splits.
#   X: feature matrix; Y: labels coded as -1 / +1.
#   cost, gamma: candidate grids; gamma is rescaled by the median pairwise
#     squared distance of X (the "median heuristic" for RBF bandwidth).
#   percentage_of_data_for_validation: fraction of samples PER CLASS held out
#     in each of the 5 splits.
# Returns a list with the model refit on all data at the best (gamma, cost),
# the best mean validation error, and the chosen hyper-parameters.
tuneParametersVal <- function(X, Y, cost = exp(seq(-3, 7, length = 5)),
gamma = exp(seq(-3, 3, length = 5)), percentage_of_data_for_validation = 0.1) {
mat <- as.matrix(dist(X))^2 # pairwise squared Euclidean distances
med <- median(mat[ upper.tri(mat) ])
gamma <- gamma / med
i_pos <- which(Y == 1)
i_neg <- which(Y == -1)
half <- round(percentage_of_data_for_validation * length(Y))
errors <- matrix(0, length(gamma), length(cost))
# 5 random class-balanced splits; per-cell validation errors are summed here
# and averaged after the loop.
for (fold in 1 : 5) {
sample_pos = sample(i_pos, half)
sample_neg = sample(i_neg, half)
s = sample(c(sample_pos, sample_neg))
Xval <- X[ s, ]
Yval <- Y[ s ]
s_remain <- (1 : nrow(X))[ -c(s) ]
Xtrain <- X[ s_remain, ]
Ytrain <- Y[ s_remain ]
for (i in 1 : length(gamma))
for (j in 1 : length(cost)) {
model <- svm(Xtrain, as.factor(Ytrain), kernel = "radial", gamma = gamma[ i ], cost = cost[ j ],
scale = FALSE)
errorTest <- mean(predict(model, Xval) != Yval)
errors[ i, j] <- errors[ i, j] + errorTest
cat(".")  # progress indicator, one dot per grid cell
}
cat("\n")
}
errors <- errors/5.0  # mean validation error over the 5 splits
# Pick the (gamma, cost) pair with the lowest mean error; ties are resolved
# in favor of the last candidate scanned (>= comparison).
bestCost <- NULL
bestGamma <- NULL
bestError <- +Inf
for (i in 1 : length(gamma))
for (j in 1 : length(cost)) {
if (bestError >= errors[ i, j ]) {
bestError <- errors[ i, j]
bestCost <- cost[ j ]
bestGamma <- gamma[ i ]
}
}
#retrain the model on the full data at the selected hyper-parameters
model <- svm(X, as.factor(Y), kernel = "radial", gamma = bestGamma, cost = bestCost, scale = FALSE)
list(model = model, bestError = bestError, bestGamma = bestGamma, bestCost = bestCost)
}
print(i)  # dataset index being processed
#Loading the data; expects a list 'data' with x, itrain, itest, y_train, y_test
load(paste("./data/",i,"data.dat",sep = ""))
Xtrain <- data$x[ data$itrain, ]
Ytrain <- as.vector(c(data$y_train))
Xtest <- data$x[ data$itest, ]
Ytest <- as.vector(c(data$y_test))
#zero mean unit variance normalization -- the test set is scaled with TRAIN statistics
meanTrain <- apply(Xtrain, 2, mean)
sdTrain <- apply(Xtrain, 2, sd)
sdTrain[ sdTrain == 0 ] <- 1  # guard against division by zero for constant features
Xtrain <- (Xtrain - matrix(meanTrain, nrow(Xtrain), ncol(Xtrain), byrow = TRUE)) /
matrix(sdTrain, nrow(Xtrain), ncol(Xtrain), byrow = TRUE)
Xtest <- (Xtest - matrix(meanTrain, nrow(Xtest), ncol(Xtest), byrow = TRUE)) /
matrix(sdTrain, nrow(Xtest), ncol(Xtest), byrow = TRUE)
#SVM baseline: tune hyper-parameters (timed), then evaluate on the test set
time <- system.time( ret <- tuneParametersVal(Xtrain, Ytrain))
print(ret)
resultRet <- ret
ret <- ret$model
errorTest <- mean(predict(ret, Xtest) != Ytest)
# Persist test error and wall-clock tuning time for this dataset index
write.table(errorTest, file = paste("./results/SVM/",i,"_errorTest_X_val.txt",sep = ""), row.names = F, col.names = F, append = FALSE)
write.table(t(time), file = paste("./results/SVM/",i,"_time_X_val.txt",sep = ""), row.names = F, col.names = F, append = FALSE)
|
630c458a4806cc5b95fcab7c75cb634518484129
|
298c7371b27659a18775e402c88877b8b6ae2f40
|
/sqm/R/exClassifed.R
|
67bd196500209ae12257fbaccd5568140edfc63c
|
[] |
no_license
|
lhsego/sUtils
|
371db06261e4bcb5c977e34f862c7d0a3708f147
|
bead4267ccb782982159fb0b6ed273caae70a850
|
refs/heads/master
| 2021-08-30T08:12:48.040566
| 2017-12-17T00:00:15
| 2017-12-17T00:00:15
| 112,430,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 231
|
r
|
exClassifed.R
|
##' Example data (TODO: Document this)
##'
##' Some more description
##'
##' @name exClassified
##' @docType data
##' @format TODO
##' @keywords datasets
##' @examples
##' # FIX: example previously used the misspelled name 'exClassifed', which
##' # does not match @name and would fail R CMD check's example run
##' data(exClassified)
##' str(exClassified)
NULL
|
38c33d6ca5e2bbaa4d10af0551ce029d0c6f8dc1
|
7e21929e11598cb2c811361e2632a13f411ee98c
|
/plot1.R
|
d448146e7b93971b9c1d9e41cce1120f13e42579
|
[] |
no_license
|
JoeMerengues/ExData
|
a55f31b746f00ecf6b43f7e005da85f7166eb682
|
8bf66a73a78fd9f7cb8c00a40f4ab40e73f9704f
|
refs/heads/master
| 2021-01-10T16:39:21.955959
| 2015-11-22T20:41:54
| 2015-11-22T20:41:54
| 46,677,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 487
|
r
|
plot1.R
|
# Read the input data files (NEI = PM2.5 emission records, SCC = source codes)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Aggregate total emissions per year
aggTotals <- aggregate(Emissions ~ year,NEI, sum)
# Draw the bar plot; emissions are scaled to millions of tons for the axis
barplot(
(aggTotals$Emissions)/10^6,
names.arg=aggTotals$year,
xlab="Year",
ylab="PM2.5 Emissions (10^6 Tons)",
main="Total PM2.5 Emissions From All US Sources"
)
# Save the current plot to a PNG file
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
|
579016ac56d62b273d5a73d328e79b257babf521
|
ba65d8b42dfce42e1a4594d5a58a815194082112
|
/man/callFilters.Rd
|
f425a02e177d8e80efdc290f813050a1be556f76
|
[
"MIT"
] |
permissive
|
acc-bioinfo/TMBleR
|
b4ac594173ecc2ead98fd19696136f0d235065a3
|
f3ded88b111b8db0867222aaa8be4bcd9fe8e80d
|
refs/heads/main
| 2023-06-19T22:21:49.508537
| 2021-07-16T16:42:20
| 2021-07-16T16:42:20
| 378,995,779
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,810
|
rd
|
callFilters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/callFilters.R
\name{callFilters}
\alias{callFilters}
\title{Filters variants according to input arguments}
\usage{
callFilters(
vcf,
assembly,
design,
vaf.cutoff,
remove.cancer,
remove.nonexonic,
tsList,
variantType
)
}
\arguments{
\item{vcf}{\code{CollapsedVCF} object}
\item{assembly}{human genome assembly: hg19 or hg38}
\item{design}{a \code{GRanges} object containing WES or panel design}
\item{vaf.cutoff}{minimum value of variant allele frequency accepted}
\item{remove.cancer}{logical value 'TRUE' or 'FALSE' indicating whether or
not to remove cancer variants, which is variants described in COSMIC or
truncating mutations in tumor suppressors}
\item{remove.nonexonic}{logical value 'TRUE' or 'FALSE' indicating whether or
not to remove SNV mapped in non exonic regions}
\item{tsList}{path to file containing list of tumor suppressors. If not
provided a list of 1217 tumor suppressors from the TSgene2 database
(<https://bioinfo.uth.edu/TSGene/>) is used.}
\item{variantType}{type of variant to remove: synonymous, frameshift or
nonsense}
}
\value{
Returns a \code{list} with the following elements: a \code{GRanges},
\code{CollapsedVCF}, \code{data.frame} object containing variants passing the
filter, a \code{character string} describing the applied filter (if any), and a
\code{GRanges} or \code{character vector} with the sequencing design.
}
\description{
This function calls different filtering functions on the input vcf object
according to the input arguments and gives in output a list with the three
elements: an object containing variants which passed the filter, a character
string describing the applied filter (if any) and an object containing the
sequencing design
}
\author{
Laura Fancello
}
|
53ffa1b6f479a7e0ef1e8435dd5d4cd541f6f843
|
b72a579eddbd2e20a0d154a4704fa28dc89adf5f
|
/code/LD_simulation/example_code/simulate_LD_data.R
|
eb9f4cfe46b238b23cc6342f3bf0ecf5795cdda1
|
[] |
no_license
|
andrewhaoyu/multi_ethnic
|
cf94c2b02c719e5e0cbd212b1e09fdd7c0b54b1f
|
d1fd7d41ac6b91ba1da8bb8cd1b2b0768f28b984
|
refs/heads/master
| 2023-06-24T20:47:18.268972
| 2023-06-13T15:30:14
| 2023-06-13T15:30:14
| 214,069,397
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,918
|
r
|
simulate_LD_data.R
|
#simulate genotype data by chromosome and by ethnic groups
#since hapgen2 required at least one disease SNP, we need to know the position information for one SNP
#simulate data for EUR,AFR, AMR, EAS, SAS
#-tag command allows you to subset SNPs to a subset
#but I notice that the -tag command makes hapgen2 really slow
#so I just simulate all the SNPs and then subset the tag snps by myself
library(data.table)
eth <- c("EUR","AFR","AMR","EAS","SAS")
code <- rep("c",length(eth)*10000)
temp <- 1
n <- c(180,180,180,180,180)
#tag SNPs are the SNPs we need
tag_all <- list()
for(i in 1:length(eth)){
tag_temp <- list()
for(j in 1:22){
tag_temp[[j]] <- read.table(paste0("/data/zhangh24/KG.impute2/tag/",eth[i],"_chr",j,".tag"),header=F)
}
tag_all[[i]] <- tag_temp
}
#for put chr and ethnic groups in inner loop to avoid the same start time for hapgen2
#hapgen2 use the start time to set random seed
# Build one hapgen2 command line per (replicate, chromosome, ethnicity).
# Chromosome and ethnicity vary in the inner loops so consecutive commands do
# not share a start time (hapgen2 seeds its RNG from the start time).
for(k in 1:100){ #k is the number of replicate
for(j in 1:22){ #j is the chromosome number
for(i in 1:5){ #i is the eth (EUR, AFR, AMR, EAS, SAS)
tag <- tag_all[[i]][[j]]
# hapgen2 requires at least one "disease" SNP: the first tag SNP position
# (tag[1,1]) is passed to -dl with neutral relative risks (1 1 1)
code[temp] <- paste0("/data/zhangh24/software/hapgen2 ",
"-m /data/zhangh24/KG.impute2/1000GP_Phase3/genetic_map_chr",j,"_combined_b37.txt ",
"-l /data/zhangh24/KG.impute2/1000GP_Phase3/1000GP_Phase3_chr",j,".legend ",
"-h /data/zhangh24/KG.impute2/",eth[i],"/chr",j,".hap ",
"-o /data/zhangh24/multi_ethnic/result/LD_simulation/",eth[i],"/chr",j,"_",k," ",
"-n ",n[i]," 1 -dl ",tag[1,1]," 1 1 1 -no_haps_output"
)
temp = temp+1
}
}
}
code <- code[1:(temp-1)]  # trim unused preallocated slots
write.table(code,file = paste0("/data/zhangh24/multi_ethnic/code/LD_simulation/simulate_LD_data_other_eth.sh"),row.names = F,col.names = F,quote=F)
#alternative way is to add the tag flag to hapgen2, but it's very slow
code <- rep("c",length(eth)*10000)
temp <- 1
n <- c(180,180,180,180,180)
for(k in 1:100){
for(j in 22){
for(i in 1){
tag <- read.table(paste0("/spin1/users/zhangh24/KG.impute2/tag/chr",j,".tag"),header=F)
code[temp] <- paste0("/data/zhangh24/software/hapgen2 ",
"-m /spin1/users/zhangh24/KG.impute2/1000GP_Phase3/genetic_map_chr",j,"_combined_b37.txt ",
"-l /data/zhangh24/KG.impute2/1000GP_Phase3/1000GP_Phase3_chr",j,".legend ",
"-h /data/zhangh24/KG.impute2/",eth[i],"/chr",j,".hap ",
"-o /data/zhangh24/multi_ethnic/result/LD_simulation/",eth[i],"/chr",j,"_",k," -n ",n[i]," 1 -dl ",tag[1,1]," 1 1 1 ",
"-t /spin1/users/zhangh24/KG.impute2/tag/chr",j,".tag -no_haps_output")
temp = temp+1
}
}
}
code <- code[1:(temp-1)]
write.table(code,file = paste0("/data/zhangh24/multi_ethnic/code/LD_simulation/simulate_LD_data.sh"),row.names = F,col.names = F,quote=F)
|
d8f565e388a687e4f47f691d1198ac1d28cf53ce
|
dd02a0e78e5d1f18dbe1411ce2e74e50e3269152
|
/main.R
|
d02e90fc5fa4da6f7c04052c620de83a7d20ec0b
|
[] |
no_license
|
John-Snyder/Bayesian-SSURGO-Prediction
|
e30836e0a27b291b478244cd86fda1203b396bfc
|
b424c61892cf0e62885426afef788f6d5777d14b
|
refs/heads/master
| 2020-05-01T13:01:40.990142
| 2019-04-02T15:35:37
| 2019-04-02T15:35:37
| 177,479,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,591
|
r
|
main.R
|
# sudo yum install libcurl-devel openssl-devel libxml2-devel udunits2-devel
#install.packages(c("httr","jsonlite"))
BartMemoryInGB <- 30
MemToJavaParam <- paste0("-Xmx",BartMemoryInGB,"g")
options(java.parameters = MemToJavaParam)
library(bartMachine)
set_bart_machine_num_cores(30)
source("functions.R")
library(soilDB)
library(dplyr)
library(ALEPlot)
YLD_Data <- read.csv("CornYLD_County_2015to2018.csv")
YLD_Data <- YLD_Data %>% filter(Year==2018, !grepl(pattern = "OTHER",YLD_Data$County))
CountyList <- paste0(YLD_Data$County," county, ",YLD_Data$State) %>% tolower
# N = NH4 + NO3
DBcols <-
"compname,
taxclname, taxorder, taxsuborder, taxgrtgroup, taxsubgrp,taxmoistscl,
areatypename, areaname,
frag3to10_r, claytotal_r, om_r, caco3_r, gypsum_r, sar_r, ec_r, cec7_r, sumbases_r, ph1to1h2o_r, ph01mcacl2_r,
freeiron_r, feoxalate_r, extracid_r, extral_r, aloxalate_r, pbray1_r, ptotal_r, awc_r"
library(parallel)
start <- Sys.time()
cl <- makePSOCKcluster(30)
setDefaultCluster(cl)
clusterEvalQ(NULL, library(soilDB))
clusterEvalQ(NULL, library(dplyr))
clusterExport(NULL, c('YLD_Data','DBcols','CountyList','GetDatForCounty'))
dat2 <- parLapply(NULL, 1:nrow(YLD_Data), function(z) GetDatForCounty(z, YLD_Data=YLD_Data,DBcols=DBcols, CountyList = CountyList))
dat2 <- dat2[which(lapply(dat2,ncol) %>% unlist == max(lapply(dat2,ncol) %>% unlist))]
stopCluster(cl)
YLD_Data <- do.call("rbind", dat2)
Sys.time() - start
# Quick check: mean soil pH (1:1 water) by state, shown in the data viewer.
# BUG FIX: the pipeline previously ended in "View~" -- the stray tilde made R
# parse this expression together with the FOLLOWING assignment as one formula,
# so neither the View nor that assignment was ever evaluated.
YLD_Data %>%
  group_by(State) %>%
  summarise(Frequency = mean(ph1to1h2o_r)) %>%
  View()
YLD_Data <- apply(YLD_Data,2,function(x) as.numeric(as.character(x)))
keep <- !apply(YLD_Data,2,function(x) sum(is.na(x))>.5*length(x))
YLD_Data <- YLD_Data[,keep]
YLD_Data <- YLD_Data %>% data.frame %>% dplyr::select(-c("Year","State.ANSI","Ag.District.Code","County.ANSI","watershed_code" )) %>%
filter(!is.na(Value))
X.BART <- YLD_Data %>% dplyr::select(-"Value") %>% select_if(is.numeric)
Y.BART <- YLD_Data %>% dplyr::select("Value") %>% unlist
##build BART regression model
bart_machine = bartMachine(X.BART , Y.BART, num_trees = 50, num_burn_in = 1000,
num_iterations_after_burn_in = 3000, use_missing_data = TRUE)
summary(bart_machine)
investigate_var_importance(bart_machine)
#ARR <- bartMachineArr(bart_machine, R = 10)
X.ALE <- X.BART %>% na.omit
X.ALE %>% names
X.ALE[,11] %>% hist
ALEPlot(X = X.ALE, X.model = bart_machine, pred.fun = PostPredFun, J=11, K = 100, NA.plot = TRUE)
aac <- bart_machine_get_posterior(bart_machine,new_data = X.ALE)$y_hat_posterior_samples
asd <- PostPredFun(bart_machine,X.ALE,1)
|
777d9c15e7949582bb162c53824357cc09d262b3
|
9d1da35b09703cdbe441e02413f69a66e3c804d8
|
/TimeSeriesAnalysisLibrary/packages/TimeSeriesTransformation/R/class.ArcSineTransformation.R
|
bc01de0bcd9c20d039ebf1ca743494c329bc437f
|
[
"MIT"
] |
permissive
|
jmorenov/TimeSeriesAnalysis
|
6fd50463fea78ba0be2f2eb320b6fe2ecabb5482
|
e9b357666e1700a96c715d78279a33fde70c207a
|
refs/heads/master
| 2020-12-02T18:01:30.524431
| 2017-07-09T09:23:52
| 2017-07-09T09:23:52
| 96,461,687
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,437
|
r
|
class.ArcSineTransformation.R
|
#' ArcSineTransformation
#'
#' Class extending \code{\link{Transformation}} that implements the arc sine
#' transformation: every value of the input time series is replaced by
#' \code{asin(value)}.
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @format A ArcSineTransformation Class of type \code{\link{R6Class}}.
#' @section Warning: Warnings.
#' @section Extend: \code{\link{Transformation}}
#' @section Methods:
#' \describe{
#'   \item{\code{apply(data)}}{Applies \code{asin} to all values of \code{data}
#'     and returns the transformation result.}
#' }
#' @author Javier Moreno <javmorenov@@gmail.com>
ArcSineTransformation <- R6::R6Class("ArcSineTransformation", inherit = Transformation,
  private = list(),
  public = list(
    # Register this transformation under its name and description.
    initialize = function() {
      super$initialize("ArcSineTransformation", "Arc Sine transformation")
    },
    # Apply asin element-wise and wrap the output in a new TimeSeriesData.
    apply = function(data) {
      outcome <- super$apply(data)
      transformed_series <- TimeSeriesData$new(asin(data$getAllValues()))
      outcome$set(transformed_series)
      outcome
    }
  ))
|
5a71bd3ad1d2a8599b5ee63fbf7285e885c8afea
|
7a0597d7acedc339111100388435509bbd6d471b
|
/modules/recoveryPlots.R
|
bfa65eefc39165cd863f1055b4138cdd2a836724
|
[] |
no_license
|
hdsu-bioquant/ShinyButchR
|
d583bc1a02fb09ea89392db11d053dd9d1d5e983
|
dc0633f889ead4bf97889453be9cb182858ea04a
|
refs/heads/master
| 2023-03-03T23:50:52.889197
| 2021-02-19T12:37:01
| 2021-02-19T12:37:01
| 252,409,838
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,280
|
r
|
recoveryPlots.R
|
#------------------------------------------------------------------------------#
# Recovery plots Server & UI #
#------------------------------------------------------------------------------#
# UI for the recovery-plots shiny module: a dynamic title plus the plot area.
# id: module namespace id (must match the id passed to recovplotsServer).
recovplotsUI <- function(id) {
tagList(uiOutput(NS(id, "title_sel_K")),
plotOutput(NS(id, "plot_recoveryplots")))
#plotOutput(NS(id, "plot_recoveryplots"))
}
# Server logic for the recovery-plots shiny module.
#   nmf_obj: reactive returning the NMF result (provides HMatrix() and @OptKStats)
#   annot_react: reactive returning the sample annotation data frame
#     (first column assumed to hold sample IDs -- TODO confirm with caller)
recovplotsServer <- function(id, nmf_obj, annot_react) {
moduleServer(id, function(input, output, session) {
# Re-render whenever the NMF object, the selected K, or the chosen
# annotation column changes
observeEvent({
nmf_obj()
input$sel_K
input$inputannot_selcols
}, {
#req(nmf_obj())
output$plot_recoveryplots <- renderPlot({
req(nmf_obj())
req(input$sel_K)
req(input$sel_K %in% nmf_obj()@OptKStats$k)
k <- as.numeric(input$sel_K)
hmat <- HMatrix(nmf_obj(), k = k)
annot <- annot_react()
# annot <- annot[match(colnames(hmat), annot[,1]), -1, drop=FALSE]
# annot <- annot[, input$inputannot_selcols_recov]
# make factor of selected annotation; FALSE acts as a sentinel for
# "selected column not present", which fails the categorical check below
if (!input$inputannot_selcols %in% colnames(annot)) {
annot_char <- FALSE
} else {
annot_char <- setNames(annot[, input$inputannot_selcols],
annot[,1])
}
# Recovery plots only make sense for categorical annotations;
# otherwise show a placeholder message
if (is.character(annot_char) | is.factor(annot_char)) {
recovery_plot(hmat, annot_char)
} else {
ggplot() +
annotate("text", x = 0, y = 0,
label = c("Please select a categorical variable")) +
theme_void()
}
},
height = 300
)
})
})
}
##----------------------------------------------------------------------------##
## Recovery plots function ##
##----------------------------------------------------------------------------##
# Normalized area under the recovery curve for each rank vector in rnk.list.
#   rnk.list: (named) list of numeric rank vectors.
#   max: rank ceiling; if NULL, each vector uses its own maximum.
# For each vector, sums (max - rank) over the ranks <= max, then normalizes by
# the number of such ranks and by max. Returns a named numeric vector
# (names taken from rnk.list). NaN when no rank falls within [0, max],
# matching the 0/0 of the original while-loop implementation.
# Improvements over the original: the scan is vectorized (the old loop read
# rnk[i] before checking i against length(rnk)), and vapply replaces sapply
# for a type-stable result.
auc <- function(rnk.list, max = NULL) {
  vapply(rnk.list, function(rnk) {
    top <- if (is.null(max)) max(rnk) else max
    hits <- rnk[rnk <= top]
    sum(top - hits) / length(hits) / top
  }, numeric(1))
}
# input named factor/character
# List of named factors/character
# data frame, col names of selected annotation. Use first column or row names as IDss
# Recovery (step) plot of an NMF exposure matrix against a categorical sample
# annotation.
#   h: signatures-x-samples exposure matrix (row = signature, col = sample).
#   annot: named factor/character vector; names are sample IDs matching
#     colnames(h) -- TODO confirm with callers.
# For each annotation level and each signature, samples are ranked by
# decreasing exposure; the positions of the level's samples in that ranking
# form a recovery curve. The curve's AUC is compared against 500 random
# samplings to obtain a z-score / p-value. Returns a ggplot object.
recovery_plot <- function(h, annot){
# Add sig IDs if missing
if (is.null(rownames(h))) {
rownames(h) <- paste0('Sig ',1:nrow(h))
}
# check annot length == ncol(h) -- validation currently only checks the type
if (is.factor(annot) | is.character((annot))) {
annot_list <- list(main_annot = as.factor(annot))
} else {
stop("Not a valid annotation input")
}
n_samples <- ncol(h)
#which.a = annotID
#annot.factor <- annot[,annotID]
## -------------------------------------------------------------------##
## Find ranks ##
##--------------------------------------------------------------------##
# cycle all annots
ALL_RNKS_list <- lapply(annot_list, function(annot_factor){
# cycle annot levels
lIds <- setNames(levels(annot_factor), levels(annot_factor))
ALL_RNKS <- lapply(lIds,function(l) {
# cycle h matrix rows and find ranks of this level's samples in the
# decreasing-exposure ordering of each signature
lapply(setNames(1:nrow(h), rownames(h)),function(i) {
exp <- sort(h[i,],decreasing=TRUE) # sorted exposure
i_rnk <- match(names(annot_factor)[annot_factor==l], names(exp))
sort(i_rnk[!is.na(i_rnk)]) # keep steps/ranks
})
#print(RNKS)
#return(RNKS)
})
ALL_RNKS
})
## -------------------------------------------------------------------##
## Find AUC and P-value ##
##--------------------------------------------------------------------##
# cycle all annots
AUC_list <- lapply(ALL_RNKS_list, function(ALL_RNKS){
AUC_singleannot <- lapply(ALL_RNKS,function(r) {
# Null distribution: AUC of 500 random rank sets of the same size,
# summarized by mean and sd per signature
AUC_RAND <- do.call("rbind",lapply(r, function(x) {
l = lapply(1:500,function(i) {
sample(1:n_samples, length(x))
})
aux = auc(l, max = n_samples)
return(c(mean(aux), sd(aux)))
}))
# AUC
#AUC <- lapply(ALL_RNKS, auc, max = n_samples)
AUC <- auc(r, max = n_samples)
#print(AUC)
# Find P - value: two-sided via z-score against the random AUCs
AUC_df <- data.frame(AUC_RAND, AUC)
colnames(AUC_df) = c('mean','sd','val')
AUC_df <- AUC_df %>%
tibble::rownames_to_column("SignatureID") %>%
mutate(z = (val - mean)/sd) %>%
mutate(p = ifelse(z>0, pnorm(z, lower.tail=FALSE), pnorm(z)))
#Return random and AUC - P-val
return(AUC_df)
})
bind_rows(AUC_singleannot, .id = "Annotation_level")
})
AUC_allannot <- bind_rows(AUC_list, .id = "Annotation")
# Add min and max to rank, for step plot (curve starts at 0 / ends at n_samples)
# cycle all annots
ALL_RNKS_list <- lapply(ALL_RNKS_list, function(ALL_RNKS){
# cycle annot levels
lapply(ALL_RNKS, function(x){
# cycle h matrix rows and find ranks
lapply(x, function(xi) c(0, xi, n_samples))
})
})
#print(ALL_RNKS_list)
# Bind ranks and p vals (long data frame - p val repeated per rank step)
ALL_RNKS_df <- bind_rows(lapply(ALL_RNKS_list,
bind_rows,.id = "Annotation_level"),
.id = "Annotation") %>%
pivot_longer(-c("Annotation", "Annotation_level"), names_to = "SignatureID", values_to = "Rank") %>%
left_join(AUC_allannot, by = c("Annotation", "Annotation_level", "SignatureID"))
# Step plot: one curve per signature, faceted by annotation level; the
# diagonal is the expected curve under random ordering. Significant curves
# (p < 0.05) are drawn solid and thicker.
gg_recov <- ALL_RNKS_df %>%
group_by(Annotation, Annotation_level, SignatureID ) %>%
mutate(Frequency = c(seq(0, 1, length.out = n()-1), 1)) %>% # all y axis step
mutate(issignif = p < 0.05) %>%
ggplot(aes(x = Rank, y = Frequency, color = SignatureID,
linetype = issignif, size = issignif)) +
# geom_step(data = function(x){x %>% filter(!issignif)}, size = 0.5) +
# geom_step(data = function(x){x %>% filter(issignif)}, size = 1.5) +
geom_step() +
geom_abline(intercept = 0, slope = 1/n_samples) +
#facet_wrap(Annotation ~ Annotation_level) +
facet_wrap(.~Annotation_level) +
# chance line style
scale_linetype_manual(name = c("Significant p-val<0.05"),
values = c("TRUE" = 1, "FALSE" = 2)) +
scale_size_manual(name = c("Significant p-val<0.05"),
values = c("TRUE" = 1, "FALSE" = 0.5)) +
#theme_bw() +
theme_cowplot() +
panel_border(color = "grey40", size = 1, linetype = 1,
remove = FALSE)
#return(ALL_RNKS_df)
return(gg_recov)
}
# ##----------------------------------------------------------------------------##
# ## Recovery plots ##
# ##----------------------------------------------------------------------------##
#
#
# recov_df <- recovery_plot(hmatrix_norm_ids, setNames(rna_annotation$Celltype, rna_annotation$sampleID))
#
#
#
# #theme(panel.grid = element_blank())
|
81f81e25bc2f51e4692c183cec8f4c3ca3face55
|
db456f35d9012cdb1d893244783d3550d7772ee5
|
/plots/diss-ctx-susp/plot.R
|
ea14b1a87f591383540e0db65c87015aa902c292
|
[] |
no_license
|
TUD-OS/M3-bench
|
45aca4cad7b42391228e89f46f6192d5474e310e
|
4288d21f9895ed1f81cb3c1a2c92292069b18ad9
|
refs/heads/master
| 2020-04-07T12:52:12.297424
| 2018-11-20T12:33:05
| 2018-11-20T12:33:05
| 158,384,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,227
|
r
|
plot.R
|
library(extrafont)
library(RColorBrewer)
source("tools/helper.R")
# Command-line arguments: [1] output PDF path, [2] times table, [3] stddev table
args <- commandArgs(trailingOnly = TRUE)
scaling <- 1.5
namescale <- 1.6
colors <- brewer.pal(n = 4, name = "Pastel1")
# Values divided by 1000 for the "Time (µs)" axis (inputs presumably in ns --
# TODO confirm against the benchmark output)
times <- read.table(as.character(args[2]), header=F, sep=" ") / 1000
stddev <- read.table(as.character(args[3]), header=F, sep=" ") / 1000
pdf(as.character(args[1]), width=10, height=4)
par(cex.lab=scaling, cex.axis=scaling, cex.main=scaling, cex.sub=scaling, family="Ubuntu")
par(mar=c(5,10.5,3,1))
# Horizontal stacked bars: one bar per configuration, one segment per phase
plot <- barplot(t(times), beside=F, horiz=T,
xlim=c(0,11), xlab="Time (µs)", axes=F,
col=colors,
cex.names=namescale, las=1, mgp=c(3, 1, 0),
names.arg=c("M³-C (local)", "M³-C (rem-sh)", "M³-C (rem-ex)", "M³-B (rem-sh)", "M³-B (rem-ex)",
"M³-A (rem-sh)", "M³-A (rem-ex)", "NOVA (remote)", "NOVA (local)"))
error.bar(plot, colSums(t(times)), t(stddev), horizontal=T)  # helper from tools/helper.R
axis(1, at = seq(0, 11, 1), las = 1)
# legend drawn in a fresh overlay plot region at the top of the figure
par(fig=c(0,1,0,1), oma=c(0,0,0,0), mar=c(0,0,0.1,0), new=TRUE)
plot(0, 0, type="n", bty="n", xaxt="n", yaxt="n")
legend("top", c("Wake", "CtxSw", "Fwd", "Comm"), xpd=TRUE, horiz=T, bty="n",
inset=c(0,0), cex=namescale, fill=colors)
dev.off()
embed_fonts(as.character(args[1]))
|
1342615b2193b64ae351d6a728d7a74e9801456f
|
256bc4a27136ea06c8c1588b1af30c1c1b9fff76
|
/Plotting_CSV.R
|
edc2f353295666f59998c8ec28210b91cf767d6b
|
[] |
no_license
|
pappakrishnan/Income-Distribution---R-file-
|
95e82e5c26fba2ca9db098869379b664307f1c91
|
70452864981a612e5c538b2347e6d3501d8bf6a9
|
refs/heads/master
| 2021-01-20T22:31:20.618884
| 2015-03-21T00:19:59
| 2015-03-21T00:19:59
| 24,928,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
Plotting_CSV.R
|
# Read user data and show basic distributions of age and income.
data<-read.table("UserData.csv",TRUE,",")  # positional args: header=TRUE, sep=","
#head(data)
par(mfrow=c(2,2))  # 2x2 panel layout
plot(data$age,data$income,xlab="Age",ylab="Income")
hist(data$age,xlab="Age",ylab="Number of people")
boxplot(data$income,ylab="Income")
summary(data$income)
boxplot(data$age,ylab="Age")
summary(data$age)
|
9599df05a60bc02c53a0cd46b07d9b88a6756d6b
|
3ee150e7597d341cc6aa2c4f852717cc714a79b0
|
/cachematrix.R
|
0a34437b0f1b968f28c026dd8586083f53cb9446
|
[] |
no_license
|
latentvariable/ProgrammingAssignment2
|
9486fc96d8c9cf4295608b195684aa9df642382e
|
fcbbd921e60af77e2b87e009db467b614d98d4a2
|
refs/heads/master
| 2021-01-17T09:03:35.036190
| 2017-03-05T14:33:47
| 2017-03-05T14:33:47
| 83,975,084
| 0
| 0
| null | 2017-03-05T14:12:24
| 2017-03-05T14:12:24
| null |
UTF-8
|
R
| false
| false
| 1,465
|
r
|
cachematrix.R
|
# R Programming Assignment - Week 3: caching the inverse of a matrix.

# Build a "cache matrix": a list of closures around a matrix 'm' that can also
# store its inverse, so the inverse has to be computed at most once.
makeCacheMatrix <- function(m = matrix()) {
  cached_inverse <- NULL   # inverse starts out uncomputed
  set <- function(new_matrix) {
    m <<- new_matrix
    cached_inverse <<- NULL   # matrix changed: invalidate the cached inverse
  }
  get <- function() m                                 # retrieve the matrix
  setinv <- function(inverse) cached_inverse <<- inverse  # store the inverse
  getinv <- function() cached_inverse                 # retrieve the inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

# Return the inverse of the cache matrix 'm', reusing the cached value when
# one is available; otherwise compute it with solve() and store it.
cacheSolve <- function(m, ...) {
  cached <- m$getinv()
  if (!is.null(cached)) {
    message("Getting Cached Data")
    return(cached)
  }
  inverse <- solve(m$get(), ...)
  m$setinv(inverse)
  inverse
}
|
7bef4e95220d6b376393f05f8b3ca207a6c64374
|
26c902fec6602d97aa759d17fc04f63cfec763ab
|
/data-raw/HCP_HB_rev.R
|
c87cbd5e8d825a43313364772482873537dc3b03
|
[] |
no_license
|
jacob-ogre/hcphb
|
a2c9d0231cee49798ea9f24c5946facb9fecbd62
|
6a8fcda2efad6eb7faea284b6633cd86e2827b5b
|
refs/heads/master
| 2020-12-25T13:23:17.499113
| 2017-03-21T10:36:06
| 2017-03-21T10:36:06
| 62,728,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,293
|
r
|
HCP_HB_rev.R
|
# BSD_2_clause
# library(dplyr)
# library(NLP)
# library(openNLP)
# library(pdftools)
# library(stringr)
rev <- pdftools::pdf_text("data-raw/HCP_handbook_rev_draft_28Jun2016.pdf")
length(rev)
main <- rev[13:length(rev)]
toc <- rev[1:12]
execsum <- rev[13:14]
acronyms <- rev[15:16]
glossary <- rev[17:42]
ch1 <- rev[44:57]
ch2 <- rev[58:76]
ch3 <- rev[77:104]
ch4 <- rev[105:123]
ch5 <- rev[124:131]
ch6 <- rev[132:140]
ch7 <- rev[141:162]
ch8 <- rev[163:169]
ch9 <- rev[170:213]
ch10 <- rev[214:247]
ch11 <- rev[248:273]
ch12 <- rev[274:282]
ch13 <- rev[283:296]
ch14 <- rev[297:331]
ch15 <- rev[332:342]
ch16 <- rev[343:359]
ch17 <- rev[360:372]
appA <- rev[373:383]
appB <- rev[384:388]
appC <- rev[389:391]
hcp_rev_foc <- list(execsum=execsum, glossary=glossary, ch1=ch1, ch2=ch2,
ch3=ch3, ch4=ch4, ch5=ch5, ch6=ch6, ch7=ch7, ch8=ch8,
ch9=ch9, ch10=ch10, ch11=ch11, ch12=ch12, ch13=ch13,
ch14=ch14, ch15=ch15, ch16=ch16, ch17=ch17, appA=appA,
appB=appB, appC=appC)
make_sentences <- function(ch) {
data <- paste(ch, collapse = "\f")
data <- stringr::str_split(data, pattern = "[_]+\n")
data <- data[[1]][length(data[[1]])]
s <- NLP::as.String(data)
stok <- openNLP::Maxent_Sent_Token_Annotator()
ann <- NLP::annotate(s, stok)
sents <- s[ann]
return(sents)
}
# get all sentences:
all_sent <- lapply(hcp_rev_foc, make_sentences)
lens <- unlist(lapply(all_sent, FUN = length))
hcp_rev_sentences <- lapply(all_sent, gsub, pattern = "\n", replacement = " ")
hcp_rev_sentences <- lapply(hcp_rev_sentences,
FUN = gsub,
pattern = "[ ]{2,}",
replacement = " ")
hcp_rev_sent <- hcp_rev_sentences
hcp_rev_all <- rev
devtools::use_data(hcp_rev_all, overwrite = TRUE)
devtools::use_data(hcp_rev_foc, overwrite = TRUE)
devtools::use_data(hcp_rev_sent, overwrite = TRUE)
# frs <- lapply(hcp_rev_sentences,
# FUN = str_match_all,
# pattern = "[0-9]+ FR [0-9]+")
# frs <- unlist(frs)
# frs
#
# cons <- lapply(hcp_rev_sentences,
# FUN = str_match_all,
# pattern = "\\w{0,10} [Cc]onsult[a-z]+ \\w{0,10}")
# cons <- unlist(cons)
# head(cons, 15)
|
99f0678eb10da8c3eb863a634c09e483428e8ca2
|
556620b3c33b25191a697a50bcb3c9a5dd045706
|
/Analysis Files/fake_med/PS_med.R
|
26f699dffc84a99f4413b219da71bb9d6ae8c51d
|
[] |
no_license
|
dgiova/PM2.5-Nonattainment
|
6a74133876f2c049be2b5674168eb4e33e29a485
|
64a087ae9f44f56f8bf834231452ceb2edd7026e
|
refs/heads/master
| 2023-01-19T12:22:22.082920
| 2020-11-20T07:55:49
| 2020-11-20T07:55:49
| 307,199,582
| 0
| 1
| null | 2020-10-25T22:17:37
| 2020-10-25T21:49:46
| null |
UTF-8
|
R
| false
| false
| 3,668
|
r
|
PS_med.R
|
set.seed(51)
load(file="~/PM2.5/pm_withps.Rda")
######## Prune observations that are out of range ###########
dat=subset(dat, outofrange==0)
dat$pscat = as.factor(dat$pscat)
######## Set the Working Directory so that output is saved in the right place ########
wd <- "~/PM2.5/fake_med/"
setwd(wd)
##############################################################
################ Specify the Array Parameters ################
##############################################################
outcomenames = c("total_death_FFS_MA.2012",
"COPD.2012", "CV_stroke.2012", "HF.2012", "HRD.2012", "IHD.2012",
"PVD.2012", "RTI.2012") ### The outcomes that will be considered
denominators = c("Tot_den_for_death_MA_FFS.2012", rep("Person_year_FFS.2012", length(outcomenames)-1))
outfiles = c("mort", "copd", "cvstroke", "hf", "hrd", "ihd", "pvd", "rti") ### To append the output R object name
##############################################################
###################### Array Starts Here #####################
##############################################################
j = as.numeric(Sys.getenv('SLURM_ARRAY_TASK_ID'))
print(j)
######## Choose which health outcome to use ########
dat$h = dat[, outcomenames[j]] ### Set h (outcome variable)
dat$denom = dat[, denominators[j]]
dat$baseout = dat[, paste(strsplit(outcomenames[j], "[.]")[[1]][1], "2004", sep = ".")]
dat$baserate = dat$baseout/dat$denom
######## Choose how to transform pollution ########
dat$y = log(dat$pmfu) #dat$fu - dat$base
dat$ytemp=dat$y
dat$ytemp[is.na(dat$ytemp)]= mean(dat$y, na.rm=TRUE)
######## Choose how to specify the models ########
### Medium Models
load('~/PM2.5/vars_with_imbalance.RData')
vars_with_imbalance
toadjust = vars_with_imbalance[!(vars_with_imbalance %in% c("pmbase", "Tot_den_for_death_MA_FFS.2012"))]
medformula = as.formula(paste("h ~", paste(c("pscat", "pmbase2002_2004", toadjust, "baserate", "ytemp", "offset(log(denom))"), collapse = "+")))
mh0=glm(medformula, family = poisson(link="log"), data = subset(dat, a==0))
mh1=glm(medformula, family = poisson(link="log"), data = subset(dat, a==1))
library(HEIfunctions)
q=2
n=dim(dat)[[1]]
nsamp = 51
thin = 1
alpha0prop=summary(mh0)$cov.unscaled*.8
alpha1prop=summary(mh1)$cov.unscaled
ypropsd=rep(.75, n*q)
#### Proposals for log(pm) ######
ypropsd[seq(1,n*q,q)] = log(dat$pmbase2002_2004)*.15
ypropsd[seq(2,n*q,q)] = log(dat$pmbase2002_2004)*.15
coords <- cbind(dat$Longitude, dat$Latitude)
phistart <- makephi(coords, 10)
philower <- makephi(coords, 4)
phiupper <- makephi(coords, 30)
tuning = list(A = 0.1, psi = 0.2, theta = 1, alpha0=.09*alpha0prop, alpha1=.09*alpha1prop, Y=ypropsd)
prior = list(KIG = rep(.5,2), psi = rep(.5,2), theta1 = rep(philower,2), theta2 = rep(phiupper,2))
starting = list(B=NULL, A = NULL, psi = NULL, theta = phistart, alpha0=mh0$coef, alpha1=mh1$coef)
pollutionformula = as.formula( paste("y ~", paste(all.vars(mh0$formula)[!(all.vars(mh0$formula) %in% c("h", "ytemp", "denom"))], collapse="+")) )
healthformula = as.formula( paste("h ~", paste(all.vars(mh0$formula)[!(all.vars(mh0$formula) %in% c("h", "ytemp", "denom"))], collapse="+")) )
starttime=proc.time()
pstratamod=principalstratmod(
formula = pollutionformula, data=dat, trt="a", coords,
formula_h = healthformula, denom="denom", nsamp, thin, tuning, prior, starting,
outputbinsize = 10, outputfilename = paste("pstratamod_temp_", outfiles[j], ".Rda", sep=""))
rtime=proc.time() - starttime
save(pstratamod, file = paste("pstratamod_", outfiles[j], ".Rda", sep=""))
|
d74364fa5305826453aeb0fbcc8d291a6fec80f9
|
f2cedce2f263a0054186982e8d32912abdaa81cb
|
/man/ModeCanada.Rd
|
9ec4a83c973dbf6c838b67ad60c60ea4f6b16e5f
|
[] |
no_license
|
ycroissant/mlogit
|
aed63c21eaec7cc086326686b89c426ea3996183
|
106803ef2da23b8966cdda5da526f0a1336a9dc6
|
refs/heads/master
| 2021-12-15T14:22:24.236444
| 2021-12-13T04:49:23
| 2021-12-13T04:49:23
| 196,389,801
| 7
| 3
| null | 2021-10-04T19:30:29
| 2019-07-11T12:26:08
|
R
|
UTF-8
|
R
| false
| true
| 1,470
|
rd
|
ModeCanada.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{ModeCanada}
\alias{ModeCanada}
\title{Mode Choice for the Montreal-Toronto Corridor}
\format{
A dataframe containing
\itemize{
\item case: the individual index,
\item alt: the alternative, one of train, car, bus and air,
\item choice: one if the mode is chosen, zero otherwise,
\item cost: monetary cost,
\item ivt: in vehicule time,
\item ovt: out vehicule time,
\item frequency: frequency,
\item income: income,
\item urban: urban,
\item noalt: the number of alternatives available.
}
}
\source{
kindly provided by S. Koppelman
}
\description{
A sample of 3880 travellers for the Montreal-Toronto corridor
}
\examples{
data("ModeCanada", package = "mlogit")
bususers <- with(ModeCanada, case[choice == 1 & alt == "bus"])
ModeCanada <- subset(ModeCanada, ! case \%in\% bususers)
ModeCanada <- subset(ModeCanada, noalt == 4)
ModeCanada <- subset(ModeCanada, alt != "bus")
ModeCanada$alt <- ModeCanada$alt[drop = TRUE]
KoppWen00 <- mlogit.data(ModeCanada, shape='long', chid.var = 'case',
alt.var = 'alt', choice = 'choice',
drop.index = TRUE)
pcl <- mlogit(choice ~ freq + cost + ivt + ovt, KoppWen00, reflevel = 'car',
nests = 'pcl', constPar = c('iv:train.air'))
}
\references{
\insertRef{BHAT:95}{mlogit}
\insertRef{KOPP:WEN:00}{mlogit}
\insertRef{WEN:KOPP:01}{mlogit}
}
\keyword{datasets}
|
29e65de949d624cbb627321b8d26253e2c9fa4d6
|
b2e83000cf9ee09a1904aa4a9530f237e903637f
|
/Make a Dashboard with 4 maps - setup.R
|
f0ff5ea5ccedc1e60a356eb75fa49a9931339471
|
[] |
no_license
|
bigck2/Census
|
2bd777b7c53bc67f376bc8fb06b002873a46b02c
|
40c6a5a4e8d519df82907db4229963ddfce51114
|
refs/heads/master
| 2021-05-15T03:37:44.766990
| 2018-01-07T22:41:29
| 2018-01-07T22:41:29
| 109,280,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,402
|
r
|
Make a Dashboard with 4 maps - setup.R
|
library(tidyverse) # for everything
library(stringr) # for strings
library(tidycensus) # to pull census data
library(tigris) # to get shapefiles
options(tigris_use_cache = TRUE)
library(sp) # for over() / sp objects
library(RColorBrewer) # for brewer.pal()
library(leaflet) # for mapping
library(rgeos) # for trueCentroids()
library(htmltools) # for htmlEscape()
census_api_key("f6259ec5d271471f6656ae7c66e2f41b867e5cb1")
my_vec <- c(total_population = "B01003_001",
inc_median_household_income = "B19013_001",
median_gross_rent = "B25031_001",
gr_to_income_median = "B25071_001")
# access the api to get the data
dat <- get_acs(geography = "zcta",
variables = my_vec,
survey = "acs5",
year = 2016)
# Create a named vector (opposite of my_vec) (a lookup vector)
my_vec_lookup <- names(my_vec)
names(my_vec_lookup) <- my_vec
# Index the lookup table / vector by the variable column
dat$var <- my_vec_lookup[dat$variable]
# create a column that is clean with just the zip code
dat <- dat %>%
mutate(zip = str_sub(NAME, -5))
# download shape files for ALL US zips
zips <- zctas(cb = TRUE)
# download shape files for ALL US states
my_states <- states(cb = TRUE, resolution = "20m")
# filter to just TX
tx <- my_states[my_states$NAME == "Texas", ]
# filter zips to only zips in TX
tx_zips <- over(zips, tx)
index <- !is.na(tx_zips$STATEFP)
tx_zips <- zips[index,]
rm(index)
rm(tx, my_states, zips)
rm(my_vec, my_vec_lookup)
dat <- dat %>%
select(-moe, -variable, -NAME)
dat <- dat %>%
spread(key = var, value = estimate)
# geo_join the spatialdata to the regular data
tx_zips <- geo_join(tx_zips, dat, "GEOID10", "zip")
# Figure out cut points ---------------------------------------------------
ggplot(data = dat, aes(x = gr_to_income_median)) + geom_histogram()
ggplot(data = dat, aes(x = inc_median_household_income)) + geom_histogram()
ggplot(data = dat, aes(x = median_gross_rent)) + geom_histogram()
ggplot(data = dat, aes(x = total_population)) + geom_histogram()
# gr_to_income_meidan
my_breaks <- c(0, 20, 30, 40, max(dat$gr_to_income_median, na.rm = TRUE))
my_labels <- c("0-20",
"21-30",
"30-40",
">=41")
tx_zips$rent_to_income <- cut(x = tx_zips$gr_to_income_median,
breaks = my_breaks,
labels = my_labels)
ggplot(data = tx_zips@data, aes(x = gr_to_income_median, fill = rent_to_income)) +
geom_histogram()
# inc_median_household
my_breaks <- c(0, 40000, 50000, 60000,
70000, 80000, 90000, 110000,
150000, max(dat$inc_median_household_income, na.rm = TRUE) )
my_labels <- c("<$40k",
"$41-50k",
"$51-60k",
"$61-70k",
"$71-80k",
"$81-90k",
"91-110k",
"$111-150k",
">$150k")
tx_zips$median_household_income <- cut(x = tx_zips$inc_median_household_income,
breaks = my_breaks,
labels = my_labels)
ggplot(data = tx_zips@data, aes(x = inc_median_household_income, fill = median_household_income)) +
geom_histogram()
# median_gross_rent
my_breaks <- c(0, 750, 1000,
1250, 1500, 1750,
2000, 2500, max(dat$median_gross_rent, na.rm = TRUE))
my_labels <- c("$0-750",
"$751-1,000",
"$1,001-1,250",
"$1,251-$1,500",
"$1,501-1,750",
"1,751-2,000",
"$2,001-2,500",
">$2,500")
tx_zips$median_rent <- cut(tx_zips$median_gross_rent,
breaks = my_breaks,
labels = my_labels)
ggplot(data = tx_zips@data, aes(x = median_gross_rent, fill = median_rent)) +
geom_histogram()
# total population
my_breaks <- c(0, 10000, 20000, 30000,
40000, 50000, 70000,
max(dat$total_population, na.rm = TRUE))
my_labels <- c("0-10k",
"11-20k",
"21-30k",
"31-40k",
"41-50k",
"51-70k",
">70k")
tx_zips$population <- cut(tx_zips$total_population,
breaks = my_breaks,
labels = my_labels)
ggplot(data = tx_zips@data, aes(x = total_population, fill = population)) +
geom_histogram()
rm(my_breaks, my_labels)
# Calculate Polygon Centroids ---------------------------------------------
# TODO Delete this code, it isn't really needed in mapping
# TODO: this could be useful to calculate the nearest 10 zip codes or something
# trueCentroids <- gCentroid(tx_zips, byid = TRUE)
#
# tx_zips$lon <- trueCentroids@coords[,1]
# tx_zips$lat <- trueCentroids@coords[,2]
#
# rm(trueCentroids)
# TODO calculate population density
my_area <- gArea(tx_zips, byid = TRUE)
# Make some maps ----------------------------------------------------------
# population,
my_cols <- brewer.pal(9, "YlGnBu")
factpal <- colorFactor(palette = my_cols, levels = levels(tx_zips$population))
leaflet(tx_zips) %>%
addProviderTiles(providers$Stamen.TonerLite) %>%
addPolygons(stroke = TRUE,
weight = 0.5,
smoothFactor = 0.5,
fillOpacity = 0.5,
fillColor = ~factpal(population),
highlightOptions = highlightOptions(color = "white",
weight = 2,
bringToFront = TRUE),
popup = ~htmlEscape(paste(zip,
format(total_population,
big.mark = ","),
sep = ": " )
)
) %>%
addLegend("bottomright",
pal = factpal,
values = ~population,
title = "Total Population",
opacity = 1) %>%
setView(lng = -97.04034, lat = 32.89981, zoom = 10)
# median_household_income,
my_reds <- brewer.pal(9, "Reds")
factpal_2 <- colorFactor(palette = my_reds,
levels = levels(tx_zips$median_household_income))
leaflet(tx_zips) %>%
addProviderTiles(providers$Stamen.TonerLite) %>%
addPolygons(stroke = TRUE,
weight = 0.5,
smoothFactor = 0.5,
fillOpacity = 0.5,
fillColor = ~factpal_2(median_household_income),
highlightOptions = highlightOptions(color = "white",
weight = 2,
bringToFront = TRUE),
popup = ~htmlEscape(paste(zip,
scales::dollar(inc_median_household_income),
sep = ": " )
)
) %>%
addLegend("bottomright",
pal = factpal_2,
values = ~median_household_income,
title = "Median Household Income",
opacity = 1) %>%
setView(lng = -97.04034, lat = 32.89981, zoom = 10)
# median_rent,
my_purples <- brewer.pal(9, "Purples")
factpal_3 <- colorFactor(palette = my_purples,
levels = levels(tx_zips$median_rent))
leaflet(tx_zips) %>%
addProviderTiles(providers$Stamen.TonerLite) %>%
addPolygons(stroke = TRUE,
weight = 0.5,
smoothFactor = 0.5,
fillOpacity = 0.5,
fillColor = ~factpal_3(median_rent),
highlightOptions = highlightOptions(color = "white",
weight = 2,
bringToFront = TRUE),
popup = ~htmlEscape(paste(zip,
scales::dollar(median_gross_rent),
sep = ": " )
)
) %>%
addLegend("bottomright",
pal = factpal_3,
values = ~median_rent,
title = "Median Rent",
opacity = 1) %>%
setView(lng = -97.04034, lat = 32.89981, zoom = 10)
# rent_to_income
my_blues <- brewer.pal(9, "PuBu")
factpal_4 <- colorFactor(palette = my_blues,
levels = levels(tx_zips$rent_to_income))
leaflet(tx_zips) %>%
addProviderTiles(providers$Stamen.TonerLite) %>%
addPolygons(stroke = TRUE,
weight = 0.5,
smoothFactor = 0.5,
fillOpacity = 0.5,
fillColor = ~factpal_4(rent_to_income),
highlightOptions = highlightOptions(color = "white",
weight = 2,
bringToFront = TRUE),
popup = ~htmlEscape(paste(zip,
scales::comma(gr_to_income_median),
sep = ": " )
)
) %>%
addLegend("bottomright",
pal = factpal_4,
values = ~rent_to_income,
title = "Rent to Income",
opacity = 1) %>%
setView(lng = -97.04034, lat = 32.89981, zoom = 10)
# write_rds(tx_zips, "tx_zips.rda")
|
c8264e46e7b38c053a5380c3af0d2d4551a6dd9e
|
9cbc8d7ae4c57f4948d47f11e2edcba21a1ba334
|
/sources/modules/VETransportSupplyUse/man/calcCongestion.Rd
|
a8aadc217e4f036f1e8ec70279f36de5cd5b307b
|
[
"Apache-2.0"
] |
permissive
|
rickdonnelly/VisionEval-Dev
|
c01c7aa9ff669af75765d1dfed763a23216d4c66
|
433c3d407727dc5062ec4bf013abced4f8f17b10
|
refs/heads/master
| 2022-11-28T22:31:31.772517
| 2020-04-29T17:53:33
| 2020-04-29T17:53:33
| 285,674,503
| 0
| 0
|
Apache-2.0
| 2020-08-06T21:26:05
| 2020-08-06T21:26:05
| null |
UTF-8
|
R
| false
| true
| 2,154
|
rd
|
calcCongestion.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CalculateCongestionBase.R
\name{calcCongestion}
\alias{calcCongestion}
\title{Function that calculates the MPG adjustment, travel time and travel delay}
\usage{
calcCongestion(Model_ls, DvmtByVehType, PerCapFwyLnMi, PerCapArtLnMi,
Population, BasePopulation, CongPrice_ClFc, IncdReduc = 0, FwyArtProp,
BusVmtSplit_Fc, TruckVmtSplit_Fc, UsePce = FALSE, ValueOfTime = 16,
CurrYear)
}
\arguments{
\item{Model_ls}{A list of congestion models and parameters required by the models}
\item{DvmtByVehType}{A data frame of dvmt by vehicle types}
\item{PerCapFwyLnMi}{A named numeric vector of free way lane miles}
\item{PerCapArtLnMi}{A named numeric vector of arterial lane miles}
\item{Population}{A numeric indicating the current year population}
\item{BasePopulation}{A numeric indicating the base year population}
\item{CongPrice_ClFc}{A matrix of congestion pricing by congestion level
and functional class}
\item{IncdReduc}{A numeric indicating proportion of incidence reduced by ITS}
\item{FwyArtProp}{A numeric indicating the proportions of daily VMT for light
vehicles that takes place on freeways and arterials}
\item{BusVmtSplit_Fc}{A data frame indicating the bus vmt split by freeways, arterials
and others}
\item{TruckVmtSplit_Fc}{A data frame indicating the truck vmt split by freeways, arterials
and others}
\item{UsePce}{A logical suggesting whether to convert heavy truck and bus dvmt to
passenger car equivalents for congestion calculation. (Default: FALSE)}
\item{ValueOfTime}{A numeric representing weights on time to model congestion price}
\item{CurrYear}{A character indicating current run year}
}
\value{
A list containing mpg adjustments, travel time, and travel delay hours
by vehicle types.
}
\description{
\code{calcCongestion} calculates the MPG adjustment, travel time and travel delay
due to congestion.
}
\details{
This function takes a list of congestion models, dvmt by vehicle types, freeway
and arterial lane miles, population, and other information to calculate
adjustments to fuel efficiency, travel time, and travel delay.
}
|
59ba02f739121b128c1ac3908130e5fae18427e4
|
3365692614767c738e38448f3a5d022cbe0ca45a
|
/Unfinished Analysis code/Hierarchical_clustering.R
|
ce298ebf9b39f8921697b481e51271519112d33d
|
[] |
no_license
|
LanceStasinski/Dryas2
|
89ac26c1a19cd3dabc9636b26967e24d1df75928
|
049f4461b0251174be4f0e208795333ea9a3c935
|
refs/heads/master
| 2023-06-02T02:32:56.480834
| 2021-06-17T17:33:59
| 2021-06-17T17:33:59
| 268,616,307
| 2
| 3
| null | 2021-03-22T20:17:30
| 2020-06-01T19:43:25
|
R
|
UTF-8
|
R
| false
| false
| 1,656
|
r
|
Hierarchical_clustering.R
|
################################################################################
#Set up
################################################################################
library(spectrolab)
library(ape)
################################################################################
#Load Data
################################################################################
clean_all = readRDS("clean_all.rds")
all_vn = readRDS("all_vn.rds")
####################################
#Hierarchical Clustering: spectra
####################################
d = dist(as.matrix(clean_all))
hc = hclust(d)
####################################
#Plot: Spectra
####################################
#create phylogram
hcp = as.phylo(hc)
#change labels
meta = meta(clean_all)
species = unique(meta$Species)
cols = setNames(c("red", "blue", "yellow"), species)
plot(hcp, type = "phylogram", show.tip.label = F, cex = 0.5,
label.offset = 1)
tiplabels(pch = 19, col = cols)
legend("topleft", names(cols), fill = cols, bty = "n")
####################################
#Hierarchical Clustering: vector normalized spectra
####################################
vnd = dist(as.matrix(all_vn))
vnhc = hclust(vnd)
####################################
#Plot: vector normalized spectra
####################################
#create phylogram
vn_hcp = as.phylo(vnhc)
#change labels
vn_meta = meta(all_vn)
species = unique(vn_meta$Species)
cols = setNames(c("red", "blue", "yellow"), species)
plot(vn_hcp, type = "phylogram", show.tip.label = F, cex = 0.5,
label.offset = 1)
tiplabels(pch = 19, col = cols)
legend("topleft", names(cols), fill = cols, bty = "n")
|
d21842a328617d6d0190ab90f204ade766fc8cd0
|
448e3fbc77e3290c1c4196c2258b3d444c8c0d45
|
/man/emcncf.Rd
|
14a8a7f2ffe16ecc2e772dc88ccab4dc6aa71732
|
[] |
no_license
|
mskcc/facets
|
1fe15ddabe7b157c4ffdeddb048797c8a1f4f83b
|
f3c93ee65b09fc57aaed22a2eb9faa05586a9dc0
|
refs/heads/master
| 2023-06-09T01:38:44.938368
| 2021-10-12T12:04:39
| 2021-10-12T12:04:39
| 35,636,429
| 135
| 71
| null | 2023-06-04T17:04:27
| 2015-05-14T20:54:49
|
R
|
UTF-8
|
R
| false
| false
| 1,348
|
rd
|
emcncf.Rd
|
\name{emcncf}
\alias{emcncf}
\title{EM estimate of copy number and cellular fraction of segment clusters}
\description{
Uses genotype mixture model to estimate the cluster specific copy number and cellular fraction.
Uses estimates based on the cnlr.median and mafR as initial values for the EM iteration.
}
\usage{
emcncf(x, trace=FALSE, unif=FALSE, min.nhet=15, maxiter=10, eps=1e-3)
}
\arguments{
\item{x}{the output from procSample. This function uses the elements
jointseg, out and dipLogR from the output.}
\item{trace}{flag to print the EM criteria at every step}
\item{unif}{random EM start values of cellular fractions instead of
clusteredcncf values}
\item{min.nhet}{minimum number of heterozygote snps in a segment used to call minor cn}
\item{maxiter}{maximum number of EM iterations}
\item{eps}{the convergence threshold}
}
\value{
A list containing:
\item{loglik}{loglikelihood value of the fitted model}
\item{purity}{fraction tumor cells in the tumor sample}
\item{ploidy}{average total copy number of the tumor cells}
\item{dipLogR}{estimated logR value of diploid segments}
\item{cncf}{dataframe consisting of the columns of segmentation output as well as
cellular fraction (cf), total (tcn) and lesser (lcn) copy number of
each segment and their em counterpart (with .em suffix)}
}
|
f8fd8e0ca541c7a37a6f81144b5841c3c03cdd06
|
de4d6b98dbbb613ff7cf2f39adbd94a3c6da11ac
|
/bivariate_cont_cat.R
|
bbbfc427eba0da4d7b499d2efc6c90944526bfa9
|
[] |
no_license
|
buddalasunil999/onlinedatalabscripts
|
fd9b076f69c70df50210d76d4e7e5af0cef45c11
|
229619423851168451907acff4070b3e4a0b745c
|
refs/heads/master
| 2021-06-06T20:45:44.050281
| 2016-11-13T03:41:29
| 2016-11-13T03:41:29
| 69,343,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,677
|
r
|
bivariate_cont_cat.R
|
#bivariate data analysis
#####################################Bivariate analysis continuous ~ categorical###################################################################
#visualization first-function 6
bi_plot <- function(varname,dependentname,class,testdata, choices, outputPath1, outputPath2,outputPath4,color)
{
svg(outputPath1)
group=table(class)
noclass=length(table(class))
par(mfrow=c(1,noclass))
for (i in 1:noclass){
hist(varname[class==attributes(group[i])$names],main=paste(dependentname,"histogram"),xlab=attributes(group[i])$names,col=color)
}
dev.off()
svg(outputPath2)
boxplot(varname~class,main= paste("boxplot of ", dependentname),data=testdata,col=color)
dev.off()
svg(outputPath4)
layout(cbind(1,2), widths=c(7,1))
xprob=seq(0,1,by=0.05)
k=length(group)
contents=names(group)
for ( i in 1:k){
q=quantile(varname[class==contents[i]],na.rm=TRUE,probs=seq(0,1,0.05))
if (i == 1) {
plot(q,xprob,xlim=c(min(varname,na.rm=TRUE),max(varname,na.rm=TRUE)),type="n",ylab="quantile",xlab=dependentname,main=paste("cumulative probability plot",dependentname))
}
lines(q,xprob,lty=i,lwd=2)
}
par(mar=c(0, 0, 0, 0))
plot.new()
legend('center',contents,lty=1:k,cex=0.6)
dev.off()
}
#Summary analysis by subgroup (a categorical variable)
bi_mean_class<-function(varname,class)
{
meanResult <- c()
group=attributes(table(class))$dimnames$class
grouplength=length(group)
for ( i in (1:grouplength)) {
meanvar=mean(subset(varname,class==group[i]),na.rm=T)
meanResult <- append(meanResult, meanvar)
}
names(meanResult) <- group
return(meanResult)
}
bi_summary_class=function(varname,class)
{
group=attributes(table(class))$dimnames$class
grouplength=length(group)
summaryList <- vector("list", length=grouplength)
for ( i in (1:grouplength)) {
summaryvar=summary(subset(varname,class==group[i]),na.rm=T)
summaryList[[i]] <- as.list(summaryvar)
}
names(summaryList) <- group
return(summaryList)
}
#Hypothesis test -parametric test-t test two samples
bi_ttest=function(varname,class,testdata){
dataList <- vector("list", length=6)
group=table(class)
difference=mean(varname[class==attributes(group[1])$names])-mean(varname[class==attributes(group[2])$names])
dataList[[1]] <- list("mean difference" = difference)
dataList[[2]] <- as.list(t.test(varname~class,data=testdata)$statistic)
dataList[[3]] <- as.list(t.test(varname~class,data=testdata)$parameter)
pvalue <- as.list(t.test(varname~class,data=testdata)$p.value)
dataList[[4]] <- c("p value of t test" = pvalue)
conf_int <- t.test(varname~class,data=testdata)$conf.int
dataList[[5]] <- list("confidence interval of difference" = conf_int)
dataList[[6]] <- as.list(t.test(varname~class,data=testdata)$estimate) #output this -need to sort out the parameters of t ,df, p-value,95 percent confidence interval and sample estimates of mean
return(dataList)
}
#Hypothesis test - parametric test - paired sample t test
bi_ttest_paired=function(varname,class,testdata)
{
summary <- capture.output(t.test(varname~class,paired=T,data=testdata))
summary <- c(summary, "of note: 95% confidennce interval is for the estimate of mean difference")
return(summary)
# output nees t, df, p-value, 95 percent confidence interval, sample estimates
}
#Hypothesis
#Hypothesis test - non parametric test- Kolmogorov-Smirnov test.
bi_kstest=function(varname,class, testdata)
{
with(testdata,{
group=table(class);
return(capture.output(ks.test(varname[class==attributes(group[2])$names],varname[class==attributes(group[1])$names], alternative = "l")))})
}
# Analysis of variance: one-way ANOVA of `varname` on factor(class), fitted
# as a gaussian GLM. Side effect: writes a QQ plot of the residuals (model
# diagnostic) to `outputPath2` as SVG.
#
# varname     : numeric dependent variable
# class       : categorical independent variable
# testdata    : data frame for the glm formula interface
# outputPath2 : file path for the SVG QQ plot
# color       : point colour for the QQ plot
# Returns     : list of (JSON-encoded coefficients/AIC/deviance, captured
#               summary(fit) console text)
bi_anova=function(varname,class,testdata,outputPath2,color)
{
  dataList <- vector("list", length=3)
  fit=glm(varname~factor(class),family=gaussian,data=testdata)
  coefficientsVal=fit$coefficients; #print this as one cell in the table
  # summary(fit) is computed twice below; AIC and deviance come from it
  aic=summary(fit)$aic #print this as one cell in the table
  devianceval=summary(fit)$deviance #print this as one cell in the table
  coefficientsFootNote <- c("Of Note: the presented coefficients are differences between means of each category versus. reference category.", "*Reference category is the lowest level of the independent variable", "for example: the coefficient is the mean difference in BMI between severity 2 vs. 1, 3 vs. 1 and 4 vs. 1 respectively.")
  dataList[[1]] <- list(as.list(coefficientsVal), "footnote" = coefficientsFootNote)
  dataList[[2]] <- as.list(aic)
  dataList[[3]] <- as.list(devianceval)
  names(dataList) <- c("coefficients of Analysis of Variance","AIC (Akaike Information Criterion)","residual deviance")
  resultData <- list(toJSON(dataList), capture.output(summary(fit))) #print this as one large cell in the table
  # Residual QQ plot for normality diagnosis, written to outputPath2
  svg(outputPath2)
  qqnorm(fit$residuals,main="qq normal plot for diagnosis of the model",col=color)
  dev.off()
  return(resultData)
}
# Bivariate analysis of a continuous variable (choices[1]) against a
# categorical variable (choices[2]) from `uploaddata`.
# Dispatches on the number of category levels: exactly 2 -> two-sample
# t-test; more than 2 -> ANOVA with a QQ diagnostic plot. A KS test and
# per-group means/summaries are always produced, and bi_plot() writes
# plots to outputPath1/2/4 as a side effect.
# Returns a 5-slot named list ("Mean", "Summary", "ttest", "kstest",
# "anova"); slots that do not apply for the level count remain NULL.
bivariate_cont_cat <- function(choices, uploaddata, outputPath1, outputPath2,outputPath3,outputPath4,color){
  dependent <- uploaddata[[choices[1]]]
  independent <- uploaddata[[choices[2]]]
  valuesList <- vector("list", length=5)
  # number of category levels decides t-test vs ANOVA below
  noclass<-length(table(independent))
  bi_plot(dependent, choices[1], independent,uploaddata, choices, outputPath1, outputPath2, outputPath4, color)
  meanClass <- bi_mean_class(varname=dependent,class=independent)
  valuesList[[1]] <- toJSON(as.list(meanClass))
  summaryList <- bi_summary_class(varname=dependent,class=independent)
  valuesList[[2]] <- toJSON(summaryList)
  if(noclass == 2){
    ttest <- bi_ttest(dependent,independent,testdata=uploaddata)
    valuesList[[3]] <- toJSON(ttest)
  }
  kstest <- bi_kstest(dependent,independent,uploaddata) #need to output all informations p value
  valuesList[[4]] <- kstest
  if(noclass > 2){
    # ANOVA writes its QQ plot to outputPath3
    anovaResult <- bi_anova(dependent,independent,uploaddata,outputPath3,color)
    valuesList[[5]] <- anovaResult
  }
  names(valuesList) <- c("Mean", "Summary", "ttest", "kstest", "anova")
  return(valuesList)
}
# Bivariate analysis of a continuous variable (choices[1]) against a paired
# categorical variable (choices[2]): plots (via bi_plot side effects),
# per-group means and summaries, and a paired t-test.
# Returns a named list: "Mean" (JSON), "Summary" (JSON), "ttestPaired"
# (captured t-test text).
bivariate_cont_cat_paired <- function(choices, uploaddata, outputPath1,outputPath2,outputPath3, color){
  dependent <- uploaddata[[choices[1]]]
  independent <- uploaddata[[choices[2]]]
  bi_plot(dependent,choices[1],independent,uploaddata, choices, outputPath1,outputPath2,outputPath3, color)
  meanClass <- bi_mean_class(varname=dependent,class=independent)
  summaryList <- bi_summary_class(varname=dependent,class=independent)
  ttestPaired <- bi_ttest_paired(dependent,independent,uploaddata)
  # The original allocated a 4-slot list but filled and named only three,
  # leaving a trailing NULL element with an NA name; size to the contents.
  valuesList <- vector("list", length=3)
  valuesList[[1]] <- toJSON(as.list(meanClass))
  valuesList[[2]] <- toJSON(summaryList)
  valuesList[[3]] <- ttestPaired
  names(valuesList) <- c("Mean", "Summary", "ttestPaired")
  return(valuesList)
}
|
ff3d3ea6c6bc25ea2047c1c549b70e75461d2ca5
|
7648147e8da326932f2fbb0eb9474315f3056103
|
/day01/part2.R
|
524cfaf0f9fd17d5a1dbef98dd96c34c09456743
|
[] |
no_license
|
jkittner/AdventOfCode2020
|
2de7ab12a3413ef3790892d06e5b8f18e8d5a9c1
|
b74e0866b11e87526b80fb169d8e2b1c1f128474
|
refs/heads/master
| 2023-03-05T01:48:27.094703
| 2021-02-22T17:39:02
| 2021-02-22T17:39:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 495
|
r
|
part2.R
|
require(data.table)
# Find three distinct entries of `exp_rep` that sum to 2020 and return their
# product (Advent of Code 2020 day 1, part 2). Returns NULL when no such
# triple exists.
#
# The original iterated over *values* three times independently, so it could
# combine the same entry with itself and scanned each unordered triple six
# times; iterating over index triples i < j < k fixes both.
find_sum_to_2020 <- function(exp_rep) {
  n <- length(exp_rep)
  if (n < 3) {
    return(NULL)
  }
  for (i in 1:(n - 2)) {
    for (j in (i + 1):(n - 1)) {
      for (k in (j + 1):n) {
        if (exp_rep[i] + exp_rep[j] + exp_rep[k] == 2020) {
          # early return on the first matching triple
          return(exp_rep[i] * exp_rep[j] * exp_rep[k])
        }
      }
    }
  }
  NULL
}
# Read the puzzle input (one number per line in input.txt) and print the
# part-2 answer to the console.
main <- function() {
  input <- fread('input.txt')
  answer <- find_sum_to_2020(input$V1)
  cat('the solution is:', answer)
}
main()
|
64b1380c34a929ff395eeebde7c4161ac8033a7a
|
da7ad84936816cbf9388df36ec18aa2c2d789325
|
/viz/R/grid_gen.R
|
bf1fdd43362fbf1b50da73534b2a9e9a0b092597
|
[] |
no_license
|
yaesoubilab/TBABM
|
14fbf673c9b4fc8b8b1462d30dc3782ae6c7aa18
|
7c8165db1a86f2f011a10601602c46b85a7bdc90
|
refs/heads/master
| 2023-08-09T02:27:45.410474
| 2023-07-26T01:26:40
| 2023-07-26T01:26:40
| 114,150,710
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,662
|
r
|
grid_gen.R
|
source("R/graph_catalog.R")
# Remember and echo the starting working directory.
initial.dir<-getwd()
print(initial.dir)
# NOTE(review): quit() here terminates the script before anything below this
# line ever runs -- this looks like leftover debugging; confirm and remove.
quit()
# Render every graph grid for one model-run folder at three display
# resolutions, saving each as a PNG inside that folder.
#
# folder_name : path to a directory of results from one model run
GraphAndSaveRun <- function(folder_name) {
  # Renamed from `cat` to avoid shadowing base::cat within this scope.
  catalog <- CreateGraphCatalog(folder_name)
  # Append a path separator only when the folder name lacks a trailing "/".
  # (The original compared folder_name[length(folder_name)] == '/', which
  # indexes the character *vector* -- i.e. the whole string -- not its last
  # character, so it almost always produced a doubled slash.)
  filename_prefix <- if (endsWith(folder_name, "/")) folder_name else paste0(folder_name, "/")
  grids <- c("demographicGrid", "hivGrid", "tbGrid")
  # Output variants: 4k display, laptop, and retina (2x) laptop.
  display_4k <- list(name="lg", ppi=157, width=19, height=12.5, maxCols=6)
  display_laptop <- list(name="sm", ppi=135, width=9.5, height=8, maxCols=3)
  display_retina <- list(name="sm_2x", ppi=220, width=9.5, height=8, maxCols=4)
  displays <- list(display_4k, display_laptop, display_retina)
  # Cartesian product: every grid at every display spec.
  thingsToRender <- list(grid=grids, display=displays) %>% cross()
  render <- function(spec) {
    print(spec)
    filename <- paste0(filename_prefix, spec$grid, "_", spec$display$name, ".png")
    ncols <- min(4, spec$display$maxCols)
    nrows <- max(3, ceiling(12/ncols))
    # Set up a place to render the graph to
    png(filename=filename,
        width=spec$display$width,
        height=spec$display$height,
        units="in",
        res=spec$display$ppi,
        bg="transparent")
    # Render the grid, then close the device to flush the file
    catalog[[spec$grid]](cols=ncols, rows=nrows)
    dev.off()
  }
  walk(thingsToRender, render)
}
# Validate that every CLI argument is an existing directory, then render the
# graph grids for each one. Exits with status 1 on bad input.
#
# Fixes vs. original: `printf` is not an R function (the error branch itself
# errored); the unbraced `if (length(args))` only guarded the file.info()
# call, leaving `stat` unbound for empty args; and `stat$isdir` is a vector
# (one entry per argument), which cannot be used directly in a scalar `if`.
main <- function(args) {
  if (length(args) == 0) {
    message("USAGE: [dir1]+ where dir1 is a directory containing results from 1 model run")
    quit(status=1)
  }
  stat <- file.info(args)
  # isdir is NA for paths that do not exist; require every entry to be TRUE.
  if (!all(stat$isdir %in% TRUE)) {
    message("ERROR: One of the arguments is not a directory. Exiting.")
    message("USAGE: [dir1]+ where dir1 is a directory containing results from 1 model run")
    quit(status=1)
  }
  walk(args, GraphAndSaveRun)
}
main(commandArgs(trailingOnly=TRUE))
|
dcf7393e6376f4c8d7d5ea07b9d714ec6b862359
|
6b626067b7d0787c446309c0d43d7f95e1ace510
|
/R-5.R
|
7450ae7f5544c81df3dffd009321f44c12cf9607
|
[] |
no_license
|
nyaundid/R-Studio
|
919cd0f788dbc81a4753647dfc508ce6f8ceb74e
|
b55371a3f623e0945471e980cb4322195d32a0e1
|
refs/heads/master
| 2020-05-02T18:10:12.366491
| 2019-03-28T04:07:05
| 2019-03-28T04:07:05
| 178,120,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 717
|
r
|
R-5.R
|
# Confidence-interval simulation on above-ground living area (Gr.Liv.Area).
# NOTE(review): relies on `ames` and plot_ci() being provided elsewhere
# (looks like the statsr/OpenIntro lab environment) -- confirm before running.
population <- ames$Gr.Liv.Area
# One sample of 60 homes and its 95% CI for the mean.
samp <- sample(population, 60)
sample_mean <- mean(samp)
se <- sd(samp)/sqrt(60)
lower <- sample_mean - 1.96 * se
upper <- sample_mean + 1.96 * se
c(lower, upper)
mean(population)
# Draw 50 samples of size 60, keeping each sample's mean and sd.
samp_mean <- rep(NA, 50)
samp_sd <- rep(NA, 50)
n <- 60
for(i in 1:50){
  samp <- sample(population, n)
  samp_mean[i] <- mean(samp)
  samp_sd[i] <- sd(samp)
}
# 95% CIs (z = 1.96) for all 50 samples; plot their coverage of the true mean.
lower <- samp_mean - 1.96 * samp_sd/sqrt(n)
upper <- samp_mean + 1.96 * samp_sd/sqrt(n)
c(lower[1], upper[1])
plot_ci(lower, upper, mean(population))
# Same samples with 99% CIs (z = 2.58); intervals widen, coverage rises.
lower <- samp_mean - 2.58 * samp_sd/sqrt(n)
upper <- samp_mean + 2.58 * samp_sd/sqrt(n)
c(lower[1], upper[1])
plot_ci(lower, upper, mean(population))
|
4edb7479f8d89682bd5fe633006f268f24492243
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/VGAM/examples/AR1.Rd.R
|
05897f2cf431175df3d530ba3be725bb3e122035
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,663
|
r
|
AR1.Rd.R
|
library(VGAM)
### Name: AR1
### Title: Autoregressive Process with Order-1 Family Function
### Aliases: AR1
### Keywords: models regression
### ** Examples
### Example 1: using arima.sim() to generate a 0-mean stationary time series.
nn <- 500
tsdata <- data.frame(x2 = runif(nn))
ar.coef.1 <- rhobit(-1.55, inverse = TRUE) # Approx -0.65
ar.coef.2 <- rhobit( 1.0, inverse = TRUE) # Approx 0.50
set.seed(1)
tsdata <- transform(tsdata,
index = 1:nn,
TS1 = arima.sim(nn, model = list(ar = ar.coef.1),
sd = exp(1.5)),
TS2 = arima.sim(nn, model = list(ar = ar.coef.2),
sd = exp(1.0 + 1.5 * x2)))
### An autoregressive intercept--only model. ###
### Using the exact EIM, and "nodrift = TRUE" ###
fit1a <- vglm(TS1 ~ 1, data = tsdata, trace = TRUE,
AR1(var.arg = FALSE, nodrift = TRUE,
type.EIM = "exact",
print.EIM = FALSE),
crit = "coefficients")
Coef(fit1a)
summary(fit1a)
## Not run:
##D ### Two responses. Here, the white noise standard deviation of TS2 ###
##D ### is modelled in terms of 'x2'. Also, 'type.EIM = exact'. ###
##D fit1b <- vglm(cbind(TS1, TS2) ~ x2,
##D AR1(zero = NULL, nodrift = TRUE,
##D var.arg = FALSE,
##D type.EIM = "exact"),
##D constraints = list("(Intercept)" = diag(4),
##D "x2" = rbind(0, 0, 1, 0)),
##D data = tsdata, trace = TRUE, crit = "coefficients")
##D coef(fit1b, matrix = TRUE)
##D summary(fit1b)
##D
##D ### Example 2: another stationary time series
##D nn <- 500
##D my.rho <- rhobit(1.0, inverse = TRUE)
##D my.mu <- 1.0
##D my.sd <- exp(1)
##D tsdata <- data.frame(index = 1:nn, TS3 = runif(nn))
##D
##D set.seed(2)
##D for (ii in 2:nn)
##D tsdata$TS3[ii] <- my.mu/(1 - my.rho) +
##D my.rho * tsdata$TS3[ii-1] + rnorm(1, sd = my.sd)
##D tsdata <- tsdata[-(1:ceiling(nn/5)), ] # Remove the burn-in data:
##D
##D ### Fitting an AR(1). The exact EIMs are used.
##D fit2a <- vglm(TS3 ~ 1, AR1(type.likelihood = "exact", # "conditional",
##D type.EIM = "exact"),
##D data = tsdata, trace = TRUE, crit = "coefficients")
##D
##D Coef(fit2a)
##D summary(fit2a) # SEs are useful to know
##D
##D Coef(fit2a)["rho"] # Estimate of rho, for intercept-only models
##D my.rho # The 'truth' (rho)
##D Coef(fit2a)["drift"] # Estimate of drift, for intercept-only models
##D my.mu /(1 - my.rho) # The 'truth' (drift)
## End(Not run)
|
fb993f08abb22a840b1c68c9e9702ad5c68c718b
|
4d8240d88a8da976c16f1ec48a921569f30b12fb
|
/metadata.R
|
c05534d92af1403a61b89adace5681a681afae60
|
[] |
no_license
|
mjenica11/limma_tutorial
|
471a4b1a9864d9e447d0a2e5acaa8636d381d0e0
|
699b48ca536381e6e2e72c5b910e13b41740b18c
|
refs/heads/main
| 2023-03-22T00:30:57.936448
| 2021-03-18T17:56:50
| 2021-03-18T17:56:50
| 349,155,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,253
|
r
|
metadata.R
|
# Combine GTEx v8 metadata into single metadata file with information relevant to DE w/ limma
# Sample ID, tissue type, sex, age, RIN (RNA integrity number), post-mortem interval
# Subset only the brain samples in individuals older than 55
# Input files
COUNTS <- "/scratch/mjpete11/limma_tutorial/data/GTEx_Analysis_2017-06-05_v8_RNASeQCv1.1.9_gene_reads.gct"
META1 <- "/scratch/mjpete11/limma_tutorial/data/GTEx_Analysis_v8_Annotations_SampleAttributesDS.txt"
META2 <- "/scratch/mjpete11/limma_tutorial/data/GTEx_Analysis_v8_Annotations_SubjectPhenotypesDS.txt"
# Output files
BRAIN_META <- "/scratch/mjpete11/limma_tutorial/data/brain_metadata.csv"
SUMMARY_STATS <- "/scratch/mjpete11/limma_tutorial/data/metadata_summary_stats.csv"
# Libraries
library(tidyverse)
library(data.table)
library(stringr)
# Read in files
# Contains: sample ID, tissue type, RIN, post-mortem info
meta1 <- read_tsv(META1, col_names=TRUE)
# Contains sex info
meta2 <- read_tsv(META2, col_names=TRUE)
# Count data
counts <- data.frame(fread(COUNTS))
#_______________________________________________________________________________
# metadata preprocessing
#_______________________________________________________________________________
# Which columns contain relevant info in meta1 df?
meta1[1,] # SAMPID = 1, SMRIN = 5, SMTSD (detailed tissue type) = 7, SMSISCH (post-mort) = 9
# Subset sample ID, tissue type, RIN, and ischemic time
meta1 <- meta1[,c(1,5,7,9)]
# Rename columns
colnames(meta1) <- c("Sample_ID", "RIN", "Tissue", "Ischemic_Time")
# Add individual ID col;
# grep pattern won't work for the leukemia cell line samples, but I will drop all the cell lines
meta1[["Individual_ID"]] <- str_extract(meta1$Sample_ID, "GTEX-[0-9A-Z]+")
# Reformat tissue names to be contain only '_' between words
meta1$Tissue <- str_replace_all(meta1$Tissue, c("-" = "", " " = "_", "__" = "_"))
# Replace . to - in colnames
colnames(counts) <- str_replace_all(colnames(counts), pattern = "\\.","-")
# Get list of female IDs
fems <- meta2$SUBJID[which(meta2$SEX==2)]
# Make list of sex of each individual
sex <- with(meta1['Individual_ID'], ifelse(Individual_ID %in% fems, "Female", "Male"))
# Add column containing sex
meta1 <- cbind(meta1, "Sex"=sex)
# Add column containing age (only decade intervals are publically accessible)
meta1$Age <- meta2$AGE[match(meta1$Individual_ID, meta2$SUBJID)]
# Rearrange column order
meta <- meta1 %>% select(Individual_ID, Sex, Age, Tissue, Sample_ID, Ischemic_Time, RIN)
# Drop samples in metadata that do not have count data
select_samples <- colnames(counts)[colnames(counts) %in% meta$Sample_ID]
meta <- meta[meta$Sample_ID %in% select_samples, ]
# Subset sample replicates
Reps <- meta %>%
group_by(Individual_ID, Tissue) %>%
filter(n() > 1) %>%
ungroup()
# Samples minus replicates and non-brain_meta tissue
brain_meta <- meta[grepl("Brain", meta$Tissue),]
# Remove the tissue replciates (Cerebellum/Cortex are replicates of Cerebellar/Frontal_Cortex)
brain_meta <- brain_meta[!grepl("Cerebellum|Brain_Cortex", brain_meta$Tissue),]
# Subset samples >= 50; re-do once I get the metadata with the exact ages and not just decade intervals
brain_meta <- brain_meta %>% filter(Age >= 50)
# Number of samples in meta
nrow(brain_meta) # 1,831
# Function to summarise percent missing values
# Percentage of missing (NA) values in each column of `x`.
# Returns a one-row data frame with one percentage per column.
# (select(everything()) in the original was a no-op and is dropped.)
Missing <- function(x){
  na_counts <- summarise_all(x, list(~sum(is.na(.))))
  na_counts / nrow(x) * 100
}
# How many samples have missing values? (percent NA per column)
Missing(meta)
# Drop rows missing values
# NOTE(review): brain_meta was subset from `meta` *before* this drop_na(),
# so brain_meta may still contain rows with NAs -- confirm intended order.
meta <- meta %>% drop_na()
# Check that all samples missing values were removed
Missing(meta)
# Quick view of the final brain metadata
head(brain_meta)
tail(brain_meta)
# Summary stats: how many tissues per sex
# Count samples per Tissue x Sex combination; returns a tibble with
# columns Tissue, Sex and n.
Summary_Stat <- function(x){
  grouped <- group_by(x, Tissue, Sex)
  tally(grouped)
}
# Print summary statistics
print(Summary_Stat(brain_meta), n=22)
#______________________________________________________________________________________________________
# Write to file
#______________________________________________________________________________________________________
# summary stats
write.csv((as.data.frame(Summary_Stat(brain_meta))), SUMMARY_STATS)
# metadata files
write.csv(brain_meta, BRAIN_META, row.names=FALSE)
|
6f6a2d83be2dbe195957c39414a559f655b48fb4
|
023ece33e24b95bd333d09a7cd5532019c6bee43
|
/thesis-utils.R
|
77a14c1c4bbdfef66b4fcf74ede189f52163466a
|
[] |
no_license
|
paleolimbot/minimal-thesis-bookdown
|
be8e3aeef886e3e58c085f194a205997c8d47412
|
0061d2aff50f15e65a9c6a66960a20d99bb80386
|
refs/heads/master
| 2021-01-26T06:38:41.372948
| 2020-02-26T19:40:41
| 2020-02-26T19:40:41
| 243,350,318
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,650
|
r
|
thesis-utils.R
|
library(tidyverse)
# it's unlikely you'll be able to make the tables you want
# without the kableExtra package
library(kableExtra)
# you probably want all these chunk options to be the same
# for every chapter
# chunk options
knitr::opts_chunk$set(
echo = FALSE,
fig.align = "center"
)
# word documents have very low figure resolution by default, which is
# unlikely to impress your supervisor
# TRUE when knitting to Word, i.e. the target is neither HTML nor LaTeX.
is_word_output <- function() {
  !(knitr::is_html_output() || knitr::is_latex_output())
}
# Word output gets low-resolution figures by default; raise DPI for Word only.
if (is_word_output()) {
  knitr::opts_chunk$set(dpi = 300)
}
# default theme function...useful to be able to use this
# theme with different base font sizes in case you run into
# trouble with this later on
# Thesis ggplot2 theme: theme_bw with blank facet-strip backgrounds.
# `...` is forwarded to theme_bw() (e.g. base_size), so the same theme can
# be reused at different base font sizes.
theme_thesis <- function(...) {
  theme_bw(...) + theme(strip.background = element_blank())
}
# set the default ggplot2 theme
theme_set(theme_thesis(10))
# I think a custom "thesis_kable" function is useful
# for consistent tables throughout the thesis. It's a wrapper
# around kable(), and you can add the kableExtra modifiers
# after it to further modify the output
# Consistent table rendering for the whole thesis: kable + kableExtra
# styling for HTML/LaTeX, plain pandoc kable for Word.
#
# tbl       : data frame to render
# ...       : forwarded to kable()
# style     : named list of kable_styling() options; overrides the defaults
# longtable : passed to kable() for multi-page LaTeX tables
thesis_kable <- function(tbl, ..., style = list(), longtable = FALSE) {
  # A font size of 10pt in tables is likely to save you at least a little
  # trouble (LaTeX only; NULL elsewhere, so the option is dropped).
  default_style <- list(font_size = if (knitr::is_latex_output()) 10)
  # Merge user style over defaults, keeping user values on name clashes.
  style_options <- c(style, default_style)[union(names(style), names(default_style))]
  # kableExtra doesn't do raw markdown output
  if (!is_word_output()) {
    kbl <- kable(
      tbl,
      ...,
      booktabs = TRUE,
      longtable = longtable
    )
    # splice the merged style list into kable_styling() as named arguments
    rlang::exec(kableExtra::kable_styling, kbl, !!!style_options)
  } else {
    knitr::kable(tbl, format = "pandoc", ...)
  }
}
# this lets you use some markdown markup for table cell values
# Like thesis_kable(), but cell values may contain markdown markup
# (**bold**, *italic*, ^sup^, ~sub~); it is converted to LaTeX when
# knitting to PDF and passed through unescaped.
thesis_kable_raw_markdown <- function(tbl, ...) {
  body <- if (knitr::is_latex_output()) tbl_markdown_to_latex(tbl) else tbl
  thesis_kable(body, escape = FALSE, ...)
}
# this lets you use some markdown markup for figure captions and short captions
# Convert markdown markup in figure captions (and short captions) to LaTeX
# when knitting to PDF; pass captions through unchanged otherwise.
md_caption <- function(x) {
  if (!knitr::is_latex_output()) {
    return(x)
  }
  markdown_to_latex(x)
}
# Translate a small subset of markdown markup into LaTeX:
# **bold**, *italic*, ^superscript^, ~subscript~, and literal %.
# Bold must be handled before italic so ** is not consumed as two *.
markdown_to_latex <- function(x) {
  x %>%
    str_replace_all("\\*\\*(.*?)\\*\\*", "\\\\textbf{\\1}") %>%  # **bold**
    str_replace_all("\\*(.*?)\\*", "\\\\emph{\\1}") %>%          # *italic*
    str_replace_all("\\^(.*?)\\^", "$^{\\\\text{\\1}}$") %>%     # ^superscript^
    str_replace_all("~(.*?)~", "$_{\\\\text{\\1}}$") %>%         # ~subscript~
    str_replace_all("%", "\\\\%")                                # escape percent
}
# Apply markdown_to_latex() to every character and factor column and to the
# column names of `tbl`, so markdown in table cells renders under LaTeX.
# NOTE(review): factor columns are passed straight into the stringr
# replacements -- confirm the coercion behaves as intended for your levels.
tbl_markdown_to_latex <- function(tbl) {
  tbl %>%
    mutate_if(is.character, markdown_to_latex) %>%
    mutate_if(is.factor, markdown_to_latex) %>%
    rename_all(markdown_to_latex)
}
|
9ec3e6c66841618f921ce83ef602d3d423cf0653
|
f8fe19a2008e19b858d65f2f5a57658c304c93a3
|
/man/nearest.Rd
|
666b3990b5018f1d818f2d7debab55df854517a8
|
[] |
no_license
|
cran/osrmr
|
53cd57409ef02d6b970316f21331d782815f500f
|
86c78bae1a9f6f9989e53e11cd71019287c289f1
|
refs/heads/master
| 2021-06-05T03:02:53.041393
| 2021-05-31T08:40:02
| 2021-05-31T08:40:02
| 127,176,140
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,337
|
rd
|
nearest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nearest.R
\name{nearest}
\alias{nearest}
\title{nearest accessible position}
\usage{
nearest(lat, lng, api_version = 5, localhost = F, timeout = 0.001)
}
\arguments{
\item{lat, }{A numeric (-90 < lat < 90)}
\item{lng, }{A numeric (-180 < lng < 180)}
\item{api_version, }{A numeric (either 4 or 5)}
\item{localhost, }{A logical (TRUE = localhost is used, FALSE = onlinehost is used)}
\item{timeout}{A numeric indicating the timeout between server requests (in order to prevent queue overflows). Default is 0.001s.}
}
\value{
A data.frame with lat and lng
}
\description{
nearest() calculates the nearest position to the given coordinates that can be reached by car.
Coordinates use the WGS84 standard. Note: the OSRM API v4 only works with a local server,
not with the 'OSRM' webserver.
}
\examples{
\dontrun{
osrmr::nearest(47,9, 5, FALSE)
Sys.setenv("OSRM_PATH_API_5"="C:/OSRM_API5")
osrmr::run_server(Sys.getenv("OSRM_PATH_API_5"), "switzerland-latest.osrm")
osrmr::nearest(47,9, 5, TRUE)
osrmr::quit_server()
Sys.unsetenv("OSRM_PATH_API_5")
Sys.setenv("OSRM_PATH_API_4"="C:/OSRM_API4")
osrmr::run_server(Sys.getenv("OSRM_PATH_API_4"), "switzerland-latest.osrm")
osrmr::nearest(47,9, 4, TRUE)
osrmr::quit_server()
Sys.unsetenv("OSRM_PATH_API_4")}
}
|
b30527a8da66eef1c9325c55c25d9f45a764867c
|
3810f013ef1bb6da62ae44849f04575ee8daf2f7
|
/man/as.data.frame.incidence2.Rd
|
0dc8d39957b5f2a7c8398e6a8b5c98eff8973f87
|
[
"MIT"
] |
permissive
|
minghao2016/incidence2
|
72dac5797bb44a8df537e3212710247ec8365a82
|
5aa52edf3526def57cff1458dfad026940674723
|
refs/heads/master
| 2023-01-11T11:30:00.512052
| 2020-11-12T14:21:25
| 2020-11-12T14:21:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 592
|
rd
|
as.data.frame.incidence2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conversion.R
\name{as.data.frame.incidence2}
\alias{as.data.frame.incidence2}
\title{Convert incident object to dataframe}
\usage{
\method{as.data.frame}{incidence2}(x, ...)
}
\arguments{
\item{x}{An \code{\link[=incidence]{incidence()}} object.}
\item{...}{Not used.}
}
\description{
Convert incident object to dataframe
}
\examples{
dat <- data.frame(dates = Sys.Date() + 1:100,
names = rep(c("Jo", "John"), 5))
dat <- incidence(dat, date_index = dates, groups = names)
as.data.frame(dat)
}
|
a4ba1a7365c0f5b73b1aa00dfb220652bc0fa8ea
|
10873a4e41464f753732b28ba9425cda5520f850
|
/emulator/res/lsr_zp.r
|
88c961273c9e71165e0bad2de400b67eeb543881
|
[] |
no_license
|
uatach/mc861-nes
|
3b3e11bb6876ca47b319acd25d7714b7c7bb9069
|
9583086364ab5866c7104408bb154bec6f3e164c
|
refs/heads/master
| 2020-08-02T12:47:28.823972
| 2019-10-04T20:53:15
| 2019-10-04T20:53:15
| 211,356,279
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,727
|
r
|
lsr_zp.r
|
| pc = 0xc003 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc005 | a = 0x08 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc007 | a = 0x08 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0044] = 0x08 |
| pc = 0xc009 | a = 0x02 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00b | a = 0x02 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0044] = 0x04 |
| pc = 0xc00c | a = 0x02 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00e | a = 0x20 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc010 | a = 0x20 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x000f] = 0x20 |
| pc = 0xc012 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc014 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x000f] = 0x10 |
| pc = 0xc016 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x000f] = 0x08 |
| pc = 0xc018 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x000f] = 0x04 |
| pc = 0xc01a | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x000f] = 0x02 |
| pc = 0xc01c | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x000f] = 0x01 |
| pc = 0xc01e | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110111 | MEM[0x000f] = 0x00 |
| pc = 0xc020 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 | MEM[0x000f] = 0x00 |
| pc = 0xc022 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110110 | MEM[0x000f] = 0x00 |
|
65c0802525c0acc98496ee25c838e54dbdb2b7a2
|
eb59d9f92cd907aaad4881992f323cc1529b39fa
|
/man/many_normal_plots.Rd
|
7c8ff9a6dcd12b4a6d5774e470b61eba62bb5a5c
|
[] |
no_license
|
cran/TeachBayes
|
a20021b9fd698140185aeb5916a9f5e42b901826
|
47f45100474dd8e8288b06386ca91a16288b5922
|
refs/heads/master
| 2021-01-11T22:12:37.681770
| 2017-03-25T09:58:44
| 2017-03-25T09:58:44
| 78,935,724
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
rd
|
many_normal_plots.Rd
|
\name{many_normal_plots}
\alias{many_normal_plots}
\title{
Graph of several normal curves
}
\description{
Graph of several normal curves
}
\usage{
many_normal_plots(list_normal_par)
}
\arguments{
\item{list_normal_par}{
list of vectors, where each vector is a mean and standard deviation for a normal distribution
}
}
\value{
Displays the normal curves on a single panel with labels
}
\author{
Jim Albert
}
\examples{
normal_parameters <- list(c(100, 15),
c(110, 15), c(120, 15))
many_normal_plots(normal_parameters)
}
|
70aa53e52404029e01a436b39a592869416a19c2
|
9b6a655f98a5b583e405f3db21196b816698c296
|
/analysis/cgpfa.R
|
e595d2adc4c4d34891de0eab7a1bf4780df9f0f7
|
[] |
no_license
|
bgriffen/heteromotility
|
205deeb1221612d0c5b46b3819636d0efba178e4
|
07a02d6c6fc64bb470f9b02f6f45c8e676cde400
|
refs/heads/master
| 2021-09-22T17:45:22.757980
| 2018-09-12T20:16:44
| 2018-09-12T20:16:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,465
|
r
|
cgpfa.R
|
### Satellite Cell PFA for combined three plates
# Map each cell at each time split to a coarse-grained state: project the
# motility features onto the first two PCs, then bin PC space on a regular
# grid of size `bin_sizes` (ash::bin2).
#
# df        : motility-statistics frame; columns 1:3 are plate / Well.XY /
#             cell_id ("cell-split"), and the hard-coded column ranges below
#             select the feature columns -- assumes this exact layout,
#             TODO confirm against the upstream CSV writer.
# bin_sizes : c(nbins_y, nbins_x) for the PC-space grid
# Returns   : data.frame with columns plate, xy, cell, split, biny, binx
pfa_state_locations <- function(df, bin_sizes){
  require(reshape2)
  require(ash)
  # PCA to generate 2 dimensional state space
  df.vals <- data.frame( scale( cbind(df[4:22], df[29:32], df[39:42], df[49:52]) ) )
  df.pca <- prcomp(df.vals)
  df.comp <- data.frame( df.pca$x[,1:2] )
  pca_frame <- cbind(df[,1:3], df.comp)
  # split "cell-split" ids into separate cell and split fields
  pca_frame <- transform(pca_frame, cell_id = colsplit(cell_id, "-", names = c("cell", "split")))
  # Find PC ranges to bin space (integer-padded bounding box)
  PC1_range <- c( floor(range(df.comp[,1])[1]), ceiling(range(df.comp[,1])[2]) )
  PC2_range <- c( floor(range(df.comp[,2])[1]), ceiling(range(df.comp[,2])[2]) )
  require(ash)  # NOTE(review): duplicate require(); already loaded above
  ab <- matrix( c(PC1_range[1], PC2_range[1], PC1_range[2], PC2_range[2]), 2, 2 )
  # Cols = Plate, XY, Split, Cell, BinRow(Y), BinCol(X)
  # Rows = Cells
  state_locations <- data.frame(matrix(ncol = 6, nrow = nrow(df)))
  colnames(state_locations) <- c("plate", "xy", "cell", "split", "biny", "binx")
  for (i in 1:nrow(pca_frame)){
    cell <- pca_frame[i,]
    position <- cell[,4:5]
    # bin the single point; the sole nonzero count marks its grid cell
    bins <- bin2(position, ab, nbin = bin_sizes)
    bin_yx <- which(bins$nc != 0, arr.ind = T)
    # mixed-type c() coerces this row to character; re-coerced below
    state_locations[i,] <- c(cell$plate, as.character(cell$Well.XY), cell$cell_id$cell, cell$cell_id$split, bin_yx)
  }
  state_locations$plate <- as.numeric(state_locations$plate)
  state_locations$cell <- as.numeric(state_locations$cell)
  state_locations$split <- as.numeric(state_locations$split)
  state_locations$binx <- as.numeric(state_locations$binx)
  state_locations$biny <- as.numeric(state_locations$biny)
  return(state_locations)
}
pfa_state_vectors <- function(state_locations){
  ## Count State Transitions
  #
  # Decomposes each cell's bin-to-bin move between consecutive splits into
  # unit steps (a stochastic staircase walk along the larger component,
  # ties broken by a fair coin), then averages the unit steps per bin.
  #
  # Returns
  # -------
  # state_vectors : data.frame.
  #   N x 5 matrix, where N is the number of unit transitions observed in the timecourse
  #   Columns: biny, binx, vy, vx, cells
  #   binx and biny are the coordinates for each course-grained bin
  #   vx and vy are the corresponding components of the mean transition vector
  #   cells is the total number of cells observed in the state over time
  bin_transitions <- matrix(ncol = 4, nrow = 0) # cols = (biny, binx, v_y, v_x)
  for (s in 0:(max(state_locations$split)-1)){
    # Make data.frames for each state time scale
    t0_state <- subset(state_locations, state_locations$split==s)
    t1_state <- subset(state_locations, state_locations$split==s+1)
    # Check to ensure all cells are in both state periods
    occupied_bins <- unique(t0_state[,c("biny","binx")])
    for (i in 1:nrow(occupied_bins)){
      # NOTE(review): `cells` is drawn from the FULL state_locations, not
      # from t0_state, so cells from every split that occupy this bin are
      # walked at every s iteration -- confirm whether t0_state was intended.
      cells <- subset(state_locations, (state_locations$biny == occupied_bins[i,1] & state_locations$binx == occupied_bins[i,2]))
      for (j in 1:nrow(cells)){
        position <- c(cells[j,]$biny, cells[j,]$binx) # position at t0
        # the same (plate, xy, cell) identity at the next split
        trans <- subset(t1_state, (t1_state$plate == cells[j,1] & t1_state$xy == cells[j,2] & t1_state$cell == cells[j,3]))
        trans_pos <- c(trans$biny, trans$binx)
        v <- trans_pos - position
        # NOTE(review): debug output -- prints loop indices for every cell
        print(i)
        print(j)
        # walk the displacement to zero one unit step at a time
        while ( sum(sqrt(v**2)) > 0 ){
          # Performs path finding by moving in the direction of greater magnitude
          # If direction mags are ==; randomly chooses direction
          if ( abs(v[1]) > abs(v[2]) ){
            bin_transitions <- rbind(bin_transitions, c(position, sign(v[1]), 0))
            position <- position + c(sign(v[1]), 0)
            v <- v - c(sign(v[1]), 0)
          } else if ( abs(v[1]) == abs(v[2]) ){
            # tie: pick y- or x-step with probability 1/2 (non-deterministic)
            choice <- rbinom(n = 1, size = 1, prob = 0.5)
            if (choice == 0){
              bin_transitions <- rbind(bin_transitions, c(position, sign(v[1]), 0))
              position <- position + c(sign(v[1]), 0)
              v <- v - c(sign(v[1]), 0)
            } else {
              bin_transitions <- rbind(bin_transitions, c(position, 0, sign(v[2])))
              position <- position + c(0, sign(v[2]))
              v <- v - c(0, sign(v[2]))
            }
          }
          else {
            bin_transitions <- rbind(bin_transitions, c(position, 0, sign(v[2])))
            position <- position + c(0, sign(v[2]))
            v <- v - c(0, sign(v[2]))
          }
        }
      }
    }
  }
  # NOTE(review): rbind-in-loop growth above is O(n^2); fine for small runs
  bin_transitions <- data.frame(bin_transitions)
  colnames(bin_transitions) <- c("biny","binx","vector_y", "vector_x")
  bin_transitions$binx <- as.numeric(bin_transitions$binx)
  bin_transitions$biny <- as.numeric(bin_transitions$biny)
  bin_transitions$vector_y <- as.numeric(bin_transitions$vector_y)
  bin_transitions$vector_x <- as.numeric(bin_transitions$vector_x)
  ## Find State Vectors: mean unit step and visit count per occupied bin
  state_vectors <- data.frame(matrix(ncol=5, nrow=0))
  i = 1  # NOTE(review): unused
  y_bins <- sort(unique(bin_transitions$biny))
  for (y in y_bins){
    y_df <- subset(bin_transitions, bin_transitions$biny == y)
    x_bins <- sort(unique(y_df$binx))
    for (x in x_bins){
      xy_df <- subset(y_df, y_df$binx == x)
      v_mean <- c(mean(xy_df$vector_y), mean(xy_df$vector_x))
      cell_num <- nrow(xy_df)
      state_vectors <- rbind(state_vectors, c(y, x, v_mean, cell_num))
    }
  }
  colnames(state_vectors) <- c("biny", "binx", "v_y", "v_x", "cells")
  return(state_vectors)
}
# Distribution of per-cell transition vectors between consecutive time
# splits. Rows of split s and split s+1 are paired by position, which
# assumes both splits list cells in the same order -- the original code
# made the same assumption; TODO confirm upstream ordering.
#
# state_locations : data.frame with at least columns split, biny, binx
# Returns         : data.frame with columns v_y, v_x, v_mag (one row per
#                   cell per transition)
#
# Changes vs. original: removed the unused `mat_rows` variable and replaced
# the O(n^2) rbind-in-loop over rows with vectorized column arithmetic.
pfa_vector_distribution <- function(state_locations){
  pieces <- vector("list", max(state_locations$split))
  for (s in 0:(max(state_locations$split)-1)){
    t0_state <- subset(state_locations, state_locations$split==s)
    t1_state <- subset(state_locations, state_locations$split==s+1)
    v_y <- t1_state$biny - t0_state$biny
    v_x <- t1_state$binx - t0_state$binx
    pieces[[s + 1]] <- cbind(v_y, v_x, sqrt(v_y^2 + v_x^2))
  }
  vd <- data.frame(do.call(rbind, pieces))
  colnames(vd) <- c("v_y", "v_x", "v_mag")
  return(vd)
}
sem <- function(x){sd(x)/sqrt(length(x))}
# Summary statistics of the transition-vector distribution: moments of the
# magnitudes, flux probability, and the mean ("directedness") vector.
# Side effects: writes <prefix>v_mag_vals.csv, <prefix>vector_dist_stats.csv
# and a density plot <prefix>vector_dist_density.png under out_path.
vector_distribution_stats <- function(state_locations, out_path, prefix = NULL){
  require(moments)
  vd <- pfa_vector_distribution(state_locations)
  # raw magnitude values for downstream analysis
  write.csv(vd$v_mag, file = paste(out_path, prefix, "v_mag_vals.csv", sep=''), row.names = F)
  # first four moments (moments::skewness / kurtosis) plus SEM of magnitudes
  mean_mag <- mean(vd$v_mag)
  var_mag <- var(vd$v_mag)
  skew_mag <- skewness(vd$v_mag)
  kurt_mag <- kurtosis(vd$v_mag)
  sem_mag <- sem(vd$v_mag)
  n <- nrow(vd)
  # prob of flux for a given cell (fraction of transitions that moved at all)
  p_flux <- sum(vd$v_mag > 0)/nrow(vd)
  # directedness vector: mean displacement components and its magnitude
  dir_v <- c(mean(vd$v_x), mean(vd$v_y))
  mag_dir <- sqrt(sum(dir_v**2))
  r <- data.frame(mean_mag, var_mag, skew_mag, kurt_mag, sem_mag, n, p_flux, dir_v[1], dir_v[2], mag_dir)
  colnames(r) <- c('mean_mag', 'var_mag', 'skew_mag', 'kurt_mag', 'sem_mag', 'n', 'p_flux', 'dir_vx', 'dir_vy', 'mag_dir')
  write.csv(r, file = paste(out_path, prefix, "vector_dist_stats.csv", sep =''), row.names = F)
  # density of transition magnitudes
  p <- ggplot(vd, aes(v_mag)) + geom_density()
  ggsave(p, filename = paste(prefix, "vector_dist_density.png", sep=''), path = out_path, width = 3, height = 3)
}
# Write the coarse-grained state vectors to CSV, renaming the bin columns
# to generic x/y coordinates for downstream plotting tools.
output_state_vectors <- function(state_vectors, output_file){
  out_tbl <- data.frame(
    x     = state_vectors$binx,
    y     = state_vectors$biny,
    v_x   = state_vectors$v_x,
    v_y   = state_vectors$v_y,
    cells = state_vectors$cells
  )
  write.csv(out_tbl, file = output_file, row.names = FALSE)
}
# Convenience wrapper: bin cells into coarse-grained states, compute the
# mean transition-vector field, and write it straight to `output_file`.
save_state_vectors <- function(df, bin_sizes, output_file){
  locations <- pfa_state_locations(df, bin_sizes)
  vectors <- pfa_state_vectors(locations)
  output_state_vectors(vectors, output_file)
}
# End-to-end probability-flux analysis for one condition: bin into states,
# compute the transition-vector field, plot it, and persist results.
# Side effects: saves <experiment>pfa_flux_vectors.png and
# <experiment>state_vectors.csv under output_path, plus the distribution
# stats/plots written by vector_distribution_stats().
# Returns the ggplot vector-field object.
pfa_flux_plot <- function(df, bin_sizes, output_path, experiment = NULL){
  require(ggplot2)
  state_locations <- pfa_state_locations(df, bin_sizes)
  state_vectors <- pfa_state_vectors(state_locations)
  ## Vector field plot: one arrow per occupied bin, from bin to bin + v
  v_plot <- ggplot(state_vectors, aes(x = binx, y = biny)) + geom_segment(aes(xend = binx+v_x, yend = biny+v_y), arrow = arrow(length = unit(0.1, "cm")))
  v_plot <- v_plot + labs(title = "Probability Flux (tau0 - tau1)", x = "PC1", y = "PC2")
  ggsave(v_plot, file = paste(experiment, "pfa_flux_vectors.png", sep = ''), path = output_path, width = 4, height = 4)
  output_file = paste(output_path, experiment, "state_vectors.csv", sep = '')
  output_state_vectors(state_vectors, output_file)
  vector_distribution_stats(state_locations, output_path, prefix = experiment)
  return(v_plot)
}
## MuSCs: data folders for the three muscle stem cell imaging plates
experiment1 = 'musc/20160626'
experiment2 = 'musc/20160701_0'
experiment3 = 'musc/20160701_1'
# Run the probability-flux analysis on MuSC data pooled across three plates,
# separately for the FGF2 and no-FGF2 conditions.
#
# experiment1..3 : folder names under data/ for the three plates
# plot_path      : output directory for plots and CSVs
# bin_sizes      : PC-space grid resolution passed to pfa_state_locations
# tau            : split interval length; selects which *_split_<tau>.csv
#                  files are read
pfa_musc <- function(experiment1, experiment2, experiment3, plot_path='data/musc/', bin_sizes=c(15,15), tau=20){
  # plate 1, FGF2 and no-FGF2
  df_20f1 = read.csv(paste("data/", experiment1, "/fgf2_exp_motility_statistics_split_", tau, ".csv", sep = ''))
  df_20n1 = read.csv(paste("data/", experiment1, "/nofgf2_exp_motility_statistics_split_", tau, ".csv", sep = ''))
  plate1f <- rep(1, nrow(df_20f1))
  plate1n <- rep(1, nrow(df_20n1))
  # plate 2
  df_20f2 = read.csv(paste("data/", experiment2, "/fgf2_exp_motility_statistics_split_", tau, ".csv", sep = ''))
  df_20n2 = read.csv(paste("data/", experiment2, "/nofgf2_exp_motility_statistics_split_", tau, ".csv", sep = ''))
  plate2f <- rep(2, nrow(df_20f2))
  plate2n <- rep(2, nrow(df_20n2))
  # plate 3
  df_20f3 = read.csv(paste("data/", experiment3, "/fgf2_exp_motility_statistics_split_", tau, ".csv", sep = ''))
  df_20n3 = read.csv(paste("data/", experiment3, "/nofgf2_exp_motility_statistics_split_", tau, ".csv", sep = ''))
  plate3f <- rep(3, nrow(df_20f3))
  plate3n <- rep(3, nrow(df_20n3))
  # pool plates per condition, tagging each row with its plate id
  comb_df20f <- rbind(df_20f1, df_20f2, df_20f3)
  plate <- c(plate1f, plate2f, plate3f)
  comb_df20f <- data.frame(plate, comb_df20f)
  comb_df20n <- rbind(df_20n1, df_20n2, df_20n3)
  plate <- c(plate1n, plate2n, plate3n)
  comb_df20n <- data.frame(plate, comb_df20n)
  # flux analysis per condition; output names encode condition, tau and bins
  v_comb_df20f <- pfa_flux_plot(comb_df20f, bin_sizes = bin_sizes, output_path = plot_path, experiment = paste("MuSC_fgf2_", tau, '_', 'b', bin_sizes[1], '_', sep=''))
  v_comb_df20n <- pfa_flux_plot(comb_df20n, bin_sizes = bin_sizes, output_path = plot_path, experiment = paste("MuSC_nofgf2_", tau, '_', 'b', bin_sizes[1], '_', sep=''))
}
# Sweep bin sizes and tau values for the MuSC flux analysis.
bin_sizes_all <- rbind(c(3,3), c(5,5), c(10,10), c(15,15), c(20,20), c(30,30))
for (tau in c(20, 25, 30)){
  # seq_len() is safe for zero-row matrices, unlike 1:nrow().
  for (j in seq_len(nrow(bin_sizes_all))){
    pfa_musc(experiment1, experiment2, experiment3, tau=tau, bin_sizes=bin_sizes_all[j,])
  }
}
## Myoblasts
# Probability flux analysis for myoblast experiments: pools two imaging
# plates per condition (FGF2 / no-FGF2) and renders a flux plot for each.
#
# Args:
#   myo_exp1/2: per-plate experiment directories under data/.
#   tau:        track split interval encoded in the input filenames.
#   bin_sizes:  spatial bin sizes for state discretization.
#   plot_path:  output directory (passed positionally as output_path).
pfa_myoblast <- function(myo_exp1, myo_exp2, tau = 20, bin_sizes=c(15,15), plot_path){
  # Read one condition file ("fgf2" or "nofgf2") for one experiment.
  read_split <- function(experiment, condition){
    read.csv(paste0('data/', experiment, '/', condition,
                    '_exp_motility_statistics_split_', tau, '.csv'))
  }
  # Pool the two plates, tagging each row with its plate of origin.
  pool_plates <- function(df_a, df_b){
    plate <- c(rep(1, nrow(df_a)), rep(2, nrow(df_b)))
    data.frame(plate, rbind(df_a, df_b))
  }
  comb_myo20f <- pool_plates(read_split(myo_exp1, 'fgf2'), read_split(myo_exp2, 'fgf2'))
  comb_myo20n <- pool_plates(read_split(myo_exp1, 'nofgf2'), read_split(myo_exp2, 'nofgf2'))
  # Third positional argument of pfa_flux_plot() is output_path.
  v_myo20f <- pfa_flux_plot(comb_myo20f, bin_sizes = bin_sizes, plot_path,
                            experiment = paste0("Myoblast_FGF2_", tau, "_b", bin_sizes[1], "_"))
  v_myo20n <- pfa_flux_plot(comb_myo20n, bin_sizes = bin_sizes, plot_path,
                            experiment = paste0("Myoblast_noFGF2_", tau, "_b", bin_sizes[1], "_"))
}
# Myoblast experiment directories and output location.
myo_exp1 <- "myoblast/20160623"
myo_exp2 <- "myoblast/20160720"
plot_path <- "data/myoblast/"
# Sweep bin sizes and tau values for the myoblast flux analysis.
bin_sizes_all <- rbind(c(3,3), c(5,5), c(10,10), c(15,15), c(20,20), c(30,30))
for (tau in c(20, 25, 30)){
  # seq_len() is safe for zero-row matrices, unlike 1:nrow().
  for (j in seq_len(nrow(bin_sizes_all))){
    pfa_myoblast(myo_exp1, myo_exp2, tau=tau, bin_sizes = bin_sizes_all[j,], plot_path=plot_path)
  }
}
## MEFs
plot_path <- 'data/mef/'
# Probability flux analysis for MEF experiments: pools two Myc/Ras plates
# and four wild-type plates, then renders a flux plot per genotype.
#
# Args:
#   myc_exp, myc2_exp:  Myc/Ras plate directories under data/.
#   wt_exp..wt4_exp:    wild-type plate directories under data/.
#   tau:                track split interval encoded in the input filenames.
#   bin_sizes:          spatial bin sizes for state discretization.
#   plot_path:          output directory (passed positionally as output_path).
pfa_mycwt_combined <- function(myc_exp, myc2_exp, wt_exp, wt2_exp, wt3_exp, wt4_exp, tau = 20, bin_sizes=c(15,15), plot_path){
  # Read the split motility statistics for one experiment.
  read_split <- function(experiment){
    read.csv(paste0('data/', experiment, '/', 'exp_motility_statistics_split_', tau, '.csv'))
  }
  # Pool per-plate tables, tagging each row with its plate of origin.
  pool_plates <- function(dfs){
    plate <- rep(seq_along(dfs), times = vapply(dfs, nrow, integer(1)))
    data.frame(plate, do.call(rbind, dfs))
  }
  comb_myc_df <- pool_plates(list(read_split(myc_exp), read_split(myc2_exp)))
  comb_wt_df <- pool_plates(list(read_split(wt_exp), read_split(wt2_exp),
                                 read_split(wt3_exp), read_split(wt4_exp)))
  v_myc <- pfa_flux_plot(df = comb_myc_df, bin_sizes = bin_sizes, plot_path,
                         experiment = paste0("MEF_MycRas_", tau, "_b", bin_sizes[1], "_"))
  v_wt <- pfa_flux_plot(df = comb_wt_df, bin_sizes = bin_sizes, plot_path,
                        experiment = paste0("MEF_WT_", tau, "_b", bin_sizes[1], "_"))
}
# MEF experiment directories (two Myc/Ras plates, four wild-type plates).
myc_exp <- "mef/mycras/20160917"
myc2_exp <- "mef/mycras/20160918"
wt_exp <- "mef/wt/20160711"
wt2_exp <- "mef/wt/20160925_0"
wt3_exp <- "mef/wt/20160925_1"
wt4_exp <- "mef/wt/20160927"
# Sweep bin sizes and tau values for the MEF flux analysis.
bin_sizes_all <- rbind(c(3,3), c(5,5), c(10,10), c(15,15), c(20,20), c(30,30))
for (tau in c(20, 25, 30)){
  # seq_len() is safe for zero-row matrices, unlike 1:nrow().
  for (j in seq_len(nrow(bin_sizes_all))){
    pfa_mycwt_combined(myc_exp, myc2_exp, wt_exp, wt2_exp, wt3_exp, wt4_exp, tau=tau, bin_sizes=bin_sizes_all[j,], plot_path=plot_path)
  }
}
### Combined Simulations
plot_path <- "data/sims/"
# Load one simulated-track table and tag every row as plate 1
# (simulations come from a single "plate").
load_sim <- function(path){
  sim <- read.csv(path)
  data.frame(plate = rep(1, nrow(sim)), sim)
}
# Regime-switching simulations: fBm->random walk, power-law->fBm, power-law->RW.
fbm2rw <- load_sim('data/sims/fbm2urw_split_20.csv')
power2fbm <- load_sim('data/sims/pwr2fbm_split_20.csv')
power2rw <- load_sim('data/sims/pwr2urw_split_20.csv')
v_fbm2rw <- pfa_flux_plot(fbm2rw, c(15,15), plot_path, experiment = 'fbm2rw_')
v_pwr2fbm <- pfa_flux_plot(power2fbm, c(15,15), plot_path, experiment = 'pwr2fbm_')
for (j in seq_len(nrow(bin_sizes_all))){
  v_pwr2rw <- pfa_flux_plot(power2rw, bin_sizes_all[j,], plot_path,
                            experiment = paste0('pwr2rw_', 'b', bin_sizes_all[j,1], '_'))
}
# controls: no regime switch (power-law->power-law, RW->RW)
pwr2pwr <- load_sim('data/sims/pwr2pwr_split_20.csv')
rw2rw <- load_sim('data/sims/rw2rw_split_20.csv')
v_pwr2pwr <- pfa_flux_plot(pwr2pwr, c(15,15), plot_path, experiment = "pwr2pwr_")
v_rw2rw <- pfa_flux_plot(rw2rw, c(15,15), plot_path, experiment = "rw2rw_")
bin_sizes_all <- rbind(c(3,3), c(5,5), c(10,10), c(15,15), c(20,20), c(30,30))
for (j in seq_len(nrow(bin_sizes_all))){
  v_rw2rw <- pfa_flux_plot(rw2rw, bin_sizes_all[j,], plot_path,
                           experiment = paste0("rw2rw_", 'b', bin_sizes_all[j,1], '_'))
}
## Compare MEF and MuSC vector distributions
# Welch two-sample t-test on flux vector magnitudes between two datasets;
# the printed test result is written to <plot_path><exp1><exp2>vmag_ttest.txt.
#
# Args:
#   df1, df2:  motility data frames to compare.
#   plot_path: output directory for the t-test text file.
#   exp1/exp2: filename prefixes identifying the two datasets.
#   bin_sizes: spatial bin sizes (new optional parameter; the default
#              preserves the previously hard-coded c(15, 15)).
# Returns: the htest object from t.test().
compare_vdists <- function(df1, df2, plot_path, exp1 = NULL, exp2 = NULL, bin_sizes = c(15, 15)){
  vd1 <- pfa_vector_distribution(pfa_state_locations(df1, bin_sizes))
  vd2 <- pfa_vector_distribution(pfa_state_locations(df2, bin_sizes))
  t <- t.test(vd1$v_mag, vd2$v_mag, alternative = 'two.sided', var.equal = FALSE, conf.level = 0.95)
  capture.output(t, file = paste0(plot_path, exp1, exp2, 'vmag_ttest.txt'))
  return(t)
}
plot_path = "data/split/vdist_comparisons/"
# Pairwise vector-magnitude comparisons across cell types and simulations.
# NOTE(review): comb_df20f/comb_df20n and comb_myc_df/comb_wt_df are created
# inside pfa_musc()/pfa_mycwt_combined() and are not assigned in the global
# environment, so these calls only work when those function bodies were run
# line-by-line interactively -- verify before sourcing this script whole.
compare_vdists(comb_df20f, comb_myc_df, plot_path, exp1 = "MuSC_fgf2_", exp2 = "MEF_MycRas_")
compare_vdists(comb_df20f, comb_wt_df, plot_path, exp1 = "MuSC_fgf2_", exp2 = "MEF_WT_")
compare_vdists(comb_df20n, comb_myc_df, plot_path, exp1 = "MuSC_nofgf2_", exp2 = "MEF_MycRas_")
compare_vdists(comb_df20n, comb_wt_df, plot_path, exp1 = "MuSC_nofgf2_", exp2 = "MEF_WT_")
compare_vdists(comb_df20f, comb_df20n, plot_path, exp1 = "MuSC_fgf2_", exp2 = "MuSC_nofgf2_")
compare_vdists(comb_myc_df, comb_wt_df, plot_path, exp1 = "MEF_MycRas_", exp2 = "MEF_WT_")
compare_vdists(power2rw, pwr2pwr, plot_path, exp1 = "pwr2rw_", exp2 = "pwr2pwr_")
compare_vdists(power2rw, rw2rw, plot_path, exp1 = "pwr2rw_", exp2 = "rw2rw_")
compare_vdists(pwr2pwr, rw2rw, plot_path, exp1 = "pwr2pwr_", exp2 = "rw2rw_")
|
6cbcd256e130e3224ecea7d67a24aab642fa6fdf
|
b364a9a99f4d1c4d159f94d2246df7f9c1f66451
|
/dada2_insect_pipe_v1.R
|
327d71c0b83a3d55d901bdf492a1be4458d60c90
|
[] |
no_license
|
ngeraldi/eDNA_DADA2_taxonomy_pipeline
|
7a677285f38f0ce02c0bd62c8e7e3b2a845f152a
|
aca769a73400c88f72331bed684f0dc8d8e720e2
|
refs/heads/master
| 2023-07-20T21:25:04.136230
| 2023-05-07T07:30:10
| 2023-05-07T07:30:10
| 242,739,654
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,186
|
r
|
dada2_insect_pipe_v1.R
|
# install.packages("devtools")
library("insect") ## devtools::install_github("shaunpwilkinson/insect")
library(ape)
library('taxize') # install.packages("taxize")
library(ShortRead) # packageVersion("ShortRead") # install.packages("ShortRead")
library(seqinr) # install.packages("seqinr")
library(dada2); packageVersion("dada2") # devtools::install_github("benjjneb/dada2")
library(dplyr)
library(tidyr)
#################################################################################
#################################################################################
#################################################################################
#################################################################################
# Mar 2021 done
# arctic surface
# - rs surface euka and 18smini , Dammam , 18stoek, 18smini , euka02 02 and 03 - with ortega
# with sarah - arctic surface , euka02, euka03 : arctic core euka02 euka03 18smini 18s_stoeck
# with sarah - atacama 18smini euka02
###### !!!!!!!!!!!!!!!!!! set universal varibles !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
### change everything with till next section
# enter study name for folder - examples "extraction_test18" , "RS_surface", "Dammam", "Arctic_core" ,
## "Atacama" Arctic_surface
study_name<-"RS_surface" #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# enter primer name, "co1" , "euka02" , "euka03" , "rbclmini" , "18smini" , "vert" , "18s_stoeck"
primer_name<-"18smini"
## if replicate - multiple runs of the same primer used in the same study;
## should be "no" for the 1st replicate or when there is no replicate
### if "yes", a number is appended to the output names
replicate<-"no"
#rep_label<-"rep23"
# set computer to get correct directory, "win" or "mac"
computer<-"mac"
# NOTE: this default is overwritten by the computer-specific block below.
dir<-"C:/Users/nathangeraldi/Dropbox/"
## location to put filtered reads, one directory up from fastqfile below !!!
## (relative to `dir`; the "filtered <out_name>" subfolder is created inside it)
filtfile<-"eDNA_db/Arctic_surface/AS_2_euka02_CLL7R"
## location of miseq fastq files - within dir below
#fastqfile<-"eDNA_db/other_miseq/Extraction_test2_aug2018/primer_cut_vert_exttest2"
#fastqfile<-"eDNA_db/Nojood/noj_18suni_B5W44/noj_18suni_B5W44_cut" #B5W44 APM90
#
#fastqfile<-"eDNA_db/Red_sea_surface/RS_euka02_CLBR2/RS_euka02_CLBR2_cut"
#fastqfile<-"eDNA_db/Red_sea_surface/RS_18smini_BGDBR/RS_18smini_cut"
#fastqfile<-"eDNA_db/Red_sea_surface/RSS_CO1/RS_CO1_cut"
#fastqfile<-"eDNA_db/Red_sea_surface/RSS_vert/RS_vert_cut"
#fastqfile<-"eDNA_db/Red_sea_surface/RS_rbclmini_BF9VN/RS_rbclmini_cut"
#
#fastqfile<-"eDNA_db/Arctic_surface/AS_rbclmini_BR3D3/AS_rbclmini_cut"
#fastqfile<-"eDNA_db/Arctic_surface/AS_euka02_BR345/AS_euka02_cut"
#fastqfile<-"eDNA_db/Arctic_surface/AS-2_CO1_CWC3J/AS-2_CO1_cut"
# active run: directory of primer-trimmed fastq files (relative to `dir`)
fastqfile<-"eDNA_db/Arctic_surface/AS_2_euka02_CLL7R/AS_2_euka02_CLL7R_cut"
#fastqfile<-"eDNA_db/Dammam_cores/DAM_Euka02_BFB79/DAM_Euka02_cut"
#fastqfile<-"eDNA_db/Dammam_cores/Dammam_core_18SV4/DAM_18sv4_cut"
#fastqfile<-"eDNA_db/Dammam_cores/Dammam_core_CO1/primer_cut_co1_dammam"
#fastqfile<-"eDNA_db/Dammam_cores/DC_18smini/DAM_18smini_cut"
#fastqfile<-"eDNA_db/Dammam_cores/DC_vert/DAM_vert_cut"
#fastqfile<-"eDNA_db/Dammam_cores/DAM_rbclmini_BTWPY/DAM_rbcl_cut"
#fastqfile<-"eDNA_db/Dammam_cores/Dammam_core_Euka02_rep23_J74L5/Dam_euka02_rep23_J74L5_cut"
#
#fastqfile<-"eDNA_db/Arctic_core/Arctic_core_18SV4/AC_18sV4_cut"
#fastqfile<-"eDNA_db/Arctic_core/AC_euka02_BJ34K/AC_euka02_cut"
#fastqfile<-"eDNA_db/Arctic_core/AC_vert/AC_vert_cut"
#fastqfile<-"eDNA_db/Arctic_core/AC_18smini-BTWR2/AC_18smini_cut"
#fastqfile<-"eDNA_db/Arctic_core/AC_rbclmini-BTKHB/AC_rbcl_cut"
#fastqfile<-"eDNA_db/Arctic_core/AC-2_CO1_CWDR5/AC-2_CO1_cut"
#fastqfile<-"eDNA_db/Arctic_core/AC-2_18SV4_CW3MM/AC-2_18SV4_cut"
#fastqfile<-"eDNA_db/Arctic_core/AC-2_euka02_CWBMM/AC-2_euka02_CWBMM_cut"
#
#fastqfile<-"eDNA_db/Atacama/Atacama_CO1_C6RMT/Atacama_CO1_cut_check"
#fastqfile<-"eDNA_db/Atacama/Atacama_vert_CJPDF/Atacama_vert_cut"
#fastqfile<-"eDNA_db/Atacama/Atacama_euka02_C6RNK/Atacama_euka02_cut"
# Location for summary table (relative to `dir`) --- folder must exist !!!
out_file<-"Documents/KAUST/eDNA/R/pipe_summary"
# sample data !! can skip - not used til post process
#sample<-openxlsx::read.xlsx("C:/Users/geraldn/Dropbox/Documents/KAUST/eDNA/Samples_Data/Extraction test aug18/EXPERIMENT DESIGN 2018 PCR TEMPLATE.xlsx",
# startRow=1, sheet=1, check.names=T)
#################################################################################
#################################################################################
################# get primer specific information
# Each branch sets, for the selected primer:
#   trunclength_FandT - truncLen (forward, reverse) for filterAndTrim()
#   final_trim        - accepted merged-amplicon length range (bp)
#   insect_ref        - insect classifier tree (.rds)
#   dada_ref          - dada2 assignTaxonomy reference fasta
# euka02 (also covers euka03; `||` is the scalar short-circuit operator,
# correct for a single-value `if` condition)
if (primer_name=="euka02" || primer_name=="euka03") {
  trunclength_FandT<-c(105,105)
  final_trim<-seq(100,150) # peak length 108-110
  insect_ref<-"euka02_learn.rds"
  #dada_ref<-"SILVA_138_trimmed_euka02_dada2_names.fasta"
  dada_ref<-"SILVA_138_trimmed_euka02_dada2_names_with_ortega_macroalgae.fasta"
  # dada_ref<-"SILVA_138_trimmed_euka02_dada2_names_with_ortega_bachmann_macroalgae.fasta"
}
# co1
if (primer_name=="co1") {
  trunclength_FandT<-c(260,180)
  final_trim<-seq(280,355) # peak length 313
  insect_ref<-"CO1_marine_from_insect.rds"
  dada_ref<-"MIDORI_LONGEST_GB239_CO1_trimmed_co1_dada2_names.fasta"
}
# rbclmini
if (primer_name=="rbclmini") {
  trunclength_FandT<-c(180,180)
  # trunclength_FandT<-c(170,160)
  final_trim<-seq(180,240) # peak length 300
  insect_ref<-"minirbcl_learn.rds"
  dada_ref<-"ncbi_rbcl_trimmed_rbclmini_ncbi_names_rbclmini_dada2_names.fasta"
}
# 18smini
if (primer_name=="18smini") {
  trunclength_FandT<-c(150,150)
  final_trim<-seq(150,200) # peak length 165
  insect_ref<-"18smini_learn.rds"
  dada_ref<-"SILVA_138_trimmed_18smini_dada2_names.fasta"
}
# vert
if (primer_name=="vert") {
  trunclength_FandT<-c(120,110)
  final_trim<-seq(115,140) # peak length 123
  insect_ref<-"12s_ruiz_learn.rds"
  dada_ref<-"ncbi_vert_trimmed_ncbi_names_vert_dada2_names.fasta"
  #dada_ref<-"ncbi_12s_euk_only_dada2_mock_virtualPCR.fasta"
}
# 18s_stoeck
if (primer_name=="18s_stoeck") {
  trunclength_FandT<-c(280,200)
  final_trim<-seq(331,431) # peak 381,384
  insect_ref<-"18s_stoeck_learn.rds"
  dada_ref<-"SILVA_138_trimmed_18s_stoeck_dada2_names.fasta"
}
################# get computer specific information
# Dropbox root and dada2 multithreading flag for the current machine.
if (computer == "mac") {
  dir <- "/Users/nathangeraldi/Dropbox/"
  multithreadTF <- TRUE
} else if (computer == "win") {
  ### kaust windows
  dir <- "C:/Users/geraldn/Dropbox/"
  multithreadTF <- FALSE
}
########################## load or reopen workspace
setwd(paste0(dir, out_file))
# load(workspname)
# save.image(workspname)
## setting file names -- should not need to change
### set project workspace location and output prefix
workspname<-paste0(out_file, "/", study_name, "__", primer_name, ".rdata")
out_name<-paste(study_name, primer_name, sep="__")
### !!!!!!!! if the project has more than one miseq run per primer pair, use next:
# (old approach -- append a suffix so outputs still sort alphabetically;
#  appending a bare number did not sort correctly)
# out_name<-paste(study_name, primer_name, "2", sep="__")
#if (replicate=="yes") {
#workspname<-paste0(out_file, "/", study_name, "__", "euka03", ".rdata")
#out_name<-paste(study_name, "euka03", sep="__")
#}
### folder name for filtered fastq (deliberately space-separated)
filter_folder<-paste("filtered", out_name)
### path to primer-trimmed fastq files
path<-paste0(dir, fastqfile)
## path to filtered files
filt_path<-paste0(dir, filtfile)
### path_out for summaries
path_out<-paste0(dir, out_file)
## set working directory based on path
setwd(path)
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
## get list of files - for ncbi deposit
# path<-"/Users/geraldn/Dropbox/eDNA_db/Dammam_cores/Dammam_core_18SV4/Lane1/version_01"
fns <- list.files(path)
# Keep only reverse-read (R2) files; `num` is the leading sample number
# parsed from each filename (NAs introduced if the prefix is not numeric).
mes<-data.frame(fns) %>%
mutate(num=as.numeric(gsub("_.*$", "", fns))) %>%
# arrange(num) %>%
filter(grepl("R2",fns))
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
## begin pipeline ######
## create list of files
# path<-"/Users/geraldn/Dropbox/eDNA_db/Dammam_cores/DAM_Euka02_BFB79/Lane1/version_01"
fns <- list.files(path)
########## filter and trim #################################################################
# Paired forward/reverse primer-trimmed reads, sorted so F and R files pair up.
fnFs <- sort(list.files(path, pattern="_R1_cut.fastq", full.names = TRUE))
fnRs <- sort(list.files(path, pattern="_R2_cut.fastq", full.names = TRUE))
## extract sample names (everything before the first "_")
sample.names <- sapply(strsplit(basename(fnFs), "_"), `[`, 1) # use 1, clean up miseq double names, use later on
## plot read-quality profiles interactively if needed !!!!!!!!!!!!!!!!!!!!!!!
#
# plotQualityProfile(fnRs[10])
################# begin filtering and trimming
# create output folders/paths for the filtered reads
filt_path2 <- file.path(filt_path, filter_folder) # Place filtered files in filtered/ subdirectory
filtFs <- file.path(filt_path2, paste0(sample.names, "_F_filt.fastq.gz"))
filtRs <- file.path(filt_path2, paste0(sample.names, "_R_filt.fastq.gz"))
# filter: truncLen comes from the primer-specific config (trunclength_FandT)
# if need to change - trunclength_FandT<-c(210,180)
out <- filterAndTrim(fnFs, filtFs, fnRs, filtRs, truncLen=trunclength_FandT,
maxN=0, maxEE=c(2,2), truncQ=2, rm.phix=T,
compress=TRUE, multithread=multithreadTF) #
# maxEE- default 2,2 , uses Q score, EE = sum(10^(-Q/10)), sum(10^(-5/10))
# could cut primers here - trimLeft = 0, trimRight = 0, trim-left-f 17 and trim-left-r 21
# head(out)
### learn error rates
# adjust the index range below if a sample has no reads passing the filter
seq_end<-length(filtFs) # c(1:21,23:seq_end)
errF <- learnErrors(filtFs[1:seq_end], multithread=TRUE)
errR <- learnErrors(filtRs[1:seq_end], multithread=TRUE)
### plot learned error model vs. observed rates
plotErrors(errF, nominalQ=TRUE)
### dereplicate #####################################
## need to correct length if one sample does not have sequences - look at out
# length()
# seq_end<-length(filtFs)-1
seq_end<-length(filtFs)
derepFs <- derepFastq(filtFs[1:seq_end], verbose=TRUE)
derepRs <- derepFastq(filtRs[1:seq_end], verbose=TRUE)
# Name the derep-class objects by the sample names
names(derepFs) <- sample.names[1:seq_end]
names(derepRs) <- sample.names[1:seq_end] # c(1:21,23:seq_end)
## sample inference - denoising/read correction (pooled across samples)
dadaFs <- dada(derepFs, err=errF, multithread=T, pool=TRUE)#, pool=TRUE
dadaRs <- dada(derepRs, err=errR, multithread=T, pool=TRUE)
### dadaFs[[1]]
##### merge paired reads ################################
mergers <- mergePairs(dadaFs, derepFs, dadaRs, derepRs, verbose=TRUE)
# Inspect the merger data.frame from the first sample
head(mergers[[1]])
# ASV table: rows = samples, columns = unique merged sequences
seqtab <- makeSequenceTable(mergers)
dim(seqtab) # head(seqtab)
# Inspect distribution of sequence lengths
table(nchar(getSequences(seqtab)))
# keep only amplicons within the expected length range for this primer
final_trim
seqtab2 <- seqtab[,nchar(colnames(seqtab)) %in% final_trim]
dim(seqtab2)
table(nchar(getSequences(seqtab2)))
## remove chimeras
seqtab.nochim <- removeBimeraDenovo(seqtab2, method="consensus", multithread=TRUE, verbose=TRUE)
dim(seqtab.nochim)
# fraction of reads retained after chimera removal
sum(seqtab.nochim)/sum(seqtab)
###################################################################
##### produce per-sample read-tracking summary table
# sum(getUniques(out)) sum(getUniques(out))
getN <- function(x) sum(getUniques(x))
track <- cbind(out, sapply(dadaFs, getN), sapply(mergers, getN), rowSums(seqtab), rowSums(seqtab.nochim))
# If processing a single sample, remove the sapply calls: e.g. replace sapply(dadaFs, getN) with getN(dadaFs)
colnames(track) <- c("input", "filtered", "denoised", "merged", "tabled", "nonchim")
rownames(track) <- sample.names
head(track)
hist(log(colSums(seqtab.nochim))) # number of reads per unique sequence
###############################################################################
################### save summary table and ASV table
###############################################################################
write.table(track, paste(path_out,"/", out_name ,"_summary.csv",sep=""),row.names=T, sep=",")
saveRDS(seqtab.nochim, paste(path_out,"/",out_name,"_seqtab.rds",sep=""))
################################################################################################################
################################################################################################################
####
####
#### Clear environment (need to rerun 1-153) or Close and re-open R ####################
###
#####
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
######## assign taxonomy begin #################################################################
################################################################################################################
###
### set up paths and folders if starting here
#################### !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #######################
# ############# !!!!!! run lines 1 to 172 !!!!!!! ###
####
##########################################################################################
########## get seqtab (reload the saved ASV table after restarting R)
setwd(path_out)
# out_name<-"Arctic_surface__euka02"
seqtab <- readRDS(paste(path_out,"/",out_name,"_seqtab.rds",sep="")) ## this is the pooled, chimera-removed table saved above
# mes<-colSums(seqtab) # mes[1:100]
##########################################################################################################
###### taxonomy assignment with the insect classifier !!!!!!!!!!!!!!!!!
#################################################################################################
## get reference dendrogram
# Build the path from `dir` so it works on both mac and windows; the previous
# hard-coded "C:/Users/geraldn/Dropbox/..." path contradicted the
# computer-specific `dir` mechanism and broke on mac.
insect_ref_path <- paste0(dir, "eDNA_db/reference_data/insect_learn") # setwd(insect_ref_path)
ff <- list.files(insect_ref_path) # list.files
ref <- readRDS(paste0(insect_ref_path, "/", insect_ref)) # ref<-readRDS("18smini_learn.rds")
#################################################
# convert the ASV sequences (seqtab column names) into insect's DNAbin format
x <- char2dna(colnames(seqtab))
## name the sequences sequentially (ASV1, ASV2, ...)
names(x) <- paste0("ASV", seq_along(x)) # head(x)
#############################################
## assign taxonomy with insect
# package defaults are threshold = 0.9, decay = TRUE; threshold lowered here
# ranks chosen to match the dada2 taxLevels used in the section below
tax <- classify(x, ref, threshold = 0.5, decay = TRUE, cores = "autodetect", ping = 1,
                ranks = c("superkingdom","kingdom", "phylum", "class", "order", "family", "genus",
                          "species"))
## summarize assignments per taxon for a quick look  # names(tax)
tax2 <- tax %>%
  group_by(taxID,superkingdom,kingdom,phylum,class,order,family,genus,species,taxon) %>%
  summarize(mean=mean(score), n=n())
#
setwd(path_out)
write.table(tax, paste0(out_name, "_taxass_insect.csv"), sep=",")
#
#
###############################################################################
###############################################################################
#### taxonomy assignment with dada2::assignTaxonomy !!!!!!!!!!!!!!!!!!!!!!!
#############################################################
## get reference fasta
path_ref<-paste0(dir, "eDNA_db/reference_data/dada_final")
# reference file name (dada_ref is set in the primer configuration above)
# dada_ref<-"SILVA_138_trimmed_euka02_dada2_names_with_ortega_bachmann_macroalgae.fasta"
ref<-dada_ref
#
list.files(path_ref)
setwd(path_ref) #
# optionally inspect the reference library
# lib<- read.fasta(file = ref, as.string = TRUE, strip.desc=TRUE) #
#fast1<-getAnnot(lib)## head(lib) ## psych::describe(getLength(lib))
##
set.seed(100) # Initialize random number generator for reproducibility
## run at three bootstrap-confidence cutoffs (minBoot); higher = stricter.
## (tryRC=TRUE would also test reverse complements; package default minBoot is 50)
taxass50<- assignTaxonomy(seqtab, ref, multithread = TRUE, verbose = TRUE, minBoot=50,
                          taxLevels=c("Superkingdom","Kingdom","Phylum", "Class", "Order", "Family", "Genus","Species"))
taxass70<- assignTaxonomy(seqtab, ref, multithread = TRUE, verbose = TRUE, minBoot=70,
                          taxLevels=c("Superkingdom","Kingdom","Phylum", "Class", "Order", "Family", "Genus","Species"))
taxass90<- assignTaxonomy(seqtab, ref, multithread = TRUE, verbose = TRUE, minBoot=90,
                          taxLevels=c("Superkingdom","Kingdom","Phylum", "Class", "Order", "Family", "Genus","Species"))
#unname(head(taxass))
setwd(path_out)
# !!!! check the output !!!!
# out_name<-"Arctic_surface__euka02_with_ortega_and_Bachm"
write.table(taxass50, paste0(out_name, "_taxass50.csv"), sep=",", row.names=TRUE, col.names = NA)
write.table(taxass70, paste0(out_name, "_taxass70.csv"), sep=",", row.names=TRUE, col.names = NA)
write.table(taxass90, paste0(out_name, "_taxass90.csv"), sep=",", row.names=TRUE, col.names = NA)
## quick look: distinct species-level assignments at minBoot = 70
taxall<-data.frame(taxass70)
mess<-taxall %>%
  # dplyr::filter(Phylum=="Chordata") #%>%
  dplyr::filter(complete.cases(Species)) %>%
  dplyr::group_by_all() %>%
  summarise(n())
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
#### mess ------ old code left behind, do not use
###############################################################################
# NOTE(review): dead code kept for reference only -- it loads a different
# workspace ("dada.rdata") and references `taxseq` before it is created.
# Do not source this section.
setwd(paste(dir,"Documents/KAUST/eDNA/R/export/dada",sep=""))
save.image("dada_kapser_tax.rdata")
# Close R, Re-open R
setwd(paste(dir,"Documents/KAUST/eDNA/R/export/dada",sep=""))
load("dada.rdata")
############################## put together
taxonlist<-as.data.frame(taxseq)
mess<- taxonlist %>%
group_by(Class) %>%
summarise(n())
setwd(paste(dir,"eDNA_db/reference_data/dada_final", sep=""))
# run lines 1-85 for set up
taxseq<- seqinr::read.fasta(file =ref, as.string = TRUE, strip.desc=TRUE) # # psych::describe(getLength(taxseq))
fast1<-getAnnot(taxseq)## get only annotation data
head(fast1) # head(mess)
#fastg<-gsub(" .*$","",fast1) ## keep only before first " " head(fastg) # use for NCBI
fastg<-gsub("\\..*$","",fast1) ## keep only before first "." ### use for silva
seq1 <- getSequence(taxseq) # head(seq1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.