blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c16138f40d5b5352e7daf49bbdad4ff0eca4353f | eaf28912e257e8782ff06c94e694235f0a2b0873 | /man/deletions.Rd | 6eab8ef6288a268ccb9a4c02c3f509302722f180 | [
"MIT"
] | permissive | robertzk/objectdiff | 5032dee34062cc589a6ec972f10a2eddef301dbe | 59c0e155d08731f07de14a0652e9195e98ebbc5b | refs/heads/master | 2021-01-18T22:36:40.184773 | 2016-11-01T21:35:55 | 2016-11-01T21:35:55 | 23,096,345 | 8 | 1 | null | 2016-11-01T21:35:55 | 2014-08-19T04:05:08 | R | UTF-8 | R | false | true | 445 | rd | deletions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diff.R
\name{deletions}
\alias{deletions}
\title{Compute a patch of deletions on a recursive object.}
\usage{
deletions(old_object, new_object)
}
\arguments{
\item{old_object}{ANY. The "before" object.}
\item{new_object}{ANY. The "new" object. These are usually a data.frame or
an environment.}
}
\description{
Compute a patch of deletions on a recursive object.
}
|
b608c7156f7aac002c96b4a98df26f0bba0e994b | 719acccfe2380a91718bf6969487f646590a0174 | /R/ppclp.R | 6f9d20ae4e026c0185fb3dc5ceffa837bcbaac58 | [] | no_license | CHuanSite/ppclp | 6a91c88545c66269f9b0f1993c6f790cca781e6f | 1a72faee75cb282726daed67429844755e12a8a0 | refs/heads/master | 2021-06-23T06:57:44.793291 | 2021-05-05T22:40:48 | 2021-05-05T22:40:48 | 216,651,612 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,198 | r | ppclp.R | #' 2D prbabilistic principal curve with length penalty
#'
#' This function applies the probabilistic principal curve algorithm with length penalty
#'
#' @param x The coordinate of the x axis
#' @param y The coordinate of the y axis
#' @param x_fix The x coordinates of the two fixed endpoints (start and end) of the curve
#' @param y_fix The y coordinates of the two fixed endpoints (start and end) of the curve
#' @param K the number of knots of the curve
#' @param degree_free the degree of freedom of the B spline
#' @param lambda The magnitude value added to the length penalty
#' @param T The total number of iterations to do the EM algorithm
#' @import tidyverse splines splines2
#' @export ppclp2D
#' @examples
#' data("threeExample")
#' tmpCurve = ppclp2D(threeExample$x, threeExample$y, threeExample$xFix, threeExample$yFix)
#' plot(threeExample$x, threeExample$y, xlim = c(0,1), ylim = c(0,1), pch = 16, cex = 0.8)
#' lines(tmpCurve$xFit, tmpCurve$yFit, type = "l", col = "red", lwd = 5)
ppclp2D <- function(x, y, x_fix, y_fix, K = 50, degree_free = 10, lambda = 0.5, T = 100){
  ## N: number of observed points the curve is fitted through
  N = length(x)
  ## Spline basis B evaluated at K equally spaced knots on [0, 2*pi),
  ## with its first (B_der) and second (B_der_der) derivatives.
  w <- seq(0, 2 * pi, by = 2 * pi / K)[1 : K]
  B = cbind(1, bs(w, df = degree_free))
  B_der = cbind(0, dbs(w, df = degree_free))
  B_der_der = cbind(0, dbs(w, df = degree_free, derivs = 2))
  ## Basis rows at the two end knots; used below to constrain the fitted
  ## curve to pass through the fixed endpoints (x_fix, y_fix).
  B_tilde = B[c(1, nrow(B)), ]
  ## PI is the N x K matrix of responsibilities p_ik
  PI = matrix(1 / K, nrow = N, ncol = K)
  PI_sum_old = rep(1 / K, K)
  ## sigma is the variance of the noise in the gaussian distribution
  sigma_old = 1
  ## beta_x and beta_y are the coefficients for the splines to the x-axis and y-axis
  beta_x_old = runif(degree_free + 1, -5, 5)
  beta_y_old = runif(degree_free + 1, -5, 5)
  likelihood_store = c()
  ## Knot-grid quadrature approximations of the length and smoothness
  ## penalty matrices (smooth_penalty is computed but unused here).
  length_penalty = t(B_der) %*% B_der / K * 2 * pi
  smooth_penalty = t(B_der_der) %*% B_der_der / K * 2 * pi
  ## The procedure of the EM-Algorithm
  for(t in 1 : T){
    ## items used during the EM procedure
    x.i.matrix = matrix(x,nrow=length(x),ncol=K,byrow=FALSE)
    x.k.matrix = matrix(B %*% beta_x_old, nrow = N, ncol = length(B %*% beta_x_old), byrow = TRUE)
    y.i.matrix = matrix(y,nrow = length(y), ncol = K, byrow = FALSE)
    y.k.matrix = matrix(B %*% beta_y_old, nrow = N, ncol = length(B %*% beta_y_old), byrow = TRUE)
    ## E-step: posterior responsibility of each knot for each data point
    PI = exp(-1 / as.numeric((2 * sigma_old)) * ((x.i.matrix - x.k.matrix) ^ 2 + (y.i.matrix - y.k.matrix)^ 2)) %*% diag(PI_sum_old)
    PI = PI / apply(PI, 1, sum)
    ## M-step
    ## Update PI_sum (mixing proportions)
    PI_sum_new = 1 / N * apply(PI, 2, sum)
    ## Update sigma
    sigma_temp = 0
    sigma_temp = sum(((x.i.matrix - x.k.matrix)^2 + (y.i.matrix - y.k.matrix)^2 ) * PI)
    sigma_new = sigma_temp / (2 * N)
    ## Update beta_x and beta_y via weighted, length-penalized least squares
    B_XX = 0
    B_YY = 0
    B_ZZ = 0
    B_XY = 0
    for(i in 1 : N){
      B_XX = B_XX + t(B) %*% as.matrix(PI[i, ]) * x[i]
      B_YY = B_YY + t(B) %*% as.matrix(PI[i, ]) * y[i]
    }
    diag_B = apply(PI, 2, sum) %>% diag
    B_XY = t(B) %*% diag_B %*% B
    ## Inverse matrix for the estimation
    Inverse_M = solve(B_XY + lambda * length_penalty )
    beta_x_new = Inverse_M %*% B_XX
    beta_y_new = Inverse_M %*% B_YY
    ## Pseudo-inverse correction projecting the unconstrained solution onto
    ## the subspace of curves passing through the fixed endpoints
    Inverse_P = Inverse_M %*% t(B_tilde) %*%
      solve(B_tilde %*% Inverse_M %*% t(B_tilde))
    beta_x_new = beta_x_new -
      Inverse_P %*%
      (B_tilde %*% beta_x_new - x_fix)
    beta_y_new = beta_y_new -
      Inverse_P %*%
      (B_tilde %*% beta_y_new - y_fix)
    ## Computation of the log likelihood (monitoring only)
    likelihood = 0
    for(i in 1 : N){
      likelihood_temp = 0
      for(k in 1 : K){
        likelihood_temp = likelihood_temp + PI_sum_new[k] * 1 / sigma_new * exp(-1/(2 * sigma_new) * ((x[i] - B[k, ] %*% beta_x_new )^2 + (y[i] - B[k, ] %*% beta_y_new)^2))
      }
      likelihood = likelihood + log(likelihood_temp)
    }
    PI_sum_old = PI_sum_new
    sigma_old = sigma_new
    beta_x_old = beta_x_new
    beta_y_old = beta_y_new
    #print(likelihood)
    likelihood_store = c(likelihood_store, likelihood)
  }
  ## Diagnostic plot of the final fit.
  ## NOTE(review): axis limits are hard-coded to [0, 1]; data outside the
  ## unit square will be drawn off-scale.
  plot(x,y, xlim = c(0,1), ylim = c(0,1), pch = 16, cex = 0.8)
  lines(B %*% beta_x_new, B %*% beta_y_new, type = "l", col = "red", lwd = 5)
  ## Fix: the original called dev.off() here, closing a graphics device the
  ## function did not open (hiding the plot just drawn, or erroring when no
  ## device was open). The likelihood trace is now also returned.
  return(list(xFit = B %*% beta_x_new, yFit = B %*% beta_y_new,
              likelihood = likelihood_store))
}
#' 3D probabilistic principal curve with length penalty
#'
#' This function applies the probabilistic principal curve algorithm with length penalty to 3D data
#'
#' @param x The coordinate of the x axis
#' @param y The coordinate of the y axis
#' @param z The coordinate of the z axis
#' @param x_fix The x coordinates of the starting and ending points of the curve
#' @param y_fix The y coordinates of the starting and ending points of the curve
#' @param z_fix The z coordinates of the starting and ending points of the curve
#' @param K the number of knots of the curve
#' @param degree_free the degree of freedom of the B spline
#' @param lambda The magnitude value added to the length penalty
#' @param T The total number of iterations to do the EM algorithm
#' @import tidyverse splines splines2
#' @export ppclp3D
#' @examples
#' data("spectExample")
#' tmpCurve = ppclp3D(spectExample$x, spectExample$y, spectExample$z, spectExample$xFix, spectExample$yFix, spectExample$zFix)
#' plot_ly() %>% add_trace(x = spectExample$x, y = spectExample$y, z = spectExample$z, type = "scatter3d", mode = "markers", name = 'points', marker = list(size = 1, color = 'rgba(0, 0, 0, .9)', opacity = 0.4)) %>%
#' add_trace(x = spectExample$xFix[1], y = spectExample$yFix[1], z = spectExample$zFix[1], type = "scatter3d", mode = "markers", name = 'A', marker = list(size = 10, color = 'rgba(0, 255, 0, .9)', opacity = 1)) %>%
#' add_trace(x = spectExample$xFix[2], y = spectExample$yFix[2], z = spectExample$zFix[2], type = "scatter3d", mode = "markers", name = 'M', marker = list(size = 10, color = 'rgba(0, 0, 255, .9)', opacity = 1)) %>%
#' add_trace(x = as.vector(tmpCurve$xFit), y = as.vector(tmpCurve$yFit), z = as.vector(tmpCurve$zFit), type = "scatter3d", mode = "lines", name = "theoretical line", line = list(width = 5, color = 'rgba(255, 0, 0, .9)'))
ppclp3D <- function(x, y, z, x_fix, y_fix, z_fix, K = 200, degree_free = 50, lambda = 100, T = 20){
  ## N: number of observed 3D points the curve is fitted through
  N = length(x)
  ## Basis for the spline (B) and its first/second derivatives, evaluated
  ## at K equally spaced knots on [0, 2*pi)
  w <- seq(0, 2 * pi, by = 2 * pi / K)[1 : K]
  B = cbind(1, bs(w, df = degree_free))
  B_der = cbind(0, dbs(w, df = degree_free))
  B_der_der = cbind(0, dbs(w, df = degree_free, derivs = 2))
  ## Basis rows at the two end knots; used below to constrain the curve to
  ## pass through (x_fix, y_fix, z_fix)
  B_tilde = B[c(1,nrow(B)), ]
  ## PI is the N x K matrix of p_ik (responsibilities)
  PI = matrix(1 / K, nrow = N, ncol = K)
  PI_sum_old = rep(1 / K, K)
  ## sigma is the variance of the noise in the gaussian distribution
  sigma_old = 1
  ## beta_x/beta_y/beta_z are the spline coefficients per axis; the
  ## "* 0" zeroes the random draw, so all coefficients start at 0
  beta_x_old = runif(degree_free + 1, -5, 5) * 0
  beta_y_old = runif(degree_free + 1, -5, 5) * 0
  beta_z_old = runif(degree_free + 1, -5, 5) * 0
  ## allocated but never filled in this 3D implementation
  likelihood_store = c()
  ## knot-grid quadrature of the length/smoothness penalty matrices
  ## (smooth_penalty is computed but unused)
  length_penalty = t(B_der) %*% B_der / K * 2 * pi
  smooth_penalty = t(B_der_der) %*% B_der_der / K * 2 * pi
  ## The procedure of the EM-Algorithm
  for(t in 1 : T){
    ## items used during the EM procedure
    x.i.matrix = matrix(x,nrow=length(x),ncol=K,byrow=FALSE)
    x.k.matrix = matrix(B %*% beta_x_old, nrow = N, ncol = length(B %*% beta_x_old), byrow = TRUE)
    y.i.matrix = matrix(y,nrow = length(y), ncol = K, byrow = FALSE)
    y.k.matrix = matrix(B %*% beta_y_old, nrow = N, ncol = length(B %*% beta_y_old), byrow = TRUE)
    z.i.matrix = matrix(z, nrow = length(z), ncol = K, byrow = FALSE)
    z.k.matrix = matrix(B %*% beta_z_old, nrow = N, ncol = length(B %*% beta_z_old), byrow = TRUE)
    ## expand sigma to N x K for the elementwise division in the E-step
    ## (it is reassigned a scalar again at the bottom of the loop)
    sigma_old = matrix(sigma_old, nrow = N, ncol = K)
    ## E-step: posterior responsibility of each knot for each point
    PI = exp(-1 / as.numeric((2 * sigma_old)) * ((x.i.matrix - x.k.matrix) ^ 2 + (y.i.matrix - y.k.matrix)^ 2 + (z.i.matrix - z.k.matrix)^ 2)) %*% diag(PI_sum_old)
    PI = PI / apply(PI, 1, sum)
    ## M-step
    ## Update PI_sum (mixing proportions)
    PI_sum_new = 1 / N * apply(PI, 2, sum)
    ## Update sigma
    sigma_temp = 0
    sigma_temp = sum(((x.i.matrix - x.k.matrix)^2 + (y.i.matrix - y.k.matrix)^2 + (z.i.matrix - z.k.matrix)^2) * PI)
    sigma_new = 1 * sigma_temp / (3 * N)
    ## Update beta_x, beta_y and beta_z via weighted penalized least squares
    B_XX = 0
    B_YY = 0
    B_ZZ = 0
    B_XY = 0
    for(i in 1 : N){
      #B_XY = B_XY + t(B) %*% diag(PI[i, ]) %*% B
      B_XX = B_XX + t(B) %*% as.matrix(PI[i, ]) * x[i]
      B_YY = B_YY + t(B) %*% as.matrix(PI[i, ]) * y[i]
      B_ZZ = B_ZZ + t(B) %*% as.matrix(PI[i, ]) * z[i]
    }
    diag_B = apply(PI, 2, sum) %>% diag
    B_XY = t(B) %*% diag_B %*% B
    #B_XX = apply(t(B) %*% (t(PI) %*% diag(x)), 1, sum)
    ## Inverse matrix for the estimation
    Inverse_M = solve(B_XY + lambda * length_penalty)
    beta_x_new = Inverse_M %*% B_XX
    beta_y_new = Inverse_M %*% B_YY
    beta_z_new = Inverse_M %*% B_ZZ
    ## Pseudo-inverse correction projecting the unconstrained solution onto
    ## the subspace of curves passing through the fixed endpoints
    Inverse_P = Inverse_M %*% t(B_tilde) %*%
      solve(B_tilde %*% Inverse_M %*% t(B_tilde))
    beta_x_new = beta_x_new -
      Inverse_P %*%
      (B_tilde %*% beta_x_new - x_fix)
    beta_y_new = beta_y_new -
      Inverse_P %*%
      (B_tilde %*% beta_y_new - y_fix)
    beta_z_new = beta_z_new -
      Inverse_P %*%
      (B_tilde %*% beta_z_new - z_fix)
    PI_sum_old = PI_sum_new
    sigma_old = sigma_new
    beta_x_old = beta_x_new
    beta_y_old = beta_y_new
    beta_z_old = beta_z_new
    ## progress indicator: prints the current EM iteration number
    print(t)
  }
  ## Evaluate the fitted curve coordinates on the spline basis
  x.fit = B %*% beta_x_new
  y.fit = B %*% beta_y_new
  z.fit = B %*% beta_z_new
  return(list(xFit = x.fit, yFit = y.fit, zFit = z.fit))
}
# library(oro.nifti)
# library(brainR)
# library(rgl)
#
# Img = readANALYZE("/Users/huanchen/Downloads/DREAM-Amer_Sabrine/J001_Product\ A/J001_111516_2hr/1.2.840.113704.3.1.32161115.10110.83/J001_111516_2\ hr_ECTHd1_IRAC001_DS.img")
# ctImg = readNIfTI("/Users/huanchen/Downloads/DREAM-Amer_Sabrine/J001_Product\ A/J001_111516_2hr/1.2.840.113704.3.1.32161115.10110.83/J001_111516_2\ hr_ECTHd1_IRAC001_DS.nii.gz")
#
#
# nColLevels = 11
# col = c(gray(0), heat.colors(nColLevels - 1))
# colCT = gray(seq(0, 1, length = nColLevels))
#
# withinThresh = Img[Img >= 10]
# withinThreshCT = ctImg[ctImg >= 10]
# breaks = quantile(withinThresh, (0 : nColLevels) / nColLevels)
# breaksCT = c(0, quantile(withinThreshCT, (0 : (nColLevels - 1)) / (nColLevels - 1)))
#
# XYZ = c(70, 70, 32)
#
# image(z = ctImg[,,XYZ[3]],
# 1 : dim(Img)[1],
# 1 : dim(Img)[2],
# col = colCT,
# asp = 1,
# axes = FALSE,
# #breaks = breaksCT,
# xlab= "",
# ylab = ""
# )
#
#
# image(z = Img[, ,XYZ[3]],
# 1 : dim(Img)[1],
# 1 : dim(Img)[2],
# col = col,
# breaks = breaks,
# asp = 1,
# axes = FALSE,
# add = TRUE
# )
#
# title("Z slice")
# abline(v = XYZ[1], col = "white", lwd = 3)
# abline(h = XYZ[2], col = "white", lwd = 3)
#
#
# spectExample = list(x = spectExample$x, y = spectExample$y, z = spectExample$z, xFix = spectExample$xFix, yFix = spectExample$yFix, zFix = spectExample$zFix, Img = Img, ctImg = ctImg)
#
# save(spectExample, file = "data/spectExample.RData")
#
#
#
|
b9e081f9ddc985c3e167e60bf4254ff89bd0e9f3 | 079047680235f782798648adb73d2317cbee42eb | /Semana 7/Atividade2.R | 0c6ca58982548d5e31e7b64cba9ed5f157bc43aa | [] | no_license | marcustorresz/ETLdata | a8fcb0688caa6e5dd58f608daed95ccd6cda4cde | b78a432ae90fe068ba701cdf76081757000c3750 | refs/heads/main | 2023-05-31T04:52:44.468261 | 2021-07-10T21:42:12 | 2021-07-10T21:42:12 | 354,947,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,116 | r | Atividade2.R | ################### ATIVIDADE 2 ############
#O segundo exercício tem por base o que aprendemos sobre ETL e large data.
######################## PACOTES ###############################
rm(list=ls())
if (!require('ff')) install.packages('ff'); library('ff')
if (!require('ffbase')) install.packages('ffbase'); library('ffbase')
if (!require('dplyr')) install.packages('dplyr'); library('dplyr')
if (!require('plyr')) install.packages('plyr'); library('plyr')
if (!require('readr')) install.packages('readr'); library('readr')
setwd("C:/Users/marvi/OneDrive - Universidade Federal de Pernambuco/UFPE/DOUTORADO/2021.1/Disciplinas/ETL/ETLdata/Semana 7")
#1. Extraia em padrão ff todas as bases de situação final de alunos disponíveis neste endereço:
# http://dados.recife.pe.gov.br/dataset/situacao-final-dos-alunos-por-periodo-letivo
data2020 <- read.csv.ffdf(file = "http://dados.recife.pe.gov.br/dataset/ce5168d4-d925-48f5-a193-03d4e0f587c7/resource/9dc84eed-acdd-4132-9f1a-a64f7a71b016/download/situacaofinalalunos2020.csv")
data2019 <- read.csv.ffdf(file = "http://dados.recife.pe.gov.br/dataset/ce5168d4-d925-48f5-a193-03d4e0f587c7/resource/3b03a473-8b20-4df4-8628-bec55541789e/download/situacaofinalalunos2019.csv")
data2018 <- read.csv.ffdf(file = "http://dados.recife.pe.gov.br/dataset/ce5168d4-d925-48f5-a193-03d4e0f587c7/resource/8f3196b8-c21a-4c0d-968f-e2b265be4def/download/situacaofinalalunos2018.csv")
data2017 <- read.csv.ffdf(file = "http://dados.recife.pe.gov.br/dataset/ce5168d4-d925-48f5-a193-03d4e0f587c7/resource/70c4e6fc-91d2-4a73-b27a-0ad6bda1c84d/download/situacaofinalalunos2017.csv")
data2016 <- read.csv.ffdf(file = "http://dados.recife.pe.gov.br/dataset/ce5168d4-d925-48f5-a193-03d4e0f587c7/resource/f42a3c64-b2d7-4e2f-91e5-684dcd0040b9/download/situacaofinalalunos2016.csv")
data2015 <- read.csv.ffdf(file = "http://dados.recife.pe.gov.br/dataset/ce5168d4-d925-48f5-a193-03d4e0f587c7/resource/264f0a37-ad1c-4308-9998-4f0bd3c6561f/download/situacaofinalalunos2015.csv")
data2014 <- read.csv.ffdf(file = "http://dados.recife.pe.gov.br/dataset/ce5168d4-d925-48f5-a193-03d4e0f587c7/resource/0a2aec2f-9634-4408-bbb4-37e1f9c74aa1/download/situacaofinalalunos2014.csv")
data2013 <- read.csv.ffdf(file = "http://dados.recife.pe.gov.br/dataset/ce5168d4-d925-48f5-a193-03d4e0f587c7/resource/95eb9ea8-cd75-4efa-a1ba-ba869f4e92b9/download/situacaofinalalunos2013.csv")
data2012 <- read.csv.ffdf(file = "http://dados.recife.pe.gov.br/dataset/ce5168d4-d925-48f5-a193-03d4e0f587c7/resource/f6633c26-be36-4c27-81cb-e77d90316cff/download/situacaofinalalunos2012.csv")
#2. Junte todas as bases extraídas em um único objeto ff.
# Não sei por que, mas eu tentei juntar os bancos no total e não consegui fazendo uma lista. Há alguma maneira mais efetiva
DATA <- ffdfappend(data2020,data2019,data2018)
DATA2 <- ffdfappend(data2017,data2016,data2015)
DATA3 <- ffdfappend(data2014,data2013,data2012)
DATA <- ffdfappend(DATA,DATA2,DATA3)
#3. Limpe sua staging area
rm(list=(ls()[ls()!="DATA"]))
#4. Exporte a base única em formato nativo do R
write_rds(DATA, "DATA.rds")
??commmit
|
4789670d4838218f2377682026714f5c7083f838 | 1fd6f360719c77940faa7cc580c5567dc3b28541 | /man/repeat_lasso.Rd | 0eb960868a6036a4288f4219a2343f9f2a5bd1d2 | [] | no_license | kmayerb/tuna | 410fffee0322ffb7ec11d056bd5272a482b46ce2 | 2c9f64276e53150a7a112b785dbce12e63c7126a | refs/heads/master | 2020-06-04T23:39:26.547640 | 2019-06-17T17:06:38 | 2019-06-17T17:06:38 | 192,236,299 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 879 | rd | repeat_lasso.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tuna_lasso.R
\name{repeat_lasso}
\alias{repeat_lasso}
\title{repeat_lasso}
\usage{
repeat_lasso(trials, my_x, my_y, my_alpha = 1, my_family = "gaussian")
}
\arguments{
\item{trials}{number of times to repeat cross-validation on the lasso}
\item{my_x}{matrix of numeric predictors}
\item{my_y}{matrix (1D) response}
\item{my_alpha}{numeric from 0 (ridge regression) to 1 (lasso)}
\item{my_family}{string "gaussian", 'binomial"}
}
\value{
list, first element is data frame summarizing fits, second element contains all of the glmnet cvfit objects
}
\description{
Call run_lasso over a specified number of trials to see the frequency with which the regularization
procedure includes each coefficient.
}
\examples{
repeat_lasso(10,
my_x = as.matrix(mtcars[1:15,2:dim(mtcars)[2]]),
my_y = as.matrix(mtcars[1:15,1]))
}
|
36ec314635488336a2d740469326db0dbb98d886 | aa2a8db459bc3f62518af710392795abdc38a042 | /datatable.R | 543f646f0773d4a1aec7d7b9a259f8bb1341e444 | [] | no_license | pluswc/lecture | 7fbe2c773c07c2c460daa33c3822b935dad457ab | 5a24e9e3015dbe5dc21bf1f6cb4192eac9c044ac | refs/heads/main | 2023-03-30T18:40:45.286831 | 2021-04-05T10:12:08 | 2021-04-05T10:12:08 | 345,593,055 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,934 | r | datatable.R |
library(data.table)
library(tidyverse)
library(skimr)
# data table basic --------------------------------------------------------
# dt[i, j, by]
# use the data.table dt
# use i to build a subset of rows (where)
# use j to select columns (select)
# use by to group rows (group by)
# create data.table -------------------------------------------------------
dt <- data.table(a = c(1, 2),
                 b = c("a", "b"))
df <- data.frame(a = c(1, 2),
                 b = c("a", "b"))
str(df)
str(dt)
as.data.table(df)
setDT(df)
str(df)
# using the penguins data ----------------------------------------------------
# load the data
library(ggplot2)
install.packages('GGally')
library(GGally)
ggpairs(penguins)
remotes::install_github("allisonhorst/palmerpenguins")
library(webshot)
library(palmerpenguins)
webshot(url="https://allisonhorst.github.io/palmerpenguins/", selector = "#meet-the-palmer-penguins > p > img", "fig/penguin-species.png")
webshot(url="https://allisonhorst.github.io/palmerpenguins/", selector = "#bill-dimensions > p > img", "fig/penguin-variable.png")
penguins <- penguins
head(penguins)
summary(penguins)
# explore the data -----------------------------------------------------------
# summary
summary(penguins)
skim(penguins)
# check for NA values
is.na(penguins)
# Q1. remove the penguins that contain NA
# 1) find the rows with at least one NA
# 2) remove those rows
# Subset rows using i -----------------------------------------------------
penguins_dt <- setDT(copy(penguins))
str(penguins_dt)
skim(penguins_dt)
# extract a subset using row numbers
penguins_dt[1:2, ]
penguins_dt[species == 'Adelie', ]
# logical operators that can be used in i
# "<", "<=", ">", ">="
# "!", "is.na()", "!is.na()", "%in%"
# "|", "&", "%like%", "%between%"
penguins_dt[species == 'Adelie' &
              sex == 'male', ]
# selecting columns using j -------------------------------------------------
penguins[,2]
penguins[,-2]
penguins_dt[,c(2, 3)]
penguins_dt[,-c(2, 3)]
penguins_dt[,c("bill_length_mm", "bill_depth_mm")]
penguins_dt[,-c("bill_length_mm", "bill_depth_mm")]
penguins_dt[,.(bill_length_mm, bill_depth_mm)]
## summary
penguins_dt[,mean(bill_length_mm)]
penguins_dt[,.(mean(bill_length_mm))]
penguins_dt[,.(mean = mean(bill_length_mm))]
penguins_dt[,.(sum = sum(bill_length_mm))]
penguins_dt[,.(mean = mean(bill_length_mm),
               sum = sum(bill_length_mm))]
# computing with columns and creating new ones
penguins_dt[, bill_length_mean := mean(bill_length_mm, na.rm = TRUE)]
# create several columns at once
# 1)
penguins_dt[,`:=` (bill_depth_mm_mean = mean(bill_depth_mm, na.rm = TRUE),
                   flipper_length_mean = mean(flipper_length_mm, na.rm = TRUE))]
# 2)
# note: without na.rm = TRUE these means are NA because the data contain NAs
penguins_dt[,c("bill_depth_mean", "flipper_depth_mean") := .(mean(bill_depth_mm),
                                                             mean(flipper_length_mm))]
# 3)
nm <- c("bill_depth_mean", "flipper_depth_mean")
# fix: wrap nm in parentheses so data.table assigns to the columns named IN
# nm (a bare `nm :=` tries to create a single column literally called "nm"
# and errors because two values are supplied)
penguins_dt[,(nm) := .(mean(bill_depth_mm),
                       mean(flipper_length_mm))]
# create a column from several columns
penguins_dt[,island_sex := paste(island, sex, sep = "_") %>%
              as.factor()]
# modify some values
penguins_dt[1, sex := 'female']
penguins_dt[1, sex := 'male']
# delete columns
penguins_dt[,island_sex:=NULL]
penguins_dt[,`:=` (bill_length_mean = NULL,
                   bill_depth_mm_mean = NULL,
                   flipper_length_mean = NULL)]
# change a column's type
# 'as.integer', 'as.numeric', 'as.character', 'as.Date'
str(penguins_dt)
penguins_dt[,year := as.factor(year)]
str(penguins_dt)
# Q2. for each penguin species, compute and print the mean of
#     'bill_length_mm', 'bill_depth_mm', 'flipper_length_mm'
# expected output:
# species bill_length_mean bill_length_sd bill_depth_mean bill_depth_sd flipper_length_mean flipper_length_sd
# 1: Adelie 38.82397 2.662597 18.34726 1.219338 190.1027 6.521825
# 2: Gentoo 47.56807 3.106116 14.99664 0.985998 217.2353 6.585431
# 3: Chinstrap 48.83382 3.339256 18.42059 1.135395 195.8235 7.131894
species <- penguins_dt[,species %>% unique]
copy_penguins_dt <- copy(penguins_dt)
result <- data.table()
# (exercise skeleton: fill in the loop body)
for (sp in species) {
}
result
# Group according to by ---------------------------------------------------
penguins_dt[,mean(bill_length_mm), by = .(species)]
penguins_dt[,bill_length_mean := mean(bill_length_mm), by = .(species)]
penguins_dt[,`:=` (bill_depth_mean = mean(bill_depth_mm),
                   flipper_depth_mean = mean(flipper_length_mm)),
            by = .(species)]
penguins_dt[,mean(bill_length_mean), by=.(body_mass_g >= 4000, species)]
penguins_dt %>% setcolorder(c("species", "island", "sex", "year"))
penguins_dt
penguins_dt[,length(bill_length_mean), by = .(species:year)]
# Q2. for each species and sex, build a table with the min, mean and max of
#     'bill_length_mm', 'bill_depth_mm', 'flipper_length_mm',
#     naming columns like 'bill_length_min', 'bill_length_mean', 'bill_length_median', 'bill_length_max'
# species sex species bill_length_min bill_length_mean bill_length_max bill_depth_min bill_depth_mean
# 1: Adelie female Adelie 32.1 37.25753 42.2 15.5 17.62192
# 2: Adelie male Adelie 34.6 40.39041 46.0 17.0 19.07260
# 3: Gentoo female Gentoo 40.9 45.56379 50.5 13.1 14.23793
# 4: Gentoo male Gentoo 44.4 49.47377 59.6 14.1 15.71803
# 5: Chinstrap female Chinstrap 40.9 46.57353 58.0 16.4 17.58824
# 6: Chinstrap male Chinstrap 48.5 51.09412 55.8 17.5 19.25294
# bill_depth_max flipper_length_min flipper_length_mean flipper_length_max
# 1: 20.7 172 187.7945 202
# 2: 21.5 178 192.4110 210
# 3: 15.5 203 212.7069 222
# 4: 17.3 208 221.5410 231
# 5: 19.4 178 191.7353 202
# 6: 20.8 187 199.9118 212
copy_penguins_dt <- copy(penguins_dt)
result <- data.table()
# (exercise skeleton: fill in the loop body)
for (sp in species) {
}
# Join table --------------------------------------------------------------
penguins_info <- data.table(species = c('Adelie', 'Gentoo', 'Chinstrap', 'etc'),
                            species_a = c('Adelie', 'Gentoo', 'Chinstrap', 'etc'),
                            korean = c('아델리 펭귄', '전투 펭귄', '턱끈 펭귄', 'etc'),
                            info = c('각진 머리와 작은 부리 때문에 알아보기 쉽다.',
                                     '머리에 모자처럼 둘러져 있는 하얀 털 때문에 알아보기가 쉽다.',
                                     '목에서 머리 쪽으로 이어지는 검은 털이 눈에 띈다.',
                                     'etc'))
penguins_info[penguins_dt, on=.(species)]
penguins_dt[penguins_info,,
            on = .(species)]
penguins_info[penguins_dt[,.(species, island, sex)], on = .(species)]
penguins_info[penguins_dt[,.(species, island, sex)], on = .(species_a = species)]
# Advanced ----------------------------------------------------------------
# .N, .SD, .I, .GRP, .BY
# number of rows / index of the last row (.N)
penguins_dt[.N]
penguins_dt[, .N]
penguins_dt[, .N, by=.(species)]
penguins_dt[, group_ID := seq_len(.N), by=.(species)]
# row numbers (.I)
penguins_dt[,.I]
penguins_dt[,.I[1], by=.(species)]
# Subset of Data (.SD)
penguins_dt[,.SD]
penguins_dt[,.SD[1]]
penguins_dt[,.SD[[1]]]
penguins_dt[,.SD, .SDcols = c('island', 'sex')]
penguins_dt[,.SD, .SDcols =! c('island', 'sex')]
# fix: the original line had an unbalanced ")" after lapply(.SD, min)
penguins_dt[,lapply(.SD, min),
            .SDcols = c('bill_length_mm', 'bill_depth_mm', 'flipper_length_mm')]
penguins_dt[,lapply(.SD, function(x) return(list(min = min(x), mean = mean(x), max = max(x)))) %>% unlist() %>% as.list(),
            .SDcols = c('bill_length_mm', 'bill_depth_mm', 'flipper_length_mm'),
            by = .(species:sex)]
# Group number (.GRP)
penguins_dt[,grp := .GRP, by=species]
# .BY (the current group's value; map each species to its initial)
# fix: the original swapped 'C' and 'G' for Chinstrap/Gentoo
penguins_dt[,switch(.BY[[1]] %>% as.character(),
                    'Adelie' = 'A',
                    'Chinstrap' = 'C',
                    'Gentoo' = 'G'), by=.(species)]
d97a33f17f14721e48e76846426616953556ebd4 | 54ed1cde940049aecaf859c897a12ff90f83aff8 | /man/give_me_states_combination.Rd | 2d2ce2cd6ddf18f50134f2806783dba2aca15d49 | [] | no_license | leonelhalsina/lemad | e23c735fa9499c9912efddd766be6f92f3c89271 | cca43ebff36b67fd78a11c58257a8f4cc15572bf | refs/heads/main | 2023-09-01T17:51:24.864455 | 2023-08-07T13:25:25 | 2023-08-07T13:25:25 | 508,330,655 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 759 | rd | give_me_states_combination.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lemad_utils.R
\name{give_me_states_combination}
\alias{give_me_states_combination}
\title{All possible region combination.}
\usage{
give_me_states_combination(areas, num_max_multiregion)
}
\arguments{
\item{areas}{Vector of single regions. It is NOT the species presence.}

\item{num_max_multiregion}{integer indicating the maximum number of regions a lineage can possibly occupy at a given point in time. It can go from 2 to length(areas).}
}
\value{
A vector with all the combinations of regions
}
\description{
Combines the regions into all the possible multi-region distribution.
}
\examples{
areas <- c("A", "B", "C")
give_me_states_combination(areas,num_max_multiregion = 3)
}
|
ffc5848332d3f58eeb88615a45f8ba98263ae865 | 5fdcdc5f734696f363f58a0cfc5e0a960cee8859 | /R/GetConfidentCalls.R | 4c0b5f73418f46e246faadd50f595e13b0889a60 | [] | no_license | lengning/EBSeqHMM | 4411d43582afc1c0e14306ce2aaa6e3e80250ef6 | 6b6efdb7faea6283cfe1710dfd8087ce2aa15319 | refs/heads/master | 2021-01-21T21:43:32.107304 | 2016-03-21T16:10:00 | 2016-03-21T16:10:00 | 31,791,187 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,665 | r | GetConfidentCalls.R | #' @title Obtain confident gene calls for classifying genes into expression paths
#' @usage GetConfidentCalls(EBSeqHMMOut, FDR=.05, cutoff=0.5, OnlyDynamic=TRUE,Paths=NULL)
#' @param EBSeqHMMOut output from EBSeqHMMTest function
#' @param FDR Target FDR, default is 0.05.
#' @param cutoff cutoff to use for defining a confident call. Genes with PP_path greater or
#' equal to cutoff will be called as a confident call. Default is 0.5.
#' @param OnlyDynamic if specifies as T, only dynamic paths will be shown
#' @param Paths paths that are of interest. Default is NULL. If it is not specified, all possible
#' paths will be considered.
#' @note Output: output a list of genes that are classified to a expression path as a confident assignment.
#' @author Ning Leng
#' @examples
#' data(GeneExampleData)
#' CondVector <- rep(paste("t",1:5,sep=""),each=3)
#' Conditions <- factor(CondVector, levels=c("t1","t2","t3","t4","t5"))
#' Sizes <- MedianNorm(GeneExampleData)
#' EBSeqHMMGeneOut <- EBSeqHMMTest(Data=GeneExampleData, sizeFactors=Sizes, Conditions=Conditions,
#' UpdateRd=2)
#' GeneDECalls <- GetDECalls(EBSeqHMMGeneOut, FDR=.05)
#' GeneConfCalls <- GetConfidentCalls(EBSeqHMMGeneOut, FDR=.05,cutoff=.5, OnlyDynamic=TRUE)
#' @details Function GetConfidentCalls() can be used to obtain a list of DE genes/isoforms
#' with user specific cutoffs. To obtain a list of DE genes/isoforms with
#' a target FDR alpha, the user may specify FDR=alpha. To further choose
#' genes/isoforms with high posterior probability of being its most likely path,
#' the user may specify the option cutoff (default is 0.5). Then genes or isoforms with PP(most likely path ) > = 0.5 will be selected
#' @return Overall: a list of genes/isoforms that are identified as DE under the target FDR, shown are their names and PPs;
#' EachPath: a list object, each sublist contains confident calls
#' (genes/isoforms) that have PP(path)>=cutoff for a particular expression path, shown are their names and PPs;
#' NumEach: length of each sublist in EachPath.
#' EachPathName: gene/isoform names in each of the sublists in EachPath
GetConfidentCalls <- function(EBSeqHMMOut,FDR=0.05, cutoff=.5, OnlyDynamic=TRUE,Paths=NULL){
  ## Posterior probabilities (genes x paths), the most-likely path per gene,
  ## and the PP of that most-likely path, as produced by EBSeqHMMTest().
  SigPPLarge <- EBSeqHMMOut$MgAllPP
  SigMAPLargeChar <- EBSeqHMMOut$MgAllMAPChar
  SigMaxValLarge <- EBSeqHMMOut$MgAllMaxVal
  ## Classify paths: the all-EE (constant) path, paths containing any
  ## Up/Down state, and fully dynamic paths (no EE state anywhere).
  AllPaths <- colnames(EBSeqHMMOut$MgAllPP)
  WithUp <- grep("Up",AllPaths)
  WithDown <- grep("Down",AllPaths)
  UpAndDown <- union(WithUp, WithDown)
  AllEEPath <- AllPaths[-UpAndDown]
  NonEEPath <- AllPaths[UpAndDown]
  WithEEPath <- grep("EE",AllPaths)
  DynPath <- AllPaths[-WithEEPath]
  ## Paths of interest: user-supplied, or dynamic-only / all non-EE paths.
  if(is.null(Paths)&OnlyDynamic==TRUE)PathsConsider <- DynPath
  if(is.null(Paths)&OnlyDynamic==FALSE)PathsConsider <- NonEEPath
  if(!is.null(Paths))PathsConsider <- Paths
  ## A confident call requires: PP(all-EE) <= FDR, PP(most likely path) >=
  ## cutoff (">=" matches the documented behavior; the original used ">"),
  ## and the most likely path being among the paths of interest.
  HMMCall <- rownames(SigPPLarge)[which(SigPPLarge[,AllEEPath]<=FDR & SigMaxValLarge>=cutoff
                                        & SigMAPLargeChar%in%PathsConsider)]
  Mat <- cbind(SigMAPLargeChar[HMMCall],round(SigMaxValLarge[HMMCall],4))
  rownames(Mat) <- HMMCall
  colnames(Mat) <- c("Most_Likely_Path","Max_PP")
  MatOrder1 <- order(Mat[,2],decreasing=TRUE)
  ## Fix: drop = FALSE keeps Mat1 a matrix when exactly one gene is
  ## selected (the original collapsed it to a vector, which made the
  ## Mat1[,1] indexing below fail).
  Mat1 <- Mat[MatOrder1, , drop = FALSE]
  if(length(Mat1)==0)stop("No DE genes identified under defined FDR")
  ## One sub-matrix per path of interest holding its confident calls
  ## (NULL when a path has no confident call).
  List <- sapply(PathsConsider,function(i){
    tt <- which(Mat1[,1]==i)
    if(length(tt)>0){
      t2 <- matrix(Mat1[tt,],ncol=2)
      rownames(t2) <- rownames(Mat1)[tt]
      t2
    }})
  ## Number of confident calls per path, and their gene/isoform names.
  Length <- sapply(List,function(i){if(length(i)>0)tt <- nrow(i)
    else tt <- 0
    tt})
  Names <- sapply(List,function(i){if(length(i)>0)tt <- rownames(i)
    else tt <- NULL
    tt})
  Out <- list(Overall=Mat1,EachPath=List, NumEach=Length, EachPathNames=Names )
  ## Return visibly (the original ended on an assignment, which returns
  ## invisibly and therefore printed nothing at top level).
  Out
}
|
b94d7ae570532a9283c29c04979427935b3b3b3f | 9fb037a022c0b0c0d4bbb43c982a43118efcf432 | /tests/testthat/test-simData.R | ca29887424ec6ea85f0489f2b0f4a28c019cada9 | [] | no_license | jan-glx/muscat | ae7557518df470519c34806f1305a145bfe976d2 | be69b6dcaa0eac415b3787bc3ae634bcc10a7982 | refs/heads/master | 2021-01-04T00:03:41.871405 | 2019-10-29T17:43:39 | 2019-10-29T17:43:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,812 | r | test-simData.R | context("Simulation of 'complex' scRNA-seq data")
# Shared fixtures: fake cluster, sample and group identifiers.
k <- paste0("cluster", seq_len(5))
s <- paste0("sample", seq_len(4))
g <- paste0("group", seq_len(3))
test_that(".sample_n_cells", {
  # scalar n: every sample-cluster pair gets exactly n cells
  n <- 50
  x <- .sample_n_cells(n, k, s)
  expect_true(all(unlist(x) == n))
  expect_identical(rownames(x), s)
  expect_identical(colnames(x), k)
  expect_true(all(vapply(x, length, numeric(1)) == 2))
  # length-2 n: sampled counts must all fall inside the given range
  n <- c(10, 100)
  x <- .sample_n_cells(n, k, s)
  rng <- vapply(x, range, numeric(2))
  expect_true(all(rng[1, ] >= n[1]))
  expect_true(all(rng[2, ] <= n[2]))
})
test_that(".split_cells", {
  # fake cell metadata with random cluster/sample assignments
  n_cells <- 1e3
  cells <- paste0("cell", seq_len(n_cells))
  x <- matrix(0,
    nrow = 1, ncol = n_cells,
    dimnames = list(NULL, cells))
  cd <- data.frame(
    row.names = cells,
    cluster_id = sample(k, n_cells, TRUE),
    sample_id = sample(s, n_cells, TRUE))
  # split by one factor: one element per cluster, sizes match the table
  cs <- .split_cells(cd, "cluster_id")
  expect_identical(names(cs), k)
  expect_identical(
    as.numeric(vapply(cs, length, numeric(1))),
    as.numeric(table(cd$cluster_id)))
  # split by two factors: nesting order follows the order of the names
  cs <- .split_cells(cd, c("cluster_id", "sample_id"))
  expect_identical(names(cs), k)
  nms_lvl2 <- vapply(cs, names, character(length(s)))
  expect_true(all(apply(nms_lvl2, 2, identical, s)))
  # reversed factor order reverses the nesting
  cs <- .split_cells(cd, c("sample_id", "cluster_id"))
  expect_identical(names(cs), s)
  nms_lvl2 <- vapply(cs, names, character(length(k)))
  expect_true(all(apply(nms_lvl2, 2, identical, k)))
})
test_that(".sample_cell_md", {
  # sampled metadata should be roughly balanced: the average count per
  # level of each ID equals n divided by the number of levels
  n <- 1e3
  ids <- list(k, s, g)
  md <- .sample_cell_md(n, ids)
  ns <- apply(md, 2, table)
  ms <- vapply(ns, mean, numeric(1))
  expect_true(all(vapply(seq_len(3), function(i)
    ms[[i]] == n / length(ids[[i]]), logical(1))))
})
|
094feb890a1ba127118994fdcd7acba91cb1f58d | ca8aa65e3b285151bf35fb00610efec3ca6801ff | /tests/testthat/test-1-workspace.R | 922a094982ff67c82063823536d867985bdc3aac | [] | no_license | rmhorton/AzureML | e563c026d410d59795c91c5cd7e8c3e9ea77e3d4 | 4af20bb561dcefd09886875a7268f69ada0fb7b9 | refs/heads/master | 2021-01-17T19:16:44.318622 | 2015-11-09T06:48:50 | 2015-11-09T06:48:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,103 | r | test-1-workspace.R | if(interactive()) library("testthat")
# Credentials for the live-service tests are read from an optional JSON
# config bundled with the tests; when absent, the whole file is skipped.
# NOTE(review): the message below says "config.R" but the file looked up is
# config.json -- confirm which name is intended.
keyfile <- system.file("tests/testthat/config.json", package = "AzureML")
if(keyfile == ""){
message("To run tests, add a file tests/testthat/config.R containing AzureML keys")
message("No tests ran")
} else {
# parse the workspace id and authorization token out of the JSON config
jsondata <- jsonlite::fromJSON(keyfile)
workspace_id <- jsondata$id
authorization_token <- jsondata$authorization_token
# ------------------------------------------------------------------------
context("Connect to workspace")
test_that("Can connect to workspace with supplied id and auth", {
skip_on_cran()
skip_on_travis()
# connect with an explicit id/token pair and inspect the Workspace object
ws <- workspace(workspace_id, authorization_token)
expect_is(ws, c("Workspace"))
expect_equal(ls(ws), c("datasets", "experiments", "id"))
expect_equal(ws$id, workspace_id)
})
test_that("Can connect to workspace with config file", {
skip_on_cran()
skip_on_travis()
# same checks, but the credentials come from the config file path
ws <- workspace(config = keyfile)
expect_is(ws, c("Workspace"))
expect_equal(ls(ws), c("datasets", "experiments", "id"))
expect_equal(ws$id, workspace_id)
})
}
|
adfcdb585968ca1c27c46c8f388553e02e6867fe | 411f8128677355a640d6fa6b59ba733db2f11a79 | /inst/shiny/app2de.R | 5d9c84a383546440ccbeb496db33011c576c1b42 | [] | no_license | sigbertklinke/gettext | a550955f8fa48f7cef0ba46d6da4dff622c531fb | 309d104f19260cc421c592dd83cd44a4a96e2d98 | refs/heads/master | 2020-11-27T00:58:07.139418 | 2020-02-17T10:15:27 | 2020-02-17T10:15:27 | 229,250,435 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 237 | r | app2de.R | launch.browser <- function(url) {
# Append the German (lang=de) query parameter, then open the URL in the
# RStudio viewer; "rs_shinyviewer" is an RStudio-internal entry point.
url <- sprintf('%s/?lang=%s',url, 'de')
invisible(.Call("rs_shinyviewer", url, getwd(), 3))
}
#
library("shiny")
# Launch the bundled demo app, routing the browser through launch.browser
# (defined above) so the app opens with the German language selected.
runApp(system.file('shiny', 'app2', package='gettext'), launch.browser=launch.browser)
1f02cac9a05a85ffe36e135af09020f96dff34c6 | e1c2d5a4586db6a87f16ca3ee413db18bbc6fa55 | /Tutorial.R | 8a4866ae9a5a167e4d5fcd6f14eb828b285aedd6 | [
"MIT"
] | permissive | wal/titanic | 9faee1563cedd516bd8f3d005a507ff87f2b387c | 2d3fe3d0c250027419f3220cd9bef59ae2d9c1cf | refs/heads/master | 2020-03-23T19:42:45.038826 | 2018-07-23T10:46:52 | 2018-07-23T10:46:52 | 141,997,810 | 0 | 0 | null | 2018-07-23T10:15:03 | 2018-07-23T10:15:02 | null | UTF-8 | R | false | false | 525 | r | Tutorial.R | # http://www.kaggle.com/c/titanic-gettingStarted
# Starting point taken from http://trevorstephens.com/
library(tidyverse)

# Import -- train and test are separate Kaggle files.
# BUG FIX: the original code read "data/train.csv" into both objects, so
# every "test" prediction was built on the training passengers.
train <- read_csv("data/train.csv")
test <- read_csv("data/test.csv")

# Aim is to predict Survived

# EDA
glimpse(train)
table(train$Survived)             # count of survivors
prop.table(table(train$Survived)) # proportion of survivors

# Hypothesis 1 - Everybody Dies: predict Survived = 0 for every passenger
submission_1 <- tibble(PassengerId = test$PassengerId, Survived = 0)
write_csv(submission_1, "submissions/sub1-everyonedies.csv")
|
86fe8538533c547bfffdbe5548ff61934615e02b | 48d082482bea42a51efbcfbf7262cddfb30a5ec2 | /analysis_with_tuning/after_analysis_win_by_threshold.R | 027add05d97db6509029899a09906486998bbd00 | [] | no_license | jbleich89/bart_gene | 6e8aa1b9a54edde99efcbd8023c235f0ef8a85d2 | acd196b1c8ef7acdfe1b35afba2c093de86ab46a | refs/heads/master | 2021-01-02T08:47:40.128065 | 2014-02-26T00:45:42 | 2014-02-26T00:45:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,325 | r | after_analysis_win_by_threshold.R | ####win by THREHSOLD% matrix
#### win by THRESHOLD% matrix
#### For each gene, credit the (c, method, alpha) setting with the lowest
#### out-of-sample RMSE with a "win", but only when it beats the runner-up
#### by at least THRESHOLD (relative improvement).
THRESHOLD = 0.2

wins = list()
for (gene_num in seq_along(all_results)){
  gene_name = names(all_results)[gene_num]
  # 0/1 win indicator per (c, method, alpha) cell for this gene
  wins[[gene_name]] = array(0, c(length(cs), length(METHODS), length(alphas)))
  dimnames(wins[[gene_name]]) = list(
    "c" = as.character(cs),
    "method" = METHODS,
    "alpha" = alphas
  )
  # Collect every validation RMSE for this gene.
  # BUG FIX: nrow was length(c_param) * ...; c_param is the inner loop
  # variable and is undefined on the first gene, so the row count was wrong
  # (or the script errored). The intended count is length(cs).
  all_rmses = as.data.frame(matrix(NA, nrow = length(cs) * length(METHODS) * length(alphas), ncol = 3))
  names(all_rmses) = c("rmse", "c", "m")
  counter = 1
  for (c_param in cs){
    c_param = as.character(c_param)
    for (method in METHODS){
      for (alpha in alphas){
        oo_rmse = all_validations[[gene_name]][[c_param]][["20"]][[alpha]][[method]]
        all_rmses[counter, ] = c(oo_rmse, c_param, method)
        counter = counter + 1
      }
    }
  }
  # sort ascending by RMSE (c() stored everything as character; convert
  # first; na.last = NA drops NA rows, matching sort.int's behavior)
  all_rmses$rmse = as.numeric(all_rmses$rmse)
  all_rmses = all_rmses[order(all_rmses$rmse, na.last = NA), ]
  # relative improvement of the best setting over the runner-up
  if ((all_rmses$rmse[2] - all_rmses$rmse[1]) / all_rmses$rmse[2] >= THRESHOLD){
    # NOTE(review): the third index uses alphas[1] (a value, not a name) --
    # confirm this targets the intended alpha slice.
    wins[[gene_name]][all_rmses$c[1], all_rmses$m[1], alphas[1]] = 1
  }
}

# aggregate the per-gene win indicators into one array
aggregated_win_by_threshold = array(0, c(length(cs), length(METHODS), length(alphas)))
dimnames(aggregated_win_by_threshold) = list(
  "c" = as.character(cs),
  "method" = METHODS,
  "alpha" = alphas
)
for (gene_num in seq_along(all_results)){
  gene_name = names(all_results)[gene_num]
  aggregated_win_by_threshold = aggregated_win_by_threshold + wins[[gene_name]]
}
aggregated_win_by_threshold
# num genes where the choice of setting mattered
sum(aggregated_win_by_threshold)
# pct genes where the choice of setting mattered
sum(aggregated_win_by_threshold) / length(all_results)
xtable(aggregated_win_by_threshold[,, 1], digits = 0)
513ab79e3c452d1ad9c2deaf8ed080d8b71d7492 | 154a10a79fae5c50cfea6ea910684a2cdee6eb5c | /Activity2.R | 7cae56546b3fbe48f68272b65b62ff80ec5a1d7b | [] | no_license | tenzinsherpa/ENVST206 | 35b44a366e69649a5a6cb18362246f5eed6384b2 | b85f87beb69e07e1cff44ca338c2ae8fda1068df | refs/heads/master | 2023-01-15T15:51:55.347063 | 2020-11-24T17:07:22 | 2020-11-24T17:07:22 | 291,063,687 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,420 | r | Activity2.R | #activity 2
# The worked examples are at the top and the activity questions that
# require coding are at the bottom.
# (The duplicate read.csv of the NOAA file was removed -- it was loaded
#  twice with the same path; axis-label typos are fixed below.)

# example vector from class
heights <- c(3,2,3)

# NOAA daily weather observations for several stations
datW <- read.csv("/Users/TenzinSherpa/Documents/ENVST206 Data/a02/noaa2011124.csv")

help(matrix)

# create a matrix with 2 columns that fills in by row
Mat <- matrix(c(1,2,3,4,5,6), ncol=2, byrow=TRUE)
Mat

# create a matrix with two columns that fills in by column
Mat.bycol <- matrix(c(1,2,3,4,5,6), ncol=2, byrow=FALSE)
Mat.bycol

# locate a specific cell of the matrix, formatted [row, column]
Mat.bycol[1,2]
# an empty index before/after the comma means all items in that dimension
# look at all values in row 1
Mat.bycol[1,]
# look at all values in column 2
Mat.bycol[,2]

# get more information about the data frame
str(datW)

# convert the site names into factors
datW$NAME <- as.factor(datW$NAME)
# find all unique site names
levels(datW$NAME)

# mean maximum temperature for Aberdeen:
# returns NA because observations are missing from the data set
mean(datW$TMAX[datW$NAME == "ABERDEEN, WA US"])
# with na.rm set to TRUE to ignore NA values
mean(datW$TMAX[datW$NAME == "ABERDEEN, WA US"], na.rm=TRUE)
# next look at the standard deviation
sd(datW$TMAX[datW$NAME == "ABERDEEN, WA US"], na.rm=TRUE)

# average daily temperature: halfway between the daily min and max
datW$TAVE <- datW$TMIN + ((datW$TMAX-datW$TMIN)/2)

# mean across all sites: by = variables to index over, FUN = function to
# apply; extra arguments (na.rm) are passed through to FUN
averageTemp <- aggregate(datW$TAVE, by=list(datW$NAME), FUN="mean",na.rm=TRUE)
averageTemp
# meaningful column names; MAAT = Mean Annual Air Temperature
colnames(averageTemp) <- c("NAME","MAAT")
averageTemp

# numeric site index matching the factor levels of NAME
datW$siteN <- as.numeric(datW$NAME)

# histogram of average daily temperature for the first site; the title is
# the actual site name rather than its numeric index
hist(datW$TAVE[datW$siteN == 1],
     freq=FALSE,
     main = paste(levels(datW$NAME)[1]),
     xlab = "Average daily temperature (degrees C)",
     ylab="Relative frequency",
     col="grey75",
     border="white")

help(dnorm)

# pnorm(q, mean, sd): probability of values at or below q
pnorm(0,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# probability (area under the curve) below 5
pnorm(5,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# probability between 0 and 5
pnorm(5,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE)) - pnorm(0,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# probability above 20: complement of the at-or-below-20 probability
1 - pnorm(20,
          mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
          sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# qnorm: value at the 95th quantile (probability 0.95)
qnorm(0.95,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))

# question 2 ----
# numeric data vector
number <- c(3.4, 5.2, 9.7, 45.8, 67.9)
number
# character data vector
character <- c("dog", "cat", "fish","turtle","rhino")
character
# integer data vector
vecExample <- c(5, 67, 234, 1, 57)
vecExample
# factor data vector built from the character data vector
character <- c("dog", "cat", "fish","turtle","rhino")
vecFactor <- as.factor(character)
vecFactor

# question 3 ----
help("hist")

# question 4 ----
hist(datW$TAVE[datW$siteN == 5],
     freq=FALSE,
     main = paste(levels(datW$NAME)[5]),
     xlab = "Average daily temperature (degrees C)",
     ylab="Relative frequency",
     col="grey75",
     border="white")

# question 5 ----
# find the current high-temperature threshold (95th percentile)
qnorm(0.95,
      mean(datW$TAVE[datW$siteN == 1],na.rm=TRUE),
      sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))
# then find how often that temperature (or hotter) occurs under a mean
# shifted up by 4 degrees
1 - pnorm(18.51026,
          mean(datW$TAVE[datW$siteN == 1]+4,na.rm=TRUE),
          sd(datW$TAVE[datW$siteN == 1],na.rm=TRUE))

# question 6 ----
# (label typos fixed: "percipitation" -> "precipitation",
#  "freqauency" -> "frequency")
hist(datW$PRCP[datW$siteN == 1],
     freq=FALSE,
     main = paste(levels(datW$NAME)[1]),
     xlab = "Average daily precipitation (mm)",
     ylab="Relative frequency",
     col="grey75",
     border="white")

# question 7 ----
# total annual precipitation per site and year
totalPRCP <- aggregate(datW$PRCP, by=list(datW$NAME, datW$year), FUN= "sum", na.rm= TRUE)
colnames(totalPRCP) <- c("NAME", "YEAR", "PRCP")
totalPRCP

# question 8 ----
# Aberdeen, WA US
hist(totalPRCP$PRCP[totalPRCP$NAME == "ABERDEEN, WA US"],
     freq=FALSE,
     main = "ABERDEEN, WA US",
     xlab = "Annual Precipitation (mm)",
     ylab="Relative frequency",
     col="grey75",
     border="white")
# MANDAN EXPERIMENT STATION, ND US
hist(totalPRCP$PRCP[totalPRCP$NAME == "MANDAN EXPERIMENT STATION, ND US"],
     freq=FALSE,
     main = "MANDAN EXPERIMENT STATION, ND US",
     xlab = "Annual Precipitation (mm)",
     ylab="Relative frequency",
     col="grey75",
     border="white")

# question 9 ----
# how likely is a year with precipitation of 700 mm or less
pnorm(700,
      mean(totalPRCP$PRCP[totalPRCP$NAME == "MANDAN EXPERIMENT STATION, ND US"],na.rm=TRUE),
      sd(totalPRCP$PRCP[totalPRCP$NAME == "MANDAN EXPERIMENT STATION, ND US"],na.rm=TRUE))
pnorm(700,
      mean(totalPRCP$PRCP[totalPRCP$NAME == "ABERDEEN, WA US"],na.rm=TRUE),
      sd(totalPRCP$PRCP[totalPRCP$NAME == "ABERDEEN, WA US"],na.rm=TRUE))
|
4545d6028d31ffb5b3a086947321e589e69601a6 | c44140ecc0297a4440077eb4fa486b1ec1f36026 | /grafico_ggplot2-histograma.R | 468c3a78ec716aa5780dd3c16a6e75ee0ad60c39 | [] | no_license | The-Icaro/R | 721c5b2cd0f11b7c6b989e22f20931ea67f562c5 | 748876dc159eb2a967ac77e7983347e6da9db5a8 | refs/heads/main | 2023-08-22T20:46:18.867241 | 2021-10-12T21:09:25 | 2021-10-12T21:09:25 | 370,146,697 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 629 | r | grafico_ggplot2-histograma.R | install.packages("ggplot2")
install.packages('ggalt')
library("ggplot2")
library("ggalt")
# Load some data to work with
data("midwest", package = "ggplot2") # dataset bundled with ggplot2
# Show full numbers instead of scientific notation
options(scipen = 999) # for very large numbers
# Base plot: histogram of the Black population, faceted and filled by state
# (axis and title labels are intentionally in Portuguese)
ggplot(midwest, aes(x = popblack)) +
geom_histogram(bins = 10, aes(fill = state)) +
xlim(c(0,9)) +
ylim(c(0,7.5)) +
labs(x = 'População Negra (x . 10000)', y = 'Frequência (por município)',
title = 'População Negra Por Estado', fill = 'Estado') +
facet_grid(~ state)
|
9d1346cb8e29e0de3d2de85758ca614ce1045d64 | bc45b1235117ddd9a8fd4f8b249904c46a578829 | /cluster.R | f59ee5272a6ecdf8d7ee2c251cfeaa53f6216f2c | [] | no_license | fyGuo/Maternal-health-seeking-behavior | b4fd7005e157826d29a0cca1853b3bc744063476 | eb86c3308956d954ae1f2abba926a9589e92ee72 | refs/heads/master | 2022-07-16T14:40:56.090844 | 2020-05-16T13:23:41 | 2020-05-16T13:23:41 | 261,179,802 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,720 | r | cluster.R | library(ggplot2)
library(cluster)
library(clustMixType)
library(dplyr)       # duplicate library(dplyr) call removed
library(readr)
library(Rtsne)
library(ggthemes)
library(klaR)
library(factoextra)

wm_clean <- read.csv("D:\\tang_data\\cleandata0503.csv")

# Try a new way to cluster.
# ANC recoded: 0 = never used; 1 = used but below 4 visits; 2 = >= 4 visits
hsb <- dplyr::select(wm_clean, MN2, ac4, probithpl, PN25A,
                     PN25B, PN25C)
hsb$anc_new[hsb$MN2==2] <- 0
hsb$anc_new[hsb$MN2==1&hsb$ac4==0] <- 1
hsb$anc_new[hsb$MN2==1&hsb$ac4==1] <- 2
table(hsb$anc_new)
hsb <- dplyr::select(hsb, -ac4, -MN2)
str(hsb)
table(hsb$anc_new)

# silhouette index to choose the number of k-modes clusters
s <- fviz_nbclust(x=hsb, FUNcluster = kmodes, method = "silhouette", k.max = 5)
s
# with this coding 3 clusters are not the optimal solution, although the
# cluster assignments come out the same
hsb[] <- lapply(hsb, unclass)
set.seed(2)
r <- kmodes(hsb, modes=3)
r$modes
cluster1 <- r$cluster

# So let's turn to the original coding
hsb <- dplyr::select(wm_clean, MN2, ac4, probithpl, PN25A,
                     PN25B, PN25C)
s <- fviz_nbclust(x=hsb, FUNcluster = kmodes, method = "silhouette", k.max = 5)
s
set.seed(2)
r <- kmodes(hsb, modes=3)
r$modes
results <- r$modes
cluster2 <- r$cluster
# recode "2" (no) to 0 for each binary indicator
results$MN2[results$MN2==2] <- 0
results$PN25A[results$PN25A==2] <- 0
results$PN25B[results$PN25B==2] <- 0
results$PN25C[results$PN25C==2] <- 0
results

# do the two codings change the cluster assignments?
sum(!(cluster1==cluster2))/length(cluster1)
# the differing proportion is only 0.0059, but the latter coding is much
# easier to justify using the silhouette index

wm_clean$cluster <- r$cluster
results <- r$modes
# BUG FIX: the original recode block assigned every indicator into
# results$MN2 (copy-paste error); each indicator is now recoded in its own
# column, matching the block above.
results$MN2[results$MN2==2] <- 0
results$PN25A[results$PN25A==2] <- 0
results$PN25B[results$PN25B==2] <- 0
results$PN25C[results$PN25C==2] <- 0
write.csv(wm_clean, "D:\\tang_data\\cluster0503.csv")
|
6a7f400ffb249af6876c3ae5a81c028a9b6180e3 | a567ad69ca8d6dae90ac654361825e6883be61af | /FirstScript.R | 95a8acc32c7f62e852abe06d2d67a94253c6a42d | [] | no_license | amitdev81296/LearningR | a3f07a7cc1840ad60eca8204aa4e5d72cc1bdf20 | 916c094b2cde8788993906d6c7feefdf42ded01e | refs/heads/master | 2022-09-11T10:06:44.020088 | 2020-05-30T06:29:35 | 2020-05-30T06:29:35 | 266,742,779 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 130 | r | FirstScript.R | # Creating an R script
# Demo: build a numeric vector and two scalar variables, then print each
# one in the same order they were created.
testSample <- c(1, 2, 3, 4)
firstVar <- 10
secondVar <- 15

print(testSample)
print(firstVar)
print(secondVar)
|
2fa78c0ebdd7c0cf9b4f0de0ecc8f2b26c8dd60f | c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab | /man/gtkEditableGetPosition.Rd | 95d6aaccce41a8bc1be25a8a0ee48f532310cf2f | [] | no_license | cran/RGtk2.10 | 3eb71086e637163c34e372c7c742922b079209e3 | 75aacd92d4b2db7d0942a3a6bc62105163b35c5e | refs/heads/master | 2021-01-22T23:26:26.975959 | 2007-05-05T00:00:00 | 2007-05-05T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 680 | rd | gtkEditableGetPosition.Rd | \alias{gtkEditableGetPosition}
\name{gtkEditableGetPosition}
\title{gtkEditableGetPosition}
\description{Retrieves the current cursor position.}
\usage{gtkEditableGetPosition(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkEditable}}] a \code{\link{GtkEditable}} widget.}}
\value{[integer] the position of the cursor. The cursor is displayed
before the character with the given (base 0) index
in the widget. The value will be less than or
equal to the number of characters in the widget.
Note that this position is in characters, not in
bytes.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
b52ebe051da535beb71a224c9e96c7ed83e883c9 | c20629bc224ad88e47849943e99fe8bc6ccb1f17 | /2018-09-10_density-estimation.R | b77ff0f8d8a71fc2166bb812a11ea8129c71d88b | [] | no_license | nayefahmad/R-vocab-and-experiments | 71a99e4d3ff0414d1306a5c7cabfd79b49df17f9 | 4384d3d473b0a9d28d86c3d36b0b06a1f91b862e | refs/heads/master | 2022-12-13T01:12:11.134952 | 2020-08-24T04:59:52 | 2020-08-24T04:59:52 | 103,333,690 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,027 | r | 2018-09-10_density-estimation.R |
#************************************************************
# ESTIMATING A DENSITY FUNCTION FROM UNIVARIATE DATA
#************************************************************
library("tidyverse")

# Example data:
x <- c(0, 0, 0, 1, 5, 10)

# Estimate density using the density() function:
?density
x.dens.1 <- density(x)          # bandwidth automatically selected
# change the bandwidth to be narrower:
x.dens.2 <- density(x, bw = .2) # bw defines the bandwidth

# plot the densities:
plot(x.dens.1)
plot(x.dens.2)

# examine the density object:
x.dens.1
names(x.dens.1)
str(x.dens.1)

# question: how to sample discrete random variables from this estimated
# density function? (note: without prob =, this samples uniformly from the
# evaluation grid, not from the estimated density)
sample(x.dens.1$x, 100)

# compare the estimated density and a sample drawn from it; prob = y
# weights each grid point by its estimated density
x.dens.1 %>% plot(xlim = c(-10, 20))
sample(x.dens.1$x,
       prob = x.dens.1$y,
       100000,
       replace = TRUE) %>% hist(xlim =c(-10, 20))

x.dens.2 %>% plot(xlim = c(-10, 20))
sample(x.dens.2$x,
       prob = x.dens.2$y,
       100000,
       replace = TRUE) %>% hist(xlim =c(-10, 20))

#*****************************************
# small sample from a known normal dist -------
#*****************************************
rnorm(20, 100) %>% hist

# get 3 replications at once, save in a df
# (comment corrected: the code draws 3 samples, not 10)
?replicate
p1.normal.samples <-
  replicate(3, rnorm(20,100, 15)) %>%
  # as.tibble() is deprecated; as_tibble() is the supported spelling
  as_tibble() %>%
  bind_cols(data.frame(obs = 1:20)) %>%
  select(obs,
         everything()) %>%
  # gather into a single column to plot:
  gather(key = "sample_number",
         value = "value",
         -obs) %>%
  # draw plot:
  ggplot(aes(x = value,
             col = sample_number,
             group = sample_number)) +
  geom_density() +
  coord_cartesian(xlim = c(50,150)) +
  theme_classic() +
  # removes legend but doesn't rescale plot:
  # guides(fill = "false") +
  theme(legend.position = "none"); p1.normal.samples
|
c43b7915a8948159436982bfa15d3d57aa35e05b | 5cabcda3f3feb15262c39cb9de7088adec791ed2 | /R/RcppExports.R | c6af44f696f1aa77b3a7158720327176139e833f | [
"MIT"
] | permissive | DIGI-VUB/text.alignment | a7346e8ca48708a7ff73a336a6610908ce2deb03 | 0dea4b146b31390170ede0d638fbf306524c8ee1 | refs/heads/master | 2022-09-01T19:48:02.997227 | 2022-08-17T08:38:31 | 2022-08-17T08:38:31 | 250,082,889 | 10 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,241 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Each function below is an auto-generated thin wrapper that forwards its
# arguments unchanged to the compiled C++ routine registered under the
# matching '_text_alignment_*' symbol. The actual semantics live in the
# C++ sources; see the package's src/ directory.
smith_waterman_mark_chars <- function(text, edit_mark) {
.Call('_text_alignment_smith_waterman_mark_chars', PACKAGE = 'text.alignment', text, edit_mark)
}
lowercase <- function(data) {
.Call('_text_alignment_lowercase', PACKAGE = 'text.alignment', data)
}
smith_waterman_matrix <- function(a, b, score_match, score_gap, score_mismatch, x) {
.Call('_text_alignment_smith_waterman_matrix', PACKAGE = 'text.alignment', a, b, score_match, score_gap, score_mismatch, x)
}
smith_waterman_function <- function(a, b, score_gap, similarity) {
.Call('_text_alignment_smith_waterman_function', PACKAGE = 'text.alignment', a, b, score_gap, similarity)
}
smith_waterman_path <- function(m, original_a, original_b, row_i, col_i, edit_mark) {
.Call('_text_alignment_smith_waterman_path', PACKAGE = 'text.alignment', m, original_a, original_b, row_i, col_i, edit_mark)
}
smith_waterman_path_integer <- function(m, original_a, original_b, row_i, col_i, edit_mark) {
.Call('_text_alignment_smith_waterman_path_integer', PACKAGE = 'text.alignment', m, original_a, original_b, row_i, col_i, edit_mark)
}
|
51a7bcfd2aa78084c64c87e738cae6497015bc45 | e5622ae5fa1e916d65a021dc6470ad593ed6cf04 | /!Fragments Analysis v1.R | 80ec84b8f21cdf471b0f6606fc67558f952cfad9 | [] | no_license | yura542/MassSpecSimulation | 18147c5a7a54e02c8e11df8aff8a4e3d275e187b | cd8bb1227573ef5315c584a38834e8d6ee3341bc | refs/heads/master | 2020-12-08T00:03:13.922440 | 2020-01-16T13:42:01 | 2020-01-16T13:42:01 | 232,832,176 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,192 | r | !Fragments Analysis v1.R | # Fragment molecule
# USE R 3.6 OR HIGHER!!!!!!!
# USE BOTH R AND JAVA x64!!!!!!
require(rcdk)
s=read.table("Fragmented spectra.txt")
names(s)=c('i','n','m','smiles','int','precursor')
s_precursor=s[s$precursor==TARGET_SMILES,]
Unique_fragments_m=unique(s$m)
unified_fragments=data.frame()
for(precursor in unique(s$precursor)) {
s1=s[s$precursor==precursor,]
unified_fragments_vec=rep(0,length(Unique_fragments_m))
unified_fragments_vec[match(s1$m, Unique_fragments_m)]=1
unified_fragments=rbind(unified_fragments,unified_fragments_vec)
}
Difference=apply(head(unified_fragments,5000),1,function(x) {return(as.numeric(x-unified_fragments[which (TARGET_SMILES==as.character(unique(s$precursor))),]))})
Difference_ind=apply(Difference,2,function(x) {return(10-length(x[x==-1]))})
hist(Difference_ind,col=2,31)
unique(s$precursor)[Difference_ind==10]
hist(Difference_ind,31,col=2,plot=FALSE)
###### Unresolved
Isomer_Molecules_Unresolved=read.csv("Isomer_Molecules_Unresolved.csv", header=TRUE, sep=',')
s=s[s$precursor %in% Isomer_Molecules_Unresolved$smiles,]
s=s[s$precursor %in% Isomer_Molecules_HO$smiles,]
s=rbind(s_precursor,s) |
f984d546c21983095cd3ddfe9b6c7566d548fef7 | 904fcdd647828aabb08dc7e563dfce0f985947e5 | /introduction-to-r/scripts/packages.R | 1156bb90f037d786ee4367140fd736634663b0b8 | [] | no_license | stghn/Free-Online-R-Courses | 915d554cdb787ceaeb150ffb27a66f5bb52eb339 | e604b7319881f13b85f137579b8f6754fcb338ab | refs/heads/master | 2023-07-06T11:59:02.450121 | 2021-08-05T10:19:06 | 2021-08-05T10:19:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,169 | r | packages.R | ## ----install0, echo=FALSE------------------------------------------------
library('kableExtra')
## ----install1, eval=FALSE------------------------------------------------
## install.packages('ggplot2')
## ----install2, eval=FALSE------------------------------------------------
## devtools::install_github("tidyverse/ggplot2")
## remotes::install_github("tidyverse/dplyr")
## ----install3, eval=FALSE------------------------------------------------
## devtools::install_bitbucket("dannavarro/lsr-package")
## remotes::install_bitbucket("dannavarro/lsr-package")
## ----install4, eval=FALSE------------------------------------------------
## install_bioc("SummarizedExperiment")
## ----install6, eval=FALSE------------------------------------------------
## install.packages('quantstrat', repos = 'https://r-forge.r-project.org/')
## ----install7, eval=FALSE------------------------------------------------
## remotes::install_version('dplyr', version = 0.5.0)
## ----install8, eval=FALSE------------------------------------------------
## remotes::install_github('tidyverse/dplyr@*release')
## ----install14, echo=FALSE, messages=FALSE-------------------------------
text_tbl <- data.frame(
Function = c("`installed.packages()`", "`library('package_name')`", "`available.packages()`",
"`old.packages()`", "`new.packages()`", "`update.packages()`", "`remove.packages('package_name')`"),
Descritpion = c(
"View currently installed packages. ",
"Load package into the current R session. ",
"List of packages available for installation. ",
"List of packages which have new versions available. ",
"List of packages already not installed. ",
"Update packages which have new versions available. ",
"Remove installed packages. "
)
)
kable(text_tbl, format = "html") %>%
kable_styling(full_width = F) %>%
column_spec(1, bold = FALSE, border_right = T) %>%
column_spec(2, width = "30em")
## ----install9------------------------------------------------------------
.libPaths()
## ----install11, eval=FALSE-----------------------------------------------
## library(lubridate, lib.loc = "C:/Program Files/R/R-3.4.1/library")
|
7f82c34772037e4b1587300732fa5826d1904682 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/rotasym/man/unif.Rd | 9990f9fc6696e53249463b143f36c8813833596d | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,233 | rd | unif.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unif.R
\name{unif}
\alias{unif}
\alias{d_unif_sphere}
\alias{r_unif_sphere}
\alias{w_p}
\title{Uniform distribution on the hypersphere}
\usage{
d_unif_sphere(x, log = FALSE)
r_unif_sphere(n, p)
w_p(p, log = FALSE)
}
\arguments{
\item{x}{locations in \eqn{S^{p-1}} to evaluate the density. Either a
matrix of size \code{c(nx, p)} or a vector of length \code{p}. Normalized
internally if required (with a \code{warning} message).}
\item{log}{flag to indicate if the logarithm of the density (or the
normalizing constant) is to be computed.}
\item{n}{sample size, a positive integer.}
\item{p}{dimension of the ambient space \eqn{R^p} that contains
\eqn{S^{p-1}}. A positive integer.}
}
\value{
Depending on the function:
\itemize{
\item \code{d_unif_sphere}: a vector of length \code{nx} or \code{1} with
the evaluated density at \code{x}.
\item \code{r_unif_sphere}: a matrix of size \code{c(n, p)} with the
random sample.
\item \code{w_p}: the surface area of \eqn{S^{p-1}}.
}
}
\description{
Density and simulation of the uniform distribution on
\eqn{S^{p-1}:=\{\mathbf{x}\in R^p:||\mathbf{x}||=1\}}{
S^{p-1} := \{x \in R^p : ||x|| = 1\}}, \eqn{p\ge 1}. The density is just the
inverse of the surface area of \eqn{S^{p-1}}, given by
\deqn{\omega_p:=2\pi^{p/2}/\Gamma(p/2).}{
\omega_p := 2\pi^{p/2} / \Gamma(p/2).}
}
\details{
If \eqn{p = 1}, then \eqn{S^{0} = \{-1, 1\}} and the "surface area" is
\eqn{2}. The function \code{w_p} is vectorized on \code{p}.
}
\examples{
## Area of S^{p - 1}
# Areas of S^0, S^1, and S^2
w_p(p = 1:3)
# Area as a function of p
p <- 1:20
plot(p, w_p(p = p), type = "o", pch = 16, xlab = "p", ylab = "Area",
main = expression("Surface area of " * S^{p - 1}), axes = FALSE)
box()
axis(1, at = p)
axis(2, at = seq(0, 34, by = 2))
## Simulation and density evaluation for p = 1, 2, 3
# p = 1
n <- 500
x <- r_unif_sphere(n = n, p = 1)
barplot(table(x) / n)
head(d_unif_sphere(x))
# p = 2
x <- r_unif_sphere(n = n, p = 3)
plot(x)
head(d_unif_sphere(x))
# p = 3
x <- r_unif_sphere(n = n, p = 3)
rgl::plot3d(x)
head(d_unif_sphere(x))
}
\author{
Eduardo García-Portugués, Davy Paindaveine, and Thomas Verdebout.
}
|
092743712e9d2ce37bc692d5dc81e16e126bb3ee | 05872cc0d95725105a29bb8423c4d10e05a94f87 | /code/Old_code/shear/W2_F319_willow_seedling_shear_stress.R | cc7caf1677ef53e1552b4eb77d19dafa912feb9c | [] | no_license | ksirving/willow_model_application | 70fb1dcbf245b800e70c38cc51ce1bece7e69b0f | 22dca39026b50e847e25ff087256d745b7d53897 | refs/heads/master | 2023-03-29T03:35:23.689091 | 2021-03-24T21:14:53 | 2021-03-24T21:14:53 | 287,610,376 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,593 | r | W2_F319_willow_seedling_shear_stress.R | ## willow model application
## Jenny Rogers & Katie Irving
library(sf)
library(tidyverse)
library(tidyr)
library(sm)
library(lubridate) # work with dates
library(dplyr) # data manipulation (filter, summarize, mutate)
library(ggplot2) # graphics
library(gridExtra) # tile several plots next to each other
library(scales)
library(data.table)
### data upload: experimental seedling shear-stress / mortality data
shear <- read.csv("input_data/shear.csv", skip = 2)
head(shear)
######## seedling mortality curve: percent mortality vs bed shear stress
######## (Halsell et al. and Vandersande et al.)
# NOTE(review): the original comment said "inudation ... duration and
# depth" (likely copied from the inundation script); this model regresses
# mortality on shear stress.
names(shear)[c(1,2, 4)] <- c("shear", "Year", "mortality")
shear$Year <- as.factor(shear$Year)
# scatter of mortality vs shear with a fitted linear trend, saved to PNG
png("Figures/Seedling_Shear_colours.png", width = 600, height = 600)
ggplot(data = shear, mapping = aes(x = shear, y = mortality))+
geom_point(aes( group = Year, color = Year), size = 4) +
# geom_point(size = 4)+
geom_smooth(method = "lm")+
labs(y = "Mortality (%)", x = "Bed Shear Stress (Pa)")+
theme_classic()+
theme(axis.text = element_text(size = 30), axis.title = element_text(size = 30),
legend.text=element_text(size=20), legend.title=element_text(size=20))
dev.off()
# linear model of percent mortality on shear stress; saved for reuse below
summary(shear_seedling <- lm(mortality~shear, data = shear))
save(shear_seedling, file= "shear_seedling.rda")
## upload hydraulic data (one HEC-RAS node per file; only F319 is active
## in this script -- the others are kept for reference)
# N11101250 <- read.csv("input_data/HecRas/hydraulic_ts_11101250.csv")
# F34D <- read.csv("input_data/HecRas/hydraulic_ts_F34D.csv")
# F37B_High <- read.csv("input_data/HecRas/hydraulic_ts_F37B_High.csv")
# F45B <- read.csv("input_data/HecRas/hydraulic_ts_F45B.csv")
# F300 <- read.csv("input_data/HecRas/hydraulic_ts_F300.csv")
F319 <- read.csv("input_data/HecRas/hydraulic_ts_F319.csv")
# LA13 <- read.csv("input_data/HecRas/hydraulic_ts_LA13.csv")
# LA14 <- read.csv("input_data/HecRas/hydraulic_ts_LA14.csv")
# LA20 <- read.csv("input_data/HecRas/hydraulic_ts_LA20.csv")
## select columns: datetime, node, discharge (Q) and the three shear
## series (left overbank, main channel, right overbank)
hydraul <- F319[,c(-1)]
hyd_shear <- hydraul[,c(1:3,6,10,14)]
colnames(hyd_shear) <-c("DateTime", "node", "Q", "shear_lb_ft_LOB", "shear_lb_ft_MC", "shear_lb_ft_ROB")
# nas <- which(complete.cases(hyd_dep) == FALSE)
# hyd_dep[nas,]
## convert unit to pascals: 1 Pa = 0.020885 lb/ft^2, so divide the lb/ft^2
## values by 0.020885; also add a numeric time-step index
hyd_shear <- hyd_shear %>%
mutate(shear_pa_LOB = (shear_lb_ft_LOB/0.020885),
shear_pa_MC = (shear_lb_ft_MC/0.020885),
shear_pa_ROB = (shear_lb_ft_ROB/0.020885)) %>%
select(-contains("ft")) %>%
mutate(date_num = seq(1,length(DateTime), 1))
hyd_shear
# long format: one row per (time step, cross-section position)
hyd_shear<-reshape2::melt(hyd_shear, id=c("DateTime","Q", "node", "date_num"))
hyd_shear <- hyd_shear %>% rename(shear = value)
# keep only the main-channel shear series
hyd_shear <- filter(hyd_shear, variable == "shear_pa_MC")
## predict on node data using model
names(hyd_shear)
## workflow
## get probabilities for shear at each hourly time step
## get thresholds i.e. 25, 50, 75%
head(hyd_shear)
summary(shear_seedling)
## Predict percent mortality for every hourly time step. The fitted line can
## exceed 100% at high shear, so cap predictions at 100.
## Bug fix: the original had the second pipe commented out (`#%>%`), leaving a
## dangling mutate() call that errors at runtime and never applies the cap the
## in-line comment describes.
new_data <- hyd_shear %>%
  mutate(prob_fit = predict(shear_seedling, newdata = hyd_shear, type="response")) %>%
  mutate(prob_fit = ifelse(prob_fit > 100, 100, prob_fit)) ## percentage goes up to 200 so cut off at 100
head(new_data)
range(new_data$prob_fit)
save(new_data, file="output_data/W2_F319_seedling_shear_discharge_probability_time_series_red_columns.RData")

# format probability time series ------------------------------------------
## Parse the timestamps and derive calendar fields with lubridate.
names(new_data)
## format date time
new_data$DateTime<-as.POSIXct(new_data$DateTime,
                              format = "%Y-%m-%d %H:%M",
                              tz = "GMT")
## Create year, month, day and hour columns, plus the water year:
## Oct-Dec keep their calendar year, Jan-Sep belong to the previous year.
new_data <- new_data %>%
  mutate(month = month(DateTime)) %>%
  mutate(year = year(DateTime)) %>%
  mutate(day = day(DateTime)) %>%
  mutate(hour = hour(DateTime)) %>%
  mutate(water_year = ifelse(month == 10 | month == 11 | month == 12, year, year-1))
head(new_data)
save(new_data, file="output_data/W2_F319_seedling_shear_discharge_probs_2010_2017_TS.RData")
# probability as a function of discharge -----------------------------------
load( file="output_data/W2_F319_seedling_shear_discharge_probs_2010_2017_TS.RData")
head(new_data)
## Discharge at which the predicted mortality curve peaks (main channel); used
## below to decide which side of each threshold crossing to count.
peak <- new_data %>%
  group_by(variable) %>%
  filter(prob_fit == max(prob_fit)) #%>%
peakQM <- filter(peak, variable=="shear_pa_MC")
peakQM <- max(peakQM$Q)
peakQM
## filter data by cross section position
new_dataM <- filter(new_data, variable == "shear_pa_MC")
## Main channel curves: RootLinearInterpolant (loaded from .Rdata) returns the
## discharge value(s) where the probability curve crosses the given level --
## assumed to yield up to four crossings, or NA when the level is never
## reached (TODO confirm against the function definition).
load(file="root_interpolation_function.Rdata")
newx1a <- RootLinearInterpolant(new_dataM$Q, new_dataM$prob_fit, 25)
newx1a
newx2a <- RootLinearInterpolant(new_dataM$Q, new_dataM$prob_fit, 50)
newx2a
newx3a <- RootLinearInterpolant(new_dataM$Q, new_dataM$prob_fit, 75)
newx3a
## Assemble the discharge limits (up to 4 per probability level) into a data
## frame; only the MC column is filled for this node.
limits <- as.data.frame(matrix(ncol=3, nrow=12)) %>%
  rename(LOB = V1, MC = V2, ROB = V3)
rownames(limits)<-c("Low_Prob_1", "Low_Prob_2", "Low_Prob_3", "Low_Prob_4",
                    "Med_Prob_1", "Med_Prob_2", "Med_Prob_3", "Med_Prob_4",
                    "High_Prob_1", "High_Prob_2", "High_Prob_3", "High_Prob_4")
limits$MC <- c(newx1a[1], newx1a[2],newx1a[3], newx1a[4],
               newx2a[1], newx2a[2],newx2a[3], newx2a[4],
               newx3a[1], newx3a[2],newx3a[3],newx3a[4])
limits
write.csv(limits, "output_data/W2_F319_seedling_shear_Q_limits.csv")
#### Plot probability vs discharge with the 25/50/75% crossings marked.
#### NOTE(review): this path uses lowercase "figures/" while the earlier plot
#### used "Figures/" -- on a case-sensitive filesystem one of the two will
#### fail; confirm the intended directory name.
png("figures/Application_curves/Shear/F319_seedling_shear_prob_Q_thresholds.png", width = 500, height = 600)
labels <- c(shear_pa_LOB = "Left Over Bank", shear_pa_MC = "Main Channel", shear_pa_ROB = "Right Over Bank")
## new_data was filtered to the main-channel series earlier, so only the MC
## facet is expected to contain data here.
ggplot(new_data, aes(x = Q, y=prob_fit)) +
  geom_line(aes(group = variable, lty = variable)) +
  scale_linetype_manual(values= c("dotted", "solid", "dashed"))+
  # name="Cross\nSection\nPosition",
  # breaks=c("shear_pa_LOB", "shear_pa_MC", "shear_pa_ROB"),
  # labels = c("LOB", "MC", "ROB")) +
  facet_wrap(~variable, scales="free_x", nrow=3, labeller=labeller(variable = labels)) +
  ## Threshold crossing points: green = 25%, red = 50%, blue = 75%.
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=25, x=newx1a[1]), color="green") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=25, x=newx1a[2]), color="green") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=25, x=newx1a[3]), color="green") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=25, x=newx1a[4]), color="green") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=50, x=newx2a[1]), color="red") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=50, x=newx2a[2]), color="red") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=50, x=newx2a[3]), color="red") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=50, x=newx2a[4]), color="red") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=75, x=newx3a[1]), color="blue") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=75, x=newx3a[2]), color="blue") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=75, x=newx3a[3]), color="blue") +
  geom_point(data = subset(new_data, variable =="shear_pa_MC"), aes(y=75, x=newx3a[4]), color="blue") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = "none") +
  labs(title = "F319: Willow Seedling/Shear: Probability ~ Q",
       y = "Probability",
       x = "Q (cfs)") #+ theme_bw(base_size = 15)
dev.off()
## Build a combined month/year key (source columns are kept).
new_dataMx <- unite(new_dataM, month_year, water_year:month, sep = "-", remove = FALSE)
head(new_dataMx)
# dataframe for stats -----------------------------------------------------
## Critical period months (Apr-Sep) vs non-critical (Oct-Mar).
non_critical <- c(1:3, 10:12)
critical <- c(4:9)
## Tag each hourly record with its season.
new_dataMx$season <- ifelse(new_dataMx$month %in% non_critical, "non_critical", "critical")
# time stats - mid channel ------------------------------------------------
## Build an unevaluated filter expression describing when Q lies on the "risk"
## side of a probability threshold curve.
##
## thresholds - discharge values where the probability curve crosses the level
##              (up to 4 crossings from RootLinearInterpolant; NA if never).
## peakQ      - discharge at the curve's maximum, used to tell ascending from
##              descending limbs.
##
## Threshold values are substituted as literals (bquote), so the returned call
## can be eval()'d later inside dplyr verbs with only the Q column in scope.
##
## This replaces three near-identical if/else chains and fixes two copy-paste
## bugs in the originals: stray undefined `QM` variables in two expressions,
## and the high-threshold chain assigning its length-4 results to med_threshM
## instead of high_threshM (leaving high_threshM unset).
make_thresh_expr <- function(thresholds, peakQ) {
  n <- length(thresholds)
  if (is.na(thresholds[1])) {
    ## no crossing: condition is never TRUE
    quote(Q < 0)
  } else if (n == 1 && thresholds < peakQ) {
    ## 1a) single crossing on the ascending limb: time above it
    bquote(Q >= .(thresholds))
  } else if (n == 1 && thresholds > peakQ) {
    ## 1b) single crossing on the descending limb: time below it
    bquote(Q <= .(thresholds))
  } else if (n == 2 && thresholds[1] < peakQ) {
    ## 2a) two crossings bracketing the peak: time between them
    bquote(Q >= .(thresholds[1]) & Q <= .(thresholds[2]))
  } else if (n == 2 && (thresholds[1] > peakQ || thresholds[2] < peakQ)) {
    ## 2b) two crossings on the same limb: time outside them
    bquote(Q <= .(thresholds[1]) & Q >= .(thresholds[2]))
  } else if (n == 3 && thresholds[3] > peakQ) {
    ## 3a) begins on a positive slope: below 1st, or between 2nd and 3rd
    bquote(Q <= .(thresholds[1]) | Q >= .(thresholds[2]) & Q <= .(thresholds[3]))
  } else if (n == 3 && thresholds[1] < peakQ) {
    ## 3b) begins on a negative slope: between 1st and 2nd, or above 3rd
    bquote(Q >= .(thresholds[1]) & Q <= .(thresholds[2]) | Q >= .(thresholds[3]))
  } else if (n == 4 && thresholds[1] < peakQ) {
    ## 4a) begins on a positive slope: inside either crossing pair
    bquote(Q >= .(thresholds[1]) & Q <= .(thresholds[2]) |
             Q >= .(thresholds[3]) & Q <= .(thresholds[4]))
  } else if (n == 4 && (all(thresholds < peakQ) || all(thresholds > peakQ) ||
                        (thresholds[2] < peakQ && thresholds[3] > peakQ))) {
    ## 4b) begins on a negative slope: outside both crossing pairs
    bquote(Q <= .(thresholds[1]) & Q >= .(thresholds[2]) |
             Q <= .(thresholds[3]) & Q >= .(thresholds[4]))
  } else {
    ## configuration not covered by the original decision tree
    stop("Unhandled threshold configuration: ",
         paste(thresholds, collapse = ", "), call. = FALSE)
  }
}

## Low / medium / high probability thresholds (25 / 50 / 75 % mortality).
low_threshM <- make_thresh_expr(newx1a, peakQM)
low_threshM
### medium threshold
med_threshM <- make_thresh_expr(newx2a, peakQM)
med_threshM
### high threshold
high_threshM <- make_thresh_expr(newx3a, peakQM)
high_threshM
###### Percent of time the discharge satisfies each threshold expression,
###### annually (by water year) and seasonally (water year x season).
time_statsm <- new_dataMx %>%
  dplyr::group_by(water_year) %>%
  dplyr::mutate(Low = sum(eval(low_threshM))/length(DateTime)*100) %>%
  dplyr::mutate(Medium = sum(eval(med_threshM))/length(DateTime)*100) %>%
  dplyr::mutate(High = sum(eval(high_threshM))/length(DateTime)*100) %>%
  ungroup() %>%
  dplyr::group_by(water_year, season) %>%
  dplyr::mutate(Low.Seasonal = sum(eval(low_threshM))/length(DateTime)*100) %>%
  dplyr::mutate(Medium.Seasonal = sum(eval(med_threshM))/length(DateTime)*100) %>%
  dplyr::mutate(High.Seasonal = sum(eval(high_threshM))/length(DateTime)*100) %>%
  distinct(year, Low , Medium , High , Low.Seasonal, Medium.Seasonal, High.Seasonal) %>%
  mutate(position="MC")
time_statsm
time_stats <- time_statsm
## Long format: one row per water year/season per statistic.
melt_time<-reshape2::melt(time_stats, id=c("year","season", "position", "water_year"))
melt_time <- rename(melt_time, Probability_Threshold = variable)
write.csv(melt_time, "output_data/W2_F319_willow_seedling_shear_time_stats.csv")
## Annual statistics: the first three measure columns (Low/Medium/High).
ann_stats <- unique(melt_time$Probability_Threshold)[1:3]
melt_time_ann <- melt_time %>% filter(Probability_Threshold %in% ann_stats ) %>%
  select(-season, -year) %>% distinct()
## Seasonal statistics: the remaining three (.Seasonal) columns.
seas_stats <- unique(melt_time$Probability_Threshold)[4:6]
melt_time_seas <- filter(melt_time, Probability_Threshold %in% seas_stats )
# Number of days above discharge ------------------------------------------
## For each threshold: data.table::rleid() gives every run of consecutive
## hours meeting (or failing) the condition its own ID; within a run that
## meets the condition, row_number() counts the consecutive hours, and hours
## outside the condition are set to 0.
new_dataM <- new_dataM %>%
  ungroup() %>%
  group_by(month, day, water_year, ID01 = data.table::rleid(eval(low_threshM))) %>%
  mutate(Low = if_else(eval(low_threshM), row_number(), 0L)) %>%
  ungroup() %>%
  group_by(month, day, water_year, ID02 = data.table::rleid(eval(med_threshM))) %>%
  mutate(Medium = if_else(eval(med_threshM), row_number(), 0L)) %>%
  ungroup() %>%
  group_by(month, day, water_year, ID03 = data.table::rleid(eval(high_threshM))) %>%
  mutate(High = if_else(eval(high_threshM), row_number(), 0L))
## Tag the cross-section position for downstream grouping.
new_dataM <- mutate(new_dataM, position="MC")
# melt data frame so that each probability column are all in one row
## Keep only the columns needed downstream: Q, date parts, run IDs and the
## three consecutive-hour counters. DateTime is deliberately dropped: it was
## unused downstream and, because it was absent from the melt id columns,
## reshape2::melt() treated it as a measure column -- coercing POSIXct values
## into `value` and adding spurious "DateTime" rows to the threshold factor.
new_datax <- select(new_dataM, c(Q, month, water_year, day, ID01, Low, ID02, Medium, ID03, High, position))
names(new_datax)
## Long format: one row per hour per threshold.
melt_data<-reshape2::melt(new_datax, id=c("ID01", "ID02", "ID03", "day", "month", "water_year", "Q", "position"))
melt_data <- rename(melt_data, Probability_Threshold = variable,
                    consec_hours = value)
melt_data
## Groups data by year, month, run ID and threshold, and counts the number of
## days in each month the probability stays within each threshold band.
## Days are not necessarily consecutive; each threshold is handled separately.
## A day counts as "full" when it contains a run of >= 24 consecutive hours.
total_days01 <- melt_data %>%
  filter(Probability_Threshold == "Low") %>%
  group_by(ID01, day, month, water_year, position) %>%
  summarise(n_hours = max(consec_hours)) %>%
  mutate(n_days_low = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days01
## count the number of days in each month
total_days_per_month01 <- total_days01 %>%
  group_by(month, water_year, position) %>%
  summarise(days_per_month_low = sum(n_days_low))
total_days_per_month01
## Medium threshold: same computation keyed on ID02.
total_days02 <- melt_data %>%
  filter(Probability_Threshold == "Medium") %>%
  group_by(ID02, day, month, water_year, position) %>%
  summarise(n_hours = max(consec_hours)) %>%
  mutate(n_days_medium = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month02 <- total_days02 %>%
  group_by(month, water_year, position) %>%
  summarise(days_per_month_medium = sum(n_days_medium))
# total_days_per_month02
## High threshold: same computation keyed on ID03.
total_days03 <- melt_data %>%
  filter(Probability_Threshold == "High") %>%
  group_by(ID03, day, month, water_year, position) %>%
  summarise(n_hours = max(consec_hours)) %>%
  mutate(n_days_high = ifelse(n_hours >= 24, 1, 0)) # %>%
total_days_per_month03 <- total_days03 %>%
  group_by(month, water_year, position) %>%
  summarise(days_per_month_high = sum(n_days_high))
total_days_per_month03
## Combine the three monthly counts column-wise.
## NOTE(review): the positional cbind assumes all three tables have identical
## month/water_year rows; the triplicated logic above is a candidate for a
## helper function but is left as-is to preserve this positional dependency.
total_days <- cbind( total_days_per_month01,total_days_per_month02[,4], total_days_per_month03[,4])
head(total_days)
write.csv(total_days, "output_data/W2_F319_willow_seedling_shear_total_days.csv")
# # create year_month column
total_days <- ungroup(total_days) %>%
  unite(month_year, water_year:month, sep="-", remove=F)
## Convert the combined month/year strings to a Date (first day of month).
## NOTE(review): as.yearmon() is assumed to parse the strings produced by
## unite() (the column order determines whether they read "year-month" or
## "month-year"); verify the parsed values before relying on them.
library(zoo)
total_days$month_year <- zoo::as.yearmon(total_days$month_year)
total_days$month_year <- as.Date(total_days$month_year)
## Rename the monthly counts to the Low/Medium/High convention used elsewhere.
total_days <- rename(total_days, Low = days_per_month_low, Medium = days_per_month_medium, High = days_per_month_high)
# total_hours <- rename(total_hours, Low = n_days_low, Medium = n_days_medium, High = n_days_high)
## Define seasons/critical period (Apr-Sep = critical, Oct-Mar = non-critical).
non_critical <- c(1:3,10:12)
critical <- c(4:9)
total_days <- total_days %>%
  mutate(season = ifelse(month %in% non_critical, "non_critical", "critical") )
unique(total_days$season)
str(total_days)
## Long format: one row per month per threshold.
melt_days<-reshape2::melt(total_days, id=c("month_year", "water_year", "month", "season", "position"))
melt_days <- rename(melt_days, Probability_Threshold = variable,
                    n_days = value)
head(melt_days)
## save df
write.csv(melt_days, "output_data/W2_F319_willow_seedling_shear_total_days_long.csv")
|
18c481932a137610f50ec73e420bb82d082eaae2 | ba3f90e83341174a1970706c7ce54c2d570ac828 | /AnalyticsEdge/Complete/Week 5/pubmed/pubmed.R | 402ebd7317d643d796bb4266cca3cd8956bd9996 | [] | no_license | sivansasidharan/Edx-References | 1dd9eff813cc560c59e51e70bdb5888a462a022f | 43b36fbd79f57d6ffb50714b580250cb0cf8c214 | refs/heads/master | 2020-03-27T23:30:31.343412 | 2018-09-04T09:40:32 | 2018-09-04T09:40:32 | 147,321,105 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,752 | r | pubmed.R | #!/usr/bin/Rscript
# AUTOMATING REVIEWS IN MEDICINE
######################################################################################################
# PROBLEM 1 - LOADING THE DATA
# Load the clinical trial search results; keep text columns as character so
# nchar() and the text mining below behave as expected.
trials = read.csv("clinical_trial.csv", stringsAsFactors=FALSE)
summary(trials)
str(trials)
# How many characters are there in the longest abstract?
max(nchar(trials$abstract))
# How many search results provided no abstract? (empty string, not NA)
str(subset(trials, nchar(trials$abstract) == 0))
# What is the shortest title of any article?
trials$title[which.min(nchar(trials$title))]
# PROBLEM 2 - PREPARING THE CORPUS
library(tm)
# Preprocessing title variable
corpusTitle = Corpus(VectorSource(trials$title))
corpusTitle = tm_map(corpusTitle, tolower)
corpusTitle = tm_map(corpusTitle, removePunctuation)
corpusTitle = tm_map(corpusTitle, removeWords, stopwords("english"))
corpusTitle = tm_map(corpusTitle, stemDocument)
dtmTitle = DocumentTermMatrix(corpusTitle)
# Preprocessing abstract variable
corpusAbstract = Corpus(VectorSource(trials$abstract))
corpusAbstract = tm_map(corpusAbstract, tolower)
corpusAbstract = tm_map(corpusAbstract, removePunctuation)
corpusAbstract = tm_map(corpusAbstract, removeWords, stopwords("english"))
corpusAbstract = tm_map(corpusAbstract, stemDocument)
dtmAbstract = DocumentTermMatrix(corpusAbstract)
# Limit dtmTitle and dtmAbstract to terms with sparseness of at most 95%
dtmTitle = removeSparseTerms(dtmTitle, 0.95)
dtmAbstract = removeSparseTerms(dtmAbstract, 0.95)
# How many terms remain in dtmTitle and dtmAbstract after removing sparse terms
dtmTitle
dtmAbstract
#
dtmAbstractMatrix = as.data.frame(as.matrix(dtmAbstract))
which.max(colSums(dtmAbstractMatrix))
######################################################################################################
# PROBLEM 3 - BUILDING A MODEL
# Prefix term names with T/A so title and abstract columns cannot collide.
colnames(dtmTitle) = paste0("T", colnames(dtmTitle))
colnames(dtmAbstract) = paste0("A", colnames(dtmAbstract))
## Convert both document-term matrices to data frames and combine them with
## the outcome. (The original also ran `dtm = cbind(dtmTitle, dtmAbstract)`
## directly on the DocumentTermMatrix objects; that result was immediately
## overwritten below, so the dead assignment has been removed.)
dtmAbstractMatrix= as.data.frame(as.matrix(dtmAbstract))
dtmTitleMatrix = as.data.frame(as.matrix(dtmTitle))
dtm = cbind(dtmTitleMatrix, dtmAbstractMatrix)
dtm$trial = trials$trial
# How many columns are in this combined data frame?
str(dtm)
# Splitting data: 70/30 split stratified on the outcome, so train and test
# keep the same trial/non-trial ratio. set.seed makes the split reproducible.
library(caTools)
set.seed(144)
spl = sample.split(dtm$trial, 0.7)
train = subset(dtm, spl == TRUE)
test = subset(dtm, spl == FALSE)
# What is the accuracy of the baseline model on the training set?
# (Baseline = always predict the most frequent class in this table.)
table(train$trial)
# Building a CART classification model on all title/abstract terms.
library(rpart)
library(rpart.plot)
trialCART = rpart(trial ~., data=train, method="class")
# What is the name of the first variable the model split on?
prp(trialCART)
# What is the maximum predicted probability for any result?
## Bug fix: predict.rpart has no `method` argument -- the original
## `method="class"` was silently swallowed by `...`. The default prediction
## type returns a two-column probability matrix; keep P(trial = 1).
predTrain = predict(trialCART, newdata=train)[, 2]
max(predTrain)
# What is the training set accuracy of the CART model?
table(train$trial, predTrain >= 0.5)
# 0.8233487
# Sensitivity (TPR) and Specificity (SPC) from the confusion matrix above
TPR = 441/(131+441)
SPC = 631/(631+99)
# Evaluate the CART model on the testing set
# What is the testing set accuracy, assuming a probability threshold of 0.5
## As in training: use the default probability predictions and take column 2
## (the original bogus `method="class"` argument has been removed).
predTest = predict(trialCART, newdata=test)[, 2]
table(test$trial, predTest >= 0.5)
# Using the ROCR package, what is the testing set AUC of the prediction model?
library(ROCR)
predROCR = prediction(predTest, test$trial)
perfROCR = performance(predROCR, "tpr", "fpr")
#plot(perfROCR, colorize=TRUE)
performance(predROCR, "auc")@y.values
######################################################################################################
# PART 5: DECISION-MAKER TRADEOFFS
|
4fe21c9a7f726ab1c66f65ef52118ef0aad0963e | b7457a6e39c6f2d9e0d54d0ba19fb013517a11bf | /R/barplot_three_way_count.R | 678e99164c3304abb8b4e0d184a98e30d126e2ba | [
"MIT"
] | permissive | stevenndungu/quickr | 57522cb1d2bb2e34e9823f81a03e4f78ff0d06f3 | 3d30c85d7bced4550a40ac1b8fcde81add83694a | refs/heads/master | 2022-05-27T22:52:15.251104 | 2020-05-01T09:32:16 | 2020-05-01T09:32:16 | 228,349,778 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,848 | r | barplot_three_way_count.R | #' A function that plots a three way barplot with labels (counts) see example
#' @param df A data frame with x variable, y variable and the factor variable.
#' @param x_variable the variable that will form the x-axis
#' @param y_variable the variable that will form the y-axis
#' @param groupby_variable the factor variable that holds the groupings
#' @param title the title of the plot
#' @param xlablabel the x label of the plot
#' @param ylablabel the y label of the plot
#' @param scale_values Specify the factor groups with the colours you would like them to take eg "MMale" = "#66AAD7"
#' @param sizee the size of title
#' @param angle the of lables
#' @param sizelabel the size of labels
#'
#' @examples
#'
#'
#' @export
#'
threeway_barplot_count <- function(df,x_variable, y_variable,groupby_variable, title, xlablabel = "",ylablabel = "Percent",scale_values = c("My spouse" = "#66AAD7", "My spouse & I" = "#0071BC","Myself" = "#00558D","Refuse to answer" = "#CCE3F2"), sizee = 12,angle = 0,sizelabel = 3){
ggplot2::ggplot(df, mapping = aes(x = {{x_variable}}, y = {{y_variable}}, fill = {{groupby_variable}})) +
ggplot2::geom_bar(stat = "identity", position = ggplot2::position_dodge(.85),width = 0.75) +
ggplot2:: geom_text(ggplot2::aes(label = Total_count), family = "Source Sans Pro Semibold",vjust = -0.25, size = sizelabel, position = ggplot2::position_dodge(0.85)) +
ggplot2::scale_fill_manual("legend", values = scale_values) +
ggthemes::theme_hc() + ggplot2::theme(plot.title = ggplot2::element_text(size = sizee, hjust = 0.5),
plot.subtitle = ggplot2::element_text(hjust = 0.5),
text = ggplot2::element_text(size = 12, family = "Source Sans Pro Semibold"),
panel.border = ggplot2::element_blank(),
axis.text.x = ggplot2::element_text(angle = angle, hjust = 0.5),
panel.grid.major = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_line(size = 0.6,colour = "gray"),
axis.line.x = ggplot2::element_line(color = "gray", size = 1),
legend.position = "right",legend.title = ggplot2::element_blank()) +
ggplot2::ggtitle(title) +
ggplot2::ylab(ylablabel) +
ggplot2::xlab(xlablabel)
}
#threeway_barplot_count(df = res ,x_variable = g1a4, y_variable = Total_count, groupby_variable = g1a6, title = "Number of respondents from Kebelle by Gender", xlablabel = "",ylablabel = "Percent",scale_values = c( "Male" = "#0071BC","Female" = "#CCE3F2" ), angle = 0)
|
06ef77eb9d0ca11fc74a2b0092db63f66ffa6f1d | 8d8d1d24986dce6b8a56ed8bcb71ada4b4eeb2bd | /man/maayan_pdzbase.Rd | e9ddbd5e30e258bacb4524c6005f1ca378e96214 | [
"MIT"
] | permissive | schochastics/networkdata | edaed94b788dcd925f55ae07f8a2d8b58d45ae8e | 535987d074d35206b6804e9c90dbfa4b50768632 | refs/heads/master | 2023-01-07T07:20:41.475574 | 2023-01-05T18:54:17 | 2023-01-05T18:54:17 | 226,346,857 | 142 | 17 | null | null | null | null | UTF-8 | R | false | true | 795 | rd | maayan_pdzbase.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-konnect.R
\docType{data}
\name{maayan_pdzbase}
\alias{maayan_pdzbase}
\title{Maayan Pdzbase}
\format{
igraph object
}
\source{
Data downloaded from http://konect.uni-koblenz.de/
originally from http://research.mssm.edu/maayan/datasets/qualitative_networks.shtml
}
\usage{
maayan_pdzbase
}
\description{
This is a network of protein–protein interactions from PDZBase.
}
\references{
Jerome Kunegis. KONECT - The Koblenz Network Collection. In Proc. Int. Web Observatory Workshop, pages 1343-1350, 2013.
Thijs Beuming, Lucy Skrabanek, Masha Y. Niv, Piali Mukherjee, and Harel Weinstein. PDZBase: A protein--protein interaction database for PDZ-domains. Bioinformatics, 21(6):827--828, 2005.
}
\keyword{datasets}
|
4f252db78a18ce8e3d19e5263744102e97695f2e | 05310c9acb2387eee7804fe8bb997a67c2f9bc10 | /cachematrix.R | 4db07fc23a5419741667d1ecc97628cd34a2d3ca | [] | no_license | chsels/ProgrammingAssignment2 | 2c01474a7773e79ef1b4357bc6a48acef66355b9 | d64531a173a9c3ddfcf3758ec945f535d67af388 | refs/heads/master | 2021-01-16T19:41:17.769348 | 2014-08-21T21:38:52 | 2014-08-21T21:38:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,453 | r | cachematrix.R | ## Together these functions are able to calculate and cache the inverse of a matrix defined by the user.
## Inverting matricies can be time consuming, therefore caching of the solution improves program efficiency
## by preventing redundant calculations of matrix inverses.
## makeCacheMatrix: wrap a matrix in a list of accessor closures so that its
## inverse can be memoised by cacheSolve(). The returned list contains:
##   set(y)        - replace the stored matrix and invalidate the cached inverse
##   get()         - return the stored matrix
##   setinverse(i) - store a computed inverse in the cache
##   getinverse()  - return the cached inverse, or NULL if not yet computed
## Takes as argument the matrix to be inverted.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    ## Replacing the matrix makes any previously cached inverse stale.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix(),
## computing it with solve() on the first call and serving the cached copy on
## subsequent calls.
##
## x   - a cache object produced by makeCacheMatrix()
## ... - additional arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  ## Bug fix: forward `...` to solve() -- the original accepted extra
  ## arguments but silently dropped them.
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
|
620cb9cda62addbe49560b29949a6a1e30e73dc7 | a10a1b8c532d5a6e76bb5ef11a87a9efcfec5072 | /plot3.R | 231bfc7d558565e6551e1a5f792db1a7a6122b16 | [] | no_license | eppuJ/ExData_Plotting1 | 7c23f008856715259bdbb86669ebf41e02eab3b3 | 0c0abdae25dfde69ae28a2ce7697959484545c86 | refs/heads/master | 2020-12-14T08:49:22.712979 | 2016-07-10T17:42:22 | 2016-07-10T17:42:22 | 62,992,096 | 0 | 0 | null | 2016-07-10T09:45:30 | 2016-07-10T09:45:30 | null | UTF-8 | R | false | false | 1,710 | r | plot3.R | ##Download and unzip data
if(!file.exists('data.zip')){
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url,destfile='data.zip')
}
unzip('data.zip')
##Read data to R
data<-read.table("household_power_consumption.txt", header=TRUE, sep=";")
##We need to change Date and Time values to numeric, thus concetenating Date and Time
data$DateTime<-paste(data$Date, data$Time)
##Change DateTime to yyy-mm-dd hh:mm:ss
data$DateTime<-strptime(data$DateTime, "%d/%m/%Y %H:%M:%S")
#Define day limits according to assignment
start<-which(data$DateTime==strptime("2007-02-01", "%Y-%m-%d"))
end<-which(data$DateTime==strptime("2007-02-02 23:59:00", "%Y-%m-%d %H:%M:%S"))
data2<-data[start:end,]
##setting system locale to USA to ensure correct time format
Sys.setlocale("LC_TIME", "USA")
## Sub_metering_1/2 were read as factors, so they are converted via
## as.character -> as.numeric before plotting (Sub_metering_3 is numeric).
png(filename= "plot3.png", width=480, height=480, units="px")
## Base plot: sub metering 1 in black; the custom day axis is drawn below.
## (The original first drew an empty type="n" plot that the next plot() call
## immediately replaced -- that dead call has been removed.)
plot(data2$DateTime, as.numeric(as.character(data2$Sub_metering_1)),type="l",
     ylab ="Energy sub metering", xlab="", xaxt="n")
## Overlay sub metering 2 (red) and 3 (blue).
lines(data2$DateTime, as.numeric(as.character(data2$Sub_metering_2)),type="l", col="red")
lines(data2$DateTime, data2$Sub_metering_3,type="l", col="blue")
## Day labels at midnight of each day (86400 seconds apart).
axis(1, at=c(as.numeric(min(data2$DateTime)), as.numeric(min(data2$DateTime))+86400
             , as.numeric(min(data2$DateTime))+2*86400), labels=c("Thu", "Fri", "Sat"))
legend('topright', c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       lty=c(1,1,1)
       ,col=c("black","red","blue"))
dev.off()
|
fbdd93b27a810cc3836e7c62337f4c2640ebe9b2 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.security.identity/man/kms_generate_random.Rd | 7c74cd17e0d5768e456ffbb619555b61701eb6ac | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 2,349 | rd | kms_generate_random.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kms_operations.R
\name{kms_generate_random}
\alias{kms_generate_random}
\title{Returns a random byte string that is cryptographically secure}
\usage{
kms_generate_random(
NumberOfBytes = NULL,
CustomKeyStoreId = NULL,
Recipient = NULL
)
}
\arguments{
\item{NumberOfBytes}{The length of the random byte string. This parameter is required.}
\item{CustomKeyStoreId}{Generates the random byte string in the CloudHSM cluster that is
associated with the specified CloudHSM key store. To find the ID of a
custom key store, use the
\code{\link[=kms_describe_custom_key_stores]{describe_custom_key_stores}}
operation.
External key store IDs are not valid for this parameter. If you specify
the ID of an external key store,
\code{\link[=kms_generate_random]{generate_random}} throws an
\code{UnsupportedOperationException}.}
\item{Recipient}{A signed \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/#term-attestdoc}{attestation document}
from an Amazon Web Services Nitro enclave and the encryption algorithm
to use with the enclave's public key. The only valid encryption
algorithm is \code{RSAES_OAEP_SHA_256}.
This parameter only supports attestation documents for Amazon Web
Services Nitro Enclaves. To include this parameter, use the \href{https://docs.aws.amazon.com/enclaves/latest/user/developing-applications.html#sdk}{Amazon Web Services Nitro Enclaves SDK}
or any Amazon Web Services SDK.
When you use this parameter, instead of returning plaintext bytes, KMS
encrypts the plaintext bytes under the public key in the attestation
document, and returns the resulting ciphertext in the
\code{CiphertextForRecipient} field in the response. This ciphertext can be
decrypted only with the private key in the enclave. The \code{Plaintext}
field in the response is null or empty.
For information about the interaction between KMS and Amazon Web
Services Nitro Enclaves, see \href{https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html}{How Amazon Web Services Nitro Enclaves uses KMS}
in the \emph{Key Management Service Developer Guide}.}
}
\description{
Returns a random byte string that is cryptographically secure.
See \url{https://www.paws-r-sdk.com/docs/kms_generate_random/} for full documentation.
}
\keyword{internal}
|
d3d3bac2d15508c1768c9dcdab1c7a980fd341a0 | d3eb653a6e24f38e2470f6e9b32c55e3d23ebc4a | /7212c.R | 3330935701c1c86f423427488eb3d5aa2048d0db | [] | no_license | hemants/r | 622491255f0797404c6ac0d093ea37b324ab6e9b | 4683053f8467e9918644d6c66433676923e27653 | refs/heads/master | 2020-05-03T08:55:55.187168 | 2019-05-19T08:46:28 | 2019-05-19T08:46:28 | 178,540,217 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,648 | r | 7212c.R | # ____________Statistics Assignment__________________________________________
#Q1. For the data given above, create a data frame in R, and do the following
# Ten observations for each of the six experimental groups.
group1 <- c(0, 0, 0, 0, 0, 0, 1, 1, 1, 16)
group2 <- c(3, 12, 3.5, 14, 3, 2.8, 5, 9, 5.5, 9)
group3 <- c(14, 9.5, 4.5, 7.5, 5, 2, 4.8, 3.6, 6, 8.5)
group4 <- c(9, 4.5, 9, 8, 4, 6, 5, 3.5, 2.8, 12)
group5 <- c(16, 12, 6.5, 1, 11, 5, 3, 8, 3, 4)
group6 <- c(13, 10, 10, 1, 3, 4, 6, 3.8, 4, 8)
# Bind the six groups together as the columns of one data frame.
dataFrame <- data.frame(group1, group2, group3, group4, group5, group6)
#a. Compute the mean, variance and standard deviation of each group
summary(dataFrame) # gives summary of df
# One vectorized sapply() call per statistic replaces the eighteen
# hand-written per-column calls of the original.
#mean value of each column:
sapply(dataFrame, mean)
#mean value: on entire data frame (equivalent result)
colMeans(dataFrame)
#variance:
sapply(dataFrame, var)
#standard deviation:
sapply(dataFrame, sd)
#b. Compute mean, variance and standard deviation for the entire data
# unlist() flattens the data frame column by column into one numeric
# vector, replacing the previous chain of c() concatenations.
df_vector <- unlist(dataFrame, use.names = FALSE)
length(df_vector)
mean(df_vector)
sd(df_vector)
var(df_vector)
#Q2. boxplot draws side-by-side box-and-whisker plots, one per column of
# the data frame created above.
boxplot(x = dataFrame)
# Do you observe any extreme values in any of the groups?
# Answer: group 1 contains an extreme value (an outlier).
# Which groups are more similar than others?
# Answer: groups 3 and 4 look the most alike.
#3. Please review the probability lecture notes and solutions covered previously
#_______________________ R Assignment ___________________________________________
#Q1. Create two numeric vectors (elements of your choice).
# Both these vectors should be of different lengths
# [ Case 1: when longer length is a multiple of shorter length.
#   Case 2: longer length is not a multiple of shorter length]
num_vector_case1_1 <- 1:5
num_vector_case1_2 <- 1:10
num_vector_case2_1 <- 1:5
num_vector_case2_2 <- 1:7
#a. Combine these two vectors to create a matrix
# Case 1: 10 is a multiple of 5, so the shorter vector is recycled silently.
matrix_case1 <- cbind(num_vector_case1_1, num_vector_case1_2); matrix_case1
# Case 2: 7 is not a multiple of 5, so recycling emits a warning.
matrix_case2 <- rbind(num_vector_case2_1, num_vector_case2_2); matrix_case2
#warning: number of columns of result is not a multiple of vector length
#b. Combine these two vectors to create a data frame. What are your observations
#Hint: To create a matrix you may use cbind and to create a data frame use data.frame
dataFrame_case1 <- data.frame(num_vector_case1_1, num_vector_case1_2); dataFrame_case1
# Case 2 raises an error ("arguments imply differing number of rows: 5, 7").
# Fix: capture the error message with tryCatch() instead of letting it
# abort the remainder of the script when it is source()d.
dataFrame_case2 <- tryCatch(
  data.frame(num_vector_case2_1, num_vector_case2_2),
  error = function(e) conditionMessage(e)
)
dataFrame_case2
#Q2. We have seen that in a two-dimensional object for e.g., a matrix, to perform the subset,
# we use square brackets [ ]. Inside this, the index is given as [rows , columns].
# For e.g., If m is a matrix, m[2,3] gives out the element at 2nd row and 3rd column and so on.
# Create a 3 x 5 matrix filled by row (byrow spelled TRUE, not the
# reassignable shorthand T).
q2Matrix <- matrix(
  data = c(1, 7, 10, 15, 8, 3, 2, 23, 9, 11, 7, 2, 8, 19, 1),
  nrow = 3, byrow = TRUE
)
q2Matrix
#a. Extract all the elements that are greater than or equal to 10 from the matrix.
# Vectorized logical subsetting replaces the nested element-by-element
# loops; transposing first preserves the original row-major output order.
big_values <- t(q2Matrix)[t(q2Matrix) >= 10]
print(big_values)
#b. Print every row that holds the value 10 in one of its even-numbered
# columns (each matching row printed once) — same behaviour as the
# original nested loops with break.
even_cols <- seq(2, ncol(q2Matrix), by = 2)
row_matches <- apply(q2Matrix[, even_cols, drop = FALSE] == 10, 1, any)
for (r in which(row_matches)) {
  print(q2Matrix[r, ])
}
#c. Name the rows as r1,r2 and so on and columns as c1,c2 and so on. This can be done
# using function dimnames.
# Bug fix: the third row was previously mislabelled "r2" (a duplicate);
# paste0() generates the intended r1..r3 / c1..c5 labels.
dimnames(q2Matrix) <- list(paste0("r", 1:3), paste0("c", 1:5)); dimnames(q2Matrix)
# Please look in help on how to use it. We have used this
# function in matrix function in class as well.
# Subset the matrix using the row and column *names* (quoted), as the
# exercise asks, instead of the numeric indices used previously.
# i. Get the element of second row and third columns
q2Matrix["r2", "c3"]
# ii. Get all elements of third column
q2Matrix[, "c3"]
# iii. Get elements of first and second row of first column
q2Matrix[c("r1", "r2"), "c1"]
|
1181428383e2c180fc2fdb67c00862da850a41c3 | 51a7c23253d463cd8104aec304afa27e27eea1e2 | /ui.R | 69bb7e4519911c46b9f865d5b6ab97e73cc6d4b7 | [] | no_license | juanky201271/cp-data-science-capstone | 3ba97ca40dd8e37300515191a92fd2b2a42b6afa | e8ad2f0863db145c2857dffad8f2a7cc48042ca3 | refs/heads/master | 2020-05-28T08:46:43.557321 | 2019-06-01T13:40:52 | 2019-06-01T13:40:52 | 188,944,640 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,312 | r | ui.R | library(shiny)
library(shinyjs)
library(dplyr)
library(NLP)
library(tm)
library(tidytext)
library(tidyr)
library(quanteda)
library(stringi)
library(MASS)
# CSS injected via shinyjs::inlineCSS() in the UI below:
# "#loading-content" and "#loading-bbdd-content" are semi-transparent
# full-width overlays shown while the app is busy / while the n-gram
# tables load; "#app-content" is the normal, fully opaque UI layer.
appCSS <- "
#loading-content {
position: absolute;
background: #555555;
opacity: 0.8;
z-index: 100;
left: 0;
right: 0;
height: 90%;
text-align: center;
color: #FFFFFF;
}
#loading-bbdd-content {
position: absolute;
background: #555555;
opacity: 0.8;
z-index: 100;
left: 0;
right: 0;
height: 90%;
text-align: center;
color: #FFFFFF;
}
#app-content {
opacity: 1;
z-index: 1;
}
"
# UI for the next-word prediction app: a free-text area plus three
# suggestion buttons in the sidebar, diagnostics and the prediction table
# in the main panel. Two overlay divs (styled by appCSS above) signal
# "working" and "loading Ngrams" states toggled server-side via shinyjs.
shinyUI(fluidPage(
  # Application title
  titlePanel("Next Word Prediction"),
  # Sidebar: text input, the three clickable word suggestions and a
  # cancel button (the original template comment about a slider was stale).
  sidebarLayout(
    sidebarPanel(
      useShinyjs(),
      inlineCSS(appCSS),
      # Busy overlay, shown while a prediction is being computed.
      div(
        id = "loading-content",
        h2("... Working ...")
      ),
      # Startup overlay, shown while the n-gram tables are loaded.
      div(
        id = "loading-bbdd-content",
        h2("... loading Ngrams ...")
      ),
      div(id = "app-content",
          textAreaInput("iText", "You can write whatever you want :", width = "100%", height = "150px" ),
          # Each button forwards its current label to the server as
          # input$btnLabel via the inline JS onclick handler.
          actionButton("iWord1", "...", onclick = "Shiny.setInputValue('btnLabel', this.innerText);", class = "btn-info"),
          actionButton("iWord2", "...", onclick = "Shiny.setInputValue('btnLabel', this.innerText);"),
          actionButton("iWord3", "...", onclick = "Shiny.setInputValue('btnLabel', this.innerText);"),
          helpText("When you write a space the App will predict the next word. If you write one or more words without a space at the end, the App will wait. If you puss one of three button, the word is going to join to your sentence with a space at the end.")
      )
      ,
      actionButton("iCancel", "Cancel"),
      textOutput("tError")
    ),
    # Main panel: data-loading progress, the table of candidate words,
    # the normalized input text and the single best prediction.
    mainPanel(
      strong(helpText("Data :")),
      textOutput("oPorc"),
      textOutput("oWordsNum"),
      strong(helpText("Table of predicted words :")),
      tableOutput("oTableWords"),
      helpText("freq = freq * 100000"),
      strong(helpText("Turned Text :")),
      textOutput("oTurnedText"),
      strong(helpText("The better prediction :")),
      textOutput("oPredText")
    )
  )
))
ceebdf07ff914ec799aaceb1496fa6a7e06c5097 | 8587cbe2e309f29a1e539e31509ccf75daacc764 | /14(11th Vclass)/Ising.R | e0a49157e58b30d6b24af190b9e53e377f3a5f63 | [] | no_license | alidarvishi14/applied-stochastic-process | 6d39ff6f7db0b7f60527b9c36cde61c3d2ab9941 | b528f5aec7dc6ac64001ef5e16a38fb731ad6807 | refs/heads/main | 2023-02-22T11:49:19.103362 | 2021-01-25T12:33:27 | 2021-01-25T12:33:27 | 332,736,616 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 441 | r | Ising.R | J = -1
# Single-site Gibbs (heat-bath) sampler for a 2-D Ising model on an
# n x n lattice; J (the coupling constant, defined just above) and beta
# (the inverse temperature) set the interaction strength.
n = 6
beta = 10
nsim = 10^2
# Start from an all "down" (-1) configuration.
grid = matrix(rep(-1,n^2),ncol = n ,nrow = n)
for (i in 1:nsim){
  # Pick one lattice site uniformly at random (row, column).
  loc = ceiling(n*runif(2))
  # Matrix of nearest-neighbour spin sums for every site, built by
  # shifting the grid in the four directions with zero padding at the
  # borders (free boundary conditions).
  neighbors = 
    rbind(grid[2:n,],rep(0,n))+
    rbind(rep(0,n),grid[1:(n-1),])+
    cbind(grid[,2:n],rep(0,n))+
    cbind(rep(0,n),grid[,1:(n-1)])
  pow = neighbors[loc[1],loc[2]]
  # Heat-bath update: draw the new spin from its full conditional,
  # P(spin = +1 | neighbours) = exp(2*beta*J*pow) / (1 + exp(2*beta*J*pow)).
  if (runif(1)<=exp(2*beta*J*pow)/(1+exp(2*beta*J*pow))){
    grid[loc[1],loc[2]]=1
  }else{
    grid[loc[1],loc[2]]=-1
  }
}
# Display the final configuration.
grid
26adc55c6116e8917c3e266458a2e53d1983de8b | 3f3265a63541003a268aa0f9743ddd28dbbb13b5 | /scripts/clean.R | a0731e25853e23ad7de36ac7e4256249e7149628 | [] | no_license | yannabraham/Resultats-Trail-Blanc-des-Vosges-2015 | 2303ae63a95ec81ead1bf34801cf5e3efb107644 | fbc6d2b9dc5b27a1a9d90c9840d530890514674c | refs/heads/master | 2021-01-22T22:53:25.697511 | 2015-03-13T09:10:24 | 2015-03-13T09:10:24 | 31,549,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,349 | r | clean.R | ## cleaning up the dataset
library(stringr)
# Read the raw race-results TSV: no header row, decimal commas, UTF-8,
# plus an extra tenth column ("bad.column") present in some rows.
dta <- read.delim("data/Resultats-Trail-Blanc-des-Vosges-2015.tsv",header=F,
                  col.names=c('Classement_Scratch','Nom','Temps','Dossard','Classement_Categorie',
                              'Categorie','Sexe','Club','Vitesse','bad.column'),
                  dec=',',fileEncoding='UTF8'
)
# high level cleanup
# Where bad.column is populated, the speed value landed there instead of
# Vitesse — copy it back.
dta[!is.na(dta$bad.column),'Vitesse'] <- dta[!is.na(dta$bad.column),'bad.column']
levels(dta$Categorie) <- str_trim(levels(dta$Categorie))
levels(dta$Nom) <- str_trim(levels(dta$Nom))
# Normalize whitespace inside names; the quoted characters below are
# special space characters from the source encoding (edit with care).
levels(dta$Nom) <- str_replace(levels(dta$Nom),' ','')
levels(dta$Nom) <- str_replace(levels(dta$Nom),' ',' ')
levels(dta$Sexe) <- str_trim(levels(dta$Sexe))
# NOTE(review): Categorie is trimmed a second time here (already trimmed
# above) — harmless but redundant.
levels(dta$Categorie) <- str_trim(levels(dta$Categorie))
levels(dta$Temps) <- str_trim(levels(dta$Temps))
# detail cleanup: two hand-identified bad entries in the name column.
levels(dta$Nom)[levels(dta$Nom)=='BONNET Elise bonnet nee le 14/01/1988'] <- 'BONNET Elise'
levels(dta$Nom)[levels(dta$Nom)=='?Dossard #1703'] <- 'INCONNU'
# unwanted spaces? (distribution of space counts per name)
table(str_count(levels(dta$Nom),' '))
# several composite names, some composite surnames
levels(dta$Nom)[str_count(levels(dta$Nom),' ')>1]
# save back, dropping the helper bad.column
write.table(dta[,c('Classement_Scratch','Nom','Temps','Dossard',
                   'Classement_Categorie','Categorie','Sexe','Club','Vitesse')],
            '../Resultats-Trail-Blanc-des-Vosges-2015-clean.tsv',sep='\t',
            row.names=FALSE,quote=FALSE,fileEncoding='UTF8')
|
7ba006fc5326210dff95078c8570f6d73e961d62 | 8a736317e9732b939803d041f2448c125ff49e5f | /R/fold.R | d9597ee9e004172ae8f6d97d7055cd0d4c46a73e | [] | no_license | mbojan/isnar | f753c9d6a6c2623e7725c2f03035c1f9cc89ba85 | 56177701d509b267eff845e15514e4cf48e69675 | refs/heads/master | 2021-06-10T15:32:48.345605 | 2021-02-17T20:00:40 | 2021-02-17T20:00:40 | 19,937,772 | 8 | 3 | null | 2015-04-03T12:41:31 | 2014-05-19T10:28:23 | R | UTF-8 | R | false | false | 1,189 | r | fold.R | #' Folding square matrices around the diagonal
#'
#' Fold a square matrix by collapsing lower triangle on upper triangle, or vice
#' versa, through addition.
#'
#' With \code{direction=="upper"} (the default) the values of the lower
#' triangle are added symmetrically onto the upper triangle and the lower
#' triangle is then filled with 0s; with \code{direction=="lower"} the roles
#' of the two triangles are reversed. The diagonal is never modified.
#'
#' @param x square numeric matrix
#'
#' @param direction character, one of \code{"upper"} or \code{"lower"},
#' direction of folding
#'
#' @return Square matrix of the same \code{dim} as \code{x} with the lower
#' (upper) triangle folded onto the upper (lower) triangle.
#'
#' @seealso \code{\link{upper.tri}}, \code{\link{lower.tri}}
#'
#' @example examples/fold.R
#' @export
fold <- function(x, direction = c("upper", "lower")) {
  stopifnot(is.matrix(x))
  stopifnot(dim(x)[1] == dim(x)[2])
  # Add the matrix to its transpose, then restore the untouched diagonal.
  folded <- x + t(x)
  diag(folded) <- diag(x)
  # Zero out whichever triangle was folded away.
  if (match.arg(direction) == "upper") {
    folded[lower.tri(folded)] <- 0
  } else {
    folded[upper.tri(folded)] <- 0
  }
  folded
}
e61db8ec2097fa26c99d67bd5142690583eeada9 | 85fd7570382bdccabc8daf3a41b561efb9cdf622 | /_Scenarios/__testEnv_Food/code/B.PP.GenBms.UpdateState.01.R | 1815e66bc604f5773f5057fb351b43405ed6ba5a | [] | no_license | AndrewJConstable/EPOCuniverse | 2b86f40d2539cf79a66285cc8cd67d0c391bb669 | e6561eed547befde5b88563f73d96ce9033a5179 | refs/heads/master | 2021-11-25T23:35:40.476219 | 2021-11-17T01:15:33 | 2021-11-17T01:15:33 | 104,179,577 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,633 | r | B.PP.GenBms.UpdateState.01.R | # Create S4 method 'updateState'
#B.PP.GenBms.update_State.01<-function (
setMethod("updateState", signature(element="PPgenBMS", universe="Universe"),
function(element, universe) # access to universe if needed
{
# Function: B.GET.update_State.01
# Version 0.01
# Description: update the State of the element in each polygon
# based on the Transition part of the element
# Consumption is adjusted if needed and available after the transition
# Mortality and Emigration are reset to NULL at the end of transition
# Consumption and Young are reset when used.
#
# Notes all $Mortality records should be in the abundance units relevant to the specific polygon
# it is assumed that conversion from one unit to another has been achieved elsewhere
# an error will be recorded if this is not the case
################################################################################
# Transition list
# Update = logical (flag to enter this subroutine)
# Transition$Mortality for subject element
#
# 1. Subject element polygon (relative in element)
# 2. Subject element units of qnty
# 3. Subject element stage
# 4. Subject element stage quantity lost
# 5. Subject element stage size
# 6. mortality source module
# 7. mortality source element
# 8. mortality source local population (relative in element)
# 9. mortality source stage
# Transition$Emigration of Subject element
# 1. subject polygon (relative in element)
# 2. destination polygon (relative in element)
# 3. units of qnty
# 4. stage
# 5. stage quantity
# 6. stage size
# Transition$Young = rep(0,PolygonsN) # for accumulating offspring from reproduction in each polygon of the element
# Transition$Consumption
#
# 1. mortality source local population (relative in element)
# 2. mortality source stage
# 3. quantity of source local stage that consumed prey
# 4. prey module
# 5. prey element
# 6. prey polygon (relative in element)
# 7. units of qnty
# 8. stage
# 9. stage realised quantity
# 10. stage maximum quantity
# 11. stage size
# 12. calendar year
# 13. period
# 14. proportion of year
################################################################################
# 1. a. transition matrix information (fixed column counts of the three
#       transition matrices documented above)
Cols.TransMortality <- 9
Cols.TransEmigration <- 5
Cols.TransConsumption <- 14
# 1. b. Subject Element details
elemState <- getState(element)
elemTrans <- getTransition(element)
elemListIndexes <- getElementIndexes(universe, element=getSignature(element, "ID"))
Mortality <- elemTrans$Mortality
Emigration <- elemTrans$Emigration
# Coerce flat record vectors into matrices with the documented widths.
if (!is.null(Mortality)) if(!is.matrix(Mortality)) Mortality <- matrix(Mortality ,ncol=Cols.TransMortality ,byrow=TRUE)
if (!is.null(Emigration)) if(!is.matrix(Emigration)) Emigration <- matrix(Emigration ,ncol=Cols.TransEmigration ,byrow=TRUE)
################################################################################
# 2. Adjust mortality and emigration as combined rate processes
# This assumes that losses from a polygon are fixed losses for activities but are rate losses for biota
# Thus, these routines update the mortality and emigration components of Transition before
# the state of the element is modified
# Loop through stages, polygons
for (st in 1:elemState$StageN) {
for (pn in 1:getSlot(element, "polygonsN")) {
#-------------------------------------------------------------------------------
# 2.1 set up stage-polygon matrices
# calculate abundance of the stage in the current state
# (abundance of correct units for a polygon * age structure)
# not reserved for losses due to activities in the activities module (these are exempted from the adjustment here)
# then subset mortality, emigration for the stage and polygon (not including records from activities)
# then estimate log of survivorship rate (M) for each source of loss
StageQtyInPolygon<-elemState$Abundance[[elemState$StageStrUnits]][pn]*elemState$Stage[st,pn]
Records.Mort <- 0
Records.Em <- 0
# if StageQtyInPolygon==0 then there would be no mortality or migration
if(StageQtyInPolygon>0){
if (!is.null(Mortality)) {
# Subtract fixed losses taken by activities (source module 3) first.
StageQtyInPolygon<-StageQtyInPolygon - sum(Mortality[(is.element(Mortality[,1],pn) &
                                                      is.element(Mortality[,3],st) &
                                                      is.element(Mortality[,6],3)),4])
MortStage <- matrix(Mortality[(is.element(Mortality[,1],pn) & is.element(Mortality[,3],st) & !is.element(Mortality[,6],3)),],ncol=Cols.TransMortality)
Records.Mort <- nrow(MortStage)
# check mortality quantities are in correct units
if (sum(!is.element(MortStage[,2], elemState$StageStrUnits))>0) {
# NOTE(review): 'e' below is not defined in this scope — it looks like the
# element ID/signature was intended; confirm before relying on this message.
epocErrorMessage(element, "transition$change records not all in the same units: Element ",
                 as.character(e)," Polygon ",as.character(pn), halt=TRUE)
} # end if
M.mort<- ifelse(Records.Mort>0,(-log(1-MortStage[,4] /StageQtyInPolygon)),0)
} else M.mort<-0
# end !is.null mortality
if (!is.null(Emigration)) {
EmStage <- matrix(Emigration[(is.element(Emigration[,1],pn) & is.element(Emigration[,4],st)),],ncol=Cols.TransEmigration)
Records.Em <- nrow(EmStage)
M.Em <- ifelse(Records.Em>0,(-log(1-EmStage[,5]/StageQtyInPolygon)),0)
} else M.Em<-0 # end !is.null emigration
#-------------------------------------------------------------------------------
# 2.2 Calculate total mortality (sum of all instantaneous rates)
Z <- sum(c(M.mort,M.Em))
#-------------------------------------------------------------------------------
# 2.3 given StageQtyInPolygon and sum of the log rates, calculate the total loss given the combined rates
TotalLoss <- StageQtyInPolygon*(1-exp(-Z))
#-------------------------------------------------------------------------------
# 2.4 Determine the loss.adjusted due to each component of loss using Baranov equation
#     (each source receives a share of the total loss proportional to its rate)
if(TotalLoss>0){
Loss.mort <- TotalLoss*M.mort/Z
Loss.em   <- TotalLoss*M.Em  /Z
} # end if totalloss
#-------------------------------------------------------------------------------
# 2.4 adjust consumption matrices in transition of consumer elements
if(Records.Mort>0) for (rc in 1:Records.Mort){
sm <- MortStage[rc,6] # mortality source module
se <- MortStage[rc,7] # mortality source element
if((sm*se)>0){ # do the following when not natural mortality of the subject element
sp <- MortStage[rc,8] # mortality source polygon (relative position)
sst <- MortStage[rc,9] # mortality source stage
yr <- getRTState(universe, "currentYear")
pe <- getRTState(universe, "currentPeriod")
mortalityScElement <- getEPOCElement(universe, sm, se)
consumption <- getTransition(mortalityScElement, "Consumption")
if(!is.matrix(consumption)){
consumption <- matrix(consumption,ncol=Cols.TransConsumption,byrow=TRUE)
setTransition(mortalityScElement, "Consumption", value=consumption)
}
# Overwrite the realised-quantity column (9) of the matching consumption
# record with the rate-adjusted loss for this mortality record.
consumption[
(consumption[,1]==sp &
 consumption[,2]==sst &
 consumption[,4]==elemListIndexes[1] &
 consumption[,5]==elemListIndexes[2] &
 consumption[,6]==pn &
 consumption[,8]==st &
 consumption[,12]==yr &
 consumption[,13]==pe),9] <- Loss.mort[rc]
setTransition(mortalityScElement, "Consumption", consumption)
} # end if sm*se>0
} # end loop rc and end if
#-------------------------------------------------------------------------------
# 2.5 update quantities for stage
# replace Emigration loss to matrix for use at end
if(M.Em>0) Emigration[(is.element(Emigration[,1],pn) & is.element(Emigration[,4],st)),5] <- Loss.em
# replace Mortality loss to matrix for use at end
if(M.mort>0) Mortality[(is.element(Mortality[,1],pn) & is.element(Mortality[,3],st) & !is.element(Mortality[,6],3)),4]<-Loss.mort
} # end if StageQtyInPolygon>0
################################################################################
} # end polygon
} # end stage
################################################################################
# 3. By polygon, update abundance, stage frequencies
for (pn in 1:getSlot(element, "polygonsN")) {
# 3.1 Vectors of current quantities and size for each stage in the polygon
QtyPolygon<-elemState$Abundance[[elemState$StageStrUnits]][pn]
StQty <- QtyPolygon*elemState$Stage[,pn] # vector
# 3.2 Prepare vectors for new quantities and condition
NewStQty <- rep(0,elemState$StageN)
# 3.3 By stage, update number, size, reproductive condition and health
for (st in 1:elemState$StageN) {
# Qty-Mort-Em+Im
# do in two steps
# step 1 - calculate numbers left after losses (all same size)
if(StQty[st]>0){
Remainder = StQty[st]
if(!is.null(Mortality)) Remainder <- Remainder - sum(Mortality[(is.element(Mortality[,1],pn) & is.element(Mortality[,3],st)),4])
NewStQty[st]<-Remainder
if(!is.null(Emigration)) {
Remainder <- Remainder -sum(Emigration[(is.element(Emigration[,1],pn) & is.element(Emigration[,4],st)),5])
Immigration.N <- Emigration[(is.element(Emigration[,2],pn) & is.element(Emigration[,4],st)),5]
Immigration.origin.poly <- Emigration[(is.element(Emigration[,2],pn) & is.element(Emigration[,4],st)),1]
NewStQty[st] <- Remainder+sum(Immigration.N)
}
} # end if StQty>0
} # end st
# put new numbers back in state of element
# adjust biomass of element if needed
elemState$Abundance[[elemState$StageStrUnits]][pn]<-sum(NewStQty)
# NOTE(review): ifelse() with a length-1 test returns a length-1 result, so
# the assignment below recycles a single value across all stages instead of
# writing the full NewStQty/total vector; an if/else returning the whole
# vector looks like what was intended — confirm against expected behaviour.
elemState$Stage[,pn] <- ifelse(elemState$Abundance[[elemState$StageStrUnits]][pn]>0,
                               NewStQty/elemState$Abundance[[elemState$StageStrUnits]][pn], # vector
                               rep((1/elemState$StageN),elemState$StageN)
                               )
} # end polygon
# reset $Transition (Mortality, Emigration) : change to null
elemTrans["Mortality"] <- list(NULL)
elemTrans["Emigration"] <- list(NULL)
# update element transition
setTransition(element, value=elemTrans)
doUpdate(element, FALSE)
# Update state for the element of universe
setState(element, value=elemState)
} # end function
)
# Transition$Emigration of Subject element
# 1. subject polygon (relative in element)
# 2. destination polygon (relative in element)
# 3. units of qnty
# 4. stage
# 5. stage quantity
# 6. stage size
# Transition$Mortality for subject element
#
# 1. Subject element polygon (relative in element)
# 2. Subject element units of qnty
# 3. Subject element stage
# 4. Subject element stage quantity lost
# 5. Subject element stage size
# 6. mortality source module
# 7. mortality source element
# 8. mortality source local population (relative in element)
# 9. mortality source stage
|
35e8bb716a2d29296adbeb29e7d94ab97967ea87 | 9528e128bd7efdd04f272a0c3efad0d6af3b5598 | /week_4/getdata014_Q4_1.R | 5ba6a99e6dd6d8d5b26c5860217b462bdba43880 | [] | no_license | enricgili/Coursera_3_Getting_and_Cleaning_Data | 34063108806aa03c874247be463e28822e31ec20 | 1c523cdd583d5215039a56700377afb5c6583003 | refs/heads/master | 2021-01-10T12:37:25.200622 | 2017-02-28T03:42:45 | 2017-02-28T03:42:45 | 36,046,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 282 | r | getdata014_Q4_1.R | #setwd("Documents/datasciencecoursera/Coursera_3_Getting_and_Cleaning_Data/week_4/")
#download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv","ss06hid.csv",method="curl")
# Read the ACS housing microdata; quote="" disables quote interpretation.
ss06hid <- read.csv("ss06hid.csv", quote="")
# Split every column name on "wgtp" and show the pieces for the 123rd
# name; single brackets keep the one-element list returned by strsplit.
strsplit(names(ss06hid),"wgtp")[123]
|
95a1281dc8ffb63128cb567ff46efa54216d58f0 | c85471f60e9d5c462de6c60c880d05898ec81411 | /cache/catrwilliams|rprojects|tidytuesday__Ramen_Ratings.R | ea71d69de778d02a92e0c4cab3bd61bd41fbbfa3 | [
"CC-BY-4.0",
"MIT"
] | permissive | a-rosenberg/github-content-scraper | 2416d644ea58403beacba33349ee127e4eb42afe | ed3340610a20bb3bd569f5e19db56008365e7ffa | refs/heads/master | 2020-09-06T08:34:58.186945 | 2019-11-15T05:14:37 | 2019-11-15T05:14:37 | 220,376,154 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,698 | r | catrwilliams|rprojects|tidytuesday__Ramen_Ratings.R | ## ----setup, include=FALSE------------------------------------------------
# knitr: echo all code chunks in the rendered document by default.
knitr::opts_chunk$set(echo = TRUE)
## ----library, message=FALSE, results=FALSE-------------------------------
# Ensure each requested package is available: install any that are
# missing, then attach every one of them.
libraries <- function(packages){
  for (pkg in packages) {
    # require() returns FALSE (instead of erroring) when the package is
    # not installed, so it doubles as the availability check.
    if (!require(pkg, character.only = TRUE)) {
      install.packages(pkg, dependencies = TRUE)
      library(pkg, character.only = TRUE)
    }
  }
}
# Packages used throughout the analysis (installed on demand by libraries()).
packages <- c("tidyverse","naniar","rvest","textstem","modeest","countrycode","ggridges","viridis")
libraries(packages)
theme_set(theme_classic()) #applies classic theme to all charts
## ----import, message=FALSE-----------------------------------------------
# TidyTuesday 2019-06-04 ramen ratings data, read straight from GitHub.
df <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-06-04/ramen_ratings.csv")
## ----view----------------------------------------------------------------
# First look: structure, a few rows, numeric summaries, and the number of
# distinct values per column (ascending).
glimpse(df)
head(df)
summary(df)
sapply(df, function(x) n_distinct(x)) %>% sort()
## ----missing-------------------------------------------------------------
#Visualize missing values
gg_miss_var(df) + labs(title="Missing Values")
#see count of NA values
df %>% is.na() %>% colSums() %>% sort(decreasing=TRUE)
#view only rows with NAs (either genuine NA or the literal string "NA")
df <- df %>% rownames_to_column() #add row number to make it easier to locate observations
df %>% filter_all(any_vars(.=="NA"|is.na(.)))
#add in missing value for review_number
# NOTE(review): hard-coded row/column positions (190, 2) are fragile if the
# upstream data ever changes; confirm against the source before reuse.
df[189:191,]
df[190,2] <- 2991
#find missing values for style and stars
# Impute style with the group's most frequent value (modeest::mfv1) and
# stars with the group mean, per country/brand combination.
# Fix: na.rm=T spelled out as na.rm=TRUE (T is an ordinary, reassignable variable).
df <- df %>% group_by(country, brand)%>% mutate(style = replace_na(style, mfv1(style, na.rm=TRUE)),
                                                stars = replace_na(stars, mean(stars, na.rm=TRUE))) %>% ungroup()
#see count of NA values again
df %>% is.na() %>% colSums() %>% sort(decreasing=TRUE)
#there are still some missing values, so we'll try again with less grouping
#find missing values for stars
df <- df %>% group_by(country)%>% mutate(stars = replace_na(stars, mean(stars, na.rm=TRUE))) %>% ungroup()
## ----type----------------------------------------------------------------
# Convert every remaining character column to a factor.
df <- df %>% mutate_if(is.character,as.factor)
## ----country, warning=FALSE, message=FALSE-------------------------------
#import data frame with list of countries
# Reference sheet of canonical country names; skip=1 drops its extra
# header row.
country <- read_csv("https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596", skip=1)
head(country)
#there appear to be 2 columns with slightly different names. gathering these into one column
country <- country %>% gather(column, name, c(`#country +name +i_en +alt +v_unterm`, `#country +name +preferred`)) %>% select(name) %>% distinct()
#sort out country values that are NOT in the country name to correct them
# List every ramen "country" value that has no canonical match.
country_fix <- df[!df$country %in% country$name,] %>% select(country) %>% distinct()
country_fix
#change values to appropriate names
# Map the non-standard spellings/regions found above onto canonical
# names, then restore the factor type.
df <- df %>% mutate(country = as.character(country),
                    country = (case_when(country %in% "USA" ~ "United States",
                                         country %in% "Dubai" ~ "United Arab Emirates",
                                         country %in% "Holland" ~ "Netherlands",
                                         country %in% "Sarawak" ~ "Malaysia",
                                         country %in% "UK" ~ "United Kingdom",
                                         country %in% "Phlippines" ~ "Philippines",
                                         TRUE ~ country)),
                    country = as.factor(country))
## ----brand---------------------------------------------------------------
df <- df %>% mutate(brand = str_replace_all(brand, "[:punct:]+", " "),
brand = str_replace(brand, " s", "'s"),
brand = str_to_title(brand),
brand = str_squish(brand),
brand = stem_words(brand) %>% as.factor())
## ----variety-------------------------------------------------------------
df <- df %>% mutate(variety = str_replace_all(variety, "[:punct:]+", " "),
variety = str_replace(variety, " s", "'s"),
variety = str_to_title(variety),
variety = str_squish(variety),
variety = stem_words(variety) %>% as.factor())
## ----style---------------------------------------------------------------
df %>% count(style, sort=T)
df <- df %>% group_by(style) %>% filter(n()>4) %>% ungroup()
## ----feature-------------------------------------------------------------
# change countries into regions
df$region <- df$country %>% countrycode(origin="country.name",destination="region") %>% as.factor()
df <- df %>% group_by(region) %>% filter(n()>10) %>% ungroup()
count(df, region, sort=TRUE)
# change countries into continents
df$continent <- df$country %>% countrycode(origin="country.name",destination="continent") %>% as.factor()
count(df, continent, sort=TRUE)
## ----country mean, out.width="75%"---------------------------------------
df_country <- df %>% group_by(country) %>% summarize(stars=mean(stars)) %>% ungroup()
df_country %>% mutate(country = fct_reorder(country, stars)) %>%
ggplot(aes(country, stars))+
geom_bar(stat="identity")+
coord_flip()
## ----region mean, out.width="75%"----------------------------------------
df_region <- df %>% group_by(region) %>% summarize(stars=mean(stars)) %>% ungroup()
df_region %>% mutate(region = fct_reorder(region, stars)) %>%
ggplot(aes(region, stars))+
geom_bar(stat="identity")+
coord_flip()
## ----continent mean, out.width="75%"-------------------------------------
df_continent <- df %>% group_by(continent) %>% summarize(stars=mean(stars)) %>% ungroup()
df_continent %>% mutate(continent = fct_reorder(continent, stars)) %>%
ggplot(aes(continent, stars))+
geom_bar(stat="identity")+
coord_flip()
## ----style mean, out.width="75%"-----------------------------------------
df_style <- df %>% group_by(style) %>% summarize(stars=mean(stars)) %>% ungroup()
df_style %>% mutate(style = fct_reorder(style, stars)) %>%
ggplot(aes(style, stars))+
geom_bar(stat="identity")+
coord_flip()
## ----final, message=FALSE------------------------------------------------
df %>% mutate(region = fct_reorder(region, stars)) %>%
ggplot(aes(stars, region, fill = region)) +
geom_density_ridges() +
scale_fill_viridis(option = "D", discrete = TRUE) +
theme(legend.position = "none") +
scale_x_continuous(breaks=0:5) +
labs(title="Ramen Ratings by Region", x="Rating", y="Region")
|
bbb58cd40f2828c08924242d3b437d38de1ccaab | 39c67209fb145b968537614003039dbcac7fc95c | /books/从数据到结论/R/决策树.R | da4b5019d2241e8a31fe70f61674f291d418f336 | [
"MIT"
] | permissive | yuanqingmei/Statistics_with_R | 33e1c408c4f78af2b49aa32a0e7f7aa9d134f4f6 | 673044d5231ff4b3776fc8257c9a18b34784e229 | refs/heads/master | 2020-07-13T02:12:19.640589 | 2019-06-30T15:37:18 | 2019-06-30T15:37:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 626 | r | 决策树.R | setwd('C:/Users/hnjyzdc/Desktop/R/从数据到结论')
# Decision trees with rpart on three data sets: dermatology (classification),
# mushroom edibility (classification) and Boston housing (regression).
# NOTE(review): the .txt input files are expected in the working directory
# set above — confirm paths before running.
w=read.table('Dermatology1.txt',header = T);w[,35]=factor(w[,35])
library(rpart.plot)
# Classification tree: predict class V35 from all other columns
(a=rpart(formula=V35~.,data=w))
rpart.plot(a,type=1,extra=1)
# Confusion matrix of in-sample predictions
table(w[,35],predict(a,w,type = 'class'))
# Mushroom data: classify edibility (V1)
w=read.table('agaricus-lepiota1.txt',header = T)
library(rpart.plot)
(a=rpart(V1~.,w))
rpart.plot(a,type=1,extra=1)
(z0=table(w[,1],predict(a,w,type='class')))
# In-sample misclassification rate: 1 - trace/total
z0;(E0=(sum(z0)-sum(diag(z0)))/sum(z0))
# Boston housing data: regression tree for the median value MEDV
w=read.table('housing.txt',header=T)
library(rpart.plot)
a=rpart(MEDV~.,w);a
rpart.plot(a,type=1,faclen=T)
y0=predict(a,w)
# Normalized MSE: residual variance relative to variance around the mean
# (values < 1 mean the tree beats the trivial mean predictor)
(NMSE0=mean((w$MEDV-y0)^2)/mean((w$MEDV-mean(w$MEDV))^2))
71d1350e5f5eb89a621277723f301844a4f071e0 | f4b3038c65c88be7c460ae7d1ef73fab7e3221fa | /man/file_remove.Rd | 18c800fa9c06de03935e32e817693b2f00ffe7bc | [] | no_license | cran/BayesianFROC | 8d74c554f82adbf3f73667a5b7eebb5174b4dbad | 9abf77e16c31284547960b17d77f53c30ea0d327 | refs/heads/master | 2022-02-04T14:54:01.788256 | 2022-01-23T06:22:43 | 2022-01-23T06:22:43 | 185,827,147 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 284 | rd | file_remove.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/file_remove.R
\name{file_remove}
\alias{file_remove}
\title{Execute before submission to delete redundant files.}
\usage{
file_remove()
}
\value{
none
}
\description{
This is for the developer of this package.
}
|
665368365c44fa63cf1aedc798d0ae9165d8095b | 3006e60dd4811dec1b9f4f6b1fcfa4eddb6be44a | /Courseworks/Stat133_Statistical_Data_Computation/Workout/roller/man/check_prob.Rd | 25735e7d262624fe277363ea5a8111e87a254832 | [] | no_license | duilee/workspace | 382e8a2c11bc54e76e829bdd7b98268108e68c49 | 581c7e65383fe05d57826c7ad1a4d80a1bf0fcc5 | refs/heads/master | 2023-06-05T15:29:32.363152 | 2021-06-27T02:10:08 | 2021-06-27T02:10:08 | 328,106,426 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 473 | rd | check_prob.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/device.R
\name{check_prob}
\alias{check_prob}
\title{Check probability}
\usage{
check_prob(x)
}
\arguments{
\item{x}{numeric vector with probability values}
}
\value{
returns boolean value of whether given sides are valid prob values
}
\description{
Checks the validity of the argument x. It should be a numeric vector with more than one element, containing numbers between 0 and 1 that sum to 1.
}
|
a9f2179a726ba7c8baab19405d073dd2b528dfe6 | 6874d2514172b9e809dccf1e4879e0edfaabb050 | /R/ignorelist.R | 956654ca2a8ceca70a5bd37b03b0129072d28091 | [] | no_license | cran/sen2r | 27e59e874a36d30b02f319b1e77fcd66f54d3f2e | 3720d77a025fc9f8d9e04825910e830f35ffa61b | refs/heads/master | 2023-06-29T11:44:40.672296 | 2023-06-16T06:10:02 | 2023-06-16T06:10:02 | 216,648,730 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,417 | r | ignorelist.R | #' @title Read / write the ignore list
#' @description Internal functions to read or write the file containing
#' information about images not to process (because cloud covered or because,
#' for any reason, they were not produced during previous processing runs).
#' @details Sometimes not all the output files are correctly created:
#' the main reason is the cloud coverage higher than the maximum allowed
#' value (argument `max_mask`), but also some other unexpected reasons could
#' happen, i.e. because of old name SAFE products which do not include all the tiles.
#' To prevent to try to create these files every time the function is called
#' with the same parameter file, the TOML file `".ignorelist.txt"` is created
#' in the directory where output files (or indices, or RGB images) are saved.
#' With sen2r <= 1.3.3, a different strategy was used: if `param_list` is a path,
#' these lists were saved in two hidden files (one for files not created
#' because of cloud coverage, and one for files missing for any other reason).
#' To try processing them again, delete the file(s) or set `overwrite = TRUE` in `sen2r()`.
#' @author Luigi Ranghetti, phD (2020)
#' @references L. Ranghetti, M. Boschetti, F. Nutini, L. Busetto (2020).
#' "sen2r": An R toolbox for automatically downloading and preprocessing
#' Sentinel-2 satellite data. _Computers & Geosciences_, 139, 104473.
#' \doi{10.1016/j.cageo.2020.104473}, URL: \url{https://sen2r.ranghetti.info/}.
#' @note License: GPL 3.0
#' @name write_ignorelist
#' @rdname ignorelist
#' @param pm parameter list (passed by `sen2r()`).
#' @param names_cloudcovered paths of cloud covered images (passed by `sen2r()`).
#' @param dates_cloudcovered dates of cloud covered images (passed by `sen2r()`)
#' (this is used only if `names_cloudcovered` is not specified).
#' @param names_missing paths of non produced images (passed by `sen2r()`).
#' @param param_list path of the parameter file (passed by `sen2r()`).
#' @return `write_ignorelist()` returns the path of the written TOML file
#' (invisibly).
#' @keywords internal
write_ignorelist <- function(
  pm,
  names_cloudcovered = NULL,
  dates_cloudcovered = NULL,
  names_missing = NULL,
  param_list = NULL
) {
  # Masking parameters used for this processing run; they are stored in the
  # file so that a later run can detect a parameter change (see read_ignorelist()).
  # An NA mask_type is serialized as an empty string.
  max_mask <- pm$max_mask
  mask_type <- if (is.na(pm$mask_type)) {""} else {pm$mask_type}
  # Merge with entries already on disk, so nothing previously ignored is lost.
  # Warnings are suppressed because read_ignorelist() may warn about a
  # mask_type/max_mask mismatch, which is irrelevant while rewriting the file.
  suppressWarnings(
    previous_ignorelist <- read_ignorelist(pm = pm, param_list = param_list)
  )
  # Cloud-covered dates: if names_cloudcovered was passed, derive the dates
  # from the file names (via sen2r_getElements()); otherwise use the dates
  # given directly. Either way, union with the previously stored dates.
  dates_cloudcovered <- if (!missing(names_cloudcovered)) {
    sort(unique(c(
      previous_ignorelist$dates_cloudcovered,
      sen2r_getElements(names_cloudcovered)$sensing_date
    )))
  } else {
    sort(unique(c(
      previous_ignorelist$dates_cloudcovered,
      dates_cloudcovered
    )))
  }
  # Missing products are stored by base name only; nn() presumably normalizes
  # NULL/NA input to an empty vector — TODO confirm against its definition.
  names_missing <- sort(unique(c(
    previous_ignorelist$names_missing,
    basename(nn(names_missing))
  )))
  # Build the file content in TOML-like syntax (parsed back with RcppTOML).
  ignorelist_text <- c(
    "## --- List of images to be ignored because cloud-covered ---",
    "## --- or not produced for other reasons. ---",
    "",
    "# Masking parameters used during processing",
    paste0("mask_type = \"", mask_type, "\""),
    paste0("max_mask = ", max_mask),
    "",
    "# Dates resulted cloud-covered using the above-defined parameters",
    # TOML array of dates, one per line, or an empty array if none
    if (length(dates_cloudcovered) > 0) {c(
      "dates_cloudcovered = [",
      paste0("  ",dates_cloudcovered,", "),
      "]"
    )} else {
      "dates_cloudcovered = []"
    },
    "",
    "# Images not produced for any reason",
    "# (this entry can be deleted in case the user wants to retry processing them)",
    # TOML array of quoted file names, or an empty array if none
    if (length(names_missing) > 0) {c(
      "names_missing = [",
      paste0("  \"",names_missing,"\", "),
      "]"
    )} else {
      "names_missing = []"
    },
    "",
    "# --- archive produced with sen2r (https://sen2r.ranghetti.info/) ---"
  )
  # Destination path (NULL when no output directory is defined in pm)
  ignorelist_path <- path_ignorelist(pm)
  # Write the file only when a destination directory exists
  if (length(ignorelist_path) > 0) {
    writeLines(ignorelist_text, ignorelist_path)
  }
  # Return the written path invisibly (documented return value)
  invisible(ignorelist_path)
}
#' @name read_ignorelist
#' @rdname ignorelist
#' @return `read_ignorelist()` returns a list with two vectors:
#' - `dates_cloudcovered` (dates of cloud covered files);
#' - `names_missing` (base names of files to be ignored).
#' @importFrom RcppTOML parseTOML
read_ignorelist <- function(pm, param_list = NULL) {
  # Current masking parameters, used only to warn if the stored ignore list
  # was produced with different settings (NA mask_type maps to "").
  max_mask <- pm$max_mask
  mask_type <- if (is.na(pm$mask_type)) {""} else {pm$mask_type}
  # Locate an existing ignore list, searching the output directories in
  # priority order: path_out, then path_indices, then path_rgb.
  exists_in_out <- !is.na(pm$path_out) &&
    file.exists(file.path(pm$path_out, ".ignorelist.txt"))
  exists_in_indices <- !is.na(pm$path_indices) &&
    file.exists(file.path(pm$path_indices, ".ignorelist.txt"))
  exists_in_rgb <- !is.na(pm$path_rgb) &&
    file.exists(file.path(pm$path_rgb, ".ignorelist.txt"))
  ignorelist_path <- if (exists_in_out) {
    file.path(pm$path_out, ".ignorelist.txt")
  } else if (exists_in_indices) {
    file.path(pm$path_indices, ".ignorelist.txt")
  } else if (exists_in_rgb) {
    file.path(pm$path_rgb, ".ignorelist.txt")
  } else {
    character(0)
  }
  ignorelist <- if (length(ignorelist_path) > 0) {
    # Case 1: a TOML-format ignore list exists — parse and coerce its fields
    tomlist <- parseTOML(ignorelist_path)
    tomlist$max_mask <- as.integer(nn(tomlist$max_mask))
    tomlist$dates_cloudcovered <- as.Date(nn(tomlist$dates_cloudcovered))
    tomlist$names_missing <- nn(tomlist$names_missing)
    # Warn if the file was written with different masking parameters
    # (the stored entries would then not match the current settings)
    if (any(
      tomlist$mask_type != as.character(mask_type),
      tomlist$max_mask != as.character(max_mask)
    )) {
      print_message(
        type = "warning",
        "The ignorelist was saved using ",
        "\"mask_type\" = '",tomlist$mask_type,"' and ",
        "\"max_mask\" = ",tomlist$max_mask,", ",
        "while this processing is using ",
        "\"mask_type\" = '",mask_type,"' and ",
        "\"max_mask\" = ",max_mask,". ",
        "To avoid unexpected results, delete the file ",
        "\"",ignorelist_path,"\"."
      )
    }
    # Only these two fields form the documented return value
    tomlist[c("dates_cloudcovered", "names_missing")]
  } else if (length(param_list) > 0) {
    # Case 2: fall back to the old-format (sen2r <= 1.3.3) plain-text lists,
    # stored next to the parameter JSON file
    oldignorelist_path <- gsub("\\.json$","_ignorelist.txt",param_list)
    oldcloudlist_path <- gsub("\\.json$","_cloudlist.txt",param_list)
    ignorelist0 <- if (file.exists(oldignorelist_path)) {
      readLines(oldignorelist_path)
    } else {
      character()
    }
    cloudlist0 <- if (file.exists(oldcloudlist_path)) {
      readLines(oldcloudlist_path)
    } else {
      character()
    }
    list(
      names_missing = sort(unique(basename(ignorelist0))),
      dates_cloudcovered = sort(unique(sen2r_getElements(cloudlist0)$sensing_date))
    )
  } else {
    # Case 3: no ignore list found — return an empty (typed) list
    list(
      names_missing = character(0),
      dates_cloudcovered = as.Date(character(0))
    )
  }
  # Return the list with the two documented vectors
  ignorelist
}
#' @name path_ignorelist
#' @rdname ignorelist
#' @return `path_ignorelist()` returns the path in which the TOML file
#' should be written (basing on processing parameters).
path_ignorelist <- function (pm) {
  # The ignore list lives next to the outputs: pick the first configured
  # (non-NA) directory among path_out, path_indices and path_rgb,
  # in that priority order.
  candidate_dirs <- c(pm$path_out, pm$path_indices, pm$path_rgb)
  candidate_dirs <- candidate_dirs[!is.na(candidate_dirs)]
  if (length(candidate_dirs) == 0) {
    # No output directory defined: there is nowhere to store the file.
    return(NULL)
  }
  # mustWork = FALSE: the file may not exist yet (it is about to be written).
  normalize_path(
    file.path(candidate_dirs[1], ".ignorelist.txt"),
    mustWork = FALSE
  )
}
#' @name clean_ignorelist
#' @rdname ignorelist
#' @return `clean_ignorelist()` returns NULL (it is called for its side effects).
clean_ignorelist <- function (pm, param_list = NULL) {
  # Canonical location for the current run (may be NULL if no output dirs)
  ignorelist_path <- path_ignorelist(pm) # path to be used
  # Candidate stale copies in the secondary output directories; computed
  # unconditionally (mustWork = FALSE), but only removed below when the
  # corresponding directory is actually configured.
  indicesignorelist_path <- normalize_path(
    file.path(pm$path_indices, ".ignorelist.txt"),
    mustWork = FALSE
  )
  rgbignorelist_path <- normalize_path(
    file.path(pm$path_rgb, ".ignorelist.txt"),
    mustWork = FALSE
  )
  # Remove a copy in path_indices unless it IS the canonical file
  if (!is.na(pm$path_indices) &&
      file.exists(indicesignorelist_path) &&
      indicesignorelist_path != ignorelist_path) {
    file.remove(indicesignorelist_path)
  }
  # Remove a copy in path_rgb unless it IS the canonical file
  if (!is.na(pm$path_rgb) &&
      file.exists(rgbignorelist_path) &&
      rgbignorelist_path != ignorelist_path) {
    file.remove(rgbignorelist_path)
  }
  # Remove old-format (sen2r <= 1.3.3) lists stored next to the parameter
  # JSON file, now superseded by the TOML ignore list
  if (length(param_list) > 0) {
    oldignorelist_path <- normalize_path(
      gsub("\\.json$","_ignorelist.txt",param_list),
      mustWork = FALSE
    )
    oldcloudlist_path <- normalize_path(
      gsub("\\.json$","_cloudlist.txt",param_list),
      mustWork = FALSE
    )
    if (file.exists(oldignorelist_path)) {
      file.remove(oldignorelist_path)
    }
    if (file.exists(oldcloudlist_path)) {
      file.remove(oldcloudlist_path)
    }
  }
  # Called for its side effects only
  invisible(NULL)
}
|
bc6700cbce8ff7b014a54c43b2a190a6f5da6ebb | 79f04abf894a67c37a65421b0a8fbad258d00a43 | /R/classification.R | 586ef3c613f676ca2dd2d3dad6e71c2faf20742f | [] | no_license | kimjeonghyon/Language-Basics | 9490c54caef4390b8a1257dab37ae23eb42a125a | dbfb894e7c4e9a21366a1c2e9b3ffa1ea4aedaf9 | refs/heads/master | 2021-06-24T04:09:54.906696 | 2020-12-04T01:13:46 | 2020-12-04T01:13:46 | 139,315,146 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,159 | r | classification.R | # 분류분석
install.packages("rpart")
library(rpart) # rpart() : 분류모델 생성
install.packages("rpart.plot")
library(rpart.plot) # prp(), rpart.plot() : rpart 시각화
install.packages('rattle')
library('rattle') # fancyRpartPlot() : node 번호 시각화
# 단계1. 실습데이터 생성
data(iris)
head(iris)
set.seed(415)
idx = sample(1:nrow(iris), 0.7*nrow(iris))
train = iris[idx, ]
test = iris[-idx, ]
dim(train) # 105 5
dim(test) # 45 5
table(train$Species)
head(train)
# 단계2. 분류모델 생성
# rpart(y변수 ~ x변수, data)
model = rpart(Species~., data=train) # iris의 꽃의 종류(Species) 분류
model
?rpart
# 분류모델 시각화 - rpart.plot 패키지 제공
prp(model) # 간단한 시각화
rpart.plot(model) # rpart 모델 tree 출력
fancyRpartPlot(model) # node 번호 출력(rattle 패키지 제공)
# 단계3. 분류모델 평가
pred <- predict(model, test) # 비율 예측
pred <- predict(model, test, type="class") # 분류 예측
pred
# 1) 분류모델로 분류된 y변수 보기
table(pred)
# 2) 분류모델 성능 평가
table(pred, test$Species) # 비율 예측 결과로는 table 생성 안 됨, 서로 모양이 달라서
t <- table(pred, test$Species)
(t[1,1]+t[2,2]+t[3,3])/nrow(test)
# ------------------------------------------
# Logistic regression 1
# ------------------------------------------
## [Practice 1] Logistic regression with the mtcars data set
## (car performance data): model am (automatic/manual transmission)
## from hp (horsepower), wt (weight) and cyl (cylinders).
# Step 1. Create the practice data
data(mtcars)
head(mtcars)
input <- mtcars[,c("am","cyl","hp","wt")]
print(head(input))
#Step 2. Build the classification model (binomial family = logistic)
am.data = glm(formula = am ~ cyl + hp + wt, data = input, family = binomial)
# Step 3. Evaluate the model
print(summary(am.data))
#Result: judging by the p-values, "cyl" and "hp" have little effect on "am",
#while "wt" has a significant effect
# ------------------------------------------
# Logistic regression 2
# ------------------------------------------
## [Practice 2] Logistic regression with the iris data set
# Step 1. Create the practice data
data(iris)
head(iris)
row_0 <- subset(iris)
# keep only the setosa and versicolor classes (binary outcome)
data_1 <- subset(row_0, Species=='setosa'|Species=='versicolor')
#re-factor to drop the unused level: setosa is read as Y=1, versicolor as Y=2
data_1$Species <- factor(data_1$Species)
str(data_1)
#Step 2. Build the classification model
out <- glm(Species~Sepal.Length, data=data_1, family=binomial)
# Step 3. Evaluate the model
summary(out)
# ------------------------------------------
# Random forest
# ------------------------------------------
##################################################
# randomForest practice
##################################################
install.packages('randomForest')
library(randomForest)
data(iris)
# 1. Build a random forest model (form: randomForest(y ~ x, data, ntree, mtry) )
model = randomForest(Species~., data=iris)
# 2. Adjust parameters: 300 trees, 4 candidate variables per split
model = randomForest(Species~., data=iris,
                     ntree=300, mtry=4, na.action=na.omit)
# 3. Search for the best parameters (ntree, mtry)
# - grid of candidate values for the best classification model
ntree <- c(400, 500, 600)
mtry <- c(2:4)
# build a data frame from the 2 vectors
param <- data.frame(n=ntree, m=mtry)
# Fit one model per (ntree, mtry) combination and print its OOB summary
for(i in param$n){ # 400,500,600
  cat('ntree = ', i, '\n')
  for(j in param$m){ # 2,3,4
    cat('mtry = ', j, '\n')
    model = randomForest(Species~., data=iris,
                         ntree=i, mtry=j,
                         na.action=na.omit )
    print(model)
  }
}
# 4. Variable importance
model3 = randomForest(Species ~ ., data=iris,
                      ntree=500, mtry=2,
                      importance = T, # option required to extract importance
                      na.action=na.omit )
importance(model3)
# MeanDecreaseAccuracy: higher is better — contribution to classification accuracy
# MeanDecreaseGini: higher is better — contribution to reducing node impurity
# (Gini index / entropy)
varImpPlot(model3) # plot of the important variables
# ---------------------------------
# Boosting
# ---------------------------------
##################################################
# Boosting practice
##################################################
install.packages("adabag")
library(adabag)
library(rpart)
data(iris)
# AdaBoost ensemble of 3 trees with bootstrap resampling (boos=TRUE)
iris.adaboost <- boosting(Species~., data=iris, boos=TRUE, mfinal=3)
iris.adaboost
## Data Vehicle (four classes)
library(mlbench)
data(Vehicle)
l <- length(Vehicle[,1])
# random 2/3 of the rows used for training
sub <- sample(1:l,2*l/3)
mfinal <- 3
maxdepth <- 5
# Baseline: a single rpart tree, evaluated on the held-out third
Vehicle.rpart <- rpart(Class~.,data=Vehicle[sub,],maxdepth=maxdepth)
Vehicle.rpart.pred <- predict(Vehicle.rpart,newdata=Vehicle[-sub, ],type="class")
tb <- table(Vehicle.rpart.pred,Vehicle$Class[-sub])
# misclassification rate = 1 - trace/total
error.rpart <- 1-(sum(diag(tb))/sum(tb))
tb
error.rpart
# Boosted ensemble with the same tree depth (coeflearn="Zhu" = SAMME variant)
Vehicle.adaboost <- boosting(Class ~.,data=Vehicle[sub, ],mfinal=mfinal, coeflearn="Zhu",
                             control=rpart.control(maxdepth=maxdepth))
Vehicle.adaboost.pred <- predict.boosting(Vehicle.adaboost,newdata=Vehicle[-sub, ])
Vehicle.adaboost.pred$confusion
Vehicle.adaboost.pred$error
#comparing error evolution in training and test set
errorevol(Vehicle.adaboost,newdata=Vehicle[sub, ])->evol.train
errorevol(Vehicle.adaboost,newdata=Vehicle[-sub, ])->evol.test
plot.errorevol(evol.test,evol.train)
# -----------------------------
# Naive Bayes
# ------------------------------
##################################################
# Naive Bayes practice
##################################################
# Prediction based on conditional probability
# Comparatively good performance
# Applies Bayes' theorem
# -> uses conditional probability
# -> works well for spam-message classification
# Conditional probability: probability of event B given that event A occurred
# P(B|A) = P(A|B) * P(B) / P(A)
# ----------------------------------------------------------
# e.g. probability that a message containing the words "viagra"/"stamina" is spam
# P(spam | viagra, stamina)
# event A: contains viagra/stamina -> P(A) : 5/100 (5%)
# event B: spam -> P(B) : 20/100 (20%)
# P(A|B): contains viagra/stamina given spam -> 4/20 (20%)
##################################################
# Naive Bayes basic practice: iris
##################################################
# Install the package
install.packages('e1071')
library(e1071) # provides naiveBayes()
# 1. Create train and test data sets
data(iris)
idx <- sample(1:nrow(iris), 0.7*nrow(iris)) # 7:3 split
train <- iris[idx, ]
test <- iris[-idx, ]
train; test
nrow(train)
# 2. Build the classification model using the train data
# form) naiveBayes(train, class) - train: x variables, class: y variable
model <- naiveBayes(train[-5], train$Species)
model # the 4 x variables are learned against the y variable of the train data
# 3. Evaluate the model using the test data
# form) predict(model, test, type='class')
p <- predict(model, test) # test: data set that contains the y variable
# 4. Evaluate the prediction results
table(p, test$Species) # predictions vs the y variable of the test data
t <- table(p, test$Species)
# classification accuracy: diagonal sum / number of test rows
(t[1,1]+t[2,2]+t[3,3])/nrow(test)
# --------------------------
# KNN (k-nearest neighbors, class package)
# --------------------------
install.packages("class")
library("class")

set.seed(1234)
# Random permutation of row indices; rows whose index is a multiple of 5 go
# to the test set (20%), the rest to the training set (80%). ifelse() maps
# unwanted indices to 0, which R silently drops when subsetting.
ind <- sample(1:nrow(iris), nrow(iris))
iris.training <- iris[ifelse(ind%%5!=0,ind,0), 1:4]
iris.test <- iris[ifelse(ind%%5==0,ind,0), 1:4]
dim(iris.training)
dim(iris.test)
iris.trainLabels <- iris[ifelse(ind%%5!=0,ind,0), 5]
iris.testLabels <- iris[ifelse(ind%%5==0,ind,0), 5]
iris_pred <- knn(train = iris.training, test = iris.test, cl = iris.trainLabels, k=3)
# number of misclassified test observations
sum(iris_pred != iris.testLabels)

## 1. Data
# Bugfix: these two statements were fused on one line ("d <- iris head(d)"),
# which is a syntax error.
d <- iris
head(d)

## 2. Visualization
install.packages("ggvis")
library(ggvis)
# petal width and length appear to be related
iris %>% ggvis(~Petal.Length, ~Petal.Width, fill=~factor(Species)) %>% layer_points()

## 3. Data split
# Bugfix: this call had been swallowed into the comment line above it.
table(d$Species) # 150 rows in total
# seed for reproducible random sampling
set.seed(1234)
idx <- sample(1:nrow(d), 0.7*nrow(d))
iris.train <- d[idx,] # 70% used for training
iris.test <- d[-idx,] # 30% used for testing
nrow(iris.train); nrow(iris.test);

## 4. Train / model selection
library(class)
# Run KNN for k = 3..13 and keep the accuracy of each k
result <- c()
for ( i in 3:13 ) {
  iris_model <- knn(train = iris.train[,-5],
                    test = iris.test[,-5],
                    cl = iris.train$Species, k=i )
  t <- table(iris_model, iris.test$Species)
  accuracy <- ((t[1,1]+t[2,2]+t[3,3]) / nrow(iris.test))
  result <- c(result, accuracy)
}
acc_df <- data.frame(k=3:13, accuracy=result)
# show k values sorted by accuracy
# (fixed: "acc_df$acc" relied on partial $ matching; spell the column out)
acc_df[order(acc_df$accuracy, decreasing = TRUE),]
# choose k = 3
iris_model <- knn(train = iris.train[,-5],
                  test = iris.test[,-5],
                  cl = iris.train$Species, k=3)
summary(iris_model)

## 5. Model evaluation
table(iris_model, iris.test$Species)
# confusion matrix with cell proportions
install.packages("gmodels")
library(gmodels)
CrossTable(x=iris.test$Species, y=iris_model, prop.chisq = FALSE)
# ---------------------
# SVM (support vector machine, e1071 package)
# ---------------------
library("e1071")
head(iris,5)
attach(iris)
x <- subset(iris, select=-Species)
y <- Species
# Create SVM model (formula interface) and show summary
svm_model <- svm(Species ~ ., data=iris)
summary(svm_model)
# Same model fitted from the x matrix / y vector interface
svm_model1 <- svm(x,y)
summary(svm_model1)
# Run prediction; system.time() measures the execution time in R
pred <- predict(svm_model1,x)
system.time(pred <- predict(svm_model1,x))
# Confusion matrix: compare the SVM predictions with the true classes in y.
# Bugfix: this call had been swallowed into the comment line above it.
table(pred,y)
# Tune SVM over a grid to find the best cost and gamma
svm_tune <- tune(svm, train.x=x, train.y=y,
                 kernel="radial",
                 ranges=list(cost=10^(-1:2), gamma=c(.5,1,2)))
# Bugfix: print(svm_tune) had been fused onto the ranges=... line above,
# which is a syntax error.
print(svm_tune)
# Refit with the best cost and gamma found by tuning, then summarize
svm_model_after_tune <- svm(Species ~ ., data=iris, kernel="radial", cost=1, gamma=0.5)
summary(svm_model_after_tune)
# Run prediction again with the tuned model
pred <- predict(svm_model_after_tune,x)
predict(svm_model_after_tune,x)
table(pred,y)
# -------------------------
# Artificial neural network
# -------------------------
## [Practice 1] Build an ANN model with the iris data set
# Step 1: create the data sets (70/30 split)
install.packages("nnet") # package for building ANN models
library(nnet)
data(iris)
idx = sample(1:nrow(iris), 0.7*nrow(iris))
training = iris[idx, ]
testing = iris[-idx, ]
nrow(training) # 105 (70%)
nrow(testing) # 45 (30%)
# Step 2: build ANN models with different hidden-layer sizes
model_net_iris1 = nnet(Species ~ ., training, size = 1) # hidden=1
model_net_iris1 # 11 weights
model_net_iris3 = nnet(Species ~ ., training, size = 3) # hidden=3
model_net_iris3 # 27 weights
# Step 3: inspect the weight networks
summary(model_net_iris1) # weights of the 1-hidden-node model
summary(model_net_iris3) # weights of the 3-hidden-node model
# Step 4: evaluate the models (confusion matrices on the test set)
table(predict(model_net_iris1, testing, type = "class"), testing$Species)
table(predict(model_net_iris3, testing, type = "class"), testing$Species)
## [Practice 2] Build an ANN model with the neuralnet package
# Step 1: install the package
install.packages('neuralnet')
library(neuralnet)
# Step 2: create the data sets (70/30 split)
data("iris")
idx = sample(1:nrow(iris), 0.7*nrow(iris))
training_iris = iris[idx, ]
testing_iris = iris[-idx, ]
dim(training_iris) # 105 5
dim(testing_iris) # 45 5
# Step 3: recode the factor target as a numeric column
# (neuralnet needs numeric columns; the factor Species is dropped afterwards)
training_iris$Species2[training_iris$Species == 'setosa'] <- 1
training_iris$Species2[training_iris$Species == 'versicolor'] <- 2
training_iris$Species2[training_iris$Species == 'virginica'] <- 3
training_iris$Species <- NULL
# Bugfix: the same recoding must be applied to the test set — later code
# normalizes testing_iris column-wise (min/max fails on a factor) and reads
# testing_nor$Species2, which otherwise would not exist.
testing_iris$Species2[testing_iris$Species == 'setosa'] <- 1
testing_iris$Species2[testing_iris$Species == 'versicolor'] <- 2
testing_iris$Species2[testing_iris$Species == 'virginica'] <- 3
testing_iris$Species <- NULL
head(training_iris)
# Min-max normalization: linearly rescale a numeric vector to the [0, 1]
# range (the minimum maps to 0, the maximum to 1).
normal <- function(x) {
  lo <- min(x)
  hi <- max(x)
  (x - lo) / (hi - lo)
}
# (2) Normalize the train/test data column-wise with the normalization function
training_nor <- as.data.frame(lapply(training_iris, normal))
summary(training_nor) # check values fall within 0 ~ 1
testing_nor <- as.data.frame(lapply(testing_iris, normal))
summary(testing_nor) # check values fall within 0 ~ 1
# NOTE(review): this assumes testing_iris contains only numeric columns,
# i.e. Species has already been recoded to Species2 — verify upstream.
# Step 5: build the ANN model with 1 hidden node
model_net = neuralnet(Species2 ~ Sepal.Length+Sepal.Width+Petal.Length+Petal.Width,
                      data=training_nor, hidden = 1)
model_net
plot(model_net) # visualize the neural network
# Step 6: evaluate the classification model
# (1) using compute()
model_result <- compute(model_net, testing_nor[c(1:4)])
model_result$net.result # predicted values
# (2) correlation analysis: strength of the linear relation between the
# predictions and the (normalized) true class codes
cor(model_result$net.result, testing_nor$Species2)
# Step 7: try to improve performance: 2 hidden nodes, backprop algorithm
# (1) build the ANN model
model_net2 = neuralnet(Species2 ~ Sepal.Length+Sepal.Width+Petal.Length+Petal.Width,
                       data=training_nor, hidden = 2, algorithm="backprop", learningrate=0.01 )
# (2) generate and evaluate the predictions
model_result2 <- compute(model_net2, testing_nor[c(1:4)])
cor(model_result2$net.result, testing_nor$Species2)
|
36bd5e90e12398d270cd574c4b01d7603365e12a | 49599cbeb99d70432a688e11e97a4d3160881f6d | /R/set.random.seed.R | 552518f81be8dc6b034e3c4849637ebb3feb0607 | [] | no_license | langfob/rdv-framework-frozen-google-code-export-do-not-change | 260de4eae4de9d4d0465d90e693790d57f411073 | fe7e745223251790e50bc9a412080e45e0cbc709 | refs/heads/master | 2021-01-21T23:29:59.459162 | 2015-07-17T09:30:46 | 2015-07-17T09:30:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,952 | r | set.random.seed.R | #==============================================================================
### Usage:
### source ('set.random.seed.R');
# Set the seed for random number generation
# History:
# - Extracted from reserve.validation.R.
# BTL - 02/03/09.
# - Added specification of a starting seed to use if not using the
# seed from the previous run and not using the run number as the seed.
# No value was specified before, so I think that the it would default
# to the seed from the initialcall to sample(). This means that if
# something weird happened, you wouldn't necessarily be able to
# figure out what the seed was to be able to repeat the problem.
# Also moved setting seed to previous end seed up next to where it
# is read, because where it was would only see it executed half the
# time.
# BTL - 02/03/09.
#==============================================================================
#--------------------------------------------
# Load definitions of functions called here.
#--------------------------------------------
source( 'stop.execution.R' )
#==============================================================================
#---------------------------------------------------------------
# Load global variables from a file written by the python code.
#---------------------------------------------------------------
source( 'variables.R' )
#==============================================================================
#------------------------------------------------------------------------
# I'm commenting this line out because I don't think it's needed anymore
# since I've added the startingSeed and guarantee that a seed is always
# created. I won't remove the line yet though, in case I'm wrong.
# BTL - 02/03/09.
#------------------------------------------------------------------------
# put in a dummy line with sample in it, otherwise depending on what
# other parts of the framework are operating, there may be no other
# calls to any random functions and the "save.seed <- .Random.seed"
# line will cause the program to stop
#####dummy.sample <- sample(1);
#--------------------------
# Choose how to seed the RNG, in priority order:
#  1) reuse the seed saved by the previous run (file ./rng.seed),
#  2) use the run-number-derived seed (random.seed from variables.R),
#  3) fall back to the configured default startingSeed.
if (use.same.seed.as.last.run)
{
  #load the seed from the previous run
  if (file.exists ('./rng.seed'))
  {
    previous.end.seed <- scan ("rng.seed");
    set.seed (previous.end.seed);
  } else
  {
    # The previous run cannot be reproduced without its saved seed: abort.
    cat( '\nWARNING: Could not load random seed from file.' );
    cat( '\nFile rng.seed not found.' );
    cat( '\nRun once use.same.seed.as.last.run set to false.\n' );
    stop.execution();
  }
} else if (use.run.number.as.seed)
{
  # random.seed is supplied by variables.R — presumably the run number.
  set.seed (random.seed);
} else # Not using previous or run number, so use default seed value.
{
  # Explicit default so the seed is always known and runs are repeatable.
  set.seed (startingSeed);
}
#==============================================================================
|
38c5dd208bce85b7532406fc597b7616edd76ff9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/vrtest/examples/exrates.Rd.R | cf50d20130ccca0b66f200b892f08298735c188b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 155 | r | exrates.Rd.R | library(vrtest)
### Name: exrates
### Title: wright's Exchange Rates Data
### Aliases: exrates
### Keywords: datasets
### ** Examples
data(exrates)
|
2384b1ac29d42719ebe48ddc774146ba6f41258e | 747564318a56ac683165b03ee6f1157712c2e4ce | /man/obs_rates.Rd | 95a57eeb4c58cb968bd5f8682bd31a5e1c4ea2d8 | [] | no_license | dataandcrowd/gamar | 626a9758ec58b81db71f3363a77e732cf092eecc | 3e9dd2b61b55816f79c16d152d5de32d6e9ea71d | refs/heads/master | 2023-04-02T18:00:01.423668 | 2021-04-09T14:44:43 | 2021-04-09T14:44:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 749 | rd | obs_rates.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/obs_rates.R
\name{obs_rates}
\alias{obs_rates}
\alias{obs_rates.default}
\alias{obs_rates.experiment}
\title{Extract monitoring rates}
\usage{
obs_rates(exp)
\method{obs_rates}{default}(exp)
\method{obs_rates}{experiment}(exp)
}
\arguments{
\item{exp}{An object of class \code{experiment}.}
}
\value{
A data frame that is a subset of the inputed \code{experiment} object.
}
\description{
Subsets the columns of an \code{experiment} object that correspond to the
obs_rates rates of the monitored variables.
}
\examples{
exp1 <- load_experiment("sir",
system.file("models", "sir.gaml", package = "gamar"))
exp2 <- repl(exp1, 10)
obs_rates(exp2)
}
|
3f8b746b1daa6cb43460b55b470f1e963ed0adb8 | 446373433355171cdb65266ac3b24d03e884bb5d | /man/saga_statisticsforrasters.Rd | eaf69d525ea667cfa869d7e8c3ac19cc26ff33f7 | [
"MIT"
] | permissive | VB6Hobbyst7/r_package_qgis | 233a49cbdb590ebc5b38d197cd38441888c8a6f3 | 8a5130ad98c4405085a09913b535a94b4a2a4fc3 | refs/heads/master | 2023-06-27T11:52:21.538634 | 2021-08-01T01:05:01 | 2021-08-01T01:05:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,685 | rd | saga_statisticsforrasters.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saga_statisticsforrasters.R
\name{saga_statisticsforrasters}
\alias{saga_statisticsforrasters}
\title{QGIS algorithm Statistics for rasters}
\usage{
saga_statisticsforrasters(
GRIDS = qgisprocess::qgis_default_value(),
PCTL_VAL = qgisprocess::qgis_default_value(),
MEAN = qgisprocess::qgis_default_value(),
MIN = qgisprocess::qgis_default_value(),
MAX = qgisprocess::qgis_default_value(),
VAR = qgisprocess::qgis_default_value(),
SUM = qgisprocess::qgis_default_value(),
RANGE = qgisprocess::qgis_default_value(),
PCTL = qgisprocess::qgis_default_value(),
STDDEV = qgisprocess::qgis_default_value(),
STDDEVLO = qgisprocess::qgis_default_value(),
STDDEVHI = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{GRIDS}{\code{multilayer} - Grids. .}
\item{PCTL_VAL}{\code{number} - Percentile. A numeric value.}
\item{MEAN}{\code{rasterDestination} - Arithmetic Mean. Path for new raster layer.}
\item{MIN}{\code{rasterDestination} - Minimum. Path for new raster layer.}
\item{MAX}{\code{rasterDestination} - Maximum. Path for new raster layer.}
\item{VAR}{\code{rasterDestination} - Variance. Path for new raster layer.}
\item{SUM}{\code{rasterDestination} - Sum. Path for new raster layer.}
\item{RANGE}{\code{rasterDestination} - Range. Path for new raster layer.}
\item{PCTL}{\code{rasterDestination} - Percentile. Path for new raster layer.}
\item{STDDEV}{\code{rasterDestination} - Standard Deviation. Path for new raster layer.}
\item{STDDEVLO}{\code{rasterDestination} - Mean less Standard Deviation. Path for new raster layer.}
\item{STDDEVHI}{\code{rasterDestination} - Mean plus Standard Deviation. Path for new raster layer.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifing if complete out of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or first output (most likely the main) should read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by SAGA Statistics for rasters (saga:statisticsforrasters)
}
\details{
\subsection{Outputs description}{
\itemize{
\item MEAN - outputRaster - Arithmetic Mean
\item MIN - outputRaster - Minimum
\item MAX - outputRaster - Maximum
\item VAR - outputRaster - Variance
\item SUM - outputRaster - Sum
\item RANGE - outputRaster - Range
\item PCTL - outputRaster - Percentile
\item STDDEV - outputRaster - Standard Deviation
\item STDDEVLO - outputRaster - Mean less Standard Deviation
\item STDDEVHI - outputRaster - Mean plus Standard Deviation
}
}
}
|
5295f76bee38e3107aa78ce46ba468e33c9440f8 | 60627dc5c9f23a9bafcf942c5a083629da786785 | /R/power_eeg_bands.R | b0e898a6ec48e898a0d1ddd56da5524f9359e2ce | [] | no_license | adigherman/EEGSpectralAnalysis | 6375fc44e8dd7864c0f1aa39c427a1369de1ddde | dadc57bcd0fb1ec39db0b0a9ab3ac1667e695184 | refs/heads/master | 2022-12-03T13:00:27.162666 | 2020-08-18T19:28:24 | 2020-08-18T19:28:24 | 234,624,034 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,276 | r | power_eeg_bands.R | #' @title Get power values for EEG bands
#'
#' @description Calculate power values for
#' each of the EEG bands:
#' Delta < 4
#' Theta >=4 and < 8
#' Alpha >= 8 and < 14
#' Beta >= 14 and < 32
#' Gamma >= 32 and < 50
#'
#' @param eeg_signal EEG signal expressed in micro-Volts
#' @param sampling_frequency Sampling frequency of the EEG signal. This is
#' typically equal to 125Hz. Default value is 125.
#' @param max_frequency The maximum frequency for which the spectrum is being
#' calculated. Default value is 32.
#' @param num_sec_w number of seconds in a time window used to
#' obtain the Fourier coefficients. Typically, this number is 5
#' @param aggreg_level number of 5 second intervals used to aggregate
#' power. Typically, this number is 6 to ensure a 30 second
#' interval window (standard in EEG analysis)
#' @export
#'
#' @return List containing the aggregated power values for each EEG band
power_eeg_bands = function(eeg_signal,
                           sampling_frequency = 125,
                           max_frequency = 32,
                           num_sec_w = 5,
                           aggreg_level = 6) {
  # Spectral decomposition of the raw signal (project helper fft_eeg).
  eeg_params <- fft_eeg(eeg_signal, sampling_frequency, max_frequency, num_sec_w)
  # Frequency limits (Hz) of the classical EEG bands, in output order.
  # NOTE(review): the lower Delta bound is 0.8 Hz here (not 0, despite the
  # roxygen text "Delta < 4") and each upper bound stops 0.1 Hz short of the
  # next band -- both preserved from the original implementation.
  band_limits <- list(Delta = c(0.8, 3.9),
                      Theta = c(4, 7.9),
                      Alpha = c(8, 14.9),
                      Beta  = c(15, 31.9),
                      Gamma = c(32, 49.9))
  # One aggregated absolute-power series per band.  This loop replaces five
  # copy-pasted calls in the original; behaviour and output names unchanged.
  output <- lapply(band_limits, function(lim) {
    band <- power_eeg(eeg_params$power_spectrum,
                      freq_min = lim[1], freq_max = lim[2],
                      num_sec_w, aggreg_level)
    band$absolute_band_aggreg
  })
  return(output)
}
9703c460e268ecef61fb66262ab2312792b08f6d | 29f06a220a418387418e0c7d82009896fd965f86 | /tests/testthat.R | 68827a0242a69e1bd0d4ed425e88f8e42caef6d2 | [] | no_license | ValeriVoev/VRVPackage | bab712ae4face2660151e3e1c89b4432a8c77549 | a16a21e689b6aef7578346590b0fb0a2174c2a46 | refs/heads/master | 2021-01-22T05:50:49.933235 | 2017-06-01T20:05:41 | 2017-06-01T20:05:41 | 92,499,838 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 65 | r | testthat.R | library(testthat)
# Standard testthat runner: loads the package under test and executes every
# test file under tests/testthat/.
library(VRVPackage)
test_check("VRVPackage")
|
d8fab56bfada4ef9bdf7e52bff67ef688054e08b | 3ed978fd30c968c757471d579b79c8cfaf4493b6 | /rscript/data_preprocess.R | ea1dc401dcdaad5658fa19b6939a1e57b56c8e8b | [] | no_license | csetraynor/bybrca | 28a8c2defef065f1ea0d44a1f068cea2ac66ea68 | 0ec8d861fd4fca4e6bf54e34bbac81247cce9c91 | refs/heads/master | 2021-04-09T17:20:56.906599 | 2018-03-20T17:53:22 | 2018-03-20T17:53:22 | 125,680,400 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,353 | r | data_preprocess.R | #Data preprocess
library(readr)
library(VIM)
library(dplyr)
#load medical data
md <- tbl_df(read_rds("C:/RFactory/bymetabric_files/rdsmetabric/medicaldfs.rds"))
#md <- tbl_df(read_rds("medicaldfs.rds"))
#convert missig values into NA
# Replace the source export's missing-value placeholder strings with NA.
# Non-character input is returned unchanged (only character columns can hold
# the placeholders), which makes the function safe under mutate_all().
convert_blank_to_na <- function(x) {
  if (!is.character(x)) {
    # FIX: the original tested purrr::is_character() and then had an
    # unreachable, malformed print() after return(); base is.character()
    # suffices and the dead statement is removed.
    return(x)
  }
  # Placeholder strings used by the upstream export for missing values.
  ifelse(x == " " | x == "[Not Available]" | x == "[Discrepancy]" | x == "Indeterminate" | x == "Equivocal", NA, x)
}
md <- md %>% dplyr::mutate_all(funs(convert_blank_to_na))
md %>%
VIM::aggr(prop = FALSE, combined = TRUE, numbers = TRUE,
sortVars = TRUE, sortCombs = TRUE, plot = TRUE, only.miss = FALSE)
md$grade[md$lymph_nodes_positive == 0] <- "1"
md$grade[md$lymph_nodes_positive >=1 & md$lymph_nodes_positive <= 4 ] <- "2"
md$grade[md$lymph_nodes_positive > 4] <- "3"
md$lymph_nodes_positive <- NULL
#standardise continuos covariates and impute with median value
preProc <- caret::preProcess(md %>% select(size) %>% as.matrix(), method = c("center", "scale"))
md$size <- predict(preProc, md %>% select(size) %>% as.matrix())
histogram(md$size)
md$size[is.na(md$size)] <- median(md$size, na.rm = T)
#Filter missing cohort
md <- md %>% filter(!is.na(cohort))
md %>%
VIM::aggr(prop = FALSE, combined = TRUE, numbers = TRUE,
sortVars = TRUE, sortCombs = TRUE, plot = TRUE, only.miss = FALSE)
#Imputation using Bayesian Logistic Regression
tmp <- as.factor(md$grade)
tmp <- mice::mice.impute.polyreg(y = tmp,
ry = !is.na(tmp),
x = model.matrix(~size+
intclust + cohort ,
data = md)[,-1],
wy = array(TRUE, dim = length(tmp)))
md$grade[is.na(md$grade)] <- tmp[is.na(md$grade)]
remove(tmp)
VIM::marginplot(md[c("time","tumor_stage")])
table(md$tumor_stage , md$intclust, useNA = "always")
tmp <- as.factor(md$tumor_stage)
tmp <- mice::mice.impute.polyreg(y = tmp,
ry = !is.na(tmp),
x = model.matrix(~size+
intclust + cohort + grade,
data = md)[,-1],
wy = array(TRUE, dim = length(tmp)))
md$tumor_stage[is.na(md$tumor_stage)] <- tmp[is.na(md$tumor_stage)]
remove(tmp)
assertthat::assert_that(sum(is.na(md)) == 0)
#Convert intclust as factor
md$intclust = as.factor(md$intclust)
levels(md$intclust) = c("1", "2", "3", "4ER+", "4ER-", "5", "6", "7", "8", "9", "10")
#--- Gene matrix preprocess ----- #
gendata <- read_tsv("C:/RFactory/bymetabric_files/metabricdata/brca_metabric/data_expression.txt", col_names = TRUE)
#gendata <- read_tsv("brca_metabric/data_expression.txt", col_names = TRUE)
source("https://bioconductor.org/biocLite.R")
library(Biobase)
###
glimpse(gendata)
#or impute later
gene_names <- gendata %>% dplyr::select(Hugo_Symbol) #grap gene names
gendata <- gendata %>% dplyr::select(intersect(colnames(gendata), md$patient_id))# get intersection btw clinical and gene values
sample_names <- colnames(gendata) # get sample names
#Center and scale
gendata <- unname(t(gendata))
colnames(gendata) <- gene_names %>% unlist; rownames(gendata) <- sample_names %>% unlist
preProcgm <- caret::preProcess(gendata, method = c("center", "scale"))
metaES <- predict(preProcgm, gendata)
glimpse(metaES)
metaES <- t(metaES)
colnames(metaES) <- sample_names
gendata <- metaES
rownames(gendata) <- gene_names %>% unlist
rm(metaES)
sum(is.na(gendata))
#Convert to expression set
md <- as.data.frame(md %>% filter(patient_id %in% sample_names) %>% slice(match(sample_names, patient_id))) ; row.names(md) <- md$patient_id#requires classical data frame
x <- as.matrix(gendata) ;colnames(x) <- rownames(md);
gene_names <- as.data.frame(gene_names); rownames(gene_names) <- gene_names %>% unlist
brcaES <- Biobase::ExpressionSet(x,
phenoData = as(md, "AnnotatedDataFrame"),
featureData = as(gene_names, "AnnotatedDataFrame"))
assertthat::assert_that(all(md$patient_id == brcaES$patient_id))
rm(list = c("x", "gendata"))
##### Number of genes reduction using Moderated t statistic
library(limma)
fit <- limma::lmFit(brcaES, design = as.integer(as.factor(brcaES$intclust)))
fit <- eBayes(fit)
print(fit)
glimpse(fit$p.value)
test <- exprs(brcaES)[fit$p.value < 0.01,]
gene_names <- as.data.frame(gene_names[fit$p.value < 0.01,])
rownames(gene_names) <- gene_names %>% unlist
brcaES <- Biobase::ExpressionSet(test,
phenoData = as(md, "AnnotatedDataFrame"),
featureData = as(as.data.frame(gene_names), "AnnotatedDataFrame"))
assertthat::assert_that(all(md$patient_id == brcaES$patient_id))
rm(list = c("test"))
#Imputation using impute Biobase
sum(is.na(exprs(brcaES)))
require(MSnbase)
brcaMSN <- MSnbase::as.MSnSet.ExpressionSet(brcaES)
brcaMSN <- MSnbase::impute(brcaMSN, method = "MinProb")
Biobase::exprs(brcaES) <- MSnbase::exprs(brcaMSN)
rm(brcaMSN)
assertthat::assert_that(sum(is.na(Biobase::exprs(brcaES))) == 0)
#save and clean
saveRDS(md, "Med_Data_Clean.rds")
saveRDS(brcaES, "Gen_Data.rds")
rm(list = ls())
|
e2770ba7b6e3af8d40da4491a48e4a2712957744 | df3fae87357db44ea5318690beebac74758e9020 | /Old code/hmis explore.R | f77c72ef479f5de23ace77db330f54e58bd2db6e | [] | no_license | DanWeinberger/hmis | 1f276f3bd40c0cbb7f976303407be92f732d474b | 7674eafe0d46c5c40fc664d4e88404f105559cc3 | refs/heads/master | 2020-04-08T22:06:02.762681 | 2019-10-29T20:29:09 | 2019-10-29T20:29:09 | 159,771,645 | 0 | 0 | null | 2019-10-29T19:55:05 | 2018-11-30T05:09:36 | R | UTF-8 | R | false | false | 6,182 | r | hmis explore.R | #PCV was launched by the Union Health Minister, Shri JP Nadda on May 13, 2017 at Mandi,
#Himachal Pradesh [1]. With this phased introduction, nearly 2.1 million children in
#Himachal Pradesh (all 12 districts), parts of Bihar (17 out of 38 districts)
#and Uttar Pradesh (6 out of 75 districts) will be vaccinated with PCV in the first year [2].
#This will be followed by introduction in Madhya Pradesh and Rajasthan next year, and eventually coverage will be expanded across the entire country in a phased manner, in the coming years. "
#In uttar Pradesh it is: Lakhimpur Kheri, Sitapur, Siddharth Nagar, Bahraich, Balrampur, Shrawasti;
#In Bihar: The 17 high-priority districts are Araria, Begusarai, Darbhanga, Kishanganj, Khagaria, Katihar,
#Muzaffarpur, Munger, Vaishali, Madhepura, Madhubani, Purnea, Samastipur, Saran, Sitamarhi, Sheohar and Supaul
#bh1<-read.csv('C:/Users/dmw63/Desktop/My documents h/GATES/india/hmis/Bihar series by district.csv')
#######
#Lorine
##ACUTE RESPIRATORY INFECTION REQUIRING ADMISSION??
##85% of DEATH OCCUR AT HOME--SHOULD BE IN HMIS; MANY
##IN PRIVATE SECTOR HOSPITALS
##CONTROLS: DIARRHEA deaths? ROTA VACCINE IS ROLLING OUT..
##ACUTE ENCAPHALITIS IN SOUTH UP AND BIHAR. THERE ARE SOME
##VACCINE PROGRAMS BUT STARTED 5 YEARS AGO...NOT REALLY
#IN WESTERN UP
#ASPHYXIA DEATHS WONT BE SAME HOSPITALS --
#WOULD BE IN WOMENS HOSPITAL...MANY ARE SEPARATE
#WOMENS/MENS HOSPTALS
#UP HMIS--DR VISANT (Sp?)
#######
library(lme4)
library(lubridate)
## Monthly district-level HMIS extract for Uttar Pradesh (through Mar 2019).
bh1<-read.csv('C:/Users/dmw63/Desktop/My documents h/GATES/india/hmis/hmis/uttarPradesh series to Mar 2019.csv')
str(bh1)
## DATE is parsed positionally -- assumes a fixed "xYYYY-MM"-style string
## (year in characters 2-5, month in characters 6-7); TODO confirm against
## the raw file.
bh1$month<-as.numeric(substr(bh1$DATE,6,7))
bh1$year<-as.numeric(substr(bh1$DATE,2,5))
## Rename HMIS indicator codes to readable outcome names.
## NOTE(review): the code-to-outcome mapping below is taken at face value
## from the original script; verify against the HMIS data dictionary.
bh1$uri<-bh1$X_10_13
bh1$pneu_death<-bh1$X_16_3_1
bh1$diar_death<-bh1$X_16_3_2
bh1$measles_death<-bh1$X_16_3_4
bh1$asphyxia_death<-bh1$X_16_2_2
bh1$sepsis_death<-bh1$X_16_2_1
bh1$neonatal_death<-bh1$X_16_1
## First-of-month Date for each record, built from the derived year/month.
bh1$monthdate<-as.Date(paste(bh1$year,bh1$month,'01', sep='-'))
## Districts in the first-phase PCV introduction (see header notes);
## pcv.status flags whether a district belongs to that group.
up.intro.districts<-c('Lakhimpur Kheri', 'Sitapur', 'Siddharth Nagar', 'Bahraich', 'Balrampur', 'Shrawasti')
bh1$pcv.status<-0
bh1$pcv.status[bh1$DISTRICT %in% up.intro.districts] <-1
unique(bh1$DISTRICT)
#test123
strat1<-factor(bh1$monthdate)
ds.sub<-bh1[,c('uri', 'diar_death', 'pneu_death','sepsis_death','asphyxia_death', 'measles_death', 'neonatal_death')]
bh2<-aggregate(x=ds.sub, by=list( strat1) , FUN='sum', na.rm=TRUE)
names(bh2)<-c('monthdate',names(ds.sub))
bh2$monthdate<-as.Date(bh2$monthdate)
bh2$neonatal_death[nrow(bh2)]<-NA
bh2$month<-as.factor(month(bh2$monthdate))
bh2$year<-as.factor(year(bh2$monthdate))
par(mfrow=c(3,2), mar=c(3,3,1,1))
plot(bh2$monthdate,bh2$pneu_death,main='Pneumonia deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$diar_death,main='Diarrhea deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$sepsis_death,main='Sepsis Deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$asphyxia_death,main='Asphyxia Deaths', type='l', bty='l')
plot(bh2$monthdate,bh2$measles_death,main='Measles Deaths', type='l', bty='l')
plot(bh2$monthdate, bh2$neonatal_death, main='Neonatal Deaths',type='l', bty='l')
par(mfrow=c(1,1), mar=c(2,3,1,1))
plot(bh2$uri, type='l',bty='l', main='URI cases')
mod.uri<- glm( uri~ month +year, family='poisson', data=bh2)
summary(mod.uri)
mod.pneu.death<- glm( pneu_death~ month +year, family='poisson', data=bh2[!(bh2$year %in% c('2017','2018','2019')),])
summary(mod.pneu.death)
#heatmap of reporting of URI
#seems to be incomplete before April 2017
library(reshape2)
ds.sub<-bh1[,c('DISTRICT','monthdate','uri')]
ds.m<-melt(ds.sub, id=c('DISTRICT','monthdate'))
ds.c<-dcast(ds.m, monthdate~DISTRICT)
par(mfrow=c(1,1), mar=c(1,1,1,1))
hm1<-heatmap(t(as.matrix(ds.c[,-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Colv=NA,cexRow =0.5)
#Pneumonia deaths--seems to be incomplete before April 2017
ds.sub<-bh1[,c('DISTRICT','monthdate','pneu_death')]
ds.m<-melt(ds.sub, id=c('DISTRICT','monthdate'))
ds.c<-dcast(ds.m, monthdate~DISTRICT)
par(mfrow=c(1,1), mar=c(1,1,1,1))
hm1<-heatmap(t(as.matrix(ds.c[,-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Rowv=NA, Colv=NA,cexRow =0.5)
hm1<-heatmap(t(as.matrix(ds.c[ds.c$monthdate>=as.Date('2017-04-01'),-1])), scale='row', Colv=NA,cexRow =0.5)
##Simple model#
#Consider Apr 2018-Sep 2018 as roll out perios and Oct 2018-Mar 2019 as eval period
bh3<-bh1[bh1$monthdate>=as.Date('2017-04-01'),]
bh3$post.pcv<-0
bh3$post.pcv[bh3$pcv.status==1 & bh3$monthdate>=as.Date('2018-04-01')& bh3$monthdate<=as.Date('2018-09-01')] <- 1
bh3$post.pcv[bh3$pcv.status==1 & bh3$monthdate>=as.Date('2018-10-01')& bh3$monthdate<=as.Date('2019-03-01')] <- 2
bh3$post.pcv<-as.factor(bh3$post.pcv)
bh3$date.factor<-as.factor(bh3$monthdate)
bh3$obs<-as.factor(1:nrow(bh3))
mod1<-glmer(pneu_death ~ (1|date.factor) + (1|DISTRICT) + (1|obs)+ post.pcv, data=bh3, family='poisson' )
summary(mod1)
mod1<-glmer(uri ~ (1|DISTRICT) + (1|obs)+ post.pcv, data=bh3, family='poisson' )
summary(mod1)
#Gamm with time smooth
library(mgcv)
bh3$date.cont<-bh3$year+ bh3$month/12-1/12
mod2<-gamm(uri ~ s(date.cont) + post.pcv, data=bh3, family='poisson' ,random=list(DISTRICT=~1))
summary(mod2$lme)
#mod2<-gamm(pneu_death ~ s(date.cont) + post.pcv, data=bh3, family='poisson' ,random=list(DISTRICT=~1), niterPQL=500)
#summary(mod2$lme)
#statewide
ds.c.state<-dcast(ds.m, monthdate~1, fun.aggregate = sum)
par(mar=c(3,2,1,1))
plot(ds.c.state[ds.c.state$monthdate>=as.Date('2017-04-01'),1],ds.c.state[ds.c.state$monthdate>=as.Date('2017-04-01'),2], type='l')
#Brian: analyze by region vs district
#Need a crosswalk file. Western/Central/Eastern in UP; district-> division
#District hospitals--probably kids coming from district; bigger hospitals, kids coming from other places
# Mortality data: is location based on residence or place of death?
#--does death registry include out of hospital deaths
#There is an official death registry
|
0cf1ce7ec3c8ebad5556abbe9cd0651ae44034ed | ec94dddf45e332663da3e37db2feeb709221d763 | /R/utils.R | 6ace663fea091203a548e971a582314553b7759a | [
"Apache-2.0"
] | permissive | AntoineDubois/sdcv2 | 44687ab28a1c7aa3c82702ee2506257a20475994 | 53041ecc32698089a66a0df7911dd7c0f461cc34 | refs/heads/master | 2023-07-16T20:07:11.525114 | 2021-09-06T15:27:46 | 2021-09-06T15:27:46 | 386,579,310 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,373 | r | utils.R | #############################################################
## This file provides some useful functions
## The notations are taken from Train 2003.
## antoine.dubois.fr@gmail.com - March 2021
#############################################################
##############################
# 1 - Functions
##############################
#' @title normalization
#'
#' @name normalization
#'
#' @description The function which normalizes the representative utility
#'
#' @param V The matrix of representative utilities
#'
#' @param U The matrix of the utilities
#'
#' @examples V <- matrix(rnorm(30), 10, 3)
#' U <- matrix(rnorm(30), 10, 3) + matrix(rnorm(30), 10, 3)
#' normalization(V, U)
#'
normalization <- function(V, U) {
  # Express each row of the representative utilities V relative to the total
  # utility of the corresponding row of U.
  for (i in seq_len(nrow(U))) {
    V[i, ] <- V[i, ] / sum(U[i, ])
  }
  # BUG FIX: the original returned the untouched input U, discarding the
  # normalisation computed above; return the normalised matrix instead.
  V
}
#' @title categorization
#'
#' @name categorization
#'
#' @description The function which classify by k-means the values of each column
#' of X. Then, the values of each column is set to the center of its cluster.
#'
#' @param X The matrix data
#'
#' @param np_levels The vector of the number of clusters to build in each column.
#'
#' @return $clustered The cluster's average of each attribute of each alternative
#' $category The cluster's number of each attribute of each alternative
#'
#' @export experiment
#'
#' @import stats
#'
#' @examples X <- matrix(rnorm(30), ncol=3)
#' nb_levels <- c(3, 2, 2)
#' X_clustered <- categorization(X, nb_levels)$clustered
#' X_category <- categorization(X, nb_levels)$category
#' X_clustered; X_category
categorization <- function(X, nb_levels){
  # Coerce a bare vector into a one-column matrix so the code below can treat
  # every input uniformly.
  if(is.null(ncol(X))){X <- matrix(X, ncol=1, nrow=length(X))}
  # Special case: with exactly two rows k-means is degenerate.  Each
  # observation is its own cluster, so $category is just the ascending rank
  # (1 = smaller value, 2 = larger) and the cluster averages equal the
  # original values, hence X is returned unchanged.
  if(nrow(X)==2){
    sort_index_inc <- function(x){
      return(sort(x, decreasing=FALSE, index.return=TRUE)$ix)}
    # NOTE(review): `X <- X` is a no-op kept from the original.
    X <- X
    X_cat <- apply(X, 2, sort_index_inc)
  } else{
    # X_cat[i, j] will hold the cluster number of observation i in column j.
    X_cat <- matrix(0, nrow(X), ncol(X))
    for(i in 1:ncol(X)){
      # k-means cannot produce more clusters than distinct values; cap the
      # requested number of levels for this column and warn.
      if(length(unique(X[,i])) < nb_levels[i]){
        warning("The number of levels of attributes ", i, " is inferior to the number of different attributes")
        nb_levels[i] <- length(unique(X[,i]))
      }
      # One-dimensional k-means on column i.  NOTE(review): kmeans() uses
      # random starts, so results depend on the RNG state.
      model <- stats::kmeans( X[,i], nb_levels[i])
      centers <- model$centers
      cluster <- model$cluster
      X_cat[, i] <- cluster
      # Replace each value by the centre of the cluster it was assigned to.
      for(j in 1:nrow(X)){
        X[j, i] <- centers[cluster[j]]
      }
    }
  }
  return(list(clustered=X, category=X_cat))
}
#' @title InfoMatrix
#'
#' @name InfoMatrix
#'
#' @description The function which computes the information matrix.
#'
#' @param experimental_design The table representation of an experimental design
#'
# Information matrix of an experimental design: the mean over design rows of
# the outer products b %*% t(b), taken over the decision-maker attribute
# columns and the alternatives' attribute columns.
InfoMatrix <- function(experimental_design, DM_att_names, AT_att_names, choice_set_size){
  # Alternative-attribute columns are suffixed by their position in the choice
  # set ("price.1", "price.2", ...).  c(outer(...)) reproduces the original
  # ordering (all attributes for position 1, then position 2, ...), replacing
  # the c()-growth loop.
  att_names <- c(outer(AT_att_names, seq_len(choice_set_size), paste, sep = "."))
  # Design sub-matrix in the required column order.  Columns are assumed
  # numeric (TODO confirm behaviour for factor columns, where data.matrix
  # substitutes integer codes).
  B <- data.matrix(experimental_design[c(DM_att_names, att_names)])
  # Mean outer product = t(B) %*% B / n.  crossprod() replaces the original
  # O(n * p^2) loop of rank-1 updates; unname() matches the original's
  # dimnames-free return value.
  unname(crossprod(B) / nrow(experimental_design))
}
#' @title Dcriterion
#'
#' @name Dcriterion
#'
#' @description The function which computes the determinant of the information matrix.
#'
#' @param experimental_design The design matrix
#'
#' @return The D-score of the experimental design
#'
#' @return Data Frame of a full factorial design
#'
#' @import MASS
#'
Dcriterion <- function(experimental_design, DM_att_names, AT_att_names, choice_set_size){
  # Information matrix of the candidate design.
  M <- InfoMatrix(experimental_design, DM_att_names, AT_att_names, choice_set_size)
  # D-criterion: determinant of the generalised inverse of the information
  # matrix, rounded for stable comparison between candidate designs.
  # FIX: spell `digits` in full -- the original relied on partial matching
  # of the abbreviated argument name `digit`.
  round(det(MASS::ginv(M)), digits = 5)
}
#' @title summary
#'
#' @name summary
#'
#' @description This function returns a summary of the class Experiment
#'
#' @param FD An instance of the class Experiment
#'
#' @return Some information about an instance of the class Experiment
#'
# S3-style summary method for experiment objects: prints the alternatives'
# names, their attribute names, the decision-maker grouping and the decision
# makers' characteristic names to the console.
# NOTE(review): the method is named `summary.Exepriment` while the roxygen
# above talks about class "Experiment" -- if the class attribute is spelled
# "Experiment", S3 dispatch will never reach this method; confirm the
# intended class name before renaming.
# NOTE(review): an S3 summary method conventionally has signature
# (object, ...) to match the generic.
summary.Exepriment <- function(FD){
  cat("Alternatives' names:", unlist(FD$AT_names), "\n")
  cat("Attributes' alternatives' names:", unlist(FD$info$Alternatives_attributes_names), "\n")
  # A single number is reported as "N decision makers"; a vector is reported
  # as group sizes.
  if(length(FD$groups)==1){
    cat("Number of Decision Makers:", FD$groups, "\n")
  }else{
    cat("Groups of Decision makers:", FD$groups, "\n")
  }
  cat("Decision Makers' characteristics' names:", unlist(FD$info$Decision_Makers_attributes_names))
  # The remaining fields were commented out in the original; kept for reference.
  #cat("Choice set size:", FD$info$choice_set_size, "\n")
  #cat("D-score:", FD$info$D_score, "\n")
  #cat("real beta: \n")
  #print(FD$info$mean_real_beta)
  #cat("estimated beta: \n")
  #print(FD$info$beta_hat)
}
#' @title fit
#'
#' @name fit
#'
#' @description This function gives the best estimate of the preference coefficients
#'
#' @param experimental_design The experimental design tableau (should be of format long)
#'
#' @param choice_set_size The size of each choice set
#'
#' @return The value of the loss function at its optimal parameter as well as the value of this optimal parameter
#'
#' @import optimx
# Build the negative log-likelihood of a multinomial-logit-style choice model
# as a closure over the experimental design, suitable for a numerical
# optimiser: the returned function maps a coefficient vector `beta` to the
# scalar loss.
loss <- function(experimental_design, choice_set_size){
  # Bookkeeping columns that are never model covariates.
  reserved <- c("utility", "DM_id", "choice_set", "alternative", "choice")
  function(beta) {
    covariate_cols <- setdiff(colnames(experimental_design), reserved)
    design_mat <- data.matrix(experimental_design[covariate_cols])
    # Linear "utility" of every row under the candidate coefficients.
    utilities <- design_mat %*% beta
    # Rows come in consecutive runs of `choice_set_size`; folding them into a
    # matrix column-by-column groups each choice set, and colSums() gives the
    # per-set totals used to normalise the utilities.
    set_totals <- colSums(matrix(utilities, nrow = choice_set_size))
    denom <- rep(set_totals, each = choice_set_size)
    log_prob <- log(utilities / denom)
    # Negative log-likelihood: only the chosen alternatives contribute.
    -sum(log_prob * experimental_design$choice)
  }
}
# Estimate the preference coefficients by minimising the negative
# log-likelihood returned by loss().  Returns the optimiser's parameter
# estimates (named after the covariate columns) and the objective value at
# the optimum.
fit <- function(experimental_design, choice_set_size){
  reserved <- c("utility", "DM_id", "choice_set", "alternative", "choice")
  param_names <- setdiff(colnames(experimental_design), reserved)
  n_par <- length(param_names)
  objective <- loss(experimental_design = experimental_design, choice_set_size = choice_set_size)
  # Derivative-free minimisation from a flat unit starting point.
  opt <- optimx::optimx(par = rep(1, n_par), fn = objective,
                        method = "Nelder-Mead", control = list(dowarn = FALSE))
  # optimx() returns one row per method: the first n_par columns hold the
  # parameter estimates, the next column the objective value at the optimum.
  best_value <- opt[n_par + 1]
  estimates <- opt[seq_len(n_par)]
  colnames(estimates) <- param_names
  list(solution = estimates, value = best_value)
}
|
ce4f417e24d5e1b7c629d33194cec74a9109777e | a1486d95ad8b3891003e91ed8a3c264a10dbb1fb | /man/compute_pearson.Rd | 319d1039a2fb61bacdcbc93d43531a228ae4249f | [] | no_license | fmarotta/cvtools | 1e71f187cf194a9265214214fe841f0a6770d326 | a15192353fd9b5f82839bcd0405e0aa27536da1a | refs/heads/master | 2020-09-29T16:52:51.662237 | 2019-12-10T09:19:59 | 2019-12-10T09:19:59 | 227,077,267 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 452 | rd | compute_pearson.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/models.R
\name{compute_pearson}
\alias{compute_pearson}
\title{Title}
\usage{
compute_pearson(ytest, ypred, by = NULL, alternative = "g")
}
\arguments{
\item{ytest}{Vector of true values.}
\item{ypred}{Vector of predicted values.}
\item{by}{Factor by which to split the data.}
\item{alternative}{See cor.test().}
}
\value{
A "pearson" data.table.
}
\description{
Title
}
|
f69473bc215c50c464fd09b89811f6880084a093 | ba2392803688c4d70e60982ef0640a20143f5d59 | /R/peak_finder.R | 38d7b5f5dc456f9ab39fe6c18e3aa276ffbae03a | [] | no_license | jasongallant/eodplotter | 39cc5a3c86d1fec321dbf56b9412662c8049955d | 4f2d546a7316c03cd16f65854525bb0491008851 | refs/heads/master | 2020-03-17T22:53:29.356035 | 2018-05-22T03:43:36 | 2018-05-22T03:43:36 | 134,022,244 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,339 | r | peak_finder.R | #!/usr/bin/env Rscript
library(tdmsreader)
#' Find peaks from a TDMS file
#' @export
#' @import zoo
#'
#' @param filename The filename
#' @param channel The channel name, default /'Untitled'/'Dev1/ai0' which is just common in our lab
#' @param direction Only get positive or negative peaks
#' @param threshold Threshold for cutoff
#' @param remove Remove N seconds from start and end of recording, mutually exclusive to start/end
#' @param start Start of rec, mutually exclusive to remove
#' @param end End of rec
#' @param verbose Verbose output
#' @param progressCallback Callback for progress event update
#' @param winsize size of the adaptive rolling mean/sd calculation
peakFinder <- function(filename, channel = "/'Untitled'/'Dev1/ai0'", direction = "none",
                       threshold = 7, start = NULL, end = NULL, remove = NULL,
                       verbose = FALSE, progressCallback = NULL, winsize = 5000) {
    # NOTE(review): `direction` is accepted but never used for filtering;
    # both polarities are always returned (preserved from the original).
    con = file(filename, 'rb')
    tdms = TdmsFile$new(con)
    # Scalar default handling: plain if/else instead of the original ifelse().
    chan_name = if (is.null(channel)) "/'Untitled'/'Dev1/ai0'" else channel
    chan = tdms$objects[[chan_name]]
    if (is.null(chan)) {
        # FIX: close the connection before aborting (the original leaked it).
        close(con)
        stop('Channel not found')
    }
    # Recording length in seconds (sample count * sampling interval).
    total_vals = chan$number_values * chan$properties[['wf_increment']]
    s = 0
    e = total_vals
    if (!is.null(remove)) {
        # `remove` trims N seconds from both ends; it takes precedence over
        # start/end, exactly as in the original.
        e = e - remove
        s = s + remove
    } else {
        e = if (is.null(end)) e else end
        s = if (is.null(start)) 0 else start
    }
    tdms$read_data(con, s, e)
    t = chan$time_track(start = s, end = e)
    dat = chan$data
    close(con)

    if (verbose) {
        cat(sprintf("finding peaks for %s\n", filename))
    }
    if (length(dat) < 2 * winsize + 1) {
        # Robustness: the rolling baseline and the scan loop both need at
        # least 2 * winsize + 1 samples; the original errored out here.
        warning('Recording shorter than 2 * winsize samples; no peaks computed')
        return(data.frame(peaks = numeric(), direction = character(), stringsAsFactors = FALSE))
    }

    # Adaptive baseline: rolling mean / sd over `winsize` samples.
    # NOTE(review): rollmean() output is shorter than `dat` and element i is
    # not centred on sample i, so the baseline lags slightly -- preserved
    # from the original implementation.
    mymeans = rollmean(dat, winsize)
    # Fast rolling sd from the rolling first and second moments.
    mysds = sqrt((winsize / (winsize - 1)) * (rollmean(dat^2, winsize) - mymeans^2))

    # Candidate peaks are appended to lists (amortised O(1)) instead of the
    # original rbind()-in-a-loop, which was O(n^2) in the number of peaks.
    peak_time = list()
    peak_dir = list()
    n_peaks = 0
    currTime = 0
    for (i in seq(winsize + 1, length(dat) - winsize, by = 3)) {
        ns = max(i - 1000, 1)
        ne = i + 1000
        mymean = mymeans[i]
        mysd = mysds[i]
        if (i %% 100000 == 0) {
            if (verbose) {
                cat(sprintf("\rprogress %d%%", round(100 * i / length(dat))))
            }
            if (!is.null(progressCallback)) {
                progressCallback(i / length(dat))
            }
        }
        # Refractory period: skip samples within 1 ms of the last accepted peak.
        if (t[i] - currTime > 0.001) {
            if (dat[i] > mymean + mysd * threshold) {
                loc_max = which.max(dat[ns:ne])
                loc_min = which.min(dat[ns:ne])
                # Positive peak: the local maximum precedes the local minimum.
                if (loc_min >= loc_max) {
                    n_peaks = n_peaks + 1
                    # FIX: which.max() is 1-based within dat[ns:ne], so the
                    # absolute sample index is ns + loc_max - 1; the original
                    # was one sample late.
                    peak_time[[n_peaks]] = t[ns + loc_max - 1]
                    peak_dir[[n_peaks]] = '+'
                    currTime = t[i]
                }
            } else if (dat[i] < mymean - mysd * threshold) {
                loc_max = which.max(dat[ns:ne])
                loc_min = which.min(dat[ns:ne])
                # Negative peak: the local minimum precedes the local maximum.
                if (loc_max >= loc_min) {
                    n_peaks = n_peaks + 1
                    peak_time[[n_peaks]] = t[ns + loc_min - 1]
                    peak_dir[[n_peaks]] = '-'
                    currTime = t[i]
                }
            }
        }
    }
    if (verbose) {
        cat("\r", sprintf("progress 100%%\n"))
        cat("done scanning\n")
    }
    if (n_peaks == 0) {
        cat(sprintf('No peaks found in %s\n', filename))
        peaks = data.frame(peaks = numeric(), direction = character(), stringsAsFactors = FALSE)
    } else {
        peaks = data.frame(peaks = unlist(peak_time), direction = unlist(peak_dir), stringsAsFactors = FALSE)
    }
    peaks
}
|
84e0bc283801991fb12bb367ffde76ed78f0cda6 | 1f977fccad786a0869c1e45a0fbb0b3ed7111131 | /data_download/f010_hgnc_format.r | 8c79891d85c68374f36000eccce9e0cdcd05bbef | [] | no_license | Jaimelan/mirbaseID | fcf1822a053f404ed0fd7f1811bd9934c5a2a0b4 | 96979c68ccc305fca277b0b083724cbed2b49f66 | refs/heads/master | 2023-03-15T18:45:41.538060 | 2014-03-25T12:21:06 | 2014-03-25T12:21:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,914 | r | f010_hgnc_format.r | ##h010_hgnc_format.r
##2013-11-14 dmontaner@cipf.es
##Format Gene Symbol
##' Some Gene Symbol IDs in the mirna targets need to be updated.
##' Builds `hgnc2latest`, a named character vector mapping every HGNC
##' symbol, previous symbol or alias (names) to its latest approved symbol
##' (values), and saves it as hgnc2latest.RData under tmp/hgnc.

date ()
Sys.info ()[c("nodename", "user")]
commandArgs ()
rm (list = ls ())
R.version.string ##"R version 3.0.2 (2013-09-25)"

try (source ("job.r")); try (.job)

##' ============================================================================

## Read the BioMart/HGNC export: one row per approved symbol, every column
## kept as character so symbols are never mangled.
hgnc <- read.table (file = file.path (.job$dir$tmp, "hgnc", "mart_export_hgnc.txt"),
                    header = TRUE, sep = "\t", quote = "", colClasses = "character", comment.char = "")
dim (hgnc)
colnames (hgnc)
hgnc[1:3,]

table (duplicated (hgnc[,"Approved.Symbol"])) ##OK no duplicated

## Entries whose approved symbol contains "~" are withdrawn records; drop them.
touse <- grep ("~", hgnc[,"Approved.Symbol"])
length (touse)
hgnc[touse,][1:10,]
unique (hgnc[touse, c ("Previous.Symbols", "Aliases")]) ## OK empty
## FIX: guard the negative subset. When grep matches nothing, touse is
## integer(0) and hgnc[-integer(0), ] keeps *zero* rows (it silently empties
## the table) instead of keeping them all.
if (length (touse) > 0) hgnc <- hgnc[-touse,]
dim (hgnc)

table (hgnc[,"Approved.Symbol"] == "") ##OK no empty
table (hgnc[,"Approved.Symbol"] == toupper (hgnc[,"Approved.Symbol"]))
setdiff (hgnc[,"Approved.Symbol"], toupper (hgnc[,"Approved.Symbol"]))[1:10]
grep ("ORF", hgnc[,"Approved.Symbol"], value = TRUE)

##' Aliases
##' ----------------------------------------------------------------------------
## Expand the comma-separated Aliases column into a two-column (from, to) map.
alias <- hgnc[,c ("Approved.Symbol", "Aliases")]
touse <- alias[,"Aliases"] != ""
alias <- alias[touse,]

alias.li <- strsplit (alias[,"Aliases"], ", ")
alias.lo <- vapply (alias.li, length, integer (1)) ## type-stable, unlike sapply
alias.ma <- cbind (from = unlist (alias.li), to = rep (alias[,"Approved.Symbol"], times = alias.lo))

table (duplicated (alias.ma))
table (duplicated (alias.ma[,"from"]))

##' Previous.Symbols
##' ----------------------------------------------------------------------------
## Same expansion for the Previous.Symbols column.
previ <- hgnc[,c ("Approved.Symbol", "Previous.Symbols")]
touse <- previ[,"Previous.Symbols"] != ""
previ <- previ[touse,]

previ.li <- strsplit (previ[,"Previous.Symbols"], ", ")
previ.lo <- vapply (previ.li, length, integer (1))
previ.ma <- cbind (from = unlist (previ.li), to = rep (previ[,"Approved.Symbol"], times = previ.lo))

table (duplicated (previ.ma))
table (duplicated (previ.ma[,"from"]))

##' Combine
##' ----------------------------------------------------------------------------
## Identity mapping first, then previous symbols, then aliases. On duplicated
## "from" keys duplicated() keeps the first occurrence, so the most
## authoritative mapping wins.
mat <- cbind (from = hgnc[,"Approved.Symbol"], to = hgnc[,"Approved.Symbol"])
mat <- rbind (mat, previ.ma, alias.ma)
dim (mat)
table (duplicated (mat))

dup <- duplicated (mat[,"from"])
table (dup)
mat <- mat[!dup,]

hgnc2latest <- mat[,"to"]
names (hgnc2latest) <- mat[,"from"]

##' SAVE
##' ============================================================================
setwd (file.path (.job$dir$tmp, "hgnc"))
dir ()
save (list = "hgnc2latest", file = "hgnc2latest.RData") ## not to be included in the library

###EXIT
warnings ()
sessionInfo ()
q ("no")
|
12b83f8478984156229d1197fc7f60970200e9e4 | 16a1d7ec1187e32b80364c10ddc4a106bf810948 | /man/probeR.whole.gene.Rd | 8ad613d986a149696e4207ec870c15157dfe2921 | [] | no_license | cran/ProbeR | 74a3cf933bbd35dd120375c093db58a30995d15f | 18996406185c33abaf87074a48a26d7552e623cb | refs/heads/master | 2016-09-03T07:28:37.670093 | 2009-03-01T00:00:00 | 2009-03-01T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,654 | rd | probeR.whole.gene.Rd | \name{probeR.wholegene}
\alias{probeR.wholegene}
\title{ Calculate the reliability of all genes }
\description{
Calculates the reliability for every gene using the variance of the summary values and the variance of the probe-level data.
}
\usage{
probeR.wholegene(data.summary, data.probe)
}
\arguments{
\item{data.summary}{ the summary values from the function exprs }
\item{data.probe}{the normalized probe level data. They should be normalized using the same
method for the summary values. Also they should be the same probe level data calculating
the summary values. If the user use RMA with PM values, this should be only PM values.}
}
\value{
\item{summary.var }{ the variance of the summary values}
\item{probe.var }{ the variance of the probe level data}
\item{probe.n }{ the number of probes}
  \item{reliability }{ the reliability of each gene's summary value }
}
\references{ Using Reliability with Gene Expression Models }
\author{Eun-Kyung Lee, Dianne Cook, Heike Hofmann, Maneesha Aluru, and Steve Rodermel }
\seealso{ \code{\link{probeR}} }
\examples{
library(affy)
data(affybatch.example)
eset<-expresso(affybatch.example,bg.correct=FALSE,normalize.method="quantiles",pmcorrect.method="pmonly",summary.method="medianpolish")
data.summary<-exprs(eset)
probe.norm<-normalize.AffyBatch.quantiles(affybatch.example,type="pmonly")
data.probe<-log(probes(probe.norm),base=2)
summary.value<-data.summary[1,]
probe.value<-data.probe[1:16,]
probeR(summary.value,probe.value)
data.rel<-probeR.wholegene(data.summary,data.probe)
}
\keyword{ models }% at least one, from doc/KEYWORDS
\keyword{ univar }% at least one, from doc/KEYWORDS
|
50eb0da55417c97b4fc15c3339dfe71aad5bca17 | cade24364c4d1232b5df00c98f82be708d76a205 | /reddit_comments.R | 3c58107762209d0f368ef9b05b34cb42422ea00f | [] | no_license | pjerrot/r_stuff | 7ccfe28e8c361ddd11fab39bfdadc04dbec51736 | d50c4ca5ddb2ba520ba421d14942227bd787e3be | refs/heads/master | 2023-08-04T11:33:27.002053 | 2023-07-31T14:22:15 | 2023-07-31T14:22:15 | 53,609,135 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,957 | r | reddit_comments.R |
reddit_comments <- function(subreddit=NULL, urls=NULL, flair=NULL, search=NULL) {
  # Scrape reddit and return one row per comment/reply, annotated with the
  # post it belongs to.
  #
  # Args:
  #   subreddit: subreddit name (e.g. "science"); ignored when `urls` is given
  #   urls:      explicit listing URLs to scan instead of building them
  #   flair:     restrict the post search to this flair (only when `search` is NULL)
  #   search:    free-text search within the subreddit
  #
  # Returns: data.frame with post_* columns (repeated for every reply of the
  #   same post) and reply_* columns; rows by "[deleted]" authors are removed.
  #
  # The raw reddit JSON is pretty hairy; use http://jsonviewer.stack.hu/ to
  # decompose it when debugging.
  library(rvest)
  library(dplyr)
  library(stringr)
  library(xml2)
  library(httr)
  library(R.utils)
  library(tidyverse)
  library(RJSONIO)
  library(data.table)
  source("https://raw.githubusercontent.com/pjerrot/r_stuff/master/pjerrots_mining_functions.R")
  html_elements <- rvest::html_nodes

  # --- Build the listing pages to scan -------------------------------------
  if (!is.null(search)) subreddit <- paste0(subreddit, "/search/?q=", gsub(" ", "%20", search), "&restrict_sr=1&sr_nsfw=")
  if (!is.null(flair) & is.null(search)) subreddit <- paste0(subreddit, "/search?q=flair%3A", gsub(" ", "%20", flair), "&restrict_sr=on&sort=new")
  if (!is.null(subreddit)) {
    subreddit_urls <- c(paste0("https://www.reddit.com/r/", subreddit, "/top/?t=month"),
                        paste0("https://www.reddit.com/r/", subreddit),
                        paste0("https://www.reddit.com/r/", subreddit, "/hot"),
                        paste0("https://www.reddit.com/r/", subreddit, "/new"),
                        paste0("https://www.reddit.com/r/", subreddit, "/top/?t=week"))
  } else {
    subreddit_urls <- urls
  }

  # --- Collect post permalinks from every listing page ---------------------
  sublinks <- c()
  for (url in subreddit_urls) {
    webpage <- read_html(as.character(url))
    page_txt <- as.character(webpage)  # hoisted: reused for every permalink hit
    permlinkposs <- gregexpr(pattern = 'permalink', page_txt)
    for (j in seq_along(permlinkposs[[1]])) {
      # crude extraction: take 500 chars after "permalink", keep from the
      # first "http" up to (but excluding) the closing quote
      cutout1 <- substr(page_txt, permlinkposs[[1]][j], permlinkposs[[1]][j] + 500)
      cutout2 <- substr(cutout1, gregexpr(pattern = 'http', cutout1)[[1]][1], gregexpr(pattern = 'http', cutout1)[[1]][1] + 500)
      cutout3 <- substr(cutout2, 1, gregexpr(pattern = '"', cutout2)[[1]][1] - 2)
      sublinks <- c(sublinks, cutout3)
    }
  }
  sublinks <- unique(sublinks)

  # --- Fetch and flatten the JSON of every post ----------------------------
  out <- NULL
  for (link in grep("https://www.reddit.com", sublinks, value = TRUE)) {
    print(link)
    tryCatch(
      {
        webpage2_json <- RJSONIO::fromJSON(paste0(link, "/.json"))
        # flatten the nested JSON completely: one (name, value) row per leaf
        raw_json <- data.frame(enframe(unlist(webpage2_json)))
        if (!raw_json[grep("data.children.data.is_created_from_ads_ui", raw_json$name), "value"] == "TRUE") { # Skipping ads
          # keep only the fields of interest ...
          get_these_values <- paste0("\\.", c("id", "body", "text", "ups", "subreddit_id", "author", "created", "depth", "parent_id"))
          get_these_values_str <- paste(get_these_values, collapse = "|")
          raw_json_reduced <- raw_json[grep(get_these_values_str, raw_json$name), ]
          # ... and drop look-alike fields that only add noise
          remove_these <- c("author_flair_type", "author_fullname", "author_patreon_flair", "author_fullname", "body_html",
                            "created_utc", "author_premium", "author_is_blocked", "author_cakeday", "author_flair_background_color",
                            "author_flair_css_class", "author_flair_template_id", "author_flair_text", "author_flair_text_color", "all_awardings.id")
          remove_these_str <- paste(remove_these, collapse = "|")
          # FIX: guarded negative subset -- x[-integer(0), ] would drop every
          # row (not none) whenever a post has no removable fields at all
          drop_idx <- grep(remove_these_str, raw_json_reduced$name)
          if (length(drop_idx) > 0) raw_json_reduced <- raw_json_reduced[-drop_idx, ]
          # group consecutive rows into one comment each: every ".id" row
          # marks the start of the next comment
          j <- 0
          for (i in seq_len(nrow(raw_json_reduced))) {
            if (grepl("\\.id", raw_json_reduced[i, "name"])) j <- j + 1
            raw_json_reduced[i, "seqid"] <- j
          }
          # last path component of the flattened name = variable name;
          # reshape to one row per comment
          raw_json_reduced$varname <- substr(raw_json_reduced$name, regexpr("\\.[^\\.]*$", raw_json_reduced$name) + 1, nchar(raw_json_reduced$name))
          tmp <- reshape2::dcast(raw_json_reduced, seqid ~ varname, value.var = "value", fun.aggregate = min)
          if (all(colnames(tmp) %in% c("seqid", "author", "body", "created", "depth", "id", "parent_id", "subreddit_id", "ups"))) {
            # attach info about the post itself (same value for all replies)
            tmp$post_title <- raw_json[grep("data.children.data.title", raw_json$name), "value"]
            tmp$post_ups <- as.numeric(raw_json[raw_json$name == "data.children.data.ups", "value"][1])
            tmp$post_upvote_ratio <- as.numeric(raw_json[raw_json$name == "data.children.data.upvote_ratio", "value"][1])
            tmp$post_upvote_author <- as.character(raw_json[raw_json$name == "data.children.data.author", "value"][1])
            tmp$post_flair <- as.character(raw_json[raw_json$name == "data.children.data.link_flair_text", "value"][1])
            tmp$post_subreddit <- as.character(raw_json[raw_json$name == "data.children.data.subreddit", "value"][1])
            tmp$post_created_date <- as.numeric(raw_json[raw_json$name == "data.children.data.created", "value"][1])
            # drop empty replies, then order and rename the columns
            tmp <- tmp[which(!is.na(tmp$body)), c("post_subreddit", "post_flair", "post_upvote_author", "post_title", "post_created_date", "post_ups", "post_upvote_ratio",
                                                  "author", "body", "created", "depth", "id", "parent_id", "subreddit_id", "ups")]
            colnames(tmp) <- c("post_subreddit", "post_flair", "post_upvote_author", "post_title", "post_created_date", "post_ups", "post_upvote_ratio",
                               "reply_author", "reply_body", "reply_created", "reply_depth", "reply_id", "reply_parent_id", "reply_subreddit_id", "reply_ups")
            # bind_rows(NULL, tmp) is just tmp, so no exists() dance is needed
            out <- bind_rows(out, tmp)
          } else {
            print(paste("Failed on", substr(link, 1, 30), "...: Didn't have all columns. Maybe no comments yet!"))
          }
        }
      }
      ,
      error = function(e) {
        print(e)
      }
    )
  }

  # --- Final tidy-up -------------------------------------------------------
  out$reply_datetime <- as.POSIXct(as.numeric(out$reply_created), origin = "1970-01-01")
  # FIX: was out$post_created, a non-existent column that only resolved via
  # $ partial matching; spell out the real column name
  out$post_datetime <- as.POSIXct(as.numeric(out$post_created_date), origin = "1970-01-01")
  out <- out[!out$reply_author == "[deleted]",
             c("post_subreddit", "post_datetime", "post_flair", "post_upvote_author", "post_title", "post_ups", "post_upvote_ratio",
               "reply_author", "reply_body", "reply_datetime", "reply_depth", "reply_id", "reply_parent_id", "reply_subreddit_id", "reply_ups")]
  return(out)
}
|
f17f6e2083078922d7509bfdc0a51cc6531cd1f0 | 7bcafd0564bc5e77b2a6f7843abe9362d2baf3ee | /iswtest.R | cb42cbfeb86f8e80d6dc4cfc96bfbaee796607ff | [] | no_license | jaskonas/rfiles-public | a71081c77e73141643f19aedce77a1c08d3cbd91 | 0c3d880e81c74eaa8b8ac787ad276cbb84fb0229 | refs/heads/master | 2021-01-10T10:40:09.906779 | 2019-08-28T16:40:37 | 2019-08-28T16:40:37 | 51,803,826 | 0 | 0 | null | 2016-02-17T11:06:35 | 2016-02-16T03:04:20 | R | UTF-8 | R | false | false | 663 | r | iswtest.R | library(igraph)
library(fmsb)
library(ggplot2)
library(tidyr)
library(grid)
library(gridExtra)
library(scales)
library(network)
# setwd("~/Dropbox/Appcare")
# Simulate a 200-node scale-free graph by preferential attachment
# (Barabasi-Albert model): undirected, attachment exponent 0.8.
# All other arguments are left at the igraph defaults (spelled out here).
test1 <- barabasi.game(200, power = 0.8, m = NULL,
                       out.dist = NULL, out.seq = NULL,
                       out.pref = FALSE, zero.appeal = 0.5, directed = FALSE,
                       algorithm = c("psumtree", "psumtree-multiple", "bag"),
                       start.graph = NULL)
# Style the vertices (small, dark blue) and draw without labels.
V(test1)$size=3
V(test1)$color="#04173b"
plot(test1, vertex.label="")
library(intergraph)
# test2=network(matrix(data=1))
# for (i in 1:10){
#   add.vertices(2)
# }
# Convert the igraph object to a `network` object via intergraph.
net1=asNetwork(test1)
# NOTE(review): ggplot() on a bare network object has no obvious fortify
# method -- confirm this renders as intended (ggnetwork/GGally may be needed).
ggplot(net1)
|
53da996dc39664634c77f865c93be07b4d7dc089 | 4be8570d1580d88f4ec06eb0187d1ac173971012 | /cachematrix.R | 808b8de057dbd102ed987386944e61ec3594c7b0 | [] | no_license | edavidaja/ProgrammingAssignment2 | fd2dbd64ac6255b122c05310980773108e1f5b69 | 72cb3013ac0c127b4371097b76ca3678d62fd618 | refs/heads/master | 2020-07-10T12:58:07.373102 | 2015-07-25T19:09:41 | 2015-07-25T19:09:41 | 39,685,529 | 0 | 0 | null | 2015-07-25T13:06:34 | 2015-07-25T13:06:33 | null | UTF-8 | R | false | false | 842 | r | cachematrix.R | ## makeCacheMatrix computes the matrix inverse of matrix X
## cacheSolve checks to see if a solution for X has already been computed
## and, computes and displays it if not; otherwise it displays the
## previously computed result
## Caching the solution to the matrix
## Build a cache-aware wrapper around a matrix: a list of four closures that
## read/replace the matrix and read/store its inverse. Replacing the matrix
## via set() clears any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  set <- function(value) {
    x <<- value
    inverse_cache <<- NULL  # stored inverse is no longer valid
  }
  get <- function() x
  setinv <- function(inverse) inverse_cache <<- inverse
  getinv <- function() inverse_cache
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Check if matrix inverse is cached; solve if not
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## On the first call the inverse is computed with solve() and stored in the
## cache; subsequent calls reuse the stored value (announced via a message).
## Any extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  if (is.null(inverse)) {
    ## cache miss: compute the inverse and remember it for next time
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
  } else {
    message("getting cached data")
  }
  inverse
}
|
f97f31c4d664bdb46a1aab3b0bb713804bd0d3e6 | 1a3300279e9f163909247b6998673c09daa5d450 | /13 fileInput/ui.r | e76d29aa755750e0bf1050b0e2b31059bd244074 | [] | no_license | SimchaGD/Shiny-Tutorial | 8d95dd5d0d5b0552608d026c92caee020aa029bd | ae6e893f5375fa8571b551e1b69a22aea3306bb3 | refs/heads/master | 2020-07-18T17:33:31.551983 | 2019-09-19T15:53:04 | 2019-09-19T15:53:04 | 206,285,130 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 862 | r | ui.r | # load shiny
library(shiny)

# UI for the file-upload demo app: a sidebar collecting a file plus the
# read.table parsing parameters (header flag and field separator), and a
# main panel whose content ("tb") is rendered dynamically by the server.
shinyUI(
  fluidPage(
    titlePanel(title = "File input"),
    sidebarLayout(
      sidebarPanel(
        fileInput("file", "Upload the file"),
        helpText("Default max. file size is 5MB"),
        br(),
        h5("Select the read.table parameters below"),
        # unchecked by default: uploaded files are assumed headerless
        checkboxInput(inputId = "header", label = "Header", value = FALSE),
        br(),
        # display-name = separator-character pairs passed to read.table(sep=)
        radioButtons("sep", label = "Separator",
                     choices = c(Comma = ",",
                                 Semicolon = ";",
                                 Tab = "\t",
                                 Space = " "))
      ),
      mainPanel(
        # server-driven output: the server decides what "tb" shows
        uiOutput("tb")
      )
    )
  )
|
037ac21a4ed35e02805f560ff627028399719323 | adc2fd58b4326ecfaf0e743698246da8846c981d | /Data_Cleaning/lab/Data_Cleaning_Lab_key.R | 135ab9e41444ba731f8e8361bc81d48ac6a7843d | [
"MIT"
] | permissive | andrewejaffe/summerR_2016 | c26d8e709c26100e1e40df3d9c1ad213fa34d94a | 8f47a6735f039499eba66e30e0b40c59a36c1c6b | refs/heads/gh-pages | 2020-04-06T03:36:03.439029 | 2016-06-24T19:25:02 | 2016-06-24T19:25:02 | 60,783,896 | 0 | 4 | null | 2016-06-14T02:00:58 | 2016-06-09T15:08:11 | HTML | UTF-8 | R | false | false | 5,588 | r | Data_Cleaning_Lab_key.R | #################
# Data Cleaning and Plotting
##############
# 6/15/2016
## Download the "Real Property Taxes" Data from my website (via OpenBaltimore):
# www.aejaffe.com/summerR_2016/data/real_property_tax.csv.gz
## note you don't need to unzip it to read it into R
rm( list = ls() ) # clear the workspace
library(stringr)
library(dplyr)
# 1. Read the Property Tax data into R and call it the variable `tax`
#    (read.csv reads the gzipped file directly)
tax = read.csv( "~/GitHub/summerR_2016/data/real_property_tax.csv.gz",
                stringsAsFactors = FALSE)
# write.csv(tax, file=gzfile("file.csv.gz"))
# 2. How many addresses pay property taxes?
nrow(tax)
dim(tax)
# 3. What is the total city and state tax paid?
#    The tax columns are strings like "$1234.56": strip the "$" and convert.
head(tax$cityTax)
cityTax = tax$cityTax %>%
  str_replace(fixed("$"), "") %>%
  as.numeric
stateTax = tax$stateTax %>%
  str_replace(fixed("$"), "") %>%
  as.numeric
head(cityTax)
# inspect the raw values that failed to convert (became NA)
head(tax$cityTax[ is.na(cityTax) ])
table(tax$cityTax[ is.na(cityTax) ])
head(tax$stateTax[ is.na(stateTax) ])
table(tax$stateTax[ is.na(stateTax) ])
tax$cityTax = cityTax
tax$stateTax = stateTax
sum(tax$cityTax, na.rm = TRUE)
sum(tax$cityTax, na.rm = TRUE)/1e6 # in millions of dollars
sum(tax$stateTax, na.rm = TRUE)
sum(tax$stateTax, na.rm = TRUE)/1e6
# 4. What is the 75th percentile of city and state tax paid by address type?
head(tax$propertyAddress)
tax$propertyAddress = str_trim(tax$propertyAddress)
head(tax$propertyAddress)
# flag addresses ending in "ST" or "STREET"
tax$street = str_detect(tax$propertyAddress, "ST$")
tax$street = str_detect(tax$propertyAddress, "STREET$") | tax$street
ss = str_split(tax$propertyAddress," ")
tab = table(sapply(ss, last)) # distribution of address suffixes
# 5. Split the data by ward into a list:
### tax_list = split(tax, tax$street)
# Using `tapply()` and `table()`
# a. how many observations are in each address type?
### sapply(tax_list, nrow)
sum(tax$street)
table(tax$street)
tapply(tax$propertyAddress,
       tax$street, length)
# b. what is the mean state tax per address type?
tax %>%
  group_by(street) %>%
  summarize(mean_state = mean(stateTax, na.rm = TRUE))
tapply(tax$stateTax, tax$street, mean, na.rm=TRUE)
## 75th percentile
## (fixed: argument is `probs`, spelled out instead of relying on partial
## argument matching with `prob`)
tapply(tax$stateTax, tax$street,
       quantile, probs = 0.75, na.rm=TRUE)
tapply(tax$stateTax, tax$street,
       quantile, na.rm=TRUE)
# 6. Make boxplots using base graphics showing cityTax
# by whether the property is a principal residence or not.
tax$resCode = str_trim(tax$resCode)
boxplot(log(cityTax+1) ~ resCode, data = tax)
# 7. Subset the data to only retain those houses that are principal residences.
#    (two equivalent spellings shown: %in% and ==)
pres = tax %>% filter( resCode %in% "PRINCIPAL RESIDENCE")
pres = tax %>% filter( resCode == "PRINCIPAL RESIDENCE")
# a) How many such houses are there?
dim(pres)
# b) Describe the distribution of property taxes on these residences.
hist(log2(pres$cityTax+1))
# 8. Convert the 'lotSize' variable to a numeric square feet variable.
# Tips: - 1 acre = 43560 square feet
#       - The hyphens represent inches (not decimals)
#       - Don't spend more than 5-10 minutes on this; stop and move on
tax$lotSize = str_trim(tax$lotSize) # trim to be safe
lot = tax$lotSize # for checking later
# first lets take care of acres
aIndex= c(grep("ACRE.*", tax$lotSize),
          grep(" %", tax$lotSize, fixed=TRUE))
head(aIndex)
head(lot[aIndex])
acre = tax$lotSize[aIndex] # temporary variable
## find and replace character strings
acre = gsub(" ACRE.*","",acre)
acre = gsub(" %","",acre)
table(!is.na(as.numeric(acre)))
head(acre[is.na(as.numeric(acre))],50)
## lets clean the rest
acre = gsub("-",".",acre,fixed=TRUE) # hyphen instead of decimal
head(acre[is.na(as.numeric(acre))])
table(!is.na(as.numeric(acre)))
acre = gsub("ACRES","", acre, fixed=TRUE)
head(acre[is.na(as.numeric(acre))])
# take care of individual mistakes
acre = gsub("O","0", acre, fixed=TRUE) # 0 vs O
acre = gsub("Q","", acre, fixed=TRUE) # Q, oops
acre = gsub(",.",".", acre, fixed=TRUE) # extra ,
acre = gsub(",","", acre, fixed=TRUE) # extra ,
acre = gsub("L","0", acre, fixed=TRUE) # leading L
acre[is.na(as.numeric(acre))]
acre2 = as.numeric(acre)*43560 # acres -> square feet
sum(is.na(acre2)) # all but one
#######################
## now square feet: entries look like "WIDTH X LENGTH", feet-inches as "F-I"
fIndex = grep("X", tax$lotSize)
ft = tax$lotSize[fIndex]
ft = gsub("&", "-", ft, fixed=TRUE)
ft = gsub("IMP ONLY ", "", ft, fixed=TRUE)
ft = gsub("`","1",ft,fixed=TRUE)
ft= sapply(str_split(ft, " "), first)
# wrapper for string split and sapply
#### ss = function(x, pattern, slot=1,...) sapply(strsplit(x,pattern,...), "[", slot)
width = sapply(str_split(ft,"X"), first)
# fixed: renamed from `length`, which shadowed base::length()
lotLength = sapply(str_split(ft,"X"), nth, 2)
## width: feet + inches/12
widthFeet = as.numeric(sapply(str_split(width, "-"), first))
widthInch = as.numeric(sapply(str_split(width, "-"),nth,2))/12
widthInch[is.na(widthInch)] = 0 # when no inches present
totalWidth = widthFeet + widthInch # add together
# length: feet + inches/12
lengthFeet = as.numeric(sapply(str_split(lotLength, "-"),first))
lengthInch = as.numeric(sapply(str_split(lotLength, "-",2),nth,2))/12
lengthInch[is.na(lengthInch)] = 0 # when no inches present
totalLength = lengthFeet + lengthInch
# combine together for square feet
sqrtFt = totalWidth*totalLength
ft[is.na(sqrtFt)] # what is left?
### combine together
tax$sqft = rep(NA)
tax$sqft[aIndex] = acre2
tax$sqft[fIndex] = sqrtFt
mean(!is.na(tax$sqft))
# already in square feet, easy!!
sIndex=c(grep("FT", tax$lotSize),
         grep("S.*F.", tax$lotSize))
sf = tax$lotSize[sIndex] # subset temporary variable
sqft2 = sapply(str_split(sf,"( |SQ|SF)"),first)
sqft2 = as.numeric(gsub(",", "", sqft2)) # remove , and convert
tax$sqft[sIndex] = sqft2
table(is.na(tax$sqft))
## progress!
#what remains?
lot[is.na(tax$sqft)]
|
4c3b1fad3f1d5f9e2b46fbd951cd8e751f964173 | f2a0fd6d339a9efad525701ab400e30e800c4b80 | /twothirds.R | ae3eb5fd7fa072f5e471c9bef154aa4708257775 | [] | no_license | 37beers/two-third-CT | 9aeb7714dc21d00d3a71827b4790cb3a7979bb9e | 31bd257128ecdd709bef8788291448fefc055293 | refs/heads/master | 2020-05-18T09:36:57.682528 | 2019-07-17T22:57:15 | 2019-07-17T22:57:15 | 184,330,548 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,906 | r | twothirds.R | #!/usr/bin/env Rscript
#TODO: Please always use a header. To make a function executable, add the Rscript above.
#TODO: This is a good package for making it commandline. See what happens when you chmod +x twothirds.R
# then run... /path/twothirds.R -f TCR.txt
suppressPackageStartupMessages(library("argparse"))
# Command-line interface: a single -f/--file argument naming the raw qPCR export.
parser <- ArgumentParser()
parser$add_argument("-f", "--file", action="store", dest="file",
                    help="file containing raw qPCR data to be read")
args <- parser$parse_args()
#TODO: avoid setwd. Always just spell out the full input path
#setwd("/Users/abeerobaid/desktop")
#data <- read.csv('/Users/abeerobaid/desktop/TCR.txt', header= FALSE)
#TODO: but using the commandline parsing might be easier. Also read.csv is not going to work bc .txt file
# With header=FALSE every raw instrument line lands in a single character
# column (V1) of `data`, one row per line of the export.
data <- read.csv(args$file, header=FALSE)
#TODO: I'm not sure what this meant to do...
#data
#TODO: You're not reading in the right part of the file here. Below is not a good strategy. Try combining this code with a little bit of standard bash.
#TODO: grep on a data frame is a strange combination. Also you want this for all wells of interest right?
# indices of the lines belonging to well A1 (tab-separated "A1\t..." records)
x <- grep("A1\t", data[,1])
#the first thing it finds is useless, so remove it
# (note: this actually drops the first TWO matches, not just one)
x <- x[-c(1,2)]
# Extract (cycle, fluorescence) pairs from the raw qPCR table for the row
# indices in `a`. Each selected row of the global `data` table is one
# tab-separated record: "<well>\t<cycle>\t<blue fluorescence>".
#
# Args:
#   a: integer vector of row indices into `data`
#
# Returns: data.frame with two numeric columns (cycle, blue fluorescence),
#   one row per index in `a` (zero rows when `a` is empty).
CT <- function(a) {
  # Preallocate instead of growing with c() inside the loop (avoids O(n^2)
  # copying); seq_along() is safe for empty input, unlike 1:length(a).
  cycle <- character(length(a))
  blue <- numeric(length(a))
  for (k in seq_along(a)) {
    fields <- unlist(strsplit(as.character(data[a[k], ]), "\t"))
    cycle[k] <- fields[2]
    blue[k] <- as.numeric(fields[3])
  }
  # Same data.frame construction as before, so downstream positional access
  # (frame[, 1], frame[, 2]) and the auto-generated column names still work.
  return(data.frame(as.numeric(cycle), as.numeric(blue)))
}
# Store the dataframe into a variable, and find the 2/3 fluorescent value.
frame <- CT(x)
# NOTE(review): this computes (2*max - min)/3; "two thirds of the way from
# min to max" would be (2*max + min)/3 -- confirm the intended baseline
# handling (the two agree only when the baseline minimum is ~0).
two.thirds <- (2*frame[which.max(frame[,2]),2]-frame[which.min(frame[,2]),2])/3
#TODO: plotting is not necessary
# plotting and finding the CT value: raw curve plus a degree-12 polynomial fit
plot(frame)
fit4 <- lm(frame[,2]~poly(frame[,1],12,raw=TRUE))
# fixed: `col` was previously passed inside predict() (where it was silently
# ignored) along with a bogus newdata frame; draw the fitted curve directly
lines(frame[,1], fitted(fit4), col="purple")
# interpolate the cycle at which fluorescence crosses the two-thirds level
approx(frame[,2],frame[,1],xout=two.thirds)
|
c983590c81460e39c9d7b3974b15a42c4342486b | 3339945341718312385c9272b58fa6c531dd2eb5 | /05_Code/02_Analysis/rcode/models/trialRT_ANOVA_SRcond_allData.R | b90e325d5321de2d2dbe4bd5c59cdf9c7c5d6026 | [] | no_license | KSuljic/E299 | 17781ec7063377ea291ca21730be94209654b1e3 | 4b5e8c78695c06461f9a4c2b18a891518fe3da72 | refs/heads/master | 2022-04-29T12:46:10.644389 | 2020-03-11T16:06:57 | 2020-03-11T16:06:57 | 135,292,890 | 0 | 0 | null | 2018-05-29T12:30:04 | 2018-05-29T12:30:04 | null | UTF-8 | R | false | false | 2,391 | r | trialRT_ANOVA_SRcond_allData.R | cL
# JAGS model specification for a Bayesian hierarchical (repeated-measures)
# ANOVA on trial RTs. Three within-subject factors -- confSet (8 levels),
# RespM (2 levels), task (2 levels) -- plus a subject factor S with
# per-subject deviations (aS) and factor-by-subject interaction terms
# (a1aS, a2aS, a3aS). Expected in the JAGS data list: y, Ndata, confSet,
# RespM, task, S, NSubj, yMean, ySD, sGammaShRa, aGammaShRa. The a*
# parameters are unconstrained effects; the block at the bottom converts
# them to sum-to-zero b* parameters and cell means m[,,] for reporting.
modelstring = "
model {
for ( i in 1:Ndata ) {
y[i] ~ dnorm( mu[i] ,1/(ySigma^2))
mu[i] <- a0 + a1[confSet[i]] + a2[RespM[i]] + a3[task[i]] + aS[S[i]] + # this is the full model with all interaction, included the interaction with the subject factor, this is only possible for when there is multiple data points per subject per cell
a1aS[confSet[i],S[i]] + a2aS[RespM[i],S[i]] + a3aS[task[i],S[i]]
}

ySigma ~ dgamma(sigmaSh, sigmaRa)
sigmaSh <- 1 + sigmaMode * sigmaRa
sigmaRa <- ( sigmaMode + sqrt( sigmaMode^2 + 4*sigmaSD^2 ) ) /(2*sigmaSD^2)
sigmaMode ~ dgamma(sGammaShRa[1],sGammaShRa[2])
sigmaSD ~ dgamma(sGammaShRa[1],sGammaShRa[2])
# BASELINE
a0 ~ dnorm( yMean , 1/(ySD*5)^2 )
#
# MAIN EFFECTS
for ( j1 in 1:8 ) { a1[j1] ~ dnorm( 0.0 , 1/a1SD^2 ) }
a1SD ~ dgamma(aGammaShRa[1],aGammaShRa[2])
for ( j2 in 1:2 ) { a2[j2] ~ dnorm( 0.0 , 1/a2SD^2 ) }
a2SD ~ dgamma(aGammaShRa[1],aGammaShRa[2])
for ( j3 in 1:2 ) { a3[j3] ~ dnorm( 0.0 , 1/a3SD^2 ) }
a3SD ~ dgamma(aGammaShRa[1],aGammaShRa[2])
# SUBJECT CONTRIBUTIONS
for ( jS in 1:NSubj ) { aS[jS] ~ dnorm( 0 , 1/SDa0S^2 ) }
SDa0S ~ dgamma(1.01005,0.1005) # mode=0.1,sd=10.0
# 2-WAY INTERACTIONS
for (j1 in 1:8){
for (js in 1:NSubj){
a1aS[j1,js] ~ dnorm( 0.0 , 1/a1aSSD^2 )
} }
a1aSSD ~ dgamma(aGammaShRa[1],aGammaShRa[2]) # or try a folded t (Cauchy)
for (j2 in 1:2){
for (js in 1:NSubj){
a2aS[j2,js] ~ dnorm( 0.0 , 1/a2aSSD^2 )
} }
a2aSSD ~ dgamma(aGammaShRa[1],aGammaShRa[2]) # or try a folded t (Cauchy)
for (j3 in 1:2){
for (js in 1:NSubj){
a3aS[j3,js] ~ dnorm( 0.0 , 1/a3aSSD^2 )
} }
a3aSSD ~ dgamma(aGammaShRa[1],aGammaShRa[2]) # or try a folded t (Cauchy)
# Convert a0,a1[],a2[],a3[] to sum-to-zero b0,b1[],b2[],b1b2[,] :
for ( j1 in 1:8) { for ( j2 in 1:2 ) { for ( j3 in 1:2 ) { for (js in 1:NSubj){
mm[j1,j2,j3,js] <- a0 + a1[j1] + a2[j2] + a3[j3] + aS[js] +
a1aS[j1,js] + a2aS[j2,js] + a3aS[j3,js]
} } } }
b0 <- mean( mm[1:2,1:2,1:2,1:NSubj] )
for ( j1 in 1:8 ) { b1[j1] <- mean( mm[j1,1:2,1:2,1:NSubj] ) - b0 }
for ( j2 in 1:2 ) { b2[j2] <- mean( mm[1:2,j2,1:2,1:NSubj] ) - b0 }
for ( j3 in 1:2 ) { b3[j3] <- mean( mm[1:2,1:2,j3,1:NSubj] ) - b0 }
for ( js in 1:NSubj ) { bS[js] <- mean( mm[1:2,1:2,1:2,js] ) - b0 }
for ( j1 in 1:8) { for ( j2 in 1:2 ) { for ( j3 in 1:2 ) {
m[j1,j2,j3]<- mean(mm[j1,j2,j3,1:NSubj])
}}}
}
" # close quote for modelstring
# The model text is passed to JAGS directly; uncomment to dump it to disk:
#writeLines(modelstring,con="model.txt")
{
#create list of all files
list <- list.files(directory, full.names= TRUE)
# create empty data set
dat <- vector(mode = "numeric", length = 0)
for(i in 1:length(list))
{
# Read File
tmp <- read.csv(list[i])
#Calculate csum
csum <- sum((!is.na(tmp$sulfate)) & (!is.na(tmp$nitrate)))
if (csum > threshold)
{
#Extract data of niteate and sulfate and calculate correlation between them
sul <- tmp[which(!is.na(tmp$sulfate)), ]
nit <- sul[which(!is.na(sul$nitrate)), ]
dat <- c(dat, cor(nit$sulfate, nit$nitrate))
}
}
return(dat)
}
cr <- corr("specdata", 150)
head(cr)
summary(cr)
cr <- corr("specdata", 400)
head(cr)
summary(cr)
cr <- corr("specdata", 5000)
summary(cr)
length(cr)
cr <- corr("specdata")
summary(cr)
length(cr)
|
d281cf2be398cb79da73329150e0d40ca2252b6b | 4ecc47b913858b6c03733ab8c3ddb15719b4e8f6 | /man/genericIsValuesMissing.Rd | d35b5974dab664e67067313a71efb14bd9aeec29 | [
"MIT"
] | permissive | LE2P/rBSRN | 07876b6beec27cb6938c6261be3f436d01d1fe10 | f65499c72f5f329f9c31b8a3993598edc8ba4d69 | refs/heads/master | 2023-01-03T22:24:40.891751 | 2020-11-02T07:44:39 | 2020-11-02T07:44:39 | 300,272,096 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 361 | rd | genericIsValuesMissing.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_utils.R
\name{genericIsValuesMissing}
\alias{genericIsValuesMissing}
\title{Generic function return true is the is missing mandatory variables}
\usage{
genericIsValuesMissing()
}
\value{
TRUE is there is mandatory value missing / FALSE if not.
}
\description{
Internal function.
}
|
d34923bc92b07bac568d000f0f2b83fbbb796292 | ff80500b960fb64aebefd3851801ebbd7dfc0c2e | /run_analysis.R | 674ee73e654d8251f12aea7cae4f62728609a8f8 | [] | no_license | AndrewsOR/clean-the-data | 5df14484cd76e1a7820130182e3c6d31783cf7f5 | f85018b879935c22e4f766873878ea760941fdb1 | refs/heads/master | 2021-05-27T23:44:38.650770 | 2014-10-23T15:53:29 | 2014-10-23T15:53:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,133 | r | run_analysis.R | ###############################################################################
# Run_analysis.R (Getting & Cleaning Data Course Project) 22 Oct 2014 #
# #
# The script has the following parts, required by the assignment: #
# #
# 0. (Implicit requirement) Downloads, unzips, and reads the data files. #
# #
# 1. Merges the training and the test sets to create one data set. #
# #
# 2. Extracts only the measurements on the mean and standard deviation for #
# each measurement. #
# #
# 3. Uses descriptive activity names to name the activities in the data set #
# #
# 4. A. Appropriately labels the data set with descriptive variable names. #
# B. (Implicit requirement) Creates the first tidy activity data set. #
# #
# 5. From the data set in step 4, creates [and exports] a second, #
# independent tidy data set with the average of each variable for each #
# activity and each subject. #
# #
# ...but please note that the order of parts is 0-1-3-4A-2-4B-5 (sorry) #
###############################################################################
# _____________________________________________________________________________
# 0) Download, unzip, and read the "Human Activity Recognition Using
#    Smartphones Data Set"
#
#    Produces: act_labs (activity id -> name lookup), feat_labs (feature
#    id -> name lookup), and the six raw tables X_/y_/subject_ for both the
#    "test" and "train" partitions.
#
#setwd("YOUR_WORKING_DIRECTORY_GOES_HERE")
# BUGFIX: the zip URL previously ended with a stray trailing space inside
# the string literal; that space is sent as part of the request path and
# breaks the download.
url <- paste0("https://d396qusza40orc.cloudfront.net/getdata%2F",
              "projectfiles%2FUCI%20HAR%20Dataset.zip")
f <- "har.zip"
download.file(url, f, method = "curl")
unzip(f)
har_loc <- "./UCI HAR Dataset/"
# Read the activity labels into act_labs
act_labs <- read.table(paste0(har_loc, "activity_labels.txt"),
                       col.names = c("activity_id", "activity_nm"),
                       colClasses = c("integer", "character"))
# Read the feature vector labels into feat_labs
feat_labs <- read.table(paste0(har_loc, "features.txt"),
                        col.names = c("feature_id", "feature_nm"),
                        colClasses = c("integer", "character"))
# Read the training and test sets: each combination of partition and file
# kind is loaded into an object named e.g. X_test, y_test, subject_test
# via assign().
for (set in c("test", "train")) {
  for (dim in c("X", "y", "subject")) {
    f <- paste0(har_loc, set, "/", dim, "_", set, ".txt")
    df <- paste0(dim, "_", set)
    cat("Reading ", f, "...\n")
    assign(df, read.table(f))
  }
}
# End of part 0
# -----------------------------------------------------------------------------
# _____________________________________________________________________________
# 1) Merge the training and the test sets to create one data set (named "X")
#    Combine the training and the test sets with activity and subject ids
#
# Rows are stacked train-first in the feature matrix, activity ids, and
# subject ids alike, so the three line up row-for-row.
X <- rbind(X_train, X_test)
# rbind() on the one-column y/subject data frames yields a one-column data
# frame; unlist() collapses it to a plain vector before assignment.
X$activity_id <- unlist(rbind(y_train, y_test))
X$subject_id <- unlist(rbind(subject_train, subject_test))
# End of part 1
# -----------------------------------------------------------------------------
# _____________________________________________________________________________
# 3) Use descriptive activity names to name the activities in the data set.
#    Use activity labels as factors instead of their respective activity_ids
#    (originally found in the "y" activity label files).
#
# Indexing act_labs$activity_nm by the integer activity_id assumes the ids
# are 1..nrow(act_labs) in the order of activity_labels.txt -- TODO confirm.
X$activity_nm <- factor(act_labs$activity_nm[X$activity_id],
                        levels = act_labs$activity_nm)
X$activity_id <- NULL # redundant now that activity_nm carries the label
# End of part 3
# -----------------------------------------------------------------------------
# _____________________________________________________________________________
# 4A) Appropriately label the data set with descriptive variable names.
#     Clean feature labels to use as variable names
#     (A more thorough regex would check for leading numbers and ensure
#     uniqueness after cleaning. But for now we will just strip punctuation.)
#
feature_names <- gsub("\\(|\\)", "", feat_labs$feature_nm) # Remove ()'s
feature_names<- gsub("([[:punct:]]|\\s)+", "_", feature_names) # punct => _
# Default read.table() column names are V1, V2, ...; replace exactly those
names(X)[grepl("^V\\d+$",names(X))] <- feature_names # replace V1, V2...
# End of part 4A
# -----------------------------------------------------------------------------
# _____________________________________________________________________________
# 2) Extract only the measurements on the mean and standard deviation.
#    Keep only activity_nm, subject_id, and vars *ending* with "mean" or
#    "std". Thus, we will keep variables that were originally
#    "tBodyAccMag-mean()" or "fBodyAccMag-std()" but NOT "fBodyAcc-mean()-X"
#    or "fBodyAccJerk-meanFreq()-Z". This was an "open question" per the
#    Community TA:
#    https://class.coursera.org/getdata-008/forum/thread?thread_id=24
#
X <- X[, grepl("^activity_nm$|^subject_id$|^.+_(mean|std)$", names(X),
               ignore.case = TRUE)]
# End of part 2
# -----------------------------------------------------------------------------
# _____________________________________________________________________________
# 4B) Produce tidy data set "tidyX"
#
# obs_id uniquely identifies the set of measurements taken in the same row,
# which we treat as parts of a single "observation".
X$obs_id <- seq_len(nrow(X))
library(tidyr)
library(dplyr)
# Reshape wide -> long over the measurement columns, split names like
# "tBodyAccMag_mean" into feature_nm + stat, then spread the statistics
# back out as mean_amt / std_amt columns.
# (gather()/separate()/spread() are superseded by pivot_longer() /
# separate_wider_delim() / pivot_wider() in current tidyr, but are kept
# here to preserve the original behavior exactly.)
# Style: left assignment instead of the original trailing "-> tidyX".
tidyX <- X %>%
  gather(key = metric, value = amount, -activity_nm,
         -subject_id, -obs_id) %>%
  separate(col = metric, into = c("feature_nm", "stat")) %>%
  mutate(stat = paste0(stat, "_amt"),
         feature_nm = as.factor(feature_nm)) %>%
  arrange(activity_nm, subject_id, feature_nm, stat, obs_id) %>%
  spread(stat, amount)
# End of part 4B
# -----------------------------------------------------------------------------
# _____________________________________________________________________________
# 5) Create summary "tidyXsumm" from 4B with average of each variable for each
#    activity and each subject. Write it to .txt file.
#
tidyXsumm <- tidyX %>%
  group_by(feature_nm, activity_nm, subject_id) %>%
  summarise(mean_mean = mean(mean_amt),
            mean_stdev = mean(std_amt))
write.table(x = tidyXsumm, file = "tidy_step_5.txt", row.names = FALSE)
# End of part 5
# -----------------------------------------------------------------------------
# _____________________________________________________________________________
# Extra) Write to codeBook.md with name and class of variables,
#        outputting the levels of the factor variables
#
# sink(split = TRUE) echoes the writeLines() output both to the console and
# to codeBook.md (appending). Columns are pulled with [[ ]] directly instead
# of the original attach()/get() pair, which put tidyXsumm on the search
# path for the duration of the loop.
sink(file = "codeBook.md", append = TRUE, type = "output", split = TRUE)
for (v in colnames(tidyXsumm)) {
  column <- tidyXsumm[[v]]
  writeLines(c("", ""))
  writeLines(paste0(v, ": ", class(column)))
  if (is.factor(column)) {
    # Numbered list of the factor levels
    writeLines(c("", paste0(seq_along(levels(column)), ". ", levels(column))))
  }
}
sink(file = NULL)
# End of Extra
# -----------------------------------------------------------------------------
ba078360951ad630842133eb6d49fd95d1cd6c88 | 53bda4ba2543bc576d0919073d29c31776faf56d | /man/create.quadrant.Rd | ed3dd13ca7d4dc1420d4dcb92fc264f7da93d8bd | [] | no_license | Rsoc/soc.ca | 18ab28ff0c136d25909dece9374889c3da93bc87 | 472b1d29684bcb2f350c3cb733566c6aeed6bd2c | refs/heads/master | 2023-08-24T23:10:14.788625 | 2023-08-09T06:32:54 | 2023-08-09T06:32:54 | 2,106,567 | 11 | 2 | null | 2015-07-12T17:50:08 | 2011-07-26T11:50:25 | R | UTF-8 | R | false | true | 993 | rd | create.quadrant.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_analysis.r
\name{create.quadrant}
\alias{create.quadrant}
\title{Create categories according to the quadrant position of each individual}
\usage{
create.quadrant(
object,
dim = c(1, 2),
cut.min = -0.125,
cut.max = 0.125,
cut.radius = 0.25
)
}
\arguments{
\item{object}{a soc.ca class object}
\item{dim}{the dimensions}
\item{cut.min}{Minimum cut value}
\item{cut.max}{Maximum cut value}
\item{cut.radius}{Radius of the center category}
}
\value{
Returns a character vector with category memberships
}
\description{
Creates a vector from two dimensions from a soc.ca object. Labels are the
cardinal directions with the first designated dimension running East - West.
The center category is a circle defined by \code{cut.radius}.
}
\examples{
example(soc.ca)
create.quadrant(result, dim = c(2, 1))
table(create.quadrant(result, dim = c(1, 3), cut.radius = 0.5))
}
\seealso{
\link{soc.mca}
}
|
1bcd8d312f2cd842f93000c15903d1455b8d5cb8 | 5bed56d19203d621ef28db176ab583f772f0f668 | /02_Demos/05_ecosystem/crosstalk_demo.R | c6b86dec0fe42e54f487bb1f46e9f53e53e051b7 | [] | no_license | julomoppet/Formation_Shiny | 5f2e5035b8208a17fd1ad6e317991a37329220be | 94032dedc21c0f1e9c74b8dd6a9836613571839d | refs/heads/master | 2022-11-17T17:09:43.623523 | 2020-06-29T20:10:30 | 2020-06-29T20:10:30 | 274,126,865 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,302 | r | crosstalk_demo.R | library(shiny)
library(DT)
library(plotly)
library(crosstalk)
# mtcars with the car names promoted from row names to a regular "rowname"
# column (used below as the crosstalk key and shown in the table).
m <- mtcars %>%
  tibble::rownames_to_column()
# UI: plotly scatterplot on top, the linked data table below, and a button
# to download the currently filtered rows (user-facing labels are French).
ui <- fluidPage(
  h1("Démo Crosstalk : intéractions entre Plotly & DT", align = "center"),
  plotlyOutput("x2"),
  DT::dataTableOutput("x1"),
  fluidRow(
    p(class = 'text-center', downloadButton('x3', 'Télécharger les données filtrées'))
  )
)
server <- function(input, output) {
  # Shared data keyed by car name so that plotly selections and DT row
  # selections refer to the same rows.
  d <- SharedData$new(m, ~rowname)

  # Scatterplot: highlight the rows currently selected in the table.
  output$x2 <- renderPlotly({
    s <- input$x1_rows_selected
    if (length(s) == 0) {
      # No table selection: draw the linked (crosstalk) plot and let
      # plotly's own box/lasso selection drive the highlighting.
      d %>%
        plot_ly(x = ~mpg, y = ~disp, mode = "markers", color = I('black'), name = 'Non filtrées') %>%
        layout(showlegend = TRUE) %>%
        highlight("plotly_selected", color = I('red'), selected = attrs_selected(name = 'Filtrées'))
    } else {
      # Rows selected in the table: plot everything in black and overlay
      # the selected rows in red.
      base_plot <- m %>%
        plot_ly() %>%
        add_trace(x = ~mpg, y = ~disp, mode = "markers", color = I('black'), name = 'Non filtrées') %>%
        layout(showlegend = TRUE)
      # Selected data. BUGFIX: this trace was named 'Filtered', inconsistent
      # with the French 'Filtrées' used by the other branch of this plot.
      add_trace(base_plot, data = m[s, , drop = FALSE], x = ~mpg, y = ~disp, mode = "markers",
                color = I('red'), name = 'Filtrées')
    }
  })

  # Table: rows in the current plotly selection are styled white-on-black.
  output$x1 <- DT::renderDataTable({
    selected <- m[d$selection(), ]
    dt <- DT::datatable(m)
    if (NROW(selected) == 0) {
      dt
    } else {
      DT::formatStyle(dt, "rowname", target = "row",
                      color = DT::styleEqual(selected$rowname, rep("white", length(selected$rowname))),
                      backgroundColor = DT::styleEqual(selected$rowname, rep("black", length(selected$rowname))))
    }
  })

  # Download: export the table-selected rows if any, otherwise whatever the
  # plotly selection currently holds.
  output$x3 <- downloadHandler('mtcars-filtered.csv', content = function(file) {
    s <- input$x1_rows_selected
    if (length(s) > 0) {
      write.csv(m[s, , drop = FALSE], file)
    } else {
      write.csv(m[d$selection(), ], file)
    }
  })
}
shinyApp(ui, server)
|
2fa956ab91ffa89c72ab056c6e93588e99c1bbc0 | 8be41cde935d804917a23f7ae89c427c38d4cb2e | /Results_bank_data.R | 785100b0182828055dac1ad999e36fa8a750042c | [] | no_license | tobiaslov/Master-thesis | 90501af644ce0c38da73b2849e840860582fe4b3 | 3721e65b212497e591a2a2650e6a39fc442ba6e5 | refs/heads/master | 2020-05-19T00:51:51.567410 | 2019-05-03T12:06:46 | 2019-05-03T12:06:46 | 184,745,340 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,199 | r | Results_bank_data.R | library(tidyverse)
library(GGally)
library("gridExtra")
############################################################
# #
# Bank data #
# #
############################################################
################################### Accuracy #######################################
# Build one combined accuracy file per selection method by row-binding all
# per-simulation CSVs in each results folder.
# NOTE(review): the combined CSV is written into the same folder that
# list.files() scans, so a second run of this block would also pick up the
# previously written combined file -- confirm before re-running.
# Reading D-optimality accuracy files
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/A2_det")
a2_det_list <- list.files()
# Combining all simulations into one file for D-optimality
a2_det_data <- do.call(rbind, lapply(a2_det_list, function(x) read.csv(file = x, sep = ";")))
write.table(a2_det_data, file = "bank_data_A2_det_acc.csv", sep = ";")
# Reading A-optimality accuracy files
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/A2_trace")
a2_trace_list <- list.files()
# Combining all simulations into one file for A-optimality
a2_trace_data <- do.call(rbind, lapply(a2_trace_list, function(x) read.csv(file = x, sep = ";")))
write.table(a2_trace_data, file = "bank_data_A2_trace_acc.csv", sep = ";")
# Reading Random algorithm files (a single pre-combined CSV)
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/A1")
a1_data <- read.csv("bank_data_algorithm1_acc_2019-04-06-09.47.39.csv", sep = ";")
write.table(a1_data, file = "bank_data_A1_acc.csv", sep = ";", row.names = F)
################ Summarizing ACCURACY #############
# For each method: column means over all simulations give the average
# accuracy at each labeled instance (0..50).
# Random algorithm Accuracy
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/A1")
a1_acc_summary <- read.csv("bank_data_A1_acc.csv", sep = ";") %>%
  colMeans() %>%
  as.data.frame() %>%
  rename(Accuracy = ".") %>%
  mutate(Group = "Random",
         Instance = c(0:50))
# D-optimality Accuracy
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/A2_det")
a2_det_acc_summary <- read.csv("bank_data_A2_det_acc.csv", sep = ";") %>%
  colMeans() %>%
  as.data.frame() %>%
  rename(Accuracy = ".") %>%
  mutate(Group = "D-optimality",
         Instance = c(0:50))
# A-optimality Accuracy
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/A2_trace")
a2_trace_acc_summary <- read.csv("bank_data_A2_trace_acc.csv", sep = ";") %>%
  colMeans() %>%
  as.data.frame() %>%
  rename(Accuracy = ".") %>%
  mutate(Group = "A-optimality",
         Instance = c(0:50))
# Combining all three data sets
acc_data_summary <- a1_acc_summary %>%
  rbind(a2_trace_acc_summary,a2_det_acc_summary)
# Writing combined accuracy table
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/")
write.table(acc_data_summary, file = "acc_data_summary.csv", sep = ";", row.names = F)
acc_data_summary <- read.csv("acc_data_summary.csv", sep = ";")
# Plotting Accuracy per instance for all three methods; the horizontal line
# marks a fixed reference accuracy (hard-coded; source not shown here).
acc_data_summary %>%
  ggplot(aes ( x = Instance, y = Accuracy, group = Group,
               color = Group, shape = Group)) +
  geom_point() +
  geom_line() +
  geom_hline(yintercept = .8268102)
################ TUKEY TEST ######################################### Did not use
# Exploratory group-comparison tests on the full per-simulation accuracy
# table; per the section headers, none of these were used in the final
# analysis.
bank_data_acc_G_full <- read.csv("bank_data_acc_G_full", sep = ";") %>%
  rename(Accuracy = V51)
bank_data_acc_G_full %>%
  ggplot(aes ( x = Accuracy, fill = Group)) +
  geom_histogram(position = "identity")
# One-way ANOVA of accuracy by method, followed by Tukey's HSD
a1 <- aov(Accuracy ~ Group, data = bank_data_acc_G_full)
summary(a1)
TukeyHSD(a1)
###################### SHAPIRO ############################# Did not use
# Normality checks of the accuracy distribution within each method
b_det <- bank_data_acc_G_full %>%
  filter(Group == "Det")
shapiro.test(b_det[ ,1])
b_tr <- bank_data_acc_G_full %>%
  filter(Group == "Trace")
shapiro.test(b_tr[ ,1])
b_rand <- bank_data_acc_G_full %>%
  filter(Group == "Random")
shapiro.test(b_rand[ ,1])
############### PROPORTION TEST #################### Did not use
# Convert mean accuracy per method into correct/incorrect counts out of
# 3067 (presumably the test-set size -- confirm), then compare proportions
bank_data_acc_G_full_means <- bank_data_acc_G_full %>%
  group_by(Group) %>%
  summarise(prop = mean(Accuracy)) %>%
  mutate(Correct = round(prop*3067, 0),
         Incorrect = round(3067 - Correct, 0),
         Total = Correct + Incorrect)
Count_data <- bank_data_acc_G_full_means[1:3,3:4] %>%
  as.data.frame()
rownames(Count_data) <- c("Det", "Random", "Trace")
Count_data <- as.matrix(Count_data)
chisq.test(Count_data)
fisher.test(Count_data)
prop.test(Count_data)
################################## Mann-Whitney ############# Did not use
wilcox.test(b_tr[,1],b_det[ ,1])
################################### Kruskal-Wallis test ############## Did not use
kruskal.test(Accuracy ~ Group, data = bank_data_acc_G_full)
############################# Histogram of Accuracy at Last instance #################
############################ WITH BOOTSTRAP #########################################
# Load bootstrap means of last-instance accuracy for each method, and trim
# the top and bottom 2,500 rows (2.5% each, assuming 100,000 bootstrap
# replicates -- TODO confirm) to get a central 95% interval.
# Random algorithm Accuracy Bootstrap
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/A1")
a1_acc_bootstrap <- read.csv("bank_data_A1_bootstrap.csv", sep = ";") %>%
  mutate(Group = "Random") %>%
  rename(Accuracy = ".")
# Taking 95% of data simulated
a1_acc_bootstrap_95 <- a1_acc_bootstrap %>%
  arrange(desc(Accuracy)) %>%
  slice(-1:-2500) %>%
  slice(-95001:-97500)
# A-optimality Accuracy Bootstrap
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/A2_trace")
a2_trace_acc_bootstrap <- read.csv("bank_data_A2_trace_bootstrap.csv", sep = ";") %>%
  mutate(Group = "A-optimality") %>%
  rename(Accuracy = ".")
# Taking 95% of data simulated
a2_trace_acc_bootstrap_95 <- a2_trace_acc_bootstrap %>%
  arrange(desc(Accuracy)) %>%
  slice(-1:-2500) %>%
  slice(-95001:-97500)
# D-optimality Accuracy Bootstrap
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy/A2_det")
a2_det_acc_bootstrap <- read.csv("bank_data_A2_det_bootstrap.csv", sep = ";") %>%
  mutate(Group = "D-optimality") %>%
  rename(Accuracy = ".")
# Taking 95% of data simulated
a2_det_acc_bootstrap_95 <- a2_det_acc_bootstrap %>%
  arrange(desc(Accuracy)) %>%
  slice(-1:-2500) %>%
  slice(-95001:-97500)
############################# Boostrap long format #########################
# Combining Bootstrap means of last instance
Acc_bootstrap_G <- a1_acc_bootstrap %>%
  rbind(a2_trace_acc_bootstrap,a2_det_acc_bootstrap)
# Combining 95 % CI Bootstrap means of last instance
Acc_bootstrap_G_95 <- a1_acc_bootstrap_95 %>%
  rbind(a2_trace_acc_bootstrap_95,a2_det_acc_bootstrap_95)
# Write tables
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Accuracy")
# Full bootstrap file
write.table(Acc_bootstrap_G, file = "acc_data_summary_bootstrap.csv", sep = ";", row.names = F)
# 95 % bootstrap file
write.table(Acc_bootstrap_G_95, file = "acc_data_summary_bootstrap_95.csv", sep = ";", row.names = F)
###################### Bootrap wide format ###################### Did not use
# Combining files by cbind instead
Acc_bootstrap_G_wide <- a1_acc_bootstrap %>%
  cbind(a2_trace_acc_bootstrap,a2_det_acc_bootstrap)
# Creating new column names
colnames(Acc_bootstrap_G_wide)[c(1,3,5)] <- c("Random", "Trace", "Det")
colnames(Acc_bootstrap_G_wide)[c(2,4,6)] <- c("g1", "g2", "g3")
# Removing unnecessary columns
Acc_bootstrap_G_wide <- Acc_bootstrap_G_wide %>%
  select(-c(g1, g2, g3))
# Writing table
write.table(Acc_bootstrap_G_wide, file = "acc_data_summary_bootstrap_wide.csv", sep = ";", row.names = F)
######################## Summary of Boostrap long format ###########################
Acc_bootstrap_G <- read.csv("acc_data_summary_bootstrap.csv", sep = ";")
# Histogram of bootstrap means
Acc_bootstrap_G %>%
  ggplot(aes (x = Accuracy, fill = Group)) +
  geom_histogram(bins = 30, alpha = .9, position = "identity")
# 95 % (trimmed) distributions
Acc_bootstrap_G_95 <- read.csv("acc_data_summary_bootstrap_95.csv", sep = ";")
# NOTE(review): range() has no `digits` argument -- `digits = 4` is
# absorbed into `...` and treated as an extra data value, so this call
# likely does not do what "Getting more decimals" intends.
apply(Acc_bootstrap_G_95, 2, range, digits = 4)
Acc_bootstrap_G_95 %>%
  group_by(Group) %>%
  summarise(minacc = sprintf("%1f", min(Accuracy)),
            maxacc = sprintf("%1f", max(Accuracy)))
# Plotting 95% distributions of bootstrap means
Acc_bootstrap_G_95 %>%
  ggplot(aes (x = Accuracy, fill = Group)) +
  geom_histogram(bins = 30, alpha = .9, position = "identity")
################################### MAD ###########################################
# Same build as the Accuracy section, but for the mean absolute deviation
# (MAD) files of each method.
# D-optimality mad files
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/A2_Det")
a2_det_list <- list.files()
# Combining all simulations into one file for D-optimality
a2_det_data <- do.call(rbind, lapply(a2_det_list, function(x) read.csv(file = x, sep = ";")))
write.table(a2_det_data, file = "bank_data_A2_det_mad.csv", sep = ";")
# A-optimality mad files
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/A2_trace")
a2_trace_list <- list.files()
# Combining all simulations into one file for A-optimality
a2_trace_data <- do.call(rbind, lapply(a2_trace_list, function(x) read.csv(file = x, sep = ";")))
write.table(a2_trace_data, file = "bank_data_A2_trace_mad.csv", sep = ";")
# Random algorithm mad files (a single pre-combined CSV)
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/A1")
a1_data <- read.csv("bank_data_algorithm1_mad_2019-04-06-09.47.39.csv", sep = ";")
write.table(a1_data, file = "bank_data_A1_mad.csv", sep = ";", row.names = F)
################ Summarizing MAD ##############################
# Column means over all simulations give the average MAD per instance 0..50.
# Random algorithm file
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/A1")
a1_mad_summary <- read.csv("bank_data_A1_mad.csv", sep = ";") %>%
  colMeans() %>%
  as.data.frame() %>%
  rename(MAD = ".") %>%
  mutate(Group = "Random",
         Instance = c(0:50))
# D-optimality file
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/A2_det")
a2_det_mad_summary <- read.csv("bank_data_A2_det_mad.csv", sep = ";") %>%
  colMeans() %>%
  as.data.frame() %>%
  rename(MAD = ".") %>%
  mutate(Group = "D-optimality",
         Instance = c(0:50))
# A-optimality file
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/A2_Trace")
a2_trace_mad_summary <- read.csv("bank_data_A2_trace_mad.csv", sep = ";") %>%
  colMeans() %>%
  as.data.frame() %>%
  rename(MAD = ".") %>%
  mutate(Group = "A-optimality",
         Instance = c(0:50))
# Combining MAD for all methods into one file
mad_data_summary <- a1_mad_summary %>%
  rbind(a2_trace_mad_summary,a2_det_mad_summary)
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/")
write.table(mad_data_summary, file = "mad_data_summary.csv", sep = ";", row.names = F)
mad_data_summary <- read.csv("mad_data_summary.csv", sep = ";")
# Summarizing mad with plots; the horizontal line marks a fixed reference
# MAD level (hard-coded; source not shown here).
mad_data_summary %>%
  ggplot(aes ( x = Instance, y = MAD, group = Group,
               color = Group, shape = Group)) +
  geom_point() +
  geom_line() +
  geom_hline(yintercept = 0.2603522)
# Inspect the final-instance MAD for each method
mad_data_summary %>%
  filter(Instance == 50)
########################################## MAD BOOTSTRAP ###############################
# Load bootstrap means of last-instance MAD for each method and trim the
# top and bottom 2,500 rows (2.5% each, assuming 100,000 bootstrap
# replicates -- TODO confirm) to get a central 95% interval.
# Random algorithm MAD Bootstrap
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/A1")
a1_mad_bootstrap <- read.csv("bank_data_A1_bootstrap.csv", sep = ";") %>%
  mutate(Group = "Random") %>%
  rename(MAD = ".")
# Taking 95% of data simulated
a1_mad_bootstrap_95 <- a1_mad_bootstrap %>%
  arrange(desc(MAD)) %>%
  slice(-1:-2500) %>%
  slice(-95001:-97500)
# A-optimality MAD Bootstrap
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/A2_Trace")
a2_trace_mad_bootstrap <- read.csv("bank_data_A2_trace_bootstrap.csv", sep = ";") %>%
  mutate(Group = "A-optimality") %>%
  rename(MAD = ".")
# Taking 95% of data simulated
a2_trace_mad_bootstrap_95 <- a2_trace_mad_bootstrap %>%
  arrange(desc(MAD)) %>%
  slice(-1:-2500) %>%
  slice(-95001:-97500)
# D-optimality MAD Bootstrap
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD/A2_Det")
a2_det_mad_bootstrap <- read.csv("bank_data_A2_det_bootstrap.csv", sep = ";") %>%
  mutate(Group = "D-optimality") %>%
  rename(MAD = ".")
# Taking 95% of data simulated
a2_det_mad_bootstrap_95 <- a2_det_mad_bootstrap %>%
  arrange(desc(MAD)) %>%
  slice(-1:-2500) %>%
  slice(-95001:-97500)
# Combining Bootstrap means of last instance
mad_bootstrap_G <- a1_mad_bootstrap %>%
  rbind(a2_trace_mad_bootstrap,a2_det_mad_bootstrap)
# Combining 95 % CI Bootstrap means of last instance
mad_bootstrap_G_95 <- a1_mad_bootstrap_95 %>%
  rbind(a2_trace_mad_bootstrap_95,a2_det_mad_bootstrap_95)
# Write tables
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD")
# Full bootstrap file
write.table(mad_bootstrap_G, file = "mad_data_summary_bootstrap.csv", sep = ";", row.names = F)
# 95 % bootstrap file
write.table(mad_bootstrap_G_95, file = "mad_data_summary_bootstrap_95.csv", sep = ";", row.names = F)
############################# MAD BOOTSTRAP VISUALIZATION ###################
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD")
mad_bootstrap_G <- read.csv("mad_data_summary_bootstrap.csv", sep = ";")
# Histogram of the full distribution of bootstrap MAD means, by method
mad_bootstrap_G %>%
  ggplot(aes (x = MAD, fill = Group)) +
  geom_histogram(bins = 30, alpha = .9, position = "identity")
# 95 % (trimmed) distributions
mad_bootstrap_G_95 <- read.csv("mad_data_summary_bootstrap_95.csv", sep = ";")
# Per-column ranges printed with more decimals.
# BUGFIX: range() has no `digits` argument, so in the original call
# `digits = 4` fell into `...` and was treated as an extra data value,
# distorting the computed range; print(..., digits =) achieves the intent.
print(apply(mad_bootstrap_G_95, 2, range), digits = 4)
# Per-method min/max of the trimmed MAD distribution; sprintf prints the
# full default six decimals instead of R's rounded console display
mad_bootstrap_G_95 %>%
  group_by(Group) %>%
  summarise(minacc = sprintf("%1f", min(MAD)),
            maxacc = sprintf("%1f", max(MAD)))
# Plotting 95% distributions of bootstrap means.
# BUGFIX: this previously plotted Acc_bootstrap_G_95 / Accuracy -- a
# copy-paste from the Accuracy section; in this MAD section it should plot
# the trimmed MAD bootstrap distribution.
mad_bootstrap_G_95 %>%
  ggplot(aes (x = MAD, fill = Group)) +
  geom_histogram(bins = 30, alpha = .9, position = "identity")
################################ Determinant / Trace Histograms ##########################
# Did not use -- exploratory look at the determinant/trace criterion values.
# A_2_det
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Det_Trace/A2_Det")
a2_det_list <- list.files()
a2_det_data <- do.call(rbind, lapply(a2_det_list, function(x) read.csv(file = x, sep = ";")))
write.table(a2_det_data, file = "bank_data_A2_det_dt.csv", sep = ";")
# A_2_trace
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Det_Trace/A2_Trace")
a2_trace_list <- list.files()
a2_trace_data <- do.call(rbind, lapply(a2_trace_list, function(x) read.csv(file = x, sep = ";")))
write.table(a2_trace_data, file = "bank_data_A2_trace_dt.csv", sep = ";")
# A1
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Det_Trace/A1")
a1_data <- read.csv("bank_data_algorithm1_dt_2019-04-06-09.47.39.csv", sep = ";")
write.table(a1_data, file = "bank_data_A1_dt.csv", sep = ";", row.names = F)
################ Determinant / Trace Histograms ##############################
# Did not use -- tag each method's values with a Group label and combine.
# A1
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Det_Trace/A1")
a1_dt_summary <- read.csv("bank_data_A1_dt.csv", sep = ";") %>%
  mutate(Group = "Random")
# A2 Det
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Det_Trace/A2_Det")
a2_det_dt_summary <- read.csv("bank_data_A2_det_dt.csv", sep = ";") %>%
  mutate(Group = "Det")
# A2 Trace
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Det_Trace/A2_Trace")
a2_trace_dt_summary <- read.csv("bank_data_A2_trace_dt.csv", sep = ";") %>%
  mutate(Group = "Trace")
dt_data_summary <- a1_dt_summary %>%
  rbind(a2_trace_dt_summary,a2_det_dt_summary)
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/Det_Trace/")
write.table(dt_data_summary, file = "dt_data_summary.csv", sep = ";", row.names = F)
dt_data_summary <- read.csv("dt_data_summary.csv", sep = ";")
# Histogram of Trace values for the Random method; the commented-out
# subset() lines were optional filters for extreme values.
dt_data_summary %>%
#  subset(., Trace < 200) %>%
#  subset(., Determinant < 1e-20) %>%
  filter( Group == "Random") %>%
  ggplot(aes (x = Trace)) +
  geom_histogram(bins = 30, alpha = .5, position = "identity")
################ Investigating design matrix g=20 ###########################
# Scatter of age vs. balance for each method's design matrix: rows 1-50 are
# the initial sample (row_number() < 51) and later rows were added by the
# selection algorithm. The three plots are stacked for comparison.
setwd("C:/Users/tobia/Dropbox/Master Thesis/Sims/Bank_data_results/MAD_g20/")
x_rand <- read.csv("bank_data_rand_X.csv", sep = ";")
x_plot_rand <- x_rand %>%
  mutate(Observation = as.factor(ifelse(row_number()<51, "Initial sample", "Added"))) %>%
  ggplot(aes (x = age, y = balance, color = Observation)) +
  geom_jitter() +
  scale_x_continuous(name = "Age") +
  scale_y_continuous(name = "Balance") +
  ggtitle("Random")
x_det <- read.csv("bank_data_det_X.csv", sep = ";")
x_plot_det <- x_det %>%
  mutate(Observation = as.factor(ifelse(row_number()<51, "Initial sample", "Added"))) %>%
  ggplot(aes (x = age, y = balance, color = Observation)) +
  geom_jitter() +
  scale_x_continuous(name = "Age") +
  scale_y_continuous(name = "Balance")+
  ggtitle("D-optimality")
x_trace <- read.csv("bank_data_trace_X.csv", sep = ";")
x_plot_trace <- x_trace %>%
  mutate(Observation = as.factor(ifelse(row_number()<51, "Initial sample", "Added"))) %>%
  ggplot(aes (x = age, y = balance, color = Observation)) +
  geom_jitter() +
  scale_x_continuous(name = "Age") +
  scale_y_continuous(name = "Balance")+
  ggtitle("A-optimality")
grid.arrange(x_plot_det, x_plot_trace, x_plot_rand, nrow = 3)
|
3c6458038951c945da2abc97a60fa52f8aa79601 | c25ca7b930919db4299d8ee392daa3ed5c651180 | /man/prMergeClr.Rd | c6d9cfdb8deefcb4ca32398d3556f3024f3d4040 | [] | no_license | gforge/htmlTable | ecd0e56b54da74a085c5fc545c78181eb254fcb1 | 82ffe152c9b59559686a8a7bb74c9121d9539cf3 | refs/heads/master | 2022-07-21T22:01:41.100252 | 2022-07-07T18:14:47 | 2022-07-07T18:14:47 | 28,265,082 | 74 | 32 | null | 2022-07-06T14:53:56 | 2014-12-20T11:17:53 | R | UTF-8 | R | false | true | 656 | rd | prMergeClr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/htmlTable_helpers_mergeClr.R
\name{prMergeClr}
\alias{prMergeClr}
\title{Merges multiple colors}
\usage{
prMergeClr(clrs)
}
\arguments{
\item{clrs}{The colors}
}
\value{
\code{character} A hexadecimal color
}
\description{
Uses the \code{\link[grDevices:colorRamp]{colorRampPalette()}} for merging colors.
\emph{Note:} When merging more than 2 colors the order in the color
presentation matters. Each color is merged with its neigbors before
merging with next. If there is an uneven number of colors the middle
color is mixed with both left and right side.
}
\keyword{internal}
|
59a698b5dea464caf91f53f9ddb789f7a9b02bac | 621a9fb699eb610fa4b540196e9f82dd5da4fd16 | /man/plot_polar_smooths.Rd | 4864e84d8745aa33fc201c2270aaa9635ce936d5 | [
"MIT"
] | permissive | stefanocoretta/rticulate | f49d65465b13df4980fdb615d94a5c59a50936ba | 6a841e0a38c53c42df6bcf504b23468cffcebf97 | refs/heads/main | 2022-09-26T07:03:44.872518 | 2022-09-04T16:07:00 | 2022-09-04T16:07:00 | 89,402,150 | 4 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,934 | rd | plot_polar_smooths.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_polar_smooths}
\alias{plot_polar_smooths}
\title{Plot smooths from a polar \code{gam}}
\usage{
plot_polar_smooths(
model,
series,
comparison = NULL,
origin = NULL,
facet_terms = NULL,
conditions = NULL,
exclude_random = TRUE,
series_length = 100,
split = NULL,
sep = "\\\\.",
time_series
)
}
\arguments{
\item{model}{A \code{gam} or \code{bam} model object.}
\item{series}{An unquoted expression indicating the model term that defines the series on which smoothing is applied. This is the term that is displayed on the x-axis when plotting.}
\item{comparison}{An unquoted expression indicating the model term for which the comparison will be plotted.}
\item{origin}{The coordinates of the origin as a vector of \code{c(x, y)} coordinates.}
\item{facet_terms}{An unquoted formula with the terms used for faceting.}
\item{conditions}{A list of quosures with \code{quos} specifying the levels to plot from the model terms not among \code{series}, \code{comparison}, or \code{facet_terms}.}
\item{exclude_random}{Whether to exclude random smooths (the default is \code{TRUE}).}
\item{series_length}{An integer indicating how many values along the time series to use for predicting the outcome term.}
\item{split}{Columns to separate as a named list.}
\item{sep}{Separator between columns (default is \code{"\\\\."}, which is the default used by \code{separate()}). If character, it is interpreted as a regular expression.}
\item{time_series}{Deprecated, use \code{series} instead.}
}
\value{
An object of class \code{\link[ggplot2]{ggplot}}.
}
\description{
It plots the smooths of a polar GAM fitted with \code{polar_gam()}.
}
\examples{
\donttest{
library(dplyr)
tongue_it01 <- filter(tongue, speaker == "it01")
pgam <- polar_gam(Y ~ s(X, by = as.factor(label)), data = tongue_it01)
plot_polar_smooths(pgam, X, label)
}
}
|
9d7ac3a0366c1390e8879d922da9c6d7498f6255 | 9e301d83216651988c4ecb40d9f6087ec5945c75 | /extract/Compare_WorldBank.R | 6e2ea58f49feaf7b648d61315e9e20916c33c302 | [] | no_license | mcooper/DHSwealth | baba74bee09aabef2982b674b9d0e0a9ed1618fa | 0f0b25efcec20e78bd6d4e6545e63182e246ea45 | refs/heads/master | 2021-06-22T02:25:18.748308 | 2021-01-28T04:27:02 | 2021-01-28T04:27:02 | 189,031,285 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,698 | r | Compare_WorldBank.R | library(tidyverse)
library(countrycode)
library(zoo)
library(Hmisc)
# DHS wealth export; add ISO3 country codes and a plain survey-year column
# for joining against the World Bank series below.
data <- read.csv('~/mortalityblob/dhs/wealth_export.csv')
data$iso3 <- countrycode(data$country, 'country.name', 'iso3c')
data$year <- data$survey_single_year
fill <- function(x, upto = 5) {
  # Linearly interpolate interior NA runs (via zoo::na.approx) and then
  # carry the first/last observed values outward by at most `upto`
  # positions, leaving more distant edge values as NA.
  #
  # x:    numeric vector (possibly all NA, in which case it is returned
  #       unchanged).
  # upto: maximum number of edge positions to extend the end values into.
  # Returns a vector the same length as x.
  if (all(is.na(x))) {
    return(x)
  }
  x <- na.approx(x, na.rm = FALSE)
  obs <- which(!is.na(x))
  first <- obs[1]
  last <- obs[length(obs)]
  # Clamp the fill window to the vector bounds (replaces the original
  # scalar ifelse() calls; behavior is identical since index 0 was a no-op).
  lo <- max(first - upto, 1L)
  hi <- min(last + upto, length(x))
  x[lo:first] <- x[first]
  x[hi:last] <- x[last]
  x
}
# I think this WB data is the same as povcal data (it was for a few places I spot checked)
# but I'm not sure if it's all the same.
# World Bank poverty headcount series (indicator SI.POV.DDAY): reshape the
# per-year columns to long format, attach ISO3 codes, and gap-fill each
# country's series with fill() above (interior interpolation, edges
# extended by up to 7 years).
wb <- read.csv('~/mortalityblob/dhs/API_SI.POV.DDAY_DS2_en_csv_v2_1928965.csv', skip=4) %>%
  select(-`Country.Name`, -`Indicator.Name`, -`Indicator.Code`) %>%
  gather(year, phc, -Country.Code) %>%
  # read.csv prefixes numeric column names with "X" (e.g. "X2015");
  # substr(2, 5) strips that prefix to recover the year
  mutate(year = as.numeric(substr(year, 2, 5)),
         iso3 = countrycode(Country.Code, 'wb', 'iso3c')) %>%
  select(-Country.Code) %>%
  filter(!is.na(iso3)) %>%
  group_by(iso3) %>%
  mutate(phc = fill(phc, upto=7))
# Left-join the WB headcount onto each survey row (keep all survey rows,
# drop unmatched WB rows), then drop surveys without a filled phc value.
comb <- merge(data, wb, all.x = TRUE, all.y = FALSE)
# we have 432 surveys
# upto == 7 > 393 surveys with data
# upto == 5 > 370 surveys with data
# upto == 0 > 292 surveys with data
comb <- comb %>%
  filter(!is.na(phc))
# Get weighted quantiles.
# Patched copy of Hmisc's wtd.quantile (normwt hard-wired to FALSE); the
# two blocks tagged "new" below implement the fix discussed in the linked
# GitHub issue: drop observations with NA weights, and rescale small
# weight totals up to 1e6 before tabulating.
wtd.quantile<- function (x, weights = NULL, probs = c(0, 0.25, 0.5, 0.75, 1),
                          type = c("quantile", "(i-1)/(n-1)", "i/(n+1)", "i/n"),
                          na.rm = TRUE) {
  # Function taken from HMISC, but issue solved which is documented here: https://github.com/harrelfe/Hmisc/issues/97#issuecomment-429634634
  normwt = FALSE
  # Unweighted case: fall through to the base quantile()
  if (!length(weights)) return(quantile(x, probs = probs, na.rm = na.rm))
  type <- match.arg(type)
  if (any(probs < 0 | probs > 1)) stop("Probabilities must be between 0 and 1 inclusive")
  # Build "25%"-style names for the output vector
  nams <- paste(format(round(probs * 100, if (length(probs) >
                                              1) 2 - log10(diff(range(probs))) else 2)), "%", sep = "")
  if(na.rm & any(is.na(weights))){ ###### new: drop data whose weight is NA
    i<- is.na(weights)
    x <- x[!i]
    weights <- weights[!i]
  }
  i <- weights <= 0 # new: kill negative and zero weights and associated data
  if (any(i)) {
    x <- x[!i]
    weights <- weights[!i]
  }
  if (type == "quantile") {
    if(sum(weights) < 1000000 ) {weights<- weights*1000000/sum(weights)} ##### new: rescale small weight totals
    # Weighted frequency table: unique x values and their summed weights
    w <- wtd.table(x, weights, na.rm = na.rm, normwt = normwt,
                   type = "list")
    x <- w$x
    wts <- w$sum.of.weights
    n <- sum(wts)
    # Interpolate between the bracketing order statistics for each prob
    order <- 1 + (n - 1) * probs
    low <- pmax(floor(order), 1)
    high <- pmin(low + 1, n)
    order <- order%%1
    allq <- approx(cumsum(wts), x, xout = c(low, high), method = "constant",
                   f = 1, rule = 2)$y
    k <- length(probs)
    quantiles <- (1 - order) * allq[1:k] + order * allq[-(1:k)]
    names(quantiles) <- nams
    return(quantiles)
  }
  # Non-"quantile" types: invert the weighted ECDF instead
  w <- wtd.Ecdf(x, weights, na.rm = na.rm, type = type, normwt = normwt)
  structure(approx(w$ecdf, w$x, xout = probs, rule = 2)$y,
            names = nams)
}
# Within each survey, flag children whose weight-for-height (three variants)
# falls at or below the survey-specific quantile given by the national
# poverty headcount (phc is a percentage, hence the /100).
combq <- comb %>%
  group_by(survey) %>%
  mutate(wfh_q = wfh <= quantile(wfh, probs=mean(phc, na.rm=T)/100),
         wfh_q2 = wfh2 <= quantile(wfh2, probs=mean(phc, na.rm=T)/100),
         wfh_q3 = wfh3 <= quantile(wfh3, probs=mean(phc, na.rm=T)/100))
# Keep only the columns needed downstream and export to CSV.
comb2 <- combq %>%
  select(iso3, year, urban_rural, hhweight, country, survey, program, wfh, wfh2, wfh3,
         phc, wfh_q, wfh_q2, wfh_q3, wealth_quintile, wealth_factor)
write.csv(comb2, '~/mortalityblob/dhs/wealth_export_chi.csv', row.names=F)
|
c73bb67ef8becd694e519e80f6ecb1f47251dd03 | e5b0acd8a255c9491d27fc7706db6833b151db22 | /inst/gha/validate-test-files.R | 300f5c9d9db89121ce935d69dad6ca31c9275509 | [
"MIT"
] | permissive | rstudio/shinycoreci | 52b867e603a207d1e62552bf006572f6365f85fe | 05cb467a217972a5f838d18296ee701307a2430f | refs/heads/main | 2023-08-31T14:59:49.494932 | 2023-08-14T14:49:17 | 2023-08-14T14:49:17 | 227,222,013 | 38 | 5 | NOASSERTION | 2023-09-11T18:29:27 | 2019-12-10T21:46:45 | HTML | UTF-8 | R | false | false | 3,916 | r | validate-test-files.R | # Execute with `source("inst/gha/validate-test-files.R")`
# Validates every app under inst/apps: unique 3-digit app numbers, a canonical
# shinytest2 runner, shinyjster wiring consistency, explicit manual-app
# markers, and no {shinycoreci} usage inside apps. Errors are collected per
# app and reported together at the end.
errors_found <- list()
# App folders are named like "123-some-app"; extract the 3-digit prefix.
app_folders <- basename(list.dirs("inst/apps", recursive = FALSE))
app_folder_nums <- sub("^(\\d\\d\\d)-.*$", "\\1", app_folders)
# Can not call `setdiff()`. This internally calls `unique()`
# (which would hide the very duplicates we are checking for below).
app_folder_nums <- app_folder_nums[!(app_folder_nums %in% c("000"))]
if (any(duplicated(app_folder_nums))) {
  stop("Duplicate app numbers found: ", paste0(app_folder_nums[duplicated(app_folder_nums)], collapse = ", "))
}
for (app_path in list.dirs("inst/apps", recursive = FALSE)) {
  tryCatch({
    app_files <- dir(app_path, pattern = "\\.(R|Rmd)$", full.names = TRUE)
    tests_path <- file.path(app_path, "tests")
    if (dir.exists(tests_path)) {
      # At most one test runner script is allowed in tests/.
      runners <- dir(tests_path, pattern = "R$")
      if (length(runners) > 1) {
        stop("More than 1 test runner found in ", app_path, ". Found: ", paste0(runners, collapse = ", "))
      }
      # Verify simple testthat.R: it must exist and contain exactly the
      # single canonical line calling shinytest2.
      testthat_path <- file.path(tests_path, "testthat.R")
      if (!file.exists(testthat_path)) {
        stop("Missing `testthat.R` for app: ", app_path)
      }
      testthat_lines <- readLines(testthat_path)
      if (length(testthat_lines) > 1) {
        stop("Non-basic testthat script found for ", testthat_path, ". Found:\n", paste0(testthat_lines, "\n"))
      }
      if (testthat_lines != "shinytest2::test_app()") {
        stop("Non-shinytest2 testthat script found for ", testthat_path, ". Found:\n", paste0(testthat_lines, "\n"))
      }
      # Verify shinyjster content: if a shinyjster test file exists, the app
      # sources must call both the server and the JS half of shinyjster.
      shinyjster_file <- file.path(tests_path, "testthat", "test-shinyjster.R")
      if (file.exists(shinyjster_file)) {
        for (jster_txt in c("shinyjster_server(", "shinyjster_js(")) {
          found <- FALSE
          for (app_file in app_files) {
            if (any(grepl(jster_txt, readLines(app_file), fixed = TRUE))) {
              found <- TRUE
              break
            }
          }
          if (!found) {
            stop(app_path, " did not contain ", jster_txt, " but contains a `./tests/testthat/test-shinyjster.R")
          }
        }
      }
    } else {
      # Test for manual app: apps without a tests/ folder must be explicitly
      # marked via shinycoreci::::is_manual_app somewhere in their sources.
      found <- FALSE
      for (app_file in app_files) {
        if (any(grepl("shinycoreci::::is_manual_app", readLines(app_file), fixed = TRUE))) {
          found <- TRUE
          break
        }
      }
      if (!found) {
        stop(
          "No `./", app_path, "/tests` folder found for non-manual app.\n",
          "Either add tests with `shinytest2::use_shinytest2('", app_path, "')`\n",
          "Or set to manual by calling `shinycoreci::use_manual_app('", app_path, "')`"
        )
      }
    }
    # Make sure shinycoreci is not used within an app
    for (file in dir(app_path, recursive = TRUE, full.names = TRUE, pattern = "\\.(R|Rmd)$")) {
      # Ignore first 000 apps
      if (grepl("^inst/apps/000-", file)) next
      file_lines <- readLines(file)
      if (any(grepl("shinycoreci)", file_lines, fixed = TRUE))) {
        stop("File `", file, "` contains library() or require() call to {shinycoreci}. Remove usage of {shinycoreci}.")
      }
      # The four-colon form is the intentional escape hatch; mask it before
      # searching for ordinary `shinycoreci::` usage.
      file_lines <- gsub("shinycoreci::::", "shinycoreci____", file_lines)
      if (any(grepl("shinycoreci::", file_lines, fixed = TRUE))) {
        stop("File `", file, "` contains usage of {shinycoreci}. Replace this code.")
      }
    }
  }, error = function(e) {
    # `<<-` is required here: the handler runs in its own function scope.
    errors_found[[length(errors_found) + 1]] <<- e
  })
}
if (length(errors_found) > 0) {
  for (e in errors_found) {
    message("\n", e)
  }
  stop("Errors found when validating apps")
}
# warns <- warnings()
# if (length(warns) > 0) {
#   warn_txt <- Map(names(warns), warns, f = function(msg, expr) { paste0(msg, " : ", as.character(as.expression(expr)), "\n") })
#   stop("Warnings found when validating apps:\n", paste0(warn_txt, collapse = ""))
# }
message("No errors found when validating apps")
|
cd0d0cf512df6fd7b542bf377925528ddf51c1ee | 65abe9a7747cf2470d2607b52cd28a306dfd541a | /man/cor.with.Rd | faa200adb558a9aae9cdd998f0e2d50ffd11c25f | [] | no_license | cran/NCmisc | 22c73010bf15ef8b08a231f875aee74f018c6506 | cf149ef8aaf77e6b7e3013372b90e6d23fc2980b | refs/heads/master | 2022-11-04T13:34:38.299963 | 2022-10-17T08:15:22 | 2022-10-17T08:15:22 | 17,681,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,686 | rd | cor.with.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NCmisc.R
\name{cor.with}
\alias{cor.with}
\title{Simulate a correlated variable}
\usage{
cor.with(x, r = 0.5, preserve = FALSE, mn = NA, st = NA)
}
\arguments{
\item{x}{existing variable, to which you want to simulate a new correlated variable}
\item{r}{the 'expected' correlation you want to target (randomness
will mean that the actual correlation will vary around this value)}
\item{preserve}{logical, whether to preserve the same mean and standard deviation(SD)
as x, for the new variable}
\item{mn}{optional, set the mean for the new simulated variable [must also set st if using this]}
\item{st}{optional, set the SD for the new simulated variable [must also set mn if using this]}
}
\value{
return the new variable with an expected correlation of 'r' with x
}
\description{
Simulate a variable correlated at level 'r' with vector x (of the same length). Can
either 'preserve' the mean and standard deviation of x, leave the result standardized,
or select a new mean 'mn' and standard deviation 'st'.
}
\examples{
X <- rnorm(10,100,14)
cor.with(X,r=.5) # create a variable correlated .5 with X
cor(X,cor.with(X)) # check the actual correlation
# some variability in the actual correlation, so run 1000 times:
print(mean(replicate(1000,{cor(X,cor.with(X))})))
cor.with(X,preserve=TRUE) # preserve original mean and standard deviation
X[c(4,10)] <- NA # works fine with NAs, but new var will have same missing
cor.with(X,mn=50,st=2) # specify new mean and standard deviation
}
\references{
http://www.uvm.edu/~dhowell/StatPages/More_Stuff/CorrGen.html
}
\seealso{
\code{\link{sim.cor}}
}
\author{
Nicholas Cooper
}
|
66549798ef6acc1b0ed79af199a1a373403f6670 | 7ebe128fc17cdc0e2f534dbe5940774e98da4ce8 | /man/lasso.Rd | a616263ab14b97027b0b14e21981623bb4a3f9cc | [] | no_license | cran/bamlss | 89f8d08be4599c03120acb9ed097c31916d1ef21 | 5535fb038104cdd3df08eccb92863a778cd56e75 | refs/heads/master | 2023-07-17T04:24:23.253981 | 2023-07-04T06:30:02 | 2023-07-04T06:30:02 | 82,776,249 | 2 | 5 | null | null | null | null | UTF-8 | R | false | false | 8,843 | rd | lasso.Rd | \name{la}
\alias{la}
\alias{lasso}
\alias{opt_lasso}
\alias{lasso_plot}
\alias{lasso_stop}
\alias{lasso_coef}
\alias{lasso_transform}
\title{Lasso Smooth Constructor}
\description{
Smooth constructors and optimizer for Lasso penalization with \code{\link{bamlss}}. The
penalization is based on a Taylor series approximation of the Lasso penalty.
}
\usage{
## Smooth constructor function.
la(formula, type = c("single", "multiple"), ...)
## Single Lasso smoothing parameter optimizer.
opt_lasso(x, y, start = NULL, adaptive = TRUE, lower = 0.001, upper = 1000,
nlambda = 100, lambda = NULL, multiple = FALSE, verbose = TRUE,
digits = 4, flush = TRUE, nu = NULL, stop.nu = NULL,
ridge = .Machine$double.eps^0.5, zeromodel = NULL, ...)
lasso(x, y, start = NULL, adaptive = TRUE, lower = 0.001, upper = 1000,
nlambda = 100, lambda = NULL, multiple = FALSE, verbose = TRUE,
digits = 4, flush = TRUE, nu = NULL, stop.nu = NULL,
ridge = .Machine$double.eps^0.5, zeromodel = NULL, ...)
## Lasso transformation function to set
## adaptive weights from an unpenalized model.
lasso_transform(x, zeromodel, nobs = NULL, ...)
## Plotting function for opt_lasso() optimizer.
lasso_plot(x, which = c("criterion", "parameters"),
spar = TRUE, model = NULL, name = NULL, mstop = NULL,
retrans = FALSE, color = NULL, show.lambda = TRUE,
labels = NULL, digits = 2, ...)
## Extract optimum stopping iteration for opt_lasso() optimizer.
## Based on the minimum of the information criterion.
lasso_stop(x)
## Extract retransformed Lasso coefficients.
lasso_coef(x, ...)
}
\arguments{
\item{formula}{A formula like \code{~ x1 + x2 + ... + xk} of variables which should be
penalized with Lasso.}
\item{type}{Should one single penalty parameter be used or multiple parameters, one for each
covariate in \code{formula}.}
\item{x}{For function \code{opt_lasso()} and \code{lasso_transform()} the \code{x} list, as returned
from function \code{\link{bamlss.frame}}, holding all model matrices and other information that
is used for fitting the model. For the plotting function and
\code{lasso_stop()}/\code{lasso_coef()} the
corresponding \code{\link{bamlss}} object fitted with the \code{opt_lasso()} optimizer.}
\item{y}{The model response, as returned from function \code{\link{bamlss.frame}}.}
\item{start}{A vector of starting values. Note, Lasso smoothing parameters will be dropped.}
\item{adaptive}{Should adaptive weights be used for fused Lasso terms?}
\item{lower}{Numeric. The minimum lambda value.}
\item{upper}{Numeric. The maximum lambda value.}
\item{nlambda}{Integer. The number of smoothing parameters for which coefficients should be
estimated, i.e., the vector of smoothing parameters is build up as a sequence from
\code{lower} to \code{upper} with length \code{nlambda}.}
\item{lambda}{Numeric. A sequence/vector of lambda parameters that should be used.}
\item{multiple}{Logical. Should the lambda grid be expanded to search for multiple lambdas, one for
  each distributional parameter.}
\item{verbose}{Print information during runtime of the algorithm.}
\item{digits}{Set the digits for printing when \code{verbose = TRUE}. If the optimum lambda value
is plotted, the number of decimal decimal places to be used within \code{lasso_plot()}.}
\item{flush}{use \code{\link{flush.console}} for displaying the current output in the console.}
\item{nu}{Numeric or logical. Defines the step length for parameter updating of a model term,
useful when the algorithm encounters convergence problems. If \code{nu = TRUE} the step length
parameter is optimized for each model term in each iteration of the backfitting algorithm.}
\item{stop.nu}{Integer. Should step length reduction be stopped after \code{stop.nu} iterations
of the Lasso algorithm?}
\item{ridge}{A ridge penalty parameter that should be used when finding adaptive weights, i.e.,
parameters from an unpenalized model. The ridge penalty is used to stabilize the estimation
in complex models.}
\item{zeromodel}{A model containing the unpenalized parameters, e.g., for each \code{la()}
terms one can place a simple ridge penalty with \code{la(x, ridge = TRUE, sp = 0.1)}. This
way it is possible to find the unpenalized parameters that can be used as adaptive
weights for fusion penalties.}
\item{nobs}{Integer, number of observations of the data used for modeling. If not supplied
\code{nobs} is taken from the number of rows from the model term design matrices.}
\item{which}{Which of the two provided plots should be created, character or integer \code{1} and \code{2}.}
\item{spar}{Should graphical parameters be set by the plotting function?}
\item{model}{Character selecting for which model the plot should be created.}
\item{name}{Character, the name of the coefficient group that should be plotted. Note that
the string provided in \code{name} will be removed from the labels on the 4th axis.}
\item{mstop}{Integer vector, defines the path length to be plotted.}
\item{retrans}{Logical, should coefficients be re-transformed before plotting?}
\item{color}{Colors or color function that creates colors for the group paths.}
\item{show.lambda}{Logical. Should the optimum value of the penalty parameter lambda be shown?}
\item{labels}{A character string of labels that should be used on the 4th axis.}
\item{\dots}{Arguments passed to the subsequent smooth constructor function.
\code{lambda} controls the starting value of the penalty parameter, \code{const} the constant
that is added within the penalty approximation. Moreover, \code{fuse = 1} enforces nominal
fusion of categorical variables and \code{fuse = 2} ordered fusion within \code{la()} Note that
\code{la()} terms with and without fusion should not be mixed when using the \code{opt_lasso()}
optimizer function.
For the optimizer \code{opt_lasso()} arguments passed to function \code{\link{bfit}}.}
}
\value{
For function \code{la()}, similar to function \code{\link[mgcv]{s}} a simple smooth
specification object.
For function \code{opt_lasso()} a list containing the following objects:
\item{fitted.values}{A named list of the fitted values based on the last lasso iteration
of the modeled parameters of the selected distribution.}
\item{parameters}{A matrix, each row corresponds to the parameter values of one boosting iteration.}
\item{lasso.stats}{A matrix containing information about the log-likelihood, log-posterior
and the information criterion for each lambda.}
}
\references{
Andreas Groll, Julien Hambuckers, Thomas Kneib, and Nikolaus Umlauf (2019). Lasso-type penalization in
the framework of generalized additive models for location, scale and shape.
\emph{Computational Statistics & Data Analysis}.
\doi{10.1016/j.csda.2019.06.005}
Oelker Margreth-Ruth and Tutz Gerhard (2015). A uniform framework for combination of
penalties in generalized structured models. \emph{Adv Data Anal Classif}.
\doi{10.1007/s11634-015-0205-y}
}
\seealso{
\code{\link[mgcv]{s}}, \code{\link[mgcv]{smooth.construct}}
}
\examples{
\dontrun{## Simulated fusion Lasso example.
bmu <- c(0,0,0,2,2,2,4,4,4)
bsigma <- c(0,0,0,-2,-2,-2,-1,-1,-1)
id <- factor(sort(rep(1:length(bmu), length.out = 300)))
## Response.
set.seed(123)
y <- bmu[id] + rnorm(length(id), sd = exp(bsigma[id]))
## Estimate model:
## fuse=1 -> nominal fusion,
## fuse=2 -> ordinal fusion,
## first, unpenalized model to be used for adaptive fusion weights.
f <- list(y ~ la(id,fuse=2,fx=TRUE), sigma ~ la(id,fuse=1,fx=TRUE))
b0 <- bamlss(f, sampler = FALSE)
## Model with single lambda parameter.
f <- list(y ~ la(id,fuse=2), sigma ~ la(id,fuse=1))
b1 <- bamlss(f, sampler = FALSE, optimizer = opt_lasso,
criterion = "BIC", zeromodel = b0)
## Plot information criterion and coefficient paths.
lasso_plot(b1, which = 1)
lasso_plot(b1, which = 2)
lasso_plot(b1, which = 2, model = "mu", name = "mu.s.la(id).id")
lasso_plot(b1, which = 2, model = "sigma", name = "sigma.s.la(id).id")
## Extract coefficients for optimum Lasso parameter.
coef(b1, mstop = lasso_stop(b1))
## Predict with optimum Lasso parameter.
p1 <- predict(b1, mstop = lasso_stop(b1))
## Full MCMC, needs lasso_transform() to assign the
## adaptive weights from unpenalized model b0.
b2 <- bamlss(f, optimizer = FALSE, transform = lasso_transform,
zeromodel = b0, nobs = length(y), start = coef(b1, mstop = lasso_stop(b1)),
n.iter = 4000, burnin = 1000)
summary(b2)
plot(b2)
ci <- confint(b2, model = "mu", pterms = FALSE, sterms = TRUE)
lasso_plot(b1, which = 2, model = "mu", name = "mu.s.la(id).id", spar = FALSE)
for(i in 1:8) {
abline(h = ci[i, 1], lty = 2, col = "red")
abline(h = ci[i, 2], lty = 2, col = "red")
}
}
}
\keyword{regression}
|
8d25fafff67084c03aa6e74565bbc163074faeba | acf6fbcc9ebe2a6403d04f8553ef0d51845e0f90 | /R/IMF_number.R | fb979bf2fa11a7c621f8651556aa6a64498ee4a6 | [
"MIT"
] | permissive | SeifMejri21/VVD | 294d5c8253d6e0b3deac6b614280ce77edbecd53 | 14b97b82160c9322adb972a37190dbde898f4246 | refs/heads/main | 2023-05-29T07:21:40.502688 | 2021-06-08T00:48:22 | 2021-06-08T00:48:22 | 374,054,403 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,002 | r | IMF_number.R | #' Finds appropriate number of IMFs for a given time series
#'
#' @param ts time series to decompose in xts format
#'
#' @return the appropriate number of IMF s
#' @import fpp
#' @export
#'
#' @examples
#'library(fpp)
#'data(a10)
#' VVD::IMF_number(a10)
#'
IMF_number <-function(ts){
if(class(ts)!="ts"){
stop("you must pass a ts type time series in argument")
}
else{
test_vector<-list()
eemd_dec<-Rlibeemd::eemd(ts, num_imfs = 1000 , num_siftings = 10,
ensemble_size = 50, threads = 1)
eemd_dec_df<-as.data.frame(eemd_dec)
reality_vec<-c()
for (j in 1:1000) {
extrems<-Rlibeemd::extrema(eemd_dec_df[,j])
mins_list<-extrems[["minima"]]
maxs_list<-extrems[["maxima"]]
nb_min<-length(mins_list)/2
nb_max<-length(maxs_list)/2
if(!((nb_min<=3) && (nb_max<=3))){
reality_vec[j]<-FALSE}
else{reality_vec[j]<-TRUE}
}
for (m in 1:1000) {
if(reality_vec[m] == TRUE){
break
}
}
return(m)
}
}
|
c9217e6692a0207e38a2c3e0bd9efab9f5afc120 | 16f69763aa041f9ff7c569d7d9307a6f2ce98498 | /fig5_periodic_pos/avg_TA_align/avg_TA_align.R | c1762c28ab87ed9c8a8310ae51683afd0c167deb | [] | no_license | aquaflakes/individual_analyses | 3751c9d79c8b1e37c7c2e05890538755dcb54747 | 73a0c47e5f2e2fe877b0212ca085726036c83465 | refs/heads/master | 2021-01-06T09:35:40.769249 | 2020-02-18T08:17:33 | 2020-02-18T08:17:33 | 241,279,140 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,896 | r | avg_TA_align.R | fjComm::clear_()
label384=fjComm::gen_384_label()
posCnt <- function(well,isTFctrl=F,dint="TA")
{
# check "TA" phase
readsfile= paste0("~/Nut_zhuData/seqFiles2/FJ4.5CAP2r_PE/allreads/Adpt_Trimmed_Reads/2_Trim_adaptor_kmer/Trulig147*-CAP2r-147c5Nu-",well,"-*.fq.gz")
# if(isTFctrl) readsfile= paste0("~/Nut_zhuData/seqFiles2/FJ4.5CAP2r_PE/allreads/Adpt_Trimmed_Reads/2_Trim_adaptor_kmer/Trulig147*-CAP2r-TFctrl147c4-",well,"-*.fq.gz")
readsfile= Sys.glob(readsfile)
seq= read_csv(readsfile,col_names = F) %>% rmdup(1)
seq_lines= nrow(seq)
sig_kcnt= kmerCntBit(strings = seq[[1]],k = 2L,diffLen = F,collapse = F)
c0_file= "~/Nut_zhuData/seqFiles2/FJ4.4CAP2_PE/allreads/FJ4.4_PE_c0/dual_trim/2_Trim_adaptor_kmer/subset_100000_Trulig147v1IIIPEx200IIIc0_S3_R1_001.peared_trimmed.fq.gz"
seq_c0= read_csv(c0_file,col_names = F)
c0_lines= nrow(seq_c0)
c0_kcnt= oneOffCalc("c0_count/c0_count",calcFun = function(x){kmerCntBit(strings = seq_c0[[1]],k = 2L,diffLen = F,collapse = F)},asObj = T,overWrite = F,useScriptPath = T)
# corr for bk
sub_mat= sweep(c0_kcnt,2,colMeans(c0_kcnt),"-") / c0_lines * seq_lines # (each di-nt - mean) / c0_lines * seq_lines
sig_kcnt_corr= sig_kcnt- sub_mat
return(sig_kcnt_corr[,"TA"])
}
result= parallel(label384,fun = function(well) oneOffCalc(
saved_oneOff_filename = paste0("indiv_TA_posCnt/",well,"_.robj"),
calcFun = posCnt,
calcFunParamList = list(well,isTFctrl=F, dint="TA"),asObj = T,useScriptPath = T
),workers = 25)
result= do.call(rbind,result) %>% colMeans()
TA_posCnt_allavg= gg_heat2D_diag(data_frame(pos1=seq_along(result),pos2=seq_along(result),result))+ theme(axis.title = element_blank(),axis.text = element_blank(),axis.ticks = element_blank())
gg_save_all(TA_posCnt_allavg,width = 4.157,height = 0.6)
forScale=data_frame(pos1=seq_along(result),pos2=seq_along(result),result) %>% gg_heat2D_MI()
gg_save_pdf(forScale,width = 6,height = 6)
|
29f8d8a0459008f0d5ce202114a72a6116e168b0 | 3f3606856688c02fe9e27e70298db399a942dfba | /bank data.R | be4037a27fca828a22dda6ef6db037187bb38bfe | [] | no_license | monika2612/logistic-linear-regression | acd7aa85271860cc5e74dc569fcd0211f6543a5c | 0f21c5be05de566b972f3e7acf83a127828a2310 | refs/heads/master | 2020-12-19T15:13:46.101929 | 2020-01-23T10:22:00 | 2020-01-23T10:22:00 | 235,770,764 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 790 | r | bank data.R | bankdata <- read.csv(file.choose())
View(bankdata)
attach(bankdata)
logit <- glm(y~age+factor(default)+balance+factor(housing)+factor(loan)+day+duration+campaign+pdays+previous,data=bankdata,family = "binomial")
summary(logit)
prob1 <- predict(fit1,type="response")
# Logistic Regression
exp(coef(logit))
# Confusion matrix table
prob <- predict(logit,type=c("response"),bankdata)
prob
confusion<-table(prob>0.5,bankdata$y)
confusion
# Model Accuracy
Accuracy<-sum(diag(confusion)/sum(confusion))
Accuracy
# ROC Curve
library(ROCR)
rocrpred<-prediction(prob,bankdata$y)
rocrperf<-performance(rocrpred,'tpr','fpr')
plot(rocrperf,colorize=T,text.adj=c(-0.2,1.7))
# More area under the ROC Curve better is the logistic regression model obtained
|
046b0a8c6c58116b5781229342702b01d388dd71 | db49c420c47af8f4b9629864d8a50abad8c41a78 | /MFA/man/plot_pfs.Rd | c3fd4ec116789650ed6a018783c3e073287728ac | [] | no_license | crossedbanana/Multiple-Factor-Analysis-in-R | b65e44e5246cdb72aafbfd9f77f358c6759fc119 | 718bc9c8a62bc7341d51bded34416adc09608efa | refs/heads/master | 2021-01-23T06:58:50.978688 | 2017-03-28T06:38:56 | 2017-03-28T06:38:56 | 86,415,047 | 1 | 0 | null | 2017-03-28T04:37:03 | 2017-03-28T04:32:16 | null | UTF-8 | R | false | true | 900 | rd | plot_pfs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfa.R
\name{plot_pfs}
\alias{plot_pfs}
\title{Plot of Partial Factor Scores}
\usage{
plot_pfs(dimension1, dimension2,
rownames_vec = as.character(1:length(dimension1)), img1 = "black",
img2 = "black", img3 = "black")
}
\arguments{
\item{dimension1}{vector of class \code{"numeric"}}
\item{dimension2}{vector of class \code{"numeric"}}
\item{rownames_vec}{vector of row labels}
\item{img1}{optional image of class \code{"png"}}
\item{img2}{optional image of class \code{"png"}}
\item{img3}{optional image of class \code{"png"}}
}
\value{
plot of partial factor scores
}
\description{
Takes two dimensions of partial factor scores and returns a plot of the two dimensions
}
\examples{
dimension1 <- rnorm(20)
dimension2 <- rnorm(20)
plot_compromise(dimension1, dimension2)
}
|
c02f2fc24af448278e7a3a477536895cbcecd4ea | d26ca01de6365ed4c8bc2c77397bfe3f564aece6 | /codes/classification/forest.R | f8a3327180b7e1ee83bf9b4bd1a95ee76aa698c2 | [] | no_license | tohweizhong/RUGS-RF | f8aa81bd54dbbaf61c3a414fc7a850e7aa0fa653 | 7c65a738bc222f4bde3c28b9e3fb036a57dbed7f | refs/heads/master | 2021-01-19T02:57:12.418764 | 2016-08-03T02:21:47 | 2016-08-03T02:21:47 | 36,554,778 | 1 | 10 | null | null | null | null | UTF-8 | R | false | false | 1,426 | r | forest.R | #install.packages("randomForest")
#library(randomForest)
# finally, the random forest model
rf.mod <- randomForest(type ~ ., data = spam.train,
mtry = floor(sqrt(num.var - 1)), # 7; only difference from bagging is here
ntree = 300,
proximity = TRUE,
importance = TRUE)
beep()
# Out-of-bag (OOB) error rate as a function of num. of trees:
plot(rf.mod$err.rate[,1], type = "l", lwd = 3, col = "blue",
main = "Random forest: OOB estimate of error rate",
xlab = "Number of Trees", ylab = "OOB error rate")
# tuning the mtry hyperparameter
tuneRF(subset(spam.train, select = -type),
spam.train$type,
ntreetry = 100)
title("Random forest: Tuning the mtry hyperparameter")
# variable importance
varImpPlot(rf.mod,
main = "Random forest: Variable importance")
# multidimensional scaling plot
# green samples are non-spam,
# red samples are spam
MDSplot(rf.mod,
fac = spam.train$type,
palette = c("green","red"),
main = "Random forest: MDS")
beep()
# now, let's make some predictions
rf.pred <- predict(rf.mod,
subset(spam.test,select = -type),
type="class")
# confusion matrix
print(rf.pred.results <- table(rf.pred, spam.test$type))
# Accuracy of our RF model:
print(rf.accuracy <- sum(diag((rf.pred.results))) / sum(rf.pred.results))
|
73448fd2b4ba43512514c5024e1cc04ef6adb056 | e55ca0be11db2d5b77df1829121a947032d5d111 | /shiny/regPoly/server.R | 9109a472972b215d633f61e9470480e6764536c4 | [] | no_license | diogoprov/iguir2 | 73a32df22626fa184a8e0126b3dee6203f04fbcc | a6fd33e8f2ab893c8568af3a268625ab485f7914 | refs/heads/master | 2021-01-20T01:11:59.936535 | 2016-09-03T23:26:24 | 2016-09-03T23:26:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,579 | r | server.R | ##-------------------------------------------
## server.R
library(shiny)
## Carrega template das aplicações elaboradas pelo projeto iguiR2
source("../template.R")
apropos("^update[A-Z]", ignore.case=FALSE)
get("cars")
shinyServer(function(input, output, session){
## Cabeçalho IGUIR2
output$header <- renderPrint({
template("TEMA")
})
observe({
da <- get(input$DATASET)
updateSelectInput(session,
inputId="Y",
choices=names(da),
selected=names(da)[1])
updateSelectInput(session,
inputId="X",
choices=names(da),
selected=names(da)[2])
})
REATIVA <- reactive({
DADOS <- get(input$DATASET)
DADOS <- DADOS[, c(input$X, input$Y)]
names(DADOS) <- c("x", "y")
if (input$TRANSFORM){
DADOS$y <- do.call(input$TRANSFUN, list(DADOS$y))
}
MODELO <- lm(y~poly(x, degree=input$DEGREE, raw=TRUE),
data=DADOS)
return(list(DADOS=DADOS, MODELO=MODELO))
})
output$AJUSTE <- renderPlot({
da <- REATIVA()$DADOS
m0 <- REATIVA()$MODELO
pred <- data.frame(x=seq(min(da[,"x"]), max(da[,"x"]), l=50))
a <- predict(m0, newdata=pred, interval="confidence")
pred <- cbind(pred, a)
plot(da[,"y"]~da[,"x"], xlab="x", ylab="y")
matlines(x=pred[,1], pred[,2:4], lty=c(1,2,2), col=1)
}, width=600, height=600)
output$ANOVA_SUMMARY <- renderPrint({
m0 <- REATIVA()$MODELO
cat("------------------------------------------------------------",
capture.output(anova(m0))[-(7:8)],
"------------------------------------------------------------",
capture.output(summary(m0))[-c(1:9)],
"------------------------------------------------------------",
sep="\n")
})
output$RESIDUOS <- renderPlot({
par(mfrow=c(2,2))
plot(REATIVA()$MODELO)
layout(1)
}, width=600, height=600)
# output$INFLUENCE <- renderDataTable({
# im <- influence.measures(REATIVA()$MODELO)
# colnames(im$infmat)[1:2] <- c("db.b0", "db.b1")
# formatC(x=round(im$infmat, digits=2),
# digits=2, format="f")
# })
output$INFLUENCE <- renderTable({
im <- influence.measures(REATIVA()$MODELO)
colnames(im$infmat)[1:2] <- c("db.b0", "db.b1")
im$infmat
}, digits=4)
})
|
0aac6e03bfb839a7d33ddc8c4b9aac41bdde2f33 | 10f9e1baf23dff8571f1aeb41c20e74360dd5bf6 | /posner80.R | 0349f95349cb752f549ce8f40a8e53ebccaf130a | [] | no_license | boryspaulewicz/posner80 | e71b87ce2aa51337046650a4230202dd0ae9108a | 8d379c3863e8d20f4b7df81c4c80a265a5b153c6 | refs/heads/master | 2021-01-17T11:13:48.343495 | 2016-06-03T21:30:31 | 2016-06-03T21:30:31 | 52,156,058 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,686 | r | posner80.R | ## -*- coding: utf-8 -*-
if(interactive())source('~/cs/code/r/tasks/task/task.R')
TASK.NAME = 'posner80'
MAX.REACTION.TIME = 3000
PRE.FIXATION.TIME.MIN = 500 ## 1000
PRE.FIXATION.TIME.MAX = 1000 ## 2000
FIXATION.TIME = 1000
POST.FIXATION.TIME = 100
## Obiekty graficzne
RECT = new(RectangleShape, WINDOW$get.size())
RECT$set.scale(c(.15, .25))
center(RECT, WINDOW)
RECT$set.outline.thickness(WINDOW$get.size()[1] * .01)
TXT$set.string("Proszę nacisnąć spację")
center(TXT, WINDOW)
FX = fixation(WINDOW)
CUER = new(ConvexShape, 3)
CUER$set.point(0, c(-.5, .5))
CUER$set.point(1, c(.5, 0))
CUER$set.point(2, c(-.5, -.5))
CUER$set.scale(WINDOW$get.size() * .02)
center(CUER, WINDOW)
CUEL = new(ConvexShape, 3)
CUEL$set.point(0, c(.5, .5))
CUEL$set.point(1, c(-.5, 0))
CUEL$set.point(2, c(.5, -.5))
CUEL$set.scale(WINDOW$get.size() * .02)
center(CUEL, WINDOW)
KEYS <<- c(Key.Left, Key.Right)
# Draw the two black placeholder boxes, one left and one right of center.
draw.boxes = function(){
    RECT$set.fill.color(c(0, 0, 0))
    for (x.frac in c(.25, .75)) {
        RECT$set.position(WINDOW$get.size() * c(x.frac, .5))
        WINDOW$draw(RECT)
    }
}
# Draw the green target rectangle inside the box on the given side
# ('left' or 'right').
draw.stim = function(side){
    RECT$set.fill.color(c(0, 1, 0))
    x.frac = if (side == 'left') .25 else .75
    RECT$set.position(WINDOW$get.size() * c(x.frac, .5))
    WINDOW$draw(RECT)
}
# Draw the arrow cue pointing towards the given side ('left' or 'right').
draw.cue = function(side){
    ## CUE$set.rotation(c(left = -180, right = 0)[side])
    ## center(CUE, WINDOW)
    cue = if (side == 'left') CUEL else CUER
    WINDOW$draw(cue)
}
# One trial of the cueing task, run as a state machine inside the render loop.
# Arguments: trial number, target side, whether a cue is shown, and whether
# the cue is valid (points at the target). Returns a list with the SOA,
# reaction time (capped at MAX.REACTION.TIME) and accuracy (2 = no response).
# Reads/writes globals managed by the task framework (WINDOW, CLOCK,
# KEY.RELEASED, CORRECT.KEY, ACC, RT - set externally by process.inputs()).
trial.code = function(trial, side = sample(c('left', 'right'), 1),
                      cue = sample(c(0, 1), 1),
                      valid = sample(c(0, 1, 1, 1), 1)){
    ## Task-specific code
    pre.fixation.time = runif(1, PRE.FIXATION.TIME.MIN, PRE.FIXATION.TIME.MAX)
    ## Template
    # The very first trial waits for a spacebar press before starting.
    if(trial == 1){
        state = 'press-space'
    }else{ state = 'show-fixation' }
    if(WINDOW$is.open())process.inputs()
    start = CLOCK$time
    while(WINDOW$is.open()){
        process.inputs()
        ## Task-specific code: per-state rendering and transitions.
        switch(state, 'press-space' = {
            # Show the "press space" prompt until space is released.
            WINDOW$clear(c(0, 0, 0))
            WINDOW$draw(TXT)
            WINDOW$display()
            if(KEY.RELEASED[Key.Space + 1] > start){
                WINDOW$clear(c(0, 0, 0))
                draw.boxes()
                WINDOW$display()
                start = CLOCK$time
                state = 'show-fixation'
            }
        }, 'show-fixation' = {
            # After the random pre-fixation delay, show either the arrow cue
            # (towards the target if valid, away otherwise) or the fixation.
            if((CLOCK$time - start) > pre.fixation.time){
                WINDOW$clear(c(0, 0, 0))
                draw.boxes()
                if(cue){
                    draw.cue(ifelse(valid, side, setdiff(c('left', 'right'), side)))
                }else{
                    lapply(FX, WINDOW$draw)
                }
                WINDOW$display()
                fixation.start = CLOCK$time
                state = 'clear-fixation'
            }
        }, 'clear-fixation' = {
            # Remove the cue/fixation after FIXATION.TIME.
            if((CLOCK$time - fixation.start) > FIXATION.TIME){
                WINDOW$clear(c(0, 0, 0))
                draw.boxes()
                WINDOW$display()
                fixation.cleared = CLOCK$time
                state = 'post-fixation'
            }
        }, 'post-fixation' = {
            # Brief blank gap before the target appears.
            if((CLOCK$time - fixation.cleared) > POST.FIXATION.TIME)state = 'show-stim'
        }, 'show-stim' = {
            # Show the target and arm the response collection: the framework
            # sets ACC/RT once a key matching/not matching CORRECT.KEY fires.
            WINDOW$clear(c(0, 0, 0))
            draw.boxes()
            draw.stim(side)
            WINDOW$display()
            stim.onset = CLOCK$time
            CORRECT.KEY <<- c(left = Key.Left, right = Key.Right)[side]
            ACC <<- RT <<- NULL
            state = 'measure-reaction'
        }, 'measure-reaction' = {
            # Wait for a response or the reaction-time deadline.
            if(!is.null(ACC) || ((CLOCK$time - stim.onset) > MAX.REACTION.TIME))state = 'done'
        }, 'done' = {
            WINDOW$clear(c(0, 0, 0))
            draw.boxes()
            WINDOW$display()
            # acc == 2 encodes a missing response (timeout).
            return(list(soa = floor(pre.fixation.time), rt = ifelse(is.null(RT), MAX.REACTION.TIME, RT - stim.onset),
                        acc = ifelse(is.null(ACC), 2, ACC)))
        })
    }
}
# Participant-facing instructions (Polish, shown verbatim in the GUI).
gui.show.instruction("Za chwilę pojawi się okno danych osobowych.
Jako identyfikator należy podać pierwsze dwa inicjały, dzień i miesiąc urodzenia.
Na przykład, Jan Kowalski urodzony 11 grudnia będzie miał identyfikator jk1112
")
if(!interactive())gui.user.data()
gui.show.instruction("To zadanie polega na reagowaniu za pomocą klawiszy strzałek na pojawiające się zielone prostokąty. Przez cały czas trwania zadania widoczne są dwie białe ramki, jedna po lewej, a jedna po prawej stronie od środka ekranu. Na początku próby na środku między nimi pojawia się strzałka. Po pewnym czasie od rozpoczęcia próby w środku jednej z ramek pojawia się zielony prostokąt.
Jeżeli zielony prostokąt pojawił się po lewej stronie, należy szybko nacisnąć klawisz strzałka w lewo.
Jeżeli zielony prostokąt pojawił się po prawej stronie, należy szybko nacisnąć klawisz strzałka w prawo.
W trakcie badania proszę zachować ciszę. Należy reagować możliwie szybko i poprawnie.")
if(!interactive()){
    ## 16 conditions
    # NOTE(review): cue = c(1, 1) yields 2*2*4 = 16 rows that are all cued
    # (the uncued condition only appears in the interactive branch below) -
    # confirm this is intended rather than a typo for c(0, 1).
    run.trials(trial.code, expand.grid(side = c('left', 'right'),
                                       cue = c(1, 1),
                                       valid = c(0, 1, 1, 1)),
               b = 2, n = 20, max.time = 15 * 60 * 1000, record.session = T)
    quit("no")
}else{
    # Interactive debug run: one block, one repetition, canned user data.
    USER.DATA = list(name = 'admin', age = 37, gender = 'M')
    run.trials(trial.code, expand.grid(side = c('left', 'right'),
                                       cue = c(0, 1),
                                       valid = c(0, 1, 1, 1)),
               b = 1, n = 1, record.session = T)
}
52833c8ea3d9ea4930ea27876ff12480f3967e79 | ec90298bdaa342b37785253f3abb65545be40bdf | /Proj2/random_search.R | 053d1707d9d201872a14a352731ad169ec385e04 | [
"MIT"
] | permissive | byKakayo/modelagem | d328891c945e0613d277505339974dfa82177da5 | 2cb77cbe4efa7ffe42abadc748c3f621f59d8deb | refs/heads/master | 2023-01-22T23:36:07.811126 | 2020-12-01T16:55:30 | 2020-12-01T16:55:30 | 298,584,177 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 360 | r | random_search.R | random_search <- function(f, n, Ix, Iy) {
cz = Inf
r <- list()
r$X = c()
r$Y = c()
r$Z = c()
for (i in seq(n)) {
xi <- runif(1, Ix[1], Ix[2])
yi <- runif(1, Iy[1], Iy[2])
zi = f(xi, yi)
if (zi < cz) {
cx = xi
cy = yi
cz = zi
}
r$X <- c(r$X, cx)
r$Y <- c(r$Y, cy)
r$Z <- c(r$Z, cz)
}
return(r)
} |
a86f46a52b1b268ae1c1c54dbb578b74b47c34aa | b1f23446c30bd70b4cdf459d20b5da02af4c98ad | /getpal.R | d59d1daffe3b996f8517843f86e1d11a8c5325f5 | [
"BSD-2-Clause"
] | permissive | lionsarmor/heck | 263d5252f88c0fe885e8eb6e529afcc82baf593d | 9de43ba92d53d047af992468c08aa3c601348323 | refs/heads/master | 2020-12-10T08:36:31.436530 | 2019-10-15T01:50:04 | 2019-10-15T01:50:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 233 | r | getpal.R | #!/usr/bin/env RScript
library(colormap)
color<-commandArgs(TRUE)
m0<-colormap(color, 16, format="rgb")/16
m1<-matrix(sapply(m0,as.integer),16)[,-4]
m2<-cbind(seq(0,15),m1)
write.table(m2, sep=',', row.names=FALSE, col.names=FALSE)
|
422e619641902aeac84573e258575610e8d78c49 | 10d55aa2971e8aae9a26d65acc7de09e0b4377c4 | /problemSets/PS3/PS3 code.R | e2a4c32a89821af5ca58a247e8e6d2aea1d50e10 | [] | no_license | mkvereen/QPMspring2019 | 710d8ba4d14e173888e5a8e499f45f5fe841a403 | a025aab335472205dab532a4f0abd0730d530e20 | refs/heads/master | 2020-04-17T02:33:54.371966 | 2019-05-01T19:57:11 | 2019-05-01T19:57:11 | 166,141,562 | 0 | 0 | null | 2019-01-17T01:51:24 | 2019-01-17T01:51:24 | null | UTF-8 | R | false | false | 1,285 | r | PS3 code.R | setwd("~/Documents/GitHub/QPMspring2019/problemSets/PS3")
install.packages("faraway")
library("faraway")
data("newhamp")
help("newhamp")
#question 1
#A
plot(newhamp$pObama, newhamp$Dean, main= "Dean vs Obama and hand counted vs machine ballots", cex.main=1, xlab="proportion Obama", ylab="proportion Dean", col=my_cols, xlim=c(0,1), ylim=c(0,1))
my_cols <- ifelse(newhamp$votesys=="H", "Blue", "Red")
legend("topleft", legend = c("Hand counted", "Machine counted"), col = c("Blue", "Red"), lty = 1, cex = 1)
dev.off()
#question 2
#B
x<-seq(from=-5, to=5, by=.1)
plot(x, dnorm(x), lwd=3, type="l", col=1, lty=1, xlab="x", ylab="y", main="Question 2")
lines(x, dt(x, df=20), lwd=3, ylim=c(0, .4), col=2, lty=2)
lines(x, dt(x, df=3), lwd=3, ylim=c(0, .4), col=3, lty=3)
lines(x, dt(x, df=1), lwd=3, ylim=c(0, .4), col=4, lty=4)
legend("topleft",
c("normal", "t(df=20)", "t(df=3)", "t(df=1)"),
lty=c(1,2,3,4), col=c(1,2,3,4), bty="n")
pdf("Question 2")
dev.off()
#question 3
install.packages("Zelig")
library("Zelig")
data("voteincome")
?voteincome
#se=sd/sqrt(n)
x<-voteincome$age
sd(x)/sqrt(1500)
#se=.4511027
#z score=(x-u)/se
z.score<-(50-mean(x))/.4511027
#z= 1.637469
#D
mean(x)
mean(x) - 1.96*.4511027
mean(x)+ 1.96*.4511027
# [ 48.37717, 50.14549]
|
8154fbdf31481c792f79844c9cb4d368e180cafb | f697cbbb0da988fd43c07652d5955fbb82e25e38 | /GoViewer/R/legendColourLookup.r | 6f614cdd46d208aa87d092c83cf407e2859a2737 | [] | no_license | aidanmacnamara/epiView | eec75c81b8c7d6b38c8b41aece3e67ae3053fd1c | b3356f6361fcda6d43bf3acce16b2436840d1047 | refs/heads/master | 2021-05-07T21:22:27.885143 | 2020-06-22T13:08:51 | 2020-06-22T13:08:51 | 109,008,158 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,780 | r | legendColourLookup.r | #' assigns colours to text values using a matched list
#'
#' Compares a dictionary of search values against a list of elements
#' in a string vector to assign colour - or some other coding - to
#' each element. In particular, in turn, for each item, we cycle
#' through the dictionary (a list of words as a space delimited string)
#' and return a colour taken from the name of the corresponding dictionary
#' entry if a match is detected. If multiple hits are detected, the first is
#' returned. For an illustration, see the examples.
#'
#' Typically, you might use this to assign named colours to labels on
#' the basis of a text match in their description, or to generate a
#' factor of arbitrary levels (i.e. not explicit factor names) to be
#' used to assign colours (or other plotting attributes) automatically. Based upon
#' a function, colourLookup in a previous library.
#'
#' @param values the list of values to be coloured, e.g. labels on a
#' plot
#' @param dict a list of vectors of single word values to be
#' searched for in values. If the list is named, its names
#' will be taken as the colours, otherwise a list of artificial
#' names of the form colour.X will be used. If the list is omitted (length zero)
#' a vector of the default.colour length length(values) is returned
#' @param ignore.case if true (default) case is ignored
#' @param default.colour ("black") the colour or value assigned to all non-matched values
#'
#' @return a vector of length values coloured by the name of the
#' entry with the first hit in dict.
#' @export
#'
#' @examples
#'# using a named dictionary
#'
#'legendColourLookup(c("a big dog","lots of fish","a small cat","a cat-dog"),
#' list(red="dog",
#' blue="cat"))
#'
#'# or
#'
#'legendColourLookup(c("a big Dog","lots of fish","a small cat","a cat-dog"),
#' list(red="dog",
#' blue="cat"),
#' default.colour = "none",
#' ignore.case=F)
#'
#'# and without names
#'
#'legendColourLookup(c("a big dog","a small cat","a cat-dog"),
#' list("dog",
#' "cat"))
legendColourLookup <- function(values = NULL, dict, ignore.case = TRUE, default.colour = "black") {
  # No dictionary at all: everything gets the default colour.
  if (length(dict) == 0) {
    return(rep(default.colour, length(values)))
  }
  # Unnamed dictionaries get artificial colour names "colour.1", "colour.2", ...
  if (is.null(names(dict))) {
    names(dict) <- paste("colour", seq_along(dict), sep = ".")
  }
  colours <- rep(default.colour, length(values))
  # Walk the dictionary in reverse so that, after all overwrites, the
  # FIRST matching entry in `dict` is the one that wins.
  for (dictItemName in rev(names(dict))) {
    dictItem <- dict[[dictItemName]]
    # Collapse runs of blanks, then split each entry into its individual
    # search words.  (The previous code stripped ALL blanks, which fused a
    # multi-word entry like "dog cat" into the unmatchable pattern
    # "dogcat"; splitting honours the documented space-delimited-words
    # contract.  unlist() also supports entries given as word vectors.)
    dictItem <- trimws(gsub(" +", " ", dictItem))
    for (dictItemWord in unlist(strsplit(dictItem, " ", fixed = TRUE))) {
      # dictItemName is the colour assigned on a match
      colours[grepl(dictItemWord, values, ignore.case = ignore.case)] <- dictItemName
    }
  }
  colours
}
|
5ea5bc0f876d9c811221a55a08745b7e849b7c72 | ad925982f059cd5751308ae254cddb7193c26e5c | /R/bgd.R | 5c37a1c73a34ce0074a4fff1f66e9cea145e9007 | [] | permissive | SiddheshAcharekar/rcane | 5ec6ceeeef23b5e100068bc524c8d13dc94c4c43 | c385060cc7a2338c4ba19887abecbd6899d2f54e | refs/heads/master | 2022-04-13T07:53:09.989310 | 2020-02-28T13:37:13 | 2020-02-28T13:37:13 | 108,881,042 | 0 | 0 | MIT | 2020-02-28T13:37:14 | 2017-10-30T16:55:56 | R | UTF-8 | R | false | false | 1,796 | r | bgd.R | BatchGradientDescent <- function(X, Y, alpha=0.1, max.iter=1000, precision=0.0001, boldDriver=FALSE, ...) {
# Batch gradient descent for linear regression: fits coefficients B that
# minimise the squared error of X %*% B against Y, optionally adapting the
# learning rate with the "bold driver" heuristic.  Returns an "rlm"-classed
# list with x, y, coefficients, fitted.values, residuals and a per-iteration
# RMSE trace (loss_iter).
if (is.null(n <- nrow(X))) stop("'X' must be a matrix")
if(n == 0L) stop("0 (non-NA) cases")
p <- ncol(X)
# Degenerate case: no predictors, mirror lm()'s empty-fit shape.
if(p == 0L) {
return(list(
x = X,
y = Y,
coefficients = numeric(),
residuals = Y,
fitted.values = 0 * Y
))
}
if(NROW(Y) != n) {
stop("incompatible dimensions")
}
# Initial value of coefficients (all-zero start).
B <- rep(0, ncol(X))
# Previous-iteration objective value, tracked for the bold-driver heuristic.
err <- NA
# Per-iteration RMSE, recorded for loss-vs-iteration diagnostics.
# NOTE(review): rbind inside the loop grows the frame quadratically;
# preallocating max.iter rows would be cheaper.
loss_iter <- data.frame(
loss = numeric(),
iter = integer()
)
for(iter in 1:max.iter){
B.prev <- B
err.prev <- err
yhat <- X %*% B
# loss holds the raw residuals; its RMSE is logged for this iteration.
loss <- Y - yhat
loss_iter <- rbind(loss_iter, c(sqrt(mean(loss^2)), iter))
# Gradient step: B <- B + (alpha/n) * X' (Y - X B).
B <- B + alpha/n * t(X) %*% (loss)
# Stop on numeric blow-up (NA) or when no coefficient moved by more than
# the relative tolerance.
# NOTE(review): the comparison uses precision * B, not precision * abs(B);
# for negative coefficients the right-hand side is negative so the
# condition is always TRUE for that component — confirm whether
# precision * abs(B) was intended.
if(any(is.na(B)) ||
!any(abs(B.prev - B) > precision * B)){
break
}
# Bold driver: grow alpha 10% after an improving step, otherwise revert
# the coefficients and halve alpha.
if(boldDriver) {
err <- error(Y,yhat)
if(!is.na(err.prev)) {
if(err <= err.prev) {
alpha <- alpha + alpha * 0.1
} else {
B <- B.prev
alpha <- alpha - alpha * 0.5
}
}
}
}
names(B) <- colnames(X)
# Final fitted values and residuals at the converged coefficients.
fv <- X %*% B
rs <- Y - fv
coef <- as.vector(B)
names(coef) <- rownames(B)
colnames(loss_iter) <- c('loss', 'iter')
z <- structure(list(
x=X,
y=Y,
coefficients = coef,
fitted.values = fv,
residuals = rs,
loss_iter = loss_iter
),
class = c("rlm","rlmmodel"))
z
# Euclidean (L2) norm of the residuals between two numeric vectors;
# used above by the bold-driver step to compare successive fits.
error <- function(actual, predicted) {
  residuals <- actual - predicted
  sqrt(sum(residuals * residuals))
}
|
f7107bb330519a8f30b32598bfe18171bba2b1ee | 29585dff702209dd446c0ab52ceea046c58e384e | /StatMatch/R/pw.assoc.R | b170a311ae94d2968e472167404aea12d89fbd15 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,150 | r | pw.assoc.R | `pw.assoc` <-
function(formula, data, weights=NULL, freq0c=NULL)
{
# Pairwise association measures between the response (left-hand side of
# `formula`) and each explanatory variable, all treated as categorical:
# Cramer's V, Goodman & Kruskal's lambda and tau, and Theil's uncertainty
# coefficient U, each computed Row|Column from a (weighted) contingency
# table.  `weights` names an optional weight column in `data`; `freq0c`
# replaces zero cells (defaults to 1/sum(tab)^2) so the log/ratio terms
# below stay finite.
## code for Cramer's V
V <- function(tab){
chi <- chisq.test(tab)
mm <- min(nrow(tab)-1, ncol(tab)-1)
# V = sqrt(chi2 / (N * min(r-1, c-1)))
out<-sqrt(chi$statistic/(sum(tab)*mm))
names(out) <- NULL
out
}
### function for computing measures of
### proportional reduction of the variance Row|Column
prv.rc <- function(tab){
# Work with cell proportions; rS/cS are the row/column margins.
tab <- tab/sum(tab)
rS <- rowSums(tab)
cS <- colSums(tab)
## code for Goodman & Kruskal lambda(Row|Column)
# lambda: reduction in modal prediction error of the row variable when
# the column category is known.
V.r <- 1 - max(rS)
EV.rgc <- 1-sum(apply(tab,2,max))
lambda <- (V.r - EV.rgc)/V.r
## code for Goodman & Kruskal tau(Row|Column)
# tau: proportional reduction in Gini variation of the rows given columns.
V.r <- 1 - sum(rS^2)
a <- colSums(tab^2)
EV.rgc <- 1 - sum(a/cS)
tau <- (V.r - EV.rgc)/V.r
## code for Theil's Uncertainty(Row|Column)
# U: proportional reduction in the entropy of the rows given columns.
V.r <- (-1)*sum(rS*log(rS))
cS.mat <- matrix(cS, nrow=nrow(tab), ncol=ncol(tab), byrow=TRUE)
EV.rgc <- sum(tab *log(tab/cS.mat))
u <- (V.r + EV.rgc)/V.r
## output
c(lambda.rc=lambda, tau.rc=tau, U.rc=u)
}
###################################################
###################################################
# Unit weights unless a weight column is named; that column is then
# removed from the data before building the model frame.
if(is.null(weights)) ww <- rep(1, nrow(data))
else{
ww <- data[,weights]
data <- data[,setdiff(colnames(data), weights)]
}
df <- model.frame(formula=formula, data=data)
lab <- colnames(df)
# p.x = number of explanatory variables; one association per X variable.
p.x <- length(lab) - 1
vV <- vlambda <- vtau <- vU <- numeric(p.x)
for(i in 1:p.x){
pos <- i+1
# Build the weighted two-way table via xtabs(ww ~ Y + X_i).
form <- paste(lab[1], lab[pos], sep="+")
form <- paste("ww", form, sep="~")
tab <- xtabs(as.formula(form), data=df)
# NOTE(review): freq0c is derived from the FIRST table only and then
# reused for all subsequent pairs — confirm that is intended.
if(is.null(freq0c)) freq0c <- 1/sum(tab)^2
tab[tab==0] <- freq0c
vV[i] <- V(tab)
appo <- prv.rc(tab)
vlambda[i] <- appo[1]
vtau[i] <- appo[2]
vU[i] <- appo[3]
}
# Name each statistic "Y.X" after the pair it measures.
lab.am <- paste(lab[1], lab[-1], sep="." )
names(vV) <- names(vlambda) <- names(vtau) <- names(vU) <- lab.am
list(V=vV, lambda=vlambda, tau=vtau, U=vU)
}
|
7c8ef3b7fe95a5f60f10287e20d559fb12108a5c | 02e865334769049a7a92ffe4b3d37cb66c97ae04 | /Assigment3/census.R | 82ee266ae51b12d00d6d3625209b97632a18dd4a | [] | no_license | egorgrachev/Analytics_Edge | 5d17c046273fe1461842d4b2e0d81eaec4c3f1be | 9d4a5e611f0ac5dc39ce566b7f68739fecac66dd | refs/heads/master | 2021-01-16T01:02:05.038312 | 2015-04-15T09:31:37 | 2015-04-15T09:31:37 | 33,805,264 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,568 | r | census.R | data = read.csv("AnalyticsEdge/Assigment3/census.csv")
# Predict whether income exceeds $50k from census data, comparing logistic
# regression, a CART tree, a random forest, and a cross-validated CART.
# NOTE(review): sample.split (caTools), rpart/prp (rpart, rpart.plot) and
# randomForest are used below without library() calls — presumably loaded
# earlier in the interactive session.
str(data)
# 60/40 train/test split, stratified on the outcome.
set.seed(2000)
spl = sample.split(data$over50k, SplitRatio = 0.6)
Train = subset(data, spl==TRUE)
Test = subset(data, spl==FALSE)
# Logistic regression on all predictors.
GLM = glm(over50k ~ ., data=Train, family="binomial")
summary(GLM)
# NOTE(review): predict.glm defaults to type="link", so the 0.5 threshold
# below is applied on the log-odds scale, not to probabilities — consider
# type="response" (0.5 log-odds is about 0.62 probability).
PredictionGLM = predict(GLM, newdata=Test)
table(Test$over50k, PredictionGLM > 0.5)
# Accuracy from the confusion matrix above.
(9351+1515) / (9351+362+1563+1515)
# Baseline distribution of the outcome in the test set.
table(Test$over50k)
library(ROCR)
ROCRprediction = prediction(PredictionGLM, Test$over50k)
# Test-set AUC for the logistic model.
as.numeric(performance(ROCRprediction, "auc")@y.values)
# CART classification tree with default parameters.
CART = rpart(over50k ~ ., data=Train, method="class")
PredCART = predict(CART, newdata=Test)
prp(CART)
table(Test$over50k, PredCART)
(9243+1596) / (9243+470+1482+1596)
ROCRprediction = prediction(PredCART, Test$over50k)
as.numeric(performance(ROCRprediction, "auc")@y.values)
# Random forest on a 2000-row subsample (kept small for speed).
set.seed(1)
trainSmall = Train[sample(nrow(Train), 2000), ]
set.seed(1)
RF = randomForest(over50k ~ ., data=trainSmall)
PredRF = predict(RF, newdata=Test)
table(Test$over50k, PredRF)
(9586+1093) / (9586+127+1985+1093)
# Variable importance: split counts vs. impurity-based importance.
vu = varUsed(RF, count=TRUE)
vusorted = sort(vu, decreasing = FALSE, index.return = TRUE)
dotchart(vusorted$x, names(RF$forest$xlevels[vusorted$ix]))
varImpPlot(RF)
# Cross-validate the CART complexity parameter over cp = 0.002..0.1.
library(caret)
library(e1071)
set.seed(2)
cartGrid = expand.grid( .cp = seq(0.002,0.1,0.002))
train(over50k ~ ., data=Train, method="rpart", tuneGrid=cartGrid)
# Refit at the selected cp and evaluate on the test set.
superCART = rpart(over50k ~ ., data=Train, method='class', cp=0.002)
predsuperCart = predict(superCART, Test, type='class')
table(Test$over50k, predsuperCart)
(9178+1838) / (9178+535+1240+1838)
prp(superCART)
|
5973ee09c3e80c558613a5835794b9477b67fd2a | 8fec3121657b46e781dd770306267c5a79e36c37 | /R/get.objects.from.script.R | 3307a373557ab54e96ba9314f09108c00668d2a2 | [
"MIT"
] | permissive | rosalieb/miscellaneous | c94e723fc8cd084d6a6cca04977bfbee56f9c10f | 0c1fc29f865978b76661e99cbfb4fedbf11f6ca6 | refs/heads/master | 2022-06-17T11:25:51.352167 | 2022-06-16T13:30:10 | 2022-06-16T13:30:10 | 211,395,839 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,014 | r | get.objects.from.script.R | #' @title Function to return objects created in a specific script that exist in the environment
#'
#' @description The function identifies the objects created by a script and returns those present in the environment. The function can take some time to run if the script is long!
#'
#' @author Rosalie Bruel
#'
#' @param path2file The path to the .R or .Rmd file to be evaluated. If input is .Rmd, the function will first extract the code from chunk (creating a copy of the code in the working directory).
#' @param exception Vector of objects to not return. Default = NULL.
#' @param source Logical argument. Source the file before running: necessary to put the objects in the environment. Default = FALSE.
#' @param message Logical argument. Default = TRUE
#'
#' @examples
#' get.objects.from.script()
#' get.objects.from.script(source = TRUE, message = FALSE)
if(!"utils" %in% rownames(installed.packages())) install.packages("utils")
if(!"tools" %in% rownames(installed.packages())) install.packages("tools")
if(!"knitr" %in% rownames(installed.packages())) install.packages("knitr")
get.objects.from.script <- function(path2file = NULL, exception = NULL, source = FALSE, message = TRUE) {
library("utils")
library("tools")
# Step 0-1: Possibility to leave path2file = NULL if using RStudio.
# We are using rstudioapi to get the path to the currently open file;
# outside RStudio this call will fail, so pass path2file explicitly.
if(is.null(path2file)) path2file <- rstudioapi::getSourceEditorContext()$path
# Check that file exists
if (!file.exists(path2file)) {
stop("couldn't find file ", path2file)
}
# Step 0-2: If .Rmd file, need to extract the code in R chunks first.
# Use code in https://felixfan.github.io/extract-r-code/
# NOTE(review): purl() writes the extracted .R file into the working
# directory as a side effect, as the roxygen description warns.
if(file_ext(path2file)=="Rmd") {
library("knitr")
tmp <- purl(path2file)
path2file <- paste(getwd(),tmp,sep="/")
source = TRUE # Must be changed to TRUE here
}
# Step 0-3: Start by running the script if you are calling an external script.
# (`source` the argument shadows base::source as a value, but R still
# finds the function for the call.)
# NOTE(review): source() here evaluates in this function's frame, not the
# global environment — confirm the objects end up where find() looks.
if(source) source(path2file)
# Step 1: screen the script via the parse tree (comments/strings excluded).
summ_script <- getParseData(parse(path2file, keep.source = TRUE))
# Step 2: extract the symbols (candidate object names) used in the script.
list_objects <- summ_script$text[which(summ_script$token == "SYMBOL")]
# List unique
list_objects <- unique(list_objects)
# Step 3: find where the objects are (search path position per symbol).
src <- paste(as.vector(sapply(list_objects, find)))
src <- tapply(list_objects, factor(src), c)
# List of the objects in the Global Environment.
# They can be in both the Global Environment and some packages.
src_names <- names(src)
list_objects = NULL
for (i in grep("GlobalEnv", src_names)) {
list_objects <- c(list_objects, src[[i]])
}
# Step 3bis: if any exception, remove from the list
if(!is.null(exception)) {
list_objects <- list_objects[!list_objects %in% exception]
}
# Step 4: done!
# If message, print a summary (count includes the excluded exceptions):
if(message) {
cat(paste0(" ",(length(list_objects)+length(exception))," objects were created in the script \n ", path2file,"\n"))
}
return(list_objects)
}
|
145bb5b6add43ce16af48146ec91be121136fff8 | 3bf1ad00bd669b4c7a1c0c90d82fbfbd71b76808 | /codes/functions.R | fa51cd96a3b7f8d04f5d56c9e7bed0214988a70a | [] | no_license | aflaxman/InSilicoVA-sim | 40da0877fb92b9571148db01281090cb6ed442f3 | 51b35cf126f7451dff7150c3f1ca697158ea8a02 | refs/heads/master | 2020-07-18T23:58:21.650633 | 2019-07-19T18:16:13 | 2019-07-19T18:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 927 | r | functions.R | getCCC <- function(est, truth, causes){
# Per-cause chance-corrected concordance between estimated (`est`) and true
# (`truth`) cause assignments: for each cause, sensitivity TP/(TP+FN)
# rescaled so that chance-level assignment (1/C) maps to 0 and perfect
# assignment to 1.  Returns a numeric vector of length(causes).
C <- length(causes)
# NOTE(review): N is computed but never used below.
N <- length(truth)
ccc <- rep(NA, C)
for(i in 1:C){
# TP: deaths truly of cause i that were also assigned cause i;
# FN: deaths truly of cause i assigned to some other cause.
TP <- length(intersect(which(est == causes[i]), which(truth == causes[i])))
FN <- length(intersect(which(est != causes[i]), which(truth == causes[i])))
# Chance-corrected sensitivity; NaN when the cause never occurs (TP+FN=0).
ccc[i] = (TP / (TP + FN) - 1/C) / (1 - 1/C)
}
return(ccc)
# just in case, this function returns the removed symptoms in the PHMRC shortened questionnaire.
# However, for this analysis, the full questionnaire was used in Tariff 2.0, so this is not needed
getRemoved <- function(){
  # Symptom codes removed in the PHMRC shortened questionnaire, returned
  # as a character vector of 39 codes (kept here for reference; the full
  # questionnaire was used in this analysis).
  c(
    "a2_01", "a2_03", "a2_06", "a2_08", "a2_12",
    "a2_16", "a2_17", "a2_18", "a2_19", "a2_20",
    "a2_23", "a2_24", "a2_28", "a2_33", "a2_37",
    "a2_38", "a2_38_s1", "a2_40", "a2_41", "a2_42",
    "a2_45", "a2_46", "a2_46a", "a2_46a_s1", "a2_46b",
    "a2_48", "a2_49", "a2_54", "a2_69", "a2_70",
    "a2_71", "a2_76", "a2_78", "a2_79", "a2_80",
    "a2_81", "a2_86", "a3_20", "a4_03"
  )
}
|
15401f7bed1b28edbfbb2e2e5336608aa3567c42 | 60306730d83af4c5a715a3079cc0047bfa95351a | /Lib/PredictDSHW.R | 89474854c666546a6249da1dc481753e1c593a85 | [] | no_license | ikelq/ShortTermLoadForecasting | 0fc3e36c1231abeb3316f9a1e9768ef53802d91c | 0d9751f50cbbe8656dd1dda75170128eca47baa1 | refs/heads/master | 2021-06-08T05:44:05.729174 | 2016-12-01T12:15:36 | 2016-12-01T12:15:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,877 | r | PredictDSHW.R | #Parallelize over zones
# Run predictDSHW() for each zone in parallel (one foreach task per zone,
# NCores workers) and merge the per-zone predictions with
# combinePredictions().  Failed zones are dropped (.errorhandling="remove").
# Saves the combined predictions under `outputDir` when saveResult=TRUE.
predictDSHWParallel <- function(outputDir,
trainingDf,
completeDf,
zones,
horizons,
modifiedDSHW = FALSE,
NCores = 8,
plotResult = FALSE,
saveResult = TRUE){
stopifnot(require("doParallel"))
stopifnot(require("foreach"))
# Helpers defined elsewhere in this project.
source("Lib/SavePredictions.R")
source("Lib/CombinePredictions.R")
registerDoParallel(NCores)
# `zones = zones` rebinds the loop variable: each task sees a single zone.
# Results are folded pairwise via combinePredictions over all horizons.
predictions = foreach(zones = zones,
.combine=function(pred1, pred2) combinePredictions(horizons, zones, pred1, pred2),
.errorhandling="remove") %dopar%
predictDSHW(outputDir, trainingDf, completeDf, zones, horizons, modifiedDSHW,
plotResult = FALSE, saveResult = FALSE)
stopImplicitCluster()
methodName = ifelse(modifiedDSHW, "ModifiedDSHW", "OriginalDSHW")
if (saveResult){
savePredictions(methodName, predictions, horizons, outputDir)
}
}
# Rolling-origin forecasts with double seasonal Holt-Winters (dshw) for the
# given zones.  Testing periods are the contiguous runs of NA rows in
# `trainingDf`; `completeDf` supplies the true values used to extend the
# series one observation at a time.  Returns a list indexed by horizon h,
# each element a copy of trainingDf with the h-step-ahead forecasts filled
# into the formerly-NA rows.
predictDSHW <- function(outputDir,
trainingDf,
completeDf,
zones,
horizons,
modifiedDSHW = FALSE,
plotResult = FALSE,
saveResult = FALSE){
stopifnot(require("forecast"))
stopifnot(require("xts"))
source("Lib/SavePredictions.R")
# Set up the logging helper (defines prettyPrint used below).
source("Lib/SetupLog.R")
# Select the DSHW implementation; the sourced file presumably overrides
# dshw() when modifiedDSHW is TRUE.
if (modifiedDSHW){
source("Lib/ModifiedDSHW.R")
methodName = "ModifiedDSHW"
} else{
methodName = "OriginalDSHW"
source("Lib/OriginalDSHW.R")
}
# Extract testing periods: start/end row indices of each maximal run of
# incomplete (NA) rows in the training frame.
idxNaCases = !complete.cases(trainingDf)
startPoints = which(idxNaCases & !c(FALSE, head(idxNaCases, -1)) & c(tail(idxNaCases, -1), TRUE))
endPoints = which(idxNaCases & c(TRUE, head(idxNaCases, -1)) & !c(tail(idxNaCases, -1), FALSE))
startDates = trainingDf$DateTime[startPoints]
endDates = trainingDf$DateTime[endPoints]
nTestingPeriods = length(startDates)
# Full (un-blanked) data as an xts object; column 1 is the timestamp.
xtsDf = xts(x = completeDf[, -1], order.by = completeDf[, 1])
maxHorizons = max(horizons)
maxPoint = nrow(trainingDf)
# One output frame per horizon, pre-filled with the training data.
predictions = rep(list(trainingDf), max(horizons));
season1 = 24; #Hourly seasonal
season2 = 24*7; #Weekly seasonal
for (zone in zones){
xts = xtsDf[, zone]
for (period in seq(1, nTestingPeriods)){
startTime = Sys.time()
startPoint = startPoints[period]
endPoint = endPoints[period]
startTrainingPoint = startPoint - 12*season2 #Only get 3 months of data for training
trainXts = xts[startTrainingPoint:(startPoint-1)]
# Fit the smoothing parameters once per period; forecasts below reuse
# this model rather than refitting at every step.
sink("/dev/null") #Disable output from dshw function
model = dshw(trainXts, season1, season2, h=season1)
sink()
testXts = trainXts
# Rolling origin: forecast up to maxHorizons ahead, store each
# requested horizon h that still lands inside the period, then append
# the actual observation and advance one step.
for (currentPoint in seq(startPoint, endPoint)){
prediction = dshw(testXts, h=maxHorizons, model = model)$mean
for (h in horizons){
if (currentPoint+h-1 <= endPoint){
predictions[[h]][currentPoint+h-1, zone] = prediction[h]
}
}
testXts = c(testXts, xts[currentPoint])
}
prettyPrint(paste0(methodName,"|", zone, "|period ", period, "|Done in ",
as.numeric(Sys.time()-startTime, units = "secs")));
}
}
if (saveResult){
savePredictions(methodName, predictions, horizons, outputDir)
}
return (predictions)
}
15630632d060a1abb952e962d7f016fe2026ddc7 | daa4e30879dca0b8bd786d5a47da08ab787f4d9d | /pcd/plot_gene_exon_structure_whole_gene_MYB.r | 3bbd0a3708989465d1bbd519fc75cc70bbf07189 | [] | no_license | bioinfoICS/Alternative-Splicing-Study-Under-Mineral-Nutrient-Deficiency | 862da812095be8e258588754239fe3a9f53e7f6a | 9e5adb4b15b249c3a096941ce9d05ca3c9e4f98b | refs/heads/master | 2021-09-18T22:36:37.377167 | 2018-07-21T05:19:59 | 2018-07-21T05:19:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,026 | r | plot_gene_exon_structure_whole_gene_MYB.r | argsx <- commandArgs(TRUE)
# Command-line driven figure: read depth + splice junctions for two samples
# (top panel) over a gene, then the gene/exon/domain structure (bottom panel).
# Args: 1 = gene BED-like file, 2 = directory holding the helper script,
# 3 = comma-separated sample name list (expects exactly two samples below).
GENE <- argsx[1]
pcd <- argsx[2]
samplelist <- strsplit(argsx[3], ",")[[1]]
# Helper sourced later to draw one transcript's exon structure.
fun1 <- paste(pcd, "/plot_gene_exon_structure_AS_FunG3.r", sep="")
# Gene file: columns 2/3 = start/end coordinates, column 6 = strand.
genefile <- read.table(GENE, header=FALSE, sep="\t")
strand <- as.vector(genefile[,6])
gst <- genefile[,2]
gen <- genefile[,3]
otfile <- paste("Gene", ".pdf", sep="")
pdf(file=otfile, width=15, height=8)
colorss <- c("lightblue", "red")
# Two stacked panels: coverage/junctions on top, gene structure below.
layout(matrix(c(1,2), 2, 1, byrow=TRUE), height=c(3.5,3.5))
par(mar=c(0,12,0,3))
# Empty frame for the coverage panel; y in [0, 2.5] holds both sample tracks.
plot(NA, NA,xlim=c(gst, gen), ylim=c(0, 2.5),type="n",col="grey", main="", cex.main=0.9, ylab="", xlab="Pos(bp)", frame.plot=FALSE, yaxt="n",xaxt="n")
distance <- seq(gst, gen, by=(gen-gst)/2)
# equal10 = 1/50 of the gene span; used to place the strand arrows.
equal10 <- (gen-gst)/50
# Three direction arrows along y=0 indicating transcription direction.
if(strand == "+")
{
arrows(gst+equal10*24, 0, gst+equal10*26, 0, col = "black", length=0.1, lwd=1)
arrows(gst+equal10*9, 0, gst+equal10*11, 0, col = "black", length=0.1, lwd=1)
arrows(gst+equal10*39, 0, gst+equal10*41, 0, col = "black", length=0.1, lwd=1)
}else
{
arrows(gst+equal10*26, 0, gst+equal10*24, 0, col = "black", length=0.1, lwd=1)
arrows(gst+equal10*11, 0, gst+equal10*9, 0, col = "black", length=0.1, lwd=1)
arrows(gst+equal10*41, 0, gst+equal10*39, 0, col = "black", length=0.1, lwd=1)
}
# NOTE(review): theta and sss_n are assigned but never used below.
theta <- NULL
sss_n <- length(samplelist)
#### Sample 1 ("+Fe" control per the axis labels): upper track, baseline y = 1.5.
y0 <- 1.5
filename <- paste(samplelist[1], ".txt", sep="")
depth_tab <- read.table(filename, sep="\t", header=FALSE)
# Min-max scale the per-base coverage (column 3) into [0, 1); the +0.1
# keeps the denominator non-zero when coverage is flat.
depth <- depth_tab[, 3]
depth <- (depth - min(depth)) / (max(depth) - min(depth) + 0.1)
# One vertical segment per base (column 2 = position), rising from the track.
for (i in seq_len(nrow(depth_tab))) {
lines(c(depth_tab[i, 2], depth_tab[i, 2]), c(y0, depth[i] + y0), col="lightblue")
}
# Junction-read annotations for sample 1.  Column 1 packs "&"-separated
# fields: event type (field 1), strand (field 4) and event coordinates;
# columns 2/3 carry the inclusion/exclusion read counts.
filename <- paste(samplelist[1], "_junction.txt", sep="")
junc_tab <- read.table(filename, sep="\t", header=FALSE)
for (i in seq_len(nrow(junc_tab))) {
tmp <- strsplit(as.vector(junc_tab[i, 1]), "\\&")[[1]]
# Per-event strand (note: shadows the gene-level `strand`, which is not
# used again after this point).
strand <- tmp[4]
tmpin <- as.numeric(junc_tab[i, 2])
tmpex <- as.numeric(junc_tab[i, 3])
if (tmp[1] == "RI") {
# Retained intron: inclusion count printed above the track, spliced
# junction drawn as a chevron dipping below with the exclusion count.
target_st <- as.numeric(tmp[8])
target_en <- as.numeric(tmp[9])
tmpin <- round(tmpin, 0)
tmpex <- round(tmpex, 0)
mid <- (target_en + target_st) / 2
text(mid, y0 + 0.2, tmpin, cex=1.8)
lines(c(target_st, mid), c(y0, y0 - 0.1), lwd=1)
lines(c(mid, target_en), c(y0 - 0.1, y0), lwd=1)
text(mid, y0 - 0.1, tmpex, cex=1.8)
}
if (tmp[1] == "A3SS" && strand == "+") {
target_st <- as.numeric(tmp[5])
upstream_en <- as.numeric(tmp[10])
downstream_st <- as.numeric(tmp[7])
# Inclusion junction (long form) drawn above the track.
mid <- (upstream_en + target_st) / 2
lines(c(upstream_en, mid), c(y0, y0 + 0.1), lwd=1)
lines(c(mid, target_st), c(y0 + 0.1, y0), lwd=1)
text(mid, y0 + 0.1, tmpin, cex=1.8)
# Exclusion junction (short form) drawn below the track.
mid <- (upstream_en + downstream_st) / 2
lines(c(upstream_en, mid), c(y0, y0 - 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0 - 0.1, y0), lwd=1)
text(mid, y0 - 0.1, tmpex, cex=1.8)
}
if (tmp[1] == "A3SS" && strand == "-") {
# Fix: as.numeric() was missing in this branch, so minus-strand A3SS
# events crashed on character arithmetic.
target_en <- as.numeric(tmp[6])
upstream_en <- as.numeric(tmp[8])
downstream_st <- as.numeric(tmp[9])
mid <- (target_en + downstream_st) / 2
lines(c(target_en, mid), c(y0, y0 + 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0 + 0.1, y0), lwd=1)
text(mid, y0 + 0.1, tmpin, cex=1.8)
mid <- (upstream_en + downstream_st) / 2
lines(c(upstream_en, mid), c(y0, y0 - 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0 - 0.1, y0), lwd=1)
text(mid, y0 - 0.1, tmpex, cex=1.8)
}
if (tmp[1] == "A5SS" && strand == "+") {
target_en <- as.numeric(tmp[6])
upstream_en <- as.numeric(tmp[8])
downstream_st <- as.numeric(tmp[9])
mid <- (target_en + downstream_st) / 2
lines(c(target_en, mid), c(y0, y0 + 0.1), lwd=1)
# Fix: this segment previously ended at the loop-carried variable `ii`,
# which is undefined unless an A3SS event was drawn first; the track
# baseline y0 is what was meant.
lines(c(mid, downstream_st), c(y0 + 0.1, y0), lwd=1)
text(mid, y0 + 0.1, tmpin, cex=1.8)
mid <- (upstream_en + downstream_st) / 2
lines(c(upstream_en, mid), c(y0, y0 - 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0 - 0.1, y0), lwd=1)
text(mid, y0 - 0.1, tmpex, cex=1.8)
}
if (tmp[1] == "A5SS" && strand == "-") {
# Fix: as.numeric() was also missing in this branch.
target_st <- as.numeric(tmp[5])
upstream_en <- as.numeric(tmp[10])
downstream_st <- as.numeric(tmp[7])
mid <- (upstream_en + target_st) / 2
lines(c(upstream_en, mid), c(y0, y0 + 0.1), lwd=1)
lines(c(mid, target_st), c(y0 + 0.1, y0), lwd=1)
text(mid, y0 + 0.1, tmpin, cex=1.8)
mid <- (upstream_en + downstream_st) / 2
lines(c(upstream_en, mid), c(y0, y0 - 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0 - 0.1, y0), lwd=1)
text(mid, y0 - 0.1, tmpex, cex=1.8)
}
}
#### Sample 2 ("-Fe" case): lower track, baseline y = 0.5.
y0c <- 0.5
filename <- paste(samplelist[2], ".txt", sep="")
Case <- read.table(filename, sep="\t", header=FALSE)
# Min-max scale the per-base coverage (column 3) into [0, 1).
depth <- Case[, 3]
depth <- (depth - min(depth)) / (max(depth) - min(depth) + 0.1)
for (i in seq_len(nrow(Case))) {
lines(c(Case[i, 2], Case[i, 2]), c(y0c, depth[i] + y0c), col="red")
}
# Junction-read annotations for sample 2 (same field layout as sample 1).
filename <- paste(samplelist[2], "_junction.txt", sep="")
case_junc <- read.table(filename, sep="\t", header=FALSE)
for (i in seq_len(nrow(case_junc))) {
tmp <- strsplit(as.vector(case_junc[i, 1]), "\\&")[[1]]
strand <- tmp[4]
tmpin <- as.numeric(case_junc[i, 2])
tmpex <- as.numeric(case_junc[i, 3])
if (tmp[1] == "RI") {
target_st <- as.numeric(tmp[8])
target_en <- as.numeric(tmp[9])
tmpin <- round(tmpin, 0)
tmpex <- round(tmpex, 0)
mid <- (target_en + target_st) / 2
text(mid, y0c + 0.2, tmpin, cex=1.8)
# Fix: the chevron and exclusion label were copy-pasted at the control
# track's y = 1.5; they belong on this track's baseline.
lines(c(target_st, mid), c(y0c, y0c - 0.1), lwd=1)
lines(c(mid, target_en), c(y0c - 0.1, y0c), lwd=1)
text(mid, y0c - 0.1, tmpex, cex=1.8)
}
if (tmp[1] == "A3SS" && strand == "+") {
target_st <- as.numeric(tmp[5])
upstream_en <- as.numeric(tmp[10])
downstream_st <- as.numeric(tmp[7])
# Inclusion junction (long form) above the track.
mid <- (upstream_en + target_st) / 2
lines(c(upstream_en, mid), c(y0c, y0c + 0.1), lwd=1)
lines(c(mid, target_st), c(y0c + 0.1, y0c), lwd=1)
text(mid, y0c + 0.1, tmpin, cex=1.8)
# Exclusion junction (short form) below the track.
mid <- (upstream_en + downstream_st) / 2
lines(c(upstream_en, mid), c(y0c, y0c - 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0c - 0.1, y0c), lwd=1)
text(mid, y0c - 0.1, tmpex, cex=1.8)
}
if (tmp[1] == "A3SS" && strand == "-") {
# Fix: as.numeric() was missing in this branch (character arithmetic).
target_en <- as.numeric(tmp[6])
upstream_en <- as.numeric(tmp[8])
downstream_st <- as.numeric(tmp[9])
mid <- (target_en + downstream_st) / 2
lines(c(target_en, mid), c(y0c, y0c + 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0c + 0.1, y0c), lwd=1)
text(mid, y0c + 0.1, tmpin, cex=1.8)
mid <- (upstream_en + downstream_st) / 2
lines(c(upstream_en, mid), c(y0c, y0c - 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0c - 0.1, y0c), lwd=1)
text(mid, y0c - 0.1, tmpex, cex=1.8)
}
if (tmp[1] == "A5SS" && strand == "+") {
target_en <- as.numeric(tmp[6])
upstream_en <- as.numeric(tmp[8])
downstream_st <- as.numeric(tmp[9])
# Fix: the junction lines were copy-pasted at y = 1.5 (and one segment
# ended at the loop-carried `ii`); the count labels were already at 0.5,
# so draw everything on this track's baseline.
mid <- (target_en + downstream_st) / 2
lines(c(target_en, mid), c(y0c, y0c + 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0c + 0.1, y0c), lwd=1)
text(mid, y0c + 0.1, tmpin, cex=1.8)
mid <- (upstream_en + downstream_st) / 2
lines(c(upstream_en, mid), c(y0c, y0c - 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0c - 0.1, y0c), lwd=1)
text(mid, y0c - 0.1, tmpex, cex=1.8)
}
if (tmp[1] == "A5SS" && strand == "-") {
# Fix: as.numeric() was missing here, and the lines were drawn at the
# control track's y = 1.5 while the labels sat at 0.5.
target_st <- as.numeric(tmp[5])
upstream_en <- as.numeric(tmp[10])
downstream_st <- as.numeric(tmp[7])
mid <- (upstream_en + target_st) / 2
lines(c(upstream_en, mid), c(y0c, y0c + 0.1), lwd=1)
lines(c(mid, target_st), c(y0c + 0.1, y0c), lwd=1)
text(mid, y0c + 0.1, tmpin, cex=1.8)
mid <- (upstream_en + downstream_st) / 2
lines(c(upstream_en, mid), c(y0c, y0c - 0.1), lwd=1)
lines(c(mid, downstream_st), c(y0c - 0.1, y0c), lwd=1)
text(mid, y0c - 0.1, tmpex, cex=1.8)
}
}
# Trim sample names at the first underscore (display only; samplelist is
# not used again after this point).
samplelist <- sub("\\_.*", "", samplelist)
# Label the two coverage tracks: case ("-Fe") at y=0.7, control ("+Fe") at y=1.7.
axis(2, at=c(0.7, 1.7),labels=c("-Fe", "+Fe"), cex.axis=2, tick = FALSE, las=1)
##### whole gene structure
par(mar=c(4,12,0,3))
# Read both transcript GTFs; column V9 holds "gene_id ...; transcript_id ...;"
# so field 2 after splitting on "; " is the transcript id.
gtf1 <- read.table("include.gtf", sep="\t", header=FALSE)
tmp1 <- strsplit(unique(as.vector(gtf1$V9)), "\\;\\s")
tmp1 <- tmp1[[1]][2]
tmp1 <- sub("transcript_id ", "", tmp1)
tmp1 <- sub("\\;", "", tmp1)
gtf2 <- read.table("exclude.gtf", sep="\t", header=FALSE)
tmp2 <- strsplit(unique(as.vector(gtf2$V9)), "\\;\\s")
tmp2 <- tmp2[[1]][2]
tmp2 <- sub("transcript_id ", "", tmp2)
tmp2 <- sub("\\;", "", tmp2)
tids <- c(tmp1, tmp2)
# Plot range = union of both transcripts' exon coordinates (V4/V5).
pos <- rbind(gtf1[,c("V4", "V5")],gtf2[,c("V4", "V5")])
gst <- min(pos)
gen <- max(pos)
plot(NA,NA, col="red",xlim=c(gst, gen),ylim=c(0,5),frame.plot=FALSE,yaxt="n", ylab="",xaxt="n", xlab="")
distance <- seq(gst, gen, by=(gen-gst)/2)
axis(1, at=distance,labels=round(distance,0),cex.axis=1.8, las=1)
#axis(2, at=c(1.5, 2, 4),labels=c("Domain",rev(tids)), cex.axis=1.4, tick = FALSE, las=1)
#dev.off()
# Draw each transcript via the sourced helper; the helper reads the
# globals `gtf` (exon table) and `i` (the y position of the track).
gtf <- read.table("include.gtf", sep="\t", header=FALSE)
gtf <- gtf[order(gtf$V4), ]
i=4
source(fun1)
i=2
gtf <- read.table("exclude.gtf", sep="\t", header=FALSE)
gtf <- gtf[order(gtf$V4), ]
source(fun1)
#### domain
# Domain annotations: V3 carries the Pfam accession, V4/V5 the coordinates.
gtf <- read.table("Domain.gtf", sep="\t", header=FALSE)
gidtid <- as.vector(gtf[,"V9"])
# NOTE(review): allgids is built but never used afterwards.
allgids <- NULL
tmp <- strsplit(gidtid, "\\;\\s")
for(i in 1:length(tmp))
{
allgids <- c(allgids, tmp[[i]][1])
}
allgids <- sub("gene_id ", "", allgids)
exon_type <- as.vector(gtf[,"V3"])
exon_type_uniq <- unique(exon_type)
# Draw a purple rectangle (y = 0.7..1.3) per PF00249 (Myb-like) domain hit.
for(i in 1:length(exon_type_uniq))
{
exsts <- gtf[exon_type == exon_type_uniq[i], "V4"]
exens <- gtf[exon_type == exon_type_uniq[i], "V5"]
n_ex <- length(exsts)
if(n_ex > 0)
{
for(j in 1:n_ex)
{
if(exon_type_uniq[i] == "PF00249")
{
rect(exsts[j], 1-0.3, exens[j], 1+0.3, col="purple",border="purple")
}
}
}
}
# Build the legend incrementally: feature colours always, plus the domain
# entry only when a PF00249 hit is present.
legend1 <- NULL
legend2 <- NULL
legend3 <- NULL
legend1 <- c(legend1, c('5UTR','CDS','3UTR'))
legend2 <- c(legend2, c(15,15,15))
legend3 <- c(legend3, c("green","blue","red"))
if(is.element('PF00249', exon_type_uniq))
{
legend1 <- c(legend1, 'Myb-like DNA-binding')
legend2 <- c(legend2, 15)
legend3 <- c(legend3, "purple")
}
legend("bottomright",inset=0, yjust=-2, pt.cex=1.2, x.intersp=0.5, legend=legend1, pch=legend2 ,box.col="NA", col=legend3, horiz=TRUE, cex=1.2)
dev.off()
|
55c253afc9de683f2d960eca3e2a1f8decad4852 | 0dd9adf043a76b5decf0b1e61eb2e68cde0e2762 | /2_1_6 - tidyverse - Grammar of Data Analysis.R | 648675c4a03aa1fcd8cac720ecb8f66dbc7d114b | [] | no_license | MISK-DSI-2020-01/Misk_Learn_R_Solutions | bbc732f1ecd4ae2100bbb25cffccb1c1bfad1bee | 6726256ba96e4b2e476cd178c6545d062f2035bb | refs/heads/main | 2022-12-25T16:14:42.641284 | 2020-10-06T12:45:11 | 2020-10-06T12:45:11 | 301,722,568 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 384 | r | 2_1_6 - tidyverse - Grammar of Data Analysis.R | # 2.1.6 tidyverse - Grammar of Data Analysis
# Misk Academy Data Science Immersive, 2020
# Using the tidy PlayData dataframe, try to compute an aggregation function
# according to the three scenarios, e.g. the mean of the value.
# Scenario 1: Aggregation across height & width
# Scenario 2: Aggregation across time 1 & time 2
# Scenario 3: Aggregation across type A & type B
|
58b82379dda66b57bf5d952c751a75f5e9a883e8 | 580f7956d9545fcbeddcf0c5d4c46daf717e033d | /man/print.survey_definition.Rd | 32230fcba8d023dcd563a491313895780c074b91 | [] | no_license | cturbelin/ifnBase | a6c1021de0c7d3872dcf0ea16044793d998e89be | 4f423ab2acbd5514905c12cd6ab4b1108294d5c8 | refs/heads/master | 2023-03-03T11:42:00.185716 | 2023-03-02T04:39:02 | 2023-03-02T04:39:02 | 123,335,534 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 384 | rd | print.survey_definition.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/platform.R
\name{print.survey_definition}
\alias{print.survey_definition}
\title{print survey definition}
\usage{
\method{print}{survey_definition}(x, ...)
}
\arguments{
\item{x}{a survey definition object to print}
\item{...}{extra parameters (print interface compatibility)}
}
\description{
print survey definition
}
|
036ccbfa577dccc05161536e5a770d10ebc63352 | 61ad55574d696a3ba8701ba556068a13977b6d26 | /custom_scripts/qqman.R | 367591669482ba80455f623ea9b0ffe9f823a703 | [] | no_license | aslavney/XWAS_2017 | ebcfa05b29da253a4bb1cca40c0536eb524b69dc | f1d45e44eb7c7871bc66273975f8d912f1ee669f | refs/heads/master | 2021-06-18T11:10:24.566674 | 2017-06-13T22:04:45 | 2017-06-13T22:04:45 | 88,672,310 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 724 | r | qqman.R | # Kaixiong Ye's script for generating QQ and manhattan plots from XWAS output
# Generate Manhattan and QQ plots from a GWAS/XWAS association result table.
#
# Usage: Rscript qqman.R <input of p values> <output basename>
#   input : whitespace-delimited table with a header; must contain a "P"
#           column (p-values) plus the columns qqman::manhattan() expects
#   output: <basename>.man.pdf (Manhattan plot) and <basename>.qq.pdf (QQ plot)
args <- commandArgs(trailingOnly = TRUE)
if (is.na(args[2])) {
  stop("Usage: Rscript *.R <input of p values> <output basename>\n")
}
library(qqman)
gwasfile <- args[1]
outbase <- args[2]
manfile <- paste0(outbase, ".man.pdf")
qqfile <- paste0(outbase, ".qq.pdf")
# NOTE: the original redundantly re-assigned gwasfile inside this call;
# read the input file once, using TRUE rather than the reassignable T.
gwasResults <- read.table(gwasfile, header = TRUE)
# Manhattan plot without the suggestive/genome-wide significance lines
pdf(manfile, h = 3, w = 7)
par(mar = c(4.1, 4.1, 1.1, 1.1))
manhattan(gwasResults, suggestiveline = FALSE, genomewideline = FALSE, bty = "n")
dev.off()
# Square QQ plot; both axes run just past the most significant -log10(p).
# (Renamed from `pmax` to avoid shadowing base::pmax.)
plim <- max(-log10(gwasResults$P)) + 1
pdf(qqfile, h = 3, w = 3)
par(mar = c(4.1, 4.1, 1.1, 1.1))
qq(gwasResults$P, main = "", bty = "n", xlim = c(0, plim), ylim = c(0, plim))
dev.off()
|
ef9b46e4e209fd62a489cfd663bc6e3d10f896c0 | ee7a5fbabb24da2692ff151790ab6674d5dc7b7b | /book/r_in_action/22_classification/decision_trees_classical.R | e7569e22575f53c90d713063d308d4966ca4e7c8 | [] | no_license | dataikido/tech | 8b0659e8a2beac6422862c88e5bde2ba58a2e093 | 0a41fbc1d1781bec90d5faee047d36733f636ca7 | refs/heads/master | 2021-10-20T04:20:30.846356 | 2019-02-25T19:27:19 | 2019-02-25T19:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 937 | r | decision_trees_classical.R | #
# R in action (2 ed.)
#
#
# Decision trees
# 1. Classical decision trees
# 2. Conditional inference trees
#
# Decision trees can be biased toward selecting predictors that have
# many levels or many missing values
# Load data
# example_breast.R is expected to define df.train / df.validate with a
# `class` outcome column (breast-cancer example data)
source("22_classification/example_breast.R")
# 1. Classical decision trees
# Creating a classical decision tree with rpart()
# Grows the tree
library(rpart)
# Fixed seed so the cross-validation results in cptable are reproducible
set.seed(1234)
# split="information" selects the information (entropy) splitting index
dtree <- rpart(class ~ ., data=df.train, method="class",
parms=list(split="information"))
# Complexity-parameter table and plot, used to choose the cp for pruning
dtree$cptable
plotcp(dtree)
# Prunes the tree
# cp = complexity parameter, cp=.0125 chosen from the cptable/plot above
dtree.pruned <- prune(dtree, cp=.0125)
library(rpart.plot)
# extra=104 labels nodes with class probabilities and observation percentages
prp(dtree.pruned, type = 2, extra = 104,
fallen.leaves = TRUE, main="Decision Tree")
# Classifies new cases
dtree.pred <- predict(dtree.pruned, df.validate, type="class")
# Confusion matrix: actual vs. predicted class on the validation set
dtree.perf <- table(df.validate$class, dtree.pred,
dnn=c("Actual", "Predicted"))
dtree.perf
|
2b270984ca8102d2aa32bab6a0d35ed5462f87a9 | 4a0144618a7a0fa179ecaf540467e5897451a420 | /tests/testthat/test-geom-timeline-label.R | 63530e6dcee71fba8839f2f30f5d113bfbfeb264 | [] | no_license | slava-kohut/R-Capstone-SoftDev | 556a7ca549589d27d78a2b7394c9ca5c758cb1af | 313cf407bc8084c2b470261fb90eb3f6ad4133a9 | refs/heads/master | 2020-03-20T20:41:52.487524 | 2018-07-01T18:18:00 | 2018-07-01T18:18:00 | 137,701,198 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 602 | r | test-geom-timeline-label.R | context("geom_timeline_label")
# Smoke test: geom_timeline_label() composes with ggplot()/geom_timeline()
# and yields a valid ggplot object with the expected mapping and layer count.
test_that('geom_timeline_label works correctly', {
data("eq_data")
p <- ggplot(data = eq_data %>% dplyr::filter(COUNTRY %in% c('COLOMBIA', 'MEXICO', 'USA')),
aes(x = DATE, y = COUNTRY, size = EQ_PRIMARY,
color = TOTAL_DEATHS, xmin = as.Date('1970-01-01'),
xmax = as.Date('2015-01-01'))) +
geom_timeline_label(aes(label = as.character(DATE)), nmax = 2) +
geom_timeline() +
theme_eq
# expect_that(..., is_a(...)) is the deprecated first-edition testthat API
# (removed in testthat 3e); expect_s3_class() is the supported replacement
# with the same inherits()-based check.
expect_s3_class(p, 'ggplot')
# The x aesthetic should be mapped to the DATE column
expect_equal(p$mapping$x, as.name('DATE'))
# One layer from geom_timeline_label() plus one from geom_timeline()
expect_equal(length(p$layers), 2)
})
|
5b61ce1c5279a008b307efc9e33c169a85f1b8da | 07552777f40d6410f3f3009a2707105bb5462253 | /R/orlm.default.R | bceba76a1b7c866b0f6ee6a260eb389d8678d497 | [] | no_license | cran/ic.infer | 2bf8d9707d41d79d9cc15d9d0ce3d9f42c88493c | a09ce2542945f6d8bddc28a20f427f8e6ff7527b | refs/heads/master | 2021-01-02T09:14:10.937054 | 2018-01-26T21:01:12 | 2018-01-26T21:01:12 | 17,696,740 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,014 | r | orlm.default.R | orlm.default <- function (model, ui, ci = NULL, index = NULL,
meq = 0, tol = sqrt(.Machine$double.eps), df.error = NULL, ...)
{
## Default method for order-restricted linear regression when `model` is
## not an lm fit but the (g+1)x(g+1) covariance matrix of (y, x1, ..., xg):
## the unrestricted coefficients are recovered from the covariance matrix
## and then projected onto the restriction set ui %*% beta >= ci (first
## `meq` rows treated as equalities) via quadratic programming (solve.QP).
## this program is used, if model is a covariance matrix
## check model: must be a square, positive definite matrix
if (!(is.matrix(model)))
stop("ERROR: model must be of class lm or a covariance matrix.")
else
if (!(nrow(model)==ncol(model)))
stop("ERROR: If it is not a linear model, model must be a quadratic matrix.")
else
if (!(all(eigen(model,TRUE,only.values=TRUE)$values>0)))
stop("ERROR: matrix model must be positive definite.")
## g = number of regressors; row/column 1 of `model` belongs to y
g <- nrow(model)-1
## df.error cannot be deduced from a covariance matrix, so it is mandatory
if (is.null(df.error))
stop("ERROR: df.error is required, when working from a covariance matrix")
if (!(df.error>2))
stop("ERROR: df.error must be at least 2.")
if (is.null(index)) index <- 1:g
else {
if (!is.vector(index)) stop("Index must be a vector of integer numbers.")
if (min(index) < 2)
stop("No restrictions on intercept possible, when working from a covariance matrix.")
## shift user-facing positions (2..g+1) to regressor positions (1..g)
index <- index - 1
}
### no intercept information obtainable from covariance matrix
### i.e. index refers to x1 (2) to xp (p+1)
## preliminary calculations
## V: inverse of the regressor covariance block scaled by df.error;
## s2*V is used as the coefficient covariance (Sigma) in the output below
V <- solve(model[2:(g+1),2:(g+1)])/df.error ## works also, if sigma^2=0
if (is.null(colnames(V))) colnames(V) <- paste("X",1:g,sep="")
## unrestricted coefficients b solve S_xx b = S_xy
b <- solve(model[2:(g+1),2:(g+1)],model[2:(g+1),1])
var.y <- model[1,1]
## s2: residual variance of the unrestricted fit
s2 <- c(var.y - model[1,2:(g+1)]%*%b)
b <- c(b)
names(b) <- colnames(V)
## coefficient of determination of the unrestricted fit
orig.R2 <- model[1,2:(g+1)]%*%b/var.y
## check inputs
if (!(is.vector(index)))
stop("index must be a vector.")
## a single restriction may be supplied as a plain vector; coerce to 1-row matrix
if (is.vector(ui))
ui <- matrix(ui, 1, length(ui))
if (!is.matrix(ui))
stop("ui must be a matrix.")
if (!length(index) == ncol(ui))
stop("mismatch between number of columns for ui and length of index")
## default right-hand side: zero for every restriction
if (is.null(ci))
ci <- rep(0, nrow(ui))
if (!is.vector(ci))
stop("ci must be a vector.")
if (!nrow(ui) == length(ci))
stop("mismatch between number of rows in ui and elements in ci")
## restrictions must be linearly independent (RREF = reduced row echelon form)
hilf <- RREF(t(ui))
if (hilf$rank < nrow(ui))
stop(paste("Matrix ui must have full row-rank (choose e.g. rows",
paste(hilf$pivot, collapse = " "), ")."))
## expand ui by 0 columns, if necessary: uiw gets one column per regressor,
## with the supplied columns placed at positions `index`
uiw <- ui
if (length(index) > g | max(index) > g)
stop(paste("index must be vector of index positions, at most of length ",
g))
uiw <- matrix(0, nrow(ui), g)
uiw[, index] <- ui
## inequality restrictions only, all fulfilled:
## the unrestricted estimate already satisfies everything; return it as is
if (all(uiw %*% b - ci >= 0 * ci) & meq == 0) {
aus <- list(b.unrestr = b, b.restr = b,
R2 = orig.R2, residuals = NULL, fitted.values = NULL,
weights = NULL, orig.R2 = orig.R2,
df.error = df.error, s2 = s2, Sigma = s2*V,
origmodel = NULL, ui = ui, ci = ci, iact = NULL,
restr.index = index, meq = meq, bootout = NULL)
}
else {
## equality restrictions involved or some inequality restrictions violated
## calculate restricted estimate via quadratic programming:
## minimize (beta - b)' V^-1 (beta - b) subject to uiw %*% beta >= ci
aus <- solve.QP(Dmat = solve(V), dvec = solve(V, b),
Amat = t(uiw), bvec = ci, meq = meq)
names(aus$solution) <- names(b)
## zero out numerically negligible coefficients
aus$solution[abs(aus$solution) < tol] <- 0
## initialize output list (iact = indices of the active restrictions)
aus <- list(b.restr = aus$solution, b.unrestr = b,
R2 = NULL, residuals = NULL, fitted.values = NULL,
weights = NULL, orig.R2 = orig.R2,
df.error = df.error, s2 = s2, Sigma = s2*V,
origmodel = NULL, ui = ui, ci = ci, iact = aus$iact,
restr.index = index, meq = meq, bootout = NULL)
### R2 of the restricted fit
aus$R2 <- model[1,2:(g+1)]%*%t(t(aus$b.restr))/var.y
}
class(aus) <- c("orlm", "orest")
aus
}
|
72272c9f39fdfcefcb31c2bc27783b932cc3ea7c | 8bf41dcf04b0ad00f97e493516f2635593b55ce6 | /man/plotDistribution.Rd | 1b4a554c6750f68e503ab302d197af96ab8cd4a3 | [] | no_license | liangdp1984/psichomics | 994942825246d71fc86a88a93b582f18c2cb7e57 | 3c039cf6f68722e0a1def624c607aca18c8910ec | refs/heads/master | 2023-01-24T01:20:49.789841 | 2020-11-23T12:33:22 | 2020-11-23T12:33:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,271 | rd | plotDistribution.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis.R
\name{plotDistribution}
\alias{plotDistribution}
\title{Plot distribution using a density plot}
\usage{
plotDistribution(
data,
groups = NULL,
rug = TRUE,
vLine = TRUE,
...,
title = NULL,
psi = NULL,
rugLabels = FALSE,
rugLabelsRotation = 0,
legend = TRUE,
valueLabel = NULL
)
}
\arguments{
\item{data}{Numeric, data frame or matrix: gene expression data or
alternative splicing event quantification values (sample names are based on
their \code{names} or \code{colnames})}
\item{groups}{List of sample names or vector containing the group name per
\code{data} value (read Details); if \code{NULL} or a character vector of
length 1, \code{data} values are considered from the same group}
\item{rug}{Boolean: show rug plot?}
\item{vLine}{Boolean: plot vertical lines (including descriptive statistics
for each group)?}
\item{...}{
Arguments passed on to \code{\link[stats:density]{stats::density.default}}
\describe{
\item{\code{bw}}{the smoothing bandwidth to be used. The kernels are scaled
such that this is the standard deviation of the smoothing kernel.
(Note this differs from the reference books cited below, and from S-PLUS.)
\code{bw} can also be a character string giving a rule to choose the
bandwidth. See \code{\link[stats]{bw.nrd}}. \cr The default,
\code{"nrd0"}, has remained the default for historical and
compatibility reasons, rather than as a general recommendation,
where e.g., \code{"SJ"} would rather fit, see also Venables and
Ripley (2002).
The specified (or computed) value of \code{bw} is multiplied by
\code{adjust}.
}
\item{\code{adjust}}{the bandwidth used is actually \code{adjust*bw}.
This makes it easy to specify values like \sQuote{half the default}
bandwidth.}
\item{\code{kernel}}{a character string giving the smoothing kernel
to be used. This must partially match one of \code{"gaussian"},
\code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"},
\code{"biweight"}, \code{"cosine"} or \code{"optcosine"}, with default
\code{"gaussian"}, and may be abbreviated to a unique prefix (single
letter).
\code{"cosine"} is smoother than \code{"optcosine"}, which is the
usual \sQuote{cosine} kernel in the literature and almost MSE-efficient.
However, \code{"cosine"} is the version used by S.
}
\item{\code{window}}{a character string giving the smoothing kernel
to be used. This must partially match one of \code{"gaussian"},
\code{"rectangular"}, \code{"triangular"}, \code{"epanechnikov"},
\code{"biweight"}, \code{"cosine"} or \code{"optcosine"}, with default
\code{"gaussian"}, and may be abbreviated to a unique prefix (single
letter).
\code{"cosine"} is smoother than \code{"optcosine"}, which is the
usual \sQuote{cosine} kernel in the literature and almost MSE-efficient.
However, \code{"cosine"} is the version used by S.
}
\item{\code{weights}}{numeric vector of non-negative observation weights,
hence of same length as \code{x}. The default \code{NULL} is
equivalent to \code{weights = rep(1/nx, nx)} where \code{nx} is the
length of (the finite entries of) \code{x[]}.}
\item{\code{width}}{this exists for compatibility with S; if given, and
\code{bw} is not, will set \code{bw} to \code{width} if this is a
character string, or to a kernel-dependent multiple of \code{width}
if this is numeric.}
\item{\code{give.Rkern}}{logical; if true, \emph{no} density is estimated, and
the \sQuote{canonical bandwidth} of the chosen \code{kernel} is returned
instead.}
\item{\code{n}}{the number of equally spaced points at which the density is
to be estimated. When \code{n > 512}, it is rounded up to a power
of 2 during the calculations (as \code{\link[stats]{fft}} is used) and the
final result is interpolated by \code{\link[stats]{approx}}. So it almost
always makes sense to specify \code{n} as a power of two.
}
\item{\code{from}}{the left and right-most points of the grid at which the
density is to be estimated; the defaults are \code{cut * bw} outside
of \code{range(x)}.}
\item{\code{to}}{the left and right-most points of the grid at which the
density is to be estimated; the defaults are \code{cut * bw} outside
of \code{range(x)}.}
\item{\code{cut}}{by default, the values of \code{from} and \code{to} are
\code{cut} bandwidths beyond the extremes of the data. This allows
the estimated density to drop to approximately zero at the extremes.}
}}
\item{title}{Character: plot title}
\item{psi}{Boolean: are \code{data} composed of PSI values? If \code{NULL},
\code{psi = TRUE} if all \code{data} values are between 0 and 1}
\item{rugLabels}{Boolean: plot sample names in the rug?}
\item{rugLabelsRotation}{Numeric: rotation (in degrees) of rug labels; this
may present issues at different zoom levels and depending on the proximity
of \code{data} values}
\item{legend}{Boolean: show legend?}
\item{valueLabel}{Character: label for the value (by default, either
\code{Inclusion levels} or \code{Gene expression})}
}
\value{
\code{highchart} object with density plot
}
\description{
The tooltip shows the median, variance, maximum, minimum and number of non-NA
samples of each data series (if \code{data} contains names or column names,
those will be used as sample names and also appear in the tooltip).
}
\details{
Argument \code{groups} can be either:
\itemize{
\item{a list of sample names, e.g.
\code{list("Group 1"=c("Sample A", "Sample B"), "Group 2"=c("Sample C")))}}
\item{a character vector with the same length as \code{data}, e.g.
\code{c("Sample A", "Sample C", "Sample B")}.}
}
}
\examples{
data <- sample(20, rep=TRUE)/20
groups <- paste("Group", c(rep("A", 10), rep("B", 10)))
names(data) <- paste("Sample", 1:20)
plotDistribution(data, groups)
# Using colours
attr(groups, "Colour") <- c("Group A"="pink", "Group B"="orange")
plotDistribution(data, groups)
}
\seealso{
Other functions to perform and plot differential analyses:
\code{\link{diffAnalyses}()}
}
\concept{functions to perform and plot differential analyses}
|
057bad4fd9d00af7c92c4f5e35bfc3042a8ffd57 | 68d1df29f7798d633c730766f7e5670bd001dcac | /man/get.pi.permute.Rd | e13b63bb5821f06931b7bf98a9a6312c6790a037 | [] | no_license | shauntruelove/IDSpatialStats | 1c221becdf5795602b388e76d7649df88fd46f7a | 2782d6dcc9ee4be9855b5e468ce789425b81d49a | refs/heads/master | 2020-04-25T07:52:29.132425 | 2018-12-17T18:00:34 | 2018-12-17T18:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,042 | rd | get.pi.permute.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatialfuncs.r
\name{get.pi.permute}
\alias{get.pi.permute}
\title{get the null distribution of the \code{get.pi} function}
\usage{
get.pi.permute(posmat, fun, r = 1, r.low = rep(0, length(r)),
permutations)
}
\arguments{
\item{posmat}{a matrix with columns type, x and y}
\item{fun}{the function to evaluate}
\item{r}{the series of spatial distances we are interested in}
\item{r.low}{the low end of each range....0 by default}
\item{permutations}{the number of permute iterations}
}
\value{
pi values for all the distances we looked at
}
\description{
Performs permutations to calculate the null distribution of \code{get.pi}
under the assumption of no spatial dependence, by randomly reassigning
coordinates to each observation \code{permutations} times.
}
\seealso{
Other get.pi: \code{\link{get.pi.bootstrap}},
\code{\link{get.pi.ci}},
\code{\link{get.pi.typed.bootstrap}},
\code{\link{get.pi.typed.permute}},
\code{\link{get.pi.typed}}, \code{\link{get.pi}}
}
\concept{get.pi}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.