blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c3ad41a44274cddaf992e0716387b4ccf0bd234
|
011ac744a8e88f027b44be5972d7858722a34545
|
/Homogeneous Fleet Composition Problem.R
|
41c96fe6ce540a07b8695830ee280628867717bd
|
[] |
no_license
|
Diego-Fabbri/Homogeneous_Fleet_Composition-Problem
|
b6cb8aec52f3c02b9c154b91bad134defad8be84
|
6044f7354c0c8b5b8181f752b1bd361064d38229
|
refs/heads/main
| 2023-06-20T07:00:08.213609
| 2021-07-18T10:36:25
| 2021-07-18T10:36:25
| 387,149,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,162
|
r
|
Homogeneous Fleet Composition Problem.R
|
# Homogeneous Fleet Composition Problem --------------------------------------
# Builds and solves a mixed-integer program (ompr model, solved via the
# ROI/SYMPHONY plugin) that chooses a fleet size z and, per period t, whether
# to cover demand v_[t] with the owned fleet (x[t] = 1) or by hiring, given
# per-vehicle fixed (cf), variable (cv) and hiring (ch) costs.
#Set your own working directory
# NOTE(review): hard-coded, machine-specific path — adjust before running.
setwd("C:/Users/diego/Documents/R/Projects/GitHub_Projects/Optimization/Homogeneous Fleet Composition Problem")
# Import lpSolve package
library(lpSolve)
#Import required packages (ompr)
library(dplyr)
library(ROI)
library(ROI.plugin.symphony)
library(ompr)
library(ompr.roi)
#Set fixed cost
cf <- 350
#Set variable cost
cv <- 150
#Set hiring cost
ch <- 800
#Set number of required vehicles
# v_[t] = vehicles required in period t (52 periods, i.e. one year of weeks
# by length — TODO confirm the time unit against the problem statement).
v_ <- c(12, 15, 16, 17, 17, 18, 20, 20, 21, 22, 24, 22, 20, 18, 17, 16, 14, 13, 13, 14, 15, 16, 17, 19, 21, 22, 23, 22, 24, 26, 27, 28, 30, 32, 32, 30, 29, 28, 26, 25, 25, 24, 22, 22, 19, 20, 18, 17, 16, 16, 14, 13)
#Get max value of v
v_max <- max(v_)
#Set problem's size
n <- length(v_)
N <- n
# The formulation below is only meaningful when owning a vehicle
# (cf + cv) is cheaper than hiring one (ch); otherwise skip.
if(cf + cv < ch) {
print("Cost condition holds")
#Build Model
Model <- MIPModel() %>%
add_variable(x[t], t = 1:n, type = "binary") %>% #define variables
# y[t] linearizes the bilinear product z * x[t]; see the last three
# constraints below (standard big-M linearization with M = v_max).
add_variable(y[t], t = 1:n, type = "integer", lb = 0, ub = v_max) %>%
add_variable(z, type = "integer", lb = 0, ub = v_max) %>%
add_variable(dummy, type = "integer", lb = 1, ub = 1) %>% #dummy variable
set_objective(N*cf*z + N*z*cv -N*z*ch +
sum_expr(v_[t]*x[t]*cv, t = 1:n) +
sum_expr(-y[t]*cv, t = 1:n) +
sum_expr(v_[t]*ch*dummy, t = 1:n) +
sum_expr(-v_[t]*x[t]*ch, t = 1:n) +
sum_expr(y[t]*ch, t = 1:n), "min") %>% #define objective
add_constraint(z >= v_[t] -v_max + v_max*x[t], t = 1:n) %>% #define constraints
add_constraint(z <= v_[t] +v_max*x[t], t = 1:n) %>%
add_constraint(y[t] <= z, t = 1:n) %>%
add_constraint(y[t] <= v_max*x[t], t = 1:n) %>%
add_constraint(y[t] >= z -v_max + v_max*x[t], t = 1:n) %>%
solve_model(with_ROI(solver = "symphony", verbosity = 1))
#Model summary
##Status
print(paste("Model status is:", Model$status))
##Objective Function
print(paste("Objective value:", objective_value(Model)))
##Solution
print(paste("Variable v:", get_solution(Model, z)))
}else{
print("Cost condition does not hold")
}
|
99bbc3840c1a9476ebb39202f84d2b67ba7649d2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sbpiper/examples/sbpiper_pe.Rd.R
|
6186f3b0da2d502b0129220b2e63b6492db16225
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,480
|
r
|
sbpiper_pe.Rd.R
|
# Extracted example for sbpiper::sbpiper_pe(): writes the bundled insulin
# receptor parameter-estimation fits to CSV files under "pe_datasets", then
# runs the full parameter-estimation post-processing pipeline, emitting plots
# to "pe_plots" and summary CSVs back into "pe_datasets".
library(sbpiper)
### Name: sbpiper_pe
### Title: Main R function for SBpipe pipeline: parameter_estimation().
### Aliases: sbpiper_pe
### ** Examples
## No test:
# Working directories for the example's inputs and outputs.
dir.create(file.path("pe_datasets"))
dir.create(file.path("pe_plots"))
data(insulin_receptor_best_fits)
write.table(insulin_receptor_best_fits,
file=file.path("pe_datasets", "best_fits.csv"),
row.names=FALSE)
data(insulin_receptor_all_fits)
write.table(insulin_receptor_all_fits,
file=file.path("pe_datasets", "all_fits.csv"),
row.names=FALSE)
sbpiper_pe(model="ir_beta",
finalfits_filenamein=file.path("pe_datasets", "best_fits.csv"),
allfits_filenamein=file.path("pe_datasets", "all_fits.csv"),
plots_dir="pe_plots",
data_point_num=33,
fileout_param_estim_best_fits_details=file.path("pe_datasets",
"param_estim_best_fits_details.csv"),
fileout_param_estim_details=file.path("pe_datasets",
"param_estim_details.csv"),
fileout_param_estim_summary=file.path("pe_datasets",
"param_estim_summary.csv"),
best_fits_percent=50,
plot_2d_66cl_corr=TRUE,
plot_2d_95cl_corr=TRUE,
plot_2d_99cl_corr=TRUE,
logspace=TRUE,
scientific_notation=TRUE)
## End(No test)
|
dd8defc2b5bd504e26b7c5fd3f476d9e25192650
|
7db1059fad04b646a1ef84950b5df8d5163fa9a0
|
/tests/testthat/test-symncmp.R
|
8f1f5fc730684c6dd95c500423ab232a101afa40
|
[
"MIT"
] |
permissive
|
Blockhead-yj/cognitive_training
|
bd3a615f84b7c6bd37a72e033f7c23355a9c7068
|
309926437a521f08b5cd9a817b4243f118451a88
|
refs/heads/master
| 2022-12-26T13:00:54.092301
| 2020-05-16T14:53:52
| 2020-05-16T14:53:52
| 303,683,930
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,130
|
r
|
test-symncmp.R
|
# testthat unit tests for dataprocr2::symncmp().
# Two scenarios: (1) a known-good JSON fixture must yield the five named
# summary statistics with pinned values; (2) an input missing the required
# variables must warn and return NA-filled statistics with is_normal = FALSE.
context("`symncmp` test")
library(dataprocr2)
test_that("`symncmp` should deal with normal and abnormal data", {
# testing on normal data
# The fixture is resolved relative to tests/testthat/.
expect_silent(
result_symncmp <- symncmp(
jsonlite::read_json("data_symncmp.json", simplifyVector = TRUE)
)
)
# Contract: a result always carries exactly these five components.
expect_named(result_symncmp, c("pc", "mrt", "dist_eff", "dist_eff_adj", "is_normal"))
expect_equal(result_symncmp$pc, 0.93, tolerance = 0.01)
expect_equal(result_symncmp$mrt, 568.373, tolerance = 0.001)
expect_equal(result_symncmp$dist_eff, 5.80, tolerance = 0.01)
expect_equal(result_symncmp$dist_eff_adj, 0.0102, tolerance = 1e-4)
expect_true(result_symncmp$is_normal)
# testing on abnormal data input
expect_warning(
result_abnormal <- symncmp(data.frame()),
"`Small`, `Big`, `RT` and `ACC` variables are required."
)
# Same shape as the normal case, but every statistic degrades to NA.
expect_named(result_abnormal, c("pc", "mrt", "dist_eff", "dist_eff_adj", "is_normal"))
expect_true(is.na(result_abnormal$pc))
expect_true(is.na(result_abnormal$mrt))
expect_true(is.na(result_abnormal$dist_eff))
expect_true(is.na(result_abnormal$dist_eff_adj))
expect_false(result_abnormal$is_normal)
})
|
7c46adf38a39b7ceb34befc42cd701c067aec393
|
40060cbadf4e2b56d0d28034ce0cbecafa6d88a4
|
/man/runORF.Rd
|
0c7c28f78016e8707a1d61165d5f1b4ec4343d33
|
[] |
no_license
|
meerapatelmd/OncoRegimenFinder
|
a1f573965e43eb70f695105291a877285e7e8336
|
abd2843999e1dcd375f991206f25ba3b030ba95b
|
refs/heads/main
| 2023-02-06T17:15:03.450092
| 2020-12-25T00:11:05
| 2020-12-25T00:11:05
| 319,721,049
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,412
|
rd
|
runORF.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runORF.R
\name{runORF}
\alias{runORF}
\title{Run OncoRegimenFinder}
\usage{
runORF(
conn,
cdmDatabaseSchema,
writeDatabaseSchema,
vocabularyTable,
drugExposureIngredientTable,
cohortTable,
regimenTable,
regimenIngredientTable,
verbose = TRUE,
progressBar = TRUE,
renameTable = FALSE
)
}
\arguments{
\item{conn}{a database connection object}
\item{cdmDatabaseSchema}{OMOP CDM source schema}
\item{writeDatabaseSchema}{schema to write to}
\item{vocabularyTable}{Vocabulary Table}
\item{drugExposureIngredientTable}{Drug Exposure Table}
\item{cohortTable}{Cohort Table}
\item{regimenTable}{Regimen Table}
\item{regimenIngredientTable}{Regimen Ingredient Table}
\item{verbose}{If TRUE, prints details of the operations being performed as they are being executed.}
\item{progressBar}{If TRUE, prints a progress bar to the console that tracks the write table process.}
\item{renameTable}{Logical; defaults to \code{FALSE}.}
}
\description{
This function runs OncoRegimenFinder from start to finish.
}
\seealso{
\code{\link[progress]{progress_bar}}
\code{\link[secretary]{typewrite}}
\code{\link[OncoRegimenFinder]{createVocabTable}},\code{\link[OncoRegimenFinder]{buildIngredientExposuresTable}},\code{\link[OncoRegimenFinder]{buildCohortRegimenTable}},\code{\link[OncoRegimenFinder]{processRegimenTable}},\code{\link[OncoRegimenFinder]{createRegimenIngrTable}}
}
|
6c3d0e13d366f2efddca6bf498d4cedafdd11fb8
|
bf7aaee95ec3383693e16976af58f218cea4ead9
|
/igr-app/ui.R
|
b375e10e4b8d60664265c28cdc5fea8844978d02
|
[] |
no_license
|
BroVic/igr-app
|
7c624051d66993be670e0695665d1c7c1f4ed090
|
baf82e5e311f68c5f01ea71071fac8c7ddbb926b
|
refs/heads/master
| 2021-07-08T11:39:02.813913
| 2016-11-17T16:01:34
| 2016-11-17T16:01:34
| 58,225,115
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 494
|
r
|
ui.R
|
# ui.R -- Shiny UI for the IGR app: a logo header, a sidebar with a location
# selector (populated from the tidied IGR data), and a single plot panel.
library(shiny)

# Tidied IGR data; only the levels of its `off` factor are needed here,
# to populate the location drop-down.
igrbank <- readRDS(file = "data/igr-tidy.rds")

header <- titlePanel(
  img(src = "nesrea_logo.png", height = 100, width = 120)
)

controls <- sidebarPanel(
  helpText("Plot of IGR from various NESREA Offices"),
  selectInput(
    "state",
    label = "Location",
    choices = levels(igrbank$off),
    selected = "HQ"
  ),
  width = 3
)

# Output slot rendered by the server as output$chart1.
display <- mainPanel(
  plotOutput("chart1")
)

shinyUI(fluidPage(header, sidebarLayout(controls, display)))
|
9b1558e27d56bb41a381fc74636e2b0f849f0f64
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/taxize/examples/gnr_datasources.Rd.R
|
62d8f0669f672d4e021b59b2ce9e4d127db610d0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 554
|
r
|
gnr_datasources.Rd.R
|
# Extracted example for taxize::gnr_datasources(). All example calls are
# wrapped in "## Not run" (##D) because they hit the Global Names Resolver
# web service; only the library() call executes.
library(taxize)
### Name: gnr_datasources
### Title: Get data sources for the Global Names Resolver.
### Aliases: gnr_datasources
### Keywords: names resolve taxonomy
### ** Examples
## Not run:
##D # all data sources
##D gnr_datasources()
##D
##D # give me the id for EOL
##D out <- gnr_datasources()
##D out[out$title == "EOL", "id"]
##D
##D # Fuzzy search for sources with the word zoo
##D out <- gnr_datasources()
##D out[agrep("zoo", out$title, ignore.case = TRUE), ]
##D
##D # Output as a list
##D gnr_datasources(FALSE)
## End(Not run)
|
42deddda52ea5d92a8b4888d0b52862ece2be8ab
|
6496f94b0dacab60bc50668563e78c9378718bac
|
/MGS662-Project2.R
|
8a0dd5582e5d3c8483979882826b7a8b4abf81b2
|
[] |
no_license
|
MGS662/Project-2
|
14f40bf4c1508d8659aa45f5e3f50eaf2505e587
|
9d7444059a927cf7c3bca4d4ccd8a976cf5baefc
|
refs/heads/master
| 2020-03-16T00:39:21.199191
| 2018-05-07T22:22:20
| 2018-05-07T22:22:20
| 132,422,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,849
|
r
|
MGS662-Project2.R
|
# MGS662 Project 2: ordinary least squares on the BlogFeedback data set,
# solved as a MOSEK quadratic program (Rmosek) and benchmarked against lm().
#
# Fixes relative to the original script (which fails when run top-to-bottom):
#   * solve.ols() and its Rmosek dependency are now defined/loaded BEFORE
#     the first call to solve.ols().
#   * nrow_test / nrow_train no longer reference y.hat before it exists;
#     nrow(test.b - y.hat) equals nrow(test.b), so the subtraction is dropped.

library(Rmosek)

# Solve the OLS problem min_b ||y - X b||^2 as a MOSEK quadratic program.
#   X    - design matrix (n x p)
#   y    - response (n x 1 matrix)
#   verb - MOSEK verbosity level
# Returns the mosek() result list (solution vector in the $sol component).
solve.ols <- function(X, y, verb = 1) {
  p <- dim(X)[2]           # number of parameters needed
  # Correspondence between OLS and the quadratic program:
  xx <- crossprod(X)       # X'X = Q matrix of the quadratic program
  c <- -crossprod(X, y)    # -X'y = linear coefficients c
  xx2 <- xx
  xx2[upper.tri(xx)] <- 0  # MOSEK needs Q to be (lower) triangular
  idx <- which(xx2 != 0, arr.ind = TRUE)  # indices of nonzero elements of Q
  # Assemble the problem list in the format mosek() expects.
  blogmodel <- list()
  blogmodel$sense <- "min"                # problem sense
  blogmodel$c <- as.vector(c)             # objective coefficients
  blogmodel$qobj <- list(i = idx[, 1],
                         j = idx[, 2],
                         v = xx2[idx])    # Q as (row, col, value) triplets
  blogmodel$A <- Matrix(rep(0, p), nrow = 1, byrow = TRUE, sparse = TRUE)  # null constraint matrix
  blogmodel$bc <- rbind(blc = 0, buc = Inf)                  # constraint bounds
  blogmodel$bx <- rbind(blx = rep(-Inf, p), bux = rep(Inf, p))  # variable bounds (unconstrained)
  r <- mosek(blogmodel, opts = list(verbose = verb))  # call the MOSEK solver
  return(r)
}

# Importing the train data (machine-specific path — adjust before running).
train <- read.csv('/Users/yashahuja/Desktop/College/Sem 2/Machine learning for IT Managers IE662/BlogFeedback/blogData_train.csv', header = FALSE, sep = ",")
# Preprocessing: keep the 8 predictors and the target column V281.
features <- train[c("V51","V52","V53","V54","V56","V57","V58","V59","V281")]
# Sampling of data values 'x' and 'b', and splitting into train and test pairs
set.seed(12)
train1 <- features[sample(1:nrow(train), 5000, replace = FALSE), ]
train.rows <- sample(nrow(train1), 4000)
train.set <- train1[train.rows, ]
train.x <- train.set[c("V51","V52","V53","V54","V56","V57","V58","V59")]
train.b <- train.set[c("V281")]
test.set <- train1[-train.rows, ]
test.x <- test.set[c("V51","V52","V53","V54","V56","V57","V58","V59")]
test.b <- test.set[c("V281")]
# Converting to matrix form for the QP solver.
test.x <- as.matrix(test.x)
test.b <- as.matrix(test.b)
train.x <- as.matrix(train.x)
train.b <- as.matrix(train.b)
# Row counts used to turn RSS into MSE.
nrow_test <- nrow(test.b)
nrow_train <- nrow(train.b)
# Fit OLS via MOSEK.
rf <- solve.ols(train.x, train.b)
rf
# Predicted values, RSS and MSE for the MOSEK solution on the TEST split.
# NOTE(review): the coefficients below are hard-coded from a previous MOSEK
# run; verify they match rf's solution vector before reuse.
y.hat <- test.x[,1]*0.12630415 + test.x[,2]*0.21347815 + test.x[,3]*(-0.01587096) + test.x[,4]*(-0.14254154) + test.x[,5]*(-1.73508394) + test.x[,6]*2.19002483 + test.x[,7]*(-1.04679858) + test.x[,8]*(2.60912549)
RSSinmosek <- sum((test.b - y.hat)^2)
RSSinmosek
MSEinMosek.test <- RSSinmosek/nrow_test
MSEinMosek.test
# Same quantities for the TRAIN split.
y.hat.t <- train.x[,1]*0.12630415 + train.x[,2]*0.21347815 + train.x[,3]*(-0.01587096) + train.x[,4]*(-0.14254154) + train.x[,5]*(-1.73508394) + train.x[,6]*2.19002483 + train.x[,7]*(-1.04679858) + train.x[,8]*(2.60912549)
RSSinmosek.train <- sum((train.b - y.hat.t)^2)
RSSinmosek.train
MSEinMosek.train <- RSSinmosek.train/nrow_train
MSEinMosek.train
# Linear Regression benchmark on test.
# NOTE(review): fit.lm is trained on ALL of train1 (including the held-out
# rows), so the "test" MSE is optimistic — confirm this is intentional.
fit.lm <- lm(formula = V281 ~ ., data = data.frame(train1))
prediction <- predict(fit.lm, data.frame(test.x), se.fit = TRUE)
summary(fit.lm)
prediction
# Linear Regression benchmark on train.
fit.lm.train <- lm(formula = V281 ~ ., data = data.frame(train1))
prediction.train <- predict(fit.lm.train, data.frame(train.x), se.fit = TRUE)
summary(fit.lm.train)
prediction.train
# MSE of the linear-regression model on test.
RSS.lm <- sum((test.b - prediction$fit)^2)
RSS.lm
MSE_lm.test <- RSS.lm/nrow_test
MSE_lm.test
# MSE of the linear-regression model on train.
RSS.lm <- sum((train.b - prediction.train$fit)^2)
RSS.lm
MSE_lm.train <- RSS.lm/nrow_train
MSE_lm.train
|
4555d0f1369a5a76c31464aaa32ce6a23966bde3
|
202775810402a48a177b2c7f5b20f3bf45809342
|
/R/FUNCOES/FN_AJUSTA_COCA_LS.R
|
1ce799bebf95ddb50dc19af143ce973ab2418035
|
[] |
no_license
|
emirsmaka/CLASSIFICA_PRODUTOS
|
a4cd8db15fdc64666e169ebb4a6bf4c0256a2188
|
d96810d502bc5ffad07ad454f2879b6c8b989d40
|
refs/heads/master
| 2023-04-16T12:42:56.126200
| 2021-04-26T11:31:32
| 2021-04-26T11:31:32
| 297,635,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 361
|
r
|
FN_AJUSTA_COCA_LS.R
|
fn_cocacola <- function(x){
  # Repair rows for product code 4000601 (presumably Coca-Cola, per the
  # function name) whose volume is unrecorded (VOLUME_SEFAZ == -1): rows
  # whose description contains " ls " get volume 1 with unit "L"; all other
  # flagged rows are returned unchanged. Rows are keyed by IDNFE + DET_NITEM.
  flagged <- x %>%
    filter(CPROD_REFRIGERANTE_SEFAZ == 4000601 & VOLUME_SEFAZ == -1)
  # Case-insensitive match of " ls " inside the product description.
  ls_idx <- grep("\\sls\\s", flagged$PROD_XPROD, ignore.case = TRUE)
  # Everything that was not flagged stays as-is.
  untouched <- anti_join(x, flagged, by = c("IDNFE", "DET_NITEM"))
  ls_rows <- flagged[ls_idx, ]
  flagged <- anti_join(flagged, ls_rows, by = c("IDNFE", "DET_NITEM"))
  ls_rows$VOLUME_SEFAZ <- 1
  ls_rows$UN_MEDIDA_SEFAZ <- "L"
  rbind(untouched, flagged, ls_rows)
}
|
4262e2cbd3264d208e6f8ba2729f2de49ff6945d
|
99fa87a363c47c5304e101a1e7740e238aa01412
|
/inst/appliedPAClassificationScripts/postprocess/postprocess.R
|
ff05fa7b1321f6b11ce05ddf87083cc88743be24
|
[] |
no_license
|
elray1/PACwithDDM
|
f1d1cc6c249a687feea1dce4d45fe737a53772c4
|
01b6d323986ca05b2b6072318c0fe79d6dff7b31
|
refs/heads/master
| 2021-05-09T02:18:00.150153
| 2018-01-27T19:29:19
| 2018-01-27T19:29:19
| 119,192,480
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,237
|
r
|
postprocess.R
|
# postprocess.R ---------------------------------------------------------------
# Aggregates per-subject classification results (.Rdata files written by the
# fitting scripts under inst/results/<data_set>/<location>/<class_var>/
# <fit_method>/) into one summary data frame per data set x class-variable
# group, holding proportion correct, macro F1 and Brier score (mse_pred) for
# each subject plus an "Aggregated" row, then saves each as data/<name>.rdata.
# NOTE(review): rm(list = ls()) and error = recover are interactive-session
# conveniences; remove before running non-interactively.
rm(list = ls())
library("PACwithDDM")
library("irr")
options(warn = 2, error = recover)
# Hard-coded, machine-specific project root — adjust before running.
pacwithddm_source_location <- "C:/Stat/HMM/PACwithDDM"
combined_results_dir <- file.path(pacwithddm_source_location, "data")
results_dir_base <- file.path(pacwithddm_source_location, "inst/results")
for(data_set in c("Mannini", "SasakiLab", "SasakiFreeLiving")) {
# location_levels <- c("ankle", "wrist")
#for(data_set in c("Mannini")) {
for(class_var_group in c("Type", "Intensity")) {
# One row per (location, class_var, fit_method, subject); filled in below.
results_df <- data.frame(location = NA, class_var = NA, fit_method = NA, case = NA, subject = NA,
prop_correct = NA, F1_score_macro = NA, mse_pred = NA)
row_num <- 1
for(location in c("ankle", "wrist")) {
# Pick the subject count N, the class variable(s) and the data object for
# this data set / location combination.
if(identical(data_set, "SasakiFreeLiving")) {
N <- 15
if(identical(class_var_group, "Type")) {
class_var_levels <- "y_category3"
} else {
class_var_levels <- c("y_intensity")
}
if(identical(location, "ankle")) {
fit_data <- SasakiFreeLivingAnkle
} else if(identical(location, "hip")) {
fit_data <- SasakiFreeLivingHip
} else if(identical(location, "wrist")) {
fit_data <- SasakiFreeLivingWrist
}
} else if(identical(data_set, "SasakiLab")) {
if(identical(location, "ankle")) {
N <- 34
} else {
N <- 35
}
if(identical(class_var_group, "Type")) {
class_var_levels <- "y_category3"
} else {
class_var_levels <- "y_intensity"
}
if(identical(location, "ankle")) {
fit_data <- SasakiLabAnkle
} else if(identical(location, "hip")) {
fit_data <- SasakiLabHip
} else if(identical(location, "wrist")) {
fit_data <- SasakiLabWrist
}
} else if(identical(data_set, "Mannini")) {
N <- 33
if(identical(class_var_group, "Type")) {
class_var_levels <- c("y_category4")
} else {
class_var_levels <- c("y_intensity")
}
if(identical(location, "ankle")) {
fit_data <- ManniniAnkleCorrected
} else if(identical(location, "wrist")) {
fit_data <- ManniniWristCorrected
}
}
for(class_var in class_var_levels) {
# Number of class labels for this class variable.
if(class_var %in% c("y_category3", "y_category4")) {
num_classes <- 4
} else if(identical(class_var, "y_intensity")) {
if(data_set %in% c("SasakiFreeLiving")) {
num_classes <- 5
} else {
num_classes <- 4
}
} else {
num_classes <- 6
}
all_fit_methods <- c("normalFMM", "normalHMM", "RF", "parametricBoostMLR", "parametricBoostCRF")
results_dir <- file.path(results_dir_base, data_set, location, class_var)
for(fit_method in all_fit_methods) {
# Accumulators across subjects; the matrices start with a sentinel NA row
# that is stripped before the aggregated Brier score is computed.
aggregated_confusion_matrix <- matrix(0, nrow = num_classes, ncol = num_classes)
combined_y_true_as_matrix <- matrix(NA, nrow = 1, ncol = num_classes)
combined_est_class_probs <- matrix(NA, nrow = 1, ncol = num_classes)
for(subject in seq_len(N)) {
# load() brings confusion_matrix, log_class_probs and prop_correct for
# this subject into scope from the per-subject results file.
results_filename <- paste0("results_subject", subject, ".Rdata")
load(file.path(results_dir_base, data_set, location, class_var, fit_method, results_filename))
# The stored confusion matrix is in sparse (row, col, count) form;
# expand it into a dense num_classes x num_classes matrix.
temp <- as.data.frame(confusion_matrix)
inds <- as.matrix(temp[, 1:2])
# if(fit_method %in% c("normalFMM", "normalHMM", "parametricBoostMLR", "parametricBoostCRF") && identical(class_var_group, "Type") && data_set %in% c("SasakiLab", "SasakiFreeLiving")) {
# if(fit_method %in% c("normalFMM", "normalHMM", "parametricBoostMLR", "parametricBoostCRF") && identical(class_var_group, "Intensity") && data_set %in% c("SasakiFreeLiving")) {
# inds[, 1] <- as.character(as.integer(inds[, 1]) - 1L)
# } else if(fit_method %in% c("RF") && identical(class_var_group, "Intensity") && data_set %in% c("SasakiFreeLiving")) {
# inds[, 1] <- as.character(as.integer(inds[, 1]) - 1L)
# inds[, 2] <- as.character(as.integer(inds[, 2]) - 1L)
# }
if(identical(data_set, "SasakiLab") && identical(class_var_group, "Intensity")) {
inds[inds == "5"] <- "4" # "Vigorous" intensity was not a possible level, but this was treated differently in two different variables..........
}
storage.mode(inds) <- "integer"
vals <- temp[, 3]
confusion_matrix <- matrix(0, nrow = num_classes, ncol = num_classes)
confusion_matrix[inds] <- temp[, 3]
aggregated_confusion_matrix <- aggregated_confusion_matrix + confusion_matrix
# Per-class true/false positive/negative counts (rows = truth, cols = prediction).
tp_by_class <- diag(confusion_matrix)
tn_by_class <- sapply(seq_along(tp_by_class), function(ind) sum(tp_by_class[-ind]))
fp_by_class <- apply(confusion_matrix, 2, sum) - tp_by_class
fn_by_class <- apply(confusion_matrix, 1, sum) - tp_by_class
# calculate macro F1 (classes absent from the truth are dropped)
to_keep <- (apply(confusion_matrix, 1, sum) > 0)
precision_by_class <- tp_by_class[to_keep] / (tp_by_class[to_keep] + fp_by_class[to_keep])
precision_by_class[is.na(precision_by_class)] <- 0
recall_by_class <- tp_by_class[to_keep] / (tp_by_class[to_keep] + fn_by_class[to_keep])
recall_by_class[is.na(recall_by_class)] <- 0
precision_macro <- mean(precision_by_class)
recall_macro <- mean(recall_by_class)
F1_score_macro <- 2 * precision_macro * recall_macro / (precision_macro + recall_macro)
# calculate MSE / Brier score; boosted methods store probabilities per-sequence
if(fit_method %in% c("parametricBoostCRF", "parametricBoostMLR")) {
est_class_probs <- exp(log_class_probs[[1]])
} else {
est_class_probs <- exp(log_class_probs)
}
# One-hot encode the true labels for this subject.
y_true_as_matrix <- matrix(0, nrow = length(fit_data[[subject]][[class_var]]), ncol = num_classes)
y_true_cols <- as.integer(fit_data[[subject]][[class_var]])
if(identical(class_var_group, "Intensity") && data_set %in% c("SasakiFreeLiving")) {
y_true_cols <- y_true_cols - 1L
}
y_true_as_matrix[cbind(seq_along(fit_data[[subject]][[class_var]]), y_true_cols)] <- 1
mse_pred <- sum((y_true_as_matrix - est_class_probs)^2) / length(fit_data[[subject]][[class_var]])
combined_y_true_as_matrix <- rbind(combined_y_true_as_matrix, y_true_as_matrix)
combined_est_class_probs <- rbind(combined_est_class_probs, est_class_probs)
results_df[row_num, ] <- list(location = location, class_var = class_var, fit_method = fit_method,
case = NA, subject = as.character(subject),
prop_correct = prop_correct, F1_score_macro = F1_score_macro, mse_pred = mse_pred)
row_num <- row_num + 1
}
## add summary statistics for aggregated values across subjects
tp_by_class <- diag(aggregated_confusion_matrix)
tn_by_class <- sapply(seq_along(tp_by_class), function(ind) sum(tp_by_class[-ind]))
fp_by_class <- apply(aggregated_confusion_matrix, 2, sum) - tp_by_class
fn_by_class <- apply(aggregated_confusion_matrix, 1, sum) - tp_by_class
# calculate macro F1
to_keep <- (apply(aggregated_confusion_matrix, 1, sum) > 0)
precision_by_class <- tp_by_class[to_keep] / (tp_by_class[to_keep] + fp_by_class[to_keep])
precision_by_class[is.na(precision_by_class)] <- 0
recall_by_class <- tp_by_class[to_keep] / (tp_by_class[to_keep] + fn_by_class[to_keep])
recall_by_class[is.na(recall_by_class)] <- 0
precision_macro <- mean(precision_by_class)
recall_macro <- mean(recall_by_class)
F1_score_macro <- 2 * precision_macro * recall_macro / (precision_macro + recall_macro)
# calculate MSE / Brier score
if(fit_method %in% c("parametricBoostCRF", "parametricBoostMLR")) {
est_class_probs <- exp(log_class_probs[[1]])
} else {
est_class_probs <- exp(log_class_probs)
}
if(identical(fit_method, "SVM")) {
mse_pred <- NA
} else {
# Drop the sentinel NA first row before computing the pooled Brier score.
combined_y_true_as_matrix <- combined_y_true_as_matrix[-1, , drop = FALSE]
combined_est_class_probs <- combined_est_class_probs[-1, , drop = FALSE]
mse_pred <- sum((combined_y_true_as_matrix - combined_est_class_probs)^2) / nrow(combined_y_true_as_matrix)
}
# NOTE(review): prop_correct here is the LAST subject's value from load(),
# not an aggregate across subjects — confirm this is intended.
results_df[row_num, ] <- list(location = location, class_var = class_var, fit_method = fit_method,
case = NA, subject = "Aggregated",
prop_correct = prop_correct, F1_score_macro = F1_score_macro, mse_pred = mse_pred)
row_num <- row_num + 1
}
}
}
# Recode method names to short display labels and fix factor ordering.
results_df$fit_method[results_df$fit_method == "normalFMM"] <- "FMM"
results_df$fit_method[results_df$fit_method == "normalHMM"] <- "HMM"
results_df$fit_method[results_df$fit_method == "RF"] <- "RF"
results_df$fit_method[results_df$fit_method == "parametricBoostMLR"] <- "MLR"
results_df$fit_method[results_df$fit_method == "parametricBoostCRF"] <- "CRF"
results_df$fit_method <- factor(results_df$fit_method, levels = c("FMM", "HMM", "RF", "MLR", "CRF"))
results_df$location <- as.character(results_df$location)
results_df$location[results_df$location == "ankle"] <- "Ankle"
results_df$location[results_df$location == "wrist"] <- "Wrist"
for(colind in seq_len(5))
results_df[, colind] <- as.factor(results_df[, colind])
# Save under a data-set-specific name, e.g. ManniniTypeResults.rdata.
save_name <- paste0(data_set, class_var_group, "Results")
assign(save_name, results_df)
save(list = save_name, file = file.path(combined_results_dir, paste0(save_name, ".rdata")))
}
}
|
f901728d2fb688a647a7f2603abe7ad978eb2c3a
|
dbaa7726a291793aaa12fe882008562026923f75
|
/AMS206B_H3.R
|
8132b1c5657ac17d4dcf76384fed267ba396e1ff
|
[] |
no_license
|
msilva00/AMS206B
|
8b5cfdd60ed49f4ce24c015a6693eeca40efa04c
|
f77758f20f4b2db053f71c0aeee67a95c1693ba2
|
refs/heads/master
| 2020-04-28T19:13:20.020830
| 2019-07-28T20:46:50
| 2019-07-28T20:46:50
| 175,504,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,488
|
r
|
AMS206B_H3.R
|
# AMS 206B, homework 3 --------------------------------------------------------
# Problems 6a-6d: 95% credible intervals for a binomial proportion under a
# Beta(1/2, 1/2) (Jeffreys) prior, computed three ways: exact Beta quantiles,
# Laplace approximation around the MAP, and Monte Carlo sampling.
# Problem 8e: Monte Carlo posterior simulation for a normal model with
# unknown mean and variance (normal-inverse-gamma conjugate update).
#
# Fix relative to the original: the data (x, n) are now assigned BEFORE the
# posterior parameters a and b — the original computed a = alpha + x with x
# undefined, which errors in a fresh session.

# Prior hyperparameters.
alpha <- 1/2
beta <- 1/2

#### Problem 6a ####
x <- 1
n <- 10
# Posterior is Beta(a, b).
a <- alpha + x
b <- beta + n - x
a
# Exact equal-tailed 95% interval.
lower <- qbeta(0.025, a, b)
upper <- qbeta(0.975, a, b)
print(c(lower, upper))

#### Problem 6b ####
# calculate the MAP
theta_hat <- (a - 1)/(a + b - 2)
# evaluate the log kernel and its second derivative at the MAP
h <- (a - 1)*log(theta_hat) + (b - 1)*log(1 - theta_hat)
h.2 <- -(a - 1)/theta_hat^2 - (b - 1)/(1 - theta_hat)^2
# calculate the Laplace normalizing constant
# (beta(a, b) is the base beta *function*, not the prior parameter above)
Const <- exp(h)*sqrt(2*pi/(-h.2))/beta(a, b)
# compute the approximate interval
lower <- theta_hat + qnorm((1 - 0.95/Const)/2, 0, 1)/sqrt(-h.2)
upper <- theta_hat + qnorm((1 - 0.95/Const)/2, 0, 1, lower.tail = FALSE)/sqrt(-h.2)
print(c(lower, upper))

#### Problem 6c ####
# Monte Carlo interval from 10,000 posterior draws.
th <- rbeta(10000, a, b)
quantile(th, probs = c(0.025, 0.975))

#### Problem 6d ####
# Repeat for growing sample sizes with observed proportion fixed at 0.1.
n <- c(10, 100, 1000)
x <- c(1, 10, 100)
a <- alpha + x
b <- beta + n - x
lower <- qbeta(0.025, a, b)
upper <- qbeta(0.975, a, b)
exact <- cbind(lower, upper)
print(exact)
# Laplace approximation, vectorized over the three cases.
theta_hat <- (a - 1)/(a + b - 2)
h <- (a - 1)*log(theta_hat) + (b - 1)*log(1 - theta_hat)
h.2 <- -(a - 1)/theta_hat^2 - (b - 1)/(1 - theta_hat)^2
Const <- exp(h)*sqrt(2*pi/(-h.2))/beta(a, b)
lower <- theta_hat + qnorm((1 - 0.95/Const)/2, 0, 1)/sqrt(-h.2)
upper <- theta_hat + qnorm((1 - 0.95/Const)/2, 0, 1, lower.tail = FALSE)/sqrt(-h.2)
laplace <- cbind(lower, upper)
print(laplace)
# Monte Carlo, one set of 10,000 draws per case.
th1 <- rbeta(10000, a[1], b[1])
q1 <- quantile(th1, probs = c(0.025, 0.975))
th2 <- rbeta(10000, a[2], b[2])
q2 <- quantile(th2, probs = c(0.025, 0.975))
th3 <- rbeta(10000, a[3], b[3])
q3 <- quantile(th3, probs = c(0.025, 0.975))
rbind(q1, q2, q3)

#### Problem 8e ####
# set the number of observations and true value of parameters
n <- 1000
tr.th <- 5
tr.sig2 <- 1
# generate dataset
x <- rnorm(n, tr.th, sqrt(tr.sig2))
# set the number of MC samples
N.sam <- 5000
# i. set hyperparameters for fairly informative priors
th0 <- tr.th
k0 <- 0.01
a <- 1001
b <- tr.sig2/(a - 1)
# calculate posterior parameters
m <- (th0 + n*k0*mean(x))/(1 + n*k0)
alpha <- a + n/2
beta <- 1/b + sum(x^2)/2 + th0^2/(2*k0) - (th0 + n*k0*mean(x))^2/(2*k0*(1 + n*k0))
# MC simulation: draw sigma^2 from the inverse gamma, then theta | sigma^2
sig2.1 <- 1/rgamma(N.sam, alpha, beta)
th.1 <- rnorm(N.sam, m, sqrt(sig2.1/(1/k0 + n)))
# Histograms of the posterior draws; true values marked with dashed red lines.
par(mar = c(4.5, 4.5, 2.1, 2.1), mfrow = c(1, 2))
hist(th.1, col = 8, lwd = 2, main = "", cex.axis = 1.5, cex.lab = 1.5)
abline(v = tr.th, lty = 2, lwd = 3, col = 2)
hist(sig2.1, col = 8, lwd = 2, main = "", cex.axis = 1.5, cex.lab = 1.5)
abline(v = tr.sig2, lty = 2, lwd = 3, col = 2)
|
a85f98f4c0c90fd4322e05a7ef048f40c04ccdda
|
d32728e5da852d5dcf04f5952b0cc2490fd164e3
|
/man/geom_subgene_label.Rd
|
3a81e000d46dadec9995647fd591f8ea698e85e1
|
[] |
no_license
|
wilkox/gggenes
|
b116c5b07b0bc8948c2c094128d82e4e54758c6c
|
6c8e540ca94810e79ebee1fbd819a132b35a7747
|
refs/heads/master
| 2023-08-31T16:16:33.808940
| 2023-07-02T10:36:36
| 2023-07-02T10:36:36
| 83,646,997
| 391
| 52
| null | 2022-03-24T09:44:33
| 2017-03-02T07:16:19
|
R
|
UTF-8
|
R
| false
| true
| 2,275
|
rd
|
geom_subgene_label.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_subgene_label.R
\name{geom_subgene_label}
\alias{geom_subgene_label}
\title{A 'ggplot2' geom to add text labels to subgenes}
\usage{
geom_subgene_label(
mapping = NULL,
data = NULL,
stat = "identity",
position = "identity",
na.rm = FALSE,
show.legend = FALSE,
inherit.aes = TRUE,
padding.x = grid::unit(1, "mm"),
padding.y = grid::unit(0.1, "lines"),
align = "centre",
min.size = 4,
grow = F,
reflow = F,
height = grid::unit(3, "mm"),
...
)
}
\arguments{
\item{mapping, data, stat, position, na.rm, show.legend, inherit.aes, ...}{Standard
geom arguments as for \code{ggplot2::geom_text()}.}
\item{padding.x, padding.y}{\code{grid::unit()} object, giving horizontal or
vertical padding around the text. Defaults to 1 mm and 0.1 lines
respectively.}
\item{align}{Where inside the subgene to place the text label. Default is
'centre'; other options are 'left' and 'right'.}
\item{min.size}{Minimum font size, in points. If provided, text that would
need to be shrunk below this size to fit inside the subgene will not be
drawn. Defaults to 4 pt.}
\item{grow}{If \code{TRUE}, text will be grown as well as shrunk to fill the
subgene.}
\item{reflow}{If \code{TRUE}, text will be reflowed (wrapped) to better fit the
subgene.}
\item{height}{\code{grid::unit()} object giving the maximum height of the text.
Defaults to 3 mm, which is the default height of gene arrows (and therefore
of subgenes) drawn with \code{geom_gene_arrow()}.}
}
\description{
\code{geom_subgene_label()} can be used to add a text label to subgenes drawn
with \code{geom_subgene_arrow()}.
}
\details{
\code{geom_subgene_label()} uses the 'ggfittext' package to fit text to genes.
All text drawing options available in \code{ggfittext::geom_fit_text()} (growing,
reflowing, etc.) are also available here. For full details on how these
options work, see the documentation for \code{ggfittext::geom_fit_text()}.
Standard 'ggplot2' aesthetics for text are supported (see Aesthetics.)
}
\section{Aesthetics}{
\itemize{
\item xsubmin,xsubmax (start and end of the subgene; required)
\item y (molecule; required)
\item colour
\item size
\item alpha
\item family
\item fontface
\item angle
}
}
|
91c20ee35048bda06d46caa304d272d5c2f2d1e9
|
c271b5fa67f7d6ca5c248a202e24c6be87ec27f1
|
/summary_plot.R
|
f5b0e118b084eaec7f804bc550105af8259e35e5
|
[
"MIT"
] |
permissive
|
qpmnguyen/sc2meta
|
35239cba530ee5cffe4da0ee9a1fae0957c1cf9d
|
601533a5a9372db702d16102a84c246df1426635
|
refs/heads/master
| 2022-11-21T13:40:05.746636
| 2020-07-13T15:56:04
| 2020-07-13T15:56:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,021
|
r
|
summary_plot.R
|
# summary_plot.R --------------------------------------------------------------
# Builds a heatmap (horizontal and vertical variants) of normalized mean ranks
# of differential-abundance methods across five evaluation measures, from the
# precomputed summary RDS files under ./data/summary/, and writes both to SVG.
library(ggplot2)
library(plyr)
### Heatmap all
FDR_summary <- readRDS(file = "./data/summary/FDR_divergence_summary.RDS")
KS_summary <- readRDS(file = "./data/summary/KS_summary.RDS")
WMC_summary <- readRDS(file = "./data/summary/WMC_summary.RDS")
simulations_summary <- readRDS(file = "./data/summary/simulations_summary.RDS")
simulations_time_summary <- readRDS(file = "./data/summary/simulations_time_summary.RDS")
real_data_time_summary <- readRDS(file = "./data/summary/real_data_time_summary.RDS")
enrichment_summary <- readRDS(file = "./data/summary/enrichment_summary.RDS")
# Normalize each measure's rank values by its own row count so values are
# comparable across measures (0-1 scale used by the fill gradient below).
FDR_summary$value <- FDR_summary$value/nrow(FDR_summary)
KS_summary$value <- KS_summary$value/nrow(KS_summary)
WMC_summary$value <- WMC_summary$value/nrow(WMC_summary)
simulations_summary$value <- simulations_summary$value/nrow(simulations_summary)
real_data_time_summary$value <- real_data_time_summary$value/nrow(real_data_time_summary)
enrichment_summary$value <- enrichment_summary$value/nrow(enrichment_summary)
# Names become the "Parameter" (y-axis) labels via ldply's .id below.
# NOTE(review): "Enrichement" is a typo in a plotted label; fixing it changes
# figure output, so it is left as-is here.
summary_list <- list("Type I Error - Nominal vs Observed" = FDR_summary,
"Type I Error - KS" = KS_summary,
"Concordance analysis - WMC" = WMC_summary,
"Power - Enrichement analysis" = enrichment_summary,
"Computational time" = real_data_time_summary)
summary_df <- ldply(summary_list,.id = "Parameter")
methodlist = c("DESeq2_poscounts",
"DESeq2_TMM",
"DESeq2_poscounts_zinbwave",
"edgeR_TMM_standard",
"edgeR_poscounts_standard",
"edgeR_TMM_robustDisp",
"edgeR_TMM_zinbwave",
"limma_voom_TMM",
"limma_voom_TMM_zinbwave",
"ALDEx2",
"mgsZig_CSS",
"corncob_LRT",
"corncob_wald",
"songbird",
"mixMC",
"MAST",
"seurat_wilcoxon",
"scde")
summary_df$method <- factor(summary_df$method, levels = methodlist, ordered = TRUE)
# Order methods by mean normalized rank, excluding computational time and the
# two methods (songbird, mixMC) that are appended at the end unranked.
# NOTE(review): mean() has no na.action argument — the "na.omit" here is
# silently ignored, so NAs are NOT removed; verify this is intended.
ord <- order(ddply(summary_df[!summary_df$Parameter %in% c("Computational time") & !summary_df$method %in% c("songbird","mixMC"),],.variables = ~ method, .fun = function(x){
mean(x$value,na.action = "na.omit")
})[,2])
# NOTE(review): `index` is computed but never used below.
index <- which(levels(summary_df$method) %in% c("mixMC","songbird"))
methodlist_ordered <- methodlist[!methodlist %in% c("songbird","mixMC")]
methodlist_ordered <- c(methodlist_ordered[ord],c("mixMC","songbird"))
# Horizontal heatmap: methods on x, measures on y.
fig <- ggplot(summary_df,mapping = aes(x = method, y = Parameter, fill = value)) +
geom_tile(width = 0.8,height = 0.8) +
#geom_text(aes(label = round(mean,digits = 2))) +
scale_fill_distiller(palette = "RdYlBu", limits = c(0,1)) +
coord_equal() +
theme_minimal() +
ylab("Measure") + xlab("Method") + labs(fill = "Mean rank") +
ggtitle(label = "Overall normalized ranking", subtitle = "Ranked methods") +
scale_y_discrete(limits = rev(levels(summary_df$Parameter))) +
scale_x_discrete(limits = methodlist_ordered) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5),
panel.grid = element_blank(),
axis.ticks = element_blank())
fig
svg("../fig7.svg",height = 5, width = 8)
fig
dev.off()
# Vertical variant of the same heatmap: measures on x, methods on y.
fig_vertical <- ggplot(summary_df,mapping = aes(x = Parameter, y = method, fill = value)) +
geom_tile(width = 0.8,height = 0.8) +
#geom_text(aes(label = round(mean,digits = 2))) +
scale_fill_distiller(palette = "RdYlBu", limits = c(0,1)) +
coord_equal() +
theme_minimal() +
xlab("Measure") + ylab("Method") + labs(fill = "Mean rank") +
ggtitle(label = "Overall normalized ranking", subtitle = "Ranked methods") +
# scale_x_discrete(limits = rev(levels(summary_df$Parameter))) +
scale_y_discrete(limits = methodlist_ordered) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5),
panel.grid = element_blank(),
axis.ticks = element_blank())
fig_vertical
svg("../fig7_vertical.svg",height = 8, width = 5)
fig_vertical
dev.off()
|
ecf26adbadb18c8322393e613ec2e01cd18d0cd5
|
077c58c20c89820002cb183230e4fae81c3bbc11
|
/SCR/R/scr9_2.R
|
37ae52c3f0e3673600aceb100d94a6017958bfe6
|
[] |
no_license
|
Creatran/SCR
|
a4b80a45a236f791b39c9387d39690a93bdfc0e1
|
4ccad3eedc6e661616d2c879f662263295fb7bc7
|
refs/heads/master
| 2023-01-19T16:47:08.079379
| 2020-11-30T19:54:13
| 2020-11-30T19:54:13
| 251,180,799
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,283
|
r
|
scr9_2.R
|
#' scr9_2
#' @description
#' The solution to exercise 9_2 from Rizzo's book.
#'
#' Repeat Example 9.1 using a Gamma proposal distribution Y
#' (shape parameter Xt and rate parameter 1).
#'
#' @references Maria L. Rizzo. (2016). Statistical computing with r.
#'
#' @param m a number indicating the iteration numbers. Defaults to 10000.
#' @param sigma a number indicating to the sigma parameter related to Rayleigh distribution.
#' Defaults to 4.
#'
#' @return Invisibly, the numeric Metropolis-Hastings chain of length \code{m}.
#' As a side effect, plots a 500-iteration window from the middle of the
#' chain for the target Rayleigh distribution with parameter \code{sigma}.
#' @export
#'
#' @examples scr9_2()
#'
#'
#'
scr9_2 <- function(m = 10000, sigma = 4){
  # Target density: Rayleigh(sigma); zero on the negative half-line.
  f <- function(x, sigma) {
    if (any(x < 0)) return (0)
    stopifnot(sigma > 0)
    return((x / sigma^2) * exp(-x^2 / (2*sigma^2)))
  }

  x <- numeric(m)
  x[1] <- rgamma(1, 1)   # initial state drawn from Gamma(1, 1)
  k <- 0                 # rejection counter (diagnostic only, not returned)
  u <- runif(m)          # pre-drawn uniforms for the accept/reject step

  for (i in 2:m) {
    xt <- x[i-1]
    y <- rgamma(1, xt)   # proposal Y ~ Gamma(shape = xt, rate = 1)
    # MH acceptance ratio with the asymmetric-proposal correction terms.
    num <- f(y, sigma) * dgamma(xt, y)
    den <- f(xt, sigma) * dgamma(y, xt)
    if (u[i] <= num/den) x[i] <- y else {
      x[i] <- xt
      k <- k+1 #y is rejected
    }
  }

  # Plot a ~500-draw window from the middle of the chain. The original code
  # hard-coded 5000:5500, which indexes out of bounds (and breaks plot())
  # whenever m < 5500; this window generalizes to any m while reproducing
  # the original 5000:5500 window for the default m = 10000.
  start <- max(1, floor(m/2))
  index <- start:min(m, start + 500)
  y1 <- x[index]
  plot(index, y1, type="l",
       main=paste0("Metropolis-Hastings sampler with sigma = ", sigma), ylab="x")

  invisible(x)
}
|
b4dff6d3e30351f9bfbea6271369622abaa332d0
|
fb91e76054b3b48c6d43e855875dd67b57724032
|
/test.R
|
7945b339f772daaadc94597c1894e81a7164d7aa
|
[
"Apache-2.0"
] |
permissive
|
hbahamonde/Bahamonde_Kovac
|
04566b22eac016052bd3b1db64cf6957d68e89b4
|
3aaa6db2500523ee0d366d63bc206114bd936997
|
refs/heads/master
| 2022-11-09T12:37:31.235781
| 2022-10-28T16:08:33
| 2022-10-28T16:08:33
| 212,148,023
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,683
|
r
|
test.R
|
library(magrittr)

# W -----------------------------------------------------------------------
# Build `years` random weight matrices (countries x countries) whose entries
# drift over time following a random-walk mean.
W_list = list()
years = 200 ## years matrices with countries x countries
countries = 120
possible_means_0 = 1:years
# NOTE(review): possible_means_0 is defined but never used below.
possible_means_1 = cumsum(rnorm(years,1,1))
for(i in 1:years){
  means = possible_means_1
  W_i = matrix(rnorm(countries*countries,mean = means[i],sd=1),countries,countries)
  diag(W_i) = 0   # no self-weights
  W_list[[i]] = W_i
}

# Delta -------------------------------------------------------------------
## ctrl + shift + R
# Uniform multiplicative perturbation matrices around 1 (+/- `deduction`).
D_list = list()
# matrices with countries x countries
possible_means = rep(1,years)
deduction = 0.2
for(i in 1:years){
  D_list[[i]] = matrix(runif(countries*countries,possible_means[i]-deduction,possible_means[i]+deduction),countries,countries)
}

# D*w ---------------------------------------------------------------------
# Elementwise product of each perturbation matrix with its weight matrix.
dw = lapply(1:years,function(x){
  D_list[[x]]*W_list[[x]]
})
# str(dw)
# View(dw[[1]])

####
if (!require("pacman")) install.packages("pacman"); library(pacman)
p_load(tsDyn)

# Simulation presented in Enders2014 p. 289
parameter.1 = 0.7 # 0.7 (x)
parameter.2 = 0.2 # 0.2 (y)
parameters.m <- matrix(c(parameter.1, parameter.2, parameter.2, parameter.1), 2)
years.sim = 200
# var sims
set.seed(1);country.1 <- VAR.sim(B=parameters.m,n=years.sim,include="none")
set.seed(2);country.2 <- VAR.sim(B=parameters.m,n=years.sim,include="none")
# NOTE(review): country.2 is simulated but never used below.

alpha = 0
psi_1 = 0.5
psi_2 = 1
lambda_1 = 3
lambda_2 = 1.5
lambda_3 = 0
lambda_4 = 1
e = rnorm(countries, 0, sd = 1)

p_load(rgr)
# NOTE(review): stats::lag() on a plain numeric vector does NOT shift its
# values (it only adjusts the time-series attribute); if a one-period lag of
# the simulated series is intended here, dplyr::lag() would be needed --
# confirm intent.
x = matrix(
  alpha +
    as.vector(psi_1*lag(country.1[,2],1)) +
    as.vector(psi_2*lag(country.1[,1],1)) +
    lambda_1*dw[[2]] +
    lambda_2*dw[[1]] +
    lambda_3*dw[[2]] +
    lambda_4*dw[[1]] +
    e
)

p_load(GVARX)
p.1.sim=1 # The number of lag for Xt matrix
FLag.1.sim=2 # The number of lag for foreign variables in country-specific VAR
lag.max.sim=1 # The maximal number of lag for estimating country-specific VAR
type.sim="none" # Model specification for VAR. As in package vars, we have four selection: "none","const","trend", "both".
ic.sim="AIC" # Information criteria for optimal lag.As in package vars, we have four selection: "AIC", "HQ", "SC", and "FPE".

# determinsitic component for the paper
type.sim.paper = as.character(ifelse(type.sim=="both", as.character("Trend and constant"), ifelse(type.sim == "trend", as.character("Trend"), ifelse(type.sim == "constant", "Constant", NA ))))

options(scipen=9999999)
# NOTE(review): `country.var.d` and `sim.w.matrix` are not defined anywhere
# in this script, so this call cannot run as-is -- confirm where they were
# meant to come from.
mainOUTPUT.sim = GVECMest(
  data = country.var.d,
  p = p.1.sim,
  FLag = FLag.1.sim,
  lag.max = lag.max.sim,
  type = type.sim,
  ic = ic.sim,
  weight.matrix=sim.w.matrix)
|
460928247a7332962d432b4a8ab03393c4a7a6d9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/UsingR/examples/scrabble.Rd.R
|
986b512f9816b94e111e78a4b3497a7dae04f41c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 836
|
r
|
scrabble.Rd.R
|
library(UsingR)
### Name: scrabble
### Title: Distribution of Scrabble pieces
### Aliases: scrabble
### Keywords: datasets

### ** Examples

data(scrabble)

## Chi-squared check of the letter distribution of a long string: is it
## consistent with English-text letter frequencies?
quote = " R is a language and environment for statistical computing \
and graphics. It is a GNU project which is similar to the S language \
and environment which was developed at Bell Laboratories (formerly \
AT&T, now Lucent Technologies) by John Chambers and colleagues. R \
can be considered as a different implementation of S. There are \
some important differences, but much code written for S runs \
unaltered under R."
# Split the lower-cased text into single characters, then count each of the
# 26 letters plus the space symbol.
chars <- unlist(strsplit(tolower(quote), ""))
ltr.dist <- vapply(c(letters, " "), function(sym) sum(chars == sym), numeric(1))
# NOTE(review): the empty 2nd argument skips `y`, so scrabble$freq lands on
# the 3rd positional parameter (`correct`), not `p`; the tile frequencies
# therefore appear to be ignored and a uniform null is tested -- confirm.
chisq.test(ltr.dist,,scrabble$freq)
|
b9060d0af3bbe233c149d3ff9d5c7aca6637368c
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/BivRec/R/bivrecSurv.R
|
2b881733d8ddeb2de89c81447363dc74cd7ae28c
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,162
|
r
|
bivrecSurv.R
|
#################### CREATE A BIVREC OBJECT ######################
#####
#' Create a Bivariate Alternating Recurrent Event Object
#'
#' @description
#' This function creates a bivariate recurrent survival object to be used as a response variable in a model formula.
#'
#' @importFrom stats na.omit
#'
#' @param id Numeric or character vector of subject's unique identifier (i).
#' @param episode Vector indicating the observation or episode (j) for a subject (i). This will determine order of events for each subject.
#' @param xij Vector with the lengths of time spent in event of Type I for individual i in episode j.
#' @param yij Vector with the lengths of time spent in event of Type II for individual i in episode j.
#' @param d1 Vector of censoring indicator corresponding to Type I gap times (xij): = 1 for uncensored, and = 0 for censored gap times.
#' @param d2 Vector of censoring indicator corresponding to Type II gap times (yij): = 1 for uncensored, and = 0 for censored gap times. Note that in the last episode (or pair) of gap times, yij is always censored, i.e., d2 = 0.
#'
#' @details
#' All the following data requirements must be met to avoid errors or warnings:
#' \enumerate{
#' \item All variables should have the same length.
#' \item The Type I (xij) and Type II (yij) gap times must be numeric and meet the requirements xij\eqn{>}0, yij\eqn{\ge}0 respectively for all observations.
#' \item Only unique, positive, consecutive integers starting from 1, are allowed within each subject for episode (i.e., 1, 2, ...), but they should be allowed to be in any order in the raw data as long as they are unique and have no gap within each subject.
#' \item Censoring indicators d1 and d2 must be numeric. Only the last episode is allowed to have zero value for either, and for the last episode, the only combinations allowed are (d1, d2) = (0, 0) or (1, 0).
#' \item No missing values. If a subject has missing values in any variables, the whole subject's data will not be used.
#' }
#'
#' @return A bivrecSurv object ready to be used as the response for analysis using \verb{bivrecReg} or \verb{bivrecNP}.
#'
#' @rdname BivRec
#' @export
#' @examples
#' library(BivRec)
#' set.seed(28)
#' sim_data <- simBivRec(nsize=100, beta1=c(0.5,0.5), beta2=c(0,-0.5))
#' bivrecsurv_data <- with(sim_data, bivrecSurv(id, epi, xij, yij, d1, d2))
#' class(bivrecsurv_data)
bivrecSurv <- function(id, episode, xij, yij, d1, d2) {
  ## ---- Argument presence checks ----
  #Check if anything is missing
  if (missing(xij)) stop("Missing gap times for Type I event (xij).")
  if (missing(yij)) stop("Missing gap times for Type II event (yij).")
  if (missing(id)) stop("Missing subject identifiers (id).")
  if (missing(episode)) stop("Missing episodes for each subject (episode).")
  # NOTE(review): the next two messages are swapped -- a missing d2 reports
  # "(d1)" and a missing d1 reports "(d2)". Confirm and fix separately.
  if (missing(d2)) stop("Missing censoring indicator for Type I event (d1).")
  if (missing(d1)) stop("Missing censoring indicator for Type II event (d2).")
  Xcind <- d1
  Ycind <- d2

  ## ---- Global input validation ----
  #Check all vectors have same length
  all_lengths <- c(length(id),length(episode),length(xij),length(yij),length(Ycind),length(Xcind))
  if (length(unique(all_lengths)) != 1) stop("Data not cleaned. One or more input vectors (id, episode, xij, yij, d1, d2) differs in length from the rest.")

  #Check xij > 0 and yij >=0 both numeric vectors
  if (!is.numeric(xij)) stop("Data not cleaned. Time arguments (xij and yij) must be numeric.")
  if (!is.numeric(yij)) stop("Data not cleaned. Time arguments (xij and yij) must be numeric.")
  if (any(xij <= 0)) stop("Data not cleaned. Time arguments for event Type I (xij) must be positive.")
  if (any(yij < 0)) stop("Data not cleaned. Time arguments for event Type II (yij) must be non-negative")

  #Check censoring indicators are made of only 0 or 1 values
  if (any(Xcind!=0 & Xcind!=1)) stop("Data not cleaned. Indicator vector for Type I gap times (d1) must be made of 0 or 1 values only.")
  if (any(Ycind!=0 & Ycind!=1)) stop("Data not cleaned. Indicator vector for Type II gap times (d2) must be made of 0 or 1 values only.")

  #ensure id's are numeric
  if (!is.numeric(id)) {
    if (is.character(id)) {id = as.numeric(as.factor(id))} else {
      if (is.factor(id)) {id = as.numeric((id))} else {
        stop("Data not cleaned. The id vector must be numeric, character or factor.")}
    }
  }
  id_ref = id
  inputdf <- data.frame(id=id, epi=episode, xij=xij, yij=yij, d1=Xcind, d2=Ycind)

  ## ---- Per-subject validity checks ----
  #Checks for each subject
  err_xind = err_yind = err_epi = NULL
  unique_id <- unique(inputdf$id)
  for (i in 1:length(unique_id)) {
    sub_id <- unique_id[i]
    temp_by_subject <- subset(inputdf, inputdf$id==sub_id)
    temp_by_subject <- temp_by_subject[order(temp_by_subject$epi),]
    sub_n <- nrow(temp_by_subject)
    #Check last (d1,d2) = (1,0) or (0,0)
    last_cx <- temp_by_subject$d1[sub_n]
    last_cy <- temp_by_subject$d2[sub_n]
    if (last_cy!=0) {err_yind <- c(err_yind, sub_id)}
    # NOTE(review): the brace opened on the next line encloses everything
    # down to (and including) the episode-gap checks, so the intermediate
    # indicator checks and episode checks only execute for subjects whose
    # last d1 == 0 -- confirm whether they were meant to run for all
    # subjects.
    if (last_cx==0) {if (last_cy==1) {err_yind <- c(err_yind, sub_id)}
    if (sub_n > 1) {
      other_cx <- temp_by_subject$d1[-sub_n]
      other_cy <- temp_by_subject$d2[-sub_n]
      #check all d1, d2 besides last are 0 or 1
      if (sum(other_cx)!=(sub_n-1)) {err_xind <- c(err_xind, sub_id)}
      if (sum(other_cy)!=(sub_n-1)) {err_yind <- c(err_yind, sub_id)}
      # d1 must dominate d2 episode-wise for all non-terminal episodes.
      ind_check1 <- unique(other_cx >= other_cy)
      if (length(ind_check1) > 1) {
        err_xind <- c(err_xind, sub_id)
        err_yind <- c(err_yind, sub_id)
      } else {
        if (ind_check1==FALSE) {
          err_xind <- c(err_xind, sub_id)
          err_yind <- c(err_yind, sub_id)
        }
      }
    }
    #Check episodes don't have gaps
    if (!is.numeric(episode) & !is.integer(episode)) {stop("Episode variable must be numeric.")}
    for (j in 1:sub_n){
      if (temp_by_subject$epi[j]!=j) {
        err_epi <- c(err_epi, sub_id)}
    }
    }
    # NOTE(review): this filtering block sits INSIDE the subject loop, so the
    # warning can print once per remaining iteration and df4mdat is rebuilt
    # every pass; the final df4mdat is nevertheless correct because it is
    # always recomputed from inputdf with the cumulative error list.
    error_subjects <- unique(c(err_xind, err_yind, err_epi))
    # NOTE(review): length(error_subjects>0) only works by accident (length
    # of a logical vector); the intended test is length(error_subjects) > 0.
    if (length(error_subjects>0)){
      errmsg <- paste(error_subjects, collapse = ", ")
      msg <- paste("Warning: Data not cleaned. Subjects with id", errmsg,
                   "removed because of gaps in episodes or incorrect values for d1, d2.",
                   sep=" ")
      print(msg)
      df4mdat <- inputdf[-which(inputdf$id %in% error_subjects), ]
    } else {df4mdat <- inputdf}
  }

  ## ---- Censoring time per subject ----
  # ci = total follow-up (sum of zij) repeated on every row of the subject;
  # id2 = sequential subject index replacing the original id.
  #calculate censoring time
  ci=id2=NULL
  j=1
  df4mdat$zij <- df4mdat$xij + df4mdat$yij
  for (i in unique(df4mdat$id)){
    tempi=df4mdat[df4mdat$id == i,]
    if (nrow(tempi) == 1){
      ci=c(ci,tempi$zij)
      id2=c(id2,j)
    } else {
      ci=c(ci,rep(sum(tempi$zij),nrow(tempi)))
      id2=c(id2,rep(j,nrow(tempi)))
    }
    j=j+1
  }
  df4mdat <- cbind(id=id2, df4mdat[-1], ci)

  ## ---- Assemble the bivrecSurv return object ----
  result <- list()
  result$id_ref = id_ref
  result$error_ids <- error_subjects
  # Reject data sets where every d2 is censored.
  d2check <- unique(Ycind)
  if (length(d2check)==1) {
    if (d2check==0) {
      stop("Data not cleaned. All episodes provided are censored (all d2=0).")
    }
  }
  result$data4Lreg <- mdat(dat=df4mdat) #data for Lee regression
  result$data4Creg <- df4mdat #data for Chang regression (this is also the df that is used in bivrecPlot)
  #####ADD data for cdf and marginal of NP model
  df4np <- df4mdat
  colnames(df4np)=c("id", "epi", "vij", "wij", "d1", "d2", "x0ij", "ci")
  df4np=df4np[,c("id","vij","wij","d2","d1","epi","x0ij","ci")] #change order of columns
  forcdf1 <- np.dat(df4np, ai=1)
  forcdf2 <- np.dat(df4np, ai=2)
  marg1 <- formarginal(dat = df4np) #this is from the reformat code
  marg2 <- formarginal(dat = df4np)
  # NOTE(review): marg1 and marg2 come from identical calls -- confirm
  # whether marg2 was meant to use different arguments.
  formarg1 <- np.dat(dat=marg1, ai=1)
  formarg2 <- np.dat(dat=marg2, ai=2)
  #two np objects that have data for cdf and marg depending on ai
  result$dat4np1 <- list(forcdf=forcdf1, formarg=formarg1,refdata = df4np) #for ai=1
  result$dat4np2 <- list(forcdf=forcdf2, formarg=formarg2,refdata = df4np) #for ai=2
  result$call <- match.call()
  class(result) <- "bivrecSurv"
  return(result)
}
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
|
ddaa93ae096a7e7b616a823c0b0b3a17672210f6
|
5d7dbd67d9fa3e8d2589cc7a4a45e9d40b37677b
|
/Section 6- Abadie-replicate/synth_fisher.R
|
791369c56c517dea7bc274f9c8506c9da46c1563
|
[] |
no_license
|
ptoulis/synth-fisher
|
6b15e5ebdf916c9babb8b44750a7dd81e626681d
|
4e24efd06b49f6d18508086f779cbf1ac91a608e
|
refs/heads/main
| 2023-07-05T06:05:49.431916
| 2021-08-26T16:57:24
| 2021-08-26T16:57:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,093
|
r
|
synth_fisher.R
|
rm(list=ls())
library(plyr)
library(survival)
require(xtable)
## Loads adoption_DataList
## Contains several specifications of the DATA.
source("synth_fisher_DATA.R")
load("smoking_with_dems.rdata")
smoking$unit.num = NULL
cols = c(3:8) # interesting covariates.
#' Used to de-trend X_it.
#' Fits a linear time trend to the yearly national mean of each covariate in
#' `smoking` (years after the 1980 baseline) and returns the slopes as a
#' one-column matrix whose rownames are colnames(smoking)[cols].
time_trends = function() {
  ## Covariates: one row per covariate, columns are yearly cross-state means
  ## with the first 10 years dropped so that 1980 is the baseline.
  X = matrix(0, nrow=0, ncol=35)
  before1980 = seq(1, 10)
  X = rbind(X, ddply(smoking, .(year), summarize, mean(cigsale))[-before1980, 2]) # baseline is 1980
  X = rbind(X, ddply(smoking, .(year), summarize, mean(lnincome))[-before1980, 2])
  X = rbind(X, ddply(smoking, .(year), summarize, mean(age15to24))[-before1980, 2])
  X = rbind(X, ddply(smoking, .(year), summarize, mean(retprice))[-before1980, 2])
  X = rbind(X, ddply(smoking, .(year), summarize, mean(unemploy))[-before1980, 2])
  X = rbind(X, ddply(smoking, .(year), summarize, mean(dems))[-before1980, 2])
  rownames(X) = colnames(smoking)[cols]

  ## Slope of (covariate mean ~ year index) per covariate. Preallocated via
  ## vapply instead of growing the matrix with rbind inside a loop; the
  ## regressor z is loop-invariant, so it is hoisted out.
  z = seq_len(ncol(X))
  slopes = vapply(seq_len(nrow(X)),
                  function(j) coef(lm(X[j, ] ~ z))[[2]],
                  numeric(1))
  B_trend = matrix(slopes, ncol = 1)
  rownames(B_trend) = rownames(X)
  #' B_trend =
  #' cigsale -2.3869101487
  #' lnincome 0.0309519193
  #' age15to24 -0.0008508221
  #' retprice 15.3226861961
  #' unemploy -0.0337556561
  #' dems -0.5573361715
  return(B_trend)
}
#' Main function.
#' adoption = data frame with treatment adoption data (state, when)
#' @return Matrix with propensity scores (P(I_1|..)) for every state.
#'
synth_fisher = function(adoption, vars=c("lnincome", "retprice"), only.CA=FALSE, verbose=FALSE) {
  # fits Xt ~ t to take out time-effect.
  X_trend = time_trends()

  # Week format
  # Convert "month/year" adoption dates into months elapsed since Jan 1980.
  week = sapply(adoption$when, function(s) {
    yr = as.numeric(strsplit(as.character(s), "/")[[1]][2])
    mo = as.numeric(strsplit(as.character(s), "/")[[1]][1])
    12*(yr-1980) + mo # baseline is 1980.
  })

  #' Define covariates.
  #'
  #' X_ij = covariate j of unit i at time of treatment.
  #' X_ij is defined in terms of "1980 values" where we difference out common time trends.
  #'
  X = matrix(0, nrow=0, ncol=6)
  colnames(X) = colnames(smoking)[cols]
  stopifnot(all(vars %in% colnames(X)))

  # Adjust Xit for time trends.
  # NOTE(review): assumes adoption rows are aligned one-to-one with the
  # global AllStates vector -- confirm upstream.
  for(i in 1:length(AllStates)) {
    yr = as.numeric(strsplit(as.character(adoption[i,]$when), "/")[[1]][2])
    st = as.character(adoption[i, ]$state)
    # risk_set = AllStates[which(week >= week[i])]
    # st_data = subset(smoking, state==st & year==yr)[, c(3:7, 9)]
    x_it = as.numeric(subset(smoking, state==st & year==yr)[, cols])
    x_it = x_it - as.numeric(X_trend) * (yr - 1980)
    X = rbind(X, x_it)
  }
  rownames(X) = NULL

  # Update adoption data.
  adoption = cbind(adoption, X)
  head(adoption)
  # NOTE(review): head() calls inside a function are discarded no-ops, left
  # over from interactive use.
  # state when cigsale lnincome age15to24 retprice unemploy dems
  # Alabama 05/2004 145.1858 9.765749 0.1755933 -44.3444687 6.510136 84.80464
  # Change from (01/1990) -> week format
  adoption$when = week
  head(adoption)
  #' state when cigsale lnincome age15to24 retprice unemploy dems
  #' Alabama 293 145.1858 9.765749 0.1755933 -44.3444687 6.510136 84.8046
  #' ...

  # Event indicator: every state adopts except Missouri, which is treated as
  # censored -- presumably never/last to adopt in this data; confirm.
  status = rep(1, length(AllStates))
  status[which(AllStates=="Missouri")] = 0
  adoption$event = status

  # Cox proportional-hazards model of adoption time on the chosen covariates.
  surv = with(adoption, Surv(when, event))
  f = as.formula(paste("surv ~ ", paste(vars, collapse="+")))
  out = coxph(f, data=adoption)
  if(verbose) {
    print(sprintf("## Model ##"))
    print(sprintf("AIC = %.2f", AIC(out)))
    print(summary(out))
    print("## ##")
  }

  #
  # Propensity scores: relative hazards exp(X beta), normalized to sum to 1.
  var_ids = as.numeric(sapply(vars, function(v) which(colnames(adoption)==v)))
  X = as.matrix(adoption[, var_ids])
  stopifnot(all(names(coef(out)) == colnames(X)))
  # hats.
  yhat = exp(X %*% as.numeric(coef(out)))
  ps_hat = yhat / sum(yhat)
  rownames(ps_hat) = AllStates

  if(only.CA) {
    # Return only California's score together with the model AIC.
    i = which(rownames(ps_hat)=="California")
    return(c(ps_hat[i, ], AIC(out)))
  }

  # Otherwise lay all states out as a 13-row x 3-column (State, PS) table,
  # sorted by descending score (hard-coded for 13 * 3 = 39 states).
  ord = rev(order(ps_hat))
  M = data.frame(a = rep(0, 13))
  # matrix(0, nrow=13, ncol=6)
  for(j in 1:3) {
    j1 = 13 * (j-1) + 1
    j_index = ord[seq(j1, j1 + 12)]
    ps_j = round(as.numeric(ps_hat[j_index]), 4)
    names_j = rownames(ps_hat)[j_index]
    M = cbind(M, data.frame(State=names_j, PS=ps_j))
  }
  M$a = NULL
  rownames(M) = NULL
  as.matrix(M)
}
#' Try all possible combinations of models with X1, ... X6
#'
#' @return Kx3 matrix that contains (pvalue, AIC, #vars) at each row
#'
single_DATA_analysis = function(adoption, verbose=FALSE) {
  # Fit every covariate subset of size 1..6 and record, per model,
  # California's propensity score ("pvalue"), the Cox model AIC and the
  # number of covariates. Returns a data frame with one row per model.
  print("- Checking all models with 1-6 variables...")

  result_rows <- list()
  for (n_covariates in 1:6) {
    # All models with exactly n_covariates variables, one per row.
    candidate_models <- t(combn(colnames(smoking)[cols], n_covariates))
    for (row_idx in seq_len(nrow(candidate_models))) {
      fit <- synth_fisher(adoption, vars = candidate_models[row_idx, ], only.CA = TRUE)
      result_rows[[length(result_rows) + 1]] <- c(fit, n_covariates)
    }
  }

  # Assemble all rows at once rather than growing a matrix in the loop.
  pvalues <- do.call(rbind, result_rows)
  colnames(pvalues) <- c("pvalue", "AIC", "vars")
  rownames(pvalues) <- NULL
  #' pvalues = MATRIX (K x 3)
  #' pvalue AIC #vars.
  return(as.data.frame(pvalues))
}
# single_DATA_analysis(adoption_Data)
#' Analyzes all data adoption specifications in "adoption_DataList
#' @return Results matrix
#' (dataset_id, %non_reject, mean_AIC, bestYES)
#' "bestYes focuses on Q1 of models in terms of AIC (i.e., best 25% of models)
#'
#' Runs single_DATA_analysis over every specification in the global
#' adoption_DataList and accumulates one row per (specification, model):
#' dataSpec id, California p-value, model AIC, number of covariates.
#' Checkpoints the accumulated results to SynthFisher_Resultados.rda -- no,
#' SynthFisher_Results.rda -- after each specification.
full_DATA_analysis = function() {
  # BUG FIX: the original seeded the accumulator with a 0-row matrix whose
  # colnames ("dataSpec","nonReject","meanAIC","isBest") did not match the
  # data frames bound below, which breaks rbind.data.frame. Start from NULL
  # (rbind(NULL, df) == df) and name the spec column at cbind time.
  Results = NULL
  K = length(adoption_DataList)
  for(i in 1:K) {
    print(sprintf("Testing data specification %d / %d ", i, K))
    dat = adoption_DataList[[i]]
    pvals = single_DATA_analysis(dat)
    M = cbind(dataSpec = rep(i, nrow(pvals)), pvals)
    Results = rbind(Results, M)
    print(Results)
    # Checkpoint after every specification so partial runs are recoverable.
    save(Results, file="SynthFisher_Results.rda")
  }
  return(Results)
}
#' Reproduces the tables reported in the paper for the two main data
#' specifications, using the lnincome + retprice Cox model.
paper_analysis = function() {
  # single_DATA_analysis(adoption_Data_2, colnames(smoking)[cols])
  all_xnames = colnames(smoking)[cols]
  # NOTE(review): all_xnames is assigned but never used below.
  out = synth_fisher(adoption_Data, vars=c("lnincome", "retprice"),verbose=T)
  out
  # NOTE(review): bare expressions like `out`, `mdn`-style lines are not
  # auto-printed inside a function; these (and the xtable calls whose value
  # is discarded) are effectively no-ops unless the body is run line by line
  # interactively -- confirm intent.
  xtable(out, include.rownames=FALSE)
  out = synth_fisher(adoption_Data_2, vars=c("lnincome", "retprice"), verbose=T)
  out
  xtable(out, include.rownames=FALSE)
  ## Single data analysis
  pvals = single_DATA_analysis(adoption_Data)
  # Returned value: the best (lowest-AIC) model's row.
  pvals[which.min(pvals$AIC), ]
}
#' Appendix figure: scatter of model p-values vs AIC across all data
#' specifications (reads the checkpoint written by full_DATA_analysis()).
paper_appendix = function() {
  load("SynthFisher_Results.rda")
  Results = as.data.frame(Results)
  # library() rather than require(): fail fast with a clear error if ggplot2
  # is missing instead of continuing to an obscure failure below.
  library(ggplot2)
  g = ggplot(data = Results, aes(x = pvalue, y = AIC)) +
    geom_point() + xlab("pvalue") + ylab("average AIC")
  plot(g)
}
|
b6447c856bfa72883260b9f181df85a166bf40ed
|
ff0a2852a43d34bc01f564299d3e4d1751eec56f
|
/CI_network_files/mr_shrinkage.R
|
863bd8dcca9225384763c6ca322946b61af94b95
|
[] |
no_license
|
shinilraina/Summer_project
|
85ac5755b1e52ac4cff7a5e5ce2f5ec19c66ce36
|
17b2b26775972581ed3c1b0c69432d6a0d9eb0e1
|
refs/heads/main
| 2023-07-15T13:13:00.986967
| 2021-09-06T10:25:01
| 2021-09-06T10:25:01
| 374,995,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,005
|
r
|
mr_shrinkage.R
|
# Script to try shrinkage of correlation values to make MR matrix positive definite
path<-dirname(rstudioapi::getActiveDocumentContext()$path)
setwd(path)

# NOTE(review): load() expects an .RData image; if MR_results.rds was written
# with saveRDS() this should be readRDS() -- confirm how the file was saved.
load("../Results/MR/MR_results.rds")

effect_estimate<-read.csv("../Results/MR/effect_estimate_matrix.csv")
rownames(effect_estimate)<-effect_estimate[,1]
effect_estimate<-effect_estimate[,-1]
effect_estimate<-as.matrix(effect_estimate)

mr_ci<-mr_table[,c("Exposure","Outcome","Estimate","SE")]
mr_ci$zscore<-mr_ci$Estimate/mr_ci$SE

# Note: In the matrix, the columns are the outcomes and rows are the exposures
mr_zscores<-read.csv("../Results/MR/mr_zscores.csv")
rownames(mr_zscores)<-mr_zscores[,1]
mr_zscores<-mr_zscores[,-1]
mr_zscores<-as.matrix(mr_zscores)

#install.packages("DescTools")
library(DescTools)
# NOTE(review): is.positive.definite() is exported by matrixcalc, not
# DescTools -- this script implicitly relies on matrixcalc being attached.

# Back-transform Fisher z-scores to correlations and zero the diagonal.
mr_cors_ci<-FisherZInv(mr_zscores)
for (i in 1:nrow(mr_cors_ci)){
  mr_cors_ci[i,i]=0
}
# NOTE(review): with a zero diagonal the shrunken matrix can never be
# positive definite (its trace is 0); presumably the diagonal should be 1
# for a correlation matrix -- confirm.

mr_cors_ci.no_ms<-mr_cors_ci[-5,-5] # Remove MS
mr_cors_ci.no_epi<-mr_cors_ci[-3,-3]# Remove epilepsy
mr_cors_limited<-mr_cors_ci[c(-3,-5),c(-3,-5)] #Remove both

lambda=seq(0.00001,0.1,by=0.00001)

# Soft-threshold a correlation matrix, scanning the lambda grid for the
# smallest value that makes the result positive definite.
# BUG FIX: the original used min(), which collapses the entire matrix to a
# single scalar; elementwise soft-thresholding requires pmax(|r| - lam, 0).
# It also never stopped at the first success, so the "smallest lambda"
# message would print for every subsequent lambda as well.
find_min_pd_lambda <- function(corr_mat, lambdas) {
  for (lam in lambdas) {
    corr_shrink <- sign(corr_mat) * pmax(abs(corr_mat) - lam, 0)
    if (is.positive.definite(corr_shrink)) {
      print(paste(lam, " = smallest lambda to get a positive definite matrix"))
      return(invisible(lam))
    }
  }
  print("No lambda in the grid produced a positive definite matrix")
  invisible(NULL)
}

find_min_pd_lambda(mr_cors_ci.no_ms, lambda)    # without MS
find_min_pd_lambda(mr_cors_ci.no_epi, lambda)   # without epilepsy
find_min_pd_lambda(mr_cors_limited, lambda)     # without both
|
14583b8a09c2e957b96f0cd1acf89096ee2fdff8
|
f5c129a2a22e44fb38e9e049f6b1d285378dde7e
|
/R/plotFigure2.R
|
917d0e3a89c12ea800c2660accdc0e7cc3d90438
|
[] |
no_license
|
pughlab/inspire-ctdna
|
29fbc5a65abb377a1a8da6cef89a5231399ad974
|
2f3ccbb4b12d9b652dbf773992d67ff75c0e80d0
|
refs/heads/master
| 2021-03-30T08:26:15.641726
| 2020-10-16T18:52:18
| 2020-10-16T18:52:18
| 248,032,245
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,266
|
r
|
plotFigure2.R
|
###########
# plotFigure2.R
# Created by: Cindy Yang
# Updated: Oct 16, 2020
# This script allows the user to to create Figure 2
# This script requires the plotSwimmer.R function and Data.Figure2.RData
###########
#####
# SETUP
#####
source("./R/plotSwimmer.R")
load ("./data/Data.Figure2.RData")
#######
# PLOT MAIN
########
# 1. Plot swimmer plot on right hand side
# Two screens on one PDF page: screen 1 (x in [0.12, 1]) holds the swimmer
# plot, screen 2 (x in [0, 0.12]) holds the left-hand ctDNA-change column.
pdf(height = 12, width = 10, file = "./output/Figure2.pdf")
split.screen (
  matrix (c(c(0.12,1,0,1),
            c(0,0.12,0,1)),
          byrow = T, ncol = 4)
);

# 1) Swimmer plot in order based on % change in ctDNA
screen(1);
# get patients with change
pts.selected <- pts.selected[order(pts.selected$ctDNAChangeLog, decreasing = T),];
pts.selected <- pts.selected[!is.na(pts.selected$ctDNAChangeLog),]; # move the patient with NA to the top of the plot order
# plotSwimmer() (defined in plotSwimmer.R) returns plotting metadata; its
# $mp component (y midpoints) is reused below to align the left-hand dots.
plot.dat.netera3 <- plotSwimmer(pt.select = c(row.names(pts.selected),"INS-D-012"),decision = TRUE,plot.legend = TRUE);

# 2) left hand side % change in ctDNA
screen(2);
par(mar = c(6.15 ,1, 5.2, 0));
pts.selected$ctDNAChangeLog[pts.selected$ctDNAChangeLog==Inf] <- 45; # capping maximum at 45
# Signed log10 of the absolute percent change; the sign is flipped again in
# the plot() call so, per the axis labels below, increases land left of 0.
log10change <- log10(abs(pts.selected[,"ctDNAChangeLog"])*100);
log10change <- ifelse (pts.selected[,"ctDNAChangeLog"]<0, yes = -1*log10change, no = log10change);
plot(
  # Trailing NA presumably pads x to match length(mp) for the extra patient
  # "INS-D-012" appended above -- confirm.
  x = c(-1*log10change,NA),
  y = plot.dat.netera3$mp,
  # Blue = increase, red = decrease.
  col = adjustcolor(factor(pts.selected$ctDNAChangeLog>0,levels = c(TRUE, FALSE), labels = c("blue", "red")), alpha.f = 1),
  pch = 16,
  cex = 1,
  xlim = c(-5,2.5),
  ylim = c(0,max(plot.dat.netera3$mp)+0.7),
  axes = FALSE,
  xaxs="i",
  yaxs="i",
  xlab = ""
);
# Axis labels back-transform the signed-log scale into percent changes.
axis(1, at=c(-4:2), labels=c(10000, 1000, 100, 10 ,0, -10,-100),lwd = 1, lwd.tick=1, xaxs="i", cex.axis = 0.5);
# Add title and axes
title(xlab = "Percent change mean\nctDNA detected, %", main = "ctDNA Change\nat cycle 3", cex.lab = 0.60, line = 2.50, cex.main = 0.7);
# Add in ND notation for two samples with no ctDNA detected at baseline
text(x = 0, y = max(plot.dat.netera3$mp), labels = "ND", cex = 0.70);
legend("bottomright",
       title = "Change in %\nctDNA mean",
       legend = c("Increase", "Decrease"),
       fill = c("blue", "red"),
       bty = "n",
       cex = 0.60);

# 3. Close all screens
close.screen(all = TRUE);
dev.off()
|
733cf6d2b75eda3d60d8d8fbf76d4897dfe0a32d
|
8113d8a8b485bd31a5c8ee88996f9ea7969e60fb
|
/plot2.R
|
77d5d013130e91ef575661a9756ef1ea87c240d5
|
[] |
no_license
|
pjmw/ExData_Plotting1
|
c058358185c9b705e90902fd277fe6c77763f903
|
fa47424ed1b271edc5fa41e8ffb0491a33df5252
|
refs/heads/master
| 2021-01-02T22:55:59.702473
| 2017-08-05T13:57:08
| 2017-08-05T13:57:08
| 99,425,465
| 0
| 0
| null | 2017-08-05T13:47:55
| 2017-08-05T13:47:55
| null |
UTF-8
|
R
| false
| false
| 589
|
r
|
plot2.R
|
library(sqldf)
library(dplyr)

# Read only the two target days (1-2 Feb 2007) straight from the raw file.
data <- read.csv.sql("household_power_consumption.txt","select * from file where
Date = '1/2/2007' or Date = '2/2/2007' ",sep=";")
data <- tbl_df(data)   # NOTE(review): tbl_df() is superseded by as_tibble()
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Combine date + time into a single POSIXlt timestamp for the x axis.
data$datetime <- strptime(paste(data$Date, data$Time), format = "%Y-%m-%d %H:%M:%S")
my_data <- select(data, datetime, Global_active_power:Sub_metering_3)

# BUG FIX: the original called dev.copy(png, ...) BEFORE plotting; in a
# fresh non-interactive session there is no active device, so dev.copy()
# errors ("cannot copy from the null device"). Open the png device directly
# instead (same 480x480 default size), draw, then close it.
png(file = "plot2.png")
plot(my_data$datetime, my_data$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
adbcc9152d1d7c8aa1e78d089465d756bc5e63ba
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/RBesT/man/preddist.Rd
|
7ea8f14985a4d1f98f263b14580c3ec2d2a52e1e
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,532
|
rd
|
preddist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preddist.R
\name{preddist}
\alias{preddist}
\alias{preddist.betaMix}
\alias{preddist.normMix}
\alias{preddist.gammaMix}
\title{Predictive Distributions for Mixture Distributions}
\usage{
preddist(mix, ...)
\method{preddist}{betaMix}(mix, n = 1, ...)
\method{preddist}{normMix}(mix, n = 1, sigma, ...)
\method{preddist}{gammaMix}(mix, n = 1, ...)
}
\arguments{
\item{mix}{mixture distribution}
\item{...}{includes arguments which depend on the specific prior-likelihood pair, see description below.}
\item{n}{predictive sample size, set by default to 1}
\item{sigma}{The fixed reference scale of a normal mixture. If left
unspecified, the default reference scale of the mixture is assumed.}
}
\value{
The function returns for a normal, beta or gamma mixture
the matching predictive distribution for \eqn{y_n}.
}
\description{
Predictive distribution for mixture of conjugate distributions
(beta, normal, gamma).
}
\details{
Given a mixture density (either a posterior or a prior)
\deqn{p(\theta,\mathbf{w},\mathbf{a},\mathbf{b})}{p(\theta,w,a,b)}
and a data likelihood of
\deqn{y|\theta \sim f(y|\theta),}{y|\theta ~ f(y|\theta),}
the predictive distribution of a one-dimensional summary \eqn{y_n}
of $n$ future observations is distributed as
\deqn{y_n \sim \int p(\theta,\mathbf{w},\mathbf{a},\mathbf{b}) \, f(y_n|\theta) \, d\theta .}{y_n ~ \int p(u,w,a,b) \, f(y_n|u) du .}
This distribution is the marginal distribution of the data under
the mixture density. For binary and Poisson data \eqn{y_n =
\sum_{i=1}^n y_i} is the sum over future events. For normal data,
it is the mean \eqn{\bar{y}_n = 1/n \sum_{i=1}^n y_i}.
}
\section{Methods (by class)}{
\itemize{
\item \code{betaMix}: Obtain the matching predictive distribution
for a beta distribution, the BetaBinomial.
\item \code{normMix}: Obtain the matching predictive distribution
for a Normal with constant standard deviation. Note that the
reference scale of the returned Normal mixture is meaningless as the
individual components are updated appropriately.
\item \code{gammaMix}: Obtain the matching predictive distribution
for a Gamma. Only Poisson likelihoods are supported.
}}
\section{Supported Conjugate Prior-Likelihood Pairs}{
\tabular{lccc}{
\strong{Prior/Posterior} \tab \strong{Likelihood} \tab \strong{Predictive}
\tab \strong{Summaries} \cr
Beta \tab Binomial \tab Beta-Binomial \tab \code{n}, \code{r} \cr
Normal \tab Normal (\emph{fixed \eqn{\sigma}}) \tab Normal \tab \code{n}, \code{m}, \code{se} \cr
Gamma \tab Poisson \tab Gamma-Poisson \tab \code{n}, \code{m} \cr
Gamma \tab Exponential \tab Gamma-Exp (\emph{not supported}) \tab \code{n}, \code{m}
}
}
\examples{
# Example 1: predictive distribution from uniform prior.
bm <- mixbeta(c(1,1,1))
bmPred <- preddist(bm, n=10)
# predictive probabilities and cumulative predictive probabilities
x <- 0:10
d <- dmix(bmPred, x)
names(d) <- x
barplot(d)
cd <- pmix(bmPred, x)
names(cd) <- x
barplot(cd)
# median
mdn <- qmix(bmPred,0.5)
mdn
# Example 2: 2-comp Beta mixture
bm <- mixbeta( inf=c(0.8,15,50),rob=c(0.2,1,1))
plot(bm)
bmPred <- preddist(bm,n=10)
plot(bmPred)
mdn <- qmix(bmPred,0.5)
mdn
d <- dmix(bmPred,x=0:10)
\donttest{
n.sim <- 100000
r <- rmix(bmPred,n.sim)
d
table(r)/n.sim
}
# Example 3: 3-comp Normal mixture
m3 <- mixnorm( c(0.50,-0.2,0.1),c(0.25,0,0.2), c(0.25,0,0.5), sigma=10)
print(m3)
summary(m3)
plot(m3)
predm3 <- preddist(m3,n=2)
plot(predm3)
print(predm3)
summary(predm3)
}
|
e1fdd389cb889aa4bafcb1d844946f3766220f44
|
5308ead5d0602fdca069effe3b186134fe077685
|
/Main Figures.R
|
674ed2477e81591bc3c4ccafea2495e19e07256e
|
[] |
no_license
|
haiyangzhang798/EPICON-Drought-Study
|
480af065fac458241cb56e57800d3718f47a9995
|
864e7051ca7b0db0bd140123625e8f0dc37e91bf
|
refs/heads/master
| 2020-11-27T16:02:13.012705
| 2018-02-05T05:29:19
| 2018-02-05T05:29:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,287
|
r
|
Main Figures.R
|
#### Experimental design
# The following code was used in the analysis of data generated as part of a DOE funded project to explore the relationship between drought and microbial recruitment in Sorghum bicolor.
# For detailed descriptions of experimental design, please see the associated publication. In brief, we planted two different sorghum cultivars (RTx430 and BTx642) within a randomized block design that accounted for treatments, genotypes and replication, with three replicate blocks in total.
# From this field experiment, we collected a variety of plant phenotypes, soil measurements, and rhizosphere, root and soil samples for microbial community analysis and metatranscriptomics.
# All samples were collected weekly at the same time of day (between 10am and 1pm) and the same day of the week for seventeen weeks following seedling emergence (TP1 to TP17).
##### Load all of the packages required for quality control, main figures, and supplementary figures.
library("phyloseq")
library("ggplot2")
library("scales")
library("grid")
theme_set(theme_bw())
library("DESeq2")
library("ape")
library("vegan")
library("data.table")
library("RColorBrewer")
library(colorRamps)
library("svglite")
library(VennDiagram)
##### Fig. 1a-c Shannon diversity for three treatments
# Per-sample Shannon diversity from the rarefied phyloseq object `rar`
# (built earlier in the analysis), joined to the sample metadata, then plotted
# as mean +/- SD ribbons per treatment, faceted by compartment.
richness_2 <- estimate_richness(rar, measure = "Shannon")
s <- data.frame(sample_data(rar))
alphadiv <- data.frame(cbind(richness_2, s))
# Order factor levels so timepoints/treatments/compartments plot in order.
alphadiv$Timepoint <- factor(alphadiv$Timepoint, levels = paste0("TP", 1:17))
alphadiv$Treatment <- factor(alphadiv$Treatment, levels = c("Control", "Pre_flowering", "Post_flowering"))
alphadiv$SampleType <- factor(alphadiv$SampleType, levels = c("Soil", "Rhizosphere", "Root"))
# Before drought onset, pre-flowering plots are identical to control, so
# duplicate control TP1-TP2 rows under the Pre_flowering label ...
tp1_2 <- subset(alphadiv, Timepoint == "TP1" | Timepoint == "TP2")
tp1_2$Treatment <- "Pre_flowering"
tp1_2$TreatmentByTimepoint <- paste0(tp1_2$Treatment, tp1_2$Timepoint)
# ... and likewise duplicate control TP1-TP7 under the Post_flowering label.
tp1_7 <- subset(alphadiv, Treatment == "Control")
tp1_7_2 <- subset(tp1_7, Timepoint %in% paste0("TP", 1:7))
tp1_7_2$Treatment <- "Post_flowering"
tp1_7_2$TreatmentByTimepoint <- paste0(tp1_7_2$Treatment, tp1_7_2$Timepoint)
data <- rbind(alphadiv, tp1_2, tp1_7_2)
# Mean and SD of Shannon per treatment x timepoint x compartment.
input <- aggregate(Shannon ~ Treatment + Timepoint + SampleType, data, mean)
std <- aggregate(Shannon ~ Treatment + Timepoint + SampleType, data = data, FUN = sd)
input <- cbind(input, std$Shannon)
colnames(input)[5] <- "SD"
TreatmentSampleType <- with(input, interaction(Treatment, SampleType))
input <- cbind(input, TreatmentSampleType)
input$upper <- input$Shannon + input$SD
input$lower <- input$Shannon - input$SD
# Per-facet y-range anchors, injected via geom_blank() below so each facet
# shows a comparable span.
input <- data.table(input)
input[SampleType == "Soil", y_min := 7.5]
# NOTE(review): for Soil, y_min > y_max (7.5 vs 5); harmless because
# geom_blank() only expands the axis range, but looks swapped — confirm.
input[SampleType == "Soil", y_max := 5]
input[SampleType == "Rhizosphere", y_min := 4]
input[SampleType == "Rhizosphere", y_max := 6.5]
input[SampleType == "Root", y_min := 3]
input[SampleType == "Root", y_max := 5.5]
ggplot(input, aes(x = Timepoint, y = Shannon, color = Treatment,
                  group = Treatment, shape = Treatment)) +
  geom_line(size = 0.8) + xlab("") +
  facet_wrap(~SampleType, scales = "free_y", ncol = 1) +
  # Fix: the first colour literal was corrupted to "##### 6AB187" in the
  # original; "#6AB187" matches the Control colour used in Fig. 5 below.
  scale_color_manual(values = c("#6AB187", "#DE7A22", "#F4CC70")) +
  scale_fill_manual(values = c("lightblue3", "#FF8C00", "#CDBE70")) +
  theme(legend.title = element_text(colour = "black", size = 11, face = "bold")) +
  theme(legend.text = element_text(colour = "black", size = 11, face = "bold")) +
  theme(axis.text.x = element_text(hjust = 1, vjust = 0.5, size = 10, color = "black", angle = 90, face = "bold"),
        axis.text.y = element_text(size = 11, color = "black", face = "bold"),
        axis.title = element_text(size = 11, face = "bold"), text = element_text(size = 11, face = "bold")) +
  geom_blank(aes(y = y_min)) +
  geom_blank(aes(y = y_max)) +
  geom_ribbon(aes(ymin = lower, ymax = upper, fill = Treatment), alpha = 0.4, colour = NA) +
  scale_y_continuous(breaks = seq(3, 7.5, 0.5)) # 0.5-unit y ticks in every facet
##### Fig. 1d PCoA of Bray Curtis distance for all samples
# Attach a figure-specific metadata file to the control + pre-flowering subset
# of `rar` (presumably it supplies the TreatmentByTimepoint5 grouping used for
# colouring — confirm against the file), then ordinate with PCoA/Bray-Curtis.
map_file <- "Meta_final_Figure1d.txt"
bmsd = import_qiime_sample_data(map_file)
# Drop post-flowering samples from the rarefied object.
rar_pre<-subset_samples(rar,Treatment!="Post_flowering")
sample_data(rar_pre) <- bmsd
sample_data(rar_pre)$Timepoint<-factor(sample_data(rar_pre)$Timepoint, levels=c("TP1", "TP2","TP3","TP4","TP5","TP6","TP7","TP8","TP9","TP10","TP11",
"TP12","TP13","TP14","TP15","TP16","TP17"))
# Axes 1-2 of the ordination; colour = treatment-by-timepoint group,
# point shape = compartment (Soil/Rhizosphere/Root).
plot_ordination(rar_pre, ordinate(rar_pre, "MDS",distance="bray"),axes=1:2, color = "TreatmentByTimepoint5") +
scale_colour_manual(values=c(brewer.pal(9,"YlOrRd")[c(4,6,8)],brewer.pal(9,"YlGn")[c(4,6,8)],rep(brewer.pal(9,"YlGn")[c(8)],times=11),rep(brewer.pal(9,"YlOrRd")[c(8)],times=6),brewer.pal(9,"YlGn")[c(4,6,8)],rep(brewer.pal(9,"YlGn")[c(8)],times=6))) + #"green","green","green","green"brewer.pal(6,"YlOrBr")[c(4)],brewer.pal(11,"PiYG")[c(11)],brewer.pal(11,"PiYG")[c(8)],"black","grey")) + # scale_shape_manual(values=c(15,16,17)) +
geom_point(size = 3,aes(shape=SampleType))+ #facet_wrap(~Treatment) +
scale_shape_manual(values=c(15,16,17)) +
theme(axis.text.x=element_text(size=11,color="black",angle=90), axis.text.y=element_text(size=11,color="black"),
axis.title=element_text(size=11,face="bold"),text=element_text(size=11,face="bold"))
##### Fig. 1e PCoA of Bray Curtis distance for root samples
# Restrict the control + pre-flowering object to root samples, ordinate with
# PCoA on Bray-Curtis distances, and colour points by timepoint within each
# treatment facet.
root_ps <- subset_samples(rar_pre, SampleType == "Root")
root_ord <- ordinate(root_ps, "MDS", distance = "bray")
# 17-colour timepoint ramp: two greys (TP1-TP2) followed by three brewer
# gradients, matching the original palette exactly.
tp_colours <- c(
  "gray8", "gray37",
  brewer.pal(9, "YlGn")[c(9, 8, 7, 6, 4, 3)],
  brewer.pal(9, "YlGnBu")[c(3, 4, 6, 7, 8)],
  brewer.pal(9, "RdPu")[c(9, 8, 7, 6)]
)
plot_ordination(root_ps, root_ord, color = "Timepoint") +
  scale_colour_manual(values = tp_colours) +
  geom_point(size = 3) +
  facet_wrap(~Treatment) +
  theme(
    axis.text.x = element_text(size = 11, color = "black", angle = 90),
    axis.text.y = element_text(size = 11, color = "black"),
    axis.title = element_text(size = 11, face = "bold"),
    text = element_text(size = 11, face = "bold")
  )
##### Fig. 1f-h Heat map of the mean pairwise Bray Curtis dissimiliarity between all root sample replicates within the specified pair of treatments
# Read the precomputed mean pairwise Bray-Curtis dissimilarities (long format:
# Var1 x Var2 x value, one facet per treatment comparison in `Variation`).
# NOTE(review): header=T uses the reassignable shorthand; header=TRUE is safer.
data<-read.table("fig1f-h_heatmap_similarity_input.txt",header=T,sep="\t")
# Fix row/column order so the heat map axes run chronologically within
# treatment.
data$Var1<-factor(data$Var1, levels=c("ControlTP1", "ControlTP2", "ControlTP3", "ControlTP4", "ControlTP5", "ControlTP6", "ControlTP7", "ControlTP8", "ControlTP9", "ControlTP10", "ControlTP11", "ControlTP12", "ControlTP13", "ControlTP14", "ControlTP15", "ControlTP16", "ControlTP17", "Pre_floweringTP3", "Pre_floweringTP4", "Pre_floweringTP5", "Pre_floweringTP6", "Pre_floweringTP7", "Pre_floweringTP8", "Pre_floweringTP9", "Pre_floweringTP10", "Pre_floweringTP11", "Pre_floweringTP12", "Pre_floweringTP13", "Pre_floweringTP14", "Pre_floweringTP15", "Pre_floweringTP16", "Pre_floweringTP17"))
data$Var2<-factor(data$Var2, levels=c("ControlTP1", "ControlTP2", "ControlTP3", "ControlTP4", "ControlTP5", "ControlTP6", "ControlTP7", "ControlTP8", "ControlTP9", "ControlTP10", "ControlTP11", "ControlTP12", "ControlTP13", "ControlTP14", "ControlTP15", "ControlTP16", "ControlTP17", "Pre_floweringTP3", "Pre_floweringTP4", "Pre_floweringTP5", "Pre_floweringTP6", "Pre_floweringTP7", "Pre_floweringTP8", "Pre_floweringTP9", "Pre_floweringTP10", "Pre_floweringTP11", "Pre_floweringTP12", "Pre_floweringTP13", "Pre_floweringTP14", "Pre_floweringTP15", "Pre_floweringTP16", "Pre_floweringTP17"))
# Drop control TP1/TP2 from both axes (pre-flowering samples start at TP3).
sub <- subset(data,Var1 != "ControlTP1")
sub <- subset(sub,Var1 != "ControlTP2")
sub <- subset(sub,Var2 != "ControlTP2")
sub <- subset(sub,Var2 != "ControlTP1")
# Tile heat map, one panel per treatment-pair comparison.
ggplot(data = sub, aes(Var1,Var2)) +
geom_tile(aes(fill = value), colour = "white") + facet_wrap(~Variation, scales="free") + theme(panel.border=element_blank())+
scale_fill_gradientn(name="Bray curtis distance",colours = terrain.colors(7))+
theme(axis.text.x=element_text(size=10,color="black",angle=90,face="bold"),
axis.text.y=element_text(size=10,color="black",face="bold"),
axis.title=element_text(size=10,face="bold"),text=element_text(size=10))+xlab("")+ylab("")+
theme(legend.title = element_text(colour="black", size=11, face="bold")) +
theme(legend.text = element_text(colour="black", size = 11, face = "bold"))+
# Hide facet strip labels (panel identity is given in the figure legend).
theme(strip.background = element_blank(),
strip.text.x = element_blank())
##### Fig.2 Relative abundance for different phyla.
# Builds per-timepoint, per-compartment phylum-level relative abundances for
# each treatment, duplicating control rows for pre-drought timepoints (as in
# Fig. 1a-c), then draws stacked bar charts.
# NOTE(review): these loops call sample() repeatedly, which advances the RNG
# state; do not reorder or vectorize without re-checking downstream randomness.
options(scipen=200)
# Agglomerate counts to phylum level. `Bushman` is the 16S phyloseq object
# defined earlier in the analysis (not visible in this excerpt) — confirm.
b1_phylum <- tax_glom(Bushman, taxrank="Phylum")
# NOTE(review): `c`, `rm` and `sum` below shadow base functions of the same
# name. R still finds the functions in call position, but the names are risky.
c = subset_samples(b1_phylum,Treatment=="Control")
x <- c("TP1","TP2","TP3","TP4","TP5","TP6","TP7","TP8","TP9","TP10","TP11","TP12","TP13","TP14","TP15","TP16","TP17")
rm <- {}
control <- {}
for(i in x){
TP = subset_samples(c, Timepoint == i)
# Pool replicates by compartment and convert to percent relative abundance.
merged_r = merge_samples(TP, "SampleType")
merged_r_m = transform_sample_counts(merged_r , function(x) 100 * x/sum(x))
physeqdf <- psmelt(merged_r_m)
temp <- c("Soil","Rhizosphere","Root")
physeqdf$Sample <- factor(physeqdf$Sample, levels = c(temp))
n = nrow(physeqdf[order(physeqdf$Phylum),])
# sample() of a single string with replacement just repeats it n times (and
# consumes RNG draws); also note this reassigns the loop vector `x`, which is
# safe only because `for` captured the sequence before the loop started.
x = sample(i,n,replace=TRUE)
ST <- sample("Control",n,replace=TRUE)
physeqdf[order(physeqdf$Phylum),]$Timepoint <- x
physeqdf[order(physeqdf$Phylum),]$Treatment <- ST
physeqdf$Phylum <- as.character(physeqdf$Phylum)
control <- rbind(control,physeqdf[order(physeqdf$Phylum),])
}
control <- data.frame(control)
# Re-label control TP1-TP2 as Pre_flowering (treatments identical pre-drought).
pre1 <- subset(control,Timepoint=="TP1"|Timepoint=="TP2")
pre1$Treatment[pre1$Treatment == "Control"] <- "Pre_flowering"
pre2 <- {}
# Same melt-and-label loop for the pre-flowering samples (TP3 onwards).
pre = subset_samples(b1_phylum,Treatment=="Pre_flowering")
y <- c("TP3","TP4","TP5","TP6","TP7","TP8","TP9","TP10","TP11","TP12","TP13","TP14","TP15","TP16","TP17")
for(i in y){
TP = subset_samples(pre, Timepoint == i)
merged_r = merge_samples(TP, "SampleType")
merged_r_m = transform_sample_counts(merged_r , function(x) 100 * x/sum(x))
physeqdf <- psmelt(merged_r_m)
temp <- c("Soil","Rhizosphere","Root")
physeqdf$Sample <- factor(physeqdf$Sample, levels = c(temp))
n = nrow(physeqdf[order(physeqdf$Phylum),])
x = sample(i,n,replace=TRUE)
ST <- sample("Pre_flowering",n,replace=TRUE)
physeqdf[order(physeqdf$Phylum),]$Timepoint <- x
physeqdf[order(physeqdf$Phylum),]$Treatment <- ST
physeqdf$Phylum <- as.character(physeqdf$Phylum)
pre2 <- rbind(pre2,physeqdf[order(physeqdf$Phylum),])
}
# Re-label control TP1-TP7 as Post_flowering (post-flowering drought starts
# after TP7).
add2 <- subset(control,Treatment=="Control")
add3 <- subset(add2,Timepoint=="TP1"|Timepoint=="TP2"|Timepoint=="TP3"|Timepoint=="TP4"|Timepoint=="TP5"|Timepoint=="TP6"|Timepoint=="TP7")
add3$Treatment[add3$Treatment == "Control"] <- "Post_flowering"
p <- {}
# Same melt-and-label loop for post-flowering samples (TP8 onwards).
y <- c("TP8","TP9","TP10","TP11","TP12","TP13","TP14","TP15","TP16","TP17")
post = subset_samples(b1_phylum,Treatment=="Post_flowering")
for(i in y){
TP = subset_samples(post, Timepoint == i)
merged_r = merge_samples(TP, "SampleType")
merged_r_m = transform_sample_counts(merged_r , function(x) 100 * x/sum(x))
physeqdf <- psmelt(merged_r_m)
temp <- c("Soil","Rhizosphere","Root")
physeqdf$Sample <- factor(physeqdf$Sample, levels = c(temp))
n = nrow(physeqdf[order(physeqdf$Phylum),])
x = sample(i,n,replace=TRUE)
ST <- sample("Post_flowering",n,replace=TRUE)
physeqdf[order(physeqdf$Phylum),]$Timepoint <- x
physeqdf[order(physeqdf$Phylum),]$Treatment <- ST
physeqdf$Phylum <- as.character(physeqdf$Phylum)
p <- rbind(p,physeqdf[order(physeqdf$Phylum),])
}
rm <- rbind(control,pre1,pre2,add3,p)
sum <- tapply(rm$Abundance,rm$Phylum, FUN=sum)
sum <- data.frame(sum)
sum <- sort(sum$sum,decreasing = T)
# Keep the 13 focal phyla; collapse everything else into "Other".
list <- c("Acidobacteria","Actinobacteria","Bacteroidetes","Chloroflexi","Cyanobacteria",
"Firmicutes","Gemmatimonadetes","Nitrospirae",
"Planctomycetes","Proteobacteria","Tenericutes","TM7","Verrucomicrobia")
a <- rm[rm$Phylum %in% list,]
b <- rm[!(rm$Phylum %in% list),]
b$Phylum <- "Other"
rm <- rbind(a,b)
rm$Treatment<-factor(rm$Treatment,levels=c("Control","Pre_flowering","Post_flowering"))
rm$Timepoint<-factor(rm$Timepoint, levels=c("TP1","TP2","TP3","TP4","TP5","TP6","TP7","TP8","TP9","TP10","TP11","TP12","TP13","TP14","TP15","TP16","TP17"))
# Fixed phylum -> colour mapping shared with Fig. 4a/4c.
col_SampleByTreatment<-c("Acidobacteria"="thistle","Actinobacteria"="lightsalmon2","Armatimonadetes"="mediumpurple1","Bacteroidetes"="cornsilk","Chloroflexi"="lightskyblue","Cyanobacteria"="lightpink","Firmicutes"="lightblue1","Gemmatimonadetes"="darkseagreen","Nitrospirae"="mistyrose2",
"Planctomycetes"="gray69","Proteobacteria"="cadetblue3","SPAM"="darkorchid1","Tenericutes"="darkolivegreen2","TM7"="yellow","Verrucomicrobia"="slategray1","Other"="plum")
rm$Phylum <- factor(rm$Phylum,levels=c("Actinobacteria","Chloroflexi","Cyanobacteria","Firmicutes","Bacteroidetes","Proteobacteria",
"Acidobacteria","Armatimonadetes","Gemmatimonadetes","Nitrospirae",
"Planctomycetes","SPAM","Tenericutes","TM7","Verrucomicrobia","Other" ))
# Figure 2 shows control vs pre-flowering only.
sub<-subset(rm,Treatment!="Post_flowering")
ggplot(sub, aes(x=Timepoint, y=Abundance, fill=Phylum)) +
geom_bar(stat = "identity", color = "NA")+facet_wrap(~Sample+Treatment,ncol=2)+ scale_fill_manual(values = col_SampleByTreatment) +
theme(axis.text.x=element_text(size=11,color="black",angle=90), axis.text.y=element_text(size=11,color="black"),
axis.title=element_text(size=14,face="bold"),text=element_text(size=14,face="bold"))+
ylab("Relative Abundance")+xlab("")
###### Fig. 3 Phylogenetic tree of all genera with significant differences in abundance between pre-flowering drought and control root samples at TP8.
# Differential-abundance test (DESeq2 Wald test) on TP8 root samples,
# control vs pre-flowering drought; significant taxa are exported for
# tree rendering in iTOL.
TP <- subset_samples(Bushman, Timepoint =="TP8")
TP <- subset_samples(TP,Treatment != "Post_flowering")
TP <- subset_samples(TP,SampleType == "Root")
# Convert the phyloseq object to a DESeq2 dataset with Treatment as design.
diagdds = phyloseq_to_deseq2(TP, ~ Treatment)
# Estimate library-size normalization factors.
diagdds = estimateSizeFactors(diagdds)
# Estimate per-taxon dispersions.
diagdds = estimateDispersions(diagdds)
# Variance-stabilizing transform (computed for inspection only; not used below).
diagvst = getVarianceStabilizedData(diagdds)
dim(diagvst)
#norm=counts(diagdds, normalized=TRUE)
diagdds = DESeq(diagdds, test="Wald", fitType="parametric")
resultsNames(diagdds)
res = results(diagdds)
# Keep taxa significant at adjusted p < 0.05 and attach their taxonomy.
alpha = 0.05
sigtab = res[which(res$padj < alpha), ]
sigtab = cbind(as(sigtab, "data.frame"), as(tax_table(TP)[rownames(sigtab), ], "matrix"))
# Fill missing taxonomy ranks with a label derived from the deepest known
# rank (e.g. "<Phylum>PH", "<Class>OR", ...), so every significant taxon
# gets a usable genus-level name for the tree.
# NOTE(review): `new`, `class`, `order`, `all`, `mean`, `as` below shadow
# base functions; confusing but functional in this script.
new <- subset(sigtab,Phylum != "NA")
Genus <- subset(new,Genus != "NA")
na <- setdiff(rownames(new),rownames(Genus))
na.complete <- new[na,]
class <- na.complete[is.na(na.complete$Class),]
class$Class <- paste(class$Phylum,"PH",sep="")
class$Order <- paste(class$Phylum,"PH",sep="")
class$Family <- paste(class$Phylum,"PH",sep="")
class$Genus <- paste(class$Phylum,"PH",sep="")
order <- na.complete[is.na(na.complete$Order),]
order2 <- subset(order,Class!="NA")
order2$Order <- paste(order2$Class,"OR",sep="")
order2$Family <- paste(order2$Class,"OR",sep="")
order2$Genus <- paste(order2$Class,"OR",sep="")
Family <- na.complete[is.na(na.complete$Family),]
Family2 <- subset(Family,Order!="NA")
Family2$Family <- paste(Family2$Order,"CL",sep="")
Family2$Genus <- paste(Family2$Order,"CL",sep="")
Ge <- na.complete[is.na(na.complete$Genus),]
Ge2 <- subset(Ge,Family!="NA")
Ge2$Genus <- paste(Ge2$Family,"FA",sep="")
# Combine, deduplicate by phylum_genus label, and build iTOL display names
# ("<OTU id>_<log2FC>_<phylum_genus>").
all <- rbind(Ge2,Family2,order,class,Genus)
all$taxa <- paste(all$Phylum,all$Genus,sep="_")
mean <- tapply(all$log2FoldChange,all$taxa,mean)
as <- as.character(sort(as.numeric(row.names(all))))
all2 <- all[as,]
uniq <- all2[!duplicated(all2$taxa),]
uniq$name <- paste(rownames(uniq),uniq$log2FoldChange,uniq$taxa,sep="_")
# Hand the table over to iTOL for plotting.
write.csv(uniq,"root_treatment_tp8.csv")
##### Fig.4a Relative abundance across the top thirteen phyla for all transcripts for which taxonomies could be assigned in the metatranscriptome data
# Loads the metatranscriptome count biom + metadata (TP8/TP9 only, soil and
# rhizosphere), removes plant (Streptophyta) reads, and computes phylum-level
# relative abundances per treatment.
# NOTE(review): `Bushman` is re-assigned here, replacing the 16S object used
# by Figs 2-3 — run those sections first.
biom_file <- "fig4a-1-bacteria_count.biom"
map_file <- "fig4a-2-metadata_metatrans.txt"
# Seed before the sample()-based labelling loops below, for reproducibility.
set.seed(1)
biomot=import_biom(biom_file,parseFunction = parse_taxonomy_greengenes)
saveRDS(biomot, "biomot.rds")
bmsd = import_qiime_sample_data(map_file)
Bushman = merge_phyloseq(biomot,bmsd)
saveRDS(Bushman, "Bushman.rds")
sample_data(Bushman)$SampleType<-factor(sample_data(Bushman)$SampleType, levels=c("Soil","Rhizosphere"))
sample_data(Bushman)$Treatment<-factor(sample_data(Bushman)$Treatment, levels=c("Control","Pre_flowering","Post_flowering"))
sample_data(Bushman)$Timepoint<-factor(sample_data(Bushman)$Timepoint, levels=c("TP8","TP9"))
# Drop host-plant transcripts.
Bushman = subset_taxa(Bushman, Phylum != "Streptophyta")
# Rhizosphere: pool replicates per treatment, convert to percent abundance,
# and tag rows with their timepoint/compartment (same sample() labelling
# pattern as Fig. 2 — do not reorder, it consumes RNG draws).
rhi = subset_samples(Bushman, SampleType=="Rhizosphere")
Phylum <- tax_glom(rhi, taxrank="Phylum")
saveRDS(Phylum, "rhi_soil_Phylum_Bacteria.rds")
x <- c("TP8","TP9")
rm <- {}
for(i in x){
TP = subset_samples(Phylum, Timepoint == i)
merged_r = merge_samples(TP, "Treatment")
merged_r_m = transform_sample_counts(merged_r , function(x) 100 * x/sum(x))
physeqdf <- psmelt(merged_r_m)
#temp <- c("BT642Control","RT430Control","BT642Pre_flowering","RT430Pre_flowering","BT642Post_flowering","RT430Post_flowering")
temp <- c("Control","Pre_flowering","Post_flowering")
physeqdf$Sample <- factor(physeqdf$Sample, levels = c(temp))
n = nrow(physeqdf[order(physeqdf$Phylum),])
x = sample(i,n,replace=TRUE)
ST <- sample("Rhizosphere",n,replace=TRUE)
physeqdf[order(physeqdf$Phylum),]$Timepoint <- x
physeqdf[order(physeqdf$Phylum),]$SampleType <- ST
rm <- rbind(rm,physeqdf[order(physeqdf$Phylum),])
}
# Soil: identical processing.
Soil = subset_samples(Bushman, SampleType=="Soil")
Phylum <- tax_glom(Soil, taxrank="Phylum")
saveRDS(Phylum, "soil_Phylum_Bacteria.rds")
x <- c("TP8","TP9")
rm2 <- {}
for(i in x){
TP = subset_samples(Phylum, Timepoint == i)
merged_r = merge_samples(TP, "Treatment")
merged_r_m = transform_sample_counts(merged_r , function(x) 100 * x/sum(x))
physeqdf <- psmelt(merged_r_m)
temp <- c("Control","Pre_flowering","Post_flowering")
physeqdf$Sample <- factor(physeqdf$Sample, levels = c(temp))
n = nrow(physeqdf[order(physeqdf$Phylum),])
x = sample(i,n,replace=TRUE)
ST <- sample("Soil",n,replace=TRUE)
physeqdf[order(physeqdf$Phylum),]$Timepoint <- x
physeqdf[order(physeqdf$Phylum),]$SampleType <- ST
rm2 <- rbind(rm2,physeqdf[order(physeqdf$Phylum),])
}
rm3 <- rbind(rm,rm2)
sum <- tapply(rm3$Abundance,rm3$Phylum, FUN=sum)
sum <- data.frame(sum)
sum <- sort(sum$sum,decreasing = T)
# Keep the 13 focal phyla; collapse the rest into "Other" (shared with Fig. 2).
list <- c("Acidobacteria","Actinobacteria","Bacteroidetes","Chloroflexi","Cyanobacteria",
"Firmicutes","Gemmatimonadetes","Nitrospirae",
"Planctomycetes","Proteobacteria","Tenericutes","TM7","Verrucomicrobia")
a <- rm3[rm3$Phylum %in% list,]
b <- rm3[!(rm3$Phylum %in% list),]
b$Phylum <- "Other"
rm <- rbind(a,b)
rm$Treatment<-factor(rm$Treatment,levels=c("Control","Pre_flowering","Post_flowering"))
rm$Timepoint<-factor(rm$Timepoint, levels=c("TP1","TP2","TP3","TP4","TP5","TP6","TP7","TP8","TP9","TP10","TP11","TP12","TP13","TP14","TP15","TP16","TP17"))
# Same phylum -> colour mapping as Fig. 2.
col_SampleByTreatment<-c("Acidobacteria"="thistle","Actinobacteria"="lightsalmon2","Armatimonadetes"="mediumpurple1","Bacteroidetes"="cornsilk","Chloroflexi"="lightskyblue","Cyanobacteria"="lightpink","Firmicutes"="lightblue1","Gemmatimonadetes"="darkseagreen","Nitrospirae"="mistyrose2",
"Planctomycetes"="gray69","Proteobacteria"="cadetblue3","SPAM"="darkorchid1","Tenericutes"="darkolivegreen2","TM7"="yellow","Verrucomicrobia"="slategray1","Other"="plum")
rm$Phylum <- factor(rm$Phylum,levels=c("Actinobacteria","Chloroflexi","Cyanobacteria","Firmicutes","Bacteroidetes","Proteobacteria",
"Acidobacteria","Armatimonadetes","Gemmatimonadetes","Nitrospirae",
"Planctomycetes","SPAM","Tenericutes","TM7","Verrucomicrobia","Other" ))
# Stacked relative-abundance bars per treatment, faceted by compartment and
# timepoint.
# Fix: removed a stray trailing comma inside aes(...) — R treats it as an
# empty argument, which errors in older ggplot2 versions.
ggplot(rm, aes(x = Sample, y = Abundance, fill = Phylum)) +
  geom_bar(stat = "identity", color = "NA") +
  facet_wrap(~SampleType + Timepoint, ncol = 4) +
  scale_fill_manual(values = col_SampleByTreatment) +
  theme(axis.text.x = element_text(size = 11, color = "black", angle = 90),
        axis.text.y = element_text(size = 11, color = "black"),
        axis.title = element_text(size = 11, face = "bold"),
        text = element_text(size = 11, face = "bold")) +
  ylab("Relative Abundance")
##### Fig.4b Gene Ontology (GO) enrichment analysis for all differentially expressed genes
# Builds enrichment ratios (category count / total) per compartment for TP8
# and TP9, merges them into one long table, and orders the COG categories for
# plotting.
# TP8 input
data<-read.table("fig4b-1-new_heatmap_enrichment_TP8.txt",header=T,sep="\t")
# Columns: 1 = Annotation, 2 = Soil, 3 = Rhi, 4 = total (per input file).
tp8_rhi <- data[,c(1,3,4)]
ratio <- data.frame(cbind(tp8_rhi,tp8_rhi$Rhi/tp8_rhi$total))
dat <- ratio[,c(1,4)]
colnames(dat) <-c("Annotation","TP8_rhizosphere")
tp8_soil <- data[,c(1,2,4)]
ratio <- data.frame(tp8_soil$Soil/tp8_soil$total)
colnames(ratio) <- c("TP8_soil")
mer8 <- data.frame(cbind(dat,ratio))
# TP9 input (same layout; note lower-case `rhi`/`soil` column names here).
data<-read.table("fig4b-2-heatmap_enrich_tp9.txt",header=T,sep="\t")
tp8_rhi <- data[,c(1,3,4)]
ratio <- data.frame(cbind(tp8_rhi,tp8_rhi$rhi/tp8_rhi$total))
dat <- ratio[,c(1,4)]
colnames(dat) <- c("Annotation","TP9_rhizosphere")
tp8_soil <- data[,c(1,2,4)]
ratio <- data.frame(tp8_soil$soil/tp8_soil$total)
colnames(ratio) <- c("TP9_soil")
mer9 <- data.frame(cbind(dat,ratio))
rm <- merge(mer8,mer9,by="Annotation")
# Wide -> long. NOTE(review): melt() here relies on data.table (loaded above)
# redirecting data.frame input to reshape2::melt — confirm reshape2 is
# installed, or call data.table::melt on a data.table explicitly.
rm.m <- melt(rm)
rm.m$variable <- factor(rm.m$variable, levels=c("TP8_soil","TP9_soil",
"TP8_rhizosphere","TP9_rhizosphere"))
# Sort rows by descending enrichment ratio, then fix the category display
# order on the y axis.
dat <- rm.m[order(-rm.m[,3]),]
dat$Annotation <- factor(dat$Annotation, levels=c("Carbohydrate transport and metabolism",
"Amino acid transport and metabolism",
"Secondary metabolites biosynthesis, transport and catabolism",
"Cell cycle control, cell division, chromosome partitioning",
"Mobilome: prophages, transposons",
"Posttranslational modification, protein turnover, chaperones",
"Nucleotide transport and metabolism",
"Transcription",
"Replication, recombination and repair",
"Translation, ribosomal structure and biogenesis",
"Inorganic ion transport and metabolism",
"Energy production and conversion",
"Coenzyme transport and metabolism",
"Lipid transport and metabolism",
"Cell motility",
"Cell wall/membrane/envelope biogenesis",
"General function prediction only",
"Intracellular trafficking, secretion, and vesicular transport",
"Function unknown",
"Signal transduction mechanisms",
"Defense mechanisms",
"Cytoskeleton",
"Extracellular structures"))
# Split the "TPx_<compartment>" labels in `dat$variable` into Timepoint and
# SampleType columns.
# Fixes vs the original: the row count is no longer hard-coded to 1:84 (works
# for any number of rows), no vectors are grown with append() in a loop, and
# the loop variable `T` (which shadowed TRUE) is gone.
label_parts <- strsplit(as.character(dat$variable), "_", fixed = TRUE)
dat$Timepoint <- vapply(label_parts, `[[`, character(1), 1L)   # e.g. "TP8"
dat$SampleType <- vapply(label_parts, `[[`, character(1), 2L)  # e.g. "soil"
# Figure 4b shows TP8 only.
dat <- subset(dat, Timepoint == "TP8")
# Dot plot of enrichment ratios per COG category, one panel per compartment;
# the dashed line at x = 1 marks no enrichment.
ggplot(dat, aes(x=value, y=Annotation,colour=Timepoint)) +
scale_colour_manual(name="",values = c("#004445","#6FB98F","#9B4F0F","#C99E10"))+
scale_shape_manual(name="",values=c(1,1,19,19))+facet_wrap(~SampleType,scales="free_x",ncol=2)+
geom_point(size = 4,stroke = 1)+theme_bw()+
geom_vline(aes(xintercept=1),colour="Black",size=1,linetype="dashed")+xlab("Enrichment ratio")+
theme(axis.text.x=element_text(size=11,color="black",face="bold",angle=90),
axis.text.y=element_text(size=11,color="black",face="bold"),
axis.title=element_text(size=11,face="bold"),text=element_text(size=10))+
theme(legend.text = element_text(colour="black", size = 11, face = "bold"))+
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
panel.border = element_rect(colour = "black")) +
theme(strip.text = element_text(size=11,face="bold"))
##### Fig.4c Relative abundance across the top thirteen phyla for all differentially expressed transcripts by treatment
# Reads a precomputed table (Phylum x Category x Percentage x Timepoint x
# SampleType), collapses non-focal phyla into "Other", and draws horizontal
# stacked bars of per-category taxonomic composition.
data<-read.table("fig4c-new_input_function_taxa.txt",header=T,sep="\t")
data <- data.frame(data)
sum <- tapply(data$Percentage,data$Phylum, FUN=sum)
sum <- data.frame(sum)
sum <- sort(sum$sum,decreasing = T)
# Same 13 focal phyla as Figs 2 and 4a.
list <- c("Acidobacteria","Actinobacteria","Bacteroidetes","Chloroflexi","Cyanobacteria",
"Firmicutes","Gemmatimonadetes","Nitrospirae",
"Planctomycetes","Proteobacteria","Tenericutes","TM7","Verrucomicrobia")
a <- data[data$Phylum %in% list,]
b <- data[!(data$Phylum %in% list),]
b$Phylum <- "Other"
rm <- rbind(a,b)
rm$Timepoint<-factor(rm$Timepoint, levels=c("TP8","TP9"))
# Shared phylum -> colour mapping (identical to Figs 2 and 4a).
col_SampleByTreatment<-c("Acidobacteria"="thistle","Actinobacteria"="lightsalmon2","Armatimonadetes"="mediumpurple1","Bacteroidetes"="cornsilk","Chloroflexi"="lightskyblue","Cyanobacteria"="lightpink","Firmicutes"="lightblue1","Gemmatimonadetes"="darkseagreen","Nitrospirae"="mistyrose2",
"Planctomycetes"="gray69","Proteobacteria"="cadetblue3","SPAM"="darkorchid1","Tenericutes"="darkolivegreen2","TM7"="yellow","Verrucomicrobia"="slategray1","Other"="plum")
rm$Phylum <- factor(rm$Phylum,levels=c("Actinobacteria","Chloroflexi","Cyanobacteria","Firmicutes","Bacteroidetes","Proteobacteria",
"Acidobacteria","Armatimonadetes","Gemmatimonadetes","Nitrospirae",
"Planctomycetes","SPAM","Tenericutes","TM7","Verrucomicrobia","Other" ))
# COG category display order (same ordering as Fig. 4b).
rm$Category <- factor(rm$Category, levels=c("Carbohydrate transport and metabolism",
"Amino acid transport and metabolism",
"Secondary metabolites biosynthesis, transport and catabolism",
"Cell cycle control, cell division, chromosome partitioning",
"Mobilome: prophages, transposons",
"Posttranslational modification, protein turnover, chaperones",
"Nucleotide transport and metabolism",
"Transcription",
"Replication, recombination and repair",
"Translation, ribosomal structure and biogenesis",
"Inorganic ion transport and metabolism",
"Energy production and conversion",
"Coenzyme transport and metabolism",
"Lipid transport and metabolism",
"Cell motility",
"Cell wall/membrane/envelope biogenesis",
"General function prediction only",
"Intracellular trafficking, secretion, and vesicular transport",
"Function unknown",
"Signal transduction mechanisms",
"Defense mechanisms",
"Cytoskeleton",
"Extracellular structures"))
# coord_flip() puts categories on the y axis.
ggplot(rm, aes(x=Category, y=Percentage, fill=Phylum)) +
geom_bar(stat = "identity", color = "NA") + scale_fill_manual(values = col_SampleByTreatment) +
facet_wrap(~Timepoint+SampleType)+
theme(axis.text.x=element_text(size=9,color="black",hjust=1,vjust=0,face="bold"),
axis.text.y=element_text(size=9,color="black",face="bold"),
axis.title=element_text(size=11,face="bold"),text=element_text(size=11,vjust=1,face="bold"))+
theme(legend.title = element_text(colour="black", size=11, face="bold")) +
theme(legend.text = element_text(colour="black", size = 11, face = "bold"))+
ylab("Relative Abundance") +
coord_flip()
##### Fig. 5 violin plot
# Fix: the original called rm(list = ls()) here, wiping the whole workspace
# mid-script (including everything built for Figs 1-4). Dropped — this
# section only needs the file read below and introduces no name clashes.
# Per-measurement confocal values for strain SAI73, PEG treatment excluded;
# violin of Value by Treatment with mean +/- 1 SD overlaid.
data <- read.table("fig5-Confocal.txt", header = TRUE, sep = "\t")
sai73 <- subset(data, Strain == "SAI73")
sai73$Treatment <- as.factor(sai73$Treatment)
sai73_cd <- subset(sai73, Treatment != "PEG")
ggplot(sai73_cd, aes(x = Treatment, y = Value, fill = Treatment)) +
  geom_violin(trim = TRUE) +
  # White point-range marks mean +/- one standard deviation.
  stat_summary(fun.data = mean_sdl, fun.args = list(mult = 1),
               geom = "pointrange", color = "white") +
  scale_fill_manual(values = c("#6AB187", "#DE7A22"))
|
3c4474288a3c79a48a801df7031b7e784b063871
|
f4a6173ace305e66d6aa9af8ff825283c7e85f00
|
/R/man/format_summarytable_columns.Rd
|
682aa280aeede794d11b8b4def4afa5bc096b74a
|
[
"Apache-2.0"
] |
permissive
|
karawoo/syndccutils
|
6528f023f770c760e0c26b40f365a2ca68476269
|
550b4c0839457e3a48ff72636e028a9611065e5c
|
refs/heads/master
| 2020-03-07T13:24:53.453472
| 2019-10-09T16:28:20
| 2019-10-09T16:28:20
| 127,499,939
| 0
| 0
| null | 2018-03-31T04:51:45
| 2018-03-31T04:51:44
| null |
UTF-8
|
R
| false
| true
| 376
|
rd
|
format_summarytable_columns.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/processing.R
\name{format_summarytable_columns}
\alias{format_summarytable_columns}
\title{Prettify column names based on annotation keys.}
\usage{
format_summarytable_columns(df, facet_cols = c())
}
\arguments{
\item{df}{Data frame whose summary-table column names are to be prettified.}

\item{facet_cols}{Columns used for faceting; defaults to an empty vector (no faceting).}
}
\description{
Prettify column names based on annotation keys.
}
|
99035c22710c068896e59a349af7392d3e83f970
|
1337ef316a4bdd44a037e96be4ab98b1191ea005
|
/Synth.Infer/man/synth.prep.Rd
|
39fe64f69f9083be2d57892bf749743607e1b09b
|
[] |
no_license
|
MaysonChang/About-Me
|
10a77a358780e020f90c24873fd4cd105b5894b3
|
fa0399dde5cd2be3f7c867e483d8b85ca3d26e58
|
refs/heads/master
| 2023-08-28T07:59:27.682056
| 2021-11-09T17:54:13
| 2021-11-09T17:54:13
| 272,926,620
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,521
|
rd
|
synth.prep.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/synth.R
\name{synth.prep}
\alias{synth.prep}
\title{Construct a list of matrices from panel dataset to be loaded into synth.meth().}
\usage{
synth.prep(
data = NULL,
treatment.identifier = NULL,
controls.identifier = NULL,
time.variable = NULL,
time.predictors.prior = NULL
)
}
\arguments{
\item{data}{The dataframe with required wide format.}
\item{treatment.identifier}{A scalar identifying the column number of treatment unit in the dataframe or a character string giving the column name of treatment unit in the dataframe.}
\item{time.variable}{A scalar identifying column number or column-name character string associated with period (time) data. The time variable has to be numeric.}
\item{time.predictors.prior}{A numeric vector identifying the row numbers corresponding to pre-treatment periods.}
\item{controls.identifier}{A scalar or vector identifying the column number(s) of control units in the dataframe, or a vector of character strings giving the column names of control units in the dataframe.}
}
\value{
A list with a series of components prepared for running \code{synth.meth}.
\item{Y1}{A matrix of treatment unit data for pre-treatment periods.}
\item{X1}{A matrix of control units data for pre-treatment periods.}
\item{Y2}{A matrix of treatment unit data for post-treatment periods.}
\item{X2}{A matrix of control units data for post-treatment periods.}
\item{time}{A matrix of period(time) data.}
\item{T}{A scalar identifying the column number of period(time) data.}
\item{T1}{A scalar identifying the number of pre-treatment periods.}
\item{T2}{A scalar identifying the number of post-treatment periods.}
\item{n}{A scalar identifying the column number of control units data.}
}
\description{
This function extracts relevant data objects from the given dataframe and produce a list of matrices necessary for running synth.meth().
}
\details{
User should import a dataframe ("data") with required wide format, identify the columns associated with treatment and control units respectively, time variable and the pre-treatment time period.
}
\examples{
##First example: wide-format toy dataset sourced from Package "Synth".
#load data from the package.
data(synth.data)
#extract relevant components necessary for running synth.meth()
#from wide-format panel data.
synth.prep.out =
synth.prep(
synth.data,
treatment.identifier = 9,
controls.identifier = c(2:8),
time.variable = 1,
time.predictors.prior = c(1984:1989)
)
## Second example: The economic impact of terrorism in the
## Basque country using data from Abadie and Gardeazabal (2003)
#load data from the package.
data(basque)
#extract relevant components necessary for running synth.meth()
#from wide-format panel data.
synth.prep.out =
synth.prep(
data = basque,
treatment.identifier = 5,
controls.identifier = c(2:4,6:19),
time.variable = "year",
time.predictors.prior = c(1964:1969),
)
}
\references{
Li K, Shankar V (2020) Estimating the causal effect of a digitally native retailer opening a new store: a new two-step synthetic control method. Working Paper, University of Texas at Austin, Austin, TX.
Li K, Shankar V (2020) Statistical inference for average treatment effects estimated by synthetic control methods. \emph{Journal of American Statistical Association}.
}
|
390e128d54bcbafb4706efa2c917cb2ba10ec6c4
|
9995fdb4f2b2341db3a9f7e2b036b656e802e38c
|
/man/checkKKT4BetaStep.Rd
|
21e2f626cae52473283ff2930a22055db1b12677
|
[
"Apache-2.0"
] |
permissive
|
sverchkov/BaselineRegularization
|
100f8c647199ae58ecc0281e2b8cf598525e8d4a
|
d3c208b543104ae63609fe1320a68ed2015b1e79
|
refs/heads/master
| 2021-07-04T18:33:11.068161
| 2019-02-12T19:37:56
| 2019-02-12T19:37:56
| 112,363,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 629
|
rd
|
checkKKT4BetaStep.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkKKT4BetaStep.R
\name{checkKKT4BetaStep}
\alias{checkKKT4BetaStep}
\title{check convergence for beta step}
\usage{
checkKKT4BetaStep(y, X, w, beta, l, lambda)
}
\arguments{
\item{y}{response}
\item{X}{features}
\item{w}{weights}
\item{beta}{parameters learned}
\item{l}{interval length of each interval}
\item{lambda}{regularization strength}
}
\value{
2-norm of a vector that specifies the degree of
optimality violation
}
\description{
Check convergence for the beta step;
used as an inner-loop convergence criterion.
}
\author{
Zhaobin Kuang
}
|
f068e9b3fd161949868a3186b3e2f3ebeebff4e4
|
4ab888da78d52fcacb6a22affa53f09f9e0da9a8
|
/man/prepareSimulationFiles.Rd
|
d4b1a98e1eba1e89027ad655009f54fd23a5c3f8
|
[] |
no_license
|
MarionLi0/antaresFlowbased
|
a291ead418fe29f99baa0cad9dbb181b2e9ff0b8
|
9207cd7564b4f821f4d25acf30ba7d1f09d5e286
|
refs/heads/master
| 2021-01-21T18:24:48.240411
| 2017-05-16T12:25:39
| 2017-05-16T12:25:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,468
|
rd
|
prepareSimulationFiles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepareSimulationFiles.R
\name{prepareSimulationFiles}
\alias{prepareSimulationFiles}
\title{Prepare files needed for a single simulation run. Write playlist and FB}
\usage{
prepareSimulationFiles(ts, secondMember, scenarios, simNumber,
opts = antaresRead::simOptions(), verbose = 1)
}
\arguments{
\item{ts}{\code{data.frame} of dayType and simulation}
\item{secondMember}{\code{data.frame} of second members}
\item{scenarios}{\code{data.frame} of scenarios}
\item{simNumber}{\code{numeric} number of simulation must be in scenarios}
\item{opts}{\code{list} of simulation parameters returned by the function \link{setSimulationPath}. Default to \code{antaresRead::simOptions()}}
\item{verbose}{\code{numeric} show log in console. Default to 1
\itemize{
\item{0}{ : No log}
\item{1}{ : Short log}
\item{2}{ : Long log}
}}
}
\description{
Prepare files needed for a single simulation run. Write playlist and FB
}
\examples{
\dontrun{
path <- "D:/exemple_test"
antaresRead::setSimulationPath(path, 0)
secondMember <- data.table::fread(paste0(opts$studyPath,"/user/flowbased/second_member.txt"))
ts <- data.table::fread(paste0(opts$studyPath,"/user/flowbased/ts.txt"))
scenarios <- data.table::fread(paste0(opts$studyPath,"/user/flowbased/scenario.txt"))
prepareSimulationFiles(opts = opts, ts = ts, secondMember = secondMember,
scenarios = scenarios, simNumber = 54)
}
}
|
6496ce6a1712b8af666e2f346e420430107f1a1b
|
f19e54af838ec12a12d753ce09cadb3b6927e949
|
/Plot2.R
|
a812ce89328275f6b938aa8a5787f7ba4a9cfc58
|
[] |
no_license
|
Philippe-C/ExData_Plotting1
|
c52fcd7a91c00243b3c1e01f11f74012d8df1cb0
|
1f8b5156f36caf8c5b8fccc9a874a09a04c31c83
|
refs/heads/master
| 2021-01-16T18:52:10.288727
| 2015-03-08T21:22:11
| 2015-03-08T21:22:11
| 31,585,627
| 0
| 0
| null | 2015-03-03T07:41:50
| 2015-03-03T07:41:49
| null |
UTF-8
|
R
| false
| false
| 559
|
r
|
Plot2.R
|
# Step 1: Ensure the working directory contains load_data.R before sourcing it.
# NOTE(review): setwd() with a hard-coded user path is fragile; kept for
# compatibility with the original course-repository layout.
if (!"load_data.R" %in% list.files()) {
  setwd("~/ExData_Plotting1")
}
## Step 2: Load the pre-processed data (defines DateTime and Global_active_power).
source("load_data.R")
### Step 3: Plot Global Active Power as a function of DateTime into a PNG device.
# Bug fix: the original call ended with a trailing empty argument
# (`height = 480,)`), which passes a spurious missing argument to png().
png(filename = "plot2.png", width = 480, height = 480)
plot(DateTime, Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
#### Step 4: Close the graphics device so the file is flushed to disk.
dev.off()
|
6a06890690e99e0b768e87f23c53b82e6bddfedc
|
6628549379f9c05f0c61583cc889e4a097deb82e
|
/plot3.R
|
9b29c4f76e25833e7f039b408b986c0d3a7609db
|
[] |
no_license
|
ramyasree92/ExData_Plotting1
|
b926e201ac0a46851449f33f4a13c895561d37fb
|
3b495dd121ef2a0d2c9163a039d2c00285aefa7a
|
refs/heads/master
| 2020-03-18T15:56:22.162972
| 2018-05-26T13:08:25
| 2018-05-26T13:08:25
| 134,938,350
| 0
| 0
| null | 2018-05-26T07:12:44
| 2018-05-26T07:12:44
| null |
UTF-8
|
R
| false
| false
| 1,379
|
r
|
plot3.R
|
library(lubridate) # provides dmy_hms() for parsing "d/m/Y H:M:S" strings
## Read the full dataset, then keep only the rows for the two dates of
## interest, 1/2/2007 and 2/2/2007 (dd/mm/yyyy format).
power <- read.csv("household_power_consumption.txt", sep = ";")
powerdata <- subset(power, as.character(power$Date) == "2/2/2007")
powerdata <- rbind(subset(power, as.character(power$Date) == "1/2/2007"), powerdata)
rownames(powerdata) <- NULL
rm(power) # drop the large full table once the two-day subset is extracted
## Combine Date and Time values into a single datetime column...
powerdata$datetime <- paste(powerdata$Date, powerdata$Time)
## ...and convert it to class POSIXct (POSIXt) for plotting on a time axis.
powerdata$datetime <- dmy_hms(powerdata$datetime)
# Create a blank plot frame first so the three sub-metering series can be
# layered on top with lines().
plot(powerdata$datetime, as.numeric(as.character(powerdata$Sub_metering_1)), xlab = "", ylab = "Energy sub metering", type = "n", ylim = c(0, 30))
## Add one line per sub-metering channel.
## NOTE(review): as.numeric(as.character(...)) guards against the columns
## having been read in as factors; it is a no-op cost when already character.
lines(powerdata$datetime, as.numeric(as.character(powerdata$Sub_metering_1)))
lines(powerdata$datetime, as.numeric(as.character(powerdata$Sub_metering_2)), col = "red")
lines(powerdata$datetime, as.numeric(as.character(powerdata$Sub_metering_3)), col = "blue")
## Add a legend describing the three series.
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1)
## Copy the on-screen plot to plot3.png, then close the PNG device.
dev.copy(png, filename = "plot3.png", width = 480, height = 480, units = "px")
dev.off()
|
3871836bee0629b8ddde929df8f87f58785236c8
|
8f98e70fef3d9e7f3dd1fd07f687e01995d0b6d7
|
/HW4/hw4_5_input_output.R
|
f0c26956500dae8e28df5a3a53faa1e5bb3b04f6
|
[] |
no_license
|
QihangYang/Computational-Statistics
|
63814aa9ce6763f905db3665f1ed8f37391e7dfb
|
d26c30c3cdddecc7962144da1266439507cecfab
|
refs/heads/master
| 2020-12-22T00:14:32.193964
| 2020-04-10T00:02:23
| 2020-04-10T00:02:23
| 236,611,768
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,040
|
r
|
hw4_5_input_output.R
|
# HW4: Input/Output
#'
# In this exercise, we will get familiar with the different types of outputs such as `print`, `sprintf`, `message`, etc.
#'
# 1. Compute the mean miles per gallon (`mpg`) from the `mtcars` data set and store it in a scalar `mean_mpg`.
# Using the function `mean`. Use `format` to format the `mean_mpg` with 4 significant digits.
# Store formatted value in scalar `formatted_mean`.
# Write a function `my_print(x)` that uses the function `print()` to print `"The mean of mpg is <mean>."`.
# Substitue `<mean>` by parameter `x` and uses the function `paste0`.
# Note: There is no space between `<mean>` and period.
## Do not modify this line!
# Part 1: mean mpg from mtcars, formatted to 4 significant digits.
mean_mpg <- mean(mtcars$mpg)
formatted_mean <- format(mean_mpg, digits = 4)
# Print "The mean of mpg is <x>." and (invisibly) return the printed string.
my_print <- function(x) {
  msg <- paste0("The mean of mpg is ", x, ".")
  print(msg)
}
my_print(formatted_mean)
# 2. Set the random seed to zero and save the random seed vector to `seed`. (hint: use the command `seed <- .Random.seed`)
# Use `sample` to sample 5 integers from 1 to 30 without replacement. Store the samples in `my_sample`.
# Use the samples in `my_sample` as indices and change the according values in mpg column to `NA`.
# Store the modified data in vector `my_mpg`.
# Write a function `my_print2(x)` to use `print` to print the vector with 3 digits and replace `NA` by `N/A`.
# (Hint : lookup `na.print`).
## Do not modify this line!
# Part 2: reproducible sampling and NA injection into a copy of mtcars$mpg.
set.seed(0)
seed <- .Random.seed          # snapshot of the RNG state right after seeding
my_sample <- sample(1:30, size = 5, replace = FALSE)
my_mpg <- mtcars$mpg
my_mpg[my_sample] <- NA       # blank out the five sampled positions
# Print a numeric vector with 3 significant digits, showing NA as "N/A";
# returns the input invisibly (print()'s return value).
my_print2 <- function(x) {
  # Bug fix: the argument is `digits`; the original wrote `digit = 3` and
  # relied on R's partial argument matching, which is fragile.
  print(x, digits = 3, na.print = "N/A")
}
# 3. Write a function `wake_up(hour, minute, AMPM)` that takes in two integers `hour` and `minute`,
# a string `AMPM` and returns a string `"I wake up at <hour>:<minute> <AMPM>."` using `sprintf`.
# Note: Make sure `<hour>` and `<minute>` have 2 significant digits.
## Do not modify this line!
# Part 3: build "I wake up at HH:MM AMPM." with zero-padded hour and minute.
wake_up <- function(hour, minute, AMPM) {
  hh <- sprintf("%02i", hour)
  mm <- sprintf("%02i", minute)
  sprintf("I wake up at %s:%s %s.", hh, mm, AMPM)
}
# 4. Write a function `convert(x)` that converts the string `x` into numeric values and returns the converted values.
# Note: If the type of input is not character, use `stop()` to cast an error message: `Input is not character.`.
# After converting the input to its corresponding numerical value,
# the function should print the message `Converted successfully.`.
# Use the functions `message()` and `as.numeric`.
# You can assume the input will always be convertible.
## Do not modify this line!
# Part 4: convert a character string to its numeric value.
# Errors with "Input is not character." on non-character input; otherwise
# emits the message "Converted successfully." and returns the numeric value.
convert <- function(x) {
  if (!is.character(x)) {
    stop("Input is not character.")
  }
  message("Converted successfully.")
  as.numeric(x)
}
# 5. Load the `readr` package.
# Use the function `write_csv()` to write `mtcars` data set into a .csv file called `mtcars.csv`.
# Use `read_csv` to read the `mtcars.csv` and store the data in data frame `my_mtcars`.
## Do not modify this line!
library(readr)
# Part 5: round-trip mtcars through a CSV file on disk with readr.
# NOTE(review): `path =` is deprecated in readr >= 1.4 in favour of `file =`;
# kept as-is for compatibility with older readr versions -- confirm the
# installed readr version before changing.
write_csv(mtcars, path = "mtcars.csv")
my_mtcars <- read_csv("mtcars.csv")
|
925097b33a81f980b02208ebc0748b2e9bb19658
|
781e0e29ef9c326859a53ecfa70514c2eec62f1d
|
/Analytics 01 Extraction.R
|
cc6831e033ec5efad39369986f06807473b0c123
|
[] |
no_license
|
andrewsahalie/Analytics
|
7093d60c74cac6f0b263da09893899ba3f04369f
|
24b86d1d399e09b76a849bd043d109160f341492
|
refs/heads/master
| 2021-01-17T12:46:06.646953
| 2016-06-28T15:09:16
| 2016-06-28T15:09:16
| 59,064,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,912
|
r
|
Analytics 01 Extraction.R
|
# Analytics Data: A data manual exists for these data. See Andrew.
# EXTRACTION: The real extraction code (connecting directly to JSON data) hasn’t been written yet - needs final JSON details.
# This CSV file is the source for now.
# LogsTableRaw<-fromJSON("URLFromCaz")
# Extracting the data
# LogsTable<-LogsTableRaw
# RawLogs <- read.csv("~/Documents/Y1RFS/Analytics Data/Y1RFS Day-2 Data.csv", header=FALSE, sep="|", )
# Reading in raw data from CSV files. These files are created from https://destinationbiology.com/analytics/ (See documentation about how to get these data elsewhere)
# Read one raw analytics export ("|"-separated, headerless) and keep only the
# first twelve columns (V1..V12).
read_feed <- function(path) {
  raw <- read.csv(path, header = FALSE, sep = "|", stringsAsFactors = FALSE)
  raw[, paste0("V", 1:12)]
}
AnalyticsFeeder01 <- read_feed("~/Documents/Y1RFS/Analytics Data/Raw Data/Y1RFS Day 1 DataFix.csv")
AnalyticsFeeder02 <- read_feed("~/Documents/Y1RFS/Analytics Data/Raw Data/Y1RFS Day-2 Data.csv")
AnalyticsFeeder03 <- read_feed("~/Documents/Y1RFS/Analytics Data/Raw Data/Y1RFS Day3Data.csv")
AnalyticsFeeder04 <- read_feed("~/Documents/Y1RFS/Analytics Data/Raw Data/Y1RFS NonLectureDataFix.csv")
# Stack all four daily extracts into one common table.
MergedAnalyticsFile <- rbind(AnalyticsFeeder01, AnalyticsFeeder02, AnalyticsFeeder03, AnalyticsFeeder04)
|
28fb3391fe5404060f6e5a1f2e7605ef65ce2f93
|
38a052ec391f777d2951b35e57f80bd533e1cda3
|
/class_code/class_code_181025.R
|
31e68d92798818f0b7ea315d71f44da7b9014526
|
[] |
no_license
|
abbiepopa/BSDS100
|
c2de879ce725992e0a1a4838f575da100b95171b
|
5b858a0dbd9d9452f183a52978aa60dbb13831c8
|
refs/heads/master
| 2020-03-25T03:11:21.161959
| 2019-02-05T00:56:20
| 2019-02-05T00:56:20
| 143,327,245
| 3
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,344
|
r
|
class_code_181025.R
|
# In-class demo: saving/reading data, tidyr reshaping, and ggplot2 plotting.
data("mtcars")
save(mtcars, file = "~/Desktop/mtcars.csv") # won't work: save() writes .RData, not CSV
# correct way
write.csv(mtcars, file = "~/Desktop/mtcars_1.csv",
          row.names = F)
# txts, write.table instead of csv
# Toy wide-format data: one sales column per light-sign type.
my_df <- data.frame(city = c("Austin", "Georgia",
                             "Vancouver"),
                    fancy = c(35000, 43000, 106000),
                    normal = c(30000, 44000, 770000))
library(tidyr)
# Wide -> long: collapse the fancy/normal columns into light_sign/sales pairs.
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr.
tidy_df <- gather(my_df, key = light_sign,
                  value = sales, fancy, normal)
dim(my_df)
dim(tidy_df)
library(ggplot2)
# One coloured line per light-sign type across cities.
ggplot(tidy_df, aes(x = city, y = sales,
                    group = light_sign,
                    colour = light_sign)) +
  geom_point(size = 3) +
  geom_line()
# Simulated phone-usage data: one column per location/time-of-day combination.
set.seed(1979)
mobile_time <- data.frame(unique_id = 1:4,
                          treatment = sample(rep(c("ios",
                                                   "android"),
                                                 each = 2)),
                          work_am = runif(4, 0, 1),
                          home_am = runif(4, 0, 1),
                          work_pm = runif(4, 1, 2),
                          home_pm = runif(4, 1, 2))
# Gather every column except the identifiers...
mobile_time_tidy <- gather(mobile_time, key = sample,
                           value = time, -unique_id,
                           -treatment)
# ...then split names like "work_am" on the underscore into two variables.
mobile_time_tidier <- separate(mobile_time_tidy,
                               sample,
                               into = c("location",
                                        "time_of_day"),
                               sep = "\\_")
# Boxplot per location/time-of-day combination, faceted by OS treatment.
ggplot(mobile_time_tidier, aes(
  group = interaction(location, time_of_day),
  x = interaction(location, time_of_day),
  fill = interaction(location, time_of_day),
  y = time)) + geom_boxplot() +
  facet_wrap(~treatment)
### Review Activity
library(tibble)
rest_profit <- tibble(
  name = c("Papalote", "Nopa", "Jannah"),
  `1999` = c(745, 737, 2458),
  `2000` = c(2666, 8488, 2766))
# Long format: one row per restaurant-year.
rest_profit <- gather(rest_profit,
                      key = "year",
                      value = "profit",
                      `1999`,
                      `2000`)
rest_profit
# spread() is the inverse of gather() (superseded by pivot_wider()).
spread(tidy_df, light_sign, sales)
# unite() pastes two columns back together with a separator.
unite(mobile_time_tidier, col = sample,
      location, time_of_day, sep = "zzz")
table3
unite(table3, col = "country_year",
      country, year)
spread(table3, year, rate)
|
97da634c5d62851cf5a004eb40874544f7a44c08
|
f5d5447166c7ddcd7a12ff8fe35c73f60057b792
|
/00_scripts/LEA.R
|
9068395a483a36645e89b9fae53d0a4f8ab762c2
|
[] |
no_license
|
rcanet/Transcrptomic-approach-P-margaritifera
|
1892e0167df38dcfcc5043cca1c5a02a6c30660c
|
fe51c91a2ce8fd09a353054af2aaf3983d4e679b
|
refs/heads/master
| 2020-03-26T07:03:22.804487
| 2019-04-02T08:57:59
| 2019-04-02T08:57:59
| 144,634,385
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,403
|
r
|
LEA.R
|
#!/usr/bin/Rscript
# sNMF / LFMM environmental-association analysis with the LEA package,
# followed by genomic-inflation correction and FDR control.
setwd("12_LEA")
library("LEA")

# Ancestry estimation: convert the LFMM file to .geno and run snmf for K = 1..9.
genotype <- lfmm2geno("data.lfmm")
obj.snmf <- snmf(genotype, K = 1:9, entropy = TRUE, ploidy = 2, project = "new")
save(obj.snmf, file = "snmf.rda")
print("object snmf saved")
# Bug fix: a bare `exit` statement stood here; `exit` is not an R function and
# aborted non-interactive runs with "object 'exit' not found". It has been
# removed so the association analysis below actually runs under Rscript.

# Environmental association: LFMM with K = 2 latent factors, 5 replicate runs.
obj.lfmm <- lfmm("data.lfmm", "popfile.env", K = 2, rep = 5, project = "new")
print("obj.lfmm created")
save(obj.lfmm, file = "obj.lfmm.rda")

# Record z-scores from the 5 runs and combine them with the median.
zs <- z.scores(obj.lfmm, K = 2)
zs.median <- apply(zs, MARGIN = 1, median)

# Genomic inflation factor (GIF).
lambda <- median(zs.median^2) / qchisq(0.5, df = 1)
lambda

# Adjusted p-values from the GIF-corrected chi-square statistics.
# (`lower.tail` spelled out; the original wrote `lower =` and relied on
# partial argument matching.)
adj.p.values <- pchisq(zs.median^2 / lambda, df = 1, lower.tail = FALSE)
# histogram of p-values
hist(adj.p.values, col = "red")

# Recompute with a manually chosen inflation factor of 0.55; this overwrites
# the GIF-based values above, as in the original analysis.
adj.p.values <- pchisq(zs.median^2 / .55, df = 1, lower.tail = FALSE)
hist(adj.p.values, col = "green")
print("histogram of adjusted p-value done")
save.image(file = "Env_lfmm_padj.rda")

## FDR control: Benjamini-Hochberg at level q; L = number of loci.
L <- 145123
q <- 0.1
w <- which(sort(adj.p.values) < q * (1:L) / L)
candidates.bh <- order(adj.p.values)[w]

## FDR control: Storey's q-values.
library(qvalue)
plot(qvalue(adj.p.values))
candidates.qv <- which(qvalue(adj.p.values, fdr = .1)$signif)
save.image(file = "myEnvironment.rda")
|
892f530f94a3c818ddd8a274b2058a045cfcca31
|
b5e73b7034cb861f5b793cec78f0f840539a70a9
|
/Experiment1/R/General_Functions.R
|
5c01936bb03e50f651a1603b2ba871cc99756088
|
[] |
no_license
|
barslancogs/Drag_and_Drop
|
6a338d0918e734a5d1ff472b1e8fbfa0eef0a2b0
|
bb46d9b934b6a583840d4b7a8e291efd24a5e691
|
refs/heads/master
| 2020-05-30T09:12:51.809809
| 2019-10-04T00:40:28
| 2019-10-04T00:40:28
| 189,637,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,426
|
r
|
General_Functions.R
|
# crosstab function to produce crosstab descriptives
source("http://pcwww.liv.ac.uk/~william/R/crosstab.r")
# Import functions
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
# Summarise a measure variable by groups: returns a data frame with the group
# columns plus N, <measurevar> (the mean), sd, se, and ci (half-width of a
# t-based confidence interval at level conf.interval).
# data: a data frame; measurevar: name (string) of the column to summarise;
# groupvars: character vector of grouping columns; na.rm: drop NAs first.
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  library(plyr)
  # New version of length which can handle NA's: if na.rm==T, don't count them
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }
  # This does the summary. For each group's data frame, return a vector with
  # N, mean, and sd
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N = length2(xx[[col]], na.rm=na.rm),
                     mean = mean (xx[[col]], na.rm=na.rm),
                     sd = sd (xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )
  # Rename the "mean" column to the measure variable's own name.
  datac <- rename(datac, c("mean" = measurevar))
  datac$se <- datac$sd / sqrt(datac$N)  # Calculate standard error of the mean
  # Confidence interval multiplier for the standard error:
  # e.g., if conf.interval is .95, use the .975 t quantile with df = N - 1.
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
# Save the current ggplot to disk as PNG, PDF, or both via ggplot2::ggsave().
#
# fileName: output path WITHOUT extension; dpi, width, height, units are
# forwarded to ggsave(); type is one of "png", "pdf", or "both".
savefig <- function(fileName, dpi, width, height, units, type){
  # Map the requested type to the file extension(s) to write. An unknown
  # `type` yields NULL, so nothing is saved -- same as the original behavior.
  exts <- switch(type, png = "png", pdf = "pdf", both = c("png", "pdf"))
  # The original duplicated the ggsave() call in three branches; a single
  # loop removes the repetition without changing the files produced.
  for (ext in exts) {
    ggsave(paste0(fileName, ".", ext),
           dpi = dpi, width = width, height = height, units = units)
  }
}
# Pairwise t-tests that additionally return the t statistics and degrees of
# freedom (stats::pairwise.t.test only returns adjusted p-values).
# Returns a "pairwise.htest" list with p.value, t.value, and dfs matrices.
pairwise.t.test.with.t.and.df <- function (x, g, p.adjust.method = p.adjust.methods, pool.sd = !paired,
                                           paired = FALSE, alternative = c("two.sided", "less", "greater"),
                                           ...)
{
  if (paired & pool.sd)
    stop("pooling of SD is incompatible with paired tests")
  DNAME <- paste(deparse(substitute(x)), "and", deparse(substitute(g)))
  g <- factor(g)
  p.adjust.method <- match.arg(p.adjust.method)
  alternative <- match.arg(alternative)
  if (pool.sd) {
    METHOD <- "t tests with pooled SD"
    # Per-group means, SDs, and sizes; pooled SD across all groups.
    xbar <- tapply(x, g, mean, na.rm = TRUE)
    s <- tapply(x, g, sd, na.rm = TRUE)
    n <- tapply(!is.na(x), g, sum)
    degf <- n - 1
    total.degf <- sum(degf)
    pooled.sd <- sqrt(sum(s^2 * degf)/total.degf)
    # p-value of the (i, j) group comparison using the pooled SD.
    compare.levels <- function(i, j) {
      dif <- xbar[i] - xbar[j]
      se.dif <- pooled.sd * sqrt(1/n[i] + 1/n[j])
      t.val <- dif/se.dif
      if (alternative == "two.sided")
        2 * pt(-abs(t.val), total.degf)
      else pt(t.val, total.degf, lower.tail = (alternative ==
                                                 "less"))
    }
    # t statistic of the (i, j) comparison (same formula, no p-value step).
    compare.levels.t <- function(i, j) {
      dif <- xbar[i] - xbar[j]
      se.dif <- pooled.sd * sqrt(1/n[i] + 1/n[j])
      t.val = dif/se.dif
      t.val
    }
  }
  else {
    METHOD <- if (paired)
      "paired t tests"
    else "t tests with non-pooled SD"
    # Paired / non-pooled variants delegate each pair to t.test().
    compare.levels <- function(i, j) {
      xi <- x[as.integer(g) == i]
      xj <- x[as.integer(g) == j]
      t.test(xi, xj, paired = paired, alternative = alternative,
             ...)$p.value
    }
    compare.levels.t <- function(i, j) {
      xi <- x[as.integer(g) == i]
      xj <- x[as.integer(g) == j]
      t.test(xi, xj, paired = paired, alternative = alternative,
             ...)$statistic
    }
    # Per-pair degrees of freedom (non-integer for Welch tests).
    compare.levels.df <- function(i, j) {
      xi <- x[as.integer(g) == i]
      xj <- x[as.integer(g) == j]
      t.test(xi, xj, paired = paired, alternative = alternative,
             ...)$parameter
    }
  }
  # p-values go through stats::pairwise.table (adjusted); t-values and dfs go
  # through pairwise.table.t, which reports the raw statistics unadjusted.
  PVAL <- pairwise.table(compare.levels, levels(g), p.adjust.method)
  TVAL <- pairwise.table.t(compare.levels.t, levels(g), p.adjust.method)
  if (pool.sd)
    DF <- total.degf
  else
    DF <- pairwise.table.t(compare.levels.df, levels(g), p.adjust.method)
  ans <- list(method = METHOD, data.name = DNAME, p.value = PVAL,
              p.adjust.method = p.adjust.method, t.value = TVAL, dfs = DF)
  class(ans) <- "pairwise.htest"
  ans
}
# Build the lower-triangular matrix of pairwise statistics (t-values or dfs)
# analogous to stats::pairwise.table, but WITHOUT applying any p-value
# adjustment: the statistics are reported raw. Entries with row index <= col
# index are NA; dimnames come from level.names[-1] (rows) x head (cols).
pairwise.table.t <- function (compare.levels.t, level.names, p.adjust.method)
{
  # `p.adjust.method` is accepted only for signature compatibility with
  # stats::pairwise.table; it is intentionally unused here.
  ix <- setNames(seq_along(level.names), level.names)
  pp <- outer(ix[-1L], ix[-length(ix)], function(ivec, jvec) sapply(seq_along(ivec),
    function(k) {
      i <- ivec[k]
      j <- jvec[k]
      if (i > j)
        compare.levels.t(i, j)
      else NA
    }))
  # Bug fix: the original contained the dead self-assignment
  # `pp[lower.tri(pp, TRUE)] <- pp[lower.tri(pp, TRUE)]` -- a leftover from
  # stats::pairwise.table, where p.adjust() is applied at this point. Removed.
  pp
}
# Shared plotting constants: geom text size and the matching theme font size.
# (Idiom fix: `<-` instead of `=`, one statement per line.)
geom.text.size <- 5
theme.size <- 3 * geom.text.size
# Default number of digits after the decimal point for formatted output.
nsmall <- 1
# Mean-center a numeric vector/matrix without rescaling, via base scale().
# Returns a matrix with a "scaled:center" attribute, as scale() does.
center_scale <- function(x) {
  scale(x, center = TRUE, scale = FALSE)
}
|
67c51dd85708030096773487252c285801d2ec6b
|
94d9d1198fb596683d506ef2b621dfd67a8f2c12
|
/app.R
|
90243f21e75ca1a84ca6c767c5730ea70c6fe637
|
[] |
no_license
|
anushreeavasthi/SportsAnalytics
|
389c05e4d872c846b6a0689398da2783eadac037
|
9386a79c56750eb494429593469acff1236a655f
|
refs/heads/master
| 2023-08-29T23:52:44.141400
| 2021-10-30T00:08:49
| 2021-10-30T00:08:49
| 422,742,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 53,114
|
r
|
app.R
|
library(rtweet)
library(dplyr)
library(tidyr)
library(tidytext)
library(textdata)
library(ggplot2)
library(tibble)
library(purrr)
library(devtools)
library(wordcloud)
library(wordcloud2)
library(RColorBrewer)
library(tm)
library(shiny)
library(datasets)
library(rvest)
library(gganimate)
library(RColorBrewer)
library(kableExtra)
library(plotly)
library(data.table)
library(highcharter)
library(DT)
library(tableHTML)
# Football club names used to initialise the team list (order preserved).
init_teams <- c(
  "Arsenal", "Aston Villa", "Burnley", "Chelsea", "Crystal Palace",
  "Everton", "Hull", "Leicester", "Liverpool", "Man City",
  "Man United", "Newcastle", "QPR", "Southampton", "Stoke",
  "Sunderland", "Swansea", "Tottenham", "West Brom", "West Ham"
)
# Replace every NA in `dt` with `value` (default 0), column by column, using
# set() for by-reference updates, and return the modified table.
# NOTE(review): set() here appears to be data.table::set, so `dt` is presumably
# a data.table modified in place, and `value` must be type-compatible with
# every column -- confirm both before reuse.
na_converter <- function(dt, value = 0) {
  for (j in names(dt)) set(dt, which(is.na(dt[[j]])), j, value)
  dt
}
ui <- shinyUI(fluidPage(
titlePanel("European Football Analytics"),
tabsetPanel(
tabPanel('Welcome',
div(class = 'bg',
div(class = 'first_line', strong('European Football Analytics')),
br(),
div(id = 'row_images',
img(class = 'flags', src = "England.png"),
img(class = 'flags', src = "Scotland.png"),
img(class = 'flags', src = "Germany.png"),
img(class = 'flags', src = "Italy.png"),
img(class = 'flags', src = "Spain.png"),
img(class = 'flags', src = "France.png"),
img(class = 'flags', src = "Netherlands.png"),
img(class = 'flags', src = "Belgium.png"),
img(class = 'flags', src = "Portugal.png"),
img(class = 'flags', src = "Turkey.png"),
img(class = 'flags', src = "Greece.png")
),
br(),
div(class = 'third_line',
'Explore the tabs at the top to begin'),
div(class= 'third_line',
'Developed by Anushree Avasthi and Aditya Agrawal')
),
img(class = 'background',
src = 'img2.jpg')
),
tabPanel("Sentiment Analysis",
sidebarPanel(
tags$b("Here, you can compare sentiments surrounding football teams by inputting indicative hashtags. This will provide most common unique words, positive/negative texts and word clouds based on tweets using the hashtags you provide."),
## Getting hashtags from users as inputs
br(),
br(),
textInput("hash1", "Insert First Hashtag (Ex: #manutd)", "#"),
tags$b("This may take a few seconds, currently loading:"),
verbatimTextOutput("value1"),
br(),
textInput("hash2", "Insert Second Hashtag (Ex: #liverpool)", "#"),
tags$b("This may take a few seconds, currently loading:"),
verbatimTextOutput("value2")
),
mainPanel(
tags$b("Most common unique words and their counts for Hashtag 1"),
plotOutput("plotHashCount1"),
br(),
tags$b("Most common unique words and their counts for Hashtag 2"),
plotOutput("plotHashCount2"),
br(),
tags$b("Positive and Negative reviews for Hashtag 1"),
plotOutput("plotBingVisually1"),
br(),
tags$b("Positive and Negative reviews for Hashtag 2"),
plotOutput("plotBingVisually2"),
br(),
tags$b("Word cloud for hashtag 1"),
plotOutput("plotHash1"),
br(),
tags$b("Word cloud for hashtag 2"),
plotOutput("plotHash2"),
br(),
#tags$b("Bing results Tibble for both the hashtags"),
#tableOutput("BingAnalysisTibble"),
br()
)
),
tabPanel( "Standings-Premier League",
mainPanel(
tags$b("This page may take a few seconds to load.."),
br(),
br(),
br(),
tags$b("Click on PLAY to visualize the points progression every game week of the title challengers (Premier League 2021/22)"),
plotlyOutput("plotPremierLeaguePlayTop"),
br(),
tags$b("Find the color of your favorite team and observe their weekwise progression with respect to all premier league teams this season"),
plotlyOutput("plotPremierLeaguePlay"),
br(),
tags$b("Every Premier League team and their point-growth per weeek (2021/22)"),
plotOutput("plotPremierLeague"),
br(),
tags$b("Premier League 2021-2022 Standings"),
tableOutput("PLStanding"),
br()
)
),
#tab Application top of app
tabPanel(
'Sports Betting',
tags$head(includeCSS('www/football.css')),
tags$head(tags$script(src="floating_sidebar.js")),
sidebarLayout(
#sidebar starts here
sidebarPanel(
selectInput('country',
'Select Country',
choices = c('England',
'Scotland',
'Germany',
'Italy',
'Spain',
'France',
'Netherlands',
'Belgium',
'Portugal',
'Turkey',
'Greece'),
selected = 'England'),
selectInput('division',
'Select Division',
choices = c('Premier League',
'Championship',
'League 1',
'League 2',
'Conference'),
selected = 'Premier League'),
# selectInput('season',
# 'Select Season',
# choices = rev(paste(1993:2017, shift(1993:2017, type='lead'),
# sep = '-'))[-1],
# selected = c('2016-2017')),
sliderInput('date_range',
'Select the Date Range',
value = c(as.IDate('2020-08-01'), as.IDate('2021-06-30')),
min = as.IDate('2020-08-01'),
max = as.IDate('2022-06-30')),
width = 3
),
#main panel starts here
mainPanel(
tabsetPanel(
tabPanel('About',
tags$h3("Which Sports Betting platform should you bet on today?"),
br(),
br(),
tags$b("Apart from helping you decide which website you should use to bet on your favorite fixture this week, on this tab, you can select from footballing leagues in 11 countries (Ex: England), and various vertical divisions (Ex: Premier league, Championship). The table will provide you with basic information about all fixtures in that season such as Goals Scored by both teams, Shots on target, Yellow/Red cards received, and who refereed the fixture. But more importantly, it will prove you with all sports betting-related data compiled from various platforms such as Bet365, BetVictor and more. Once you know what bet you want to place, this table will provide you with the information about what website provides you with the best odds, and the chance to win the most money. Gambling always has a chance of losing money and you should bet responsibly!"),
),
#tab league table in Application tab
tabPanel('Fixture wise data (20-21)',
br(),
fixedRow(
tableHTML_output('league_table', height = '800px')
)),
tabPanel('Fixture wise data (21-22)',
br(),
tags$b("Please correct the date range on the slider to get the correct results. By default results are shown from the start of the 2021-2022 period."),
fixedRow(
tableHTML_output('league_table21', height = '800px')
))
),
width = 9)
)
),
tabPanel('About',
fixedRow(
column(
img(src = 'stadium-pic.jpg', id = 'ribbon-about', height = 230),
width = 12
)
),
br(),
fixedRow(
column(
HTML('<ul class="nav nav-pills nav-justified">
<li class="active">
<a href="#about-tab-1" data-toggle="tab" data-value="Author">Author</a>
</li>
<li>
<a href="#about-tab-2" data-toggle="tab" data-value="Data/Charts">About the dashboard</a>
</li>
<li>
<a href="#about-tab-3" data-toggle="tab" data-value="Contact">Contact</a>
</li>
</ul>'),
width = 12
)
),
HTML('<div class="container-fluid">
<div class="tab-content">
<div class="tab-pane active" data-value="Author" id="about-tab-1">'),
br(),
br(),
br(),
column(width = 2),
column(h2('Author'), div(id = 'about',
'You can connect with the authors on linkedin :',
a(id = 'footy', 'Aditya Agrawal',
href = 'https://www.linkedin.com/in/adityaagrawal314'),
'and',
a(id = 'footy', 'Anushree Avasthi',
href = 'https://in.linkedin.com/in/anushree-avasthi'),
'.' ),
width = 8),
column(width = 2),
HTML(' </div>
<div class="tab-pane" data-value="Data/Charts" id="about-tab-2">'),
br(),
br(),
br(),
column(width = 2),
column(h2('Data'),
div(id = 'about', 'The data in this application has been sourced from multiple applications. For sentiment analysis we have used Twitter APIs, Premier League data is sourced from',
a(id = 'footy', 'weltfussball',
href = 'https://www.weltfussball.de/'),
'and data for the sports betting tab has been collected from',
a(id = 'footy', 'football-data.co.uk',
href = 'http://www.football-data.co.uk/data'),
'.'),
br(),
h2('Methodology'),
div(id='about','We have used various methodologies across tabs to develop this dashboard.'),
h3('Sentiment Analysis'),
div(id = 'about', 'We used Twitter APIs to retrieve tweets by hashtags and performed sentiment analysis on them. We employed the use of bing lexicon to classify words and gauge the sentiment (by calculating the total score) of the tweet. Based on the results we created word clouds for most commonly used words and positive/negative words used with a partiular hashtag by occurence.'),
br(),
h3('Standings- Premier League'),
div(id = 'about', 'The English Premier League is one of the most followed football leagues. To depict how teams are performing in the current season, we used web scrapping to scrap the last modified data from this years Premier League and used plotly graphs to visualize the same. Users can come to this tab to see how their teams have performed over the season.'),
br(),
h3('Sports-betting'),
div(id = 'about', 'Here we allow the users to select from footballing leagues in 11 countries (Ex: England), and various vertical divisions (Ex: Premier league, Championship). The table will provide users with basic information about all fixtures in that season such as Goals Scored by both teams, Shots on target, Yellow/Red cards received, and who refereed the fixture. But more importantly, it will provide users with all sports betting-related data compiled from various platforms such as Bet365, BetVictor and more. We have rigorously worked on compiling this data for sports betting related activities.'),
br(),
width = 8),
column(width = 2),
HTML(' </div>
<div class="tab-pane" data-value="Contacts" id="about-tab-3">'),
br(),
br(),
br(),
column(width = 2),
column(h2('Contact'),
div(id = 'about', 'If you would like to contact the authors about the application feel
free to drop an email at:',
a(id = 'email', 'aaa107@duke.edu',
href = 'mailto:aaa107@duke.edu'),
'or you can mail',
a(id = 'email', 'ama131@duke.edu',
href = 'mailto:ama131@duke.edu'),'.'),
width = 8 ),
column(width = 2),
HTML(' </div>
</div>
</div>')
)
)
)
);
server <- function(input, output, session) {
output$value1 <- renderText({ input$hash1 })
output$value2 <- renderText({ input$hash2 })
## Authenticating into the twitter api
# SECURITY NOTE(review): API credentials are hard-coded in source. They are
# now public and should be revoked, then supplied via environment variables
# (e.g. Sys.getenv("TWITTER_CONSUMER_KEY")) or a gitignored config file.
app_name= 'aaa107_ama131'
consumer_key= 'NjSBG2CwK8n4u85U8cyZVlv8Y'
consumer_secret= 'iQP20TbFqfcy0x6xRR8IUNkQG7oa6MZY4F74QZcVcqSXJwpQCY'
access_token= '1044601287071809536-h5vLHK8AhJgbfsQpgwwRatMsfUtnLS'
access_secret= '5HnjbgWInUPANATnd6YfITK4cmvpHoxk4TU69zrxUJ9xM'
#Authentication
# Creates and caches the rtweet OAuth token used by every search_tweets()
# call in this server function.
create_token(app= app_name,
             consumer_key=consumer_key,
             consumer_secret= consumer_secret,
             access_token= access_token,
             access_secret= access_secret)
## Adding relevant hashtags based on user inputs
## Relevant inputs from users are input$hash1 and input$hash2
#Set of common code for bing and afinn
## For Afinn
########## Common code ends #########
#Using bing method
# Score the Bing-lexicon sentiment of one tweet.
#
# Steps: strip URLs, tokenize to words, drop stop words, keep only words in
# the Bing lexicon, then score each word +n (positive) / -n (negative).
#
# Returns list(score, type, twt_tbl):
#   score   - summed sentiment score (0 when no lexicon word matched)
#   type    - "Type 1" if the tweet contained no Bing words, else "Type 2"
#   twt_tbl - per-word counts with sentiment and score columns
#
# This function was previously copy-pasted verbatim inside all three render
# expressions below; it is defined once here, behavior unchanged.
sentiment_bing <- function(twt) {
  twt_tbl <- tibble(text = twt) %>%
    mutate(
      # Remove http links manually
      stripped_text = gsub("http\\S+", "", text)
    ) %>%
    unnest_tokens(word, stripped_text) %>%
    anti_join(stop_words) %>%                 # remove stop words
    inner_join(get_sentiments("bing")) %>%    # keep only lexicon words
    count(word, sentiment, sort = TRUE) %>%
    ungroup() %>%
    mutate(
      score = case_when(
        sentiment == 'negative' ~ n * (-1),
        sentiment == 'positive' ~ n * 1)
    )
  # Total score; 0 when no Bing word was found in the tweet
  sent.score <- case_when(
    nrow(twt_tbl) == 0 ~ 0,
    nrow(twt_tbl) > 0 ~ sum(twt_tbl$score)
  )
  # Keep track of tweets with no bing words
  zero.type <- case_when(
    nrow(twt_tbl) == 0 ~ "Type 1",
    nrow(twt_tbl) > 0 ~ "Type 2"
  )
  list(score = sent.score, type = zero.type, twt_tbl = twt_tbl)
}
# Raw per-tweet sentiment breakdown for hashtag 1.
output$BingAnalysis <- renderPrint({
  tweets1 <- search_tweets(input$hash1, n = 100, include_rts = FALSE)
  lapply(tweets1$text, sentiment_bing)
})
# Raw per-tweet sentiment breakdown for hashtag 2.
output$BingAnalysis2 <- renderPrint({
  tweets2 <- search_tweets(input$hash2, n = 100, include_rts = FALSE)
  lapply(tweets2$text, sentiment_bing)
})
# Side-by-side tibble of per-tweet scores for both hashtags.
output$BingAnalysisTibble <- renderTable({
  sent1 <- lapply(search_tweets(input$hash1, n = 100,
                                include_rts = FALSE)$text, sentiment_bing)
  sent2 <- lapply(search_tweets(input$hash2, n = 100,
                                include_rts = FALSE)$text, sentiment_bing)
  bind_rows(
    tibble(
      country = input$hash1,
      score = unlist(map(sent1, 'score')),
      type = unlist(map(sent1, 'type'))
    ),
    tibble(
      country = input$hash2,
      score = unlist(map(sent2, 'score')),
      type = unlist(map(sent2, 'type'))
    )
  )
})
# Build and draw a word cloud for the 100 most recent tweets matching a
# hashtag. `query` is a zero-argument function read INSIDE renderPlot so the
# reactive dependency on the input is preserved.
#
# This pipeline was previously duplicated for plotHash1/plotHash2; it also
# contained several `gsub(..., docs$text)` calls whose results were
# discarded (no-ops on a tm Corpus) — those dead lines are removed.
render_hash_cloud <- function(query) {
  renderPlot({
    tweets <- search_tweets(query(), n = 100, include_rts = FALSE)
    docs <- Corpus(VectorSource(tweets$text))
    # Replace noise patterns (separators, handles, URL fragments) by spaces
    to_space <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
    for (pat in c("/", "@", "\\|", "https", "tco")) {
      docs <- tm_map(docs, to_space, pat)
    }
    # Convert the text to lower case
    docs <- tm_map(docs, content_transformer(tolower))
    # Remove numbers
    docs <- tm_map(docs, removeNumbers)
    # Remove english common stopwords
    docs <- tm_map(docs, removeWords, stopwords("english"))
    # Remove custom stopwords (placeholders kept from the original code)
    docs <- tm_map(docs, removeWords, c("blabla1", "blabla2"))
    # Remove punctuation and collapse whitespace
    docs <- tm_map(docs, removePunctuation)
    docs <- tm_map(docs, stripWhitespace)
    # Term frequencies, most frequent first
    freqs <- sort(rowSums(as.matrix(TermDocumentMatrix(docs))),
                  decreasing = TRUE)
    d <- data.frame(word = names(freqs), freq = freqs)
    # Fixed seed so the cloud layout is reproducible across renders
    set.seed(1234)
    wordcloud(words = d$word, freq = d$freq, min.freq = 1,
              max.words = 200, random.order = FALSE, rot.per = 0.35,
              colors = brewer.pal(8, "Dark2"))
  })
}
output$plotHash1 <- render_hash_cloud(function() input$hash1)
output$plotHash2 <- render_hash_cloud(function() input$hash2)
# Horizontal bar chart of the ten most frequent (non-stop) words in the 100
# most recent tweets for a hashtag. `query` is a zero-argument function read
# inside renderPlot to keep reactivity; `label` identifies the hashtag slot.
#
# BUG FIX: the original called coord_flip() but then set
# labs(x = "Count", y = "Hashtag N"), which put "Count" on the word axis and
# the hashtag label on the count axis — i.e. both axis labels were swapped.
# The count axis is now labeled "Count", the word axis is unlabeled, and the
# hashtag-slot label is shown as the plot title instead.
render_hash_counts <- function(query, label) {
  renderPlot({
    tweets <- search_tweets(query(), n = 100, include_rts = FALSE)
    cleaned <- tweets %>%
      select(screen_name, text) %>%
      # Remove http links, then tokenize (unnest_tokens also lowercases
      # and strips punctuation) and drop stop words
      mutate(stripped_text1 = gsub("http\\S+", "", text)) %>%
      select(stripped_text1) %>%
      unnest_tokens(word, stripped_text1) %>%
      anti_join(stop_words)
    cleaned %>%
      count(word, sort = TRUE) %>%
      top_n(10) %>%
      mutate(word = reorder(word, n)) %>%
      ggplot(aes(x = word, y = n)) +
      geom_col() +
      coord_flip() +
      theme_classic() +
      labs(x = NULL,
           y = "Count",
           title = label)
  })
}
output$plotHashCount1 <- render_hash_counts(function() input$hash1, "Hashtag 1")
output$plotHashCount2 <- render_hash_counts(function() input$hash2, "Hashtag 2")
# Faceted bar chart of the top-10 words contributing to positive/negative
# Bing sentiment in recent tweets for a hashtag. `query` is a zero-argument
# function read inside renderPlot to keep reactivity.
#
# BUG FIX: geom_col(show.legends = FALSE) misspelled the `show.legend`
# argument, so ggplot2 warned about an unknown parameter and still drew the
# legend. Corrected to show.legend = FALSE.
# (Pipeline deduplicated from the former plotBingVisually1/2 copies.)
render_bing_contrib <- function(query) {
  renderPlot({
    tweets <- search_tweets(query(), n = 100, include_rts = FALSE)
    cleaned <- tweets %>%
      select(screen_name, text) %>%
      # Remove http links, tokenize, drop stop words
      mutate(stripped_text1 = gsub("http\\S+", "", text)) %>%
      select(stripped_text1) %>%
      unnest_tokens(word, stripped_text1) %>%
      anti_join(stop_words)
    # Per-word counts restricted to Bing lexicon words
    bing_counts <- cleaned %>%
      inner_join(get_sentiments("bing")) %>%
      count(word, sentiment, sort = TRUE) %>%
      ungroup()
    bing_counts %>%
      group_by(sentiment) %>%
      top_n(10) %>%
      ungroup() %>%
      mutate(word = reorder(word, n)) %>%
      ggplot(aes(word, n, fill = sentiment)) +
      geom_col(show.legend = FALSE) +
      facet_wrap(~sentiment, scales = "free_y") +
      labs(title = "",
           y = "Contribution to sentiment",
           x = NULL) +
      coord_flip() + theme_bw()
  })
}
output$plotBingVisually1 <- render_bing_contrib(function() input$hash1)
output$plotBingVisually2 <- render_bing_contrib(function() input$hash2)
#Code for Premier League Standing web scrapping
# ---- Premier League scraping, shared by the four outputs below ----------
# The original code repeated the same ~60-line scrape (plus dead probe
# requests, unused timing code, discarded head()/kable() calls and an unused
# palette in the plotly branches) inside each render expression. It is
# factored out here; the scraped data and derived orderings are unchanged.
#
# Scrape cumulative league tables for weeks 1..`weeks` of the 2021-2022
# English Premier League season from weltfussball.de.
# Returns list(season, standings):
#   season    - one row per team per week, with the team factor leveled by
#               final standing so legend/colour order follows rank
#   standings - week-`weeks` table sorted by points, best team first
# NOTE(review): this re-scrapes the site on every render — consider wrapping
# in a reactive()/cache if these tabs feel slow.
scrape_pl_season <- function(weeks = 9) {
  base_url <- "https://www.weltfussball.de/"
  path <- "spielplan/eng-premier-league-2021-2022-spieltag/"
  xpath <- "/html/body/div[3]/div[2]/div[4]/div[2]/div[1]/div/div[7]/div/table[1]"
  # One cumulative table per match week
  rounds <- lapply(seq_len(weeks), function(i) {
    page <- read_html(paste0(base_url, path, i))
    data.frame(html_table(html_nodes(page, xpath = xpath)))
  })
  uk18 <- do.call("rbind", rounds)
  uk18 <- uk18 %>% select(3:10)
  colnames(uk18) <- c("team", "week", "won", "drawn", "lost", "goals",
                      "difference", "points")
  # Split the "scored:against" goals column into two columns
  uk18 <- uk18 %>% separate(goals, c("scored", "against"), sep = "\\:")
  standings <- uk18 %>%
    filter(week == weeks) %>%
    arrange(desc(points))
  uk18$team <- factor(uk18$team, levels = as.character(standings$team))
  list(season = uk18, standings = standings)
}
# Current standings table (week 9 of 2021-22).
output$PLStanding <- renderTable({
  scrape_pl_season()$standings
})
# Smoothed points-per-week timeline for every team.
output$plotPremierLeague <- renderPlot({
  season <- scrape_pl_season()$season
  # Expand the 9-colour Set1 palette to one colour per team
  get_palette <- colorRampPalette(brewer.pal(9, "Set1"))
  ggplot(season, aes(x = week, y = points, col = team)) +
    geom_smooth(se = TRUE) +
    theme(text = element_text(size = 15)) +
    scale_color_manual(values = get_palette(length(unique(season$team))))
})
# Animated (one frame per week) points scatter for all teams.
output$plotPremierLeaguePlay <- renderPlotly({
  scrape_pl_season()$season %>%
    plot_ly(
      x = ~week,
      y = ~points,
      size = ~difference,
      color = ~team,
      frame = ~week,
      text = ~team,
      hoverinfo = "text",
      type = 'scatter',
      mode = 'markers'
    )
})
# Same animation restricted to the traditional "top four" clubs.
output$plotPremierLeaguePlayTop <- renderPlotly({
  scrape_pl_season()$season %>%
    filter(team == "Manchester United" | team == "Chelsea FC" |
             team == "Liverpool FC" | team == "Manchester City") %>%
    plot_ly(
      x = ~week,
      y = ~points,
      size = ~difference,
      color = ~team,
      frame = ~week,
      text = ~team,
      hoverinfo = "text",
      type = 'scatter',
      mode = 'markers'
    )
})
#Cumulative football statistics
#declare reactive values------------------------------------------------------
#declare reactive values------------------------------------------------------
# Cross-reactive storage shared by the betting-data tab: values$country is
# written by the observer below and read (via isolate) by the data loaders;
# values$data caches the most recently filtered fixture table.
values <- reactiveValues()
#an observe function that updates sidebar selectInputs------------------------
# Keeps the division and season dropdowns consistent with the selected
# country: each country exposes only its own divisions and the seasons for
# which football-data.co.uk has files.
observe({
  #update division when selecting country
  updateSelectInput(
    session,
    'division',
    choices = switch(input$country,
                     Greece = 'Superleague',
                     England = c('Premier League',
                                 'Championship',
                                 'League 1',
                                 'League 2',
                                 'Conference'),
                     Scotland = c('Premier League',
                                  'Division 1',
                                  'Division 2',
                                  'Division 3'),
                     Germany = c('Bundesliga 1',
                                 'Bundesliga 2'),
                     Italy = c('Serie A',
                               'Serie B'),
                     Spain = c('Primera Division',
                               'Segunda Division'),
                     France = c('Ligue 1',
                                'Ligue 2'),
                     Netherlands = 'Eredivisie',
                     Belgium = 'Jupiler League',
                     Portugal = 'Liga 1',
                     Turkey = 'Ligi 1')
  )
  #update years when selecting country
  # Each expression builds season labels like "1994-1995": paste() pairs each
  # start year with the next year via shift(..., type = 'lead'); the final
  # "2017-NA" artifact ends up first after rev() and is dropped by [-1].
  updateSelectInput(
    session,
    'season',
    choices = switch(input$country,
                     Greece = rev(paste(1994:2017,
                                        shift(1994:2017, type = 'lead'),
                                        sep = '-'))[-1],
                     England = rev(paste(1993:2017,
                                         shift(1993:2017, type = 'lead'),
                                         sep = '-'))[-1],
                     Scotland = rev(paste(1994:2017,
                                          shift(1994:2017, type = 'lead'),
                                          sep = '-'))[-1],
                     Germany = rev(paste(1993:2017,
                                         shift(1993:2017, type = 'lead'),
                                         sep='-'))[-1],
                     Italy = rev(paste(1993:2017,
                                       shift(1993:2017, type = 'lead'),
                                       sep='-'))[-1],
                     Spain = rev(paste(1993:2017,
                                       shift(1993:2017, type = 'lead'),
                                       sep = '-'))[-1],
                     France = rev(paste(1993:2017,
                                        shift(1993:2017, type = 'lead'),
                                        sep = '-'))[-1],
                     Netherlands = rev(paste(1993:2017,
                                             shift(1993:2017, type = 'lead'),
                                             sep = '-'))[-1],
                     Belgium = rev(paste(1995:2017,
                                         shift(1995:2017, type = 'lead'),
                                         sep = '-'))[-1],
                     Portugal = rev(paste(1994:2017,
                                          shift(1994:2017, type = 'lead'),
                                          sep = '-'))[-1],
                     Turkey = rev(paste(1994:2017,
                                        shift(1994:2017, type = 'lead'),
                                        sep = '-'))[-1]
    )
  )
  #pass country to global reactive values
  values$country <- input$country
})
#reactive function that downloads the dataset from----------------------------
#football-data.co.uk----------------------------------------------------------
# Factory for season data loaders. The original file contained two ~100-line
# copy-pasted reactives (dataInput for '20-21', dataInput1 for '21-22') plus
# two copy-pasted renderTable expressions; they are deduplicated here with
# identical behavior.
#
# Each loader downloads the football-data.co.uk CSV for the currently
# selected country/division in the given season folder, cleans it, refreshes
# the in-tab team selectors, and returns the fixture table as a data.table.
make_footy_input <- function(season_folder) {
  reactive({
    # translate country to football-data.co.uk code
    country_code <- switch(isolate(values$country),
                           Greece = 'G',
                           England = 'E',
                           Scotland = 'SC',
                           Germany = 'D',
                           Italy = 'I',
                           Spain = 'SP',
                           France = 'F',
                           Netherlands = 'N',
                           Belgium = 'B',
                           Portugal = 'P',
                           Turkey = 'T')
    # translate division to code. `Premier League` maps to '0' for both
    # England (E0) and Scotland (SC0); the original listed the key twice,
    # but switch() only ever matches the first occurrence, so the duplicate
    # entry was dead code and has been removed.
    division_code <- switch(input$division,
                            `Premier League` = '0',
                            Championship = '1',
                            `League 1` = '2',
                            `League 2` = '3',
                            Conference = 'C',
                            `Division 1` = '1',
                            `Division 2` = '2',
                            `Division 3` = '3',
                            `Bundesliga 1` = '1',
                            `Bundesliga 2` = '2',
                            `Serie A` = '1',
                            `Serie B` = '2',
                            `Primera Division` = '1',
                            `Segunda Division` = '2',
                            `Ligue 1` = '1',
                            `Ligue 2` = '2',
                            Eredivisie = '1',
                            `Jupiler League` = '1',
                            `Liga 1` = '1',
                            `Ligi 1` = '1',
                            Superleague = '1')
    # NOTE(review): the season folder is fixed per loader and input$season is
    # ignored (the season_code derivation was already commented out in the
    # original) — confirm whether the season selector should drive this URL.
    footy_set <- read.csv(paste0('http://www.football-data.co.uk/mmz4281/',
                                 season_folder,
                                 '/',
                                 country_code,
                                 division_code,
                                 '.csv'))
    # some files contain empty lines which cause problems with date
    # conversions — remove them
    footy_set <- footy_set[!footy_set$Date == '', ]
    # remove empty (auto-named X...) columns
    footy_set <- footy_set[, grep('^X',
                                  names(footy_set),
                                  invert = TRUE,
                                  value = TRUE)]
    # convert to data.table for fast date filtering downstream
    setDT(footy_set)
    footy_set[, Date := as.IDate(Date, format = '%d/%m/%y')]
    # refresh every in-tab team selector with this league's teams
    teams <- sort(unique(c(footy_set$HomeTeam, footy_set$AwayTeam)))
    for (selector in c('team', 'result_team', 'OU_team',
                       'cards_team', 'corner_team')) {
      updateSelectInput(session, selector, choices = teams)
    }
    footy_set
  })
}
# Past season (2020-21) and current season (2021-22) loaders.
dataInput <- make_footy_input('20-21')
dataInput1 <- make_footy_input('21-22')
# Render a fixture table restricted to the user's date range. The filtered
# table is also cached in values$data for other consumers.
make_league_table <- function(get_season) {
  renderTable({
    footy_table <- get_season()
    footy_table <- footy_table[between(Date,
                                       input$date_range[1],
                                       input$date_range[2]), ]
    values$data <- footy_table
    footy_table
  })
}
output$league_table <- make_league_table(dataInput)
output$league_table21 <- make_league_table(dataInput1)
}
shinyApp(ui, server)
|
6ff3c6b4f136c9355d0513dc8bae3169df958fd5
|
71ee4e97d4857b59eb05ee12a13d497cecdfcfc5
|
/Part I - beta_distribution.R
|
686ad7d043d2963a212805a54aa09d085da61239
|
[] |
no_license
|
nailson/bayesian_statistics_the_fun_way
|
8a53f9199817c23e9ab215eb7e8dc3f2458490f3
|
090ebf11d2df0d37571dbbca8d4c6c5889f0279a
|
refs/heads/main
| 2023-02-22T11:39:29.740870
| 2021-01-23T18:37:08
| 2021-01-23T18:37:08
| 307,875,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,125
|
r
|
Part I - beta_distribution.R
|
#install.packages("ggplot2")
#install.packages("reshape")
library(reshape)
library(ggplot2)
library(dplyr)
# PART I - Chapter 5
# Beta Distribution
# Fair coin A, not so fair coin B and C
# NOTE(review): with ifelse(runif(100) > p, 1, 0) the probability of heads
# is 1 - p, so coin_B lands heads ~70% of the time and coin_C ~30% — the
# opposite of what the thresholds might suggest at first glance.
coin_tosses = list(ifelse(runif(100) > 0.5, 1, 0),
                   ifelse(runif(100) > 0.3, 1, 0),
                   ifelse(runif(100) > 0.7, 1, 0)
)
# Create a Dataframe with all coin tosses
df = as.data.frame(coin_tosses, col.names = c("coin_A","coin_B","coin_C"))
# Sequential toss index, used as the id column when melting to long format
df["coin_toss"] = c(1:nrow(df))
# melt the tosses into a column
df_tosses = melt(df, id=c("coin_toss"))
# Now let's calculate the tosses by coin
grouped_tosses = df_tosses %>% group_by(variable, value) %>% count()
grouped_tosses
# Illustrative: P(rate < 1/2) under a Beta(50, 50) distribution
pbeta(1/2, 50, 50)
# First, what's the probability that the Coin is Fair with 5% of tolerance
# (i.e. the mass of Beta(n_heads, n_tails) inside [0.45, 0.55])
# NOTE(review): the lookups below assume every coin produced at least one
# head and one tail in 100 tosses; otherwise the subset is empty and the
# [[1]] extraction errors.
for (coin in unique(grouped_tosses$variable)){
  n_heads = grouped_tosses[grouped_tosses$variable==coin & grouped_tosses$value==1, "n"][[1]]
  n_tails = grouped_tosses[grouped_tosses$variable==coin & grouped_tosses$value==0, "n"][[1]]
  print(integrate(function(x) dbeta(x, n_heads, n_tails),0.45, 0.55))
}
|
9d4f81fd7eb1f3ca932f584883a92418354b983e
|
f2368c5e069c227c12505a6c37e65aa54f047c33
|
/man/events.Rd
|
4230506df28e1c3fcbbefc5efbf39c9dfd6ac14d
|
[] |
no_license
|
simrvprojects/SimRVPedigree
|
55687ca1c7ad25a816d0e300c7073ee58284d03a
|
f05187583a43a276354493d434db57b37356a348
|
refs/heads/master
| 2022-04-01T01:53:41.181100
| 2020-02-09T19:03:52
| 2020-02-09T19:03:52
| 64,960,974
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 436
|
rd
|
events.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Event_Methods.R
\name{events}
\alias{events}
\title{Constructor function for an object of class events}
\usage{
events(life_events)
}
\arguments{
\item{life_events}{The list of items returned by the \code{sim_life} function.}
}
\value{
an object of class \code{events}.
}
\description{
Constructor function for an object of class events
}
\keyword{internal}
|
956a41ac42e7cd8215568da86b92bd9e86f6c6a2
|
e82f699df6f012008e86e54df23ea2cbaf157559
|
/R/myread.R
|
0fd71c5ee9ffe23c4fc804c9723a7cf94261e453
|
[] |
no_license
|
emy424/MATH4753
|
11dffabb25848f8ffbadcc1987b38b72c614a1a2
|
837e52170cc3d1994717d9474196f80b632e4edd
|
refs/heads/master
| 2020-12-31T21:01:47.470007
| 2020-04-17T21:14:11
| 2020-04-17T21:14:11
| 239,024,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 361
|
r
|
myread.R
|
# Default data directory (kept for backward compatibility with existing
# calls that rely on the author's machine layout).
dird="C:\\Users\\Emily\\Documents\\STAT\\MATH4753\\"
#' File Reading Function
#'
#' Reads a comma-separated data file (with a header row) from a directory.
#'
#' @param csv
#' Name of the data file, e.g. "spruce.csv".
#' @param dir
#' Directory containing the file, INCLUDING the trailing path separator
#' (the path is built by simple concatenation, as in the original).
#' Defaults to the hard-coded \code{dird} so existing calls are unchanged;
#' pass your own path to use this function on another machine.
#'
#' @return A data frame with the data read in from the csv file.
#' @export
#'
#' @examples
#' csv = "spruce.csv"; myread(csv)
myread=function(csv, dir = dird){
  fl = paste0(dir, csv)
  read.table(fl, header = TRUE, sep = ",")
}
|
0848b5bb67aa6c82264c5ba9ab5931801aab5af2
|
b5c8656720b07f660e6a83cd0d334b2a59596c50
|
/r-analysis/R/server.R
|
8cf44cc05468263070b79d46fb9f34aa5664463d
|
[] |
no_license
|
forrestsill/abu
|
e827cb9b21d5022ad97c6fb71bb395d1442bdeb3
|
853688224129398221fa47e50f2f4717a7c55b30
|
refs/heads/master
| 2020-04-29T01:38:45.185845
| 2019-03-15T15:06:57
| 2019-03-15T15:06:57
| 175,737,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,428
|
r
|
server.R
|
server <- function(input, output) {
  # Conviction-survey dashboard: plots vote time series filtered by the
  # sidebar's date range, trade numbers, and PM selection.
  output$distPlot <- renderPlot({
    # load data
    # NOTE(review): hard-coded local path — confirm it is reachable on the
    # deployment host and consider making it configurable.
    # data <- readRDS("//samba/proj/pmg-east/MBS Inv Team/Investment Process/Infrastructure/Software/ConvictionSurvey/data.rds")
    data <- data.table(read_excel("H:/apps/xp/Desktop/AJ Hackathon/data.xlsx"))
    # formatting
    data <- data.table(data)
    names(data) <- c("date", "user", "trade", "vote")
    data <- data[!(data$vote %in% c("MARGINAL_NEGATIVE", "WEAK_NEGATIVE")), ] # clean votes
    # "3 Units" -> 3
    data$vote <- gsub(" Units", "", data$vote)
    data$vote <- as.numeric(data$vote)
    # remove fake users, AJ
    data <- data[!(data$user %in% c("test", "Test User", "areisman")), ]
    # add trade number
    data$trade_num <- data$trade
    data <- data[!(data$trade_num %in% c("", "Y2 ")), ] # remove fake trade numbers
    # add indicator for potential trades
    data$pot_trade <- data$trade
    # add indicator for active trades (hard-coded list of live trade numbers)
    data$active_trade <- ifelse(data$trade_num %in% c(218, 222, 238, 240, 242, 244, 245, 246),
                                1, 0)
    # user-selected dates
    data <- data[as.Date(data$date) >= min(input$slider_dates), ]
    data <- data[as.Date(data$date) <= max(input$slider_dates), ]
    # user-selected trades
    data <- data[data$trade_num %in% input$trade_number, ]
    # user-selected pms
    data <- data[data$user %in% as.character(input$pm), ]
    # (disabled) potential/active trade filter, kept for future use:
    # v <- as.numeric(input$trade_type)
    # if (v == 1) data <- data[data$active_trade == 1, ]
    # if (v == 2) data <- data[data$active_trade == 0, ]
    # if (v == 3) data <- data[data$pot_trade == 1, ]
    # line-colour and facet grouping variables chosen by the user
    # (input index 1 = user, 2 = trade_num)
    v <- c("user", "trade_num")
    lines <- v[as.numeric(input$lines)]
    facet <- v[as.numeric(input$facet)]
    # BUG FIX: p1 was previously assigned only when rows survived the
    # filters, so an empty selection errored with "object 'p1' not found".
    # Default to NULL so Shiny renders an empty plot instead.
    p1 <- NULL
    if (nrow(data) > 0) {
      p1 <- ggplot(data, aes(x = date, y = vote,
                             col = factor(get(lines)))) +
        facet_grid(~ get(facet)) +
        geom_line(size = 2) +
        labs(col = lines, x = "", y = "") +
        theme_bw() + xlab("") + ylab("vote") +
        scale_color_blk() +   # project-defined palettes; assumed in scope
        scale_fill_blk()
    }
    p1
  })
}
|
35582bf4664eb7943cf595255015cb4c9dedb926
|
50fd7e8cc064a7f35ec30bd8bdf47ab648ee60bf
|
/man/arseq.mvg.Rd
|
40f2b98577f24751284b385a62d9f6afdf34aa1d
|
[] |
no_license
|
ajitjohnson/arseq
|
a0c8c4996c7137dbe8bf15d9b559191a27fb7242
|
5da13779253b47d3df07effd59d06053294ab6d2
|
refs/heads/master
| 2021-11-27T09:59:37.024830
| 2021-10-25T16:17:28
| 2021-10-25T16:17:28
| 205,567,517
| 8
| 2
| null | 2020-02-05T04:53:18
| 2019-08-31T16:14:14
|
R
|
UTF-8
|
R
| false
| true
| 700
|
rd
|
arseq.mvg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arseq.mvg.R
\name{arseq.mvg}
\alias{arseq.mvg}
\title{Most Variable Genes}
\usage{
arseq.mvg(data, variable.genes = variable.genes, dds.object = TRUE)
}
\arguments{
\item{data}{DESeq2 object or normalized expression dataframe}
\item{variable.genes}{numeric: The number of most variable genes to be identified. By default, the program identifies the top 1000 most variable genes.}
\item{dds.object}{Logical parameter indicating if the data is a DESeq2 object. Default=TRUE}
}
\value{
The most variable genes
}
\description{
Identifying the most variable genes in the dataset
}
\examples{
mvg <- arseq.mvg (example_dds)
}
|
0c8a74c35bd0ebf7dd2a1d9ae813997c87a44050
|
2b3b9231b2b8cfacc1d3054ee673af048e7890e6
|
/test.R
|
6fadeea03f3b314e76d9efcbd93b67b0a8219a88
|
[] |
no_license
|
Dan609/wordcloud
|
0e574078b1dcaa656ef29594d0f6d72f68548629
|
676873931b102a56552b32cba385f52d56f92430
|
refs/heads/master
| 2020-04-13T08:10:33.614327
| 2020-01-23T08:11:22
| 2020-01-23T08:11:22
| 163,074,385
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,262
|
r
|
test.R
|
#### Text analysis
# Word-cloud and sentiment analysis of a user-chosen text (.txt) or PDF file.
# Interactive script: file.choose() opens a file picker.
options(max.print=100000)
library(tm) # for text mining
library(SnowballC) # for text stemming
library(wordcloud) # word-cloud generator
library(RColorBrewer) # color palettes
library(RDSTK)
library(readr)
library(twitteR)
library(qdap)
library(syuzhet)
library(ggplot2)
library(dplyr)
library(pdftools)
library(devtools)
library(tcltk)
# Read the text from txt file
text <- readLines(file.choose())
# Read the text from pdf file
# NOTE(review): this overwrites the txt content read just above -- run only
# the line that matches your input format, not both.
text <- pdf_text(file.choose())
# first page text
# cat(text[1])
# Load the data as a corpus (one document per element of `text`)
docs <- Corpus(VectorSource(text))
# Text cleaning :
# Remove your own stop word, specify your stopwords as a character vector
# docs <- tm_map(docs, removeWords, c("???"))
docs <- tm_map(docs, content_transformer(tolower))
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, removeWords, stopwords("english"))
docs <- tm_map(docs, removeWords, stopwords("russian"))
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, stripWhitespace)
# Build a term-document matrix (rows = terms, columns = documents)
dtm <- TermDocumentMatrix(docs)
m <- as.matrix(dtm)
# Total count per term, sorted most-frequent first
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
# The frequency table of words: bar chart of the top 20 terms
par(mfrow=c(1,1), mar=c(10,5,1,1))
barplot(d[1:20,]$freq, las = 2, names.arg = d[1:20,]$word,
        col ="lightblue", main ="Most frequent words",
        ylab = "Word frequencies")
### polarity and sentiment ###
par(mfrow=c(3,1), mar=c(5,10,1,1))
scores <- get_nrc_sentiment(text)
# Columns 9:10 are taken as the polarity pair and 1:8 as the eight NRC
# emotions -- assumes the standard NRC column order; confirm for your
# syuzhet version.
polarity <- scores[1,9:10]
polarity
sentiment <- scores[1,1:8]
sentiment
polarity <- data.matrix(polarity, rownames.force = TRUE)
barplot(polarity)
sentiment <- data.matrix(sentiment, rownames.force = TRUE)
barplot(sentiment)
# Per-sentence sentiment scores (syuzhet method)
sentences <- get_sentences(text)
sentiment_vector <- get_sentiment(sentences, method = "syuzhet")
summary(sentiment_vector)
boxplot(sentiment_vector)
##What was the most positive?
max(sentiment_vector)
sentence_sentiment <- data.frame(sentences, sentiment_vector)
View(sentence_sentiment)
which.max(sentence_sentiment$sentiment_vector)
which.min(sentence_sentiment$sentiment_vector)
# Sentences with the extreme sentiment scores
most_positive <- sentence_sentiment[which.max(sentence_sentiment$sentiment_vector),]
most_positive
most_pnegative <- sentence_sentiment[which.min(sentence_sentiment$sentiment_vector),]
most_pnegative
# Generate the Word clouds from the term-frequency table `d`
# wordcloud(text, colors = c("blue", "green"))
par(mfrow=c(1,1), mar=c(1,1,1,1))
set.seed(1234)  # reproducible word placement
wordcloud(words = d$word, freq = d$freq,
          scale=c(5,1),
          min.freq = 1, max.words=50,
          random.order=FALSE, rot.per=0.1,
          colors=brewer.pal(8, "Dark2"))
# find words that occur at least 20 times :
findFreqTerms(dtm, lowfreq = 20)
# analyze the association between frequent terms :
findAssocs(dtm, terms = "scaffold", corlimit = 0.5)
## Fading cloud (sequential palette, darker = more frequent)
# Fix: the original passed `main = file_name`, but `file_name` is never
# defined in this script (and wordcloud() has no `main` argument), so the
# call errored. The argument has been removed.
wordcloud(words = d$word, freq = d$freq,
          scale=c(4,0.7),
          min.freq = 1, max.words=50,
          random.order=FALSE, rot.per=0.1,
          colors=brewer.pal(9,"BuGn"))
## Big cloud (up to 500 words)
set.seed(1234)
wordcloud(words = d$word, freq = d$freq,
          scale=c(3,.7),
          min.freq = 1, max.words=500,
          random.order=FALSE, rot.per=0.1,
          colors=brewer.pal(8, "Dark2"))
|
f2680de2f51c3537a96c3db8af738793656cb739
|
b4aa358205968bbe1424b3375c5d81bdbcc7ed6b
|
/R/flow_survival.R
|
aa56288de2945d58d87a7be7f2c1d12b36bd6762
|
[] |
no_license
|
fishsciences/DeltaPassageModel
|
c97d9d2eb2df40bfff504667d214a7b3713f7bf3
|
863f369e1c712e5804a80a3000982f0a5e9f5532
|
refs/heads/master
| 2020-07-13T07:11:10.722527
| 2020-02-22T00:56:15
| 2020-02-22T00:56:15
| 205,028,464
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 627
|
r
|
flow_survival.R
|
#' Flow-survival relationship
#'
#' Calculates survival through a reach as a function of standardized flow in Sac3 reach
#'
#' @md
#' @param reach Sac1, Sac2, Sac3, or SS
#' @param flow Standardized flow in Sac3 on day entering reach
#'
#' @return Survival probability: the inverse logit of the reach-specific
#'   intercept plus the shared Sacramento flow slope times `flow`.
#'
#' @export
#' @examples
#' flow_survival("Sac1", -5)
#' flow_survival("Sac1", 0)
#' flow_survival("Sac1", 5)
#' flow_survival("SS", 0)
#'
flow_survival <- function(reach = c("Sac1", "Sac2", "Sac3", "SS"), flow){
  reach <- match.arg(reach)
  sac_slope <- 0.52  # flow effect shared across all reaches
  # Reach-specific intercept; flow_survival_params is package data defined
  # elsewhere in this package.
  p <- flow_survival_params[[reach]]
  # Linear predictor on the logit scale -- computed once instead of twice.
  eta <- p[["B0"]] + sac_slope * flow
  # plogis(eta) == exp(eta)/(1 + exp(eta)), but is numerically stable:
  # the explicit form returns NaN (Inf/Inf) once exp(eta) overflows.
  plogis(eta)
}
|
41c581c88f3a7861ef236fa0783d746fd330e5df
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DiscreteWeibull/examples/ddweibull.Rd.R
|
9cae1405402dd9ebce43e63a89cfcca884f8daae
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 604
|
r
|
ddweibull.Rd.R
|
# Worked examples for the type 1 discrete Weibull distribution
# (d/p/q/r functions from the DiscreteWeibull package).
library(DiscreteWeibull)
### Name: Discrete Weibull (Type 1)
### Title: The type 1 discrete Weibull distribution
### Aliases: ddweibull pdweibull qdweibull rdweibull
### Keywords: distribution
### ** Examples
# Ex.1
x <- 1:10
q <- 0.6
beta <- 0.8
# Probability mass at x = 1..10
ddweibull(x, q, beta)
# 99th-percentile quantile, then the CDF evaluated there
t <- qdweibull(0.99, q, beta)
t
pdweibull(t, q, beta)
#
# Same as above but with zero=TRUE, i.e. support starting at 0
x <- 0:10
ddweibull(x, q, beta, zero=TRUE)
t <- qdweibull(0.99, q, beta, zero=TRUE)
t
pdweibull(t, q, beta, zero=TRUE)
# Ex.2
# Compare empirical frequencies of a random sample with the theoretical pmf
q <- 0.4
beta <- 0.7
n <- 100
x <- rdweibull(n, q, beta)
tabulate(x)/sum(tabulate(x))
y <- 1:round(max(x))
# compare with
ddweibull(y, q, beta)
|
54e8b246f56eef25fbfef1537c2b6c544ea0d0da
|
102cc3eb35509a8402e940f44fbc4688bf704b1a
|
/.Rprofile
|
32717f19429eb7b2136fc2713e35ecb9912222f6
|
[] |
no_license
|
shenfei/DiagonAlley
|
a21da6bf35ea3681cea0da8c259f7ae5dddf3d87
|
31097a4297511dcec0e018336d5030f31e4cc36a
|
refs/heads/master
| 2023-06-10T13:39:28.142366
| 2023-05-26T07:25:05
| 2023-05-26T07:25:05
| 27,769,747
| 0
| 0
| null | 2023-05-26T07:25:06
| 2014-12-09T14:15:27
|
Emacs Lisp
|
UTF-8
|
R
| false
| false
| 713
|
rprofile
|
.Rprofile
|
# .Rprofile -- commands to execute at the beginning of each R session
#
# You can use this file to load packages, set options, etc.
#
# NOTE: changes in this file won't be reflected until after you quit
# and start a new session
#
.First <- function() {
  # Use a user-specific package library for this session.
  .libPaths("~/R/library")

  # blogdown/bookdown configuration.
  options(
    blogdown.ext = ".Rmd",
    bookdown.render.file_scope = FALSE
  )

  # Default knitr chunk options for generated figures; pdf output gets a
  # lower dpi than html.
  figure_dpi <- if (knitr::is_latex_output()) 72 else 300
  knitr::opts_chunk$set(
    fig.retina = 2,   # control image sharpness via dpi
    fig.width = 6,
    fig.pos = "t",    # pdf mode
    fig.align = "center",
    dpi = figure_dpi,
    out.width = "100%"
  )
}
|
2568356bf219ba3555cdc068a6fa87f815a95656
|
b36dfb61e11b2ca63bb571c665ddf38ad5475330
|
/stringdb_cond1_cond2.R
|
0b38ec865f9e394816493e1f14cdd323d4450a68
|
[] |
no_license
|
mzuer/breast_project
|
83685f855af878c3404df80e839be5f621300212
|
2b23ef1a5f01b9b19f6c795b7fd6e199bf68f5fb
|
refs/heads/main
| 2023-08-27T22:58:21.456109
| 2021-10-27T09:58:39
| 2021-10-27T09:58:39
| 409,911,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,572
|
r
|
stringdb_cond1_cond2.R
|
# Rscript cemitools_cond1_cond2_proteo_Johansson.R
cond1 <- "HER2"
cond2 <- "LumA"
annotCol <- "PAM50.subtype"
cond1 <- "LumBHer2"
cond2 <- "LumA"
annotCol <- "PAM50.subtype_merged"
plotType <- "png"
myWidth <- 400
myHeight <- 400
cond1 <- "LumB"
cond2 <- "LumA"
annotCol <- "PAM50.subtype"
outFolder <- file.path("STRINGDB_COND1_COND2_PROTEO_JOHANSSON", paste0("test_", cond1, "_vs_ref_", cond2))
dir.create(outFolder, recursive=TRUE)
# gmt_file <- system.file("extdata", "pathways.gmt", package = "CEMiTool")
gmt_file <- "c5.go.bp.v7.4.symbols.gmt"
nTop_connect <- 5
myHeightGG <- myWidthGG <- 7
library(STR)
library(ggplot2)
library(igraph)
library(foreach)
source("breast_utils.R")
####################################
### retrieve proteo data
####################################
# Protein abundance ratios (genes x tumor samples), gene symbols as rownames.
proteo_dt <- read.delim("data/johansson_data_relative_ratios_to_pool.csv", sep=",")
stopifnot(!duplicated(proteo_dt$gene_symbol))
rownames(proteo_dt) <- proteo_dt$gene_symbol
# Drop the identifier columns, keeping only sample columns.
# NOTE(review): gene_symbol is assigned NULL twice here -- possibly one of
# the two was meant to be a different column; confirm against the raw file.
proteo_dt$gene_symbol <- proteo_dt$gene_symbol <- proteo_dt$ensembl_id <- NULL
####################################
### first retrieve PAM50 annotation data
####################################
annot_dt <- read.delim("data/johansson_tumor_annot.csv", sep=",")
# Merged subtype: collapse LumB and HER2 into a single "LumBHer2" label.
annot_dt$PAM50.subtype_merged <- annot_dt$PAM50.subtype
annot_dt$PAM50.subtype_merged[annot_dt$PAM50.subtype_merged == "LumB" |
                                annot_dt$PAM50.subtype_merged == "HER2"] <- "LumBHer2"
# Named vectors: sample ID -> subtype label (user-selected column, and full PAM50).
samp_annot <- setNames(as.character(annot_dt[,paste0(annotCol)]), as.character(annot_dt$Tumor.ID))
samp_annot_all <- setNames(as.character(annot_dt[,paste0("PAM50.subtype")]), as.character(annot_dt$Tumor.ID))
stopifnot(!duplicated(annot_dt$Tumor.ID))
####################################
### select sub DT
####################################
# Keep only annotated samples present in the expression table, split by condition.
exprDT <- proteo_dt
stopifnot(any(colnames(exprDT) %in% names(samp_annot)))
dim(exprDT)
stopifnot(names(samp_annot) %in% colnames(proteo_dt))
cond1_samps <- names(samp_annot)[samp_annot==cond1]
stopifnot(length(cond1_samps) > 0)
length(cond1_samps)
# 562
cond1_samps <- cond1_samps[cond1_samps %in% colnames(exprDT)]
stopifnot(length(cond1_samps) > 0)
length(cond1_samps)
# 362
cond2_samps <- names(samp_annot)[samp_annot==cond2]
stopifnot(length(cond2_samps) > 0)
length(cond2_samps)
# 209
cond2_samps <- cond2_samps[cond2_samps %in% colnames(exprDT)]
stopifnot(length(cond2_samps) > 0)
length(cond2_samps)
# 167
x <- as.matrix(exprDT)
stopifnot(dim(x) == dim(exprDT))
exprDT <- x
cond1_dt <- exprDT[, colnames(exprDT) %in% cond1_samps]
stopifnot(dim(cond1_dt) > 0)
cond2_dt <- exprDT[, colnames(exprDT) %in% cond2_samps]
stopifnot(dim(cond2_dt) > 0)
cond12_dt <- cbind(cond1_dt, cond2_dt)
### DE ANALYSIS
# limma-voom differential expression, cond1 vs cond2.
# NOTE(review): prefer library() over require() -- require() returns FALSE
# instead of erroring when a package is missing.
# NOTE(review): voom() expects counts; the input here is relative ratios --
# confirm this is intended.
require(edgeR)
require(limma)
cond12_dt <- cond12_dt[,c(colnames(cond1_dt), colnames(cond2_dt))]
sub_labs <- factor(c(rep(cond1, ncol(cond1_dt)),rep(cond2, ncol(cond2_dt))), levels=c(cond1, cond2))
design <- model.matrix(~ sub_labs)
v <- voom(cond12_dt, design, plot=FALSE)
fit <- lmFit(v, design)
eb_fit <- eBayes(fit)
# All genes ranked by p-value; last design column is the condition effect.
DE_topTable <- topTable(eb_fit, coef=ncol(v$design), number=Inf, sort.by="p") ## if not 0+ in design -> coef=2
# geneName = "COL14A1"
# plot_dt <- data.frame(
#   cond=c(rep(cond1, ncol(cond1_dt)),rep(cond2, ncol(cond2_dt))),
#   expr = c(cond1_dt[geneName,], cond2_dt[geneName,]),
#   stringsAsFactors = FALSE
# )
# ggboxplot(data=plot_dt, x="cond", y="expr")
# ggboxplot(data=plot_dt, x="cond", y="expr")
### initialization
# STRINGdb connection for human; defaults noted in the warnings below.
# WARNING: You didn't specify a species. Hence we will set 9606 (Homo Sapiens) as your species.
# WARNING: Score threshold is not specified. We will be using medium stringency cut-off of 400.
# WARNING: You didn't specify a version of the STRING database to use. Hence we will use STRING 11.0
string_db <- STRINGdb$new( species=9606)
#STRINGdb$methods() # To list all the methods available.
#STRINGdb$help("get_graph") # To visualize their documentation.
##### map stringdb IDs
# Map gene symbols from the DE table to STRING protein identifiers.
DE_topTable <- as.data.frame(DE_topTable)
DE_topTable$gene <- rownames(DE_topTable)
###################################################
DE_topTable_mpd <- string_db$map( DE_topTable, "gene", removeUnmappedRows = TRUE )
# this adds a column with STRING_id
# 0% unmapped
##### hits
# extract the most significant 200 genes and we produce an image of the STRING network
# for those
hits <- DE_topTable_mpd$STRING_id[1:200]
string_db$plot_network( hits )
### add_diff_exp_color
# filter by p-value and add a color column
# (i.e. green down-regulated gened and red for up-regulated genes)
DE_topTable_mpd_pval05 <- string_db$add_diff_exp_color( subset(DE_topTable_mpd, adj.P.Val<0.05),
                                                        logFcColStr="logFC" )
# post payload information to the STRING server
payload_id <- string_db$post_payload( DE_topTable_mpd_pval05$STRING_id,
                                      colors=DE_topTable_mpd_pval05$color )
##### plot_halo_network
# display a STRING network png with the "halo"
string_db$plot_network( hits, payload_id=payload_id )
### enrichment analysis on selected genes
enrichment <- string_db$get_enrichment( hits )
head(enrichment, n=20)
### to get interactions between list of proteins
string_db$get_interactions( hits )
### annotation without the enrichment
annotations <- string_db$get_annotations( hits )
head(annotations, n=20)
###################################################
### code chunk number 15: clustering1
###################################################
# get clusters
# NOTE(review): clustersList2 repeats the exact same call as clustersList
# and is never used differently below -- likely leftover.
clustersList <- string_db$get_clusters(DE_topTable_mpd_pval05$STRING_id[1:600])
clustersList2 <- string_db$get_clusters(DE_topTable_mpd_pval05$STRING_id[1:600])
###################################################
### code chunk number 17: clustering2
###################################################
getOption("SweaveHooks")[["fig"]]()
# plot first 4 clusters
par(mfrow=c(2,2))
for(i in seq(1:4)){
  string_db$plot_network(clustersList[[i]])
}
###################################################
### code chunk number 18: proteins
###################################################
string_proteins <- string_db$get_proteins()
###################################################
### code chunk number 19: atmtp
###################################################
# Map two example gene names to STRING IDs.
tp53 = string_db$mp( "tp53" )
atm = string_db$mp( "atm" )
###################################################
### code chunk number 20: neighbors (eval = FALSE)
###################################################
## string_db$get_neighbors( c(tp53, atm) )
###################################################
### code chunk number 21: interactions
###################################################
string_db$get_interactions( c(tp53, atm) )
###################################################
### code chunk number 22: paralogs (eval = FALSE)
###################################################
## # Get all homologs of TP53 in human.
## string_db$get_paralogs(tp53)
###################################################
### code chunk number 23: Closest homologs from other species (eval = FALSE)
###################################################
## # get the best hits of the following protein in all the STRING species
## string_db$get_homologs_besthits(tp53)
###################################################
### code chunk number 24: homologs_besthits in target species (eval = FALSE)
###################################################
## # get the homologs of the following two proteins in the mouse (i.e. species_id=10090)
## string_db$get_homologs_besthits(c(tp53, atm), target_species_id=10090, bitscore_threshold=60)
####################### TRASH
# res_de_1a <- topTable(eb_fit, adjust.method="BH", coef=ncol(v$design), number=Inf, sort.by="p")
# res_de_1b <- topTable(eb_fit, adjust.method="BH", number=Inf, sort.by="p")
# res_de_1c <- topTable(eb_fit, adjust.method="BH", coef=1, number=Inf, sort.by="p")
#
# sub_labs <- factor(c(rep(cond1, ncol(cond1_dt)),rep(cond2, ncol(cond2_dt))), levels=c(cond1, cond2))
# design <- model.matrix(~ 0+sub_labs)
# v <- voom(cond12_dt, design, plot=FALSE)
# fit <- lmFit(v, design)
# eb_fit <- eBayes(fit)
# res_de_2a <- topTable(eb_fit, adjust.method="BH", coef=ncol(v$design), number=Inf, sort.by="p")
# res_de_2b <- topTable(eb_fit, adjust.method="BH", number=Inf)
# res_de_2c <- topTable(eb_fit, adjust.method="BH", coef=1, number=Inf)
#
#
# res_de_1b = res_de_1a
# res_de_1c similar but not equal res_de_2a
# res_de_1c similar = res_de_2b (with 2 cols)
# res_de_1c = res_de_2c
|
bd4211dbc79fdc820a3dc5f199212619d891a6f5
|
0d4a16ddebbb1f26ea0789696b048582dcb8ed6d
|
/plot1.R
|
40b5a5b2fd447fdc8ca4af5cac246434ea06edff
|
[] |
no_license
|
flebr1/ExData_Plotting1
|
e71ca2fe465a128c1d94cc8d30bfcf9991aac3ac
|
73076150219d771415cdb6a23eb9dc9521f53f85
|
refs/heads/master
| 2020-05-27T16:46:21.082517
| 2019-05-26T23:21:58
| 2019-05-26T23:21:58
| 188,708,338
| 0
| 0
| null | 2019-05-26T16:44:05
| 2019-05-26T16:44:05
| null |
UTF-8
|
R
| false
| false
| 343
|
r
|
plot1.R
|
# Exploratory data analysis - project 1
# Script to generate "Plot 1": a histogram of Global Active Power,
# written to plot1.png (480x480).
source("prep_and_load_data.R")

# Open the PNG device, draw the histogram, then close the device.
png(filename = "plot1.png", width = 480, height = 480)
hist(hpcDataToPlot$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency")
dev.off()
|
6f22ed94af8dd99a52bd6f3d655f2f2c8ab6daa5
|
921c50c1a6da54e873446cdc2925734216e85155
|
/Proyecto/PROYECTO.R
|
630391c294735f7bb556490f732181528cd58e6f
|
[] |
no_license
|
otokani2000/Clustering
|
5a6e2ec31ef01bafe6cea60a9c800962976364d0
|
cdb122386ae2642796a1b77009c40283e4052e82
|
refs/heads/master
| 2023-03-23T23:20:03.425995
| 2021-03-21T18:33:06
| 2021-03-21T18:33:06
| 350,076,257
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,659
|
r
|
PROYECTO.R
|
# Read the all-vs-all blastp results (pre-sorted alphabetically by sequence name).
tabla<-read.table("tabla_ORDENADA.txt")
tabla
# Only 9992 of the expected 10000 (100 x 100) pairwise alignments are present.
# To locate the 8 missing pairs, tabulate the two sequence-name columns and
# find the (n, m) cells with no alignment: non-zero cells mark produced
# alignments, zero cells mark missing ones.
pos1<-factor(tabla[1][[1]])
pos2<-factor(tabla[2][[1]])
# Sanity check: the cross-table should have 10000 cells.
length(table(pos1,pos2)==0)
# Linear indices of the sequence pairs with no alignment.
index<-which(table(pos1,pos2)==0)
index
class(which(table(pos1,pos2)==0))
# Bitscores of the alignments that do exist.
bitscores<-tabla[[3]]
length(bitscores)
# Insert a 0 bitscore at every missing position so the vector can later be
# folded into a square matrix.
# NOTE(review): `index` comes from a column-major table() while the matrix
# below is filled byrow = TRUE -- this relies on the layout being consistent
# with the sorted input; confirm against the data.
contador<-0
for (x in index){
  bitscores<-append(bitscores,0,after=(x+contador-1))
  contador<-contador+1
}
# Should now hold exactly 10000 values.
length(bitscores)
# For this data set the highest bitscore is 324, at position 6162.
bitscores[6162]
# Fold the bitscores into a 100 x 100 matrix, row by row (input is sorted
# alphabetically, so rows/columns follow the sequence-name order).
MATRIZ<-matrix(data=bitscores,ncol=100,nrow=100,byrow = TRUE)
# Attach sequence names to the rows and check dimensions.
rownames(MATRIZ)<-levels(tabla[[2]][1])[1:100]
dim(MATRIZ)
# 4. Normalize similarities into [0, 1] by dividing by the largest bitscore.
# Generalized from the hard-coded 324 (the observed maximum above) so the
# script also works for other inputs.
MATRIZ<-MATRIZ/max(MATRIZ)
# A sequence is maximally similar to itself: set the diagonal to 1.
diag(MATRIZ) <- 1
# Convert the similarity matrix into a dissimilarity matrix.
MATRIZ<-1-MATRIZ
# The maximum must be 1 and the minimum must be 0.
max(MATRIZ)
min(MATRIZ)
MATRIZ
#CLUSTERING
# Hierarchical clustering of the dissimilarity matrix built above, with
# dendrograms exported as Newick trees and cluster-number diagnostics.
library(cluster)
suppressPackageStartupMessages(library(factoextra))
suppressPackageStartupMessages(library(dendextend))
suppressPackageStartupMessages(library(ape))
# Read in the data.
InputData <- MATRIZ
# 5. Run hierarchical clustering with several linkage methods to choose the
# number of clusters.
#Run the hierarchical clustering and plot the dendogram
ccom <- hclust(dist(InputData), method = "ward.D2")
plot (ccom, hang = -1)
my_tree <- as.phylo(ccom)
write.tree(phy=my_tree, file="jerarquico.tree")
# Set up a plotting area for the 4 dendrograms side by side.
par(mfrow = c(1, 4))
# Set the plotting margins.
par(mar = c(2, 2, 2, 1) + 0.1)
# Build the dendrograms with different linkage methods.
csin <- hclust(dist(InputData, method = "euclidean"), method = "single")
cave <- hclust(dist(InputData, method = "euclidean"), method = "average")
ccom <- hclust(dist(InputData, method = "euclidean"), method = "complete")
cwar <- hclust(dist(InputData, method = "euclidean"), method = "ward.D2")
# Agglomerative coefficient for each method (cluster::coef.hclust).
coeff_csin<-coef(csin)
coeff_cave<-coef(cave)
coeff_ccom<-coef(ccom)
coeff_cwar<-coef(cwar)
coeff_csin
coeff_cave
coeff_ccom
coeff_cwar
# 6. Save the dendrograms as phylogenetic trees in Newick format.
# Save the trees from all linkage methods.
my_tree <- as.phylo(csin)
write.tree(phy=my_tree, file="csin.tree")
my_tree <- as.phylo(cave)
write.tree(phy=my_tree, file="cave.tree")
my_tree <- as.phylo(ccom)
write.tree(phy=my_tree, file="ccom.tree")
my_tree <- as.phylo(cwar)
write.tree(phy=my_tree, file="cwar.tree")
# Plot all dendrograms, highlighting k = 20 clusters in each.
plot (csin, hang = -1, main = "Single")
rect.hclust(csin, k=20, border=1:16)
csin20 <- cutree(csin, k=20)
plot (cave, hang = -1, main = "Average")
rect.hclust(cave, k=20, border=1:16)
cave20 <- cutree(cave, k=20)
plot (ccom, hang = -1, main = "Complete")
rect.hclust(ccom, k=20, border=1:16)
ccom20 <- cutree(ccom, k=20)
plot (cwar, hang = -1, main = "Ward.D")
rect.hclust(cwar, k=20, border=1:16)
cwar20 <- cutree(cwar, k=20)
# Visualize the complete-linkage clustering another way (k = 4).
cls3 <- cutree(ccom, k=4)
plot(InputData, xlim=c(0,8), ylim=c(0,8), col=cls3)
fviz_cluster(list(data = InputData, cluster = cls3))
# Now visualize the single-linkage clustering (k = 4).
csin <- hclust(dist(InputData), method = "single")
plot (csin, hang = -1)
rect.hclust(csin, k=4, border=2:4)
# Tanglegram comparing the complete- and single-linkage dendrograms.
# NOTE(review): round(entanglement(...)) rounds to a whole number; a digits
# argument (e.g. round(x, 2)) may have been intended.
dend1 <- as.dendrogram (ccom)
dend2 <- as.dendrogram (csin)
dend_list <- dendlist(dend1, dend2)
tanglegram(dend1, dend2, main = paste("Entanglement =", round(entanglement(dend_list))))
# Determine the optimal number of clusters.
#Methods: Total Within Sum of Squares (wss), silhouette, gap_stat
fviz_nbclust(InputData, FUN = hcut, method = "silhouette", k.max = 10, print.summary = TRUE)
#The silhouette method applied to hclust
fviz_nbclust(InputData, FUN = hcut, hc_func = "hclust", hc_method = "ward.D2", method = "silhouette", k.max = 20) +
  labs(subtitle = "Silhouette method")
#The silhouette method applied to agnes
fviz_nbclust(InputData, FUN = hcut, hc_func = "agnes", hc_method = "ward.D2", method = "silhouette", k.max = 20) +
  labs(subtitle = "Silhouette method")
#The silhouette method applied to diana
fviz_nbclust(InputData, FUN = hcut, hc_func = "diana", hc_method = "ward.D2", method = "silhouette", k.max = 20) +
  labs(subtitle = "Silhouette method")
|
f878ccd6913cf4f34e51ee3534e620d300446283
|
2c7ab1015aaa4f74cb1fd037fa663063f770bc46
|
/findActive/reduceSpliceEvent.R
|
da1f59fb7507630c6488abda48157008001e72a1
|
[] |
no_license
|
brittanyhowell/spliceAnalysis
|
04d217be2aab5723aef4f901f9a37112253f0d29
|
be03041a5754f970d9347b22e1828619bab4f919
|
refs/heads/master
| 2021-01-17T17:25:26.388064
| 2017-07-27T03:53:08
| 2017-07-27T03:53:08
| 70,382,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 828
|
r
|
reduceSpliceEvent.R
|
# Grouped barplot of candidate splice-event counts across human samples.
# NOTE(review): setwd() to an absolute user path makes this script non-portable.
setwd("~/Documents/University/Honours_2016/Project/bamReading/Split/Human/Compile")
lEvents <- read.table("landscapeNumEvents.txt")
events <- read.table("AllComp.txt")
# Keep the first 27 columns of the landscape table, transposed so samples
# become rows for beside-style plotting. (The original used 0:27; a 0 column
# index is a silent no-op in R, so 1:27 selects the same columns explicitly.)
# Renamed from `subset`, which masked base::subset().
eventCounts <- t(data.frame(lEvents[, 1:27]))
colnames(events) <- c("All", "Either", "Both", "Stringent")
# One colour per sample, in groups of three replicates.
colours <- c("salmon1" ,"salmon1" ,"salmon1" ,"palevioletred" ,"palevioletred" ,"palevioletred" ,"orchid4","orchid4","orchid4", "slateblue" ,"slateblue" ,"slateblue" ,"cornflowerblue", "cornflowerblue", "cornflowerblue", "aquamarine", "aquamarine", "aquamarine", "darkolivegreen1", "darkolivegreen1", "darkolivegreen1", "chartreuse4", "chartreuse4", "chartreuse4", "darkgreen", "darkgreen", "darkgreen")
barplot(eventCounts, beside = TRUE, col = colours, ylab = "Number of candidate splice events", names.arg = colnames(events))
|
09a90a059da8504e36daa7170b068633ac9a0b91
|
4b9955701ca424c19bec17f0bc4b36f72cfcbcc4
|
/man/invert.Rd
|
dcabdf0cecbeac2ad670548df0000b1fc12a949c
|
[
"BSD-2-Clause"
] |
permissive
|
mlr-org/mlrCPO
|
c238c4ddd72ece8549f8b48a79f02f543dac60e5
|
e6fc62a4aeb2001a3760c9d1126f6f2ddd98cc54
|
refs/heads/master
| 2022-11-21T17:30:54.108189
| 2022-11-16T16:08:10
| 2022-11-16T16:08:10
| 100,395,368
| 39
| 4
|
NOASSERTION
| 2022-10-18T23:46:13
| 2017-08-15T16:08:30
|
R
|
UTF-8
|
R
| false
| true
| 1,655
|
rd
|
invert.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inverter.R
\name{invert}
\alias{invert}
\title{Invert Target Preprocessing}
\usage{
invert(inverter, prediction, predict.type = "response")
}
\arguments{
\item{inverter}{[\code{CPOInverter}]\cr
The retrafo or inverter to apply}
\item{prediction}{[\code{\link[mlr]{Prediction}} | \code{matrix} | \code{data.frame}]\cr
The prediction to invert}
\item{predict.type}{[\code{character(1)}]\cr
The equivalent of the \code{predict.type} property of a \code{\link[mlr:makeLearner]{Learner}} object,
controlling what kind of prediction to perform. One of \dQuote{response}, \dQuote{se},
\dQuote{prob}. Default is \dQuote{response}. Care must be taken that the \code{prediction} was generated
with a prediction type that fits this, i.e. it must be of type \code{getCPOPredictType(inverter)[predict.type]}.}
}
\value{
[\code{\link[mlr]{Prediction}} | \code{data.frame}]. A transformed \code{\link{Prediction}} if a prediction was given,
or a \code{data.frame}. If the first object in the chain is a \code{CPORetrafo} object, the \sQuote{truth} column(s) of the
prediction will be dropped.
}
\description{
Invert the transformation, done on the target column(s)
of a data set, after prediction.
Use either a \code{\link{CPORetrafo}} object with invert capability (see \code{\link{getCPOTrainedCapability}},
or a \code{\link{CPOInverter}} retrieved with
\code{\link{inverter}} from a data object that was fed through a retrafo
chain.
If a \code{\link{CPORetrafo}} object is used that contains no target-bound transformations
(i.e. has \dQuote{invert} capability 0), this is a no-op.
}
|
ad8c63cb34499a3d7586751f93a418b47967a55a
|
17b4fd4a2aa4d7741604e07f585bec783d558dde
|
/lessons/A_Monday/scripts/B_Amazon_Xray.R
|
4ed3852ea5f0369666a3b044eac52a422523f73f
|
[
"MIT"
] |
permissive
|
anhnguyendepocen/GSERM_TextMining
|
41a04517212e5adde60865b6e5b3b1c149b7960d
|
0bb2692b1130c9c9fe604e2e610fd057960a44ec
|
refs/heads/master
| 2022-03-27T14:21:53.953619
| 2020-01-16T21:40:07
| 2020-01-16T21:40:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,559
|
r
|
B_Amazon_Xray.R
|
#' Author: Ted Kwartler
#' Data: Jan-12-2020
#' Purpose: GSERM: Intro to basic R operations
#'
### 1. Set working directory to your specific movie
setwd("/cloud/project/lessons/A_Monday/data/xRay/forceAwakens")
# Turn off scientific notation
options(scipen = 999)
### 2. Load libraries to customize R
library(ggplot2)
library(ggthemes)
### 3. Read in data
# Use the read.csv function for your specific onScreenCharacters.csv file
charDF <- read.csv('forceAwakens_onScreenCharacters.csv')
### 4. Apply functions to clean up data & get insights/analysis
# Use the names function to review the names of charDF
names(charDF)
# Review the top 6 records of charDF
head(charDF)
# Remove a raw data column
charDF$changeType <- NULL
# Calculate a new vector sceneEnd - CharAppearance
charDF$charDuration <- charDF$sceneEnd - charDF$appearance
# Divide the appearance by 1000 to calculate seconds
charDF$appearanceSecs <- charDF$appearance / 1000
# Divide the sceneEnd by 1000 to calculate seconds
charDF$sceneEndSecs <- charDF$sceneEnd / 1000
# Total character appearances
nrow(charDF)
# Sometimes there are duplicated rows (not in lego movie)
# ie Star Wars BB-8 is duplicated because there are 2 puppeteers, lets remove any duplicate records
charDF$dupes <- duplicated(charDF) #T/F if it is duplicated
head(charDF)
# Show any rows that are TRUE duplicates
charDF[grep('TRUE', idx),]
# drop dupes
## DO WE WANT TO KEEP DUPES OR NOT?
nrow(charDF)
charDF <- subset(charDF, charDF$dupes != TRUE)
nrow(charDF)
### 5. Project artifacts ie visuals & (if applicable)modeling results/KPI
# Tally the number of scenes by character; like a pivot table in Excel
(charTally <- as.matrix(table(charDF$character)))
# Subset; like "filter" in Excel
(charTally <- subset(charTally, charTally[,1]>2))
# Basic plot
barplot(t(charTally), las = 2)
# Timeline of character appearances
ggplot(charDF, aes(colour=charDF$character)) +
geom_segment(aes(x=charDF$appearanceSecs, xend=charDF$sceneEndSecs,
y=charDF$character, yend=charDF$character),size=3) +
theme_gdocs() + theme(legend.position="none")
# Now see just top n characters
n <- 6
topPerformers <- sort(charTally[,1], decreasing = T)[1:n]
names(topPerformers)
topChars <- charDF[charDF$character %in% names(topPerformers),]
ggplot(topChars, aes(colour=topChars$character)) +
geom_segment(aes(x=topChars$appearanceSecs, xend=topChars$sceneEndSecs,
y=topChars$character, yend=topChars$character),size=3) +
theme_gdocs() + theme(legend.position="none")
# End
|
ae3223861d657ddf70ed677dfc89d8c1faed202e
|
ac95f4a5c652a9a328068edf009ab67cc8f62bff
|
/man/hooks_with_bait.Rd
|
fb2efe3e10929f5a606f5f42f24ae595be889193
|
[] |
no_license
|
pbs-assess/gfiphc
|
8f200dcef7e36de6ecaef09cdb0383166f9307da
|
90ad0b6d4ac6e1b5f52a3e55fc0b7ce0fe0069d3
|
refs/heads/master
| 2023-06-25T09:32:58.894375
| 2023-06-19T17:42:20
| 2023-06-19T17:42:20
| 177,639,706
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 668
|
rd
|
hooks_with_bait.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{hooks_with_bait}
\alias{hooks_with_bait}
\title{Available counts of hooks returned with bait on them, for each set for all years}
\format{
A \code{list} containing a single \code{tibble} called \code{set_counts}, with
columns as described in \code{?get_all_iphc_set_counts}.
}
\source{
Extracted from GFBio using \code{data-raw/sets-skates-hooks-yelloweye.R}.
}
\usage{
hooks_with_bait
}
\description{
Extracts and calculates all avaiable counts, at the set level, of hooks
returned with bait (which is essentially equivalent to a species).
}
\keyword{datasets}
|
ac05e792172a6c5a429047034d12b7a3c25a097f
|
25285a1e0fb2d7c194928a7bb021f30ee6751de7
|
/R/setNames.R
|
19411e3844f37b907c6def3e54e17af7a57734f7
|
[] |
no_license
|
pbiecek/PogromcyDanych
|
e2deb7a548467adcd8ada7cbf06fa896db2635fe
|
a7812861bf57a0ec63e66da7f3ad3f94899c1628
|
refs/heads/master
| 2023-09-01T16:05:14.013614
| 2023-08-20T19:57:00
| 2023-08-20T19:57:00
| 28,668,827
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,986
|
r
|
setNames.R
|
#' Assign a value into the global environment by name.
#'
#' Binds `value` to `name` locally, serializes it to a temporary file with
#' save(), and load()s that file into .GlobalEnv, so the object appears
#' globally exactly as save()/load() would restore it.
#'
#' @param name Character scalar: the variable name to create globally.
#' @param value The value to bind to `name`.
pushGlobal <- function(name, value) {
  tf <- tempfile()
  # Clean up the temp file even if save() or load() fails (the original
  # only unlinked on the success path, leaking the file on error).
  on.exit(unlink(tf), add = TRUE)
  assign(name, value = value)  # bind locally so save(list = name) can find it
  save(list = name, file = tf)
  load(tf, envir = .GlobalEnv)
}
#' Publish the PogromcyDanych teaching datasets in English or Polish.
#'
#' Takes local copies of the package datasets, translates column names
#' (and, for \code{lang = "eng"}, factor levels and the free-text
#' accessories column of \code{auta2012}) and pushes the results into the
#' global environment via \code{pushGlobal()}.
#'
#' Fix in this revision: the \code{gp54_07} factor levels were left
#' untranslated (Polish) in the "eng" branch; they now follow the same
#' English wording as the sibling gp54_* variables.
#'
#' @param lang \code{"eng"} to translate to English; any other value keeps
#'   the Polish column names.
#' @return Invisibly, 0.
setLang <- function(lang = "eng") {
# Local working copies of the package datasets.
auta2012_eng <- PogromcyDanych::auta2012
pearson_eng <- PogromcyDanych::pearson
galton_eng <- PogromcyDanych::galton
WIG_eng <- PogromcyDanych::WIG
TCGA_BRCA_eng <- PogromcyDanych::TCGA_BRCA
diagnosis <- PogromcyDanych::diagnoza
diagnosisDict <- PogromcyDanych::diagnozaDict
mandatySejmik2014_eng <- PogromcyDanych::mandatySejmik2014
imiona_warszawa_eng <- PogromcyDanych::imiona_warszawa
seriale_eng <- PogromcyDanych::serialeIMDB
cats_birds <- PogromcyDanych::koty_ptaki
if (lang == "eng") {
# NOTE(review): "Nubmer.of.doors" is a typo ("Number"); kept as-is because
# downstream code may already rely on this column name.
colnames(auta2012_eng) <- c("Price", "Currency", "Price.in.PLN", "Gross.Net", "HP", "kW",
"Brand", "Model", "Version", "Nubmer.of.doors", "Engine.cubic.capacity",
"Mileage", "Type.of.fuel", "Year", "Color", "Country.of.current.registration",
"Country.of.origin", "Is.damaged", "Transmission", "Is.imported",
"Accessories")
colnames(TCGA_BRCA_eng) <- c("TP53", "gender", "vital.status", "days.to.death", "new.tumor")
colnames(seriale_eng) <- c("id", "series", "name", "season","part","note","votes","imdbId")
colnames(pearson_eng) <- c("son", "father")
colnames(galton_eng) <- c("son", "mid_parent")
colnames(imiona_warszawa_eng) <- c("name", "sex", "year", "month", "count")
colnames(mandatySejmik2014_eng) <- c("Voivodeship", "PSL", "PiS", "PO", "SLD", "Other", "Prc_valid_votes",
"long", "lat")
colnames(WIG_eng) <- c("Date", "Name", "Opening Price", "Max Price", "Min Price",
"Closing Price", "Change", "Turnover")
colnames(cats_birds) <- c("species", "weight", "length", "speed", "habitat", "lifespan", "group" )
cats_birds$species <- c("Tiger", "Lion", "Cheetah", "Jaguar", "Puma", "Leopard", "Irbis", "Swift",
"Ostrich", "Golden Eagle", "Peregrine Falcon", "Falcon Norwegian", "Albatros")
cats_birds$group <- c(rep("Cat", 7), rep("Bird", 6))
cats_birds$habitat <- c("Asia", "Africa", "America", "America", "Asia", "Africa", "Asia", "Eurasia", "Africa", "North", "North", "North", "South")
# Translate factor levels of the diagnosis (diagnoza) survey variables.
levels(diagnosis$plec) = c("MAN", "WOMAN")
# Drop unused levels so the translated level vectors below line up.
for (i in 1:ncol(diagnosis)) {
if(inherits(diagnosis[,i],"factor"))
diagnosis[,i] <- droplevels(diagnosis[,i])
}
levels(diagnosis$eduk4_2013) = c("PRIMARY/NO EDUCATION", "VOCATIONAL/GRAMMAR",
"SECONDARY", "HIGHER AND POST-SECONDARY")
levels(diagnosis$status9_2013) = c("EMPLOYEES IN PUBLIC SECTOR",
"EMPLOYEES IN PRIVATE SECTOR", "ENTREPRENEUR/SELF-EMPLOYED",
"FARMERS", "PENSIONERS", "RETIREES", "PUPILS AND STUDENTS",
"UNEMPLOYED", "OTHER PROFESSIONALLY INACTIVE")
levels(diagnosis$gp3) = c("DELIGHTED", "PLEASED",
"MOSTLY SATISFIED", "MIXED", "MOSTLY DISSATISFIED", "UNHAPPY",
"TERRIBLE")
levels(diagnosis$gp29) = c("FUN, WELL-BEING, LACK OF STRESS",
"SENSE OF PURPOSE, ACHIEVING IMPORTANT GOALS DESPITE DIFFICUL")
levels(diagnosis$gp54_01) = c("DEFINITELY AGREE", "AGREE", "RATHER AGREE",
"NEITHER AGREE NOR DISAGREE", "RATHER DISAGREE", "DISAGREE",
"DEFINITELY DISAGREE")
levels(diagnosis$gp54_02) = c("DEFINITELY YES", "YES",
"RATHER YES", "NEITHER YES OR NO", "PROBABLY NOT", "NO",
"DEFINITELY NOT")
levels(diagnosis$gp54_03) = c("DEFINITELY YES", "YES", "RATHER YES",
"NEITHER YES OR NO", "PROBABLY NOT", "NO", "DEFINITELY NOT")
levels(diagnosis$gp54_04) = c("DEFINITELY AGREE", "AGREE", "RATHER AGREE",
"NEITHER AGREE NOR DISAGREE", "RATHER DISAGREE", "DISAGREE",
"DEFINITELY DISAGREE")
levels(diagnosis$gp54_05) = c("DEFINITELY AGREE", "AGREE",
"RATHER AGREE", "NEITHER AGREE NOR DISAGREE", "RATHER DISAGREE",
"DISAGREE", "DEFINITELY DISAGREE")
levels(diagnosis$gp54_06) = c("DEFINITELY AGREE",
"AGREE", "RATHER AGREE", "NEITHER AGREE NOR DISAGREE", "RATHER DISAGREE",
"DISAGREE", "DEFINITELY DISAGREE")
# Fixed: these levels were left in Polish in the original; translated to
# match the wording used by the sibling gp54_* variables
# (ZDECYDOWANIE TAK / TAK / RACZEJ TAK / ANI TAK, ANI NIE / RACZEJ NIE /
# NIE / ZDECYDOWANIE NIE).
levels(diagnosis$gp54_07) = c("DEFINITELY YES",
"YES", "RATHER YES", "NEITHER YES OR NO", "PROBABLY NOT",
"NO", "DEFINITELY NOT")
levels(diagnosis$gp54_08) = c("DEFINITELY YES", "YES",
"RATHER YES", "NEITHER YES OR NO", "PROBABLY NOT", "NO",
"DEFINITELY NOT")
levels(diagnosis$gp54_09) = c("DEFINITELY YES", "YES", "RATHER YES",
"NEITHER YES OR NO", "PROBABLY NOT", "NO", "DEFINITELY NOT")
levels(diagnosis$gp54_10) = c("DEFINITELY YES", "YES", "RATHER YES", "NEITHER YES OR NO",
"PROBABLY NOT", "NO", "DEFINITELY NOT")
levels(diagnosis$gp54_11) = c("DEFINITELY YES",
"YES", "RATHER YES", "NEITHER YES OR NO", "PROBABLY NOT",
"NO", "DEFINITELY NOT")
levels(diagnosis$gp54_12) = c("DEFINITELY YES", "YES",
"RATHER YES", "NEITHER YES OR NO", "PROBABLY NOT", "NO",
"DEFINITELY NOT")
levels(diagnosis$gp54_13) = c("DEFINITELY AGREE", "AGREE",
"RATHER AGREE", "NEITHER AGREE NOR DISAGREE", "RATHER DISAGREE",
"DISAGREE", "DEFINITELY DISAGREE")
levels(diagnosis$gp54_14) = c("DEFINITELY AGREE",
"AGREE", "RATHER AGREE", "NEITHER AGREE NOR DISAGREE", "RATHER DISAGREE",
"DISAGREE", "DEFINITELY DISAGREE")
levels(diagnosis$gp54_15) = c("DEFINITELY AGREE",
"AGREE", "RATHER AGREE", "NEITHER AGREE NOR DISAGREE", "RATHER DISAGREE",
"DISAGREE", "DEFINITELY DISAGREE")
levels(diagnosis$gp54_16) = c("DEFINITELY YES",
"YES", "RATHER YES", "NEITHER YES OR NO", "PROBABLY NOT",
"NO", "DEFINITELY NOT")
levels(diagnosis$gp54_17) = c("DEFINITELY AGREE",
"AGREE", "RATHER AGREE", "NEITHER AGREE NOR DISAGREE", "RATHER DISAGREE",
"DISAGREE", "DEFINITELY DISAGREE")
levels(diagnosis$gp54_18) = c("DEFINITELY AGREE",
"AGREE", "RATHER AGREE", "NEITHER AGREE NOR DISAGREE", "RATHER DISAGREE",
"DISAGREE", "DEFINITELY DISAGREE")
levels(diagnosis$gp54_19) = c("DEFINITELY YES",
"YES", "RATHER YES", "NEITHER YES OR NO", "PROBABLY NOT",
"NO", "DEFINITELY NOT")
levels(diagnosis$gp54_20) = c("DEFINITELY YES", "YES",
"RATHER YES", "NEITHER YES NOR NOT", "RATHER NOT", "NO",
"DEFINITELY NOT")
levels(diagnosis$gp54_21) = c("DEFINITELY YES", "YES", "RATHER YES",
"NEITHER YES NOR NOT", "RATHER NOT", "NO", "DEFINITELY NOT"
)
levels(diagnosis$gp54_22) = c("DEFINITELY YES", "YES", "RATHER YES", "NEITHER YES NOR NOT",
"RATHER NOT", "NO", "DEFINITELY NOT")
# NOTE(review): everything below modifies `auta2012` (looked up outside this
# function and copied locally on first modification), NOT `auta2012_eng` --
# yet it is `auta2012_eng` that gets pushed to the global environment at the
# end of this function, so these level/accessory translations appear to be
# lost. Also `auta2012` must be reachable in scope (e.g. the package
# attached) or the first assignment below errors. TODO confirm intent.
levels(auta2012$Skrzynia.biegow) = c("", "automatic", "manual")
levels(auta2012$Pojazd.uszkodzony) = c("", "Yes")
levels(auta2012$Rodzaj.paliwa) = c("petrol", "petrol+LPG", "ethanol", "hybrid", "electric", "diesel")
levels(auta2012$Kolor) <- c("", "sand", "sand-metallic", "white", "white-metallic",
"dark red", "dark red-metallic", "brown", "brown-metallic",
"black", "black-metallic", "red", "red-metallic",
"violet", "violet-metallic", "graphite", "graphite-metallic",
"dark blue", "dark blue-metallic", "blue", "blue-metallic",
"orange", "orange-metallic", "pink", "pink-metallic",
"silver", "silver-metallic", "grey", "grey-metallic", "cherry",
"cherry-metallic", "green", "green-metallic", "yellow",
"yellow-metallic", "gold", "gold-metallic")
auta2012$Kolor <- factor(as.character(auta2012$Kolor))
# Translate the free-text accessories column, one phrase at a time.
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="el. lusterka", replacement="electric mirrors")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="klimatyzacja", replacement="air conditioning")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="alufelgi", replacement="alloy wheels")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="centralny zamek", replacement="central locking")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="poduszka powietrzna", replacement="airbag")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="wspomaganie kierownicy", replacement="power steering")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="komputer", replacement="computer")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="przyciemniane szyby", replacement="tinted windows")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="skorzana tapicerka", replacement="leather upholstery")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="tempomat", replacement="cruise control")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="hak", replacement="hook")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="el. szyby", replacement="el. windows")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="welurowa tapicerka", replacement="velor upholstery")
# NOTE(review): the replacement below embeds a newline inside the string;
# kept verbatim, but presumably "fog lights" (no newline) was intended.
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="lwiatla przeciwmglowe", replacement="fog lights
")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="kierownica wielofunkcyjna", replacement="multifunction steering wheel")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="pod. przednia szyba", replacement="the windshield")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="podgrzewane fotele", replacement="heated seats")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="czujnik parkowania", replacement="parking sensor")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="czujnik deszczu", replacement="rain sensor")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="system nawigacji", replacement="navigation system")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="ksenony", replacement="xeons")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="szyberdach", replacement="sunroof")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="niezalezne ogrzewanie", replacement="independent heating")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="bagaznik na dach", replacement="trunk on the roof")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="blokada skrzyni biegAlw", replacement="gearbox lock")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="reg. wysokole podwozia", replacement="height adjustable chassis")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="blokada dyferencjalu", replacement="differential lock")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="blokada skrzyni biegow", replacement="transmission lock box")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="instalacja gazowa", replacement="gas-fittings")
auta2012$Wyposazenie.dodatkowe <- gsub(auta2012$Wyposazenie.dodatkowe, pattern="klatka", replacement="cage")
# Publish the English-only datasets.
pushGlobal("seriesIMDB", value = seriale_eng)
pushGlobal("warsaw_names", value = imiona_warszawa_eng)
pushGlobal("votes2014", value = mandatySejmik2014_eng)
pushGlobal("cats_birds", value = cats_birds)
pushGlobal("diagnosis", value = diagnosis)
pushGlobal("diagnosisDict", value = diagnosisDict)
} else {
# Polish branch: restore the original Polish column names.
colnames(auta2012_eng) <- c("Cena", "Waluta", "Cena.w.PLN", "Brutto.netto", "KM", "kW",
"Marka", "Model", "Wersja", "Liczba.drzwi", "Pojemnosc.skokowa",
"Przebieg.w.km", "Rodzaj.paliwa", "Rok.produkcji", "Kolor", "Kraj.aktualnej.rejestracji",
"Kraj.pochodzenia", "Pojazd.uszkodzony", "Skrzynia.biegow", "Status.pojazdu.sprowadzonego",
"Wyposazenie.dodatkowe")
colnames(TCGA_BRCA_eng) <- c("TP53", "plec", "czy.zyje", "dni.do.smierci", "czy.nowy.guz")
colnames(pearson_eng) <- c("syn", "ojciec")
colnames(galton_eng) <- c("syn", "sr_rodzic")
# colnames(imiona_warszawa_eng) <- c("imie", "plec", "rok", "miesiac", "liczba")
colnames(WIG_eng) <- c("Data", "Nazwa", "Kurs.otwarcia", "Kurs.maksymalny",
"Kurs.minimalny", "Kurs.zamkniecia", "Zmiana", "Wartosc.obrotu.w.tys.zl")
# colnames(mandatySejmik2014_eng) <- c("Wojewodztwo", "PSL", "PiS", "PO", "SLD", "Inne", "ProcentWaznychGlosow",
# "long", "lat")
# colnames(cats_birds) <- c("gatunek", "waga", "dlugosc", "predkosc", "habitat", "zywotnosc", "druzyna" )
# pushGlobal("koty_ptaki", value = cats_birds)
}
# Datasets published in both language modes.
pushGlobal("pearson", value = pearson_eng)
pushGlobal("galton", value = galton_eng)
pushGlobal("TCGA_BRCA", value = TCGA_BRCA_eng)
pushGlobal("WIG", value = WIG_eng)
pushGlobal("auta2012", value = auta2012_eng)
invisible(0)
}
|
e3e77cca1910400184dba45ef315159d88757772
|
04902011b8f098524a0a73d22400f5e52fdb4153
|
/R/EPV.R
|
2f1374616b3b7a826fcc3c4a4efb5205d011d3e3
|
[
"MIT"
] |
permissive
|
DaDirnbocher/MarkovThiele
|
800d0f2d7556b3bd8df964d5b4f8bbcb29f1ff51
|
e090c4c51b8eff0f3430470df51d6c65136bde53
|
refs/heads/main
| 2023-02-17T11:24:34.666300
| 2021-01-17T18:56:38
| 2021-01-17T18:56:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,454
|
r
|
EPV.R
|
#' Expected present value of future cashflows for all states and times.
#'
#' Computes, by backward recursion from \code{lastAge} down to
#' \code{firstAge}, the expected present value (EPV) of future cashflows
#' for every state/time combination of the Markov chain described by
#' \code{mc}. Requires the data.table package (the helpers it calls also
#' use magrittr's \code{\%>\%}).
#'
#' NOTE(review): \code{trans[, toTime := time + 1]} modifies the caller's
#' transition table by reference (data.table semantics); confirm this side
#' effect is intended. Also, \code{cashflowPost} extracted below is only
#' read inside \code{getPost()}, which resolves the name in its own
#' defining environment, not in this function's frame -- verify it is
#' actually visible there.
#'
#' @param mc A list with elements \code{trans}, \code{cashflowPre},
#'   \code{cashflowPost}, \code{states}, \code{disc}, \code{W} (terminal
#'   values; may be empty), \code{lastAge}, \code{firstAge}.
#' @return A data.table with one row per time/state, containing the EPV
#'   column \code{v} plus the intermediate columns \code{pre} and
#'   \code{post}.
#' @export
EPV <- function(mc){
  trans <- mc[["trans"]]
  cashflowPre <- mc[["cashflowPre"]]
  cashflowPost <- mc[["cashflowPost"]]
  states <- mc[["states"]]
  disc <- mc[["disc"]]
  V <- mc[["W"]]
  lastAge <- mc[["lastAge"]]
  firstAge <- mc[["firstAge"]]
  trans[, toTime := time + 1]
  # 0 - if no terminal conditions were supplied, use value 0 at lastAge.
  if (nrow(V) == 0) {
    V <- data.table::data.table(state = states,
                                time = rep(lastAge, length(states)),
                                v = rep(0, length(states)))
  }
  # 1 - build the full grid of time/state combinations and merge in the
  #     known (terminal) values; unknown entries stay NA.
  stutz <- data.table::merge.data.table(
    data.table::data.table(time = firstAge:lastAge, ones = rep(1, lastAge - firstAge + 1)),
    data.table::data.table(state = states, ones = rep(1, length(states))),
    by = ("ones"), allow.cartesian = TRUE)
  V <- data.table::merge.data.table(stutz, V, by = c("time", "state"), all.x = TRUE)
  V$ones <- NULL
  # 2 - backward recursion over time.
  for (year in (lastAge - 1):firstAge) {
    # Expected cashflow at the beginning of the period.
    V <- V[time == year & is.na(v),
           pre := getCashflowPre(state, year, cashflowPre)]
    # Discounted expected value at the end of the period.
    V <- V[time == year & is.na(v),
           post := disc[time == year + 1, pv] / disc[time == year, pv] *
             getPost(state, year, trans, V)]
    # Total EPV: reuse the pre/post columns instead of re-evaluating
    # getCashflowPre()/getPost() a second time, as the original code did.
    # (A discarded no-op subset expression from the original was removed.)
    V <- V[time == year & is.na(v), v := pre + post]
  }
  return(V)
}
# Look up the beginning-of-period cashflow for each requested state.
# Returns a numeric vector, one amount per element of selState; states with
# no cashflow entry at selTime yield 0. Requires data.table and magrittr.
getCashflowPre <- function(selState, selTime, cashflowPre){
# selState..vector, selTime..number, cashflowPre..data.table
# filter cashflowPre for time,
# sum over all entries (in case of double entries) and make unique
tempCF <- cashflowPre[state %in% selState & time==selTime][
, amount := sum(amount), by=state] %>%
unique()
# merge with given states (better alternative use match)
# NOTE(review): merge.data.table sorts by the key by default, so the rows
# of foundAmount come back ordered by state, not in the order of selState.
# The caller assigns this vector positionally -- verify the orders agree.
foundAmount <- data.table::merge.data.table(
data.table::data.table(state=selState),
tempCF,
all.x = TRUE)
# States without a matching cashflow entry get amount 0.
foundAmount[is.na(amount), amount:=0]
return (foundAmount$amount)
}
# For each requested state, compute the expected end-of-period value:
# sum over reachable states of p * (end-of-period cashflow + EPV at time+1).
# Returns a numeric vector, one value per element of selState.
# NOTE(review): `cashflowPost` is a free variable here -- it is resolved in
# this function's defining environment (package/global), NOT in EPV()'s
# frame where a local `cashflowPost` is extracted from `mc`. Unless an
# object of that name exists globally, this errors; likely a scoping bug.
getPost <- function(selState, selTime, trans, W){
# selState..vector, selTime..number, cashflowPre..data.table
# get EPV at time + 1 and cashflow at end of period
# start with selected States
temp <- data.table::merge.data.table(
data.table::data.table(from=selState),
trans[from %in% selState & time==selTime],
all.x = TRUE
)
# add EPV at time + 1
temp <- data.table::merge.data.table(
temp,
W[time==(selTime+1)],
all.x=TRUE, by.x=c("to", "toTime"), by.y=c("state", "time"))
# preprocess cashflowPost
# (sum duplicate from/to entries, then deduplicate rows)
tempCF <- cashflowPost[from %in% selState & time==selTime][
, amount := sum(amount), by=.(from, to)] %>%
unique()
# add cashflows at end of period
temp <- data.table::merge.data.table(temp,
tempCF,
all.x=TRUE, by=c("from", "to", "time"))
# Missing merges mean "no cashflow / no value / no transition": treat as 0.
temp[is.na(amount), amount:=0]
temp[is.na(v), v:=0]
temp[is.na(p), p:=0]
# Expected value per origin state.
# NOTE(review): the grouped result's row order comes from the merged table,
# which merge.data.table sorts by key -- confirm it matches selState order,
# since the caller consumes this vector positionally.
post <- temp[ , sum(p * (amount+v)), by=from]
# print (temp)
return (post$V1)
}
|
ba0f7fe4e5c3acf245f712211aea8a8826b1168d
|
289f05ddf4a2e10e036d5ccfd2415343fb65f90e
|
/R/delete_adset.R
|
8d4632aa7f48d0abfd4172f66f634feffee52f02
|
[] |
no_license
|
JohnCoene/oRion
|
6013270bd63f68fe9368560e5b7076dd1760867e
|
7078198932e15dc2154437c11d45f4abe13940e3
|
refs/heads/master
| 2021-01-17T16:11:36.112796
| 2019-06-09T15:07:00
| 2019-06-09T15:07:00
| 54,460,571
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,190
|
r
|
delete_adset.R
|
#' Delete an adset
#'
#' @description Removes a single adset, identified by its id.
#'
#' @param adset.id id of the adset to delete
#'
#' @examples
#' \dontrun{
#' # authenticate
#' orionOAuth(client.id = 0000,
#'            client.secret = "0x00000000x00x0x000xxx0000x0xx0")
#'
#' # list of campaigns
#' camps <- listCampaigns()
#'
#' # list of audience templates
#' audiences <- listAudiences()
#'
#' # create adset using random template, in random campaign
#' adset <- createAdset(body = list(
#'   name = "My Adset",
#'   bid_type = bidType("CPI"),
#'   unit_price = 1000,
#'   budget_lifetime = 10000,
#'   campaign_id = sample(camps$id, 1),
#'   audience_create_way = 2,
#'   app_show_type = appShowType("newsfeed"),
#'   audience_template_id = sample(audiences$id, 1),
#'   click_url = "http://app.adjust.io"))
#'
#' # delete random adset
#' deleteAdset(adset.id = adset$id)
#' }
#'
#' @seealso \code{\link{orionOAuth}}, \code{\link{createAdset}}
#'
#' @author John Coene \email{jcoenep@@gmail.com}
#'
#' @export
deleteAdset <- function(adset.id) {
  # Thin wrapper around the generic object-deletion endpoint.
  deleteObject(object = "adset", id = adset.id)
}
|
37ce34fbfc46f37437274e5d2f69c7dcde7d0f29
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/cccrm/R/cccvc.R
|
3c88260b16dd05eb4951c65be018e4c2e544059c
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 166
|
r
|
cccvc.R
|
# Dispatch to the concordance-correlation variance-components estimator:
# with a covariate-by-method interaction (cccvc2) when `int` is TRUE,
# otherwise the additive model (cccvc1).
cccvc <- function(dataset, ry, rind, rmet, covar = NULL, int = FALSE, cl = 0.95) {
  if (int == TRUE) {
    cccvc2(dataset, ry, rind, rmet, covar, cl)
  } else {
    cccvc1(dataset, ry, rind, rmet, covar, cl)
  }
}
|
f1ae8e32b4cbfbe512c239d6799a3193793a06d8
|
f1c52caa027b8e5a5593f9c8c4693d6cbfa107df
|
/Experiments.R
|
b3a625b2fef9c1ff7ff7542554395e0e5030a220
|
[] |
no_license
|
alexanderAnokhin/SimOpt
|
2d39cf03fcf365a7bd9d40be1a8760526266085f
|
d445e660556bf9a2fc80a74f5fbe1112668f9e11
|
refs/heads/master
| 2016-08-12T06:48:40.006174
| 2016-01-30T14:07:36
| 2016-01-30T14:07:36
| 50,689,164
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,584
|
r
|
Experiments.R
|
## Experiments
## Driver script comparing the Knowledge-Gradient (KG) policy against a
## random measurement policy. All helpers (getPWithProperty,
## getCorrelatedPriors, getEqualPriors, KGPolicy, randomPolicy,
## getDistance) come from the source()d files below; assumes the working
## directory is the project root. Results are cached under data/.
## Seed in order to reproduce experiments
set.seed(20160129)
source("Utils.R")
source("SetUp.R")
source("KGPolicy.R")
source("RandomPolicy.R")
##
## Experiment 1. Average performance along the
## measurement budget [5, 100] and number of alternatives {5, 20}
##
## Seed in order to reproduce experiments
set.seed(20160129)
# m = number of alternatives, R = replications per budget N.
m = 5
R = 100
results_05 = data.frame()
for (N in seq(from = 5, to = 25, by = 1)) {
rows = NULL
# NOTE(review): rbind-growing `rows`/`results_05` inside loops is O(n^2);
# acceptable at these sizes but preallocation would be faster.
for (r in 1:R) {
p <- getPWithProperty(m)
state0 <- getCorrelatedPriors(p, 1, 3)
resultKG <- KGPolicy(p, state0, N)
resultRnd <- randomPolicy(p, state0, N)
rows <- rbind(rows, c(getDistance(p, resultKG$x), getDistance(p, resultRnd$x)))
}
print(N)
results_05 <- rbind(results_05, c(N, mean(rows[, 1]), sd(rows[, 1]), mean(rows[, 2]), sd(rows[, 2])))
}
names(results_05) <- c("N", "KG.Dist", "KG.Sd", "Rand.Dist", "Rand.Sd")
save(results_05, file = "data/results_05.rda")
## Seed in order to reproduce experiments
set.seed(20160129)
# Same experiment with m = 20 alternatives and larger budgets.
m = 20
R = 100
results_20 = data.frame()
for (N in seq(from = 20, to = 100, by = 10)) {
rows = NULL
for (r in 1:R) {
p <- getPWithProperty(m)
state0 <- getCorrelatedPriors(p, 1, 3)
resultKG <- KGPolicy(p, state0, N)
resultRnd <- randomPolicy(p, state0, N)
rows <- rbind(rows, c(getDistance(p, resultKG$x), getDistance(p, resultRnd$x)))
}
print(N)
results_20 <- rbind(results_20, c(N, mean(rows[, 1]), sd(rows[, 1]), mean(rows[, 2]), sd(rows[, 2])))
}
names(results_20) <- c("N", "KG.Dist", "KG.Sd", "Rand.Dist", "Rand.Sd")
save(results_20, file = "data/results_20.rda")
## Plot results
## Mean distance with +/- 1 sd bands, KG (red) vs Random (green).
par(mfcol = c(1, 2))
plot(seq(from = 5, to = 25, by = 1), results_05$KG.Dist, type = "l", xlab = "N", ylab = "dis(x*)", lwd = 2, col = 2, ylim = c(-0.3, 1.6), main = "m = 5", panel.first = grid(lwd = 1.5))
lines(seq(from = 5, to = 25, by = 1), results_05$KG.Dist + results_05$KG.Sd, lwd = 2, col = 2, lty = 3)
lines(seq(from = 5, to = 25, by = 1), results_05$KG.Dist - results_05$KG.Sd, lwd = 2, col = 2, lty = 3)
lines(seq(from = 5, to = 25, by = 1), results_05$Rand.Dist, lwd = 2, col = 3)
lines(seq(from = 5, to = 25, by = 1), results_05$Rand.Dist + results_05$Rand.Sd, lwd = 2, col = 3, lty = 3)
lines(seq(from = 5, to = 25, by = 1), results_05$Rand.Dist - results_05$Rand.Sd, lwd = 2, col = 3, lty = 3)
legend("topright", c("KG", "Random"), lwd = c(2, 2), col = c(2, 3))
plot(seq(from = 20, to = 100, by = 10), results_20$KG.Dist, type = "l", xlab = "N", ylab = "dis(x*)", lwd = 2, col = 2, ylim = c(-0.5, 4.0), main = "m = 20", panel.first = grid(lwd = 1.5))
lines(seq(from = 20, to = 100, by = 10), results_20$KG.Dist + results_20$KG.Sd, lwd = 2, col = 2, lty = 3)
lines(seq(from = 20, to = 100, by = 10), results_20$KG.Dist - results_20$KG.Sd, lwd = 2, col = 2, lty = 3)
lines(seq(from = 20, to = 100, by = 10), results_20$Rand.Dist, lwd = 2, col = 3)
lines(seq(from = 20, to = 100, by = 10), results_20$Rand.Dist + results_20$Rand.Sd, lwd = 2, col = 3, lty = 3)
lines(seq(from = 20, to = 100, by = 10), results_20$Rand.Dist - results_20$Rand.Sd, lwd = 2, col = 3, lty = 3)
legend("topright", c("KG", "Random"), lwd = c(2, 2), col = c(2, 3))
##
## Experiment 2. KG factors over time
##
## Seed in order to reproduce experiments
set.seed(20160129)
m = 5
N = 25
R = 1000
kg.factors = NULL
for (r in 1:R) {
p <- getPWithProperty(m)
state0 <- getCorrelatedPriors(p, 1, 3)
resultKG <- KGPolicy(p, state0, N)
kg.factors <- rbind(kg.factors, resultKG$kg.factors)
print(r)
}
# Mean KG factor per measurement step, and the fraction of replications
# where the factor is (numerically) non-zero.
kgFactorMeans <- apply(kg.factors, 2, mean)
notZeroRate <- apply(kg.factors, 2, function(x) {
zeros <- sum(abs(x) < 1e-12)
(length(x) - zeros)/length(x) })
par(mfcol = c(1, 2))
plot(kgFactorMeans, type = "l", lwd = 2, ylim = c(0.0002, 0.0018), xlab = "n", ylab = expression(v^n), panel.first = grid(lwd = 1.5))
plot(notZeroRate, type = "l", lwd = 2, xlab="n", ylab = expression(paste("P[", v^n, " > 0]")), panel.first = grid(lwd = 1.5))
##
## Experiment 3. Effect of p elements
##
## Same as experiment 1 (m = 5) but with a different argument (21) to
## getPWithProperty -- presumably a property/size parameter; see Utils.R.
set.seed(20160129)
m = 5
R = 1000
results_05_v2 = data.frame()
for (N in seq(from = 5, to = 25, by = 1)) {
rows = NULL
for (r in 1:R) {
p <- getPWithProperty(m, 21)
state0 <- getCorrelatedPriors(p, 1, 3)
resultKG <- KGPolicy(p, state0, N)
resultRnd <- randomPolicy(p, state0, N)
rows <- rbind(rows, c(getDistance(p, resultKG$x), getDistance(p, resultRnd$x)))
}
print(N)
results_05_v2 <- rbind(results_05_v2, c(N, mean(rows[, 1]), sd(rows[, 1]), mean(rows[, 2]), sd(rows[, 2])))
}
names(results_05_v2) <- c("N", "KG.Dist", "KG.Sd", "Rand.Dist", "Rand.Sd")
save(results_05_v2, file = "data/results_05_v2.rda")
par(mfcol=c(1, 1))
plot(seq(from = 5, to = 25, by = 1), results_05_v2$KG.Dist, type = "l", xlab = "N", ylab = "dis(x*)", lwd = 2, col = 2, ylim = c(-0.3, 3.6), panel.first = grid(lwd = 1.5))
lines(seq(from = 5, to = 25, by = 1), results_05_v2$KG.Dist + results_05_v2$KG.Sd, lwd = 2, col = 2, lty = 3)
lines(seq(from = 5, to = 25, by = 1), results_05_v2$KG.Dist - results_05_v2$KG.Sd, lwd = 2, col = 2, lty = 3)
lines(seq(from = 5, to = 25, by = 1), results_05_v2$Rand.Dist, lwd = 2, col = 3)
lines(seq(from = 5, to = 25, by = 1), results_05_v2$Rand.Dist + results_05_v2$Rand.Sd, lwd = 2, col = 3, lty = 3)
lines(seq(from = 5, to = 25, by = 1), results_05_v2$Rand.Dist - results_05_v2$Rand.Sd, lwd = 2, col = 3, lty = 3)
legend("topright", c("KG", "Random"), lwd = c(2, 2), col = c(2, 3))
##
## Experiment 4. Effect of priors
##
## Same comparison but with equal (uninformative) priors instead of
## correlated priors.
set.seed(20160129)
m = 5
R = 1000
results_05_v3 = data.frame()
for (N in seq(from = 5, to = 25, by = 1)) {
rows = NULL
for (r in 1:R) {
p <- getPWithProperty(m)
state0 <- getEqualPriors(p, 4)
resultKG <- KGPolicy(p, state0, N)
resultRnd <- randomPolicy(p, state0, N)
rows <- rbind(rows, c(getDistance(p, resultKG$x), getDistance(p, resultRnd$x)))
}
print(N)
results_05_v3 <- rbind(results_05_v3, c(N, mean(rows[, 1]), sd(rows[, 1]), mean(rows[, 2]), sd(rows[, 2])))
}
names(results_05_v3) <- c("N", "KG.Dist", "KG.Sd", "Rand.Dist", "Rand.Sd")
save(results_05_v3, file = "data/results_05_v3.rda")
par(mfcol=c(1, 1))
plot(seq(from = 5, to = 25, by = 1), results_05_v3$KG.Dist, type = "l", xlab = "N", ylab = "dis(x*)", lwd = 2, col = 2, ylim = c(-0.3, 2), panel.first = grid(lwd = 1.5))
lines(seq(from = 5, to = 25, by = 1), results_05_v3$KG.Dist + results_05_v3$KG.Sd, lwd = 2, col = 2, lty = 3)
lines(seq(from = 5, to = 25, by = 1), results_05_v3$KG.Dist - results_05_v3$KG.Sd, lwd = 2, col = 2, lty = 3)
lines(seq(from = 5, to = 25, by = 1), results_05_v3$Rand.Dist, lwd = 2, col = 3)
lines(seq(from = 5, to = 25, by = 1), results_05_v3$Rand.Dist + results_05_v3$Rand.Sd, lwd = 2, col = 3, lty = 3)
lines(seq(from = 5, to = 25, by = 1), results_05_v3$Rand.Dist - results_05_v3$Rand.Sd, lwd = 2, col = 3, lty = 3)
legend("topright", c("KG", "Random"), lwd = c(2, 2), col = c(2, 3))
## Tests
## Paired one-sided t-test: is KG's mean distance smaller than Random's?
t.test(results_05_v3$KG.Dist, results_05_v3$Rand.Dist, "less", paired = TRUE)
##
## Experiment 5.
##
## Analytic plot of the precision increment per measurement as a function
## of the number of alternatives m, for several prior counts n_xy.
plot(5:20, 1/(5:20 * (2 + 1)), ylim = c(0, 0.07),
xlab = expression(m), ylab = expression(paste(Delta, bar(p)[x])), type = "l", lwd = 2, panel.first = grid(lwd = 1.5))
lines(5:20, 1/(5:20 * (3 + 1)), lty = 2, lwd = 2)
lines(5:20, 1/(5:20 * (5 + 1)), lty = 3, lwd = 2)
lines(5:20, 1/(5:20 * (10 + 1)), lty = 4, lwd = 2)
legend("topright", c(expression(paste(n[xy], " = 2")),
expression(paste(n[xy], " = 3")),
expression(paste(n[xy], " = 5")),
expression(paste(n[xy], " = 10"))), lty = 1:4, lwd = rep(2, 4))
|
287eda04912d05d314254ed4ae4516c362e44401
|
f45c49cff40cae33bcd3261f113afbf895777ca9
|
/third/spams-R/spams/man/spams.lassoMask.Rd
|
dc04e60bc0bf65d9ed6c54bd959a2ac70578778e
|
[
"Apache-2.0",
"GPL-3.0-only"
] |
permissive
|
yfyang86/cshap
|
0dc7bc1d3aa576ee0d1f3ab5eb85176f5c0ba50c
|
0c4b3296193a379be2941430657f5c99a6561fcb
|
refs/heads/main
| 2023-04-21T05:52:57.161141
| 2021-04-29T09:23:25
| 2021-04-29T09:23:25
| 360,166,399
| 0
| 0
|
Apache-2.0
| 2021-04-29T09:23:26
| 2021-04-21T12:43:08
|
C++
|
UTF-8
|
R
| false
| false
| 2,547
|
rd
|
spams.lassoMask.Rd
|
\encoding{latin1}
\name{spams.lassoMask}
\alias{spams.lassoMask}
\title{
spams.lassoMask
}
\description{
spams.lassoMask is a variant of spams.lasso that handles
binary masks. It aims at addressing the following problems
for all columns x of X, and beta of B, it computes one column alpha of A
that solves
1) when mode=0
min_{alpha} ||diag(beta)(x-Dalpha)||_2^2 s.t. ||alpha||_1 <= lambda1
2) when mode=1
min_{alpha} ||alpha||_1 s.t. ||diag(beta)(x-Dalpha)||_2^2
<= lambda1*||beta||_0/m
3) when mode=2
min_{alpha} 0.5||diag(beta)(x-Dalpha)||_2^2 +
lambda1*(||beta||_0/m)*||alpha||_1 +
(lambda2/2)||alpha||_2^2
Possibly, when pos=true, it solves the previous problems
with positivity constraints on the vectors alpha
}
\usage{
spams.lassoMask(X,D,B,L= -1,lambda1= NULL,lambda2= 0.,mode= 'PENALTY',pos= FALSE,
numThreads= -1,verbose=FALSE)
}
\arguments{
\item{X}{%
double m x n matrix (input signals)
\preformatted{%
m is the signal size
n is the number of signals to decompose
}}
\item{D}{%
double m x p matrix (dictionary)
\preformatted{%
p is the number of elements in the dictionary
}}
\item{B}{%
boolean m x n matrix (mask)
\preformatted{%
p is the number of elements in the dictionary
}}
\item{verbose}{%
verbose mode
\preformatted{%
}}
\item{lambda1}{%
(parameter)
\preformatted{%
}}
\item{L}{%
(optional, maximum number of elements of each
\preformatted{%
decomposition)
}}
\item{pos}{%
(optional, adds positivity constraints on the
\preformatted{%
coefficients, false by default)
}}
\item{mode}{%
(see above, by default: 2)
\preformatted{%
}}
\item{lambda2}{%
(optional parameter for solving the Elastic-Net)
\preformatted{%
for mode=0 and mode=1, it adds a ridge on the Gram Matrix
}}
\item{numThreads}{%
(optional, number of threads for exploiting
\preformatted{%
multi-core / multi-cpus. By default, it takes the value -1,
which automatically selects all the available CPUs/cores).
}}
}
\value{
\item{A}{%
double sparse p x n matrix (output coefficients)
\preformatted{%
}}
}
\author{
Julien MAIRAL, 2010 (spams, matlab interface and documentation)
Jean-Paul CHIEZE 2011-2012 (R interface)
}
\note{
this function admits a few experimental usages, which have not
been extensively tested:
- single precision setting (even though the output alpha is double
precision)
}
|
db600f4ab77df312ff702100f7486e8af1ec1c3c
|
e16bad6b016777594f00f3a912a331d84a53bb68
|
/All R Codes/Download Unicheck.R
|
a364a9d9ab93f85fc4631f16071728acfb445c65
|
[] |
no_license
|
ashutoshsingh25/R-Scripts
|
f4d3ad42bd608a752fc8c730a596fb2de04dd6ee
|
2f90d158db16a45c6b0ea9cb3ea0287806258df4
|
refs/heads/master
| 2021-07-15T08:31:57.666827
| 2020-06-11T18:33:13
| 2020-06-11T18:33:13
| 174,949,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,064
|
r
|
Download Unicheck.R
|
# Browser-automation script: logs into unicheck.com with RSelenium/Chrome,
# opens the "3rd Batch" folder, scrolls the file list into view and then
# iterates over the report links, clicking the download button for each.
# SECURITY NOTE(review): the login email and password are hard-coded in
# plain text below -- move them to environment variables and rotate the
# exposed credential.
library(RSelenium)
# NOTE(review): stray help-page lookup, likely left over from development.
?tryCatch
driver<- rsDriver(browser=c("chrome")) #starting selenium server for chrome browser
remDr <- driver[["client"]] #linking chrome browser
remDr$navigate("https://unicheck.com/login/business/")
# NOTE(review): the CSS class selectors below ('_1uOq0o', the long
# 'cabinet-...' classes) are auto-generated and will break when the site
# redeploys; the fixed Sys.sleep() waits are similarly brittle.
EMail <- remDr$findElement(using = "css","[class='_1uOq0o']") #selecting email id block
EMail$sendKeysToElement(list("category_kp@indiamart.com")) #inserting email id
Password <- remDr$findElement(using = "css","[type='password']") #selecting pw block
Password$sendKeysToElement(list("dehradun@224")) #insert password
Sys.sleep(15)
login <- remDr$findElement(using = "css","[type='submit']") #select submit button
login$clickElement() #click on the submit button
Sys.sleep(15)
folder <- remDr$findElement(using = "css","[title='3rd Batch']") #selecting block
folder$clickElement() #click the selected block
#B1 <- remDr$findElement(using = "css","[title='360017406112']")
#B1$clickElement()
#B2 <- remDr$findElement(using = "css","[class='cabinet-core-Button-___index__button___3Yv6Z cabinet-routes-viewer-viewerButtons-___viewerButtons__button___2EKxB cabinet-core-Button-___index__button--circle___1augV']")
#B2$clickElement()
#remDr$executeScript("window.scrollTo(0,document.body.scrollHeight);") #scroll to end
pg <- remDr$findElement(using = "css","[class='cabinet-routes-library-File-file-actions-___file-actions__file-actions__item___GTy4A']")
#lenOfPage <- remDr$executeScript("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
pg$clickElement()
pg$clickElement()
# Scroll to the bottom repeatedly so the lazy-loaded file list is fully
# rendered before collecting the links. (`j=1` before the loop is redundant.)
j=1
for (j in 1:100) {
webElem <- remDr$findElement("css", "body")
webElem$sendKeysToElement(list(key="end"))
#webElem$sendKeysToElement(list(key="down_arrow"))
}
# Collect all report links, then visit/download each one in turn.
# NOTE(review): the loop starts at 2 (skipping the first link) and
# `2:length(UrlIndex)` misbehaves if fewer than 2 links exist -- confirm.
UrlIndex <- remDr$findElements(using = "xpath","//*[@id='app']/div/div[1]/div[2]/div/div[4]/a")
Url <- remDr$getCurrentUrl()[[1]]
i=1
for (i in 2:length(UrlIndex)) {
remDr$navigate(Url)
A1 <- remDr$findElement(using = "xpath",paste0("//*[@id='app']/div/div[1]/div[2]/div/div[4]/a[",i,"]"))
A1$clickElement()
Sys.sleep(10)
#ifelse(remDr$findElement(using = "css","['class=cabinet-routes-process-___index__process-tooltip_text___166Kc']"),A1$doubleclick(),A1$clickElement())
B2 <- remDr$findElement(using = "css","[class='cabinet-core-Button-___index__button___3Yv6Z cabinet-routes-viewer-viewerButtons-___viewerButtons__button___2EKxB cabinet-core-Button-___index__button--circle___1augV']")
B2$clickElement()
Sys.sleep(10)
# Best-effort retry: if the "processing" tooltip is missing (error branch),
# check its text / wait, then click the download button again via finally.
# NOTE(review): ifelse() is misused here for side effects and the finally
# clause clicks unconditionally -- presumably "click once processing says
# Done" was intended; verify against the live site.
tryCatch(remDr$findElement(using = "css","[class='cabinet-routes-process-___index__process-tooltip_text___166Kc']"),
error=function(e) ifelse(remDr$findElement(using = "css","[class='cabinet-routes-process-___index__process-tooltip_text___166Kc']")$getElementText() == "Done",B2$clickElement(),Sys.sleep(45)),
finally = B2$clickElement() )
remDr$navigate(Url)
Sys.sleep(30)
}
|
d0b8405121cedac5a37c3c66796c54b2b5ec7f96
|
7e85a5194f22cec34af956f988277e0ec380105d
|
/man/tableau_setting.Rd
|
ba1f594925e1c393bf9a617eb6aa8e4fbb1cd0f1
|
[
"MIT"
] |
permissive
|
rstudio/shinytableau
|
cf9f28ed8a55853d053dc664a3360f439f385670
|
83c752ff861b2db87db5da64174e21b965898e5d
|
refs/heads/master
| 2023-05-29T21:42:08.035909
| 2021-06-05T06:02:20
| 2021-06-05T06:02:20
| 276,984,488
| 62
| 7
|
NOASSERTION
| 2020-11-04T18:03:25
| 2020-07-03T20:45:09
|
JavaScript
|
UTF-8
|
R
| false
| true
| 1,689
|
rd
|
tableau_setting.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/settings.R
\name{tableau_setting}
\alias{tableau_setting}
\alias{tableau_settings_all}
\title{Read Tableau extension settings}
\usage{
tableau_setting(
name,
default = NULL,
session = shiny::getDefaultReactiveDomain()
)
tableau_settings_all(session = shiny::getDefaultReactiveDomain())
}
\arguments{
\item{name}{The name of a setting to retrieve.}
\item{default}{The value to return if the requested setting has not been set.}
\item{session}{The Shiny \code{session} object. (You should probably just use the
default.)}
}
\value{
For \code{tableau_setting()}, an object that was previously saved via
\code{update_tableau_settings_async}, or the \code{default} value. For
\code{tableau_settings_all()}, a named list containing all of the settings.
}
\description{
Call \code{tableau_setting()} from an extension's \code{server} and \code{config_server}
functions to read settings that were previously set via
\code{\link[=update_tableau_settings_async]{update_tableau_settings_async()}}.
}
\details{
A typical extension will call \code{\link[=update_tableau_settings_async]{update_tableau_settings_async()}} from
\code{config_server} to write settings, and \code{\link[=tableau_setting]{tableau_setting()}} from \code{server} to
read settings.
Note that both \code{tableau_setting()} and \code{tableau_settings_all()} are reactive
reads; in order to call these functions you must be inside a reactive context
(i.e. reactive expression, reactive observer, output render expression, or
\code{\link[=isolate]{isolate()}}), and future updates to a setting that was read will cause
reactive invalidation.
}
|
ac95f538715b3e92d17bad50fcb575502a24dfd5
|
9b049c49f418bd480b8adbbb25c81a40c30eb825
|
/experiments/experiment1/experiment1.R
|
587423eca61664f51b412a2d97a4914f543c4c5a
|
[] |
no_license
|
parvsaxena/forestpacking
|
28c1cc183588a05d23a0c81f4149ac3a6a055947
|
dbaa9650976534c4ae5d89482808b5d182c62d66
|
refs/heads/master
| 2020-09-07T19:19:15.662931
| 2019-04-18T16:29:49
| 2019-04-18T16:29:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,982
|
r
|
experiment1.R
|
# ---
# title: "Test speed-up and scale-up of R-Rerf against xg and ranger"
# author: "James Browne"
# date: "Oct 10 2017"
# output: html_document
# ---
library(ggplot2)
library(plyr)
# Summarize the column `varname` of `data` within each group defined by
# `groupnames`, returning one row per group with the central value (column
# named after `varname`) and its standard deviation (`sd`).
# NOTE(review): the summary value is labelled "mean" but is computed with
# median() -- confirm which statistic is actually intended.
data_summary <- function(data, varname, groupnames){
  require(plyr)  # NOTE(review): plyr is already attached at the top of the file; require() here is redundant
  # Per-group worker: central value + sd of the requested column, NAs dropped.
  summary_func <- function(x, col){
    c(mean = median(x[[col]], na.rm=TRUE),
      sd = sd(x[[col]], na.rm=TRUE))
  }
  data_sum<-ddply(data, groupnames, .fun=summary_func,
                  varname)
  # Rename the "mean" column to the original variable name (plyr::rename).
  data_sum <- rename(data_sum, c("mean" = varname))
 return(data_sum)
}
# Load the timing results (headerless CSV) and order the method factor
# levels for plotting; "ideal" is appended as an extra (empty) level.
mydata <- read.csv(file="experiment1.csv", header=FALSE)
mydata$V2 <- relevel(mydata$V2, " Bin")
mydata$V2 <- relevel(mydata$V2, " Stat")
mydata$V2 <- relevel(mydata$V2, " DF-")
mydata$V2 <- relevel(mydata$V2, " DF")
mydata$V2 <- relevel(mydata$V2, " BF")
mydata$V2 <- factor(mydata$V2, levels=c(levels(mydata$V2),"ideal"))
mydata$V4 <- as.factor(mydata$V4)
mydata$V2 <- as.factor(mydata$V2)
mydata$V3 <- as.factor(mydata$V3)
mydata$V5 <- as.numeric(mydata$V5)
# Convert total times to per-observation latency; the divisors are the
# dataset-specific test-set sizes.
z <- mydata$V3 == " MNIST"
mydata[z,]$V6 <- mydata[z,]$V6/10000
z <- mydata$V3 == " Higgs"
mydata[z,]$V6 <- mydata[z,]$V6/25000
z <- mydata$V3 == " Allstate"
mydata[z,]$V6 <- mydata[z,]$V6/50000
# Collapse to one row per (method, dataset, V4, thread-count) group.
mydata <- data_summary(mydata,varname="V6",groupnames=c("V2","V3","V4","V5"))
# Shared legend/axis theme for the latency plot (rotated y tick labels).
leg <- theme(legend.text = element_text(size = 12), legend.title=element_text(size = 12), plot.title = element_text(size =12 , face="bold"), axis.title.x = element_text(size=12), axis.text.x = element_text(size=12), axis.title.y = element_text(size=12), axis.text.y = element_text(size=10, angle=90), strip.text.x = element_text(size=12))
# Latency vs. thread count, one line per method, faceted by dataset (log y).
p <- ggplot(mydata, aes(x=V5, y=V6, group=V2, color=V2)) + geom_line(size=1)
p <- p + scale_fill_brewer(palette="Paired") + theme_minimal()
p <- p + labs(x = "Number of Threads Used", y =expression(paste("Mean Latency(", mu,"s)")))
p <- p + scale_y_continuous(trans = 'log10')
p <- p + scale_color_manual(values=c(" Bin+"="#b2182b", " Bin"="#ef8a62", " Stat"="#fddbc7", " DF"="#d1e5f0", " DF-"="#67a9cf", " BF"="#2166ac", "ideal"="black"), labels=c("BF","DF","DF-","Stat","Bin","Bin+","Ideal"), name=NULL, guide=guide_legend(nrow=1))
p <- p + leg
p <- p + facet_grid(. ~ V3)
p <- p + theme(strip.background = element_rect(fill="grey95"))
p <- p + guides(color=FALSE)
ggsave("scaling.png", width=6.5, height=1.75, units="in")
# Second theme variant (no y-label rotation) used by the speed-up plot below.
leg <- theme(legend.text = element_text(size = 12), legend.title=element_text(size = 12), plot.title = element_text(size =12 , face="bold"), axis.title.x = element_text(size=12), axis.text.x = element_text(size=12), axis.title.y = element_text(size=12), axis.text.y = element_text(size=10), strip.text.x = element_text(size=12))
# Within each group of `groupnames`, convert the timing column
# `varnameTimes` into a speed-up ratio: single-core time / time at each
# core count (the row where `varnameCores` == 1 defines the baseline).
data_speedUp <- function(data, varnameTimes, varnameCores, groupnames){
  require(plyr)  # NOTE(review): plyr is already attached at the top of the file; require() here is redundant
  # Per-group worker: divide the baseline (1-core) time by every time.
  summary_func <- function(x, col1, col2){
    oneCoreTime <- x[[col1]][x[[col2]]==1]
    x[[col1]] = oneCoreTime/x[[col1]]
    x
  }
  data_sum<-ddply(data, groupnames, .fun=summary_func, varnameTimes, varnameCores)
  return(data_sum)
}
# Drop the sd column produced by data_summary (keep V2..V6), then convert
# latencies to speed-up ratios relative to the single-thread run.
mydata <- mydata[,1:5]
mydata <- data_speedUp(mydata,varnameTimes="V6",varnameCores="V5",groupnames=c("V2","V3","V4"))
# Speed-up vs. thread count, one line per method, faceted by dataset; the
# unit-slope abline marks ideal linear scaling.
p <- ggplot(mydata, aes(x=V5, y=V6, group=V2, color=V2)) + geom_line(size=1)
p <- p + theme_minimal()
p <- p + guides(fill=FALSE)
p <- p + labs(x = "Number of Threads Used", y = "Speed Up")
p <- p + scale_color_manual(values=c(" Bin+"="#b2182b", " Bin"="#ef8a62", " Stat"="#fddbc7", " DF"="#d1e5f0", " DF-"="#67a9cf", " BF"="#2166ac", "ideal"="black"), labels=c("BF","DF","DF-","Stat","Bin","Bin+","Ideal"), name=NULL, guide=guide_legend(nrow=1))
p <- p + leg
p <- p + facet_grid(. ~ V3, scales="free")
p <- p + geom_abline(intercept = 0 , slope = 1, color="black", size=1)
p <- p + theme(legend.position="bottom",legend.margin=margin(t=-0.30, r=0, b=-0.25, l=0, unit="cm"))
p <- p + theme(strip.text.x=element_blank())
ggsave("speedUp.png", width=6.5, height=1.75, units="in")
|
62bbc63503f8b5e5fea5b724da220ac7fa8ec91e
|
6e4709c4a2b716998e3e14363eb2b35fc46cd1ee
|
/man/sel.line.Rd
|
c3e6cc9f7cd64c738f57e05e9346807de059dd21
|
[] |
no_license
|
tectronics/r4ss
|
3748ba1d019021d172d46f835a907e5ff29cf058
|
fd6090c0a6c20aa452cad30537d1ac89cb6c5db7
|
refs/heads/master
| 2018-01-11T15:02:39.579318
| 2014-07-12T00:20:45
| 2014-07-12T00:20:45
| 45,520,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,448
|
rd
|
sel.line.Rd
|
\name{sel.line}
\alias{sel.line}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ a function for drawing selectivity curves }
\description{
This function is primarily intended for use by the selfit function.
}
\usage{
sel.line(x, model, sp, min.dist, max.dist)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ vector of x values (age or length) }
\item{model}{ selectivity model "Double_Normal" or "Double_Logistic" }
\item{sp}{ vector of parameters }
\item{min.dist}{ minimum value for selectivity }
\item{max.dist}{ maximum value for selectivity }
}
% \details{
% ~~ If necessary, more details than the description above ~~
% }
% \value{
% ~Describe the value returned
% If it is a LIST, use
% \item{comp1 }{Description of 'comp1'}
% \item{comp2 }{Description of 'comp2'}
% ...
% }
% \references{ }
\author{ Tommy Garrison }
% \note{ ~~further notes~~
% % ~Make other sections like Warning with \section{Warning }{....} ~
% }
\seealso{ \code{\link{selfit}} }
\examples{
\dontrun{
plot(0, xlim = c(0, 50), ylim = c(0, 1),
xlab = 'Length', ylab = 'Selectivity', type = 'n',
xaxs = 'i', yaxs = 'i')
sel.line(model = 'Double_Normal', min.dist = 10, max.dist = 50,
sp = c(25, -0.5, 3, 3, -5, 0))
}
}
% % Add one or more standard keywords, see file 'KEYWORDS' in the
% % R documentation directory.
\keyword{ dplot }
% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
d9b06a60ecd4146832c81076dc36d80051c45622
|
f2bfd5ceae6bf32cebc28cf18740a8b44e010e7b
|
/pkg/retistruct/man/kr.yhat.cart.Rd
|
012eb4f2f511f824fde6fcc7f16ab90baac9da3a
|
[] |
no_license
|
davidcsterratt/retistruct
|
602972d127b7119df3fda54ac915228d7ac854d1
|
f7075b0a8ac84fdc9773300d553c26a11b45ce2e
|
refs/heads/master
| 2023-08-09T20:08:39.039964
| 2023-07-29T09:27:35
| 2023-07-29T09:27:35
| 25,682,590
| 5
| 7
| null | 2017-07-29T09:14:58
| 2014-10-24T10:05:33
|
R
|
UTF-8
|
R
| false
| true
| 712
|
rd
|
kr.yhat.cart.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kernel-density.R
\name{kr.yhat.cart}
\alias{kr.yhat.cart}
\title{Kernel regression on sphere using Fisherian density with
Cartesian coordinates}
\usage{
kr.yhat.cart(r, mu, y, kappa)
}
\arguments{
\item{r}{Locations at which to estimate dependent variables in
Cartesian coordinates}
\item{mu}{Locations in Cartesian coordinates (independent variables)}
\item{y}{Values at locations (dependent variables)}
\item{kappa}{Concentration parameter}
}
\value{
Estimates of dependent variables at locations \code{r}
}
\description{
Kernel regression on sphere using Fisherian density with
Cartesian coordinates
}
\author{
David Sterratt
}
|
e8746c35f9aa6c9c05c7d99bc1ffb83ae965bed0
|
1e921145eebf8c82188a760d9ed47df1716ed00b
|
/05_QuestionIdentifications.r
|
96ceb62209e00b709317f2eaab1377ac51d1d30c
|
[] |
no_license
|
umisrcps/survey-question-database
|
40595538cb2a53c2c4d99a9b46b4fdf4b714c42a
|
cf7be2ed7381e47da46d0ed4b77b3d9567989c66
|
refs/heads/master
| 2022-04-02T04:10:57.006801
| 2020-02-25T14:58:05
| 2020-02-25T14:58:05
| 229,795,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,793
|
r
|
05_QuestionIdentifications.r
|
library(weights)
library(foreign)
library(memisc)
library(data.table)
library(quanteda)
## Load Key Files
load("EachFileExports/02-02_ToplineFiles.rdata")
load("EachFileExports/04-02_QuestionInformationFromDataFiles.rdata")
# Collapse a topline table into one row per question: the question id, the
# first wording seen for it, and all response texts sorted and joined with
# "; ". Questions whose id is NA or contains "Number of items" are dropped.
# Returns a character matrix with rownames = question ids and columns
# QuestionTLT / wordingTLT / respsTLT.
toplinetransform <- function(x){
  all_ids <- as.character(unique(x$QuestionID))
  keep_ids <- all_ids[!grepl("Number of items", all_ids) & !is.na(all_ids)]
  summarise_question <- function(qid) {
    rows <- x$QuestionID == qid
    c(QuestionTLT = qid,
      wordingTLT = as.character(x$QuestionTxt[rows][1]),
      respsTLT = paste(as.character(sort(x$RespTxt[rows])), collapse = "; "))
  }
  t(sapply(keep_ids, summarise_question))
}
# Build one topline summary per study and stack them into a single table.
# NOTE(review): 1:length(importtoplines) misbehaves on an empty list;
# seq_along(importtoplines) would be safer.
tlts <- lapply(1:length(importtoplines), function(x) data.frame(StudyID=names(importtoplines)[x], toplinetransform(importtoplines[[x]]), stringsAsFactors=FALSE))
names(tlts) <- names(importtoplines)
tltmerge <- rbindlist(tlts, fill=TRUE)
tltmerge$datatypeTLT <- "Topline"
# Stack the per-variable microdata information the same way.
vardatabase <- rbindlist(allinfopervar, fill=TRUE)
vardatabase$datatype <- "Microdata"
# Deduplicate on UniqueID, then drop rows with no usable variable name.
vdbpre <- vardatabase[!duplicated(vardatabase$UniqueID),]
vdbpre2 <- vdbpre[!(is.na(vdbpre$Varname) | vdbpre$Varname=="QNA"),]
# Normalised variable names: lower-case with underscores turned into spaces.
vdbpre2$vn <- gsub("_", " ", tolower(vdbpre2$Varname))
# Topline variable name = last dot-separated token of the Question field.
tltmerge$vntlt <- gsub("_", " ", sapply(strsplit(tolower(as.character(tltmerge$Question)), ".", fixed=TRUE), function(x) x[length(x)]))
# Case-insensitive join keys: "studyid~varname".
vdbpre2$UniqueIDlower <- tolower(vdbpre2$UniqueID)
tltmerge$UniqueIDlower <- tolower(paste(tltmerge$StudyID, tltmerge$vntlt, sep="~"))
tltmerge$UniqueIDlower[tltmerge$vntlt==""] <- NA
# Report duplicated question ids for manual cleanup in the source files.
duplicatedQuestionNames <- sort(table(vdbpre2$UniqueID)[table(vdbpre2$UniqueID)>1])
write.csv(duplicatedQuestionNames, "Troubleshooting/05-01_DuplicatedQuestionNames_FixInOriginals.csv")
tltpre <- tltmerge[!duplicated(tltmerge$UniqueIDlower),]
# First merge pass: match microdata variables to toplines by id + study.
mgd <- merge(vdbpre2, tltpre, by=c("UniqueIDlower", "StudyID"), all.x=TRUE)
tltpre2 <- tltpre[!(tltpre$UniqueIDlower %in% mgd$UniqueIDlower),]
tltorigwording <- tltpre2$wording
tltorigwording <- gsub("[.*]", "", tltorigwording)
tltsplitw1 <- sapply(strsplit(tltorigwording, " "), function(x) x[1])
tltdropfirstword <- (grepl(".", tltsplitw1, fixed=TRUE) | grepl("[0-9]", tltsplitw1) | grepl(":", tltsplitw1, fixed=TRUE)) & nchar(tltsplitw1)<15
tltorigwording[tltdropfirstword] <- gsub("^ ", "", gsub(" ", " ", sapply((1:length(tltorigwording))[tltdropfirstword], function(x) gsub(tltsplitw1[x], "", tltorigwording[x], fixed=TRUE))))
tltpre2$wordingsimp <- gsub(".", "", gsub("?", "", gsub(" ", " ", gsub(",|'", "", tolower(gsub("\x92", "'", gsub("\x85|\x97|\x91|\x92|\x96|\x93|\x94", "", tltorigwording))))), fixed=TRUE), fixed=TRUE)
mgdorigwording <- gsub(".", "", gsub("?", "", gsub(" ", " ", gsub(",|'", "", tolower(gsub("\x92", "'", gsub("\x85|\x97|\x91|\x92|\x96|\x93|\x94", "", mgd$wording))))), fixed=TRUE), fixed=TRUE)
mgdorigwording <- gsub("[.*]", "", mgdorigwording)
mgdsplitw1 <- sapply(strsplit(mgdorigwording, " "), function(x) x[1])
mgddropfirstword <- (grepl(".", mgdsplitw1, fixed=TRUE) | grepl("[0-9]", mgdsplitw1) | grepl(":", mgdsplitw1, fixed=TRUE)) & nchar(mgdsplitw1)<15
mgdorigwording[mgddropfirstword] <- gsub("^ ", "", gsub(" ", " ", sapply((1:length(mgdorigwording))[mgddropfirstword], function(x) gsub(mgdsplitw1[x], "", mgdorigwording[x], fixed=TRUE))))
mgd$wordingsimp <- mgdorigwording#gsub(".", "", gsub("?", "", gsub(" ", " ", gsub(",|'", "", tolower(gsub("\x92", "'", gsub("\x85|\x97|\x91|\x92|\x96|\x93|\x94", "", mgdorigwording))))), fixed=TRUE), fixed=TRUE)
mgd2 <- merge(mgd, tltpre2, by=c("StudyID", "wordingsimp"), all=TRUE)
mgd2$UniqueIDlower <- mgd2$UniqueIDlower.x
mgd2$UniqueIDlower[is.na(mgd2$UniqueIDlower)] <- mgd2$UniqueIDlower.y[is.na(mgd2$UniqueIDlower)]
mgd2$QuestionTLT <- mgd2$QuestionTLT.x
mgd2$QuestionTLT[is.na(mgd2$QuestionTLT)] <- mgd2$QuestionTLT.y[is.na(mgd2$QuestionTLT)]
mgd2$wordingTLT <- mgd2$wordingTLT.x
mgd2$wordingTLT[is.na(mgd2$wordingTLT)] <- mgd2$wordingTLT.y[is.na(mgd2$wordingTLT)]
mgd2$respsTLT <- mgd2$respsTLT.x
mgd2$respsTLT[is.na(mgd2$respsTLT)] <- mgd2$respsTLT.y[is.na(mgd2$respsTLT)]
mgd2$datatypeTLT <- mgd2$datatypeTLT.x
mgd2$datatypeTLT[is.na(mgd2$datatypeTLT)] <- mgd2$datatypeTLT.y[is.na(mgd2$datatypeTLT)]
mgd2$vntlt <- mgd2$vntlt.x
mgd2$vntlt[is.na(mgd2$vntlt)] <- mgd2$vntlt.y[is.na(mgd2$vntlt)]
mgd2$datatype <- factor((mgd2$datatype=="Microdata" & !is.na(mgd2$datatype)) + 2*(mgd2$datatypeTLT=="Topline" & !is.na(mgd2$datatypeTLT)), 1:3, c("Microdata", "Topline", "Both"))
mgd2$wordingsimp[mgd2$wordingsimp==" "|is.na(mgd2$wordingsimp)] <- ""
mgd2$resps[is.na(mgd2$resps)] <- mgd2$respsTLT[is.na(mgd2$resps)]
mgd2$resps[is.na(mgd2$resps)] <- ""
mgd2$vn[is.na(mgd2$vn)] <- mgd2$vntlt[is.na(mgd2$vn)]
mgd2$vn[is.na(mgd2$vn)] <- ""
mgd3 <- mgd2[!duplicated(mgd2$UniqueIDlower),]
md <- with(mgd3, data.frame(StudyID, UniqueID, UniqueIDlower, Varname, vn, vntlt, wordingsimp, wording, wordingTLT, resps, respsTLT, type, datatype, stringsAsFactors=FALSE))
md$wordingsimp[is.na(md$wordingsimp)] <- gsub(".", "", gsub("?", "", gsub(" ", " ", gsub(",|'", "", tolower(gsub("\x92", "'", gsub("\x85|\x97|\x91|\x92|\x96|\x93|\x94", "", md$wording))))), fixed=TRUE), fixed=TRUE)[is.na(md$wordingsimp)]
md$wordingsimp[is.na(md$wordingsimp)] <- gsub(".", "", gsub("?", "", gsub(" ", " ", gsub(",|'", "", tolower(gsub("\x92", "'", gsub("\x85|\x97|\x91|\x92|\x96|\x93|\x94", "", md$wordingTLT))))), fixed=TRUE), fixed=TRUE)[is.na(md$wordingsimp)]
md$respssimp <- gsub(".", "", gsub("?", "", gsub(" ", " ", gsub(",|'", "", tolower(gsub("\x92", "'", gsub("\xf1", "n", gsub("\xfa", "u", gsub("\xed", "i", gsub("\xe9", "e", gsub("\x85|\x97|\x91|\x92|\x96|\x93|\x94|\xaf", "", md$resps))))))))), fixed=TRUE), fixed=TRUE)
md$wordingresponses <- paste(md$vn, md$wordingsimp, md$respssimp, sep="; ")
md$vnames <- paste(md$vn, md$wordingsimp, sep="; ")
save(md, file="EachFileExports/05-01_AllQuestionWordingResponses.rdata")
|
deb17caa2e3c81e9aa2c56f893285fa2bc42e0bc
|
26ae3eeb7ac438cd4ff6618c6728a574d45dc86b
|
/Code/Ch14-Missing Value.R
|
6765812b4f2158f16001a1464e14915150a7bdda
|
[] |
no_license
|
queenqueen89/Data_Wrangling
|
628696ba5c336f7ee47e6327f212d59223ae001f
|
66286537f9a5704aad50966ea0a0cadf51c638f2
|
refs/heads/main
| 2023-02-24T19:19:34.668500
| 2021-02-04T04:57:27
| 2021-02-04T04:57:27
| 335,839,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,845
|
r
|
Ch14-Missing Value.R
|
#----------Chapter 14 Missing Values----------#
# Tutorial script: detecting, recoding, and excluding NA values.
#---------------------------------------------#
#### 1. Test for Missing Values
# (1) test a vector with NA
(x <- c(1:4, NA, 6:7, NA))
is.na(x)
# (2) test a matrix with NA
df <- data.frame(col1 = c(1:3, NA),
                 col2 = c("this", NA,"is", "text"),
                 col3 = c(TRUE, FALSE, TRUE, TRUE),
                 col4 = c(2.5, 4.2, 3.2, NA),
                 stringsAsFactors = FALSE)
is.na(df)
# (3) identify NAs in a specific data frame column
is.na(df$col4)
# (4) identify location of NAs
which(is.na(x))
# (5) identify number of NAs
sum(is.na(df))
#---------------------------------------------#
#### 2. Recode Missing Values
# (1) vector: recode missing values with the mean (of the non-NA values)
x[is.na(x)] <- mean(x, na.rm = TRUE)
round(x, 2)
is.na(x)
# (2) data frame: missing values=99
(df <- data.frame(col1 = c(1:3, 99), col2 = c(2.5, 4.2, 99, 3.2)))
# (3) data frame: recode missing values=99 to NAs
# (note: this replaces 99 in EVERY column of the data frame)
df[df == 99] <- NA
df
#---------------------------------------------#
#### 3. Excluding Missing Values
# (1) A vector with missing values
(x <- c(1:4, NA, 6:7, NA))
# (2) cannot calculate mean with NA values (returns NA)
mean(x)
# (3) find mean with NA values removed first
mean(x, na.rm = TRUE)
# (4) data frame with missing values
(df <- data.frame(col1 = c(1:3, NA),
                  col2 = c("this", NA,"is", "text"),
                  col3 = c(TRUE, FALSE, TRUE, TRUE),
                  col4 = c(2.5, 4.2, 3.2, NA),
                  stringsAsFactors = FALSE))
# (5) data frame: find complete cases (TRUE when the row has no NA)
complete.cases(df)
# (6) subset to get complete cases (without any NA)
# Method 1
na.omit(df)
# Method 2
df[complete.cases(df), ]
# (7) subset to get incomplete cases (rows with at least one NA)
df[!complete.cases(df), ]
#---------------------------------------------#
|
ff63a7d741ca8455844c3825755e920ea6201457
|
0eb5f5076303791fbf3c8a9d25c568c3867ba86f
|
/Kedar_Shiny_Tutorial/execution_script.R
|
0fc1cbcbf054dd4d31833f53a2fad94a682fc1c2
|
[] |
no_license
|
aha2140/team-shiny
|
10dde0a5b55c92fa03ec4c2b8ed90529565bb80d
|
b7fa128e20e95dba94fe88b0d3135ba50ef5c6ec
|
refs/heads/master
| 2021-03-12T23:08:32.669448
| 2015-04-26T15:33:04
| 2015-04-26T15:33:04
| 31,603,835
| 0
| 5
| null | 2019-08-22T19:53:58
| 2015-03-03T14:56:18
|
R
|
UTF-8
|
R
| false
| false
| 843
|
r
|
execution_script.R
|
# Walk-through script for the RStudio Shiny tutorial: runs the local
# "old_faithful" apps and then the built-in lesson-1 example apps.
library("shiny")
getwd()
# NOTE(review): absolute, machine-specific path -- this breaks on any other
# machine; prefer running from the project root or using relative paths.
setwd("/Users/kedarpatil/Documents/St-4701 - Data Visualization/team-shiny/Kedar_Shiny_Tutorial")
runApp("old_faithful")
runApp("old_faithful", display.mode="showcase")
runApp("old_faithful_dynamic")
#GO FURTHER - LESSON1
# Location of the bundled example apps.
system.file("examples", package="shiny")
runExample("01_hello", display.mode="showcase") # a histogram
runExample("02_text", display.mode="showcase") # tables and data frames
runExample("03_reactivity") # a reactive expression
runExample("04_mpg") # global variables
runExample("05_sliders") # slider bars
runExample("06_tabsets") # tabbed panels
runExample("07_widgets") # help text and submit buttons
runExample("08_html") # Shiny app built from HTML
runExample("09_upload") # file upload wizard
runExample("10_download") # file download wizard
runExample("11_timer") # an automated timer
5a9412593836b433cdbfe93390d13327c0d21590
|
1b141d6887525dd038885603ba0525a4799fb297
|
/R/PROC_LOC.R
|
1781f3459ed1ba9f1baf9c954d80c68ac8645b90
|
[
"MIT"
] |
permissive
|
mjkarlsen/traumaR
|
c7b032ad24f5511d891348cf40b764e14a4d784b
|
dd52deec08282e8955c5fea6ad1fb7b2a80e0a9f
|
refs/heads/master
| 2022-09-17T04:17:13.452037
| 2020-06-06T18:47:08
| 2020-06-06T18:47:08
| 260,229,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,017
|
r
|
PROC_LOC.R
|
#' Location of Medical Procedures
#'
#' Translates the numeric procedure-location codes found in PTOS data
#' (columns typically ending with \code{_lc}) into human-readable labels.
#'
#' @param col A column in PTOS data that typically ends with _lc
#'
#' @return A character vector the same length as \code{col} giving the
#'   human-friendly location label for each code; unmapped codes become
#'   \code{"Unknown"}.
#' @export
proc_lc <- function(col) {
  # NOTE(review): `case.` is presumably tidytable's vectorised
  # condition/value helper with a `default` argument -- confirm the import.
  col_value <- case.(col == 1, "ED",
                     col == 2, "OR",
                     col == 3, "ICU",
                     col == 4, "Med/Surg Floor",
                     col == 5, "Step-down Unit",
                     col == 6, "Radiology",
                     # fixed typo: was "Neclear Medicine"
                     col == 7, "Nuclear Medicine",
                     col == 8, "Burn Unit",
                     col == 9, "PMR",
                     col == 10, "Minor Surgery Unit",
                     col == 13, "PACU",
                     col == 14, "Postmortem" ,
                     col == 15, "EMS",
                     col == 16, "Referring Facility",
                     col == 17, "Special Procedure Unit",
                     # fixed typo: was "Angoigraphy"
                     col == 18, "Angiography",
                     default = "Unknown")
  return(col_value)
}
|
69d16e4622c8e6666ff515121b2a13034647c9d2
|
2c687e32ccf9790bad7675cfc7f8e49d3e6396a6
|
/R/get_dCT_singleRep.R
|
90c8a9502516b30b19a1d4babc9ea64c9cee4a44
|
[] |
no_license
|
pythseq/WARPqpcr
|
8f19b9b2b3b4c95920b9bab341f67ffc50e16a38
|
75833ccd8f05612cc4cc7ea68f4c21cada64c858
|
refs/heads/master
| 2021-02-18T15:28:06.686687
| 2020-02-25T23:07:07
| 2020-02-25T23:07:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,507
|
r
|
get_dCT_singleRep.R
|
#' @title get_dCT_singleRep
#'
#' @description Calculates the dCT values for all samples in situations where
#' there is only a single biological replicate per condition.
#'
#' @param sampleObj A qPCRBatch object generated from the \code{\link{readSampleSheet}} function.
#' @param hkg A character vector containing the housekeeping gene to use for dCT calculations.
#'
#' @details This function is primarily intended for pilot/preliminary data in which biological replicates have not
#' yet been produced or cases in which multiple vectors are being evaluated for efficiency (e.g. shRNA knockdowns, overexpression
#' constructs, etc.)
#'
#' @details The error for these calculations is propagated from the technical replicate standard deviation.
#'
#' @return A data.frame containing dCT values for each condition.
#'
#' @export
#'
#' @examples
#'
get_dCT_singleRep <- function(sampleObj, hkg){
  if(missing(hkg)){
    stop("Must provide a housekeeping gene (hkg)!")
  }
  # Average technical replicates; keeps per-replicate SD alongside the means.
  batchTechReps <- combineTechRepsWithSD(sampleObj)
  cq_avg <- as.data.frame(exprs(batchTechReps))       # mean Cq per gene x sample
  cq_sd <- as.data.frame(se.exprs(batchTechReps))     # SD of technical replicates
  # dCT per sample (column): each gene's Cq minus the housekeeping gene's Cq.
  dct_test <- apply(cq_avg, 2, function(x) x - x[hkg])
  # Propagate error: sqrt(sd_gene^2 + sd_hkg^2).
  sd_prop_test <- apply(cq_sd, 2, function(x) sqrt((x^2) + (x[hkg]^2)))
  # Long format: one row per (gene, sample) with dCT and propagated error.
  dctMelt <- melt(as.matrix(dct_test), id.vars = rownames(dct_test))
  sdPropMelt <- melt(as.matrix(sd_prop_test), id.vars = rownames(sd_prop_test))
  colnames(sdPropMelt)[3] <- "ErrProp"
  dctErrPropDF <- merge(dctMelt, sdPropMelt)
  return(dctErrPropDF)
}
|
15123088e1631ed06dee3c6db942dc304d780a9a
|
f914fbd3a914030c7bc3da68b29ae6ee3c17b749
|
/Scripts/draftsEnStuff.R
|
38bc283cf70d3ccca3b6a38d120ceacb1ac63627
|
[] |
no_license
|
AlexHNova/eop
|
2b0b68544baffc78a18e007523cadd288ec4ea47
|
17447d3bb0e89af719b635533c9ee0317f8c9a2f
|
refs/heads/master
| 2016-09-01T14:49:17.014474
| 2016-01-08T13:56:33
| 2016-01-08T13:56:33
| 49,271,193
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,481
|
r
|
draftsEnStuff.R
|
library(lubridate)
source("C:/Users/Alex H/Dropbox (Nova SA)/Rfunctions/binconf.uur.R")
df_ibutton_data_classified$fire_start <- FALSE
splits <- split(x = df_ibutton_data_classified, f = df_ibutton_data_classified$stand_number)
# Flag ignition events per stand: fire_start becomes TRUE on rows where the
# "vuur" classification flips from "nie" (no fire) to "vuur" (fire)
# relative to the previous reading.
temp <- lapply(X = splits, FUN = function(stnd) {
  # A stand without a "vuur" column cannot be classified; return it unchanged.
  # (Fix: was `next`, which is invalid inside a function called by lapply and
  # errors at runtime with "no loop for break/next".)
  if (!("vuur" %in% names(stnd))) {
    return(stnd)
  }
  # seq_len(nrow)[-1] is 2..nrow and, unlike 2:nrow(stnd), is empty (rather
  # than c(2, 1)) for stands with fewer than two rows.
  for (r in seq_len(nrow(stnd))[-1]) {
    # Skip comparisons involving missing classifications.
    if (is.na(stnd[r, "vuur"]) | is.na(stnd[r-1, "vuur"])) {next}
    if ((stnd[r, "vuur"] == "vuur") & (stnd[r-1, "vuur"] == "nie")) {
      stnd[r, "fire_start"] <- TRUE
    }
  }
  return(stnd)
})
# Re-assemble the per-stand results into one data frame.
temp <- do.call("rbind", temp)
# Let's put a second change in here.
# Derive a combined type label plus hour/month/year columns for grouping.
temp$type <- paste(temp$energy_type, temp$insulation_type, sep = "_")
temp$hod <- hour(temp$date)
temp$mo <- month(temp$date)
temp$yr <- year(temp$date)
# One binomial-confidence plot of ignition proportion by hour, per type.
splits <- split(x = temp, f = temp$type)
catch <- lapply(X = splits, FUN = function(t) {
  #t$hod <- as.factor(t$hod)
  # t <- t[which(t$fire_start == "TRUE"),]
  # t_tbled <- table(t$hod, t$fire_start, exclude = NULL)
  # t_tbled$hod <- rownames(t_tbled)
  #
  #ggplot(data = t_tbled, mapping = aes(x = hod, y = "TRUE")) + geom_bar()
  tpe <- unique(t$type)
  nobs <- nrow(t)
  nstands <- length(unique(t$stand_number))
  ttle <- paste(tpe, "- Proportion of fires ignited\n by hour of day (with 95% CI; nobs = ", nobs, "; n_stands = ", nstands, ")", sep = "")
  # binconf.uur is sourced from Rfunctions/binconf.uur.R at the top of the file.
  ctch <- binconf.uur(x = t, uurvar = "hod", vuurvar = "fire_start", vuurnaam = "TRUE", maandvar = "mo", jaarvar = "yr", plot = "bar", stoor = FALSE, ttl = ttle, ylm = c(0, 0.1))
  return(ctch)
})
# ---------------------------------------------------------------------- #
# Just a change. Again.
# ---------------------------------------------------------------------- #
# Per stand: cumulative day index, days between consecutive weighings, a
# data-quality flag, and the per-day average coal usage between weighings.
stand_df_list <- split(x = df_coal_data_with_calc_cols, f = df_coal_data_with_calc_cols$stand_number)
stand_df_list <- lapply(X = stand_df_list, FUN = function(x) {
  x$cumday <- x$yrday - min(x$yrday)
  x$days_between <- NA_integer_
  x$data_quality <- "theo"
  # NOTE(review): 2:nrow(x) counts down (2, 1) when nrow(x) < 2 -- guard with
  # seq_len(nrow(x))[-1] if single-row stands can occur.
  for (r in 2:nrow(x)) {
    x[r, "days_between"] <- x[r, "cumday"] - x[r-1, "cumday"]
    # Rows with no purchases since the last weighing are empirical readings.
    if (x[r, "units_bought_since_last_weighing"] < 1) {
      x[r, "data_quality"] <- "emp"
    }
  }
  x$mom_day_avg <- x$usage_net_kg / x$days_between
  return(x)
})
# ---------------------------------------------------------------------- #
# Print the maximum per-day average for each stand (quick sanity check).
sapply(X = stand_df_list, FUN = function(s) {
  print(max(s$mom_day_avg))
})
# ---------------------------------------------------------------------- #
# Per fieldworker: mean and count of the per-day average, by data quality.
# NOTE(review): the lapply result is not assigned or printed -- when this
# file is source()d the summaries are silently discarded.
fwsplits <- split(x = df_coal_data_wcc, f = df_coal_data_wcc$fieldworker_name)
lapply(X = fwsplits, FUN = function(x) {
  #print(summaryBy(data = x, formula = mom_day_avg ~ data_quality, FUN = max, na.rm = TRUE))
  summaryBy(data = x, formula = mom_day_avg ~ data_quality, FUN = function(x) {c(mean = mean(x, na.rm = TRUE), n = length(x))} )
})
# Parse metadata (stand number, read date, button place) out of each file
# path in `subdirs`.
# NOTE(review): two apparent bugs here --
#   1) gsub() with a length-2 `pattern` only uses the first element ("basic/")
#      and warns, so "full/" is never stripped;
#   2) the final `return()` returns NULL, discarding everything parsed, so
#      `nms` ends up as a list of NULLs. Presumably the intent was to return
#      c(stand_number, read_date, button_place) -- confirm and fix.
nms <- apply(X = as.array(subdirs), MARGIN = 1, FUN = function(nm) {
  nm <- tolower(nm)
  nm <- gsub(pattern = ".csv", replacement = "", x = nm, fixed = TRUE)
  nm <- gsub(pattern = c("basic/", "full/"), replacement = "", x = nm, fixed = TRUE)
  nm <- gsub(pattern = "eop_", replacement = "", x = nm, fixed = TRUE)
  # Underscore-separated tokens: stand number, ?, read date, button place.
  str_elements <- (strsplit(x = nm, split = "_", fixed = TRUE))[[1]]
  stand_number <- str_elements[[1]]
  read_date <- str_elements[[3]]
  button_place <- str_elements[[4]]
  # Keep only the last character of the button-place token.
  button_place <- substr(x = button_place, start = nchar(button_place), stop = nchar(button_place))
  return()
})
|
e24a82bfe7b1a9d1a569556cbf2dd39a9b21e9d6
|
b35d2f33a64884781714b46d0e279d5dd5727254
|
/data_preparation/R_batch2/step9_prepare_covariate_data_AA.R
|
42717203dea4faf8f9544df54debdef87bf5eaf7
|
[] |
no_license
|
jasa-acs/Mapping-Tumor-Specific-Expression-QTLs-in-Impure-Tumor-Samples
|
229e67c269ccd478c038fbbdaccd5c473bb56c73
|
12975b1aa4f9fa96777bff1c546fa08d45fb5dcc
|
refs/heads/master
| 2020-09-03T23:49:39.778711
| 2019-12-01T20:29:11
| 2019-12-01T20:29:11
| 219,604,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,151
|
r
|
step9_prepare_covariate_data_AA.R
|
##
## take 0 gene expression PCs and 2 genotype PC
##
# --------------------------------------------------------
# read in sample information
# --------------------------------------------------------
setwd("/Volumes/Moon/TCGA_BRCA/info/")
sam = read.table("brca_samples2use_after_qc_female.txt",
header=TRUE, sep="\t", as.is=TRUE)
dim(sam)
sam[1,]
pam50 = read.table("BRCA.819.DataFreeze.20120912_pam50scores.txt",
header=TRUE, sep="\t", as.is=TRUE)
dim(pam50)
pam50[1,]
all(sam$RNA_RNAseq.Tumor %in% pam50$bcr_patient_barcode)
# --------------------------------------------------------
# read in clinical information
# --------------------------------------------------------
setwd("/Volumes/Moon/TCGA_BRCA/clinical_brca/")
clin = read.table("clinical_patient_brca.txt", quote="",
header=TRUE, sep="\t", as.is=TRUE, na.string="[Not Available]")
dim(clin)
names(clin)
all(sam$DNAnorml_patientID %in% clin$patient_id)
# --------------------------------------------------------
# check gene expression PCs
# --------------------------------------------------------
setwd("/Volumes/Moon/TCGA_BRCA/data/")
PCs = read.table("PCs_log_TReC_AA.txt", sep="\t", header=TRUE)
dim(PCs)
PCs[1:2,1:5]
PCs$id = paste("expression", PCs$id, sep="")
# --------------------------------------------------------
# take a subset of sample information
# --------------------------------------------------------
sam1 = sam[match(names(PCs)[-1], sam$DNAnorml_patientID),]
dim(sam1)
all(names(PCs)[-1] == sam1$DNAnorml_patientID)
table(sam1$RNA_institution, useNA="ifany")
table(sam1$RNA_type, useNA="ifany")
table(sam1$RNA_plate, useNA="ifany")
table(sam1$gender, useNA="ifany")
pam50 = pam50[match(sam1$RNA_RNAseq.Tumor, pam50$bcr_patient_barcode),]
dim(pam50)
all(sam1$RNA_RNAseq.Tumor == pam50$bcr_patient_barcode)
table(pam50$tissue, useNA="ifany")
table(pam50$Call, useNA="ifany")
table(pam50$Confidence==1)
summary(pam50$Confidence[pam50$Confidence < 1])
clin = clin[match(sam1$DNAnorml_patientID, clin$patient_id),]
dim(clin)
all(sam1$DNAnorml_patientID == clin$patient_id)
table(!is.na(clin$days_to_death), !is.na(clin$days_to_last_known_alive), useNA="ifany")
table(clin$tumor_tissue_site, useNA="ifany")
table(clin$race, useNA="ifany")
table(clin$gender, useNA="ifany")
table(sam1$gender, clin$gender, useNA="ifany")
table(clin$breast_carcinoma_estrogen_receptor_status, useNA="ifany")
table(clin$vital_status, useNA="ifany")
summary(clin$age_at_initial_pathologic_diagnosis)
table(sam1$RNA_institution, clin$tissue_source_site, useNA="ifany")
table(clin$ajcc_tumor_stage_code, useNA="ifany")
table(clin$ajcc_neoplasm_disease_stage, useNA="ifany")
table(clin$ajcc_neoplasm_disease_stage, clin$ajcc_tumor_stage_code, useNA="ifany")
table(clin$ajcc_cancer_metastasis_stage_code, clin$ajcc_neoplasm_disease_stage, useNA="ifany")
table(clin$tissue_prospective_collection_indicator, useNA="ifany")
# --------------------------------------------------------
# check the relation between PCs and covariates
# --------------------------------------------------------
# Regress each of the first 20 expression PCs on candidate technical and
# clinical covariates and record each term's ANOVA p-value.
# (Sequential sums of squares, so the order of terms matters.)
pvals = matrix(NA, nrow=20, ncol=7)
for(i in 1:20){
  PCi = as.numeric(PCs[i,-1])  # PC scores across samples (drop the id column)
  ai = anova(lm(PCi ~ as.factor(sam1$RNA_institution)
  + as.factor(sam1$RNA_plate)
  + as.factor(clin$ajcc_neoplasm_disease_stage)
  + as.factor(clin$ajcc_tumor_stage_code)
  + as.factor(clin$breast_carcinoma_estrogen_receptor_status)
  + clin$age_at_initial_pathologic_diagnosis
  + as.factor(pam50$Call)
  ))
  pvals[i,] = ai$Pr[1:7]
}
colnames(pvals) = c("institution", "plate", "disease_stage", "stage_code", "ER", "age", "pam50")
signif(pvals,2)
table(sam1$RNA_plate, pam50$Call)
table(sam1$RNA_institution, pam50$Call)
chisq.test(sam1$RNA_plate, pam50$Call)
chisq.test(sam1$RNA_institution, pam50$Call)
table(sam1$RNA_institution, useNA="ifany")
table(sam1$RNA_plate, useNA="ifany")
table(sam1$RNA_institution, sam1$RNA_plate)
# --------------------------------------------------------
# check genotype PCs
# --------------------------------------------------------
PCsDNA = read.table("PCs_genotype_AA.txt", sep="\t", header=TRUE)
dim(PCsDNA)
PCsDNA[1:2,1:5]
PCsDNA$id = paste("genotype", PCsDNA$id, sep="")
sam1 = sam[match(names(PCsDNA)[-1], sam$DNAnorml_patientID),]
dim(sam1)
all(names(PCs)[-1] == sam1$DNAnorml_patientID)
table(sam1$DNAnorml_plate)
table(sam1$DNAnorml_institution)
table(sam1$DNAnorml_type)
table(sam1$DNAnorml_portion)
# Same check for the genotype PCs: ANOVA p-values of each of the first 20
# PCs against DNA-batch covariates (sequential sums of squares).
pvals = matrix(NA, nrow=20, ncol=4)
for(i in 1:20){
  PCi = as.numeric(PCsDNA[i,-1])  # PC scores across samples (drop the id column)
  ai = anova(lm(PCi ~ as.factor(sam1$DNAnorml_plate)
  + as.factor(sam1$DNAnorml_institution)
  + as.factor(sam1$DNAnorml_type)
  + as.factor(sam1$DNAnorml_portion)))
  pvals[i,] = ai$Pr[1:4]
}
signif(pvals,2)
# --------------------------------------------------------
# write out results
# --------------------------------------------------------
write.table(PCsDNA[1:2,], file = "covariates_AA.txt", append = FALSE,
quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
|
ece3d08ef3f7013cff2196079ae9b2e3eee2086b
|
4d6560931cb362449e07903abd7a3493e7d0f1ab
|
/mult-regression.R
|
37bec15e9d190a179a1663f6c0b4ad4ff17da956
|
[] |
no_license
|
RonnyldoSilva/My_R
|
9bee1c7c8da93e5a1d2048a285f0f40e3a86b196
|
8a80d6614e0570a5e60fc0ba31d909b37d8e802a
|
refs/heads/master
| 2023-01-14T15:00:23.518601
| 2020-11-18T12:48:49
| 2020-11-18T12:48:49
| 285,350,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 210
|
r
|
mult-regression.R
|
# Fit a simple linear regression of height on age from the ageandheight
# spreadsheet (sheet "Hoja2") and print the model summary.
library(readxl)
# NOTE(review): absolute, machine-specific path -- consider a relative path
# so the script is portable.
data <- read_excel("/media/ronn/hd_ubuntu/workspace/my_git/My_R/ageandheight.xls", sheet = "Hoja2") #Upload the data
fit <- lm(height ~ age, data=data)
summary(fit) # show results
#plot(fit)
|
cabd2bd400737324483e595bbfe6b102c7e47cbe
|
11f9de6c03b7115273d78ddd6fc2cb9297763d78
|
/countriesAbbr.r
|
fbd5af37bec6a2421622cc37d9a619fad71da7da
|
[] |
no_license
|
buckeye76guy/UEFA-Champions-Leage-2014-2015-Players-Data
|
4f0d61dc14cbc9c7e6a5e1093b256a08fc1cba6d
|
201d9e471979e1ee45f3ac5b54c29a7df6fbe989
|
refs/heads/master
| 2021-01-20T04:36:02.479440
| 2015-07-04T02:08:41
| 2015-07-04T02:08:41
| 38,470,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 551
|
r
|
countriesAbbr.r
|
# Scrape the world-atlas country code page and build a lookup table of
# full country names against their UN three-letter abbreviations.
library(RCurl)
library(XML)

page_url <- "http://www.worldatlas.com/aatlas/ctycodes.htm"
raw_html <- getURL(page_url, followlocation = TRUE)
parsed <- htmlParse(raw_html, asText = TRUE)

# Pull a table column by its cell class and strip embedded newlines.
scrape_column <- function(cell_class) {
  values <- xpathSApply(parsed, sprintf("//td[@class=\"%s\"]", cell_class), xmlValue)
  gsub("\n", "", values)
}

worldCountries <- data.frame(Name = scrape_column("cell01"),
                             UNabbr = scrape_column("cell03"),
                             stringsAsFactors = FALSE)
|
f040d7ae70cd4d92c625eb6c3f24df0bf8af3fc9
|
4117f83ea4a5fa2b1d6d67c664292b744894ba2b
|
/ui.R
|
e7d1f2e9ebb82b2f3df9a957d9f33607118300ff
|
[] |
no_license
|
red1k/ocr
|
50cd39f9ba004b5ba133d0b36ed52e837bb6b437
|
c4f145bc7738b8d3bde1f9315c4d58b78592f930
|
refs/heads/master
| 2023-07-25T15:03:07.353370
| 2021-08-28T18:16:46
| 2021-08-28T18:16:46
| 374,390,710
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 928
|
r
|
ui.R
|
# Shiny UI for a "QR code to TEXT" converter.
# Layout: a centred 8-column area with a direction selector, a multi-file
# upload control, a table of uploaded files, and three export buttons.
library(shiny)
library(DT)

fluidPage(
  fluidRow(
    column(2),
    column(8,
      h1("QR code to TEXT"),
      hr(),
      # Mongolian labels — "Turul songono uu": choose the type;
      # "Oroh"/"Garah": incoming/outgoing. Values "in"/"out" are read server-side.
      # NOTE(review): `inline = T` uses the reassignable alias; prefer TRUE.
      radioButtons("qrtype", h4("Turul songono uu:"), c("Oroh" = "in", "Garah" = "out"), inline = T),
    ),
    column(2),
  ),
  fluidRow(
    column(2),
    column(8,
      fileInput("upload", NULL, buttonLabel = "Upload...", multiple = TRUE, width = "100%"),
      dataTableOutput("files"),
      hr(),
      # One full-width download button per export target.
      # NOTE(review): the trailing commas inside these calls rely on
      # htmltools tolerating empty trailing arguments — confirm on upgrade.
      fluidRow(
        column(4,
          downloadButton('sgc', 'SGC', class = 'btn-block'),
        ),
        column(4,
          downloadButton('kbtl', 'KBTL', class = 'btn-block'),
        ),
        column(4,
          downloadButton('terra', 'TERRA', class = 'btn-block'),
        ),
      )
    ),
    column(2),
  ),
)
|
f4e8517c612c47a46c2d4f516e86539f4bd1222b
|
4d52315ba412db56ae6b179753de3a07d8736e40
|
/BINARY/BINtoSQL/BINtoSQL_R.R
|
2a416b4d0e8bf457e94512afe85984bc5cd2446a
|
[] |
no_license
|
ParfaitG/DATA_MIGRATION
|
7180bb80b5153642fa70d567ffc4c7387de83aa4
|
470c6fab6ef22ac9051211b8d20d1efc1d7d74db
|
refs/heads/master
| 2021-08-16T00:56:51.919753
| 2020-06-07T21:06:12
| 2020-06-07T21:06:12
| 41,966,932
| 14
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 341
|
r
|
BINtoSQL_R.R
|
# Migrate the serialized R data set into a SQLite database table.
library(RSQLite)

# Restore the previously serialized object.
cl_data <- readRDS("../BuildBINs/CLData_R.rds")

# Open the target database, replace the "cldata" table wholesale, close up.
db <- dbConnect(RSQLite::SQLite(), "CLData.db")
dbWriteTable(db, name = "cldata", value = cl_data, row.names = FALSE,
             overwrite = TRUE, append = FALSE)
dbDisconnect(db)

cat("Successfully migrated binary data to SQL database!\n")
|
d7ad812c001c6dc2181f1f682071b22a59ea0537
|
2b2a11cb47b38c673f2ef7409a6f92828e3c4af8
|
/ui.R
|
da6762a36ea4f347570061c822544b69015a6018
|
[] |
no_license
|
HariniAsaithambi/CourseEraDataProducts
|
a27e7ec6a52b395c4524046d3356c8aa10bd9f33
|
d1c5021f0a3793ee058fe8e98cf0222b94edaa86
|
refs/heads/master
| 2022-10-23T11:21:19.356563
| 2020-06-17T16:37:52
| 2020-06-17T16:37:52
| 273,016,230
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,036
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)

# UI for the diamond price predictor: the user picks carat (slider),
# cut and color (radio buttons); the server returns a plot and a price.
# Define UI for application
shinyUI(fluidPage(

  # Application title
  titlePanel("Diamond price application"),

  # Sidebar with options selectors
  sidebarLayout(
    sidebarPanel(
      h4(helpText("This application predicts the price of a diamond based on user's inputs.")),
      helpText("Select carat, cut and color of your choice and press the submit button to get the predicted Diamond price."),
      #slider for carat selection
      sliderInput("Dcarat", h3("Diamond Carat"), 0.2, 5.01, value = 1),
      #Radiobuttons for cut selection
      # NOTE(review): the "Good" choice maps to the value "^Good" — looks like
      # a regex anchor consumed server-side; verify against server.R before
      # changing.
      radioButtons("Dcut", h3("Diamond Cut"),
                   choices = list( "Fair" = "Fair", "Good" = "^Good",
                                   "Very Good" = "Very Good", "Premium" = "Premium",
                                   "Ideal" = "Ideal")),
      #Radiobuttons for color selection
      radioButtons("Dcolor", h3("Diamond Color"),
                   choices = list( "D" = "D", "E" = "E",
                                   "F" = "F", "G" ="G",
                                   "H" = "H", "I" = "I",
                                   "J" = "J")),
      #Adding submit button for delayed reactivity
      submitButton("Submit")
    ),

    # Output - plot, values, final price
    mainPanel(
      plotOutput("DiamondPlot"),
      h3("For the selected values of"),
      h3("Carat, Cut & Color"),
      h3(textOutput("value")),
      h3("Predicted value of the diamond is:",style = "color:red"),
      h3(textOutput("result"),style = "color:red"),
    )
  )
))
|
ec79a7acd120583e358e8c3c686f86df7cee328f
|
51cfad73fc3ee69231927a79d91b0846b9ab2a92
|
/markdown/render-scotland-summary.R
|
d5719d46bfa398c516e8738f71f1da518d2bdea8
|
[] |
no_license
|
Public-Health-Scotland/cocin-scotland
|
6c374189ab56fb1fe3f110867badee0f970f6a3d
|
b857b264c835885969eae28006d8d3fcf1d372f7
|
refs/heads/master
| 2023-07-15T08:51:51.492357
| 2021-07-28T09:48:05
| 2021-07-28T09:48:05
| 258,522,865
| 1
| 0
| null | 2021-08-30T13:56:17
| 2020-04-24T13:39:53
|
R
|
UTF-8
|
R
| false
| false
| 819
|
r
|
render-scotland-summary.R
|
###########################################################
# render-scotland-summary.R
# Written by: Analysts working in HPS Enhanced Surveillance
#             Cell - Hospital/ICU Work Stream
#
# Written/run on: R Studio Desktop (R 3.6.1)
#
# Knits the Scotland summary markdown report and saves the
# rendered .docx to the output folder, prefixed with the
# date of the latest data extract.
###########################################################


### 0 - Setup environment ----

source("extract-data/00_setup-environment.R")


### 1 - Knit markdown report ----

# Output file name: "<YYYY-MM-DD>_scotland-summary.docx", where the date
# is taken from the most recent extract.
report_file <- paste0(
  str_sub(latest_extract_date(), 1, 10),
  "_scotland-summary.docx"
)

rmarkdown::render(
  input = here("markdown", "scotland-summary.Rmd"),
  output_file = here("output", report_file)
)


### END OF SCRIPT ###
|
7165b60456dbfa9331fda7407575944cb2692b37
|
49b9074ebae34d7bcce916d455b785e1cc7043fc
|
/R/functions.R
|
0bf0f78048078d95c8aa0023ae97d3fe3c2a48b2
|
[] |
no_license
|
xulong82/bayes.glmm
|
0f9bc80d17592b9da9d371d8f8feaf2e892d807b
|
09d9fe690c9d0c3295e9913cd49fb969400da1bc
|
refs/heads/master
| 2021-01-10T01:20:51.358277
| 2018-04-11T02:06:03
| 2018-04-11T02:06:03
| 53,289,688
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,610
|
r
|
functions.R
|
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
#' Choose a stan model ("glm" or "glmm").
#'
#' @param model model type ("glm" or "glmm")
#' @param rebuild whether to rebuild the stan model from source (TRUE or FALSE)
#' @return A stan model in rstan::stanmodel class
#' @seealso \code{\link{extdata/*.stan}} for source code.
#' @export
#' @examples
#' stan_models("glm", rebuild = F)
#' stan_models("glmm", rebuild = F)
#' @author Xulong Wang, \email{xwang@@jax.org}

stan_models <- function(
  model = "dummy",
  rebuild = FALSE
) {

  if (! model %in% c("glm", "glmm")) stop("Error: \"model\" must be \"glm\" or \"glmm\".")

  mydir = find.package("bayes.glmm")

  # The glm/glmm branches only differ in the file name, so build the path
  # from `model` instead of duplicating each branch.
  if (rebuild) {
    # Recompile the model from its Stan source file.
    my_stan_model = stan_model(paste0(mydir, "/extdata/", model, ".stan"))
  } else {
    # Load the pre-compiled model shipped with the package. load() returns
    # the names of the restored objects (`myglm` or `myglmm`); fetch that
    # object regardless of its name.
    loaded = load(paste0(mydir, "/extdata/", model, ".rdt"))
    my_stan_model = get(loaded[1])
  }

  return(my_stan_model)
}
#' Fit a stan model.
#'
#' @param model A stan model of rstan:stanmodel class.
#' @param method Method to estimate model parameters ("optimizing", or "sampling").
#' @param type Variable type of the response variable ("linear", "binary", "categorical").
#' @param cov A matrix of model covariates.
#' @param geno A vector of variant genotype data (0, 1, 2).
#' @param pheno A vector of response variable data.
#' @return A list of model fit.
#' @seealso \code{\link{rstan package}} which this function wraps
#' @export
#' @examples
#' myGWAS_fit(mymodel, rebuild = F)
#' data("sample", package = "bayes.glmm")
#' pheno = data$pheno
#' geno = data$geno
#' cov = data$cov
#' L <- t(chol(data$K))
#' mymodel = stan_models(model = "glm")
#' y1 = myGWAS_fit(model = mymodel, method = "optimizing", type = "categorical", cov = cov, geno = geno[c(1, 3), ], pheno = pheno)
#' y2 = myGWAS_fit(model = mymodel, method = "sampling", type = "categorical", cov = cov, geno = geno[c(1, 3), ], pheno = pheno)
#' @author Xulong Wang, \email{xwang@@jax.org}
# Fit the supplied stan model once per variant in `geno`, either by MAP
# optimization (with standard errors from the Hessian) or by MCMC sampling.
# Returns a list keyed by variant name.
myGWAS_fit <- function(
  model = "dummy", # stan model
  method = "dummy", # inference method
  type = "dummy", # model type
  cov = NULL, # covariate data
  geno = NULL, # genotype data
  pheno = NULL, # phenotype data
  prior_non = 1, # non-informative prior
  prior_inf = 0, # informative prior
  iter = 1000, # mcmc iteration
  warmup = 200, # mcmc warnup
  chains = 3, # mcmc chains
  cores = 3 # mcmc cores
) {

  # NOTE(review): `inherits(model, "stanmodel")` would be the idiomatic check.
  if(class(model) != "stanmodel")
    stop("Error: a stan model is required.")
  if(! method %in% c("optimizing", "sampling"))
    stop("Error: \"method\" must be \"optimizing\" or \"sampling\".")
  if(! type %in% c("linear", "binary", "categorical"))
    stop("Error: \"type\" must be \"linear\", \"binary\", or \"categorical\".")
  if(any(is.na(cov)))
    stop("Error: \"cov\" is required.")
  if(any(is.na(geno)))
    stop("Error: \"geno\" is required.")
  if(any(is.na(pheno)))
    stop("Error: \"pheno\" is required.")

  N = nrow(cov) # sample number
  D = ncol(cov) # covariate number
  vId = rownames(geno) # variant name
  # Parameter names to extract from the fit: intercept, covariate effects,
  # and the genotype effect "p".
  pId = c("a", paste0("beta[", 1:D, "]"), "p") # covariate

  # K: number of response categories (1 for a linear response).
  if(type == "linear") K = 1
  if(type != "linear") K = length(unique(pheno))

  # NOTE(review): `L = L` pulls `L` (a Cholesky factor of the kinship matrix,
  # per the roxygen example) from the calling/global environment — it is not
  # a function argument. Confirm it is defined before calling.
  dt0 = list(N = N, K = K, D = D, cov = cov, L = L, pheno = pheno)
  dt0$prior_non = prior_non
  dt0$prior_inf = prior_inf

  fit = list()
  # One model fit per variant; each iteration swaps in that variant's genotypes.
  for (i in 1:length(vId)) {
    dt1 = within(dt0, {geno = geno[i, ]})
    if(method == "optimizing") {
      out = optimizing(model, verbose = FALSE, hessian = TRUE, algorithm = "LBFGS", data = dt1)
      # Drop all-zero rows/columns of the Hessian before inverting, then take
      # the standard error of the genotype effect "p".
      hessian = out$hessian
      hessian.idx = apply(out$hessian, 1, function(x) all(x == 0))
      hessian = hessian[! hessian.idx, ! hessian.idx]
      se1 = sqrt(diag(solve(-hessian)))["p"]
      fit[[i]] = c(out$par[pId], se1)
      names(fit[[i]]) = c(pId, "se.p")
    }
    if(method == "sampling") {
      out = sampling(model, data = dt1, chains = chains, iter = iter, warmup = warmup, cores = cores)
      fit[[i]] = summary(out, pars = pId)
    }
  }
  names(fit) = vId
  return(fit)
}
|
e40f154fc391a5efcf23f2281d6ebcac0d5d92fd
|
05eec74a8736ca433d1415f2c3231641ef8a3113
|
/man/statFitTbl1.Rd
|
294f7735e343d41ebad00c40d14a3131a715eec6
|
[
"MIT"
] |
permissive
|
nmedina17/oir
|
d4aaa1051696f2d5211f513857ff1544dec5c8da
|
e7a59cfe1d53ec9d3f7cba1e310bd6fb85998c78
|
refs/heads/main
| 2023-06-29T23:11:34.158595
| 2023-06-22T06:52:57
| 2023-06-22T06:52:57
| 420,233,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 458
|
rd
|
statFitTbl1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statsTbl1.R
\name{statFitTbl1}
\alias{statFitTbl1}
\title{Mutates lm and summary output, of already-summarized data, onto tbl--needs $varData1, $statTest}
\usage{
statFitTbl1(...nestedVarDataTbl, ....formula)
}
\arguments{
\item{....formula}{}
}
\value{
mutated tbl
}
\description{
Mutates lm and summary output, of already-summarized data, onto tbl--needs $varData1, $statTest
}
|
10d068b069fe28ca393df245b4a560bcd7c6d069
|
577bfdb8d89707b6d485aff04299c6b4e566f42d
|
/R/armando_dfs.R
|
042cc231cfeca95898cb12979b223561eb48afce
|
[] |
no_license
|
castillosebastian/NLU_legal_domain
|
ee64efe068ebe73093f9f198662bb1dd8d0a72f3
|
c162376021510656758bb13489e2b85bce802a5c
|
refs/heads/master
| 2023-01-02T15:30:21.690888
| 2020-11-03T15:43:17
| 2020-11-03T15:43:17
| 297,676,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,407
|
r
|
armando_dfs.R
|
# Building the data frames
# Libraries
source("~/estudios-misc/R/informe.R")
library(DataExplorer)
library(qdapRegex)

# Raw bulletin texts: one row per bulletin file.
boletin_infojuba_N50_RAW <- read_csv("data/boletin_infojuba_N50_RAW.csv")
colnames(boletin_infojuba_N50_RAW) <- c("file", "boletin")
boletines <- boletin_infojuba_N50_RAW

# Header: the first "(B <nro> <fecha>)" token of each bulletin carries the
# bulletin number (chars 6-8) and its date (from char 10 to the closing paren).
temp <- str_extract(boletines$boletin, "\\(\\s*(.*?)\\s*\\)")
bnro <- str_sub(temp, 6,8)
bfecha <- str_sub(temp, 10, -3)

# Create the bulletin-level data frame.
dfBoletines <- tibble(bnro, bfecha)
# Exploring text segmentation (to derive the basic "materia" field) showed
# too much variability and therefore very high false-negative rates, so that
# route was dropped for now.
# Summary extraction. Detection efficacy for summaries: 100%.

# Return every summary ("sumario") contained in bulletin `boletin_nro`.
# Relies on two globals built elsewhere (informe.R): `boletines` (raw
# bulletin table) and `indice_sumarios_xbol` (per-bulletin matrices of
# summary start/end offsets).
sumario_extract <- function(boletin_nro) {
  # Column 2 holds the end of each summary marker, column 1 its start;
  # a summary's text begins right after its marker and ends right before
  # the next marker (the last summary runs to the end of the bulletin).
  marca_inic = indice_sumarios_xbol[[boletin_nro]][, 2] + 1
  marca_fin = indice_sumarios_xbol[[boletin_nro]][, 1] - 1
  marca_fin = c(marca_fin[-1], nchar(boletines$boletin[boletin_nro]))

  # vapply instead of growing a vector in a loop; always returns character.
  vapply(
    seq_along(marca_inic),
    function(i) str_sub(boletines$boletin[[boletin_nro]], marca_inic[i], marca_fin[i]),
    character(1)
  )
}
# Build the case-summary data frame: one row per summary across all bulletins.
# NOTE(review): `data_frame()` is deprecated in favour of `tibble()`; also
# `rowwise()` leaves the result grouped — downstream code appears unaffected,
# but verify before extending.
dfSumarios <- data_frame()

for (i in seq_along(boletines$boletin)) {
  # For each summary of bulletin i, derive:
  #  - nro_fres_caratula: header up to the "Magistrados" line,
  #  - caratula: case title between curly quotes,
  #  - fres: first date found in the header (qdapRegex::ex_date),
  #  - nro_causa: first number in the header (commas/dots stripped),
  #  - magistrados: the "Magistrados votantes:" line contents.
  dfSumarios <- dfSumarios %>%
    bind_rows(tibble(sumario_extract(i)) %>% tibble() %>%
                rename(sumarios = 1) %>%
                mutate(bnro = dfBoletines$bnro[i]) %>%
                rowwise() %>%
                mutate(nro_fres_caratula = str_sub(sumarios, 0, str_locate(sumarios, '\nMagistrados')[1]-2),
                       caratula = str_remove_all(str_split(nro_fres_caratula,'“|”', simplify = T)[2], "\n|”"),
                       fres = unlist(ex_date(nro_fres_caratula, trim = T))[1],
                       nro_causa = as_numeric2(rm_number(str_remove_all(nro_fres_caratula, ',|\\.'), extract=TRUE)[[1]][1]),
                       magistrados = str_match(sumarios, "\nMagistrados votantes:\\s*(.*?)\\s*\n")[, 2]))
  dfSumarios
}
# Data exploration
library(DataExplorer)
plot_missing(dfSumarios)
plot_str(dfSumarios)
plot_bar(dfSumarios)

# Drop every row that has at least one NA.
dfSumarios <- dfSumarios %>% drop_na()

# Extract the summary body text: everything between the end of the
# "Magistrados votantes" line and the "(Texto" marker.
vec_sumario <- vector()

for (i in seq_along(dfSumarios$sumarios)) {
  magistrados = str_match(dfSumarios$sumarios[i], "\nMagistrados votantes:\\s*(.*?)\\s*\n")[, 2]
  vec_sumario[i] <- str_sub(dfSumarios$sumarios[i],
                            str_locate(dfSumarios$sumarios[i], magistrados)[2]+1,
                            str_locate(dfSumarios$sumarios[i], "\\(Texto")[1]-1)
}

dfSumText <- tibble(vec_sumario)
plot_missing(dfSumText)

# Attach the short summary text to the main data frame.
dfSumarios <- dfSumarios %>%
  mutate(sumario_txtcorto = dfSumText$vec_sumario)
# Build the data frame of doctrine excerpts: the text between the
# "DOCTRINA" heading and the "<<" menu marker of each summary.
vec_doctrina <- vector()

for (i in seq_along(dfSumarios$sumarios)) {
  vec_doctrina[i] <- str_sub(dfSumarios$sumarios[i],
                             str_locate(dfSumarios$sumarios[[i]], "DOCTRINA")[2]+1,
                             str_locate(dfSumarios$sumarios[[i]], "<<")[1]-1)
}

dfDoctrina <- data_frame(vec_doctrina)

# Keywords ("voces"): unique ALL-CAPS phrases in the doctrine text,
# transliterated to ASCII and joined into a single comma-separated string.
vec_voces <- vector()

for (i in seq_along(dfDoctrina$vec_doctrina)) {
  voces <- unique(unlist(rm_caps_phrase(iconv(dfDoctrina$vec_doctrina[i], to='ASCII//TRANSLIT'), extract=TRUE)))
  #voces <- str_remove_all(voces, "-[IVXLCDM]+")
  #voces <- voces[!(voces %in% c("", "-"))]
  voces <- str_c(voces, collapse = ",")
  vec_voces[i] <- voces
}

dfDoctrina$voces <- vec_voces
plot_missing(dfDoctrina)

# Merge the doctrine columns back onto the summary data frame.
dfSumarios <- dfSumarios %>%
  bind_cols(dfDoctrina)
# Final table 1: case / summary / keywords.
tabla_causa_sumarios_voces <- dfSumarios %>%
  select(-sumarios, -nro_fres_caratula, -vec_doctrina) %>%
  select(bnro, nro_causa, fres, caratula, magistrados, sumario = sumario_txtcorto, voces)

# Final table 2: bulletin, case and doctrine texts.
# Each doctrine block is split into its numbered entries ("\n<digit>. "),
# and each entry is tagged "<bnro>&<nro_causa>#<text>" so the identifiers
# can be separated back out below.
text_doctrina <- vector()

for (i in seq_along(dfSumarios$vec_doctrina)) {
  temp <- dfSumarios$vec_doctrina[i]
  nro_causa <- dfSumarios$nro_causa[i]
  bnro <- dfSumarios$bnro[i]

  marca_inic <- str_locate_all(temp, "\n[:digit:]+\\. ")[[1]][,1]
  marca_fin <- marca_inic[-1]
  marca_fin <- c(marca_fin, nchar(temp))

  temp2 <- vector()
  for(j in seq_along(marca_inic)) {
    temp2[j] <- rm_caps_phrase(str_sub(temp, marca_inic[j], marca_fin[j]))
  }
  temp2 <- str_c(bnro,'&', nro_causa, "#", temp2)
  text_doctrina <- c(text_doctrina, temp2)
}

text_doctrina <- tibble(text_doctrina)
text_doctrina <- text_doctrina %>% separate(text_doctrina, into = c("bnro", "text"), sep = "&")
text_doctrina <- text_doctrina %>% separate(text, into = c("nro_causa", "doctrina"), sep = "#")

# BUG FIX: the final select() was not piped into the chain — it ran as a
# standalone call (error: objects not found) and the column selection was
# never applied to tabla_bo_causa_textdoctrina.
tabla_bo_causa_textdoctrina <- text_doctrina %>%
  mutate(doctrina_nro = str_remove(str_sub(doctrina, 1,2), "\\.")) %>%
  mutate(doctrina = str_trim(str_sub(doctrina, 3))) %>%
  mutate(doctrina = str_remove(doctrina, "^\\. ")) %>%
  select(bnro, nro_causa, doctrina_nro, doctrina)
# Table formatting: coerce identifier columns to integer and dates to Date.
tabla_bo_causa_textdoctrina <- tabla_bo_causa_textdoctrina %>%
  mutate(bnro = as.integer(bnro),
         nro_causa = as.integer(nro_causa),
         doctrina_nro = as.integer(doctrina_nro))

tabla_causa_sumarios_voces <- tabla_causa_sumarios_voces %>%
  mutate(bnro = as.integer(bnro),
         fres = dmy(fres))

tabla_causa_sumarios_voces <- na.omit(tabla_causa_sumarios_voces)
tabla_bo_causa_textdoctrina <- na.omit(tabla_bo_causa_textdoctrina)

# write.table(tabla_bo_causa_textdoctrina, "tabla_bo_causa_textdoctrina.csv", sep = ",", col.names = T,
#             row.names = F)
# write.table(tabla_causa_sumarios_voces, "tabla_causa_sumarios_voces.csv", sep = ",", col.names = T,
#             row.names = F)

# Export as JSON, then read back to verify the round trip.
library(jsonlite)
tbsumarios_json <- toJSON(tabla_causa_sumarios_voces)
df <- fromJSON(tbsumarios_json)
write(tbsumarios_json, "tbsumarios.json")

df <- fromJSON(txt = "~/NLU_legal_domain/data/tbsumarios.json")
dplyr::setequal(tabla_causa_sumarios_voces, df)
colnames(df)
colnames(tabla_causa_sumarios_voces)
str(df)
str(tabla_causa_sumarios_voces)

tbdoctrina <- toJSON(tabla_bo_causa_textdoctrina)
write(tbdoctrina, "tbdoctrina.json")

# Corpus formatting: drop paywalled rulings and add parsed id/date columns.
corpus_textosfallos <- fromJSON(txt = "~/NLU_legal_domain/data/corpus_textosfallos.json")
corpus_fallosmetdat <- fromJSON(txt = "~/NLU_legal_domain/data/corpus_fallosmetdat.json")

corpus_textosfallos <- corpus_textosfallos %>%
  filter(!str_detect(corpus_textosfallos$textos_fallo, "TEXTO COMPLETO PRIVADO"))

corpus_fallosmetdat <- corpus_fallosmetdat %>%
  mutate(nro_causa = rm_number(id_causa, extract = T),
         fres = dmy(corpus_fallosmetdat$fecha_res))

# NOTE(review): `tbsumarios` is not defined above — presumably meant to be
# the object read back from tbsumarios.json (here named `df`); confirm.
sum(corpus_fallosmetdat$nro_causa %in% tbsumarios$nro_causa)
# Tools------------------------------------------------------------------------
# # Descomposición de sumarios---------------------------------------------------
#
# # nro_causa, fecha de resolución, caratula: OK
#
# nro_fres_caratula <- str_sub(bd$`sumario_extract(30)`[1],0, str_locate(bd$`sumario_extract(30)`[1], '”'))[2]
# nro_fres_caratula
# caratula <- str_split(nro_fres_caratula,'“', simplify = T)[2] %>% str_remove_all("\n") %>% str_remove_all("”")
# fres <- unlist(ex_date(nro_fres_caratula, trim = T))
# nro <- as_numeric2(rm_number(str_remove_all(nro_fres_caratula, ',|\\.'), extract=TRUE))
#
# # Magistrados Votantes: OK (puede haber falso negativo )
#
# magistrados <- str_match(bd$`sumario_extract(30)`, "\nMagistrados votantes:\\s*(.*?)\\s*\n")[, 2]
#
# # Voces-Palabras clavas: se repite con los encabezados de doctrina. La descarto
#
# # Sumario: OK (metodo eficaz con algo de basura)
#
# text_sumario2 <- vector()
#
# for (i in seq_along(magistrados)) {
#
# firstmark <- magistrados[i]
# lastmark <- "\\(Texto"
#
# temp <- str_sub(bd$`sumario_extract(30)`[i],
# str_locate(bd$`sumario_extract(30)`[i], firstmark)[2]+2,
# str_locate(bd$`sumario_extract(30)`[i], lastmark)[1]-2)
#
# text_sumario2[i] <- str_sub(temp, str_locate_all(temp, '\n')[[1]][2])
#
#
# }
#
# text_sumario2 <- tibble(text_sumario2)
#
#
#
# # extraccion de sumarios de doctrina y voces-----------------------------------
# # la cantidad de sumario es un dato valioso
# bd$`sumario_extract(30)`[3]
#
# # desde doctrina a menu
# temp <- str_sub(bd$`sumario_extract(30)`[3],
# str_locate(bd$`sumario_extract(30)`[3], "DOCTRINA")[2]+1,
# str_locate(bd$`sumario_extract(30)`[3], "<<")[1]-1)
#
# # voces
# voces <- unique(unlist(rm_caps_phrase(temp, extract=TRUE)))
# voces <- str_remove_all(voces, "-[IVXLCDM]+")
# voces <- voces[!(voces %in% c("", "-"))]
#
#
# # sumarios de doctrina
# text_doctrina <- vector()
#
# marca_inic <- str_locate_all(temp, "\n \n[:digit:]+\\. ")[[1]][,1]
# marca_fin <- marca_inic[-1]
# marca_fin <- c(marca_fin, nchar(temp))
#
# for (i in seq_along(marca_inic)) {
#
# text_doctrina[i] <- rm_caps_phrase(str_sub(temp, marca_inic[i], marca_fin[i]))
#
# }
#
# text_doctrina <- tibble(text_doctrina)
#
# text_doctrina %>% View()
#
#
#
|
d2520260ab15d275bac0ae0337e87195b5e0d350
|
ce5d5469fcc81b9047db1523811193a2edc97761
|
/raw_script/PA.R
|
95702815797db8fe57145bf2183d57d012b6fb08
|
[] |
no_license
|
gwgarrison/ml_project
|
5a1f8bd183199edec7a8f294c4578b821e0f50c5
|
be4823635f5336562d428202f9d18938f3ed56ee
|
refs/heads/master
| 2016-09-05T10:00:58.149724
| 2014-08-22T22:05:02
| 2014-08-22T22:05:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,777
|
r
|
PA.R
|
# project raw script
# Pick whichever drive holds the project directory (two known machines).
# NOTE(review): setwd() in scripts is fragile; a project-root helper such
# as here::here() would be more portable.
if (file.exists("F:/machine_learning/assignments/ml_project")) {
  setwd("f:/machine_learning/assignments/ml_project")
} else if (file.exists("E:/machine_learning/assignments/ml_project")){
  setwd("e:/machine_learning/assignments/ml_project")
} else {
  stop("no directory")
}

library(ggplot2); library(caret)

### Data Manipulation
# read the data
pml <- read.csv("./raw_data/pml-training.csv",stringsAsFactors=FALSE)

# eliminate variables with low variability
# using the nearZeroVar function of the caret package
nz <- nearZeroVar(pml)
pml <- pml[,-nz]

# keep only columns that are less than 90% NA
pml <- pml[,colSums(is.na(pml))/nrow(pml)<.9]

# get rid of user name and time related variables (first six columns)
pml <- pml[,-c(1:6)]

# make classe a factor variable (the outcome to predict)
pml$classe <- factor(pml$classe)

# split data into training (60%) and test (40%) sets, reproducibly
set.seed(77)
inTrain <- createDataPartition(y=pml$classe,p=0.6, list=FALSE)
training <- pml[inTrain,]
testing <- pml[-inTrain,]
### Exploratory Analysis
# build boxplots for all variables (kept for reference, disabled)
#for (i in 1:ncol(training)){
#  p <- qplot(x=classe,y=training[,i],data=training,ylab=names(training[i]),geom="boxplot")
#  ggsave(paste("./exploratory_graphs/plot_",i,".png"))
#}

### Model training/building
# Earlier caret attempts (random forest, rpart, gbm) kept for reference:
#rfFit <- train(classe~ .,data=training,method="rf")
#rFit <- train(classe~ .,data=training,method="rpart")
#gbmFit <- train(classe ~ ., method="gbm",data=training,verbose=FALSE,tuneLength=5)
#rPred <- predict(rFit,newdata=testing)

# k-nearest neighbours via caret, evaluated on the held-out set.
knn.fit <- train(classe ~.,data=training,method="knn")
knnpred <- predict(knn.fit,newdata=testing)
confusionMatrix(table(knnpred,testing$classe))

# Random forest fit directly with randomForest, evaluated in-bag (OOB
# predictions) and on the held-out set.
library(randomForest)
rf.fit <- randomForest(classe ~., data=training,type="class")
rfpred <- predict(rf.fit,newdata=testing)
rf.fit
confusionMatrix(table(rf.fit$predicted,training$classe))
confusionMatrix(table(rfpred,testing$classe))

# Variable importance (vi is computed but only the plot is used below).
vi <- varImp(rf.fit)
varImpPlot(rf.fit)
# submission: predict the 20 graded test cases with the random forest model
pml.test <- read.csv("./raw_data/pml-testing.csv",stringsAsFactors=FALSE)
answers <- predict(rf.fit,newdata=pml.test)
# Write each prediction in `x` to its own submission file
# ("./answers/problem_id_<i>.txt"), one bare value per file —
# write.table with all decorations disabled emits just the value.
# FIX: seq_along() instead of 1:length(x), which iterates c(1, 0)
# when `x` is empty.
pml_write_files = function(x){
  for(i in seq_along(x)){
    filename = paste0("./answers/problem_id_",i,".txt")
    write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
  }
}
pml_write_files(answers)

# FIX: the reference list below was pasted as bare text, which made the
# whole file unparseable; it is now commented out.
# References
# 1. Velloso, E.; Bulling, A.; Gellersen, H.; Ugulino, W.; Fuks, H. Qualitative Activity Recognition of Weight Lifting Exercises. Proceedings of 4th International Conference in Cooperation with SIGCHI (Augmented Human '13) . Stuttgart, Germany: ACM SIGCHI, 2013.
# 2. Kuhn, Max  Building Predictive Models in R Using the caret Package.  Journal of Statistical Software.  Vol. 28. November 2008.
# 3. Breiman, L; Cutler, A; Random Forests. http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
|
86018a2efffe514560f06060cb1aae027ec06b68
|
2f7d9d4fcd27d7a0b406c52744260a10b67fec71
|
/shiny/ui.R
|
e255a306429c8a0cdda71e565c08deaa53b916b9
|
[] |
no_license
|
ansolotli/shinyapp-decision-support-tool
|
3ed96b060ec6515460fdf9f13ca8891f61b8a88f
|
827bbba7fe2ebed83cb712207dad07b49a908368
|
refs/heads/master
| 2022-04-14T02:01:45.955092
| 2020-03-30T12:10:24
| 2020-03-30T12:10:24
| 222,639,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,839
|
r
|
ui.R
|
suppressPackageStartupMessages({
library(shiny)
library(shinydashboard)
library(shinyBS) # popovers
library(stringi)
library(ggplot2)
library(rmarkdown) # needed for deployment
library(reshape2)
library(shinyjs) # enables javascript
library(patchwork) # arrange pie charts
library(dplyr)
library(scales)
library(shinyhelper) # help popovers
})
# Header ----
# Dashboard page header; titleWidth widened to fit the long project title.
header <- dashboardHeader(title = "BONUS BLUEWEBS decision support tool", titleWidth = 450)
# Manually re-link a menuItem to its tabItem: a menuItem that carries nested
# inputs loses the attributes shinydashboard uses to switch tabs, so restore
# the data-toggle/data-value pair on its first child before returning it.
convertMenuItem <- function(mi, tabName) {
  link_attribs <- mi$children[[1]]$attribs
  link_attribs[["data-toggle"]] <- "tab"
  link_attribs[["data-value"]] <- tabName
  mi$children[[1]]$attribs <- link_attribs
  mi
}
# Sidebar ----
# Four menu entries (About / Predicted time series / Novelty / Balance),
# each passed through convertMenuItem() so menuItems with nested inputs
# still switch to their tabItem. Scenario selectors are wrapped in
# shinyBS::popify() to attach hover popovers with explanatory text.
sidebar <- dashboardSidebar(
  sidebarMenu(
    convertMenuItem(
      menuItem("About", tabName = "about"), tabName = "about"),
    convertMenuItem(
      menuItem(HTML("Predicted time series <br> of fish stocks"), tabName = "predicted",
               # Create the dropdowns of scenario options
               # wrap rendered elements and add popovers to them
               popify(selectInput(inputId = 'F',
                                  label = "Fishery Policy",
                                  choices = c("Sustainable" = "Sus", "Pelagics-Focused" = "Pel", "Open Access" = "OA"),
                                  selected = "Sus"
               ), "Fishery policies",
               content = "<b>Sustainable fishery policy</b> sets healthy states of all three major fish stocks (i.e. cod, herring and sprat) in the Central Baltic Sea as a management objective. This policy provides a basis for developing a sustainable exploitation of the Baltic Sea. <br><br> <b>Pelagics-Focused</b> scenario explores the option where fishery on the pelagic stocks (i.e. herring and sprat) would be prioritised over cod. <br><br> <b>Open access</b> scenario enables exploring the impacts of largely unregulated fisheries management in the future Baltic Sea."
               , placement = "right", trigger = "hover",
               #increase the size of the popover according to the length of content
               options = list(container = "body")),
               popify(selectInput(inputId = 'Nutr_scen',
                                  label = "Nutrient Loading Policy",
                                  choices = c("Baltic Sea Action Plan" = "BSAP", "Reference conditions" = "Ref"),
                                  selected = "BSAP"
               ), "Nutrient loading policies",
               content = "Nutrient loads were modelled both according to the Baltic Sea Action Plan (BSAP) by HELCOM (Baltic Marine Environment Protection Commission – Helsinki Commission), and the reference conditions. The nutrient loads in the Reference conditions scenario are based on the HELCOM assessment of the average nutrient loads between 1995 and 2002. <br><br> <b>Reference conditions</b> correspond with the current level of nutrient loading in the Baltic Sea whereas nutrient loads outlined in the <b>Baltic Sea Action Plan</b> would be lower."
               , placement = "right", trigger = "hover", options = list(container = "body")),
               popify(selectInput(inputId = 'Climate',
                                  label = "Climate Change Scenario",
                                  choices = c("RCP4.5", "RCP8.5"),
                                  selected = "RCP4.5"
               ), "Climate change scenarios",
               content = "The two climate change scenarios follow Representative Concentration Pathways (RCP) <b>4.5</b> and <b>8.5</b>, according to the fifth Assessment Report of the Intergovernmental Panel on Climate Change (IPCC) in 2014. <br><br> These pathways describe the greenhouse gas concentrations in the atmosphere, with higher concentrations resulting in higher warming effects on earth. <br><br> <b>RCP 4.5</b> is predicted to cause moderate rise in CO<sup>2</sup> emissions, reaching a plateau around 2040. <b>RCP 8.5</b> would lead to steeper rise in CO<sup>2</sup> emissions, with continuous increase."
               , placement = "right", trigger = "hover", options = list(container = "body")),
               # Create the two columns of checkbox groups (biomass and catch)
               fluidRow(
                 column(width = 5,
                        checkboxGroupInput(inputId = "bioVars",
                                           label = "Biomass Variables",
                                           choiceNames = list("Cod", "Herring", "Sprat"),
                                           choiceValues = list("plotCod", "plotHer", "plotSpr"),
                                           selected = "plotCod")
                 ),
                 column(width = 6, offset = 1,
                        checkboxGroupInput(inputId = "catchVars",
                                           label = "Catch Variables",
                                           choiceNames = list("Cod", "Herring", "Sprat"),
                                           choiceValues = list("plotCodCatch","plotHerCatch","plotSprCatch"),
                                           selected = "plotCodCatch")
                 )
               )
      ), tabName = "predicted"),
    convertMenuItem(
      menuItem("Novelty", tabName = "novelty",
               # Create the dropdowns of scenario options
               popify(selectInput(inputId = 'Nutr_scen_nov',
                                  label = "Nutrient Loading Policy",
                                  choices = c("Baltic Sea Action Plan" = "BSAP", "Reference conditions" = "Ref"),
                                  selected = "BSAP"
               ), "Nutrient loading policies",
               content = "Nutrient loads were modelled both according to the Baltic Sea Action Plan (BSAP) by HELCOM (Baltic Marine Environment Protection Commission – Helsinki Commission), and the reference conditions. The nutrient loads in the Reference conditions scenario are based on the HELCOM assessment of the average nutrient loads between 1995 and 2002. <br><br> <b>Reference conditions</b> correspond with the current level of nutrient loading in the Baltic Sea whereas nutrient loads outlined in the <b>Baltic Sea Action Plan</b> would be lower."
               , placement = "right", trigger = "hover", options = list(container = "body")),
               popify(selectInput(inputId = 'Climate_nov',
                                  label = "Climate Change Scenario",
                                  choices = c("RCP4.5", "RCP8.5"),
                                  selected = "RCP4.5"
               ), "Climate change scenarios",
               content = "The two climate change scenarios follow Representative Concentration Pathways (RCP) <b>4.5</b> and <b>8.5</b>, according to the fifth Assessment Report of the Intergovernmental Panel on Climate Change (IPCC) in 2014. <br><br> These pathways describe the greenhouse gas concentrations in the atmosphere, with higher concentrations resulting in higher warming effects on earth. <br><br> <b>RCP 4.5</b> is predicted to cause moderate rise in CO<sup>2</sup> emissions, reaching a plateau around 2040. <b>RCP 8.5</b> would lead to steeper rise in CO<sup>2</sup> emissions, with continuous increase."
               , placement = "right", trigger = "hover", options = list(container = "body")),
               # Create the column of checkbox groups (totalNov, codRV, temp_MarchMay050, temp_Aug060, notHypoxic)
               fluidRow(
                 column(width = 12,
                        popify(checkboxGroupInput(inputId = "novelVars",
                                                  label = "Novelty variables",
                                                  choices = list("Cod reproductive volume" = "codRV",
                                                                 "Spring temperature" = "T_050_MarchMay",
                                                                 "Summer temperature" = "Aug060mT",
                                                                 "Inverse hypoxic area" = "notHypoxicA"),
                                                  #choiceValues = list("plotRv", "plotTemp1", "plotTemp2", "plotHyp"),
                                                  selected = "codRV"),
                               title = "Novelty variables",
                               content = "Cod spawns in the open sea and the eggs drift in the water. The <b>reproductive volume</b> indicates changes in the size of the water layer where salinity is high enough and thereby also the density of the water (weight of water per its unit volume) is high enough to prevent the eggs from sinking into the oxygen deficient deep water. <br><br> Eutrophication and less saline water inflow to the Baltic Sea via the Danish Straits tend to decrease the reproductive volume of cod. <br><br> The <b>temperature</b> variables describe the water temperature in the surface layer (0-50m for March-May temperatures and 0-60m for August temperatures) of the water column during spring months and in the summer. <br><br> <b>Inverse hypoxic area</b> is the inverse of the deep water area that has gone hypoxic i.e. this variable describes the proportion of the study area where oxygen concentration in the deep water is above 2 mg/l."
                               , placement = "right", trigger = "hover", options = list(container = "body")),
                        popify(checkboxInput(inputId = "novelTotal",
                                             label = "Total abiotic novelty", value = TRUE),
                               title = "Total abiotic novelty",
                               content = "<b>Total abiotic novelty</b> is the sum of other novelty variables and describes the total novelty expected to occur in the Central Baltic Sea in the current century. <br><br> The four other novelty variables were each scaled between 0 and 1; 0 denoting values falling in the 95% confidence interval from observational data, and 1 denoting the extreme values of the model predictions till the end of the 21st century."
                               , placement = "right", trigger = "hover", options = list(container = "body"))
                 )
               )
      ), tabName = "novelty"),
    convertMenuItem(
      menuItem(HTML("Balance profit and <br> good environmental status"), tabName = "optimize")
      , tabName = "optimize")
  )
)
# Body
# Main dashboard body: four tabs ("about", "predicted", "novelty",
# "optimize") matching the sidebar menu items defined above.
#
# FIX: the 'Nutr_GES' selectInput used selected = "Above", which is not
# one of its choice *values* ("GES" / "Sub-GES"); Shiny then silently
# falls back to the first choice. Changed to selected = "GES" so the
# default is explicit and valid.
body <- dashboardBody(
  # enable javascript
  useShinyjs(),
  tabItems(
    # --- "About" tab: static information pages in a tab box -----------
    tabItem("about",
            titlePanel("About the decision support tool"),
            fluidRow(
              tabBox(width = 12,
                     id = "aboutBoxes",
                     tabPanel("Introduction",
                              fluidRow(
                                uiOutput("about_shortly"),
                                box(imageOutput("intro", height = "auto"), width = 12, solidHeader = TRUE),
                                uiOutput("about_continued")
                              )
                     ),
                     tabPanel("Decision tool",
                              fluidRow(
                                uiOutput("modelText"),
                                box(solidHeader = TRUE, width = 12, imageOutput("model", height = "auto")),
                                uiOutput("modelText2"),
                                box(solidHeader = TRUE, width = 12, imageOutput("foodweb", height = "auto")),
                                uiOutput("foodwebText")
                              )
                     ),
                     tabPanel("Model area",
                              fluidRow(
                                box(solidHeader = TRUE, imageOutput("map", height = "auto")),
                                uiOutput("mapText")
                              )
                     ),
                     tabPanel("BLUEWEBS",
                              fluidRow(
                                uiOutput("about_bluewebs"),
                                uiOutput("acknowledgement"),
                                box(imageOutput("logos"), width = 12, solidHeader = TRUE))
                     ))
            )
    ),
    # --- "Predicted" tab: biomass / catch time-series plots -----------
    tabItem("predicted",
            titlePanel("Explore the predicted biomasses and catches of fish stocks in different management scenarios"),
            fluidRow(
              box(id = "timeseries_box",
                  uiOutput("timeseries_info"),
                  # "Show more." toggles the initially hidden second info block
                  actionLink("time_series_link", "Show more."),
                  hidden(
                    uiOutput("timeseries_info2")
                  ),
                  solidHeader = TRUE, width = 12, status = "primary")
            ),
            fluidRow(
              # side-by-side biomass and catch plot lists
              splitLayout(cellWidths = c("50%", "50%"),
                          uiOutput("bio_plot_list"),
                          uiOutput("catch_plot_list"))
            )
    ),
    # --- "Novelty" tab: novelty plots plus an explanatory panel -------
    tabItem("novelty",
            titlePanel("Explore the uncertainty of model forecasts under novel conditions"),
            fluidRow(
              tabBox(width = 12,
                     id = "novelty_boxes",
                     tabPanel("Novelty in the Baltic Sea",
                              uiOutput('novel_info'),
                              fluidRow(
                                box(plotOutput("novel_plot", height = 300), width = 12, solidHeader = TRUE),
                                uiOutput("novel_plot_total")
                              )),
                     tabPanel("What is novelty?",
                              fluidRow(
                                uiOutput("aboutNovelty"),
                                column(width = 5, uiOutput("aboutNovelty_fig")),
                                column(width = 7, box(solidHeader = TRUE, width = 12, imageOutput("noveltyci", height = "auto")))
                              ))
              ))
    )
    ,
    # --- "Optimize" tab: management-goal inputs and result plots ------
    tabItem("optimize",
            helper(titlePanel("Balance profit and good environmental status"),
                   type = "markdown", content = "optimize_info", icon = "question-circle", size = "l"),
            fluidRow(
              box(
                column(width = 4,
                       popify(selectInput(inputId = 'Profit',
                                          label = "Minimum acceptable profit",
                                          choices = c("No profit", "Profit larger than 0", "Profit larger than 100", "Profit larger than 200"),
                                          selected = "Profit larger than 100"
                       ), title = "Minimum acceptable profit",
                       content = "Minimum acceptable profit enables selection of the acceptable lowest limit of annual profitability of fisheries on cod, herring, and sprat in millions of euros."
                       , placement = "right", trigger = "hover", options = list(container = "body")),
                       popify(radioButtons(inputId = 'Novelty',
                                           label = "Include expert knowledge",
                                           choices = c("Yes" = "Yes", "No" = "No"),
                                           selected = "Yes"
                       ), title = "Expert knowledge",
                       content = "The decision support system includes studies on ecological novelty which denotes unprecedented, human-mediated changes at different ecological levels. <br><br> Novelty and its effect on a system are difficult to predict with models as novel conditions can not be calibrated against historical observations. <br><br> Adding expert scientific knowledge about this uncertainty on top of numerical modelling can increase the certainty of modelled predictions."
                       , placement = "right", trigger = "hover", options = list(container = "body"))
                ),
                column(width = 4,
                       popify(selectInput(inputId = 'F_GES',
                                          label = "Status of fish stocks",
                                          choices = c("All stocks above the ref. point" = "All above", "One stock below the ref. point" = "One below",
                                                      "Two stocks below the ref. point" = "Two below", "All stocks below the ref. point" = "All below"),
                                          selected = "All above"
                       ), title = "Status of fish stocks",
                       content = "The spawning-stock biomass of the three major commercially exploited fish species (i.e. cod, herring and sprat) in relation to the reference point describes the expected status of these fish stocks. The stocks are considered to be in in good condition if their spawning-stock biomass exceeds the reference point. <br><br> The scale ranges from all three fish species being above the reference point limit to none of the species faring well."
                       , placement = "right", trigger = "hover", options = list(container = "body")),
                       popify(radioButtons(inputId = 'Ref_point',
                                           label = "Fish stock reference point",
                                           choices = c("Blim" = "Blim", "B MSY trigger" = "B MSY"),
                                           selected = "Blim"
                       ), title = "Reference point",
                       content = "Reference points are indexes applied in the regulation of fisheries. They are defined by the International Council for the Exploration of the Sea (ICES), and they aim to describe if the status of the fish stocks is good or if they are overexploited or endangered. <br><br> <b>Blim</b> is the reference point describing the limit below which there is a high risk of reduced recruitment. <br><br> <b>B MSY trigger</b> (or MSYBtrigger) is considered to be the lower bound of spawning–stock biomass fluctuation below which fishing needs to be reduced to allow a fish stock to rebuild to levels capable of producing maximum sustainable yield (MSY)."
                       , placement = "right", trigger = "hover", options = list(container = "body"))
                ),
                column(width = 4,
                       popify(selectInput(inputId = 'Nutr_GES',
                                          label = "Status of water quality",
                                          choices = c("In Good Environmental Status" = "GES", "Not in Good Environmental Status" = "Sub-GES"),
                                          # FIX: was "Above", which is not a valid choice value
                                          selected = "GES"
                       ), title = "Status of water quality",
                       content = "The indicators of water quality comprise of the average concentrations of nitrogen, phosphorus and chlorophyll <i>a</i>. <br><br> Low concentrations suggest high probability of reaching the <b>Good Environmental Status (GES)</b> as defined by HELCOM (Baltic Marine Environment Protection Commission – Helsinki Commission) whereas high concentrations indicate increased eutrophication."
                       , placement = "below", trigger = "hover", options = list(container = "body"))
                ),
                width = 12, solidHeader = TRUE, title = "Choose your management goal")
            ),
            fluidRow(
              column(width = 12,
                     uiOutput("scens_info"),
                     htmlOutput("opt_scens"),
                     box(plotOutput("opt_plots", width = 700), width = 12, solidHeader = TRUE)
              ))
    )),
  # CSS
  tags$head(tags$link(rel = "stylesheet", type = "text/css", href = "custom.css"))
)
# Create the UI
# Assemble the full dashboard page from the header, sidebar and body
# objects defined above, using the "black" shinydashboard skin.
ui <- dashboardPage(header, sidebar, body, skin = "black")
|
9bb1d4e38a0e29d5f498e38b46caf679233f1cbe
|
a2f1d4c852abe5ce896a2c586ccb0304eea5ddfa
|
/solutions/day4.R
|
4167559a03266e9276ff6dd001727939dd66283c
|
[] |
no_license
|
mahendra-mariadassou/advent_of_code_2018
|
0126345bd5119bc817532bbaa5ee77182596a2fd
|
98f578fc4c212b732fef9db97366e58afa26d3f8
|
refs/heads/master
| 2020-04-09T20:06:54.362664
| 2018-12-17T15:24:01
| 2018-12-17T15:24:01
| 160,564,437
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,108
|
r
|
day4.R
|
library(tidyverse)
library(lubridate)
## input
# Sample guard log from the Advent of Code day 4 puzzle description,
# kept for interactive sanity checks (see commented-out calls at the
# bottom of the file).
test <- c(
  "[1518-11-01 00:00] Guard #10 begins shift",
  "[1518-11-01 00:05] falls asleep",
  "[1518-11-01 00:25] wakes up",
  "[1518-11-01 00:30] falls asleep",
  "[1518-11-01 00:55] wakes up",
  "[1518-11-01 23:58] Guard #99 begins shift",
  "[1518-11-02 00:40] falls asleep",
  "[1518-11-02 00:50] wakes up",
  "[1518-11-03 00:05] Guard #10 begins shift",
  "[1518-11-03 00:24] falls asleep",
  "[1518-11-03 00:29] wakes up",
  "[1518-11-04 00:02] Guard #99 begins shift",
  "[1518-11-04 00:36] falls asleep",
  "[1518-11-04 00:46] wakes up",
  "[1518-11-05 00:03] Guard #99 begins shift",
  "[1518-11-05 00:45] falls asleep",
  "[1518-11-05 00:55] wakes up"
)
# Puzzle input: one timestamped log line per element.
input <- read_lines("input/day4.txt")
# Parse the raw, unordered guard-log lines into one row per nap:
#   guard | event | block | end | start
# where start/end are the minutes (within the midnight hour) at which the
# guard fell asleep and woke up.
#
# Steps: strip the "[timestamp]" brackets and parse with ymd_hm(); pull
# the guard id from "Guard #NN" lines (NA on sleep/wake lines); sort
# chronologically; number shifts via a cumulative count of non-NA guard
# ids; within each shift, pair consecutive falls-asleep/wakes-up lines
# into events and spread their minutes into start/end columns.
format_input <- function(x) {
  x %>%
    tibble(raw = .) %>%
    mutate(time = str_replace_all(raw, "\\[|\\].*", "") %>% ymd_hm(),
           guard = str_extract(raw, "Guard #[0-9]*") %>% str_remove("Guard #") %>% as.integer(),
           sleeps = str_detect(raw, "falls asleep"),
           wakes = str_detect(raw, "wakes"),
           minute = minute(time)) %>%
    arrange(time) %>%
    # block i groups all lines belonging to the i-th "begins shift" entry
    mutate(block = cumsum(!is.na(guard))) %>%
    group_by(block) %>%
    # guard[1] is the shift line's id; (1:n()) %/% 2 gives event 0 for the
    # shift line itself (filtered out below) and pairs sleep/wake rows
    mutate(guard = guard[1], event = (1:n()) %/% 2, type = if_else(sleeps, "start", "end")) %>%
    filter(event > 0) %>%
    select(guard, minute, event, type, block) %>%
    spread(key = type, value = minute)
}
# Day 4, part 1: find the guard who sleeps the most in total, then the
# minute that guard is most often asleep. Answer = guard id * minute.
#
# Changes from the original: integer literals (0L/1L) for the tally, for
# consistency with solve_problem_2, and the inner closure argument is
# renamed (it shadowed the nested `data` column).
solve_problem_1 <- function(input) {
  format_input(input) %>%
    group_by(guard) %>%
    nest() %>%
    mutate(
      ## total sleep duration (minutes) per guard
      sleep.duration = map_int(data, ~ sum(.$end - .$start)),
      ## 60-slot tally: number of nights the guard was asleep at each minute
      distribution = map(data, function(naps) {
        time <- rep(0L, 60)
        pwalk(naps %>% select(start, end),
              function(start, end) { time[(start + 1):end] <<- time[(start + 1):end] + 1L })
        time
      }),
      ## most slept minute (0-based, hence the -1 after which.max)
      most.sleepy = map_int(distribution, which.max) - 1,
      result = most.sleepy * guard
    ) %>%
    arrange(desc(sleep.duration)) %>%
    slice(1) %>%
    pull(result)
}
# Day 4, part 2: among all (guard, minute) pairs, find the guard who is
# asleep on the same minute most often. Answer = guard id * that minute.
#
# Differs from solve_problem_1 by ranking guards on the *peak* of their
# per-minute sleep tally (nb.sleeps) rather than total sleep time.
# FIX: removed the sleep.duration column, which was computed here but
# never used (dead work inherited from part 1).
solve_problem_2 <- function(input) {
  format_input(input) %>%
    group_by(guard) %>%
    nest() %>%
    mutate(
      ## 60-slot tally: number of nights the guard was asleep at each minute
      distribution = map(data, function(naps) {
        time <- rep(0L, 60)
        pwalk(naps %>% select(start, end),
              function(start, end) { time[(start + 1):end] <<- time[(start + 1):end] + 1L })
        time
      }),
      ## most slept minute (0-based)
      most.sleepy = map_int(distribution, which.max) - 1,
      ## how many nights that minute was slept through
      nb.sleeps = map_int(distribution, max),
      result = most.sleepy * guard
    ) %>%
    arrange(desc(nb.sleeps)) %>%
    slice(1) %>%
    pull(result)
}
# Run both parts on the puzzle input; the commented calls exercise the
# sample data. Trailing comments record run time and expected answers.
## solve_problem_1(test)
solve_problem_1(input) ## 0.34 s, 39698
## solve_problem_2(test)
solve_problem_2(input) ## 0.29s, 14920
|
4364a4d6fedcab05df349d49b11032edff3b3a02
|
16729b86f56fb320d76f604918ce8c84bea474de
|
/man/cached.Rd
|
155bd9b25daa436ac20edacd1167e4bfe55e7e9c
|
[] |
no_license
|
BAAQMD/cacher
|
7c148228b6805e258598c9f2ca3fc2df85df2136
|
7b0b0137f024c49ba92594c492327559d4bfc88d
|
refs/heads/master
| 2021-07-14T04:58:24.162834
| 2020-05-15T03:22:54
| 2020-05-15T03:22:54
| 131,067,521
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 645
|
rd
|
cached.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cached.R
\name{cached}
\alias{cached}
\title{Declare a cached object}
\usage{
cached(
...,
root = cache_root(),
ext = c(".rds", ".fst", ".feather", ".rds.lz4", ".rds.snz"),
verbose = getOption("verbose")
)
}
\arguments{
\item{\dots}{components of cache key (NOT the cached expression!)}
\item{root}{function that yields the path on disk}
\item{ext}{form of cached data (".fst" is fastest for tabular data)}
\item{verbose}{logical}
}
\description{
Declare a cached object
}
\examples{
cached("exp", "1", verbose = TRUE) \%or\% exp(1)
}
\seealso{
lazy_or
}
|
11d45508f2b7f13032d2490b344ec9e71cec3e61
|
852b1aae6ad8138dc164eafbf4045e3ea5de87c8
|
/R/extract_fin_metadata.R
|
e38eb382539232a395e5b61340bcd57303dadae6
|
[] |
no_license
|
PinkDiamond1/fin2ddh
|
35bafcb1a3de815d11554d87f85fb45b3d6262d0
|
40ffcf8f79ff3bb9845b742e57ddb1fbf5f82e66
|
refs/heads/master
| 2023-03-17T00:52:19.799023
| 2019-05-08T01:24:10
| 2019-05-08T01:24:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 368
|
r
|
extract_fin_metadata.R
|
#' extract_fin_metadata
#'
#' Extract metadata from the World Bank Finances search API. Queries the
#' fixed "views" endpoint (limited to tables, dataset view) and parses
#' the JSON response without simplification, so the result is a nested
#' list mirroring the JSON structure.
#'
#' @return list Parsed JSON response (nested lists).
#' @export
#'
extract_fin_metadata <- function() {
  # NOTE(review): the URL contains a doubled slash ("org//api"); most
  # servers tolerate it, so it is kept as-is to avoid behaviour change.
  url <- "http://finances.worldbank.org//api/search/views.json?limitTo=tables&datasetView=DATASET"
  jsonlite::fromJSON(url, simplifyVector = FALSE)
}
|
3f5f18e91e71b0512880f2f99f59c4703866ee69
|
1d2278031396b3acae4bf003e202f77dd1fbe195
|
/Script_for_HardyWeinberg_frame.R
|
a38db08492d4ff0c606f17e33aa2704f153ba06f
|
[] |
no_license
|
Drumph/Code_snippet
|
0706b407b0eb9b2145f533f1fd6250dc20af10a4
|
d442e95fa5e274d669095e9902b8b414b4d78f98
|
refs/heads/master
| 2020-03-16T16:47:15.953457
| 2018-05-10T22:50:34
| 2018-05-10T22:50:34
| 132,803,208
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,685
|
r
|
Script_for_HardyWeinberg_frame.R
|
#!/usr/bin/env Rscript
# Merge per-population Hardy-Weinberg test results (.hwe files) for one
# chromosome into a single matrix and flag SNPs that fail HWE
# (p <= 1e-6) in any population.
#
# FIX: the original first line was a bare shell command
# ("Rscript --vanilla ...") that is not valid R; replaced with a shebang.
#
# Positional arguments via commandArgs(): args[7] = directory with .hwe
# files, args[9] = chromosome tag used to select files.
# NOTE(review): these fixed offsets depend on exactly how Rscript is
# invoked; confirm, or switch to commandArgs(trailingOnly = TRUE).
args <- commandArgs()
path_file <- args[7]   ######## Path where the files are stored
file_name <- args[9]   ######## This file name contains the chromosome number to be used

file_list <- list.files(path = path_file, pattern = "\\.hwe")
# Keep only the files for the requested chromosome.
# FIX: the original indexed an undefined object `file1`; the grep result
# must index `file_list` itself.
filenames1 <- file_list[grep(pattern = file_name, file_list)]

# Population name = 7th "_"-separated token of the file name; it becomes
# the per-population P-value column label (dots -> underscores).
col_names <- do.call(rbind, strsplit(filenames1, '\\_'))[, 7]
Pval_cols <- paste0("P_", gsub(".", "_", col_names, fixed = TRUE))

# Read one .hwe file and expand ";"-separated SNP identifiers so each
# SNP gets its own row (CHR and P repeated accordingly).
split_data_hwe <- function(x) {
  l2 <- read.table(x, header = TRUE, stringsAsFactors = FALSE)
  s <- strsplit(as.character(l2$SNP), ';')
  data.frame(CHR = rep(l2$CHR, lengths(s)),
             SNP = unlist(s),
             P = rep(l2$P, lengths(s)))
}

# Read every selected file and merge them all on CHR + SNP.
# FIX: build full paths first -- the original passed bare file names,
# which only works when path_file happens to be the working directory.
datalist1 <- lapply(file.path(path_file, filenames1), split_data_hwe)
r1 <- Reduce(function(x, y) merge(x, y, by = c("CHR", "SNP")), datalist1)

n_pop <- length(filenames1)
pval_idx <- 3:(n_pop + 2)                # merged per-population P columns
flag_idx <- (n_pop + 3):(2 * n_pop + 2)  # 0/1 failure-indicator columns
colnames(r1)[pval_idx] <- Pval_cols

# FIX: the original extracted the (not yet existing) flag columns before
# assigning them, which errors on a data.frame ("undefined columns
# selected"); create them in one list assignment instead.
# 1 = SNP fails HWE in that population (p <= 1e-6), 0 otherwise.
r1[flag_idx] <- lapply(r1[pval_idx], function(p) as.integer(p <= 0.000001))
colnames(r1)[flag_idx] <- Pval_cols  # same labels as the P columns (as before)

# TRUTH = "T" when the SNP passes HWE in every population
# (i.e. no indicator column is set on that row).
r1$TRUTH <- "F"
r1$TRUTH[rowSums(r1[flag_idx]) == 0] <- "T"

write.table(r1, file = paste(file_name, "_matrix_HWE.txt", sep = ""),
            sep = "\t", row.names = FALSE, quote = FALSE)
|
7479d08855baced130ea64ca2c39fe9763a03813
|
bbd5c1def3e22bc54b5f100150d7d80292719ab5
|
/man/validateASR.Rd
|
82602ddcbf3206ab5515cfa99032058f2ff450cd
|
[] |
no_license
|
jeffbaumes/aRbor
|
d70f25b3c023ee471e6d59d1dc7147e5d7feb153
|
e33b8d7f206f684ec64fbf5f095bb198ab49fc52
|
refs/heads/master
| 2021-01-18T10:38:52.579128
| 2014-11-05T19:33:24
| 2014-11-05T19:33:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
rd
|
validateASR.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{validateASR}
\alias{validateASR}
\title{Function that performs leave-one-out validation of a discrete character model}
\usage{
validateASR(asr, prior = "stationary", plot = TRUE, cex.node = 0.5,
cex.tip = 0.5, cex.lab = 1, cex.actual = 5, pal = rainbow,
label.offset = 0.1, ...)
}
\arguments{
\item{asr}{An asrArbor object produced by aceArbor}
\item{prior}{Either "stationary" or "equal"}
\item{plot}{A logical indicating whether a plot should be produced}
\item{pal}{A color palette to color the nodes}
}
\description{
Function that performs leave-one-out validation of a discrete character model
}
|
81b57f72d5d58cd9a4bce818c7bb73baa5436d8b
|
4cb3384013df1476e310c04a6f17aa7a1bddcb0b
|
/plot1.r
|
b3f7cb7c1c4b8964e5036b42bde50c040695cf21
|
[] |
no_license
|
rcoursera1/ExData_Plotting1
|
151dc944373c34e3c0bf2e9f2d70bc45176e308b
|
2365707f15dbb87232f9050751618fd7e7a0fcb3
|
refs/heads/master
| 2021-01-18T02:39:55.308467
| 2014-05-11T20:53:43
| 2014-05-11T20:53:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 220
|
r
|
plot1.r
|
# Plot 1: histogram of household global active power.
source("load_data.r")

# Open a 480x480 PNG device with a transparent background, draw the
# histogram, then close the device so the file is flushed to disk.
png(filename = "plot1.png", width = 480, height = 480, bg = "transparent")
hist(
  data$Global_active_power,
  main = "Global Active Power",
  xlab = "Global Active Power (kilowatts)",
  col = "red"
)
dev.off()
|
d071924679549c651f7ec2ecbe511c5a64bd706e
|
d1197d5bfac715e2fbf554debcca2f785c0dcc50
|
/stan_tutorial.R
|
0e87a280400ca6926da7dac553e0a99975f22a61
|
[] |
no_license
|
jasonwee-cw/psyteam504
|
c3d730b867205028bce3f2ce07543c49ebcdc98f
|
02aa5c37995fae595c83c34f0585f4f1d1171215
|
refs/heads/master
| 2021-01-03T10:44:16.256639
| 2020-04-21T17:58:34
| 2020-04-21T17:58:34
| 240,045,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,277
|
r
|
stan_tutorial.R
|
#Set working directory
# NOTE(review): setwd() with an absolute personal path makes the script
# non-portable; consider running from the project root instead.
setwd("~/Documents/Princeton/PSY504 Statistics")
library(data.table)
library(tidyverse)
library(rstan)
# Cache compiled Stan models on disk so re-runs skip recompilation.
rstan_options(auto_write = TRUE)
#Hi @everyone, for the OLS stan practice, please use the mtcars data linked here: https://gist.github.com/seankross/a412dfbd88b3db70b74b This dataset is included with R so there's no need to download it if you use R
#For the OLS regression, simply build a model that predicts "mpg" based on "hp" (horsepower) and "wt" (weight) of the vehicle.
#mpq ~ hp + wt
## Load mtcars data
mtcars <- mtcars
### Set up
# y: outcome (miles per gallon); x: predictor matrix (hp, wt).
y <- mtcars$mpg
x <- matrix(c(mtcars$hp, mtcars$wt),ncol=2)
# NOTE(review): nrow() already returns an integer; as.numeric() looks
# redundant here -- confirm before changing.
N <- as.numeric(nrow(mtcars))
K <- ncol(x)
### Parse into list for Stan
# Names must match the `data` block declarations in the Stan program.
mtcars_data <- list(N=N, K=K, x=x, y=y)
# specify model
# Stan program: Gaussian linear regression with intercept alpha,
# coefficient vector beta and error scale sigma (no explicit priors).
ols_text <-
"data {
int<lower=0> N; // number of data items
int<lower=0> K; // number of predictors
matrix[N, K] x; // predictor matrix
vector[N] y; // outcome vector
}
parameters {
real alpha; // intercept
vector[K] beta; // coefficients for predictors
real<lower=0> sigma; // error scale
}
model {
y ~ normal(x * beta + alpha, sigma); // likelihood
}
"
## Fit the model
# Compiles the model (first run) and samples with rstan's defaults.
mtcars_fit <- stan(model_code=ols_text, data = mtcars_data,
verbose=TRUE)
summary(mtcars_fit)
|
e2be6750088606aaed37f159937bac8161cb9c4b
|
aecd1ebdaefe3976c7f5b4cd9090fcc74b09151f
|
/lld_plotter.R
|
2941d70c5957509a6407ca5ad4fca41f2659448f
|
[] |
no_license
|
moorelr/plot_llds
|
2d80b594193371e6337efc50399d93d9c2ea25b8
|
308d63c58d7e135c580e60f1ecb4e6a310691f61
|
refs/heads/main
| 2023-03-01T17:40:05.825518
| 2021-02-10T01:09:52
| 2021-02-10T01:09:52
| 337,581,071
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,755
|
r
|
lld_plotter.R
|
# moorelr/georoc-parser is licensed under The MIT License
# Copyright 2019 Lowell R. Moore
# Directory holding the parsed GEOROC csv and the MELTS output files.
georoc_directory <- getwd()
georoc <- read.csv(paste(georoc_directory, "Georoc_parsed.csv", sep = "/"))
# Oxides to plot: the first element is the x-axis for every panel, the
# rest become the y-axes. plot_layout = panel grid (rows, cols); pdf_w /
# pdf_h = output page size in inches.
plot_elements <- c("MgO", "SiO2", "TiO2", "Al2O3", "CaO", "Na2O", "K2O")
plot_layout <- c(2, 3)
pdf_w <- 8
pdf_h <- 6
# Overlay liquid lines of descent (LLDs) from MELTS liquid-output csv
# files in `directory` onto the current plot, labelling each curve with
# its source file name at the curve's mean (x, y) position.
# x_elem / y_elem: oxide names matching the "wt..<oxide>" column headers.
add_llds <- function(x_elem, y_elem, directory = getwd()) {
  for (idx in grep("melts-liquid", list.files(directory))) {
    file_name <- list.files(directory)[idx]
    liquid <- read.csv(paste(directory, file_name, sep = "/"))
    xs <- liquid[, paste("wt..", x_elem, sep = "")]
    ys <- liquid[, paste("wt..", y_elem, sep = "")]
    lines(xs, ys, lwd = 2, col = rgb(0, 0, 0.8))
    # label the curve at its centroid
    text(mean(xs), mean(ys), labels = file_name,
         adj = c(0.5, 0.5), cex = 0.5, col = "blue")
  }
}
# Scatter the GEOROC data for one oxide pair and overlay the MELTS LLDs
# from `directory`. Reads the global `georoc` data frame loaded at the
# top of the script; GEOROC columns are named "<OXIDE>.WT..".
#
# FIX: the original passed `ylab = y_elem` *inside* paste(), where the
# argument name is silently ignored (paste() just pastes the value);
# the stray name is removed and paste0() used for clarity.
draw_plot <- function(x_elem, y_elem, directory = getwd()) {
  x_georoc <- georoc[, paste(toupper(x_elem), ".WT..", sep = "")]
  y_georoc <- georoc[, paste(toupper(y_elem), ".WT..", sep = "")]
  plot(x_georoc, y_georoc, pch = 19, col = rgb(0.8, 0, 0, 0.5),
       xlab = paste0(x_elem, ", wt%"),
       ylab = paste0(y_elem, ", wt%"))
  add_llds(x_elem, y_elem, directory)
}
# Render all panels into a single multi-panel PDF page: first oxide vs
# each of the remaining oxides, then restore the 1x1 layout and close
# the device.
pdf(file = "lld_plots.pdf"
, width = pdf_w, height = pdf_h, useDingbats = FALSE
)
par(mfrow = plot_layout)
for(i in 2:length(plot_elements)){
draw_plot(plot_elements[1], plot_elements[i])
}
par(mfrow = c(1, 1))
dev.off()
|
ecc67b465c6e151cd3260b476b7e708161c9909c
|
ed28666d9201bf050c305f0740756f7730a66ef3
|
/NatureEE-data-archive/Run203121/JAFSdata/JAFSnumPerPatch9500.R
|
b7ff9a07f7476141fba33e08e61dd11499a8b34a
|
[] |
no_license
|
flaxmans/NatureEE2017
|
7ee3531b08d50b3022d5c23dbcf177156c599f10
|
b3183abe6bb70f34b400d0f5ec990cce45e10b33
|
refs/heads/master
| 2021-01-12T08:02:37.153781
| 2017-01-27T15:48:55
| 2017-01-27T15:48:55
| 77,110,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32
|
r
|
JAFSnumPerPatch9500.R
|
# Counts for the two patches at step 9500 -- presumably numbers of
# individuals per patch (inferred from the file/variable name; confirm
# against the simulation's output documentation).
numPerPatch9500 <- c(2534,2466)
|
f61127a8e8d7d1ff10b8d6031199de1692f9383b
|
44d6dde8a996445a6997fb512b66a8f02aa1d154
|
/man/cities.Rd
|
73b2b4b517abc11bae33389a5772929e0777de32
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
cran/inldata
|
d1a784311f74c0ab92e9e5d99d2ce45e328bf23f
|
73fab84452651371661df8e4c0331a18bd63f604
|
refs/heads/master
| 2022-12-16T19:47:11.841892
| 2020-09-17T11:40:03
| 2020-09-17T11:40:03
| 296,363,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,126
|
rd
|
cities.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cities.R
\docType{data}
\name{cities}
\alias{cities}
\title{Cities and Towns}
\format{
A SpatialPointsDataFrame of the \pkg{sp} package with 24 records and 16 variables.
See \code{\link{projection}} dataset for coordinate reference system information.
}
\source{
U.S. Department of Commerce, U.S. Census Bureau, Geography Division/Cartographic Products Branch.
Spatial extract from the Master Address File / Topologically Integrated Geographic Encoding and Referencing
(\href{https://catalog.data.gov/dataset/tiger-line-shapefile-2019-series-information-for-the-current-place-state-based-shapefile}{MAF/TIGER}) Database (MTDB),
2019 data collection, released April 2, 2020.
}
\usage{
cities
}
\description{
Cities and towns (populated places) in the vicinity of Idaho National Laboratory, eastern Idaho.
}
\examples{
inlmisc::PlotMap(cities, dms.tick = TRUE)
sp::plot(cities, pch = 19, add = TRUE)
raster::text(cities, cities@data$NAME,
pos = 1, cex = 0.6)
str(cities@data)
}
\keyword{datasets}
|
13b7fe0700d06e635b40f21f31af006422a3973f
|
13751f13f4bf7197593d10f21496ebfc8c9ed496
|
/GSM/TRX_per_TG.R
|
f72c758bb0436677cf1ae1db978787155ab1a4fb
|
[] |
no_license
|
fernandofsilva/Ericsson_Network_Design
|
e95b1f517e72e0e6b19fc734ee6682ce99acdaec
|
aa323af684bcea1d563ca64b03e7cc2551060ef7
|
refs/heads/master
| 2021-10-19T06:36:55.671174
| 2019-02-18T20:01:35
| 2019-02-18T20:01:35
| 91,488,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,792
|
r
|
TRX_per_TG.R
|
# TRX per TG
# This Code is to be used in a Packet Abis Dimensioning for operator TIM
# The input for the code is a SQL query on MoView database
# Output: per-TG and per-cell TRX counts written as two CSV files.
#Load library
library(data.table)
library(RODBC)
############################## Loading databases ###############################
#Connect to SQL moView TIM db
# SECURITY NOTE(review): database host and credentials are hard-coded in
# plain text; move them to environment variables or a config file kept
# out of version control.
odbcChannel <- odbcDriverConnect('driver={SQL Server};
server=146.250.136.14;
database=moView_TIM;
Uid=mv_tim;Pwd=tim')
#Loading the site list
# Two columns expected: BSC name and site-id prefix (renamed below).
sites <- fread("/Users/esssfff/Documents/Inputs/ListaSites.csv",
stringsAsFactors = FALSE, header = TRUE,
col.names = c("BSC", "SITEID"))
#Loop over the site list selecting just the elements of the list
# NOTE(review): RXOTRX grows via rbind() on every iteration (quadratic);
# collecting results in a list and binding once would be faster.
# NOTE(review): `sites` is a data.table, so sites[i,1] is a one-row
# data.table that paste() coerces to character -- it works here but is
# fragile; sites$BSC[i] would be more explicit. Confirm before changing.
RXOTRX <- data.frame()
for(i in seq_along(sites$BSC)) {
x <- sqlQuery(odbcChannel,
paste("SELECT nodeLabel, CELL , TG, TRX FROM dbo.RXMOP",
" WHERE nodeLabel = '", sites[i,1],
"' AND CELL LIKE '", sites[i, 2],
"%' ORDER BY TG", sep = ""))
x <- x[!duplicated(x), ]  # drop duplicate rows from the query result
RXOTRX <- rbind(RXOTRX, x)
}
rm(i, x)
#Close channel
odbcClose(odbcChannel)
rm(odbcChannel)
############################## Handling Databases ##############################
RXOTRX <- data.table(RXOTRX)
# TRX count per cell (rows per nodeLabel/CELL group).
CELL <- RXOTRX[, lapply(.SD, length), by= c("nodeLabel", "CELL"), .SDcols = c("TRX")]
# TRX count per transceiver group (TG) within each cell.
TG <- RXOTRX[, lapply(.SD, length), by= c("nodeLabel", "CELL", "TG"), .SDcols = c("TRX")]
write.csv(x = TG,
file = "/Users/esssfff/Documents/Inputs/TRX_per_TG.csv",
row.names = FALSE)
write.csv(x = CELL,
file = "/Users/esssfff/Documents/Inputs/TRX_per_CELL.csv",
row.names = FALSE)
# NOTE(review): rm(list = ls()) wipes the entire global environment, not
# just this script's objects -- hostile when sourced interactively.
rm(list = ls())
|
72596f53ca2f8f53c52f5f32eaf1246a99cee321
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/paws/R/datapipeline_service.R
|
053d71742872d297fc705297df1995bc1c1671db
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 8,813
|
r
|
datapipeline_service.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config merge_config
NULL
#' AWS Data Pipeline
#'
#' @description
#' AWS Data Pipeline configures and manages a data-driven workflow called a
#' pipeline. AWS Data Pipeline handles the details of scheduling and
#' ensuring that data dependencies are met so that your application can
#' focus on processing the data.
#'
#' AWS Data Pipeline provides a JAR implementation of a task runner called
#' AWS Data Pipeline Task Runner. AWS Data Pipeline Task Runner provides
#' logic for common data management scenarios, such as performing database
#' queries and running data analysis using Amazon Elastic MapReduce (Amazon
#' EMR). You can use AWS Data Pipeline Task Runner as your task runner, or
#' you can write your own task runner to provide custom data management.
#'
#' AWS Data Pipeline implements two main sets of functionality. Use the
#' first set to create a pipeline and define data sources, schedules,
#' dependencies, and the transforms to be performed on the data. Use the
#' second set in your task runner application to receive the next task
#' ready for processing. The logic for performing the task, such as
#' querying the data, running data analysis, or converting the data from
#' one format to another, is contained within the task runner. The task
#' runner performs the task assigned to it by the web service, reporting
#' progress to the web service as it does so. When the task is done, the
#' task runner reports the final success or failure of the task to the web
#' service.
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{credentials}:} {\itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' }}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.}
#' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
#' }
#' @param
#' credentials
#' Optional credentials shorthand for the config parameter
#' \itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }
#' @param
#' endpoint
#' Optional shorthand for complete URL to use for the constructed client.
#' @param
#' region
#' Optional shorthand for AWS Region used in instantiating the client.
#'
#' @section Service syntax:
#' ```
#' svc <- datapipeline(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string",
#' close_connection = "logical",
#' timeout = "numeric",
#' s3_force_path_style = "logical",
#' sts_regional_endpoint = "string"
#' ),
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- datapipeline()
#' svc$activate_pipeline(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=datapipeline_activate_pipeline]{activate_pipeline} \tab Validates the specified pipeline and starts processing pipeline tasks\cr
#' \link[=datapipeline_add_tags]{add_tags} \tab Adds or modifies tags for the specified pipeline\cr
#' \link[=datapipeline_create_pipeline]{create_pipeline} \tab Creates a new, empty pipeline\cr
#' \link[=datapipeline_deactivate_pipeline]{deactivate_pipeline} \tab Deactivates the specified running pipeline\cr
#' \link[=datapipeline_delete_pipeline]{delete_pipeline} \tab Deletes a pipeline, its pipeline definition, and its run history\cr
#' \link[=datapipeline_describe_objects]{describe_objects} \tab Gets the object definitions for a set of objects associated with the pipeline\cr
#' \link[=datapipeline_describe_pipelines]{describe_pipelines} \tab Retrieves metadata about one or more pipelines\cr
#' \link[=datapipeline_evaluate_expression]{evaluate_expression} \tab Task runners call EvaluateExpression to evaluate a string in the context of the specified object\cr
#' \link[=datapipeline_get_pipeline_definition]{get_pipeline_definition} \tab Gets the definition of the specified pipeline\cr
#' \link[=datapipeline_list_pipelines]{list_pipelines} \tab Lists the pipeline identifiers for all active pipelines that you have permission to access\cr
#' \link[=datapipeline_poll_for_task]{poll_for_task} \tab Task runners call PollForTask to receive a task to perform from AWS Data Pipeline\cr
#' \link[=datapipeline_put_pipeline_definition]{put_pipeline_definition} \tab Adds tasks, schedules, and preconditions to the specified pipeline\cr
#' \link[=datapipeline_query_objects]{query_objects} \tab Queries the specified pipeline for the names of objects that match the specified set of conditions\cr
#' \link[=datapipeline_remove_tags]{remove_tags} \tab Removes existing tags from the specified pipeline\cr
#' \link[=datapipeline_report_task_progress]{report_task_progress} \tab Task runners call ReportTaskProgress when assigned a task to acknowledge that it has the task\cr
#' \link[=datapipeline_report_task_runner_heartbeat]{report_task_runner_heartbeat} \tab Task runners call ReportTaskRunnerHeartbeat every 15 minutes to indicate that they are operational\cr
#' \link[=datapipeline_set_status]{set_status} \tab Requests that the status of the specified physical or logical pipeline objects be updated in the specified pipeline\cr
#' \link[=datapipeline_set_task_status]{set_task_status} \tab Task runners call SetTaskStatus to notify AWS Data Pipeline that a task is completed and provide information about the final status\cr
#' \link[=datapipeline_validate_pipeline_definition]{validate_pipeline_definition} \tab Validates the specified pipeline definition to ensure that it is well formed and can be run without error
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname datapipeline
#' @export
datapipeline <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
config <- merge_config(
config,
list(
credentials = credentials,
endpoint = endpoint,
region = region
)
)
svc <- .datapipeline$operations
svc <- set_config(svc, config)
return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
.datapipeline <- list()
.datapipeline$operations <- list()
.datapipeline$metadata <- list(
service_name = "datapipeline",
endpoints = list("*" = list(endpoint = "datapipeline.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "datapipeline.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "datapipeline.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "datapipeline.{region}.sc2s.sgov.gov", global = FALSE)),
service_id = "Data Pipeline",
api_version = "2012-10-29",
signing_name = "datapipeline",
json_version = "1.1",
target_prefix = "DataPipeline"
)
.datapipeline$service <- function(config = list()) {
handlers <- new_handlers("jsonrpc", "v4")
new_service(.datapipeline$metadata, handlers, config)
}
|
05e11d469a83985f44a1a716616bfd627ebc357a
|
0754d534933ecb6d66d8193b00b0d3ba95eee10b
|
/man/hmdb_protein.Rd
|
995b830ea55b4f8688b72335e64bdd37cb0cfc1f
|
[] |
no_license
|
vjcitn/hmdbQuery
|
a5f41aa6d13181b3df312114e4da73e705c78f00
|
cb888316a26bbef64e75ebf5ecbbec78448aff40
|
refs/heads/master
| 2022-11-05T16:29:09.640871
| 2022-10-29T19:59:55
| 2022-10-29T19:59:55
| 104,580,306
| 8
| 4
| null | 2022-10-29T19:59:30
| 2017-09-23T16:04:08
|
R
|
UTF-8
|
R
| false
| true
| 417
|
rd
|
hmdb_protein.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{hmdb_protein}
\alias{hmdb_protein}
\title{hmdb_protein: Sept 2017 extract from hmdb.ca of direct protein associations}
\format{
S4Vectors DataFrame
}
\source{
www.hmdb.ca xml
}
\usage{
hmdb_protein
}
\description{
hmdb_protein: Sept 2017 extract from hmdb.ca of direct protein associations
}
\keyword{datasets}
|
3a55b7eeb1e17bf3cab5ab9e720af69fcfaabce4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mnreadR/examples/logMARcorrect.Rd.R
|
fe129b6b4618d6db34c8b14c844bb374fb469eb9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
r
|
logMARcorrect.Rd.R
|
library(mnreadR)
### Name: logMARcorrect
### Title: Print size correction for non-standard viewing distance
### Aliases: logMARcorrect
### ** Examples
# inspect the strucutre of the dataframe
head(data_low_vision, 10)
# run the correction
data_low_vision_new <- logMARcorrect(data_low_vision, ps, vd)
# inspect the structure of the newly created dataframe
head(data_low_vision_new, 10)
|
0e991ccc91d37cd2fe1f21d756ac179e1ace5740
|
7efbeabade0b3137d2cb6a12ede18c30617bc15e
|
/plot1.R
|
5f600f5c5c874be94f051303857ec529e416e5b0
|
[] |
no_license
|
smuralid1/ExData_Plotting1
|
01195460b97e3fa9d91a02da1ff64f200d702652
|
71623c5910c783a263dc556acc540e68f025f121
|
refs/heads/master
| 2021-01-15T14:37:57.511922
| 2016-01-10T18:41:27
| 2016-01-10T18:41:27
| 49,365,366
| 0
| 0
| null | 2016-01-10T12:12:53
| 2016-01-10T12:12:53
| null |
UTF-8
|
R
| false
| false
| 300
|
r
|
plot1.R
|
Data<-read.csv2("household_power_consumption.csv")
subset_data<-subset(Data,Date=='1/2/2007'|Date=='2/2/2007')
png(file = "plot1.png")
hist(as.numeric(as.character(subset_data$Global_active_power)),col = "red",main="Global Active Power",ylab="Frequency",xlab = "Global Active Power (kilowatts)")
|
c8265f79d56e9e8afcd4d0aedfd23fbc059db5fa
|
a0b71fb24d340c23c14cf56b1cf0913d591f3abf
|
/scripts/pls_analysis.R
|
25a0c2e63729d9f345ee6dd2a15bf1428116c429
|
[] |
no_license
|
ambuneves/Bio722_indiv_proj
|
6395bb665349b587120879739143d10f8a2f3f5e
|
21a2001d6e4c70eb1c75e18c60e030786a2a4f96
|
refs/heads/main
| 2023-04-12T16:58:36.843086
| 2021-05-03T16:44:09
| 2021-05-03T16:44:09
| 363,979,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,163
|
r
|
pls_analysis.R
|
#### Load and install required packages ####
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("mixOmics") #For PLS-DA
BiocManager::install("tximport") #To import gene counts from Salmon
BiocManager::install("GenomeInfoDb")
BiocManager::install("org.Dm.eg.db")
BiocManager::install("TxDb.Dmelanogaster.UCSC.dm6.ensGene")
install.packages("geomorph") #For shape landmark alignment and 2BPLS
install.packages("pls")
library(mixOmics)
library(tximport)
library(GenomeInfoDb)
library(org.Dm.eg.db)
library(TxDb.Dmelanogaster.UCSC.dm6.ensGene)
library(geomorph)
library(pls)
#### Set up data ####
#set up the working directory
setwd("/Users/amandan/Desktop/grad_classes/bio722/individual_project/data")
### Load in gene counts
#define path to count files
quant_files <- file.path("dgrp_counts", list.files("dgrp_counts"), "quant.sf")
#read in the sample metadata
sample_info <- read.csv("dgrp-sample_info.csv")
#order sample_info so that it matches quant_files
sample_info <- sample_info[order(sample_info$fastqFile),]
#add a column for "Line" so that lines can be matched with the shape data Line column
sample_info$LineNum <- gsub("_|_1|_2|-1|-2|_T1|T2", "", x = sample_info$lineName)
### Load in shape data
raw_shape <- read.csv("BothLabs_Wings_28Oct.csv")
#I am only going to work with the flies from the Dworkin lab, not the Houle lab, so subset these
shape_data <- raw_shape[grep("Dwo", raw_shape$Lab),]
#Combine $Line and $Ind into one column for easier identification
shape_data$ID <- paste(shape_data$Line, shape_data$Ind, sep = "_")
#Adjust csize (centroid size, a measure of wing size), needs to be multiplied by the scale
shape_data$Csize <- shape_data$Csize * shape_data$Scale
#There are 4 replicates for each line. Prior work has shown the differences between replicates
#(and sexes) are negligible, so to make things easy I am going to only use the 3rd replicate for each sample
shape_data <- shape_data[grep("rep3", shape_data$Rep),]
#### Data set matching ####
#Since the exact lines measured for the RNA seq and shape analysis might not be the same,
#I want to subset both datasets so that only individuals from matching lines are kept (otherwise, they are not really comparable)
as.integer(sample_info$LineNum)
shape_data$LineNum
gene_sample <- sample_info[sample_info$LineNum %in% shape_data$LineNum,]
dim(gene_sample) #only contains the 81 matching lines
shape_sample <- shape_data[shape_data$LineNum %in% sample_info$LineNum,]
dim(shape_sample) #now contains 4496 shape measurements
#Back to the counts, use the metadata to give names to the quant files, while removing file extensions from sample names
names(quant_files) <- gsub(".txt.gz|.fastq.gz", "", x = sample_info$fastqFile)
#subset quant_files so that it contains only the matching lines, as in gene_sample
quant_files <- quant_files[sample_info$LineNum %in% shape_data$LineNum]
#Now that the data has been subsetted and R knows which files to read in, I am going to import the sample counts
#Setting up my gene to transcript identifier
txdb <- TxDb.Dmelanogaster.UCSC.dm6.ensGene
k <- keys(txdb, keytype = "TXNAME")
tx2gene <- select(x = txdb, keys = k, "GENEID", "TXNAME")
#Import the count files using Tximport
txi <- tximport(quant_files,
type = "salmon",
tx2gene = tx2gene,
countsFromAbundance="scaledTPM")
cts <- txi$counts #Make the counts their own object
dim(cts) #Looks to be the right number of samples (87 columns)
#For a quick check to make sure things imported okay, I am going to check out the counts for
#vestigial (vg), which is known to be expressed in the wing
cts[rownames(cts) == 'FBgn0003975',] #Looks good
#checking eyeless FBgn0005558, should not be in the wing
cts[rownames(cts) == 'FBgn0005558',] #Looks good (not exactly 0, which is to be expected, but close)
#To make my life easier, I am only going to look at a subset of genes, one for which we expect to be expressed during
#wing development in the wing tissue
#I downloaded a list of genes associated with wing development from FlyBase.org
wing_genes <- read.csv("FlyBase_IDs_wing_development.txt", header = FALSE)
cts_wing <- cts[rownames(cts) %in% wing_genes$V1,]
dim(cts_wing) #This leaves us with 372 genes from an original value of 13701
#Turn cts into a 3D array so that it behaves well with geomorph two.b.pls
gene_arr <- array(data = cts_wing, dim = c(372, 1, 87), dimnames = list(rownames(cts_wing), "count", gene_sample$lineName))
#The shape coordinates have already been aligned in order to remove the effects of variation
#in location, orientation, and scale. This ensures what we are left with is shape data
#and not size data
coords <- shape_sample[,10:105] #extract just the landmark coordinates
coords_arr <- arrayspecs(coords, p = 48, k = 2) #turn the landmark coordinates into a 3D array, which behaves better with geomorph
plotAllSpecimens(coords_arr) #plot the landmark coordinates. Does this look like a wing to you?
#To finish setting up the data, I am going to rename the dimensions of coords_arr so things are easier to look at and keep track of
lm_num <- 1:48
dim_names <- c("X", "Y")
line_names <- as.character(shape_sample$ID)
dimnames(coords_arr) <- list(lm_num, dim_names, line_names)
#### 2BPLS Analysis ####
#geomorph two.b.pls
#The considerations here are that we need a 3d landmark array and a 2d variable matrix (for gene counts)
#Additionally, the function assumes that all specimens are in the same order
#Again, I am going to ignore replicates and just take one of each sample for both the
#shape data and the expression data
#Turn the gene data into a 2d matrix (rows = specimens, columns = variables)
gene_mat <- two.d.array(gene_arr)
rownames(gene_mat)
gene_subset <- gene_mat[-grep("_2|-2|_T2", rownames(gene_mat)),] #remove 2nd reps
rownames(gene_subset) <- gsub("_1|-1|_T1", "", rownames(gene_subset)) #fix row names
#Remove two (out of four) of the 440 samples manually
grep("440", rownames(gene_subset))
rownames(gene_subset)[grep("440", rownames(gene_subset))]
gene_subset <- gene_subset[-c(35,56,73),]
grep("440", rownames(gene_subset))
dim(gene_subset) #left with 72 unique samples
#Do the same for shape_sample, this time using match()
shape_subset <- shape_sample[match(rownames(gene_subset), shape_sample$LineNum),]
#Remake the array
shape_subset_arr <- arrayspecs(shape_subset[,10:105], p = 48, k = 2)
line_names <- as.character(shape_subset$LineNum)
dimnames(shape_subset_arr) <- list(lm_num, dim_names, line_names)
#two.b.pls, consider adding log size as another matrix
gm_2bpls <- two.b.pls(shape_subset_arr, gene_subset)
gm_2bpls$right.pls.vectors #first column is vecotr with greatest amount of covariation
gm_2bpls$left.pls.vectors #shape effects
str(gm_2bpls$svd) #$d is singular values, $u is vector of shape weights, $vt is vector of gene weights
gm_2bpls$svd$d[1]/sum(gm_2bpls$svd$d) #first singular value accounts for ... covariation
hist(abs(gm_2bpls$right.pls.vectors[,1]))#larger absolute value, the more weight they contribute
which.max(abs(gm_2bpls$right.pls.vectors[,1]))
gm_2bpls$right.pls.vectors[175,1]
sort(abs(gm_2bpls$right.pls.vectors[,1]), decreasing = TRUE)
summary(gm_2bpls)
#### sparse PLS regression with mixOmics ####
shape_subset_mat <- two.d.array(shape_subset_arr)
X <- shape_subset_mat
Y <- gene_subset
spls_results <- spls(X, Y, keepX=c(96,96), keepY = c(5,5)) #Keep all landmark coordinates but only top 5 genes
gene_loadings <- spls_results$loadings$Y #give the loading vectors their own object
head(sort(abs(gene_loadings[,1]), decreasing = TRUE)) #sort the loading vectors of the first component
head(sort(abs(gene_loadings[,2]), decreasing = TRUE)) #sort the loading vectors of the second component
#### PLS regression (SIMPLS) with pls ####
simpls_pls <- simpls.fit(X, Y, ncomp = 2) #fit the regression using the SIMPLS method
head(sort(abs(simpls_pls$Yloadings[,1]), decreasing = TRUE)) #sort the loading vectors of the first component
head(sort(abs(simpls_pls$Yloadings[,2]), decreasing = TRUE)) #sort the loading vectors of the second component
|
acfa36729185df712340d9f365bd4061395606ab
|
6e557dcce1ef82647b2a840ffd0eb580d4023964
|
/R/visualization.R
|
e4e0f11104e3e131f83ab85959e52cb083f68f15
|
[
"MIT"
] |
permissive
|
pat-s/pathogen-modeling
|
caeeb5bc31ed4a9efb3cb6dcf4fcaa12ec091930
|
f8ff9ebf0484c53204f9e36f2f42e3fd0723a4dc
|
refs/heads/master
| 2020-04-10T05:57:27.214165
| 2019-11-07T08:50:43
| 2019-11-07T08:50:43
| 160,842,050
| 21
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,551
|
r
|
visualization.R
|
#' @title Visualization of partitions used in benchmarking
#' @param resampling_sp A benchmark/resampling object with a spatial resampling
#' @param resampling_nsp A benchmark/resampling object with a non-spatial resampling
vis_partitions = function(data, resampling_sp, resampling_nsp) {
coordinates <- data[, c("x", "y")]
data$geometry <- NULL
data$x = NULL
data$y = NULL
task_spatial <- makeClassifTask(id = "pathogen_data", data = data,
target = "diplo01", positive = "1",
coordinates = coordinates)
plist <- createSpatialResamplingPlots(task_spatial, list("CV" = resampling_nsp$results$diplodia$classif.binomial,
"SpCV" = resampling_sp$results$diplodia$classif.binomial),
crs = 32630, repetitions = 1, point.size = 1.5,
y.axis.breaks = c(42.8, 43.2),
x.axis.breaks = c(-2, -3),
axis.text.size = 18)
sp <- plot_grid(plist[["Plots"]][[1]], plist[["Plots"]][[2]],
plist[["Plots"]][[3]], ncol = 3, nrow = 1,
labels = plist[["Labels"]][1:3], label_size = 18)
sp2 <- plot_grid(plist[["Plots"]][[4]], plist[["Plots"]][[5]], NULL,
ncol = 3, nrow = 1, labels = plist[["Labels"]][4:5],
label_size = 18)
nsp <- plot_grid(plist[["Plots"]][[6]], plist[["Plots"]][[7]],
plist[["Plots"]][[8]], ncol = 3, nrow = 1,
labels = plist[["Labels"]][6:8],
label_size = 18)
nsp2 <- plot_grid(plist[["Plots"]][[9]], plist[["Plots"]][[10]], NULL,
ncol = 3, nrow = 1, labels = plist[["Labels"]][9:10],
label_size = 18)
final_plot <- plot_grid(sp, sp2, nsp, nsp2, ncol = 1)
return(final_plot)
}
#' @title Visualization of optimization paths
#' @param models List of Benchmark/Resample results
#' @param n_folds Number of folds to visualize (e.g. 1-5)
vis_opt_path = function(models) {
opt_path_list = map(models, function(.i)
imap(
.i$extract[1:5],
~getOptPathY(.x$opt.path) %>%
as_tibble() %>%
mutate(iter = 1:length(value)) %>%
mutate(value = cummin(value)) %>%
mutate(fold_id = glue("Fold {.y}"))
) %>%
bind_rows()
)
opt_path_plot = imap(opt_path_list, ~
ggline(.x,
x = "iter",
y = "value",
plot_type = "l",
title = "",
ylab = "Brier score",
xlab = "Iteration",
numeric.x.axis = T,
point.size = 0.03, facet.by = "fold_id"
) +
ggtitle(.y) +
geom_vline(xintercept = 30, linetype = "dashed") +
theme(axis.title.y =
element_text(margin = margin(t = 0, r = 20,
b = 0, l = 0))) +
facet_wrap(~fold_id, scales = "free") # we use the ggplot2 version here as we can specify `scales` which is not possible in the ggpubr argument
)
}
#' @title Visualization of tuning effects
#' @param models List of Benchmark/Resample results
#' @param model_name Name of algorithm for title
#' @param resampling String specifying the resample setting
#' @param hyperparam String of 2L specyfying the hyperparamters to compare
#' @param xlim x-axis limits
#' @param ylim y-axis limits
#' @param default Default hyperparameter settings (x and y). A red cross will
#' denote the default settings in the plot.
vis_tuning_effects = function(models, model_name, resampling, hyperparameter,
xlim, ylim, default) {
plot_list = pmap(list(x = models, y = model_name, z = hyperparameter,
xlim = xlim, ylim = ylim, default = default),
function(x, y, z, xlim, ylim, default) {
hyperpar_effect <- generateHyperParsEffectData(x, partial.dep = TRUE)
# find best combinations by folds
hyperpar_effect$data %<>%
group_by(nested_cv_run) %>%
filter(brier.test.mean == min(brier.test.mean, na.rm = TRUE)) %>%
ungroup() %>%
mutate(nested_cv_run = as.factor(nested_cv_run))
plot = ggplot(hyperpar_effect$data, aes_string(x = z[1], y = z[2],
label = "nested_cv_run")) +
geom_point(alpha = 0.1) +
geom_point(aes(x = default[1], y = default[2]), shape = 4,
color = "red", size = 3) + # default values for num.trees and mtry
coord_cartesian(
ylim = ylim,
xlim = xlim
) +
labs(
title = glue("{y} ({resampling})"),
subtitle = glue("{z[1]} and {z[2]}")
) +
geom_label_repel(
data = subset(
hyperpar_effect$data,
as.integer(nested_cv_run) <= 5),
min.segment.length = unit(0, "lines")) +
#scale_y_continuous(breaks = seq(1, 11, 2), labels = seq(1, 11, 2)) +
theme_pubr() +
theme(
axis.text = element_text(size = 12),
plot.margin = unit(c(0.25, 0.2, 0.5, 0.2), "cm"),
axis.title.y = element_text(angle = -90, vjust = 1)
)
if (y == "SVM") {
plot = plot +
labs(
x = bquote(2^{
cost
}),
y = bquote(2^{
gamma
})
)
}
plot_marg <- ggMarginal(plot,
type = "density",
fill = "transparent", size = 20
)
})
}
#' @title Save plot and convert to pdf
#' @param plot A ggplot2 plot
#' @param path The path where to save the plot (including filename)
#' @param nrow number of rows
#' @param ncol number of columns
#'
#' @details Same behaviour as [cowplot::save_plot]
save_plot_custom = function(plot, path, nrow, ncol) {
save_plot(file_out(path),
plot, ncol = ncol, nrow = nrow)
system("cd analysis/figures && exec ls -1 *.png |
parallel convert '{}' '{.}.pdf'")
}
#' @title Create continuos and nominal LaTeX tables from dataset
#' @param data [data.frame]
#' @param type Options: "continuous" or "nominal"
create_dataset_tables = function(data, type = "continuous", drop_vars = NULL,
stats = NULL) {
if (is.null(stats)) {
stats = c("n", "min", "q1", "median", "mean", "q3", "max",
"s", "iqr", "na")
}
if (!is.null(drop_vars)) {
data %<>%
dplyr::select(-!!drop_vars)
}
if (type == "continuous") {
table = tableContinuous(data[, sapply(data, is.numeric)],
longtable = FALSE, cumsum = FALSE,
cap = "Summary of numerical predictor variables",
label = "table:descriptive_summary_numeric",
stats = stats)
} else if (type == "nominal") {
table = tableNominal(data[, !sapply(data, is.numeric)],
longtable = FALSE, cumsum = FALSE,
cap = "Summary of nominal predictor variables",
label = "table:descriptive_summary_non_numeric",
stats = stats)
}
return(table)
}
|
93ea9a48b88da171f84ed0596c332d3492c84526
|
b5c723088885098e0847db9d55d3df68c354e8cf
|
/R/S.SI.r
|
52e6b7e64a1f7397569001abb8914bd955a82d0e
|
[] |
no_license
|
psirusteam/TeachingSampling
|
5fd95cd428bb55319a3a0273c52aa38f14d19a9b
|
2659f3b16907055a14aabd722be5bfdd5bd9fad6
|
refs/heads/master
| 2021-06-04T04:34:18.096049
| 2020-04-21T19:55:59
| 2020-04-21T19:55:59
| 101,214,525
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 202
|
r
|
S.SI.r
|
#' @export
S.SI<-function(N,n,e=runif(N))
{
c<-matrix(0,N,1)
dec<-matrix(0,N,1)
sam<-matrix(0,N,1)
for(k in 1:N){
c[k]<-(n-dec[k])/(N-k+1)
if(e[k]<c[k]){
dec[k:N]<-dec[k]+1
sam[k]<-k}
}
sam
}
|
d0f6dea6d4dbb27e1908bc168fa5c6dc10f9f0b7
|
6f110bbd6ecc249c6a5301d92756d38075c4becf
|
/R/myviolin_plot.R
|
e3f33d39e2a8835dcadf71a705efa9dcfd500664
|
[
"MIT"
] |
permissive
|
Lian-Lab/LIANLAB
|
78b0555fbe8bb2dbe65271bb9580d83056fb92b4
|
379749c2eff6c1adbfc075f69de4255300303634
|
refs/heads/master
| 2023-05-31T10:07:09.881767
| 2021-06-22T09:56:54
| 2021-06-22T09:56:54
| 342,519,064
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,916
|
r
|
myviolin_plot.R
|
#' This function is used to plot the stacked violinplot of single cell RNAseq data
#' @details This function is used to plot the stacked violinplot of single cell RNAseq data or any
#' seurat object for specified genes expressed on celltype clusters.
#' @param gene a vector of the genes name to plot
#' @param seurat_object the seurat object of your data
#' @param cluster choose to cluster you interested
#' @param limits.max numeric,the max legend value to regulate the color levels
#' @param width the width of the figure
#' @param height the height of the figure
#' @param flip logical,set the direction of coordinate axis
#' @param filename the name of the generated file
#' @param text.size numeric,the text size of gene names
#' @param Mean whether choose to mean
#' @param col col of the figure
#'
#' @importFrom ggplot2 geom_violin scale_color_gradientn ggtitle
#' @importFrom scater multiplot
#' @export
#'
#' @examples
#' \dontrun{
#' input.file <- system.file('extdata','pbmc_1k.RDS',package = 'LIANLABDATA')
#' pbmc_1k <- readRDS(input.file)
#' gene=c('CD8A','CD4')
# stacked_violin_plot(gene,seurat_object = pbmc_1k,text.size = 10,flip = F,
# filename = "myviolinplot",width = 12,height = 10,limits.max = 9)
#' }
stacked_violin_plot=function(gene,seurat_object,cluster=NULL,limits.max=7,
width=13,height=10.3,flip=T,filename="",text.size=10,Mean=T,
col=NULL){
if (is.null(col)) {
colorful <- LIANLAB::colorful
col = colorful[["colors"]]
}
if(length(cluster)>0){
seurat_object=subset(seurat_object,idents = cluster)
}
ave_expression=AverageExpression(seurat_object,assays = "RNA")$RNA
ave_expression=log2(ave_expression+1)
data_matrix=seurat_object@assays$RNA@data
plot.list=list()
#g=gene[5]
no=1
#gene=c(gene[1],gene,gene[length(gene)])
for (g in gene) {
ave_gene_choose=ave_expression[which(rownames(ave_expression)==g),]
data_matrix_choose=as.data.frame(data_matrix)[which(rownames(data_matrix)==g),]
#data_matrix_choose=as.data.frame(data_matrix_choose)
df=data.frame(expression=as.numeric(data_matrix_choose),cluster=as.character(seurat_object@active.ident))
mean=vector()
for (i in df$cluster) {
mean=c(mean,ave_gene_choose[i])
}
df[,"mean"]=as.data.frame(mean)
df=as.data.frame(df)
if(!flip){
df[,"cluster"]=factor(df$cluster,levels=levels(seurat_object))
if(Mean==F){
p <- ggplot(df, aes(x=cluster, y=expression, fill= cluster, color=cluster))+
geom_violin(scale="width") +
labs(title=paste(g), y ="Expression", x="Cluster")+
#theme_classic() +
scale_fill_manual(values = col)+
scale_color_manual(values = col)+
theme(axis.title.y = element_blank())+
#theme(axis.ticks.y = element_blank())+
#theme(axis.line.y = element_blank())+
#theme(axis.text.y = element_blank())+
theme(axis.title.x = element_blank())+
theme(legend.position="none" )
}else{
p <- ggplot(df, aes(x=cluster, y=expression, fill= mean, color=mean))+
geom_violin(scale="width") +
labs(title=paste(g), y ="Expression", x="Cluster")+
#theme_classic() +
scale_color_gradientn(colors = c("#FFFF00", "#FFD000","#FF0000","#360101"),
limits=c(0,limits.max))+
scale_fill_gradientn(colors = c("#FFFF00", "#FFD000","#FF0000","#360101"),
limits=c(0,limits.max))+
theme(axis.title.y = element_blank())+
#theme(axis.ticks.y = element_blank())+
#theme(axis.line.y = element_blank())+
#theme(axis.text.y = element_blank())+
theme(axis.title.x = element_blank())+
theme(legend.position="none" )
}
if(no!=length(gene)){
p<-p+
theme( axis.line.x=element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank())
}else{
p<-p+
theme(axis.text.x = element_text(size = 10,vjust = 0.5,face = "bold",color = "black"))
}
#p<-p+theme(plot.title = element_text(size=text.size,face="bold",hjust = 0.5))
p=p+theme(panel.border = element_rect(fill = "NA",size = 0.5,color = "black"),
panel.background = element_blank(),
panel.grid = element_blank())
if(no==1){
plot.margin=unit(c(0.2, 0.5, 0.2, 0.5), "cm")
p=p+theme(legend.title = element_text(size = 9,face = "bold"),
legend.text = element_text(size = 9,face = "bold"),
legend.key.size = unit(1, "lines"))
legend.position="none"
}else if(no==length(gene)){
plot.margin=unit(c(-0.3, 0.5, 0.2, 0.5), "cm")
legend.position="none"
}else{
plot.margin=unit(c(-0.74, 0.5, 0, 0.5), "cm")
legend.position="none"
}
p=p+xlab("") + ylab(g) + ggtitle("") +
theme(legend.position = legend.position,
#axis.text.x = element_blank(),
axis.text.y = element_blank(),
#axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.title.y = element_text(size=text.size,face="bold",hjust = 0.5),
plot.margin = plot.margin )
if(length(plot.list)==0){
plot.list=list(p)
}else{
plot.list=c(plot.list,list(p))
}
no=no+1
}else{
####******########
df$cluster=factor(df$cluster,levels=rev(levels(seurat_object)))
if(Mean==F){
p <- ggplot(df, aes(x=cluster, y=expression, fill= cluster, color=cluster))+
geom_violin(scale="width") +
labs(title=paste(g), y ="Expression", x="Cluster")+
#theme_classic() +
scale_fill_manual(values = col)+
scale_color_manual(values = col)+
theme(axis.title.y = element_blank())+
#theme(axis.ticks.y = element_blank())+
#theme(axis.line.y = element_blank())+
#theme(axis.text.y = element_blank())+
theme(axis.title.x = element_blank())+
theme(legend.position="right")
}else{
p <- ggplot(df, aes(x=cluster, y=expression, fill= mean, color=mean))+
geom_violin(scale="width") +
labs(title=paste(g), y ="Expression", x="Cluster")+
#theme_classic() +
scale_color_gradientn(colors = c("#FFFF00", "#FFD000","#FF0000","#360101"),
limits=c(0,limits.max))+
scale_fill_gradientn(colors = c("#FFFF00", "#FFD000","#FF0000","#360101"),
limits=c(0,limits.max))+
theme(axis.title.y = element_blank())+
#theme(axis.ticks.y = element_blank())+
#theme(axis.line.y = element_blank())+
#theme(axis.text.y = element_blank())+
theme(axis.title.x = element_blank())+
theme(legend.position="right")
}
if(no!=length(gene)){
p<-p+
theme( axis.line.x=element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank())
}else{
p<-p+
theme(axis.text.x = element_text(size = 10,vjust = 0.2,face = "bold",color = "black"))
}
p<-p+theme(plot.title = element_text(size=4,face="bold",hjust = 0.5,color = "black"))
p=p+theme(panel.border = element_rect(fill = "NA",size = 0.5,color = "black"),
panel.background = element_blank(),
panel.grid = element_blank())
if(no==1){
plot.margin=unit(c(0.5, 0.5, 0.5, 0.2), "cm")
p=p+theme(legend.title = element_text(size = 9,face = "bold"),
legend.text = element_text(size = 9,face = "bold"),
legend.key.size = unit(0.5, "lines"))
#legend.position="left"
}else if(no==length(gene)){
plot.margin=unit(c(0.5, 0.2, 0.5, 0.3), "cm")
#legend.position="none"
}else{
plot.margin=unit(c(0.5, 0, 0.5, -0.11), "cm")
#legend.position="none"
}
if(no==1){
p=p+xlab("") + ylab("") +
theme(legend.position = "none",
axis.text.x = element_blank(),
#axis.text.y = element_blank(),
axis.ticks.x = element_blank(),
#axis.ticks.y = element_blank(),
#axis.title.x = element_text(size=10,face="bold",hjust = 0.5),
plot.title = element_text(colour = "black", face = "bold",
size = text.size, vjust = 0.2),
axis.text.y = element_text(size=10,face="bold",hjust = 1,color = "black"),
plot.margin = plot.margin )+coord_flip()
}else if(no==length(gene)){
p=p+xlab("") + ylab("") +
theme(legend.position = "right",
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
#axis.title.x = element_text(size=10,face="bold",hjust = 0.5),
plot.title = element_text(colour = "black", face = "bold",
size = text.size, vjust = 0.2),
#axis.text.y = element_text(size=10,face="bold",hjust = 1,color = "black"),
plot.margin = plot.margin )+coord_flip()
}else{
p=p+xlab("") + ylab("") +
theme(legend.position = "none",
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
#axis.title.x = element_text(size=10,face="bold",hjust = 0.5),
plot.title = element_text(colour = "black", face = "bold",
size = text.size, vjust = 0.2),
#axis.text.y = element_text(size=10,face="bold",hjust = 1,color = "black"),
plot.margin = plot.margin )+coord_flip()
}
if(length(plot.list)==0){
plot.list=list(p)
}else{
plot.list=c(plot.list,list(p))
}
no=no+1
}
}
pdf(paste0(filename,"_vln_manual",".pdf"),width = width,height = height)
if(flip){
m = multiplot(plotlist = plot.list,cols = length(gene))
}else{
m = multiplot(plotlist = plot.list,cols=1)
}
dev.off()
if(flip){
m = multiplot(plotlist = plot.list,cols = length(gene))
}else{
m = multiplot(plotlist = plot.list,cols=1)
}
}
#' Plot per-gene violin plots colored by cluster mean expression
#'
#' For each requested gene, draws a violin plot of single-cell expression
#' across clusters, with violins filled by the cluster's log2(mean + 1)
#' expression, and saves it to "<gene>_vln_manual.pdf" (one file per gene).
#'
#' @param gene a vector of the gene names to plot
#' @param seurat_object the seurat object of your data
#' @param cluster optional vector of cluster identities to subset to
#' @param axis logical, whether to show the x axis (line, labels and ticks)
#' @param legend_position position of legend (passed to ggplot2 theme())
#' @param limits.max numeric, the max legend value to regulate the color levels
#' @param gene_name logical, whether to show the gene name as a bold title
#' @param width the width of each PDF figure
#' @param height the height of each PDF figure
#' @param flip logical, set the direction of coordinate axis
#'
#' @export
#'
#' @examples
#' \dontrun{
#' input.file <- system.file('extdata','pbmc_1k.RDS',package = 'LIANLABDATA')
#' pbmc_1k <- readRDS(input.file)
#' plot_violin_manual(gene = 'CD8A', seurat_object = pbmc_1k,
#'                    limits.max = 9, width = 3.6, height = 2)
#' }
plot_violin_manual <- function(gene, seurat_object, cluster = NULL, axis = FALSE,
                               legend_position = "none", limits.max = 7,
                               gene_name = FALSE, width = 3.6, height = 2,
                               flip = FALSE) {
  # Optionally restrict the plot to a subset of cluster identities.
  if (length(cluster) > 0) {
    seurat_object <- subset(seurat_object, idents = cluster)
  }
  # Per-cluster mean expression on log2 scale drives the fill/color gradient.
  ave_expression <- AverageExpression(seurat_object, assays = "RNA")$RNA
  ave_expression <- log2(ave_expression + 1)
  data_matrix <- seurat_object@assays$RNA@data
  for (g in gene) {
    # Skip (with a warning) genes absent from the expression matrix instead
    # of aborting the whole loop with a subscript error.
    if (!g %in% rownames(data_matrix)) {
      warning("gene not found in expression data, skipped: ", g, call. = FALSE)
      next
    }
    df <- data.frame(expression = data_matrix[g, ],
                     cluster = as.character(seurat_object@active.ident))
    # Vectorized lookup of each cell's cluster mean (replaces the original
    # element-by-element c() growing loop; produces identical values).
    df$mean <- ave_expression[g, ][df$cluster]
    df$cluster <- factor(df$cluster, levels = levels(seurat_object))
    p <- ggplot(df, aes(x = cluster, y = expression, fill = mean, color = mean)) +
      geom_violin(scale = "width") +
      labs(title = paste(g), y = "Expression", x = "Cluster") +
      scale_color_gradientn(colors = c("#FFFF00", "#FFD000", "#FF0000", "#360101"),
                            limits = c(0, limits.max)) +
      scale_fill_gradientn(colors = c("#FFFF00", "#FFD000", "#FF0000", "#360101"),
                           limits = c(0, limits.max)) +
      theme(axis.title.y = element_blank()) +
      theme(axis.title.x = element_blank()) +
      theme(legend.position = legend_position)
    if (!axis) {
      # Hide the x axis entirely.
      p <- p +
        theme(axis.line.x = element_blank(),
              axis.text.x = element_blank(),
              axis.ticks.x = element_blank())
    } else {
      p <- p +
        theme(axis.text.x = element_text(size = 4, vjust = 0.5, face = "bold"))
    }
    if (!gene_name) {
      p <- p + theme(plot.title = element_blank())
    } else {
      p <- p + theme(plot.title = element_text(size = 10, face = "bold", hjust = 0.5))
    }
    if (isTRUE(flip)) {
      p <- p + coord_flip()
    }
    p <- p + theme(panel.border = element_rect(fill = "NA", size = 0.8, color = "black"),
                   panel.background = element_blank(),
                   panel.grid = element_blank())
    # Write the PDF; 'finally' guarantees the device is closed even when
    # print() errors, so later plots never land on a dangling device.
    pdf(paste0(g, "_vln_manual", ".pdf"), width = width, height = height)
    tryCatch(print(p), finally = dev.off())
    print(p)  # also echo to the current (interactive) device, as before
  }
}
|
44f0e18eb6938032977da1371b64d8e19e6bb4b1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/likert/examples/likert.Rd.R
|
6e6a168ca496fa5186a1b061314700f9ff8e5b16
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
likert.Rd.R
|
# Example: analyzing Likert-type survey items with likert().
library(likert)

# PISA reading-habit items all share the "ST25Q" question prefix.
data(pisaitems)
reading_cols <- startsWith(names(pisaitems), "ST25Q")
reading_items <- pisaitems[, reading_cols]
names(reading_items) <- c(
  "Magazines", "Comic books", "Fiction",
  "Non-fiction books", "Newspapers"
)

# Fit, summarize and visualize the item-level response distributions.
likert_fit <- likert(reading_items)
summary(likert_fit)
plot(likert_fit)
|
55b1ea5041823af5597d8be670aec4abe4b4ab05
|
4fcf35e59be301899576b27f9cd2316af01da09b
|
/R/RNAseqTFValidation.R
|
53e66f638191c8ae1f93529c43d0119fc0aff145
|
[] |
no_license
|
FertigLab/ATACCoGAPS
|
02511811da902d3dd24755d91808b4b80f99e536
|
536cc18d9dcad4acdd20b7bbf0192993d2003a70
|
refs/heads/master
| 2023-02-06T06:16:31.746749
| 2023-01-26T20:16:30
| 2023-01-26T20:16:30
| 213,665,688
| 5
| 0
| null | 2022-04-18T19:37:43
| 2019-10-08T14:28:58
|
R
|
UTF-8
|
R
| false
| false
| 1,859
|
r
|
RNAseqTFValidation.R
|
#' Validate TF Findings with RNA-seq CoGAPS
#'
#' Cross-checks candidate transcription factors suggested by motif matching
#' of ATAC CoGAPS output against matched RNA-seq CoGAPS patterns. For every
#' (ATAC pattern, RNA pattern) pair, the fgsea package tests the TF-regulated
#' gene sets for enrichment among the RNA patternMarker gene ranks.
#'
#' @param TFGenes genes regulated by the TFs as returned by
#'   simpleMotifTFMatch() or findRegulatoryNetworks()
#' @param RNACoGAPSResult CoGAPSResult object from matched RNA-seq data, or,
#'   if matrix = TRUE, a matrix of patternMarker gene ranks (must carry gene
#'   names)
#' @param ATACPatternSet vector of ATAC-derived pattern indices to match
#'   against patterns found in RNA
#' @param RNAPatternSet vector of RNA-derived pattern indices to match
#'   against those found in ATAC
#' @param matrix TRUE if RNACoGAPSResult is already a patternMarker rank
#'   matrix, FALSE (default) if it is a CoGAPS result object
#'
#' @return list of fgsea result tables (each sorted by p-value), one per
#'   ATAC/RNA pattern pair, with the RNA pattern index varying fastest
#' @examples \donttest{
#' gseaList = RNAseqTFValidation(TFMatchResult$RegulatoryNetworks, RNACoGAPS,
#' c(1,3), c(2,7), matrix = FALSE)
#' }
#' @export
RNAseqTFValidation <- function(TFGenes, RNACoGAPSResult, ATACPatternSet,
                               RNAPatternSet, matrix = FALSE) {
  # Obtain the patternMarker gene ranks, either precomputed or via CoGAPS.
  rankMat <- if (matrix) {
    RNACoGAPSResult
  } else {
    CoGAPS::patternMarkers(RNACoGAPSResult)$PatternMarkerRanks
  }
  # Preallocate one slot per ATAC/RNA pattern combination.
  results <- vector(mode = "list",
                    length = length(ATACPatternSet) * length(RNAPatternSet))
  slot <- 1
  for (atacPat in ATACPatternSet) {
    for (rnaPat in RNAPatternSet) {
      enrichment <- fgsea::fgsea(TFGenes[[atacPat]], rankMat[, rnaPat], 50000)
      # Most significant enrichments first.
      results[[slot]] <- enrichment[order(enrichment$pval), ]
      slot <- slot + 1
    }
  }
  results
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.